diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f58b636 --- /dev/null +++ b/.gitignore @@ -0,0 +1,176 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# History files +.Rhistory +.Rapp.history + +# Session Data files +.RData + +# Example code in package build process +*-Ex.R + +# Output files from R CMD build +/*.tar.gz + +# Output files from R CMD check +/*.Rcheck/ + +# RStudio files +.Rproj.user/ + +# produced vignettes +vignettes/*.html +vignettes/*.pdf + +# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 +.httr-oauth + +# knitr and R markdown default cache directories +/*_cache/ +/cache/ + +# Temporary files created by R markdown +*.utf8.md +*.knit.md + +# Shiny token, see https://shiny.rstudio.com/articles/shinyapps.html +rsconnect/ + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk diff --git a/CrossVApy_0.1.0.tar.gz b/CrossVApy_0.1.0.tar.gz deleted file mode 100644 index 0f02a7e..0000000 Binary files a/CrossVApy_0.1.0.tar.gz and /dev/null differ diff --git a/README.md b/README.md deleted file mode 100644 index f16b652..0000000 --- a/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# pyCrossVA - -Package currently includes 4 functions: -- InSilicoVA -- InterVA4 -- InterVA5 -- Tariff2 - -*InSilico has '.' for n/a fields and due to the differences in ways missing data is read for python and R, *0.001* is given in R for python to differentiate n/a from NaNs. 
(Just for InSillicoVA) - -*Guide for data entry.xlsx* was referenced for tariff2 -*who_va_output.csv* is a sample data from the original CrossVA package -*odkDownload.csv* is another sample data that is simulated diff --git a/Readme.rst b/Readme.rst new file mode 100644 index 0000000..ce6bffd --- /dev/null +++ b/Readme.rst @@ -0,0 +1,167 @@ +Background +---------- + +About Verbal Autopsy +^^^^^^^^^^^^^^^^^^^^ + +From `Wikipedia `_: + + A verbal autopsy (VA) is a method of gathering health information about a deceased + individual to determine his or her cause of death. Health information and a + description of events prior to death are acquired from conversations or + interviews with a person or persons familiar with the deceased and analyzed by + health professionals or computer algorithms to assign a probable cause of death. + + Verbal autopsy is used in settings where most deaths are undocumented. Estimates + suggest a majority of the 60 million annual global deaths occur without medical + attention or official medical certification of the cause of death. The VA method + attempts to establish causes of death for previously undocumented subjects, + allowing scientists to analyze disease patterns and direct public health policy + decisions. + + Noteworthy uses of the verbal autopsy method include the Million Death Study in + India, China's national program to document causes of death in rural areas, and + the Global Burden of Disease Study 2010. + +CrossVA +^^^^^^^^ + +CrossVA is a Python package for transforming verbal autopsy data collected using +the 2016 WHO VA instrument (currently, only version 1.5.1) into a format suitable +for openVA. + +The flagship function of this package is the ``transform()`` function, which +prepares raw data from a verbal autopsy questionnaire for use in a +verbal autopsy algorithm. The user can either choose to use a default mapping, +or create a custom one of their own design. The default mappings are listed in +`Currently Supported`_ and can be invoked by passing in a tuple as the mapping +argument in ``(input, output)`` format. + + +Project Status +^^^^^^^^^^^^^^ + +This package is a fleshed-out prototype of the framework MITRE is +proposing for the open source CrossVA project going forward. This is an +alpha version (as of Jan 7, 2018) intended to demonstrate full concept +and flexibility, not for use in research or verbal autopsy evaluations. + + +Simple Usage +------------ + +The simplest way to get started with CrossVA is to invoke the ``transform`` function +with a default mapping, and the path to a CSV containing your raw verbal autopsy +data. + +.. code-block:: python + + from transform import transform + + transform(("2016WHOv151", "InterVA4"), "path/to/data.csv") + +You can also call the transform function on a Pandas DataFrame, if you want to +read in and process the data before calling the function. + +.. code-block:: python + + import pandas as pd + from transform import transform + + data = pd.read_csv("path/to/data.csv") + data = some_special_function(data) + transform(("2016WHOv151", "InterVA4"), data) + + +Currently Supported +-------------------- + +Inputs +^^^^^^^ + +* 2016 WHO Questionnaire from ODK export, v1.5.1 + +2016 WHO documentation can be found +`here. `_ + + +Outputs +^^^^^^^^ + +* InSilicoVA +* InterVA4 + +Roadmap +------- + +This is an alpha version of package functionality, with only limited support. +Future versions and updates will include expanding inputs and outputs, as well as +creating more user-facing features.
+ +Supporting more inputs +^^^^^^^^^^^^^^^^^^^^^^^ + +One component of moving to a production version will be to offer additional +mapping files to support more input formats. The package currently supports +the 2016 WHO v1.5.1 ODK export. + +The following is a list of four additional +inputs already in our sights: + +* PHRMC short +* PHRMC long +* WHO 2012 +* WHO 2016 v1.4.1 + +Expanding outputs +^^^^^^^^^^^^^^^^^^ + +One component of moving to a production version will be to offer additional +mapping files to support more output formats. The package currently supports +mapping to the InterVA4 and InSilicoVA formats. + +The following is a list of +additional outputs for other algorithms to be supported in future versions: + +* Tariff +* Tariff 2.0 +* InterVA5 + + +Expanding user-facing features +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some of the user-facing features in this version are sparser than we would like +for a production-level package. In this vein, we want to prioritize creating +both good documentation and intuitive features for the user, so that the package +is easy to understand and use. + +* Better error messages + + Adding exception classes to distinguish between mapping, configuration, and + data errors, so that it will be more immediately obvious to the user what + the root cause of the error is. + +* Improving speed + + Adding additional validation checks has slowed the algorithm down from its + original proof-of-concept speed. We believe this can be improved further + before the package reaches a production version. + +* More - and more detailed - validation checks + + Being able to convey to the end-user when the data has unexpected properties + or an incorrect format will be essential to allow the user to understand and + correct the issue. + +Style +----- + +This package was written following the Google Python Style Guide and PEP 8 standards. +Tests have been written using doctest. + +License +-------- + +This package is licensed under the GNU GENERAL PUBLIC LICENSE (v3, 2007). +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. diff --git a/configuration.py b/configuration.py new file mode 100644 index 0000000..7c19c4b --- /dev/null +++ b/configuration.py @@ -0,0 +1,482 @@ +""" +Structure for Configuration class +""" + +# !/usr/bin/env python3 +# -*- coding: utf-8 -*- + +from mappings import MapCondition +from utils import report_list +from validation import Validation, Err, Warn +import pandas as pd +import numpy as np + +# MAP_PATH = "resources/mapping_configuration_files/" +# EX_MAP_1 = pd.read_csv(MAP_PATH+"example_config_1.csv") +# EX_MAP_2 = pd.read_csv(MAP_PATH+"example_config_2.csv") +# EX_DATA_1 = pd.read_csv("resources/sample_data/who151_va_output.csv") +# create problematic data +# EX_DATA_2 + + +class Configuration(): + """ Configuration class details the relationship between a set of input + data and output data. It is composed of MapConditions that + transform an input data source (2012 WHO, 2016 WHO 141, 2016 WHO 151, + PHRMC SHORT) into a different data form (PHRMC SHORT, InsillicoVA, + InterVA4, InterVA5, or Tarrif2) for verbal autopsy. + + Attributes: + given_columns (Pandas Series): columns of mapping dataframe. + required_columns (Pandas Series): required columns in mapping data. + main_columns (list): the four main columns required in config_data. + valid_relationships (Pandas Series): contains list of valid + relationships to use in comparisons.
Relationships should be an + attr of Pandas Series object, or be defined as a subclass of + MapCondition. + config_data (Pandas DataFrame): dataframe containing mapping + relationships written out. + given_prereq (Pandas Series): lists pre-requisites referenced in config + data. + new_columns (Pandas Series): lists the new columns to be created with + config data. + source_columns (Pandas Series): lists the source columns required in + the raw input data. + verbose (int): controls default verbosity of printing to console. + process_strings (boolean): whether or not to remove whitespace and + non-alphanumeric characters from strings in condition field and in + raw_data during mapping. + validation (Validation): a validation object containing the validation + checks made + + """ + required_columns = pd.Series(['New Column Name', + 'New Column Documentation', + 'Source Column ID', + 'Source Column Documentation', + 'Relationship', + 'Condition', 'Prerequisite', + ], + name="expected columns") + + main_columns = ["New Column Name", "Source Column ID", + "Relationship", "Condition"] + + valid_relationships = pd.Series(["gt", "ge", "lt", "le", "between", "eq", + "ne", "contains"], + name="valid relationships") + + def __init__(self, config_data, verbose=1, process_strings=True): + """Inits configuration class from a Pandas DataFrame containing + mapping data. + + Args: + config_data (Pandas DataFrame): dataframe representing the mapping + data relationships, in specified format. + verbose (int): should be in range 1-5, + controls print output, where 0=silent. Defaults to 1. + process_strings (boolean): Whether or not to normalize string + conditions and data. Defaults to True. + + Returns: + Nothing + + Examples: + >>> Configuration(EX_MAP_1) + Unvalidated Configuration instance with 16 mapping conditions + + """ + # TODO check for df & # of rows & these 3 columns + # throw cant instantiate error? + + self.config_data = config_data + self.given_columns = pd.Series(config_data.columns, + name="the given columns") + self.process_strings = process_strings + + def plain_info(aseries, new_name): + """ returns Pandas Series without NAs or duplicates renamed to + `new_name` """ + return aseries.drop_duplicates().dropna().rename(new_name) + + self.given_relationships = plain_info(config_data["Relationship"], + "Relationship column") + self.given_prereq = plain_info(config_data["Prerequisite"], + "Prerequisite column") + self.new_columns = plain_info(config_data["New Column Name"], + "New Column Name column") + self.source_columns = plain_info(config_data["Source Column ID"], + "Source Column in Config") + self.verbose = verbose + self.validation = Validation() + + def __str__(self): + """string representation of class""" + return self.__repr__() + + def __repr__(self): + """string representation of class""" + val_status = ("Validated" if self.validation.is_valid() else + "Unvalidated") + return " ".join([val_status, self.__class__.__name__, "instance with", + str(len(self.config_data)), "mapping conditions"]) + + def list_conditions(self): + """Lists the final mapping conditions contained in Configuration object + + Returns: + list: list of MapConditions, where each MapConditions is + created from a row of processed mapping data. 
+ + Examples: + >>> c = Configuration(EX_MAP_1) + >>> c.list_conditions()[:5] + [, + , + , + , + ] + + """ + # TODO config_data["Standalone"] = config_data["Prerequisite"].isnull() + # self.config_data.sort_values("Standalone", ascending=False) + return [MapCondition.factory(row["Relationship"])(row) + for i, row in self.config_data.iterrows()] + + def validate(self, verbose=None): + """Prepares and validates the Configuration object's mapping conditions. + Validation fails if there are any inoperable errors. Problems that can + be fixed in place are processed and flagged as warnings. + + Args: + verbose (int): controls print output, should be in range 0-5, + each higher level includes the messages of each level below it. + Where verbose = 0, nothing will be printed to console. + Where verbose = 1, print only errors to console, + where verbose = 2, also print warnings, + where verbose = 3, also print suggestions and status checks, + where verbose = 4, also print passing validation checks, + where verbose = 5, also print description of configuration + conditions. + Defaults to None; if none, replace with self.verbose attribute + Returns: + Boolean: boolean representing whether there are any errors that + prevent validation + + Examples: + >>> Configuration(EX_MAP_1).validate() + True + >>> Configuration(EX_MAP_2).validate(verbose=4) + Validating configuration file . . . + + CHECKS PASSED + [X] All expected columns ('New Column Name', 'New Column Documentation', 'Source Column ID', 'Source Column Documentation', 'Relationship', 'Condition', and 'Prerequisite') accounted for in configuration file. + [X] No NA's in column New Column Name detected. + [X] No NA's in column Source Column ID detected. + [X] No leading/trailing spaces column New Column Name detected. + [X] No leading/trailing spaces column Relationship detected. + [X] No leading/trailing spaces column Prerequisite detected. + [X] No leading/trailing spaces column Condition detected. + [X] No whitespace in column Condition detected. + [X] No upper case value(s) in column Relationship detected. + [X] No upper case value(s) in column Condition detected. + [X] No non-alphanumeric value(s) in column Source Column ID detected. + [X] No non-alphanumeric value(s) in column Relationship detected. + [X] No non-alphanumeric value(s) in column Condition detected. + + ERRORS + [!] 1 extraneous column(s) found in the given columns ('unused column') Extraneous column(s) will be ommitted. + [!] 3 values in Relationship column were invalid ('eqqqq', 'another fake', and 'gee'). These must be a valid method of pd.Series, e.g. ('gt', 'ge', 'lt', 'le', 'between', 'eq', 'ne', and 'contains') to be valid. + [!] 2 row(s) containing a numericalrelationship with non-number condition detected in row(s) #8, and #9. + [!] 2 values in Prerequisite column were invalid ('ABDOMM', and 'Placeholder here'). These must be defined in the 'new column name' column of the config file to be valid. + [!] 1 NA's in column Relationship detected in row(s) #3. + [!] 1 NA's in column Condition detected in row(s) #6. + + WARNINGS + [?] 2 duplicate row(s) detected in row(s) #1, and #14. Duplicates will be dropped. + [?] 1 blank row(s) detected in row(s) #10. Blank rows will be dropped. + [?] 2 whitespace in column New Column Name detected in row(s) #6, and #8. Whitespace will be converted to '_' + [?] 1 whitespace in column Relationship detected in row(s) #4. Whitespace will be converted to '_' + [?] 1 whitespace in column Prerequisite detected in row(s) #9. 
Whitespace will be converted to '_' + [?] 2 non-alphanumeric value(s) in column New Column Name detected in row(s) #5, and #6. This text should be alphanumeric. Non-alphanumeric characters will be removed. + False + """ + if verbose is None: + verbose = self.verbose + + if verbose >= 3: + print("Validating configuration file . . .") + + # Check that all expected columns accounted for + col_passing_msg = " ".join(["All expected columns", + report_list(self.required_columns), + "accounted for in configuration file."]) + self.validation.must_contain(self.given_columns, + self.required_columns, + passing_msg=col_passing_msg) + + # Check for extra columns + self.validation.no_extraneous(self.given_columns, + self.required_columns, + "column") + self.config_data = self.config_data.reindex( + columns=self.required_columns) + + # TODO: check for circular pre-reqs? + + # Check for duplicate rows + self.validation.flag_rows(self.config_data.duplicated(), + flag_criteria="duplicate row(s)", + flag_action="Duplicates will be dropped.") + self.config_data = self.config_data.drop_duplicates() + + # Check for blank rows + self.validation.flag_rows( + pd.Series(np.all(self.config_data.isnull(), axis=1)), + flag_tier=Warn, + flag_criteria="blank row(s)", + flag_action="Blank rows will be dropped.") + self.config_data = self.config_data.dropna(how="all") + + # check all relationships in relationship column are valid + self.validation.all_valid(self.given_relationships, + self.valid_relationships, + "a valid method of pd.Series, e.g. " + + report_list(self.valid_relationships)) + + # check for non-number conditions with numerical relationships + invalid_num = (self.config_data["Relationship"].isin( + ["gt", "ge", "le", "lt"]) & + (pd.to_numeric(self.config_data["Condition"], + errors="coerce").isnull())) + self.validation.flag_rows(invalid_num, + flag_criteria="row(s) containing a numerical" + + "relationship with non-number condition", + flag_tier=Err) + + # check all prerequisite columns are also defined in configuration + self.validation.all_valid(self.given_prereq, self.new_columns, + "defined in the 'new column name' column " + + "of the config file") + + # Check for missing values + self.validation.check_na(self.config_data[self.main_columns]) + + # Processing strings + ws_col = ["New Column Name", "Relationship", "Prerequisite"] + lowercase_col = ["Relationship"] + if self.process_strings: + ws_col.append("Condition") + lowercase_col.append("Condition") + + # Remove whitespace + self.config_data.loc[:, ws_col] = self.validation.fix_whitespace( + self.config_data.loc[:, ws_col].fillna("")) + + # Check for uppercase characters + self.config_data.loc[:, lowercase_col] = self.validation.fix_upcase( + self.config_data.loc[:, lowercase_col].fillna("")) + + # Check that main columns contain only alphanumeric values + self.config_data.loc[:, self.main_columns] = self.validation.fix_alnum( + self.config_data.loc[:, self.main_columns]) + + self.validation.report(verbose) # report + if verbose == 5: + self.describe() + + # return true only if there are zero errors + return self.validation.is_valid() + + def describe(self): + """Prints the mapping relationships in the Configuration object to + console. + + Args: + None + + Returns: + None + + Examples: + >>> Configuration(EX_MAP_1).describe() + MAPPING STATS + + - 16 new columns produced ('AB_POSIT', 'AB_SIZE', 'AC_BRL', + 'AC_CONV', 'AC_COUGH', etc.) + - 12 source columns required ('Id10403', 'Id10362', + 'Id10169', 'Id10221', 'Id10154', etc.) 
+ - 7 relationships invoked ('eq', 'lt', 'between', 'ge', + 'contains', etc.) + - 13 conditions listed ('yes', '14', '10', '21', '15 to 49', + etc.) + - 1 prerequisites checked ('FEMALE') + + """ + print("MAPPING STATS\n") + spacer = " - \t" + + unique_checks = [("New Column Name", "new columns produced"), + ("Source Column ID", "source columns required"), + ("Relationship", "relationships invoked"), + ("Condition", "conditions listed"), + ("Prerequisite", "prerequisites checked")] + + for col_name, context in unique_checks: + print(spacer, self.config_data[col_name].nunique(), context, + report_list(self.config_data[col_name].dropna().unique(), + limit=5)) + + +class CrossVA(): + """Class representing raw VA data, and how to map it to an algorithm + + Attributes: + mapping (type): a validated Configuration object that details how to + transform the type of data in `raw_data` to the desired output. + data (Pandas DataFrame): a Pandas DataFrame containing the raw VA data + prepared_data (Pandas DataFrame): a Pandas DataFrame containing a + prepared form of the VA data to use with the Configuration object. + validation (Validation): Validation object containing the validation + checks that have been made on the raw data and between the raw data + and mapping Configuration. + verbose (int): Controls verbosity of printing to console, 0-5 where 0 + is silent. + """ + + def __init__(self, raw_data, mapping_config, na_values=["dk", "ref", ""], + verbose=2): + """Inits CrossVA class + + Args: + raw_data (Pandas DataFrame): a Pandas DataFrame containing the + raw data + mapping_config (Configuration): a validated Configuration object + that details how to transform the type of data in `raw_data` + to the desired output. + na_values (list): List of values to consider NA. + verbose (int): Controls verbosity of printing to console. Defaults + to 2. + Returns: + type: Description of returned object. + + Examples + >>> CrossVA(EX_DATA_1, Configuration(EX_MAP_1)) + + + """ + # if mapping_config isn't valid, attempt to validate before aborting + if not mapping_config.validation.is_valid(): + if not mapping_config.validate(): + pass + # raise error - mapping_config should be a valid + + # Crop input data to only the columns which have a matching ID + # at the end of their name using regex, and any additional columns + # passed as an argument, and then rename dataframe to just the column + # IDs + self.mapping = mapping_config + new_columns = raw_data.columns.str.extract( + "("+"$|".join(self.mapping.source_columns.tolist())+"$)", + expand=False) + cropped_data = raw_data.loc[:, new_columns.notnull()].copy() + cropped_data = cropped_data.replace(na_values, np.nan) + self.data = cropped_data.rename(columns=pd.Series(new_columns, + raw_data.columns)) + self.prepared_data = pd.DataFrame() + self.verbose = verbose + self.validation = Validation() + + def __str__(self): + """ str representation method """ + return "" + + def __repr__(self): + return ("<" + self.__class__.__name__ + " with " + str(self.data.shape) + + " raw data and " + str(self.mapping) + ">") + + def process(self): + """Applies the given configuration object's mappings to the given raw + data. + + Args: None + + Returns: + Pandas DataFrame: a dataframe where the transformations specified + have been applied to the raw data. 
+ + """ + if not self.validation.is_valid(): + if not self.validate(): + raise ValueError(("Can't process without valid" + " CrossVA instance")) + transformed_data = pd.DataFrame(index=np.arange(len(self.data)), + columns=self.mapping.new_columns) + transformed_data.columns.name = "" + for condition in self.mapping.list_conditions(): + # create column_values (boolean pd.series) representing if + # condition and preq is met in raw data for each row, preserving NA + + cond_result = condition.check(self.prepared_data) + column_val = np.where(cond_result.fillna(False), + condition.check_prereq(transformed_data), + cond_result + ) + # update new column to be met_conditions where new column is + # currently NA or false, but do not overwrite TRUE values (creates + # implicit ANY relationship between conditions) + update_where = ~(transformed_data[condition.name].fillna(False)) + transformed_data.loc[:, condition.name] = np.where(update_where, + column_val, transformed_data[condition.name]) + + return transformed_data + + def validate(self, verbose=None): + """Validates that RawVAData's raw input data and its mapping + configuration object are compatible. + + Args: + verbose (int): int from 0 to 5, representing verbosity of printing + to console. Defaults to None; if None, replaced with + self.verbose attribute. + + Returns: + boolean: True if valid, False if not. + + Examples: + >>> CrossVA(EX_DATA_1, Configuration(EX_MAP_1)).validate() + True + + """ + if verbose is None: + verbose = self.verbose + + if self.mapping.process_strings: + # strip whitespace and replace non-trailing/leading with underscore + self.data.fillna("", inplace=True) + self.data = self.validation.fix_whitespace(self.data) + # make all characters lowercase + self.data = self.validation.fix_upcase(self.data) + # strip for alphanumeric characters + self.data = self.validation.fix_alnum(self.data) + self.data = self.data.replace("", np.nan) + # check all expected columns from config Source Column ID are present + self.validation.must_contain(pd.Series(self.data.columns, + name="input data columns"), + self.mapping.source_columns) + + # TODO Check to see if relationship and conditions correspond to + # logical values in each column + if self.validation.is_valid(): + for mapping_condition in self.mapping.list_conditions(): + self.prepared_data[mapping_condition.source_dtype] = \ + mapping_condition.prepare_data(self.data) + + self.validation.report(verbose) + return self.validation.is_valid() + + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) diff --git a/docs/.DS_Store b/docs/.DS_Store new file mode 100644 index 0000000..1da8af4 Binary files /dev/null and b/docs/.DS_Store differ diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..2640758 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = python -msphinx +SPHINXPROJ = CrossVA +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/_build/.DS_Store b/docs/_build/.DS_Store new file mode 100644 index 0000000..5a6a847 Binary files /dev/null and b/docs/_build/.DS_Store differ diff --git a/docs/_build/doctrees/about.doctree b/docs/_build/doctrees/about.doctree new file mode 100644 index 0000000..6811fc4 Binary files /dev/null and b/docs/_build/doctrees/about.doctree differ diff --git a/docs/_build/doctrees/config.doctree b/docs/_build/doctrees/config.doctree new file mode 100644 index 0000000..4f765da Binary files /dev/null and b/docs/_build/doctrees/config.doctree differ diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle new file mode 100644 index 0000000..54db3be Binary files /dev/null and b/docs/_build/doctrees/environment.pickle differ diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree new file mode 100644 index 0000000..df715c0 Binary files /dev/null and b/docs/_build/doctrees/index.doctree differ diff --git a/docs/_build/doctrees/source/configuration.doctree b/docs/_build/doctrees/source/configuration.doctree new file mode 100644 index 0000000..51db18c Binary files /dev/null and b/docs/_build/doctrees/source/configuration.doctree differ diff --git a/docs/_build/doctrees/source/mappings.doctree b/docs/_build/doctrees/source/mappings.doctree new file mode 100644 index 0000000..1ee4689 Binary files /dev/null and b/docs/_build/doctrees/source/mappings.doctree differ diff --git a/docs/_build/doctrees/source/modules.doctree b/docs/_build/doctrees/source/modules.doctree new file mode 100644 index 0000000..2cfcbd5 Binary files /dev/null and b/docs/_build/doctrees/source/modules.doctree differ diff --git a/docs/_build/doctrees/source/tests.doctree b/docs/_build/doctrees/source/tests.doctree new file mode 100644 index 0000000..5883486 Binary files /dev/null and b/docs/_build/doctrees/source/tests.doctree differ diff --git a/docs/_build/doctrees/source/transform.doctree b/docs/_build/doctrees/source/transform.doctree new file mode 100644 index 0000000..34252a7 Binary files /dev/null and b/docs/_build/doctrees/source/transform.doctree differ diff --git a/docs/_build/doctrees/source/utils.doctree b/docs/_build/doctrees/source/utils.doctree new file mode 100644 index 0000000..e302713 Binary files /dev/null and b/docs/_build/doctrees/source/utils.doctree differ diff --git a/docs/_build/doctrees/source/validation.doctree b/docs/_build/doctrees/source/validation.doctree new file mode 100644 index 0000000..0977683 Binary files /dev/null and b/docs/_build/doctrees/source/validation.doctree differ diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo new file mode 100644 index 0000000..c1e12f2 --- /dev/null +++ b/docs/_build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
+config: 5bef077ccee28f090d45b664c56f5a1b +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_build/html/.nojekyll b/docs/_build/html/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/docs/_build/html/_modules/configuration.html b/docs/_build/html/_modules/configuration.html new file mode 100644 index 0000000..b2f9c0f --- /dev/null +++ b/docs/_build/html/_modules/configuration.html @@ -0,0 +1,708 @@ + + + + + + + + + + + configuration — CrossVA 1.0a documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for configuration

+"""
+Structure for Configuration class
+"""
+
+# !/usr/bin/env python3
+#  -*- coding: utf-8 -*-
+
+from mappings import MapCondition
+from utils import report_list
+from validation import Validation, Err, Warn
+import pandas as pd
+import numpy as np
+
+# MAP_PATH = "resources/mapping_configuration_files/"
+# EX_MAP_1 = pd.read_csv(MAP_PATH+"example_config_1.csv")
+# EX_MAP_2 = pd.read_csv(MAP_PATH+"example_config_2.csv")
+# EX_DATA_1 = pd.read_csv("resources/sample_data/who151_va_output.csv")
+# create problematic data
+# EX_DATA_2
+
+
+
[docs]class Configuration(): + """ Configuration class details the relationship between a set of input + data and output data. It is composed of MapConditions that + transform an input data source (2012 WHO, 2016 WHO 141, 2016 WHO 151, + PHRMC SHORT) into a different data form (PHRMC SHORT, InsillicoVA, + InterVA4, InterVA5, or Tarrif2) for verbal autopsy. + + Attributes: + given_columns (Pandas Series): columns of mapping dataframe. + required_columns (Pandas Series): required columns in mapping data. + main_columns (list): the four main columns required in config_data. + valid_relationships (Pandas Series): contains list of valid + relationships to use in comparisons. Relationships should be an + attr of Pandas Series object, or be defined as a subclass of + MapCondition. + config_data (Pandas DataFrame): dataframe containing mapping + relationships written out. + given_prereq (Pandas Series): lists pre-requisites referenced in config + data. + new_columns (Pandas Series): lists the new columns to be created with + config data. + source_columns (Pandas Series): lists the source columns required in + the raw input data. + verbose (int): controls default verbosity of printing to console. + process_strings (boolean): whether or not to remove whitespace and + non-alphanumeric characters from strings in condition field and in + raw_data during mapping. + validation (Validation): a validation object containing the validation + checks made + + """ + required_columns = pd.Series(['New Column Name', + 'New Column Documentation', + 'Source Column ID', + 'Source Column Documentation', + 'Relationship', + 'Condition', 'Prerequisite', + ], + name="expected columns") + + main_columns = ["New Column Name", "Source Column ID", + "Relationship", "Condition"] + + valid_relationships = pd.Series(["gt", "ge", "lt", "le", "between", "eq", + "ne", "contains"], + name="valid relationships") + + def __init__(self, config_data, verbose=1, process_strings=True): + """Inits configuration class from a Pandas DataFrame containing + mapping data. + + Args: + config_data (Pandas DataFrame): dataframe representing the mapping + data relationships, in specified format. + verbose (int): should be in range 1-5, + controls print output, where 0=silent. Defaults to 1. + process_strings (boolean): Whether or not to normalize string + conditions and data. Defaults to True. + + Returns: + Nothing + + Examples: + >>> Configuration(EX_MAP_1) + Unvalidated Configuration instance with 16 mapping conditions + + """ + # TODO check for df & # of rows & these 3 columns + # throw cant instantiate error? 
+ + self.config_data = config_data + self.given_columns = pd.Series(config_data.columns, + name="the given columns") + self.process_strings = process_strings + + def plain_info(aseries, new_name): + """ returns Pandas Series without NAs or duplicates renamed to + `new_name` """ + return aseries.drop_duplicates().dropna().rename(new_name) + + self.given_relationships = plain_info(config_data["Relationship"], + "Relationship column") + self.given_prereq = plain_info(config_data["Prerequisite"], + "Prerequisite column") + self.new_columns = plain_info(config_data["New Column Name"], + "New Column Name column") + self.source_columns = plain_info(config_data["Source Column ID"], + "Source Column in Config") + self.verbose = verbose + self.validation = Validation() + + def __str__(self): + """string representation of class""" + return self.__repr__() + + def __repr__(self): + """string representation of class""" + val_status = ("Validated" if self.validation.is_valid() else + "Unvalidated") + return " ".join([val_status, self.__class__.__name__, "instance with", + str(len(self.config_data)), "mapping conditions"]) + +
[docs] def list_conditions(self): + """Lists the final mapping conditions contained in Configuration object + + Returns: + list: list of MapConditions, where each MapConditions is + created from a row of processed mapping data. + + Examples: + >>> c = Configuration(EX_MAP_1) + >>> c.list_conditions()[:5] + [<StrMapCondition: AB_POSIT = [column Id10403].eq(yes)>, + <StrMapCondition: AB_SIZE = [column Id10362].eq(yes)>, + <NumMapCondition: AC_BRL = [column Id10169].lt(14.0)>, + <NumMapCondition: AC_CONV = [column Id10221].lt(10.0)>, + <NumMapCondition: AC_COUGH = [column Id10154].lt(21.0)>] + + """ + # TODO config_data["Standalone"] = config_data["Prerequisite"].isnull() + # self.config_data.sort_values("Standalone", ascending=False) + return [MapCondition.factory(row["Relationship"])(row) + for i, row in self.config_data.iterrows()]
+ +
[docs] def validate(self, verbose=None): + """Prepares and validates the Configuration object's mapping conditions. + Validation fails if there are any inoperable errors. Problems that can + be fixed in place are processed and flagged as warnings. + + Args: + verbose (int): controls print output, should be in range 0-5, + each higher level includes the messages of each level below it. + Where verbose = 0, nothing will be printed to console. + Where verbose = 1, print only errors to console, + where verbose = 2, also print warnings, + where verbose = 3, also print suggestions and status checks, + where verbose = 4, also print passing validation checks, + where verbose = 5, also print description of configuration + conditions. + Defaults to None; if none, replace with self.verbose attribute + Returns: + Boolean: boolean representing whether there are any errors that + prevent validation + + Examples: + >>> Configuration(EX_MAP_1).validate() + True + >>> Configuration(EX_MAP_2).validate(verbose=4) + Validating configuration file . . . + <BLANKLINE> + CHECKS PASSED + [X] All expected columns ('New Column Name', 'New Column Documentation', 'Source Column ID', 'Source Column Documentation', 'Relationship', 'Condition', and 'Prerequisite') accounted for in configuration file. + [X] No NA's in column New Column Name detected. + [X] No NA's in column Source Column ID detected. + [X] No leading/trailing spaces column New Column Name detected. + [X] No leading/trailing spaces column Relationship detected. + [X] No leading/trailing spaces column Prerequisite detected. + [X] No leading/trailing spaces column Condition detected. + [X] No whitespace in column Condition detected. + [X] No upper case value(s) in column Relationship detected. + [X] No upper case value(s) in column Condition detected. + [X] No non-alphanumeric value(s) in column Source Column ID detected. + [X] No non-alphanumeric value(s) in column Relationship detected. + [X] No non-alphanumeric value(s) in column Condition detected. + <BLANKLINE> + ERRORS + [!] 1 extraneous column(s) found in the given columns ('unused column') Extraneous column(s) will be ommitted. + [!] 3 values in Relationship column were invalid ('eqqqq', 'another fake', and 'gee'). These must be a valid method of pd.Series, e.g. ('gt', 'ge', 'lt', 'le', 'between', 'eq', 'ne', and 'contains') to be valid. + [!] 2 row(s) containing a numericalrelationship with non-number condition detected in row(s) #8, and #9. + [!] 2 values in Prerequisite column were invalid ('ABDOMM', and 'Placeholder here'). These must be defined in the 'new column name' column of the config file to be valid. + [!] 1 NA's in column Relationship detected in row(s) #3. + [!] 1 NA's in column Condition detected in row(s) #6. + <BLANKLINE> + WARNINGS + [?] 2 duplicate row(s) detected in row(s) #1, and #14. Duplicates will be dropped. + [?] 1 blank row(s) detected in row(s) #10. Blank rows will be dropped. + [?] 2 whitespace in column New Column Name detected in row(s) #6, and #8. Whitespace will be converted to '_' + [?] 1 whitespace in column Relationship detected in row(s) #4. Whitespace will be converted to '_' + [?] 1 whitespace in column Prerequisite detected in row(s) #9. Whitespace will be converted to '_' + [?] 2 non-alphanumeric value(s) in column New Column Name detected in row(s) #5, and #6. This text should be alphanumeric. Non-alphanumeric characters will be removed. + False + """ + if verbose is None: + verbose = self.verbose + + if verbose >= 3: + print("Validating configuration file . 
. .") + + # Check that all expected columns accounted for + col_passing_msg = " ".join(["All expected columns", + report_list(self.required_columns), + "accounted for in configuration file."]) + self.validation.must_contain(self.given_columns, + self.required_columns, + passing_msg=col_passing_msg) + + # Check for extra columns + self.validation.no_extraneous(self.given_columns, + self.required_columns, + "column") + self.config_data = self.config_data.reindex( + columns=self.required_columns) + + # TODO: check for circular pre-reqs? + + # Check for duplicate rows + self.validation.flag_rows(self.config_data.duplicated(), + flag_criteria="duplicate row(s)", + flag_action="Duplicates will be dropped.") + self.config_data = self.config_data.drop_duplicates() + + # Check for blank rows + self.validation.flag_rows( + pd.Series(np.all(self.config_data.isnull(), axis=1)), + flag_tier=Warn, + flag_criteria="blank row(s)", + flag_action="Blank rows will be dropped.") + self.config_data = self.config_data.dropna(how="all") + + # check all relationships in relationship column are valid + self.validation.all_valid(self.given_relationships, + self.valid_relationships, + "a valid method of pd.Series, e.g. " + + report_list(self.valid_relationships)) + + # check for non-number conditions with numerical relationships + invalid_num = (self.config_data["Relationship"].isin( + ["gt", "ge", "le", "lt"]) & + (pd.to_numeric(self.config_data["Condition"], + errors="coerce").isnull())) + self.validation.flag_rows(invalid_num, + flag_criteria="row(s) containing a numerical" + + "relationship with non-number condition", + flag_tier=Err) + + # check all prerequisite columns are also defined in configuration + self.validation.all_valid(self.given_prereq, self.new_columns, + "defined in the 'new column name' column " + + "of the config file") + + # Check for missing values + self.validation.check_na(self.config_data[self.main_columns]) + + # Processing strings + ws_col = ["New Column Name", "Relationship", "Prerequisite"] + lowercase_col = ["Relationship"] + if self.process_strings: + ws_col.append("Condition") + lowercase_col.append("Condition") + + # Remove whitespace + self.config_data.loc[:, ws_col] = self.validation.fix_whitespace( + self.config_data.loc[:, ws_col].fillna("")) + + # Check for uppercase characters + self.config_data.loc[:, lowercase_col] = self.validation.fix_upcase( + self.config_data.loc[:, lowercase_col].fillna("")) + + # Check that main columns contain only alphanumeric values + self.config_data.loc[:, self.main_columns] = self.validation.fix_alnum( + self.config_data.loc[:, self.main_columns]) + + self.validation.report(verbose) # report + if verbose == 5: + self.describe() + + # return true only if there are zero errors + return self.validation.is_valid()
+ +
[docs] def describe(self): + """Prints the mapping relationships in the Configuration object to + console. + + Args: + None + + Returns: + None + + Examples: + >>> Configuration(EX_MAP_1).describe() + MAPPING STATS + <BLANKLINE> + - 16 new columns produced ('AB_POSIT', 'AB_SIZE', 'AC_BRL', + 'AC_CONV', 'AC_COUGH', etc.) + - 12 source columns required ('Id10403', 'Id10362', + 'Id10169', 'Id10221', 'Id10154', etc.) + - 7 relationships invoked ('eq', 'lt', 'between', 'ge', + 'contains', etc.) + - 13 conditions listed ('yes', '14', '10', '21', '15 to 49', + etc.) + - 1 prerequisites checked ('FEMALE') + + """ + print("MAPPING STATS\n") + spacer = " - \t" + + unique_checks = [("New Column Name", "new columns produced"), + ("Source Column ID", "source columns required"), + ("Relationship", "relationships invoked"), + ("Condition", "conditions listed"), + ("Prerequisite", "prerequisites checked")] + + for col_name, context in unique_checks: + print(spacer, self.config_data[col_name].nunique(), context, + report_list(self.config_data[col_name].dropna().unique(), + limit=5))
+ + +
[docs]class CrossVA(): + """Class representing raw VA data, and how to map it to an algorithm + + Attributes: + mapping (type): a validated Configuration object that details how to + transform the type of data in `raw_data` to the desired output. + data (Pandas DataFrame): a Pandas DataFrame containing the raw VA data + prepared_data (Pandas DataFrame): a Pandas DataFrame containing a + prepared form of the VA data to use with the Configuration object. + validation (Validation): Validation object containing the validation + checks that have been made on the raw data and between the raw data + and mapping Configuration. + verbose (int): Controls verbosity of printing to console, 0-5 where 0 + is silent. + """ + + def __init__(self, raw_data, mapping_config, na_values=["dk", "ref", ""], + verbose=2): + """Inits CrossVA class + + Args: + raw_data (Pandas DataFrame): a Pandas DataFrame containing the + raw data + mapping_config (Configuration): a validated Configuration object + that details how to transform the type of data in `raw_data` + to the desired output. + na_values (list): List of values to consider NA. + verbose (int): Controls verbosity of printing to console. Defaults + to 2. + Returns: + type: Description of returned object. + + Examples + >>> CrossVA(EX_DATA_1, Configuration(EX_MAP_1)) + <CrossVA with (4, 12) raw data and Validated Configuration instance + with 16 mapping conditions> + + """ + # if mapping_config isn't valid, attempt to validate before aborting + if not mapping_config.validation.is_valid(): + if not mapping_config.validate(): + pass + # raise error - mapping_config should be a valid + + # Crop input data to only the columns which have a matching ID + # at the end of their name using regex, and any additional columns + # passed as an argument, and then rename dataframe to just the column + # IDs + self.mapping = mapping_config + new_columns = raw_data.columns.str.extract( + "("+"$|".join(self.mapping.source_columns.tolist())+"$)", + expand=False) + cropped_data = raw_data.loc[:, new_columns.notnull()].copy() + cropped_data = cropped_data.replace(na_values, np.nan) + self.data = cropped_data.rename(columns=pd.Series(new_columns, + raw_data.columns)) + self.prepared_data = pd.DataFrame() + self.verbose = verbose + self.validation = Validation() + + def __str__(self): + """ str representation method """ + return "" + + def __repr__(self): + return ("<" + self.__class__.__name__ + " with " + str(self.data.shape) + + " raw data and " + str(self.mapping) + ">") + +
[docs] def process(self): + """Applies the given configuration object's mappings to the given raw + data. + + Args: None + + Returns: + Pandas DataFrame: a dataframe where the transformations specified + have been applied to the raw data. + + """ + if not self.validation.is_valid(): + if not self.validate(): + raise ValueError(("Can't process without valid" + " CrossVA instance")) + transformed_data = pd.DataFrame(index=np.arange(len(self.data)), + columns=self.mapping.new_columns) + transformed_data.columns.name = "" + for condition in self.mapping.list_conditions(): + # create column_values (boolean pd.series) representing if + # condition and preq is met in raw data for each row, preserving NA + + cond_result = condition.check(self.prepared_data) + column_val = np.where(cond_result.fillna(False), + condition.check_prereq(transformed_data), + cond_result + ) + # update new column to be met_conditions where new column is + # currently NA or false, but do not overwrite TRUE values (creates + # implicit ANY relationship between conditions) + update_where = ~(transformed_data[condition.name].fillna(False)) + transformed_data.loc[:, condition.name] = np.where(update_where, + column_val, transformed_data[condition.name]) + + return transformed_data
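Editorial note (not part of the package source): the merge step at the end of ``process()`` above is the subtlest part of the mapping. A new output column is only updated where it is still NA or False, so once any condition has set a row to True, a later condition cannot flip it back - the "implicit ANY" mentioned in the comments. A small standalone sketch of that update, with made-up values and an explicit boolean cast added for clarity:

.. code-block:: python

   import numpy as np
   import pandas as pd

   # existing output column: row 0 already True, row 1 False, row 2 still NA
   existing = pd.Series([True, False, np.nan])
   # result of checking the next condition (and its prerequisite) per row
   column_val = pd.Series([False, True, True])

   # only rows that are currently NA or False may be updated ...
   update_where = ~existing.fillna(False).astype(bool)
   # ... so the True set by an earlier condition survives (implicit ANY)
   merged = np.where(update_where, column_val, existing)
   print(merged)  # [True True True]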
+ +
[docs] def validate(self, verbose=None): + """Validates that RawVAData's raw input data and its mapping + configuration object are compatible. + + Args: + verbose (int): int from 0 to 5, representing verbosity of printing + to console. Defaults to None; if None, replaced with + self.verbose attribute. + + Returns: + boolean: True if valid, False if not. + + Examples: + >>> CrossVA(EX_DATA_1, Configuration(EX_MAP_1)).validate() + True + + """ + if verbose is None: + verbose = self.verbose + + if self.mapping.process_strings: + # strip whitespace and replace non-trailing/leading with underscore + self.data.fillna("", inplace=True) + self.data = self.validation.fix_whitespace(self.data) + # make all characters lowercase + self.data = self.validation.fix_upcase(self.data) + # strip for alphanumeric characters + self.data = self.validation.fix_alnum(self.data) + self.data = self.data.replace("", np.nan) + # check all expected columns from config Source Column ID are present + self.validation.must_contain(pd.Series(self.data.columns, + name="input data columns"), + self.mapping.source_columns) + + # TODO Check to see if relationship and conditions correspond to + # logical values in each column + if self.validation.is_valid(): + for mapping_condition in self.mapping.list_conditions(): + self.prepared_data[mapping_condition.source_dtype] = \ + mapping_condition.prepare_data(self.data) + + self.validation.report(verbose) + return self.validation.is_valid()
+ + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) +
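For orientation, the pieces of this module can be wired together roughly as follows. This is an editorial sketch rather than package code: the CSV paths are the commented-out example paths from the top of the module and may not exist in a given checkout.

.. code-block:: python

   import pandas as pd
   from configuration import Configuration, CrossVA

   # example paths taken from the commented-out constants above
   mapping_df = pd.read_csv(
       "resources/mapping_configuration_files/example_config_1.csv")
   raw_df = pd.read_csv("resources/sample_data/who151_va_output.csv")

   mapping = Configuration(mapping_df, verbose=2)
   if mapping.validate():              # False if the mapping has inoperable errors
       cross_va = CrossVA(raw_df, mapping)
       transformed = cross_va.process()  # validates raw data, then applies the mapping
       print(transformed.head())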
+ + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html new file mode 100644 index 0000000..f6acca6 --- /dev/null +++ b/docs/_build/html/_modules/index.html @@ -0,0 +1,230 @@ + + + + + + + + + + + Overview: module code — CrossVA 1.0a documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

All modules for which code is available

+ + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/mappings.html b/docs/_build/html/_modules/mappings.html new file mode 100644 index 0000000..91215d2 --- /dev/null +++ b/docs/_build/html/_modules/mappings.html @@ -0,0 +1,789 @@ + + + + + + + + + + + mappings — CrossVA 1.0a documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for mappings

+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Defines the MapCondition class and its subclasses, each representing a single condition
+that uses a relationship to transform raw data into a boolean column while
+preserving the NA values.
+"""
+from utils import english_relationship
+
+from abc import ABCMeta, abstractmethod
+import pandas as pd
+import numpy as np
+
+
+
[docs]class MapCondition(metaclass=ABCMeta): + """ Abstract class representing a single mapped condition in the mapping + data, which gives instructions to transform the raw input data into the + form needed for a VA instrument. The main configuration class is composed + of these. + + Attributes: + name (str): the name of the new column to be created + relationship (str): the relationship of the input data to the condition + Should be one of "ge" (greater than or equal to), "gt" + (greater than), "le" (less than or equal to), "lt" (less than), + "eq" (equal to), "ne" (not equal to), + "contains" (if string contains) or "between" + (between the two numbers, inclusive). + preq_column (str or None): name of the pre-requisite column if it exists, + or `None` if no pre-requisite + source (str): the name of the column to be checked + + """ + + def __init__(self, condition_row): + """Inits SingleMapCondition instance with a row of a valid mapping file + (or pd.Series or dictionary containing the correct fields) + to be created. + + Args: + condition_row (Pandas Series or dict): that contains five key-value + pairs. Required keys are "New Column Name", "Relationship", + "Condition", "Prerequisite", and "Source Column ID". This + should come from a valid mapping file with a Configuration + object. + + Returns: + None + + """ + # TODO init checks + self.condition = condition_row["Condition"] + self.name = condition_row["New Column Name"] + self.relationship = condition_row["Relationship"] + self.preq_column = condition_row["Prerequisite"] + self.source_name = condition_row["Source Column ID"] + +
[docs] def factory(relationship): + """static class factory method, which determines which subclass to + return + + Args: + relationship (str): a relationship in (gt, ge, lt, le, ne, eq, + contains, between) that represents a comparison to be + made to the raw data + + Returns: + MapCondition: returns specific subclass that corresponds to the + correct relationship + + Examples: + >>> MapCondition.factory("ge") + <class '__main__.NumMapCondition'> + + >>> MapCondition.factory("eq") + <class '__main__.StrMapCondition'> + + >>> MapCondition.factory("contains") + <class '__main__.ContainsCondition'> + + >>> MapCondition.factory("between") + <class '__main__.BetweenCondition'> + + >>> MapCondition.factory("eqq") + Traceback (most recent call last): + AssertionError: No defined Condition class for eqq type + + """ + if relationship in ["gt", "ge", "lt", "le"]: + return NumMapCondition + if relationship in ["ne", "eq"]: + return StrMapCondition + if relationship == "contains": + return ContainsCondition + if relationship == "between": + return BetweenCondition + assert 0, "No defined Condition class for " + relationship + " type"
+ +
[docs] def check(self, prepared_data): + """Checks the condition against the prepared dataframe. NAs are not checked, just added + back afterward. + + Args: + prepared_data (Pandas DataFrame): a dataframe containing a created + column with the name specified in self.source_dtype + + Returns: + Pandas Series: returns a boolean series where the condition is met + + Examples: + >>> test_df = pd.DataFrame({"source_test_str": ["test condition",\ + "test condition 2",\ + np.nan],\ + "source_test_num": [4, 5, np.nan]}) + >>> StrMapCondition({"Condition" : "test condition",\ + "New Column Name" : "test new column name",\ + "Relationship" : "eq", "Prerequisite" : None,\ + "Source Column ID" : "source_test"}).check(test_df) + 0 True + 1 False + 2 NaN + Name: source_test_str, dtype: object + + >>> NumMapCondition({"Condition" : 4.5,\ + "New Column Name" : "test new column name",\ + "Relationship" : "ge",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}).check(test_df) + 0 False + 1 True + 2 NaN + Name: source_test_num, dtype: object + """ + eval_col = prepared_data[self.source_dtype] + return self._run_check(eval_col).where(eval_col.notnull(), + eval_col)
+ + def _run_check(self, eval_col): + """internal method to check the condition on a given column with no NAs + + Args: + eval_col (Pandas Series): a Pandas Series containing data to evaluate + + Returns: + Pandas Series: returns a boolean series where the condition is met + + Examples: + >>> test = pd.Series(["A","B","C"]) + >>> StrMapCondition({"Condition" : "B",\ + "New Column Name" : "test new column name",\ + "Relationship" : "eq", "Prerequisite" : None,\ + "Source Column ID" : "source_test"}\ + )._run_check(test) + 0 False + 1 True + 2 False + dtype: bool + + >>> test = pd.Series([1, 2, 3]) + >>> NumMapCondition({"Condition" : 3,\ + "New Column Name" : "test new column name",\ + "Relationship" : "ge",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}\ + )._run_check(test) + 0 False + 1 False + 2 True + dtype: bool + """ + return getattr(eval_col, self.relationship)(self.condition) +
[docs] def check_prereq(self, transformed_data): + """checks for pre-req column status; if there is no pre-req, returns + true, else looks up values of pre-req column from transformed_data + + Args: + transformed_data (Pandas DataFrame): the new dataframe + being created, which contains any pre-req columns + + Returns: + boolean or boolean pd.series: representing whether pre-req is + satisfied + + Examples: + >>> test_df = pd.DataFrame({"preq_one": np.repeat(True,5),\ + "preq_two": np.repeat(False, 5)}) + + If there is no pre-req, simply returns True (Pandas can interpret this + in boolean indexing) + + >>> NumMapCondition({"Condition" : 4.5,\ + "New Column Name" : "test new column name",\ + "Relationship" : "ge",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}\ + ).check_prereq(test_df) + True + + If there is a pre-req, then returns the value of transformed_data + with that column. + + >>> NumMapCondition({"Condition" : 4.5,\ + "New Column Name" : "test new column name",\ + "Relationship" : "ge",\ + "Prerequisite" : "preq_one",\ + "Source Column ID" : "source_test"}\ + ).check_prereq(test_df) + 0 True + 1 True + 2 True + 3 True + 4 True + Name: preq_one, dtype: bool + + >>> NumMapCondition({"Condition" : 4.5,\ + "New Column Name" : "test new column name",\ + "Relationship" : "ge",\ + "Prerequisite" : "preq_two",\ + "Source Column ID" : "source_test"}\ + ).check_prereq(test_df) + 0 False + 1 False + 2 False + 3 False + 4 False + Name: preq_two, dtype: bool + """ + if pd.isnull(self.preq_column) or self.preq_column == "": + return True + return transformed_data[self.preq_column]
+ + @property + @abstractmethod + def possible_values(self): + """ abstract method stub: + generate a non-exhaustive list of possible values implied by the condition """ + return + +
[docs] def prepare_data(self, raw_data): + """Prepares raw_data by ensuring dtypes are correct for each comparison + + Args: + raw_data (Pandas DataFrame): a dataframe containing the raw data, + including the column given in self.source_name. + + Returns: + Pandas Series: the column in `raw_data` named in self.source_name, + with self.prep_func applied to it and null values preserved as np.nan. + + """ + return self.prep_func(raw_data[self.source_name]).where( + raw_data[self.source_name].notnull(), np.nan)
+ +
[docs] def describe(self): + """ just a wrapper for the __str__ function """ + return self.__str__()
+ + def __repr__(self): + """console representation for the class + + Examples: + >>> NumMapCondition({"Condition" : 4,\ + "New Column Name" : "test new name",\ + "Relationship" : "ge",\ + "Prerequisite" : "preq_two",\ + "Source Column ID" : "source_test"}) + <NumMapCondition: test new name = [column source_test].ge(4.0)> +""" + return " ".join(["<"+self.__class__.__name__+":\t", + self.name, "=", + "[column "+self.source_name + "]." + + self.relationship + "(" + str(self.condition) + ")>" + ]) + + def __str__(self): + """str representation + + Examples: + >>> print(NumMapCondition({"Condition" : 4,\ + "New Column Name" : "test new column name",\ + "Relationship" : "ge",\ + "Prerequisite" : "preq_two",\ + "Source Column ID" : "source_test"})) + NumMapCondition: New column test new column name is true where input + data column source_test is greater than or equal to 4.0 and the new + column preq_two is true. + + """ + report_as_list = [self.__class__.__name__+":", + "New column", self.name, + "is true where input data column", + self.source_name, + english_relationship(self.relationship), + str(self.condition)] + report_as = " ".join(report_as_list) + + if self.preq_column is not None: + return (report_as + " and the new column " + + str(self.preq_column) + " is true.") + return report_as
+ + +
[docs]class StrMapCondition(MapCondition): + """class representing a string condition; inherits from MapCondition + + Attributes: + source_dtype (str): instance attribute, a copy of the instance + attribute self.source_name with "_str" appended, + to represent the expected dtype + prep_func (function): instance attribute, a function applied before + making a string-based comparison; it casts values to str (null + values are restored separately by prepare_data). + """ + + def __init__(self, condition_row): + """Inits StrMapCondition + + Args: + condition_row (Pandas Series or dict): see MapCondition's __init__ + + Examples: + >>> StrMapCondition({"Condition" : "test cond",\ + "New Column Name" : "test new name",\ + "Relationship" : "eq",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}\ + ) + <StrMapCondition: test new name = [column source_test].eq(test cond)> + """ + super().__init__(condition_row) + self.source_dtype = self.source_name + "_str" + self.prep_func = lambda x: x.astype(str) + +
[docs] def possible_values(self): + """generate a non-exhaustive list of possible values implied by the condition + + Args: None + + Returns: + list: list containing the empty string, NA, None, the + self.condition attribute, and the common categorical responses + ("yes", "no", "dk", "ref") that might be expected by this condition + + Examples: + >>> StrMapCondition({"Condition" : "test condition",\ + "New Column Name" : "test new column name",\ + "Relationship" : "eq",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}\ + ).possible_values() + ['', nan, None, 'test condition', 'yes', 'no', 'dk', 'ref'] + + """ + return ["", np.nan, None, self.condition, "yes", "no", "dk", "ref"]
+ + +
[docs]class NumMapCondition(MapCondition): + """class representing a numerical condition; inherits from MapCondition + + Attributes: + source_dtype (str): a copy of the instance attribute self.source_name + with "_num" appended, to represent the expected dtype + prep_func (function): instance attribute, a function applied before + making a numeric comparison; pd.to_numeric() coerces non-numeric + data to NaN. + """ + + def __init__(self, condition_row, cast_cond=True): + """Inits NumMapCondition + + Args: + condition_row (Pandas Series or dict): see MapCondition's __init__ + cast_cond (bool): Whether to force the self.condition attribute to + be a float. Defaults to True. + + Examples: + >>> NumMapCondition({"Condition" : 3,\ + "New Column Name" : "test new name",\ + "Relationship" : "ge",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}) + <NumMapCondition: test new name = [column source_test].ge(3.0)> + + """ + super().__init__(condition_row) + if cast_cond: + self.condition = float(condition_row["Condition"]) + self.source_dtype = self.source_name + "_num" + self.prep_func = lambda x: pd.to_numeric(x, errors="coerce") + +
[docs] def possible_values(self): + """generate a non-exhaustive list of possible values implied by + condition + + Args: None + + Returns: + list: list containing range of possible values. If a greater than + relationship, the list will include ints from self.condition + + 1 to self.condition*2. If a less than relationship, it will + include values from 0 to self.condition. If the condition + includes "equal to", then self.condition will also be included. + + Examples: + >>> NumMapCondition({"Condition" : 3,\ + "New Column Name" : "test new name",\ + "Relationship" : "ge",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}).possible_values() + [4.0, 5.0, 3.0] + >>> NumMapCondition({"Condition" : 3,\ + "New Column Name" : "test new name",\ + "Relationship" : "lt",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test"}).possible_values() + [0.0, 1.0, 2.0] + + """ + if self.relationship[0] == "g": + possible = np.arange(self.condition+1, self.condition*2) + else: + possible = np.arange(0, self.condition) + if self.relationship[1] == "e": + possible = np.append(possible, self.condition) + return possible.tolist()
+ + +
[docs]class ContainsCondition(StrMapCondition): + """ Subclass of StrMapCondition that overrides the ._run_check() method for + the `contains` relationship + """ + + def _run_check(self, eval_col): + """Overrides the _run_check method of the abstract MapCondition class; + checks whether the input data contains the substring in the + self.condition attribute. + + Args: + eval_col (Pandas Series): a prepared string column from a CrossVA + instance, named self.source_dtype + + Returns: + boolean Pandas Series: a Pandas Series that is true where the + column named in self.source_dtype contains the substring in + self.condition + + Examples: + >>> test_df = pd.DataFrame({"source_test_1_str": ["test condition",\ + "test condition 2"],\ + "source_test_2_str": ["test test",\ + "test condition"]}) + >>> ContainsCondition({"Condition" : "test condition",\ + "New Column Name" : "test new column name",\ + "Relationship" : "contains",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test_1"}).check(test_df) + 0 True + 1 True + Name: source_test_1_str, dtype: bool + + >>> ContainsCondition({"Condition" : "test condition",\ + "New Column Name" : "test new column name",\ + "Relationship" : "contains",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test_2"}).check(test_df) + 0 False + 1 True + Name: source_test_2_str, dtype: bool + """ + return eval_col.fillna("").str.contains(self.condition)
+ + +
[docs]class BetweenCondition(NumMapCondition): + """Subclass of NumMapCondition that overrides the __init__ and ._run_check() + methods for the `between` relationship + + Attributes: + low (float): a float representing the lowest acceptable value (incl) + high (float): a float representing the highest acceptable value (incl) + """ + + def __init__(self, condition_row): + """Inits BetweenCondition. Same as the NumMapCondition init, but does not + cast condition_row["Condition"] to a float; instead splits it at " to " + and converts the first and second elements to floats. + + Args: + condition_row (Pandas Series or dict): see MapCondition's __init__ + + Examples: + >>> BetweenCondition({"Condition" : "3 to 5",\ + "New Column Name" : "test new column name",\ + "Relationship" : "between",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test_1"}) + <BetweenCondition: test new column name = [column + source_test_1].between(3 to 5)> + """ + super().__init__(condition_row, False) + self.low, self.high = [float(e) for e in self.condition.split(" to ")] + + def _run_check(self, eval_col): + """Checks whether the values in the prepared column fall between + self.low and self.high, inclusive. + + Args: + eval_col (Pandas Series): a prepared numeric column from a CrossVA + instance, named self.source_dtype + + Returns: + boolean Pandas Series: true where the column named in + self.source_dtype has a number between self.low and self.high, + inclusive + + Examples: + >>> test_df = pd.DataFrame({"source_test_1_num": [1,2,3],\ + "source_test_2_num": [4,5,6]}) + >>> BetweenCondition({"Condition" : "3 to 5",\ + "New Column Name" : "test new column name",\ + "Relationship" : "between",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test_1"}).check(test_df) + 0 False + 1 False + 2 True + Name: source_test_1_num, dtype: bool + >>> BetweenCondition({"Condition" : "3 to 5",\ + "New Column Name" : "test new column name",\ + "Relationship" : "between",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test_2"}).check(test_df) + 0 True + 1 True + 2 False + Name: source_test_2_num, dtype: bool + """ + return eval_col.between(self.low, self.high) + +
[docs] def possible_values(self): + """generate a non-exhaustive list of possible values implied by the + condition + + Args: None + + Returns: + list: a list of floats from self.low - 1 through self.high + 1 + + Examples: + >>> BetweenCondition({"Condition" : "3 to 5",\ + "New Column Name" : "test new column name",\ + "Relationship" : "between",\ + "Prerequisite" : None,\ + "Source Column ID" : "source_test_2"}\ + ).possible_values() + [2.0, 3.0, 4.0, 5.0, 6.0] + + """ + return np.arange(self.low-1, self.high+2).tolist()
+ + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) +
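Taken together, a condition built from one mapping row is applied in three steps: prepare_data() coerces the raw source column, check_prereq() consults any prerequisite column already present in the output being built, and check() evaluates the relationship while passing NAs through. A minimal sketch of that flow (the import path and the column and condition values below are illustrative assumptions, not taken from the package):

    import numpy as np
    import pandas as pd
    # Assumed import path -- adjust to the module that defines the condition classes.
    from mappings import NumMapCondition

    raw = pd.DataFrame({"age": ["67", "3", None]})
    cond = NumMapCondition({"Condition": 5, "New Column Name": "age_over_5",
                            "Relationship": "ge", "Prerequisite": None,
                            "Source Column ID": "age"})

    # Step 1: coerce the raw column to the dtype the condition expects ("age_num").
    prepared = pd.DataFrame({cond.source_dtype: cond.prepare_data(raw)})
    # Step 2: with no prerequisite, check_prereq() simply returns True.
    assert cond.check_prereq(pd.DataFrame()) is True
    # Step 3: evaluate the condition; missing values stay missing in the result.
    result = cond.check(prepared)   # True, False, NaN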
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pandas/core/series.html b/docs/_build/html/_modules/pandas/core/series.html new file mode 100644 index 0000000..24d9b50 --- /dev/null +++ b/docs/_build/html/_modules/pandas/core/series.html @@ -0,0 +1,4406 @@ + + + + + + + + + + + pandas.core.series — CrossVA 1.0a documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pandas.core.series

+"""
+Data structure for 1-dimensional cross-sectional and time series data
+"""
+from __future__ import division
+
+# pylint: disable=E1101,E1103
+# pylint: disable=W0703,W0622,W0613,W0201
+
+import types
+import warnings
+from textwrap import dedent
+
+import numpy as np
+import numpy.ma as ma
+
+from pandas.core.accessor import CachedAccessor
+from pandas.core.arrays import ExtensionArray
+from pandas.core.dtypes.common import (
+    is_categorical_dtype,
+    is_bool,
+    is_integer, is_integer_dtype,
+    is_float_dtype,
+    is_extension_type,
+    is_extension_array_dtype,
+    is_datetime64tz_dtype,
+    is_timedelta64_dtype,
+    is_object_dtype,
+    is_list_like,
+    is_hashable,
+    is_iterator,
+    is_dict_like,
+    is_scalar,
+    _is_unorderable_exception,
+    _ensure_platform_int,
+    pandas_dtype)
+from pandas.core.dtypes.generic import (
+    ABCSparseArray, ABCDataFrame, ABCIndexClass)
+from pandas.core.dtypes.cast import (
+    maybe_upcast, infer_dtype_from_scalar,
+    maybe_convert_platform,
+    maybe_cast_to_datetime, maybe_castable,
+    construct_1d_arraylike_from_scalar,
+    construct_1d_ndarray_preserving_na,
+    construct_1d_object_array_from_listlike)
+from pandas.core.dtypes.missing import (
+    isna,
+    notna,
+    remove_na_arraylike,
+    na_value_for_dtype)
+
+from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
+                               Float64Index, _ensure_index)
+from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
+from pandas.core import generic, base
+from pandas.core.internals import SingleBlockManager
+from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
+from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
+from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.timedeltas import TimedeltaIndex
+from pandas.core.indexes.period import PeriodIndex
+from pandas import compat
+from pandas.io.formats.terminal import get_terminal_size
+from pandas.compat import (
+    zip, u, OrderedDict, StringIO, range, get_range_parameters, PY36)
+from pandas.compat.numpy import function as nv
+
+import pandas.core.ops as ops
+import pandas.core.algorithms as algorithms
+
+import pandas.core.common as com
+import pandas.core.nanops as nanops
+import pandas.io.formats.format as fmt
+from pandas.util._decorators import (
+    Appender, deprecate, deprecate_kwarg, Substitution)
+from pandas.util._validators import validate_bool_kwarg
+
+from pandas._libs import index as libindex, tslib as libts, lib, iNaT
+from pandas.core.config import get_option
+from pandas.core.strings import StringMethods
+
+import pandas.plotting._core as gfx
+
+__all__ = ['Series']
+
+_shared_doc_kwargs = dict(
+    axes='index', klass='Series', axes_single_arg="{0 or 'index'}",
+    axis="""
+    axis : {0 or 'index'}
+        Parameter needed for compatibility with DataFrame.
+    """,
+    inplace="""inplace : boolean, default False
+        If True, performs operation inplace and returns None.""",
+    unique='np.ndarray', duplicated='Series',
+    optional_by='', optional_mapper='', optional_labels='', optional_axis='',
+    versionadded_to_excel='\n    .. versionadded:: 0.20.0\n')
+
+
+# see gh-16971
+def remove_na(arr):
+    """Remove null values from array like structure.
+
+    .. deprecated:: 0.21.0
+        Use s[s.notnull()] instead.
+    """
+
+    warnings.warn("remove_na is deprecated and is a private "
+                  "function. Do not use.", FutureWarning, stacklevel=2)
+    return remove_na_arraylike(arr)
+
+
+def _coerce_method(converter):
+    """ install the scalar coercion methods """
+
+    def wrapper(self):
+        if len(self) == 1:
+            return converter(self.iloc[0])
+        raise TypeError("cannot convert the series to "
+                        "{0}".format(str(converter)))
+
+    return wrapper
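The coercion wrapper installed above only succeeds for a length-one Series; anything longer raises. For example:

    import pandas as pd

    float(pd.Series([2.5]))     # 2.5 -- a single-element Series coerces to its scalar
    int(pd.Series([7]))         # 7
    # float(pd.Series([1, 2]))  # raises TypeError: cannot convert the series to <class 'float'>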
+
+# ----------------------------------------------------------------------
+# Series class
+
+
+class Series(base.IndexOpsMixin, generic.NDFrame):
+    """
+    One-dimensional ndarray with axis labels (including time series).
+
+    Labels need not be unique but must be a hashable type. The object
+    supports both integer- and label-based indexing and provides a host of
+    methods for performing operations involving the index. Statistical
+    methods from ndarray have been overridden to automatically exclude
+    missing data (currently represented as NaN).
+
+    Operations between Series (+, -, /, *, **) align values based on their
+    associated index values-- they need not be the same length. The result
+    index will be the sorted union of the two indexes.
+
+    Parameters
+    ----------
+    data : array-like, dict, or scalar value
+        Contains data stored in Series
+
+        .. versionchanged :: 0.23.0
+           If data is a dict, argument order is maintained for Python 3.6
+           and later.
+
+    index : array-like or Index (1d)
+        Values must be hashable and have the same length as `data`.
+        Non-unique index values are allowed. Will default to
+        RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
+        sequence are used, the index will override the keys found in the
+        dict.
+    dtype : numpy.dtype or None
+        If None, dtype will be inferred
+    copy : boolean, default False
+        Copy input data
+    """
+    _metadata = ['name']
+    _accessors = set(['dt', 'cat', 'str'])
+    _deprecations = generic.NDFrame._deprecations | frozenset(
+        ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
+         'from_csv', 'valid'])
+
+    def __init__(self, data=None, index=None, dtype=None, name=None,
+                 copy=False, fastpath=False):
+
+        # we are called internally, so short-circuit
+        if fastpath:
+
+            # data is an ndarray, index is defined
+            if not isinstance(data, SingleBlockManager):
+                data = SingleBlockManager(data, index, fastpath=True)
+            if copy:
+                data = data.copy()
+            if index is None:
+                index = data.index
+
+        else:
+
+            if index is not None:
+                index = _ensure_index(index)
+
+            if data is None:
+                data = {}
+            if dtype is not None:
+                dtype = self._validate_dtype(dtype)
+
+            if isinstance(data, MultiIndex):
+                raise NotImplementedError("initializing a Series from a "
+                                          "MultiIndex is not supported")
+            elif isinstance(data, Index):
+                if name is None:
+                    name = data.name
+
+                if dtype is not None:
+                    # astype copies
+                    data = data.astype(dtype)
+                else:
+                    # need to copy to avoid aliasing issues
+                    data = data._values.copy()
+                copy = False
+
+            elif isinstance(data, np.ndarray):
+                pass
+            elif isinstance(data, Series):
+                if name is None:
+                    name = data.name
+                if index is None:
+                    index = data.index
+                else:
+                    data = data.reindex(index, copy=copy)
+                data = data._data
+            elif isinstance(data, dict):
+                data, index = self._init_dict(data, index, dtype)
+                dtype = None
+                copy = False
+            elif isinstance(data, SingleBlockManager):
+                if index is None:
+                    index = data.index
+                elif not data.index.equals(index) or copy:
+                    # GH#19275 SingleBlockManager input should only be called
+                    # internally
+                    raise AssertionError('Cannot pass both SingleBlockManager '
+                                         '`data` argument and a different '
+                                         '`index` argument.  `copy` must '
+                                         'be False.')
+
+            elif is_extension_array_dtype(data) and dtype is not None:
+                if not data.dtype.is_dtype(dtype):
+                    raise ValueError("Cannot specify a dtype '{}' with an "
+                                     "extension array of a different "
+                                     "dtype ('{}').".format(dtype,
+                                                            data.dtype))
+
+            elif (isinstance(data, types.GeneratorType) or
+                  (compat.PY3 and isinstance(data, map))):
+                data = list(data)
+            elif isinstance(data, (set, frozenset)):
+                raise TypeError("{0!r} type is unordered"
+                                "".format(data.__class__.__name__))
+            else:
+
+                # handle sparse passed here (and force conversion)
+                if isinstance(data, ABCSparseArray):
+                    data = data.to_dense()
+
+            if index is None:
+                if not is_list_like(data):
+                    data = [data]
+                index = com._default_index(len(data))
+            elif is_list_like(data):
+
+                # a scalar numpy array is list-like but doesn't
+                # have a proper length
+                try:
+                    if len(index) != len(data):
+                        raise ValueError(
+                            'Length of passed values is {val}, '
+                            'index implies {ind}'
+                            .format(val=len(data), ind=len(index)))
+                except TypeError:
+                    pass
+
+            # create/copy the manager
+            if isinstance(data, SingleBlockManager):
+                if dtype is not None:
+                    data = data.astype(dtype=dtype, errors='ignore',
+                                       copy=copy)
+                elif copy:
+                    data = data.copy()
+            else:
+                data = _sanitize_array(data, index, dtype, copy,
+                                       raise_cast_failure=True)
+
+                data = SingleBlockManager(data, index, fastpath=True)
+
+        generic.NDFrame.__init__(self, data, fastpath=True)
+
+        self.name = name
+        self._set_axis(0, index, fastpath=True)
+
+    def _init_dict(self, data, index=None, dtype=None):
+        """
+        Derive the "_data" and "index" attributes of a new Series from a
+        dictionary input.
+
+        Parameters
+        ----------
+        data : dict or dict-like
+            Data used to populate the new Series
+        index : Index or index-like, default None
+            index for the new Series: if None, use dict keys
+        dtype : dtype, default None
+            dtype for the new Series: if None, infer from data
+
+        Returns
+        -------
+        _data : BlockManager for the new Series
+        index : index for the new Series
+        """
+        # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
+        # raises KeyError), so we iterate the entire dict, and align
+        if data:
+            keys, values = zip(*compat.iteritems(data))
+            values = list(values)
+        elif index is not None:
+            # fastpath for Series(data=None). Just use broadcasting a scalar
+            # instead of reindexing.
+            values = na_value_for_dtype(dtype)
+            keys = index
+        else:
+            keys, values = [], []
+
+        # Input is now list-like, so rely on "standard" construction:
+        s = Series(values, index=keys, dtype=dtype)
+
+        # Now we just make sure the order is respected, if any
+        if data and index is not None:
+            s = s.reindex(index, copy=False)
+        elif not PY36 and not isinstance(data, OrderedDict) and data:
+            # Need the `and data` to avoid sorting Series(None, index=[...])
+            # since that isn't really dict-like
+            try:
+                s = s.sort_index()
+            except TypeError:
+                pass
+        return s._data, s.index
+
+    @classmethod
+    def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
+                   fastpath=False):
+        """Construct Series from array.
+
+        .. deprecated :: 0.23.0
+            Use pd.Series(..) constructor instead.
+
+        """
+        warnings.warn("'from_array' is deprecated and will be removed in a "
+                      "future version. Please use the pd.Series(..) "
+                      "constructor instead.", FutureWarning, stacklevel=2)
+        if isinstance(arr, ABCSparseArray):
+            from pandas.core.sparse.series import SparseSeries
+            cls = SparseSeries
+        return cls(arr, index=index, name=name, dtype=dtype,
+                   copy=copy, fastpath=fastpath)
+
+    @property
+    def _constructor(self):
+        return Series
+
+    @property
+    def _constructor_expanddim(self):
+        from pandas.core.frame import DataFrame
+        return DataFrame
+
+    # types
+    @property
+    def _can_hold_na(self):
+        return self._data._can_hold_na
+
+    _index = None
+
+    def _set_axis(self, axis, labels, fastpath=False):
+        """ override generic, we want to set the _typ here """
+
+        if not fastpath:
+            labels = _ensure_index(labels)
+
+        is_all_dates = labels.is_all_dates
+        if is_all_dates:
+            if not isinstance(labels,
+                              (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+                try:
+                    labels = DatetimeIndex(labels)
+                    # need to set here because we changed the index
+                    if fastpath:
+                        self._data.set_axis(axis, labels)
+                except (libts.OutOfBoundsDatetime, ValueError):
+                    # labels may exceed datetime bounds,
+                    # or not be a DatetimeIndex
+                    pass
+
+        self._set_subtyp(is_all_dates)
+
+        object.__setattr__(self, '_index', labels)
+        if not fastpath:
+            self._data.set_axis(axis, labels)
+
+    def _set_subtyp(self, is_all_dates):
+        if is_all_dates:
+            object.__setattr__(self, '_subtyp', 'time_series')
+        else:
+            object.__setattr__(self, '_subtyp', 'series')
+
+    def _update_inplace(self, result, **kwargs):
+        # we want to call the generic version and not the IndexOpsMixin
+        return generic.NDFrame._update_inplace(self, result, **kwargs)
+
+    @property
+    def name(self):
+        return self._name
+
+    @name.setter
+    def name(self, value):
+        if value is not None and not is_hashable(value):
+            raise TypeError('Series.name must be a hashable type')
+        object.__setattr__(self, '_name', value)
+
+    # ndarray compatibility
+    @property
+    def dtype(self):
+        """ return the dtype object of the underlying data """
+        return self._data.dtype
+
+    @property
+    def dtypes(self):
+        """ return the dtype object of the underlying data """
+        return self._data.dtype
+
+    @property
+    def ftype(self):
+        """ return if the data is sparse|dense """
+        return self._data.ftype
+
+    @property
+    def ftypes(self):
+        """ return if the data is sparse|dense """
+        return self._data.ftype
+
+    @property
+    def values(self):
+        """
+        Return Series as ndarray or ndarray-like
+        depending on the dtype
+
+        Returns
+        -------
+        arr : numpy.ndarray or ndarray-like
+
+        Examples
+        --------
+        >>> pd.Series([1, 2, 3]).values
+        array([1, 2, 3])
+
+        >>> pd.Series(list('aabc')).values
+        array(['a', 'a', 'b', 'c'], dtype=object)
+
+        >>> pd.Series(list('aabc')).astype('category').values
+        [a, a, b, c]
+        Categories (3, object): [a, b, c]
+
+        Timezone aware datetime data is converted to UTC:
+
+        >>> pd.Series(pd.date_range('20130101', periods=3,
+        ...                         tz='US/Eastern')).values
+        array(['2013-01-01T05:00:00.000000000',
+               '2013-01-02T05:00:00.000000000',
+               '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
+
+        """
+        return self._data.external_values()
+
+    @property
+    def _values(self):
+        """ return the internal repr of this data """
+        return self._data.internal_values()
+
+    def _formatting_values(self):
+        """Return the values that can be formatted (used by SeriesFormatter
+        and DataFrameFormatter)
+        """
+        return self._data.formatting_values()
+
+    def get_values(self):
+        """ same as values (but handles sparseness conversions); is a view """
+        return self._data.get_values()
+
+    @property
+    def asobject(self):
+        """Return object Series which contains boxed values.
+
+        .. deprecated :: 0.23.0
+
+           Use ``astype(object)`` instead.
+
+        *this is an internal non-public method*
+        """
+        warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
+                      " instead", FutureWarning, stacklevel=2)
+        return self.astype(object).values
+
+    # ops
+    def ravel(self, order='C'):
+        """
+        Return the flattened underlying data as an ndarray
+
+        See also
+        --------
+        numpy.ndarray.ravel
+        """
+        return self._values.ravel(order=order)
+
+    def compress(self, condition, *args, **kwargs):
+        """
+        Return selected slices of an array along given axis as a Series
+
+        See also
+        --------
+        numpy.ndarray.compress
+        """
+        nv.validate_compress(args, kwargs)
+        return self[condition]
+
+    def nonzero(self):
+        """
+        Return the *integer* indices of the elements that are non-zero
+
+        This method is equivalent to calling `numpy.nonzero` on the
+        series data. For compatibility with NumPy, the return value is
+        the same (a tuple with an array of indices for each dimension),
+        but it will always be a one-item tuple because series only have
+        one dimension.
+
+        Examples
+        --------
+        >>> s = pd.Series([0, 3, 0, 4])
+        >>> s.nonzero()
+        (array([1, 3]),)
+        >>> s.iloc[s.nonzero()[0]]
+        1    3
+        3    4
+        dtype: int64
+
+        >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
+        # same return although index of s is different
+        >>> s.nonzero()
+        (array([1, 3]),)
+        >>> s.iloc[s.nonzero()[0]]
+        b    3
+        d    4
+        dtype: int64
+
+        See Also
+        --------
+        numpy.nonzero
+        """
+        return self._values.nonzero()
+
+    def put(self, *args, **kwargs):
+        """
+        Applies the `put` method to its `values` attribute
+        if it has one.
+
+        See also
+        --------
+        numpy.ndarray.put
+        """
+        self._values.put(*args, **kwargs)
+
+    def __len__(self):
+        """
+        return the length of the Series
+        """
+        return len(self._data)
+
+    def view(self, dtype=None):
+        """
+        Create a new view of the Series.
+
+        This function will return a new Series with a view of the same
+        underlying values in memory, optionally reinterpreted with a new data
+        type. The new data type must preserve the same size in bytes as to not
+        cause index misalignment.
+
+        Parameters
+        ----------
+        dtype : data type
+            Data type object or one of their string representations.
+
+        Returns
+        -------
+        Series
+            A new Series object as a view of the same data in memory.
+
+        See Also
+        --------
+        numpy.ndarray.view : Equivalent numpy function to create a new view of
+            the same data in memory.
+
+        Notes
+        -----
+        Series are instantiated with ``dtype=float64`` by default. While
+        ``numpy.ndarray.view()`` will return a view with the same data type as
+        the original array, ``Series.view()`` (without specified dtype)
+        will try using ``float64`` and may fail if the original data type size
+        in bytes is not the same.
+
+        Examples
+        --------
+        >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
+        >>> s
+        0   -2
+        1   -1
+        2    0
+        3    1
+        4    2
+        dtype: int8
+
+        The 8 bit signed integer representation of `-1` is `0b11111111`, but
+        the same bytes represent 255 if read as an 8 bit unsigned integer:
+
+        >>> us = s.view('uint8')
+        >>> us
+        0    254
+        1    255
+        2      0
+        3      1
+        4      2
+        dtype: uint8
+
+        The views share the same underlying values:
+
+        >>> us[0] = 128
+        >>> s
+        0   -128
+        1     -1
+        2      0
+        3      1
+        4      2
+        dtype: int8
+        """
+        return self._constructor(self._values.view(dtype),
+                                 index=self.index).__finalize__(self)
+
+    def __array__(self, result=None):
+        """
+        the array interface, return my values
+        """
+        return self.get_values()
+
+    def __array_wrap__(self, result, context=None):
+        """
+        Gets called after a ufunc
+        """
+        return self._constructor(result, index=self.index,
+                                 copy=False).__finalize__(self)
+
+    def __array_prepare__(self, result, context=None):
+        """
+        Gets called prior to a ufunc
+        """
+
+        # nice error message for non-ufunc types
+        if context is not None and not isinstance(self._values, np.ndarray):
+            obj = context[1][0]
+            raise TypeError("{obj} with dtype {dtype} cannot perform "
+                            "the numpy op {op}".format(
+                                obj=type(obj).__name__,
+                                dtype=getattr(obj, 'dtype', None),
+                                op=context[0].__name__))
+        return result
+
+    # complex
+    @property
+    def real(self):
+        return self.values.real
+
+    @real.setter
+    def real(self, v):
+        self.values.real = v
+
+    @property
+    def imag(self):
+        return self.values.imag
+
+    @imag.setter
+    def imag(self, v):
+        self.values.imag = v
+
+    # coercion
+    __float__ = _coerce_method(float)
+    __long__ = _coerce_method(int)
+    __int__ = _coerce_method(int)
+
+    def _unpickle_series_compat(self, state):
+        if isinstance(state, dict):
+            self._data = state['_data']
+            self.name = state['name']
+            self.index = self._data.index
+
+        elif isinstance(state, tuple):
+
+            # < 0.12 series pickle
+
+            nd_state, own_state = state
+
+            # recreate the ndarray
+            data = np.empty(nd_state[1], dtype=nd_state[2])
+            np.ndarray.__setstate__(data, nd_state)
+
+            # backwards compat
+            index, name = own_state[0], None
+            if len(own_state) > 1:
+                name = own_state[1]
+
+            # recreate
+            self._data = SingleBlockManager(data, index, fastpath=True)
+            self._index = index
+            self.name = name
+
+        else:
+            raise Exception("cannot unpickle legacy formats -> [%s]" % state)
+
+    # indexers
+    @property
+    def axes(self):
+        """Return a list of the row axis labels"""
+        return [self.index]
+
+    def _ixs(self, i, axis=0):
+        """
+        Return the i-th value or values in the Series by location
+
+        Parameters
+        ----------
+        i : int, slice, or sequence of integers
+
+        Returns
+        -------
+        value : scalar (int) or Series (slice, sequence)
+        """
+        try:
+
+            # dispatch to the values if we need
+            values = self._values
+            if isinstance(values, np.ndarray):
+                return libindex.get_value_at(values, i)
+            else:
+                return values[i]
+        except IndexError:
+            raise
+        except Exception:
+            if isinstance(i, slice):
+                indexer = self.index._convert_slice_indexer(i, kind='iloc')
+                return self._get_values(indexer)
+            else:
+                label = self.index[i]
+                if isinstance(label, Index):
+                    return self.take(i, axis=axis, convert=True)
+                else:
+                    return libindex.get_value_at(self, i)
+
+    @property
+    def _is_mixed_type(self):
+        return False
+
+    def _slice(self, slobj, axis=0, kind=None):
+        slobj = self.index._convert_slice_indexer(slobj,
+                                                  kind=kind or 'getitem')
+        return self._get_values(slobj)
+
+    def __getitem__(self, key):
+        key = com._apply_if_callable(key, self)
+        try:
+            result = self.index.get_value(self, key)
+
+            if not is_scalar(result):
+                if is_list_like(result) and not isinstance(result, Series):
+
+                    # we need to box if loc of the key isn't scalar here
+                    # otherwise have inline ndarray/lists
+                    try:
+                        if not is_scalar(self.index.get_loc(key)):
+                            result = self._constructor(
+                                result, index=[key] * len(result),
+                                dtype=self.dtype).__finalize__(self)
+                    except KeyError:
+                        pass
+            return result
+        except InvalidIndexError:
+            pass
+        except (KeyError, ValueError):
+            if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
+                # kludge
+                pass
+            elif key is Ellipsis:
+                return self
+            elif com.is_bool_indexer(key):
+                pass
+            else:
+
+                # we can try to coerce the indexer (or this will raise)
+                new_key = self.index._convert_scalar_indexer(key,
+                                                             kind='getitem')
+                if type(new_key) != type(key):
+                    return self.__getitem__(new_key)
+                raise
+
+        except Exception:
+            raise
+
+        if is_iterator(key):
+            key = list(key)
+
+        if com.is_bool_indexer(key):
+            key = check_bool_indexer(self.index, key)
+
+        return self._get_with(key)
+
+    def _get_with(self, key):
+        # other: fancy integer or otherwise
+        if isinstance(key, slice):
+            indexer = self.index._convert_slice_indexer(key, kind='getitem')
+            return self._get_values(indexer)
+        elif isinstance(key, ABCDataFrame):
+            raise TypeError('Indexing a Series with DataFrame is not '
+                            'supported, use the appropriate DataFrame column')
+        else:
+            if isinstance(key, tuple):
+                try:
+                    return self._get_values_tuple(key)
+                except Exception:
+                    if len(key) == 1:
+                        key = key[0]
+                        if isinstance(key, slice):
+                            return self._get_values(key)
+                    raise
+
+            # pragma: no cover
+            if not isinstance(key, (list, np.ndarray, Series, Index)):
+                key = list(key)
+
+            if isinstance(key, Index):
+                key_type = key.inferred_type
+            else:
+                key_type = lib.infer_dtype(key)
+
+            if key_type == 'integer':
+                if self.index.is_integer() or self.index.is_floating():
+                    return self.loc[key]
+                else:
+                    return self._get_values(key)
+            elif key_type == 'boolean':
+                return self._get_values(key)
+            else:
+                try:
+                    # handle the dup indexing case (GH 4246)
+                    if isinstance(key, (list, tuple)):
+                        return self.loc[key]
+
+                    return self.reindex(key)
+                except Exception:
+                    # [slice(0, 5, None)] will break if you convert to ndarray,
+                    # e.g. as requested by np.median
+                    # hack
+                    if isinstance(key[0], slice):
+                        return self._get_values(key)
+                    raise
+
+    def _get_values_tuple(self, key):
+        # mpl hackaround
+        if com._any_none(*key):
+            return self._get_values(key)
+
+        if not isinstance(self.index, MultiIndex):
+            raise ValueError('Can only tuple-index with a MultiIndex')
+
+        # If key is contained, would have returned by now
+        indexer, new_index = self.index.get_loc_level(key)
+        return self._constructor(self._values[indexer],
+                                 index=new_index).__finalize__(self)
+
+    def _get_values(self, indexer):
+        try:
+            return self._constructor(self._data.get_slice(indexer),
+                                     fastpath=True).__finalize__(self)
+        except Exception:
+            return self._values[indexer]
+
+    def __setitem__(self, key, value):
+        key = com._apply_if_callable(key, self)
+
+        def setitem(key, value):
+            try:
+                self._set_with_engine(key, value)
+                return
+            except com.SettingWithCopyError:
+                raise
+            except (KeyError, ValueError):
+                values = self._values
+                if (is_integer(key) and
+                        not self.index.inferred_type == 'integer'):
+
+                    values[key] = value
+                    return
+                elif key is Ellipsis:
+                    self[:] = value
+                    return
+                elif com.is_bool_indexer(key):
+                    pass
+                elif is_timedelta64_dtype(self.dtype):
+                    # reassign a null value to iNaT
+                    if isna(value):
+                        value = iNaT
+
+                        try:
+                            self.index._engine.set_value(self._values, key,
+                                                         value)
+                            return
+                        except TypeError:
+                            pass
+
+                self.loc[key] = value
+                return
+
+            except TypeError as e:
+                if (isinstance(key, tuple) and
+                        not isinstance(self.index, MultiIndex)):
+                    raise ValueError("Can only tuple-index with a MultiIndex")
+
+                # python 3 type errors should be raised
+                if _is_unorderable_exception(e):
+                    raise IndexError(key)
+
+            if com.is_bool_indexer(key):
+                key = check_bool_indexer(self.index, key)
+                try:
+                    self._where(~key, value, inplace=True)
+                    return
+                except InvalidIndexError:
+                    pass
+
+            self._set_with(key, value)
+
+        # do the setitem
+        cacher_needs_updating = self._check_is_chained_assignment_possible()
+        setitem(key, value)
+        if cacher_needs_updating:
+            self._maybe_update_cacher()
+
+    def _set_with_engine(self, key, value):
+        values = self._values
+        try:
+            self.index._engine.set_value(values, key, value)
+            return
+        except KeyError:
+            values[self.index.get_loc(key)] = value
+            return
+
+    def _set_with(self, key, value):
+        # other: fancy integer or otherwise
+        if isinstance(key, slice):
+            indexer = self.index._convert_slice_indexer(key, kind='getitem')
+            return self._set_values(indexer, value)
+        else:
+            if isinstance(key, tuple):
+                try:
+                    self._set_values(key, value)
+                except Exception:
+                    pass
+
+            if not isinstance(key, (list, Series, np.ndarray, Series)):
+                try:
+                    key = list(key)
+                except Exception:
+                    key = [key]
+
+            if isinstance(key, Index):
+                key_type = key.inferred_type
+            else:
+                key_type = lib.infer_dtype(key)
+
+            if key_type == 'integer':
+                if self.index.inferred_type == 'integer':
+                    self._set_labels(key, value)
+                else:
+                    return self._set_values(key, value)
+            elif key_type == 'boolean':
+                self._set_values(key.astype(np.bool_), value)
+            else:
+                self._set_labels(key, value)
+
+    def _set_labels(self, key, value):
+        if isinstance(key, Index):
+            key = key.values
+        else:
+            key = com._asarray_tuplesafe(key)
+        indexer = self.index.get_indexer(key)
+        mask = indexer == -1
+        if mask.any():
+            raise ValueError('%s not contained in the index' % str(key[mask]))
+        self._set_values(indexer, value)
+
+    def _set_values(self, key, value):
+        if isinstance(key, Series):
+            key = key._values
+        self._data = self._data.setitem(indexer=key, value=value)
+        self._maybe_update_cacher()
+
+    @deprecate_kwarg(old_arg_name='reps', new_arg_name='repeats')
+    def repeat(self, repeats, *args, **kwargs):
+        """
+        Repeat elements of a Series. Refer to `numpy.ndarray.repeat`
+        for more information about the `repeats` argument.
+
+        See also
+        --------
+        numpy.ndarray.repeat
+        """
+        nv.validate_repeat(args, kwargs)
+        new_index = self.index.repeat(repeats)
+        new_values = self._values.repeat(repeats)
+        return self._constructor(new_values,
+                                 index=new_index).__finalize__(self)
+
+    def get_value(self, label, takeable=False):
+        """Quickly retrieve single value at passed index label
+
+        .. deprecated:: 0.21.0
+            Please use .at[] or .iat[] accessors.
+
+        Parameters
+        ----------
+        label : object
+        takeable : interpret the index as indexers, default False
+
+        Returns
+        -------
+        value : scalar value
+        """
+        warnings.warn("get_value is deprecated and will be removed "
+                      "in a future release. Please use "
+                      ".at[] or .iat[] accessors instead", FutureWarning,
+                      stacklevel=2)
+        return self._get_value(label, takeable=takeable)
+
+    def _get_value(self, label, takeable=False):
+        if takeable is True:
+            return com._maybe_box_datetimelike(self._values[label])
+        return self.index.get_value(self._values, label)
+    _get_value.__doc__ = get_value.__doc__
+
+    def set_value(self, label, value, takeable=False):
+        """Quickly set single value at passed label. If label is not contained,
+        a new object is created with the label placed at the end of the result
+        index.
+
+        .. deprecated:: 0.21.0
+            Please use .at[] or .iat[] accessors.
+
+        Parameters
+        ----------
+        label : object
+            Partial indexing with MultiIndex not allowed
+        value : object
+            Scalar value
+        takeable : interpret the index as indexers, default False
+
+        Returns
+        -------
+        series : Series
+            If label is contained, will be reference to calling Series,
+            otherwise a new object
+        """
+        warnings.warn("set_value is deprecated and will be removed "
+                      "in a future release. Please use "
+                      ".at[] or .iat[] accessors instead", FutureWarning,
+                      stacklevel=2)
+        return self._set_value(label, value, takeable=takeable)
+
+    def _set_value(self, label, value, takeable=False):
+        try:
+            if takeable:
+                self._values[label] = value
+            else:
+                self.index._engine.set_value(self._values, label, value)
+        except KeyError:
+
+            # set using a non-recursive method
+            self.loc[label] = value
+
+        return self
+    _set_value.__doc__ = set_value.__doc__
+
+    def reset_index(self, level=None, drop=False, name=None, inplace=False):
+        """
+        Generate a new DataFrame or Series with the index reset.
+
+        This is useful when the index needs to be treated as a column, or
+        when the index is meaningless and needs to be reset to the default
+        before another operation.
+
+        Parameters
+        ----------
+        level : int, str, tuple, or list, default optional
+            For a Series with a MultiIndex, only remove the specified levels
+            from the index. Removes all levels by default.
+        drop : bool, default False
+            Just reset the index, without inserting it as a column in
+            the new DataFrame.
+        name : object, optional
+            The name to use for the column containing the original Series
+            values. Uses ``self.name`` by default. This argument is ignored
+            when `drop` is True.
+        inplace : bool, default False
+            Modify the Series in place (do not create a new object).
+
+        Returns
+        -------
+        Series or DataFrame
+            When `drop` is False (the default), a DataFrame is returned.
+            The newly created columns will come first in the DataFrame,
+            followed by the original Series values.
+            When `drop` is True, a `Series` is returned.
+            In either case, if ``inplace=True``, no value is returned.
+
+        See Also
+        --------
+        DataFrame.reset_index: Analogous function for DataFrame.
+
+        Examples
+        --------
+
+        >>> s = pd.Series([1, 2, 3, 4], name='foo',
+        ...               index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
+
+        Generate a DataFrame with default index.
+
+        >>> s.reset_index()
+          idx  foo
+        0   a    1
+        1   b    2
+        2   c    3
+        3   d    4
+
+        To specify the name of the new column use `name`.
+
+        >>> s.reset_index(name='values')
+          idx  values
+        0   a       1
+        1   b       2
+        2   c       3
+        3   d       4
+
+        To generate a new Series with the default set `drop` to True.
+
+        >>> s.reset_index(drop=True)
+        0    1
+        1    2
+        2    3
+        3    4
+        Name: foo, dtype: int64
+
+        To update the Series in place, without generating a new one
+        set `inplace` to True. Note that it also requires ``drop=True``.
+
+        >>> s.reset_index(inplace=True, drop=True)
+        >>> s
+        0    1
+        1    2
+        2    3
+        3    4
+        Name: foo, dtype: int64
+
+        The `level` parameter is interesting for Series with a multi-level
+        index.
+
+        >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
+        ...           np.array(['one', 'two', 'one', 'two'])]
+        >>> s2 = pd.Series(
+        ...     range(4), name='foo',
+        ...     index=pd.MultiIndex.from_arrays(arrays,
+        ...                                     names=['a', 'b']))
+
+        To remove a specific level from the Index, use `level`.
+
+        >>> s2.reset_index(level='a')
+               a  foo
+        b
+        one  bar    0
+        two  bar    1
+        one  baz    2
+        two  baz    3
+
+        If `level` is not set, all levels are removed from the Index.
+
+        >>> s2.reset_index()
+             a    b  foo
+        0  bar  one    0
+        1  bar  two    1
+        2  baz  one    2
+        3  baz  two    3
+        """
+        inplace = validate_bool_kwarg(inplace, 'inplace')
+        if drop:
+            new_index = com._default_index(len(self))
+            if level is not None:
+                if not isinstance(level, (tuple, list)):
+                    level = [level]
+                level = [self.index._get_level_number(lev) for lev in level]
+                if isinstance(self.index, MultiIndex):
+                    if len(level) < self.index.nlevels:
+                        new_index = self.index.droplevel(level)
+
+            if inplace:
+                self.index = new_index
+                # set name if it was passed, otherwise, keep the previous name
+                self.name = name or self.name
+            else:
+                return self._constructor(self._values.copy(),
+                                         index=new_index).__finalize__(self)
+        elif inplace:
+            raise TypeError('Cannot reset_index inplace on a Series '
+                            'to create a DataFrame')
+        else:
+            df = self.to_frame(name)
+            return df.reset_index(level=level, drop=drop)
+
+    def __unicode__(self):
+        """
+        Return a string representation for a particular DataFrame
+
+        Invoked by unicode(df) in py2 only. Yields a Unicode String in both
+        py2/py3.
+        """
+        buf = StringIO(u(""))
+        width, height = get_terminal_size()
+        max_rows = (height if get_option("display.max_rows") == 0 else
+                    get_option("display.max_rows"))
+        show_dimensions = get_option("display.show_dimensions")
+
+        self.to_string(buf=buf, name=self.name, dtype=self.dtype,
+                       max_rows=max_rows, length=show_dimensions)
+        result = buf.getvalue()
+
+        return result
+
+    def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
+                  index=True, length=False, dtype=False, name=False,
+                  max_rows=None):
+        """
+        Render a string representation of the Series
+
+        Parameters
+        ----------
+        buf : StringIO-like, optional
+            buffer to write to
+        na_rep : string, optional
+            string representation of NAN to use, default 'NaN'
+        float_format : one-parameter function, optional
+            formatter function to apply to columns' elements if they are floats
+            default None
+        header: boolean, default True
+            Add the Series header (index name)
+        index : bool, optional
+            Add index (row) labels, default True
+        length : boolean, default False
+            Add the Series length
+        dtype : boolean, default False
+            Add the Series dtype
+        name : boolean, default False
+            Add the Series name if not None
+        max_rows : int, optional
+            Maximum number of rows to show before truncating. If None, show
+            all.
+
+        Returns
+        -------
+        formatted : string (if not buffer passed)
+        """
+
+        formatter = fmt.SeriesFormatter(self, name=name, length=length,
+                                        header=header, index=index,
+                                        dtype=dtype, na_rep=na_rep,
+                                        float_format=float_format,
+                                        max_rows=max_rows)
+        result = formatter.to_string()
+
+        # catch contract violations
+        if not isinstance(result, compat.text_type):
+            raise AssertionError("result must be of type unicode, type"
+                                 " of result is {0!r}"
+                                 "".format(result.__class__.__name__))
+
+        if buf is None:
+            return result
+        else:
+            try:
+                buf.write(result)
+            except AttributeError:
+                with open(buf, 'w') as f:
+                    f.write(result)
+
+    def iteritems(self):
+        """
+        Lazily iterate over (index, value) tuples
+        """
+        return zip(iter(self.index), iter(self))
+
+    items = iteritems
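For reference, iteritems() (aliased as items()) lazily yields plain (index, value) pairs:

    import pandas as pd

    list(pd.Series([10, 20], index=["a", "b"]).iteritems())
    # [('a', 10), ('b', 20)]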
+
+    # ----------------------------------------------------------------------
+    # Misc public methods
+
+    def keys(self):
+        """Alias for index"""
+        return self.index
+
+    def to_dict(self, into=dict):
+        """
+        Convert Series to {label -> value} dict or dict-like object.
+
+        Parameters
+        ----------
+        into : class, default dict
+            The collections.Mapping subclass to use as the return
+            object. Can be the actual class or an empty
+            instance of the mapping type you want.  If you want a
+            collections.defaultdict, you must pass it initialized.
+
+            .. versionadded:: 0.21.0
+
+        Returns
+        -------
+        value_dict : collections.Mapping
+
+        Examples
+        --------
+        >>> s = pd.Series([1, 2, 3, 4])
+        >>> s.to_dict()
+        {0: 1, 1: 2, 2: 3, 3: 4}
+        >>> from collections import OrderedDict, defaultdict
+        >>> s.to_dict(OrderedDict)
+        OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
+        >>> dd = defaultdict(list)
+        >>> s.to_dict(dd)
+        defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
+        """
+        # GH16122
+        into_c = com.standardize_mapping(into)
+        return into_c(compat.iteritems(self))
+
+    def to_frame(self, name=None):
+        """
+        Convert Series to DataFrame
+
+        Parameters
+        ----------
+        name : object, default None
+            The passed name should substitute for the series name (if it has
+            one).
+
+        Returns
+        -------
+        data_frame : DataFrame
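+
+        Examples
+        --------
+        A short sketch; the column name ``'vals'`` is arbitrary and the exact
+        alignment of the rendered frame may vary slightly:
+
+        >>> s = pd.Series([1, 2, 3], name='vals')
+        >>> s.to_frame()
+           vals
+        0     1
+        1     2
+        2     3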
+        """
+        if name is None:
+            df = self._constructor_expanddim(self)
+        else:
+            df = self._constructor_expanddim({name: self})
+
+        return df
+
+    def to_sparse(self, kind='block', fill_value=None):
+        """
+        Convert Series to SparseSeries
+
+        Parameters
+        ----------
+        kind : {'block', 'integer'}
+        fill_value : float, defaults to NaN (missing)
+
+        Returns
+        -------
+        sp : SparseSeries
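+
+        Examples
+        --------
+        A minimal sketch; ``density`` is the fraction of non-fill values in
+        the sparse result:
+
+        >>> ss = pd.Series([1.0, np.nan, 3.0]).to_sparse()
+        >>> ss.density
+        0.6666666666666666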
+        """
+        from pandas.core.sparse.series import SparseSeries
+        return SparseSeries(self, kind=kind,
+                            fill_value=fill_value).__finalize__(self)
+
+    def _set_name(self, name, inplace=False):
+        """
+        Set the Series name.
+
+        Parameters
+        ----------
+        name : str
+        inplace : bool
+            whether to modify `self` directly or return a copy
+        """
+        inplace = validate_bool_kwarg(inplace, 'inplace')
+        ser = self if inplace else self.copy()
+        ser.name = name
+        return ser
+
+    # ----------------------------------------------------------------------
+    # Statistics, overridden ndarray methods
+
+    # TODO: integrate bottleneck
+
+    def count(self, level=None):
+        """
+        Return number of non-NA/null observations in the Series
+
+        Parameters
+        ----------
+        level : int or level name, default None
+            If the axis is a MultiIndex (hierarchical), count along a
+            particular level, collapsing into a smaller Series
+
+        Returns
+        -------
+        nobs : int or Series (if level specified)
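+
+        Examples
+        --------
+        Illustrative only (assumes ``pd``/``np`` are pandas/numpy):
+
+        >>> pd.Series([1, 2, np.nan]).count()
+        2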
+        """
+        if level is None:
+            return notna(com._values_from_object(self)).sum()
+
+        if isinstance(level, compat.string_types):
+            level = self.index._get_level_number(level)
+
+        lev = self.index.levels[level]
+        lab = np.array(self.index.labels[level], subok=False, copy=True)
+
+        mask = lab == -1
+        if mask.any():
+            lab[mask] = cnt = len(lev)
+            lev = lev.insert(cnt, lev._na_value)
+
+        obs = lab[notna(self.values)]
+        out = np.bincount(obs, minlength=len(lev) or None)
+        return self._constructor(out, index=lev,
+                                 dtype='int64').__finalize__(self)
+
+    def mode(self):
+        """Return the mode(s) of the dataset.
+
+        Always returns Series even if only one value is returned.
+
+        Returns
+        -------
+        modes : Series (sorted)
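+
+        Examples
+        --------
+        Illustrative only:
+
+        >>> pd.Series([2, 2, 3, 4]).mode()
+        0    2
+        dtype: int64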
+        """
+        # TODO: Add option for bins like value_counts()
+        return algorithms.mode(self)
+
+    def unique(self):
+        """
+        Return unique values of Series object.
+
+        Uniques are returned in order of appearance. Hash table-based unique,
+        therefore does NOT sort.
+
+        Returns
+        -------
+        ndarray or Categorical
+            The unique values returned as a NumPy array. In case of categorical
+            data type, returned as a Categorical.
+
+        See Also
+        --------
+        pandas.unique : top-level unique method for any 1-d array-like object.
+        Index.unique : return Index with unique values from an Index object.
+
+        Examples
+        --------
+        >>> pd.Series([2, 1, 3, 3], name='A').unique()
+        array([2, 1, 3])
+
+        >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
+        array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
+
+        >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
+        ...            for _ in range(3)]).unique()
+        array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
+              dtype=object)
+
+        An unordered Categorical will return categories in the order of
+        appearance.
+
+        >>> pd.Series(pd.Categorical(list('baabc'))).unique()
+        [b, a, c]
+        Categories (3, object): [b, a, c]
+
+        An ordered Categorical preserves the category ordering.
+
+        >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
+        ...                          ordered=True)).unique()
+        [b, a, c]
+        Categories (3, object): [a < b < c]
+        """
+        result = super(Series, self).unique()
+
+        if is_datetime64tz_dtype(self.dtype):
+            # we are special casing datetime64tz_dtype
+            # to return an object array of tz-aware Timestamps
+
+            # TODO: it must return DatetimeArray with tz in pandas 2.0
+            result = result.astype(object).values
+
+        return result
+
+    def drop_duplicates(self, keep='first', inplace=False):
+        """
+        Return Series with duplicate values removed.
+
+        Parameters
+        ----------
+        keep : {'first', 'last', ``False``}, default 'first'
+            - 'first' : Drop duplicates except for the first occurrence.
+            - 'last' : Drop duplicates except for the last occurrence.
+            - ``False`` : Drop all duplicates.
+        inplace : boolean, default ``False``
+            If ``True``, performs operation inplace and returns None.
+
+        Returns
+        -------
+        deduplicated : Series
+
+        See Also
+        --------
+        Index.drop_duplicates : equivalent method on Index
+        DataFrame.drop_duplicates : equivalent method on DataFrame
+        Series.duplicated : related method on Series, indicating duplicate
+            Series values.
+
+        Examples
+        --------
+        Generate a Series with duplicated entries.
+
+        >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
+        ...               name='animal')
+        >>> s
+        0      lama
+        1       cow
+        2      lama
+        3    beetle
+        4      lama
+        5     hippo
+        Name: animal, dtype: object
+
+        With the 'keep' parameter, the selection behaviour of duplicated values
+        can be changed. The value 'first' keeps the first occurrence for each
+        set of duplicated entries. The default value of keep is 'first'.
+
+        >>> s.drop_duplicates()
+        0      lama
+        1       cow
+        3    beetle
+        5     hippo
+        Name: animal, dtype: object
+
+        The value 'last' for parameter 'keep' keeps the last occurrence for
+        each set of duplicated entries.
+
+        >>> s.drop_duplicates(keep='last')
+        1       cow
+        3    beetle
+        4      lama
+        5     hippo
+        Name: animal, dtype: object
+
+        The value ``False`` for parameter 'keep' discards all sets of
+        duplicated entries. Setting the value of 'inplace' to ``True`` performs
+        the operation inplace and returns ``None``.
+
+        >>> s.drop_duplicates(keep=False, inplace=True)
+        >>> s
+        1       cow
+        3    beetle
+        5     hippo
+        Name: animal, dtype: object
+        """
+        return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
+
+    def duplicated(self, keep='first'):
+        """
+        Indicate duplicate Series values.
+
+        Duplicated values are indicated as ``True`` values in the resulting
+        Series. Either all duplicates, all except the first or all except the
+        last occurrence of duplicates can be indicated.
+
+        Parameters
+        ----------
+        keep : {'first', 'last', False}, default 'first'
+            - 'first' : Mark duplicates as ``True`` except for the first
+              occurrence.
+            - 'last' : Mark duplicates as ``True`` except for the last
+              occurrence.
+            - ``False`` : Mark all duplicates as ``True``.
+
+        Examples
+        --------
+        By default, for each set of duplicated values, the first occurrence is
+        set to False and all others to True:
+
+        >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
+        >>> animals.duplicated()
+        0    False
+        1    False
+        2     True
+        3    False
+        4     True
+        dtype: bool
+
+        which is equivalent to
+
+        >>> animals.duplicated(keep='first')
+        0    False
+        1    False
+        2     True
+        3    False
+        4     True
+        dtype: bool
+
+        By using 'last', the last occurrence of each set of duplicated values
+        is set to False and all others to True:
+
+        >>> animals.duplicated(keep='last')
+        0     True
+        1    False
+        2     True
+        3    False
+        4    False
+        dtype: bool
+
+        By setting keep to ``False``, all duplicates are marked True:
+
+        >>> animals.duplicated(keep=False)
+        0     True
+        1    False
+        2     True
+        3    False
+        4     True
+        dtype: bool
+
+        Returns
+        -------
+        pandas.core.series.Series
+
+        See Also
+        --------
+        pandas.Index.duplicated : Equivalent method on pandas.Index
+        pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame
+        pandas.Series.drop_duplicates : Remove duplicate values from Series
+        """
+        return super(Series, self).duplicated(keep=keep)
+
+    def idxmin(self, axis=None, skipna=True, *args, **kwargs):
+        """
+        Return the row label of the minimum value.
+
+        If multiple values equal the minimum, the first row label with that
+        value is returned.
+
+        Parameters
+        ----------
+        skipna : boolean, default True
+            Exclude NA/null values. If the entire Series is NA, the result
+            will be NA.
+        axis : int, default 0
+            For compatibility with DataFrame.idxmin. Redundant for application
+            on Series.
+        *args, **kwargs
+            Additional keywords have no effect but might be accepted
+            for compatibility with NumPy.
+
+        Returns
+        -------
+        idxmin : Index of minimum of values.
+
+        Raises
+        ------
+        ValueError
+            If the Series is empty.
+
+        Notes
+        -----
+        This method is the Series version of ``ndarray.argmin``. This method
+        returns the label of the minimum, while ``ndarray.argmin`` returns
+        the position. To get the position, use ``series.values.argmin()``.
+
+        See Also
+        --------
+        numpy.argmin : Return indices of the minimum values
+            along the given axis.
+        DataFrame.idxmin : Return index of first occurrence of minimum
+            over requested axis.
+        Series.idxmax : Return index *label* of the first occurrence
+            of maximum of values.
+
+        Examples
+        --------
+        >>> s = pd.Series(data=[1, None, 4, 1],
+        ...               index=['A' ,'B' ,'C' ,'D'])
+        >>> s
+        A    1.0
+        B    NaN
+        C    4.0
+        D    1.0
+        dtype: float64
+
+        >>> s.idxmin()
+        'A'
+
+        If `skipna` is False and there is an NA value in the data,
+        the function returns ``nan``.
+
+        >>> s.idxmin(skipna=False)
+        nan
+        """
+        skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
+        i = nanops.nanargmin(com._values_from_object(self), skipna=skipna)
+        if i == -1:
+            return np.nan
+        return self.index[i]
+
+    def idxmax(self, axis=0, skipna=True, *args, **kwargs):
+        """
+        Return the row label of the maximum value.
+
+        If multiple values equal the maximum, the first row label with that
+        value is returned.
+
+        Parameters
+        ----------
+        skipna : boolean, default True
+            Exclude NA/null values. If the entire Series is NA, the result
+            will be NA.
+        axis : int, default 0
+            For compatibility with DataFrame.idxmax. Redundant for application
+            on Series.
+        *args, **kwargs
+            Additional keywords have no effect but might be accepted
+            for compatibility with NumPy.
+
+        Returns
+        -------
+        idxmax : Index of maximum of values.
+
+        Raises
+        ------
+        ValueError
+            If the Series is empty.
+
+        Notes
+        -----
+        This method is the Series version of ``ndarray.argmax``. This method
+        returns the label of the maximum, while ``ndarray.argmax`` returns
+        the position. To get the position, use ``series.values.argmax()``.
+
+        See Also
+        --------
+        numpy.argmax : Return indices of the maximum values
+            along the given axis.
+        DataFrame.idxmax : Return index of first occurrence of maximum
+            over requested axis.
+        Series.idxmin : Return index *label* of the first occurrence
+            of minimum of values.
+
+        Examples
+        --------
+        >>> s = pd.Series(data=[1, None, 4, 3, 4],
+        ...               index=['A', 'B', 'C', 'D', 'E'])
+        >>> s
+        A    1.0
+        B    NaN
+        C    4.0
+        D    3.0
+        E    4.0
+        dtype: float64
+
+        >>> s.idxmax()
+        'C'
+
+        If `skipna` is False and there is an NA value in the data,
+        the function returns ``nan``.
+
+        >>> s.idxmax(skipna=False)
+        nan
+        """
+        skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
+        i = nanops.nanargmax(com._values_from_object(self), skipna=skipna)
+        if i == -1:
+            return np.nan
+        return self.index[i]
+
+    # ndarray compat
+    argmin = deprecate(
+        'argmin', idxmin, '0.21.0',
+        msg=dedent("""\
+        'argmin' is deprecated, use 'idxmin' instead. The behavior of 'argmin'
+        will be corrected to return the positional minimum in the future.
+        Use 'series.values.argmin' to get the position of the minimum now.""")
+    )
+    argmax = deprecate(
+        'argmax', idxmax, '0.21.0',
+        msg=dedent("""\
+        'argmax' is deprecated, use 'idxmax' instead. The behavior of 'argmax'
+        will be corrected to return the positional maximum in the future.
+        Use 'series.values.argmax' to get the position of the maximum now.""")
+    )
+
+    def round(self, decimals=0, *args, **kwargs):
+        """
+        Round each value in a Series to the given number of decimals.
+
+        Parameters
+        ----------
+        decimals : int
+            Number of decimal places to round to (default: 0).
+            If decimals is negative, it specifies the number of
+            positions to the left of the decimal point.
+
+        Returns
+        -------
+        Series object
+
+        See Also
+        --------
+        numpy.around
+        DataFrame.round
+
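+        Examples
+        --------
+        Illustrative only (display formatting may vary slightly):
+
+        >>> pd.Series([0.1, 1.3, 2.7]).round()
+        0    0.0
+        1    1.0
+        2    3.0
+        dtype: float64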
+        """
+        nv.validate_round(args, kwargs)
+        result = com._values_from_object(self).round(decimals)
+        result = self._constructor(result, index=self.index).__finalize__(self)
+
+        return result
+
+    def quantile(self, q=0.5, interpolation='linear'):
+        """
+        Return value at the given quantile, a la numpy.percentile.
+
+        Parameters
+        ----------
+        q : float or array-like, default 0.5 (50% quantile)
+            0 <= q <= 1, the quantile(s) to compute
+        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+            .. versionadded:: 0.18.0
+
+            This optional parameter specifies the interpolation method to use,
+            when the desired quantile lies between two data points `i` and `j`:
+
+                * linear: `i + (j - i) * fraction`, where `fraction` is the
+                  fractional part of the index surrounded by `i` and `j`.
+                * lower: `i`.
+                * higher: `j`.
+                * nearest: `i` or `j` whichever is nearest.
+                * midpoint: (`i` + `j`) / 2.
+
+        Returns
+        -------
+        quantile : float or Series
+            if ``q`` is an array, a Series will be returned where the
+            index is ``q`` and the values are the quantiles.
+
+        Examples
+        --------
+        >>> s = Series([1, 2, 3, 4])
+        >>> s.quantile(.5)
+        2.5
+        >>> s.quantile([.25, .5, .75])
+        0.25    1.75
+        0.50    2.50
+        0.75    3.25
+        dtype: float64
+
+        See Also
+        --------
+        pandas.core.window.Rolling.quantile
+        """
+
+        self._check_percentile(q)
+
+        result = self._data.quantile(qs=q, interpolation=interpolation)
+
+        if is_list_like(q):
+            return self._constructor(result,
+                                     index=Float64Index(q),
+                                     name=self.name)
+        else:
+            # scalar
+            return result
+
+    def corr(self, other, method='pearson', min_periods=None):
+        """
+        Compute correlation with `other` Series, excluding missing values
+
+        Parameters
+        ----------
+        other : Series
+        method : {'pearson', 'kendall', 'spearman'}
+            * pearson : standard correlation coefficient
+            * kendall : Kendall Tau correlation coefficient
+            * spearman : Spearman rank correlation
+        min_periods : int, optional
+            Minimum number of observations needed to have a valid result
+
+
+        Returns
+        -------
+        correlation : float
+        """
+        this, other = self.align(other, join='inner', copy=False)
+        if len(this) == 0:
+            return np.nan
+        return nanops.nancorr(this.values, other.values, method=method,
+                              min_periods=min_periods)
+
+    def cov(self, other, min_periods=None):
+        """
+        Compute covariance with Series, excluding missing values
+
+        Parameters
+        ----------
+        other : Series
+        min_periods : int, optional
+            Minimum number of observations needed to have a valid result
+
+        Returns
+        -------
+        covariance : float
+
+        Normalized by N-1 (unbiased estimator).
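+
+        Examples
+        --------
+        A minimal illustration; the values are chosen so the covariance is
+        exact:
+
+        >>> s1 = pd.Series([1.0, 2.0, 3.0])
+        >>> s2 = pd.Series([2.0, 4.0, 6.0])
+        >>> s1.cov(s2)
+        2.0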
+        """
+        this, other = self.align(other, join='inner', copy=False)
+        if len(this) == 0:
+            return np.nan
+        return nanops.nancov(this.values, other.values,
+                             min_periods=min_periods)
+
+    def diff(self, periods=1):
+        """
+        First discrete difference of element.
+
+        Calculates the difference of a Series element compared with another
+        element in the Series (default is element in previous row).
+
+        Parameters
+        ----------
+        periods : int, default 1
+            Periods to shift for calculating difference, accepts negative
+            values.
+
+        Returns
+        -------
+        diffed : Series
+
+        See Also
+        --------
+        Series.pct_change: Percent change over given number of periods.
+        Series.shift: Shift index by desired number of periods with an
+            optional time freq.
+        DataFrame.diff: First discrete difference of object
+
+        Examples
+        --------
+        Difference with previous row
+
+        >>> s = pd.Series([1, 1, 2, 3, 5, 8])
+        >>> s.diff()
+        0    NaN
+        1    0.0
+        2    1.0
+        3    1.0
+        4    2.0
+        5    3.0
+        dtype: float64
+
+        Difference with 3rd previous row
+
+        >>> s.diff(periods=3)
+        0    NaN
+        1    NaN
+        2    NaN
+        3    2.0
+        4    4.0
+        5    6.0
+        dtype: float64
+
+        Difference with following row
+
+        >>> s.diff(periods=-1)
+        0    0.0
+        1   -1.0
+        2   -1.0
+        3   -2.0
+        4   -3.0
+        5    NaN
+        dtype: float64
+        """
+        result = algorithms.diff(com._values_from_object(self), periods)
+        return self._constructor(result, index=self.index).__finalize__(self)
+
+    def autocorr(self, lag=1):
+        """
+        Lag-N autocorrelation
+
+        Parameters
+        ----------
+        lag : int, default 1
+            Number of lags to apply before performing autocorrelation.
+
+        Returns
+        -------
+        autocorr : float
+        """
+        return self.corr(self.shift(lag))
+
+    def dot(self, other):
+        """
+        Matrix multiplication with DataFrame or inner-product with Series
+        objects. Can also be called using `self @ other` in Python >= 3.5.
+
+        Parameters
+        ----------
+        other : Series or DataFrame
+
+        Returns
+        -------
+        dot_product : scalar or Series
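+
+        Examples
+        --------
+        A small sketch of the Series-with-Series case (names are arbitrary):
+
+        >>> s = pd.Series([1, 2, 3])
+        >>> other = pd.Series([4, 5, 6])
+        >>> s.dot(other)
+        32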
+        """
+        from pandas.core.frame import DataFrame
+        if isinstance(other, (Series, DataFrame)):
+            common = self.index.union(other.index)
+            if (len(common) > len(self.index) or
+                    len(common) > len(other.index)):
+                raise ValueError('matrices are not aligned')
+
+            left = self.reindex(index=common, copy=False)
+            right = other.reindex(index=common, copy=False)
+            lvals = left.values
+            rvals = right.values
+        else:
+            left = self
+            lvals = self.values
+            rvals = np.asarray(other)
+            if lvals.shape[0] != rvals.shape[0]:
+                raise Exception('Dot product shape mismatch, %s vs %s' %
+                                (lvals.shape, rvals.shape))
+
+        if isinstance(other, DataFrame):
+            return self._constructor(np.dot(lvals, rvals),
+                                     index=other.columns).__finalize__(self)
+        elif isinstance(other, Series):
+            return np.dot(lvals, rvals)
+        elif isinstance(rvals, np.ndarray):
+            return np.dot(lvals, rvals)
+        else:  # pragma: no cover
+            raise TypeError('unsupported type: %s' % type(other))
+
+    def __matmul__(self, other):
+        """ Matrix multiplication using binary `@` operator in Python>=3.5 """
+        return self.dot(other)
+
+    def __rmatmul__(self, other):
+        """ Matrix multiplication using binary `@` operator in Python>=3.5 """
+        return self.dot(other)
+
+    @Substitution(klass='Series')
+    @Appender(base._shared_docs['searchsorted'])
+    @deprecate_kwarg(old_arg_name='v', new_arg_name='value')
+    def searchsorted(self, value, side='left', sorter=None):
+        if sorter is not None:
+            sorter = _ensure_platform_int(sorter)
+        return self._values.searchsorted(Series(value)._values,
+                                         side=side, sorter=sorter)
+
+    # -------------------------------------------------------------------
+    # Combination
+
+    def append(self, to_append, ignore_index=False, verify_integrity=False):
+        """
+        Concatenate two or more Series.
+
+        Parameters
+        ----------
+        to_append : Series or list/tuple of Series
+        ignore_index : boolean, default False
+            If True, do not use the index labels.
+
+            .. versionadded:: 0.19.0
+
+        verify_integrity : boolean, default False
+            If True, raise Exception on creating index with duplicates
+
+        Notes
+        -----
+        Iteratively appending to a Series can be more computationally intensive
+        than a single concatenate. A better solution is to append values to a
+        list and then concatenate the list with the original Series all at
+        once.
+
+        See also
+        --------
+        pandas.concat : General function to concatenate DataFrame, Series
+            or Panel objects
+
+        Returns
+        -------
+        appended : Series
+
+        Examples
+        --------
+        >>> s1 = pd.Series([1, 2, 3])
+        >>> s2 = pd.Series([4, 5, 6])
+        >>> s3 = pd.Series([4, 5, 6], index=[3,4,5])
+        >>> s1.append(s2)
+        0    1
+        1    2
+        2    3
+        0    4
+        1    5
+        2    6
+        dtype: int64
+
+        >>> s1.append(s3)
+        0    1
+        1    2
+        2    3
+        3    4
+        4    5
+        5    6
+        dtype: int64
+
+        With `ignore_index` set to True:
+
+        >>> s1.append(s2, ignore_index=True)
+        0    1
+        1    2
+        2    3
+        3    4
+        4    5
+        5    6
+        dtype: int64
+
+        With `verify_integrity` set to True:
+
+        >>> s1.append(s2, verify_integrity=True)
+        Traceback (most recent call last):
+        ...
+        ValueError: Indexes have overlapping values: [0, 1, 2]
+
+
+        """
+        from pandas.core.reshape.concat import concat
+
+        if isinstance(to_append, (list, tuple)):
+            to_concat = [self] + to_append
+        else:
+            to_concat = [self, to_append]
+        return concat(to_concat, ignore_index=ignore_index,
+                      verify_integrity=verify_integrity)
+
+    def _binop(self, other, func, level=None, fill_value=None):
+        """
+        Perform generic binary operation with optional fill value
+
+        Parameters
+        ----------
+        other : Series
+        func : binary operator
+        fill_value : float or object
+            Value to substitute for NA/null values. If both Series are NA in a
+            location, the result will be NA regardless of the passed fill value
+        level : int or level name, default None
+            Broadcast across a level, matching Index values on the
+            passed MultiIndex level
+
+        Returns
+        -------
+        combined : Series
+        """
+        if not isinstance(other, Series):
+            raise AssertionError('Other operand must be Series')
+
+        new_index = self.index
+        this = self
+
+        if not self.index.equals(other.index):
+            this, other = self.align(other, level=level, join='outer',
+                                     copy=False)
+            new_index = this.index
+
+        this_vals, other_vals = ops.fill_binop(this.values, other.values,
+                                               fill_value)
+
+        with np.errstate(all='ignore'):
+            result = func(this_vals, other_vals)
+        name = ops.get_op_result_name(self, other)
+        result = self._constructor(result, index=new_index, name=name)
+        result = result.__finalize__(self)
+        if name is None:
+            # When name is None, __finalize__ overwrites current name
+            result.name = None
+        return result
+
+    def combine(self, other, func, fill_value=np.nan):
+        """
+        Perform elementwise binary operation on two Series using given function
+        with optional fill value when an index is missing from one Series or
+        the other
+
+        Parameters
+        ----------
+        other : Series or scalar value
+        func : function
+            Function that takes two scalars as inputs and returns a scalar
+        fill_value : scalar value
+
+        Returns
+        -------
+        result : Series
+
+        Examples
+        --------
+        >>> s1 = Series([1, 2])
+        >>> s2 = Series([0, 3])
+        >>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2)
+        0    0
+        1    2
+        dtype: int64
+
+        See Also
+        --------
+        Series.combine_first : Combine Series values, choosing the calling
+            Series's values first
+        """
+        if isinstance(other, Series):
+            new_index = self.index.union(other.index)
+            new_name = ops.get_op_result_name(self, other)
+            new_values = np.empty(len(new_index), dtype=self.dtype)
+            for i, idx in enumerate(new_index):
+                lv = self.get(idx, fill_value)
+                rv = other.get(idx, fill_value)
+                with np.errstate(all='ignore'):
+                    new_values[i] = func(lv, rv)
+        else:
+            new_index = self.index
+            with np.errstate(all='ignore'):
+                new_values = func(self._values, other)
+            new_name = self.name
+        return self._constructor(new_values, index=new_index, name=new_name)
+
+    def combine_first(self, other):
+        """
+        Combine Series values, choosing the calling Series's values
+        first. Result index will be the union of the two indexes
+
+        Parameters
+        ----------
+        other : Series
+
+        Returns
+        -------
+        combined : Series
+
+        Examples
+        --------
+        >>> s1 = pd.Series([1, np.nan])
+        >>> s2 = pd.Series([3, 4])
+        >>> s1.combine_first(s2)
+        0    1.0
+        1    4.0
+        dtype: float64
+
+        See Also
+        --------
+        Series.combine : Perform elementwise operation on two Series
+            using a given function
+        """
+        new_index = self.index.union(other.index)
+        this = self.reindex(new_index, copy=False)
+        other = other.reindex(new_index, copy=False)
+        # TODO: do we need name?
+        name = ops.get_op_result_name(self, other)  # noqa
+        rs_vals = com._where_compat(isna(this), other._values, this._values)
+        return self._constructor(rs_vals, index=new_index).__finalize__(self)
+
+    def update(self, other):
+        """
+        Modify Series in place using non-NA values from passed
+        Series. Aligns on index
+
+        Parameters
+        ----------
+        other : Series
+
+        Examples
+        --------
+        >>> s = pd.Series([1, 2, 3])
+        >>> s.update(pd.Series([4, 5, 6]))
+        >>> s
+        0    4
+        1    5
+        2    6
+        dtype: int64
+
+        >>> s = pd.Series(['a', 'b', 'c'])
+        >>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
+        >>> s
+        0    d
+        1    b
+        2    e
+        dtype: object
+
+        >>> s = pd.Series([1, 2, 3])
+        >>> s.update(pd.Series([4, 5, 6, 7, 8]))
+        >>> s
+        0    4
+        1    5
+        2    6
+        dtype: int64
+
+        If ``other`` contains NaNs the corresponding values are not updated
+        in the original Series.
+
+        >>> s = pd.Series([1, 2, 3])
+        >>> s.update(pd.Series([4, np.nan, 6]))
+        >>> s
+        0    4
+        1    2
+        2    6
+        dtype: int64
+
+        """
+        other = other.reindex_like(self)
+        mask = notna(other)
+
+        self._data = self._data.putmask(mask=mask, new=other, inplace=True)
+        self._maybe_update_cacher()
+
+    # ----------------------------------------------------------------------
+    # Reindexing, sorting
+
+    def sort_values(self, axis=0, ascending=True, inplace=False,
+                    kind='quicksort', na_position='last'):
+        """
+        Sort by the values.
+
+        Sort a Series in ascending or descending order by some
+        criterion.
+
+        Parameters
+        ----------
+        axis : {0 or 'index'}, default 0
+            Axis to direct sorting. The value 'index' is accepted for
+            compatibility with DataFrame.sort_values.
+        ascending : bool, default True
+            If True, sort values in ascending order, otherwise descending.
+        inplace : bool, default False
+            If True, perform operation in-place.
+        kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort'
+            Choice of sorting algorithm. See also :func:`numpy.sort` for more
+            information. 'mergesort' is the only stable algorithm.
+        na_position : {'first' or 'last'}, default 'last'
+            Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
+            the end.
+
+        Returns
+        -------
+        Series
+            Series ordered by values.
+
+        See Also
+        --------
+        Series.sort_index : Sort by the Series indices.
+        DataFrame.sort_values : Sort DataFrame by the values along either axis.
+        DataFrame.sort_index : Sort DataFrame by indices.
+
+        Examples
+        --------
+        >>> s = pd.Series([np.nan, 1, 3, 10, 5])
+        >>> s
+        0     NaN
+        1     1.0
+        2     3.0
+        3     10.0
+        4     5.0
+        dtype: float64
+
+        Sort values in ascending order (default behaviour)
+
+        >>> s.sort_values(ascending=True)
+        1     1.0
+        2     3.0
+        4     5.0
+        3    10.0
+        0     NaN
+        dtype: float64
+
+        Sort values in descending order
+
+        >>> s.sort_values(ascending=False)
+        3    10.0
+        4     5.0
+        2     3.0
+        1     1.0
+        0     NaN
+        dtype: float64
+
+        Sort values inplace
+
+        >>> s.sort_values(ascending=False, inplace=True)
+        >>> s
+        3    10.0
+        4     5.0
+        2     3.0
+        1     1.0
+        0     NaN
+        dtype: float64
+
+        Sort values putting NAs first
+
+        >>> s.sort_values(na_position='first')
+        0     NaN
+        1     1.0
+        2     3.0
+        4     5.0
+        3    10.0
+        dtype: float64
+
+        Sort a series of strings
+
+        >>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
+        >>> s
+        0    z
+        1    b
+        2    d
+        3    a
+        4    c
+        dtype: object
+
+        >>> s.sort_values()
+        3    a
+        1    b
+        4    c
+        2    d
+        0    z
+        dtype: object
+        """
+        inplace = validate_bool_kwarg(inplace, 'inplace')
+        axis = self._get_axis_number(axis)
+
+        # GH 5856/5853
+        if inplace and self._is_cached:
+            raise ValueError("This Series is a view of some other array, to "
+                             "sort in-place you must create a copy")
+
+        def _try_kind_sort(arr):
+            # easier to ask forgiveness than permission
+            try:
+                # if kind==mergesort, it can fail for object dtype
+                return arr.argsort(kind=kind)
+            except TypeError:
+                # stable sort not available for object dtype
+                # uses the argsort default quicksort
+                return arr.argsort(kind='quicksort')
+
+        arr = self._values
+        sortedIdx = np.empty(len(self), dtype=np.int32)
+
+        bad = isna(arr)
+
+        good = ~bad
+        idx = com._default_index(len(self))
+
+        argsorted = _try_kind_sort(arr[good])
+
+        if is_list_like(ascending):
+            if len(ascending) != 1:
+                raise ValueError('Length of ascending (%d) must be 1 '
+                                 'for Series' % (len(ascending)))
+            ascending = ascending[0]
+
+        if not is_bool(ascending):
+            raise ValueError('ascending must be boolean')
+
+        if not ascending:
+            argsorted = argsorted[::-1]
+
+        if na_position == 'last':
+            n = good.sum()
+            sortedIdx[:n] = idx[good][argsorted]
+            sortedIdx[n:] = idx[bad]
+        elif na_position == 'first':
+            n = bad.sum()
+            sortedIdx[n:] = idx[good][argsorted]
+            sortedIdx[:n] = idx[bad]
+        else:
+            raise ValueError('invalid na_position: {!r}'.format(na_position))
+
+        result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
+
+        if inplace:
+            self._update_inplace(result)
+        else:
+            return result.__finalize__(self)
+
+    def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
+                   kind='quicksort', na_position='last', sort_remaining=True):
+        """
+        Sort Series by index labels.
+
+        Returns a new Series sorted by label if `inplace` argument is
+        ``False``, otherwise updates the original series and returns None.
+
+        Parameters
+        ----------
+        axis : int, default 0
+            Axis to direct sorting. This can only be 0 for Series.
+        level : int, optional
+            If not None, sort on values in specified index level(s).
+        ascending : bool, default True
+            Sort ascending vs. descending.
+        inplace : bool, default False
+            If True, perform operation in-place.
+        kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
+            Choice of sorting algorithm. See also :func:`numpy.sort` for more
+            information.  'mergesort' is the only stable algorithm. For
+            DataFrames, this option is only applied when sorting on a single
+            column or label.
+        na_position : {'first', 'last'}, default 'last'
+            If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
+            Not implemented for MultiIndex.
+        sort_remaining : bool, default True
+            If True and sorting by level and index is multilevel, sort by other
+            levels too (in order) after sorting by specified level.
+
+        Returns
+        -------
+        pandas.Series
+            The original Series sorted by the labels
+
+        See Also
+        --------
+        DataFrame.sort_index: Sort DataFrame by the index
+        DataFrame.sort_values: Sort DataFrame by the value
+        Series.sort_values : Sort Series by the value
+
+        Examples
+        --------
+        >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
+        >>> s.sort_index()
+        1    c
+        2    b
+        3    a
+        4    d
+        dtype: object
+
+        Sort Descending
+
+        >>> s.sort_index(ascending=False)
+        4    d
+        3    a
+        2    b
+        1    c
+        dtype: object
+
+        Sort Inplace
+
+        >>> s.sort_index(inplace=True)
+        >>> s
+        1    c
+        2    b
+        3    a
+        4    d
+        dtype: object
+
+        By default NaNs are put at the end, but use `na_position` to place
+        them at the beginning
+
+        >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
+        >>> s.sort_index(na_position='first')
+        NaN     d
+         1.0    c
+         2.0    b
+         3.0    a
+        dtype: object
+
+        Specify index level to sort
+
+        >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
+        ...                     'baz', 'baz', 'bar', 'bar']),
+        ...           np.array(['two', 'one', 'two', 'one',
+        ...                     'two', 'one', 'two', 'one'])]
+        >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
+        >>> s.sort_index(level=1)
+        bar  one    8
+        baz  one    6
+        foo  one    4
+        qux  one    2
+        bar  two    7
+        baz  two    5
+        foo  two    3
+        qux  two    1
+        dtype: int64
+
+        Does not sort by remaining levels when sorting by levels
+
+        >>> s.sort_index(level=1, sort_remaining=False)
+        qux  one    2
+        foo  one    4
+        baz  one    6
+        bar  one    8
+        qux  two    1
+        foo  two    3
+        baz  two    5
+        bar  two    7
+        dtype: int64
+        """
+        # TODO: this can be combined with DataFrame.sort_index impl as
+        # almost identical
+        inplace = validate_bool_kwarg(inplace, 'inplace')
+        axis = self._get_axis_number(axis)
+        index = self.index
+
+        if level is not None:
+            new_index, indexer = index.sortlevel(level, ascending=ascending,
+                                                 sort_remaining=sort_remaining)
+        elif isinstance(index, MultiIndex):
+            from pandas.core.sorting import lexsort_indexer
+            labels = index._sort_levels_monotonic()
+            indexer = lexsort_indexer(labels._get_labels_for_sorting(),
+                                      orders=ascending,
+                                      na_position=na_position)
+        else:
+            from pandas.core.sorting import nargsort
+
+            # Check monotonic-ness before sort an index
+            # GH11080
+            if ((ascending and index.is_monotonic_increasing) or
+                    (not ascending and index.is_monotonic_decreasing)):
+                if inplace:
+                    return
+                else:
+                    return self.copy()
+
+            indexer = nargsort(index, kind=kind, ascending=ascending,
+                               na_position=na_position)
+
+        indexer = _ensure_platform_int(indexer)
+        new_index = index.take(indexer)
+        new_index = new_index._sort_levels_monotonic()
+
+        new_values = self._values.take(indexer)
+        result = self._constructor(new_values, index=new_index)
+
+        if inplace:
+            self._update_inplace(result)
+        else:
+            return result.__finalize__(self)
+
+    def argsort(self, axis=0, kind='quicksort', order=None):
+        """
+        Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
+        and places the result in the same locations as the non-NA values
+
+        Parameters
+        ----------
+        axis : int (can only be zero)
+        kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
+            Choice of sorting algorithm. See np.sort for more
+            information. 'mergesort' is the only stable algorithm
+        order : ignored
+
+        Returns
+        -------
+        argsorted : Series, with -1 indicating where NaN values are present
+
+        See also
+        --------
+        numpy.ndarray.argsort
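+
+        Examples
+        --------
+        Illustrative only (spacing of the rendered Series may vary):
+
+        >>> pd.Series([3, 1, 2]).argsort()
+        0    1
+        1    2
+        2    0
+        dtype: int64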
+        """
+        values = self._values
+        mask = isna(values)
+
+        if mask.any():
+            result = Series(-1, index=self.index, name=self.name,
+                            dtype='int64')
+            notmask = ~mask
+            result[notmask] = np.argsort(values[notmask], kind=kind)
+            return self._constructor(result,
+                                     index=self.index).__finalize__(self)
+        else:
+            return self._constructor(
+                np.argsort(values, kind=kind), index=self.index,
+                dtype='int64').__finalize__(self)
+
+    def nlargest(self, n=5, keep='first'):
+        """
+        Return the largest `n` elements.
+
+        Parameters
+        ----------
+        n : int
+            Return this many descending sorted values
+        keep : {'first', 'last'}, default 'first'
+            Where there are duplicate values:
+            - ``first`` : take the first occurrence.
+            - ``last`` : take the last occurrence.
+
+        Returns
+        -------
+        top_n : Series
+            The n largest values in the Series, in sorted order
+
+        Notes
+        -----
+        Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
+        relative to the size of the ``Series`` object.
+
+        See Also
+        --------
+        Series.nsmallest
+
+        Examples
+        --------
+        >>> import pandas as pd
+        >>> import numpy as np
+        >>> s = pd.Series(np.random.randn(10**6))
+        >>> s.nlargest(10)  # only sorts up to the N requested
+        219921    4.644710
+        82124     4.608745
+        421689    4.564644
+        425277    4.447014
+        718691    4.414137
+        43154     4.403520
+        283187    4.313922
+        595519    4.273635
+        503969    4.250236
+        121637    4.240952
+        dtype: float64
+        """
+        return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
+
+    def nsmallest(self, n=5, keep='first'):
+        """
+        Return the smallest `n` elements.
+
+        Parameters
+        ----------
+        n : int
+            Return this many ascending sorted values
+        keep : {'first', 'last'}, default 'first'
+            Where there are duplicate values:
+            - ``first`` : take the first occurrence.
+            - ``last`` : take the last occurrence.
+
+        Returns
+        -------
+        bottom_n : Series
+            The n smallest values in the Series, in sorted order
+
+        Notes
+        -----
+        Faster than ``.sort_values().head(n)`` for small `n` relative to
+        the size of the ``Series`` object.
+
+        See Also
+        --------
+        Series.nlargest
+
+        Examples
+        --------
+        >>> import pandas as pd
+        >>> import numpy as np
+        >>> s = pd.Series(np.random.randn(10**6))
+        >>> s.nsmallest(10)  # only sorts up to the N requested
+        288532   -4.954580
+        732345   -4.835960
+        64803    -4.812550
+        446457   -4.609998
+        501225   -4.483945
+        669476   -4.472935
+        973615   -4.401699
+        621279   -4.355126
+        773916   -4.347355
+        359919   -4.331927
+        dtype: float64
+        """
+        return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
+
+    def sortlevel(self, level=0, ascending=True, sort_remaining=True):
+        """Sort Series with MultiIndex by chosen level. Data will be
+        lexicographically sorted by the chosen level followed by the other
+        levels (in order).
+
+        .. deprecated:: 0.20.0
+            Use :meth:`Series.sort_index`
+
+        Parameters
+        ----------
+        level : int or level name, default None
+        ascending : bool, default True
+
+        Returns
+        -------
+        sorted : Series
+
+        See Also
+        --------
+        Series.sort_index(level=...)
+
+        """
+        warnings.warn("sortlevel is deprecated, use sort_index(level=...)",
+                      FutureWarning, stacklevel=2)
+        return self.sort_index(level=level, ascending=ascending,
+                               sort_remaining=sort_remaining)
+
+    def swaplevel(self, i=-2, j=-1, copy=True):
+        """
+        Swap levels i and j in a MultiIndex
+
+        Parameters
+        ----------
+        i, j : int, string (can be mixed)
+            Level of index to be swapped. Can pass level name as string.
+
+        Returns
+        -------
+        swapped : Series
+
+        .. versionchanged:: 0.18.1
+
+           The indexes ``i`` and ``j`` are now optional, and default to
+           the two innermost levels of the index.
+
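+        Examples
+        --------
+        A small sketch with a two-level index (illustrative only; exact
+        alignment of the output may vary):
+
+        >>> s = pd.Series([1, 2],
+        ...               index=pd.MultiIndex.from_tuples([('a', 'x'),
+        ...                                                ('b', 'y')]))
+        >>> s.swaplevel()
+        x  a    1
+        y  b    2
+        dtype: int64
+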
+        """
+        new_index = self.index.swaplevel(i, j)
+        return self._constructor(self._values, index=new_index,
+                                 copy=copy).__finalize__(self)
+
+    def reorder_levels(self, order):
+        """
+        Rearrange index levels using input order. May not drop or duplicate
+        levels.
+
+        Parameters
+        ----------
+        order : list of int representing new level order.
+               (reference level by number or key)
+
+        Returns
+        -------
+        type of caller (new object)
+        """
+        if not isinstance(self.index, MultiIndex):  # pragma: no cover
+            raise Exception('Can only reorder levels on a hierarchical axis.')
+
+        result = self.copy()
+        result.index = result.index.reorder_levels(order)
+        return result
+
+    def unstack(self, level=-1, fill_value=None):
+        """
+        Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
+        The level involved will automatically get sorted.
+
+        Parameters
+        ----------
+        level : int, string, or list of these, default last level
+            Level(s) to unstack, can pass level name
+        fill_value : replace NaN with this value if the unstack produces
+            missing values
+
+            .. versionadded:: 0.18.0
+
+        Examples
+        --------
+        >>> s = pd.Series([1, 2, 3, 4],
+        ...     index=pd.MultiIndex.from_product([['one', 'two'], ['a', 'b']]))
+        >>> s
+        one  a    1
+             b    2
+        two  a    3
+             b    4
+        dtype: int64
+
+        >>> s.unstack(level=-1)
+             a  b
+        one  1  2
+        two  3  4
+
+        >>> s.unstack(level=0)
+           one  two
+        a    1    3
+        b    2    4
+
+        Returns
+        -------
+        unstacked : DataFrame
+        """
+        from pandas.core.reshape.reshape import unstack
+        return unstack(self, level, fill_value)
+
+    # ----------------------------------------------------------------------
+    # function application
+
+    def map(self, arg, na_action=None):
+        """
+        Map values of Series using input correspondence (a dict, Series, or
+        function).
+
+        Parameters
+        ----------
+        arg : function, dict, or Series
+            Mapping correspondence.
+        na_action : {None, 'ignore'}
+            If 'ignore', propagate NA values, without passing them to the
+            mapping correspondence.
+
+        Returns
+        -------
+        y : Series
+            Same index as caller.
+
+        Examples
+        --------
+
+        Map inputs to outputs (both of type `Series`):
+
+        >>> x = pd.Series([1,2,3], index=['one', 'two', 'three'])
+        >>> x
+        one      1
+        two      2
+        three    3
+        dtype: int64
+
+        >>> y = pd.Series(['foo', 'bar', 'baz'], index=[1,2,3])
+        >>> y
+        1    foo
+        2    bar
+        3    baz
+        dtype: object
+
+        >>> x.map(y)
+        one      foo
+        two      bar
+        three    baz
+        dtype: object
+
+        If `arg` is a dictionary, return a new Series with values converted
+        according to the dictionary's mapping:
+
+        >>> z = {1: 'A', 2: 'B', 3: 'C'}
+
+        >>> x.map(z)
+        one      A
+        two      B
+        three    C
+        dtype: object
+
+        Use na_action to control whether NA values are affected by the mapping
+        function.
+
+        >>> s = pd.Series([1, 2, 3, np.nan])
+
+        >>> s.map('this is a string {}'.format, na_action=None)
+        0    this is a string 1.0
+        1    this is a string 2.0
+        2    this is a string 3.0
+        3    this is a string nan
+        dtype: object
+
+        >>> s.map('this is a string {}'.format, na_action='ignore')
+        0    this is a string 1.0
+        1    this is a string 2.0
+        2    this is a string 3.0
+        3                     NaN
+        dtype: object
+
+        See Also
+        --------
+        Series.apply : For applying more complex functions on a Series.
+        DataFrame.apply : Apply a function row-/column-wise.
+        DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
+
+        Notes
+        -----
+        When `arg` is a dictionary, values in Series that are not in the
+        dictionary (as keys) are converted to ``NaN``. However, if the
+        dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
+        provides a method for default values), then this default is used
+        rather than ``NaN``:
+
+        >>> from collections import Counter
+        >>> counter = Counter()
+        >>> counter['bar'] += 1
+        >>> y.map(counter)
+        1    0
+        2    1
+        3    0
+        dtype: int64
+        """
+        new_values = super(Series, self)._map_values(
+            arg, na_action=na_action)
+        return self._constructor(new_values,
+                                 index=self.index).__finalize__(self)
+
+    def _gotitem(self, key, ndim, subset=None):
+        """
+        sub-classes to define
+        return a sliced object
+
+        Parameters
+        ----------
+        key : string / list of selections
+        ndim : 1,2
+            requested ndim of result
+        subset : object, default None
+            subset to act on
+        """
+        return self
+
+    _agg_doc = dedent("""
+    Examples
+    --------
+
+    >>> s = Series(np.random.randn(10))
+
+    >>> s.agg('min')
+    -1.3018049988556679
+
+    >>> s.agg(['min', 'max'])
+    min   -1.301805
+    max    1.127688
+    dtype: float64
+
+    See also
+    --------
+    pandas.Series.apply
+    pandas.Series.transform
+
+    """)
+
+    @Appender(_agg_doc)
+    @Appender(generic._shared_docs['aggregate'] % dict(
+        versionadded='.. versionadded:: 0.20.0',
+        **_shared_doc_kwargs))
+    def aggregate(self, func, axis=0, *args, **kwargs):
+        axis = self._get_axis_number(axis)
+        result, how = self._aggregate(func, *args, **kwargs)
+        if result is None:
+
+            # we can be called from an inner function which
+            # passes this meta-data
+            kwargs.pop('_axis', None)
+            kwargs.pop('_level', None)
+
+            # try a regular apply, this evaluates lambdas
+            # row-by-row; however if the lambda is expecting a Series
+            # expression, e.g.: lambda x: x-x.quantile(0.25)
+            # this will fail, so we can try a vectorized evaluation
+
+            # we cannot FIRST try the vectorized evaluation, because
+            # then .agg and .apply would have different semantics if the
+            # operation is actually defined on the Series, e.g. str
+            try:
+                result = self.apply(func, *args, **kwargs)
+            except (ValueError, AttributeError, TypeError):
+                result = func(self, *args, **kwargs)
+
+        return result
+
+    agg = aggregate
+
+    def apply(self, func, convert_dtype=True, args=(), **kwds):
+        """
+        Invoke function on values of Series. Can be ufunc (a NumPy function
+        that applies to the entire Series) or a Python function that only works
+        on single values
+
+        Parameters
+        ----------
+        func : function
+        convert_dtype : boolean, default True
+            Try to find better dtype for elementwise function results. If
+            False, leave as dtype=object
+        args : tuple
+            Positional arguments to pass to function in addition to the value
+        Additional keyword arguments will be passed as keywords to the function
+
+        Returns
+        -------
+        y : Series or DataFrame if func returns a Series
+
+        See also
+        --------
+        Series.map: For element-wise operations
+        Series.agg: only perform aggregating type operations
+        Series.transform: only perform transforming type operations
+
+        Examples
+        --------
+
+        Create a series with typical summer temperatures for each city.
+
+        >>> import pandas as pd
+        >>> import numpy as np
+        >>> series = pd.Series([20, 21, 12], index=['London',
+        ... 'New York','Helsinki'])
+        >>> series
+        London      20
+        New York    21
+        Helsinki    12
+        dtype: int64
+
+        Square the values by defining a function and passing it as an
+        argument to ``apply()``.
+
+        >>> def square(x):
+        ...     return x**2
+        >>> series.apply(square)
+        London      400
+        New York    441
+        Helsinki    144
+        dtype: int64
+
+        Square the values by passing an anonymous function as an
+        argument to ``apply()``.
+
+        >>> series.apply(lambda x: x**2)
+        London      400
+        New York    441
+        Helsinki    144
+        dtype: int64
+
+        Define a custom function that needs additional positional
+        arguments and pass these additional arguments using the
+        ``args`` keyword.
+
+        >>> def subtract_custom_value(x, custom_value):
+        ...     return x-custom_value
+
+        >>> series.apply(subtract_custom_value, args=(5,))
+        London      15
+        New York    16
+        Helsinki     7
+        dtype: int64
+
+        Define a custom function that takes keyword arguments
+        and pass these arguments to ``apply``.
+
+        >>> def add_custom_values(x, **kwargs):
+        ...     for month in kwargs:
+        ...         x+=kwargs[month]
+        ...     return x
+
+        >>> series.apply(add_custom_values, june=30, july=20, august=25)
+        London      95
+        New York    96
+        Helsinki    87
+        dtype: int64
+
+        Use a function from the Numpy library.
+
+        >>> series.apply(np.log)
+        London      2.995732
+        New York    3.044522
+        Helsinki    2.484907
+        dtype: float64
+
+
+        """
+        if len(self) == 0:
+            return self._constructor(dtype=self.dtype,
+                                     index=self.index).__finalize__(self)
+
+        # dispatch to agg
+        if isinstance(func, (list, dict)):
+            return self.aggregate(func, *args, **kwds)
+
+        # if we are a string, try to dispatch
+        if isinstance(func, compat.string_types):
+            return self._try_aggregate_string_function(func, *args, **kwds)
+
+        # handle ufuncs and lambdas
+        if kwds or args and not isinstance(func, np.ufunc):
+            f = lambda x: func(x, *args, **kwds)
+        else:
+            f = func
+
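+        # np.errstate silences floating-point errors (divide-by-zero,
+        # overflow, invalid) raised while the function is evaluated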
+        with np.errstate(all='ignore'):
+            if isinstance(f, np.ufunc):
+                return f(self)
+
+            # row-wise access
+            if is_extension_type(self.dtype):
+                mapped = self._values.map(f)
+            else:
+                values = self.astype(object).values
+                mapped = lib.map_infer(values, f, convert=convert_dtype)
+
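+        # if the function returned a Series for each element, stack the
+        # results row-wise into a DataFrame; otherwise wrap them in a Series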
+        if len(mapped) and isinstance(mapped[0], Series):
+            from pandas.core.frame import DataFrame
+            return DataFrame(mapped.tolist(), index=self.index)
+        else:
+            return self._constructor(mapped,
+                                     index=self.index).__finalize__(self)
+
+    def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
+                filter_type=None, **kwds):
+        """
+        perform a reduction operation
+
+        if we have an ndarray as a value, then simply perform the operation,
+        otherwise delegate to the object
+
+        """
+        delegate = self._values
+        if isinstance(delegate, np.ndarray):
+            # Validate that 'axis' is consistent with Series's single axis.
+            if axis is not None:
+                self._get_axis_number(axis)
+            if numeric_only:
+                raise NotImplementedError('Series.{0} does not implement '
+                                          'numeric_only.'.format(name))
+            with np.errstate(all='ignore'):
+                return op(delegate, skipna=skipna, **kwds)
+
+        return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
+                                numeric_only=numeric_only,
+                                filter_type=filter_type, **kwds)
+
+    def _reindex_indexer(self, new_index, indexer, copy):
+        if indexer is None:
+            if copy:
+                return self.copy()
+            return self
+
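+        # take values at the indexer positions; positions marked -1 are
+        # filled with the missing-value marker (NaN/NaT)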
+        new_values = algorithms.take_1d(self._values, indexer,
+                                        allow_fill=True, fill_value=None)
+        return self._constructor(new_values, index=new_index)
+
+    def _needs_reindex_multi(self, axes, method, level):
+        """ check if we do need a multi reindex; this is for compat with
+        higher dims
+        """
+        return False
+
+    @Appender(generic._shared_docs['align'] % _shared_doc_kwargs)
+    def align(self, other, join='outer', axis=None, level=None, copy=True,
+              fill_value=None, method=None, limit=None, fill_axis=0,
+              broadcast_axis=None):
+        return super(Series, self).align(other, join=join, axis=axis,
+                                         level=level, copy=copy,
+                                         fill_value=fill_value, method=method,
+                                         limit=limit, fill_axis=fill_axis,
+                                         broadcast_axis=broadcast_axis)
+
+    def rename(self, index=None, **kwargs):
+        """Alter Series index labels or name
+
+        Function / dict values must be unique (1-to-1). Labels not contained in
+        a dict / Series will be left as-is. Extra labels listed don't throw an
+        error.
+
+        Alternatively, change ``Series.name`` with a scalar value.
+
+        See the :ref:`user guide <basics.rename>` for more.
+
+        Parameters
+        ----------
+        index : scalar, hashable sequence, dict-like or function, optional
+            dict-like or functions are transformations to apply to
+            the index.
+            Scalar or hashable sequence-like will alter the ``Series.name``
+            attribute.
+        copy : boolean, default True
+            Also copy underlying data
+        inplace : boolean, default False
+            Whether to return a new Series. If True then value of copy is
+            ignored.
+        level : int or level name, default None
+            In case of a MultiIndex, only rename labels in the specified
+            level.
+
+        Returns
+        -------
+        renamed : Series (new object)
+
+        See Also
+        --------
+        pandas.Series.rename_axis
+
+        Examples
+        --------
+
+        >>> s = pd.Series([1, 2, 3])
+        >>> s
+        0    1
+        1    2
+        2    3
+        dtype: int64
+        >>> s.rename("my_name") # scalar, changes Series.name
+        0    1
+        1    2
+        2    3
+        Name: my_name, dtype: int64
+        >>> s.rename(lambda x: x ** 2)  # function, changes labels
+        0    1
+        1    2
+        4    3
+        dtype: int64
+        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
+        0    1
+        3    2
+        5    3
+        dtype: int64
+
+        """
+        kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
+                                                'inplace')
+
+        non_mapping = is_scalar(index) or (is_list_like(index) and
+                                           not is_dict_like(index))
+        if non_mapping:
+            return self._set_name(index, inplace=kwargs.get('inplace'))
+        return super(Series, self).rename(index=index, **kwargs)
+
+    @Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
+    def reindex(self, index=None, **kwargs):
+        return super(Series, self).reindex(index=index, **kwargs)
+
+    def drop(self, labels=None, axis=0, index=None, columns=None,
+             level=None, inplace=False, errors='raise'):
+        """
+        Return Series with specified index labels removed.
+
+        Remove elements of a Series based on specifying the index labels.
+        When using a multi-index, labels on different levels can be removed
+        by specifying the level.
+
+        Parameters
+        ----------
+        labels : single label or list-like
+            Index labels to drop.
+        axis : 0, default 0
+            Redundant for application on Series.
+        index, columns : None
+            Redundant for application on Series, but index can be used instead
+            of labels.
+
+            .. versionadded:: 0.21.0
+        level : int or level name, optional
+            For MultiIndex, level for which the labels will be removed.
+        inplace : bool, default False
+            If True, do operation inplace and return None.
+        errors : {'ignore', 'raise'}, default 'raise'
+            If 'ignore', suppress error and only existing labels are dropped.
+
+        Returns
+        -------
+        dropped : pandas.Series
+
+        See Also
+        --------
+        Series.reindex : Return only specified index labels of Series.
+        Series.dropna : Return series without null values.
+        Series.drop_duplicates : Return Series with duplicate values removed.
+        DataFrame.drop : Drop specified labels from rows or columns.
+
+        Raises
+        ------
+        KeyError
+            If none of the labels are found in the index.
+
+        Examples
+        --------
+        >>> s = pd.Series(data=np.arange(3), index=['A','B','C'])
+        >>> s
+        A  0
+        B  1
+        C  2
+        dtype: int64
+
+        Drop labels B and C
+
+        >>> s.drop(labels=['B','C'])
+        A  0
+        dtype: int64
+
+        Drop 2nd level label in MultiIndex Series
+
+        >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
+        ...                              ['speed', 'weight', 'length']],
+        ...                      labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
+        ...                              [0, 1, 2, 0, 1, 2, 0, 1, 2]])
+        >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
+        ...               index=midx)
+        >>> s
+        lama    speed      45.0
+                weight    200.0
+                length      1.2
+        cow     speed      30.0
+                weight    250.0
+                length      1.5
+        falcon  speed     320.0
+                weight      1.0
+                length      0.3
+        dtype: float64
+
+        >>> s.drop(labels='weight', level=1)
+        lama    speed      45.0
+                length      1.2
+        cow     speed      30.0
+                length      1.5
+        falcon  speed     320.0
+                length      0.3
+        dtype: float64
+        """
+        return super(Series, self).drop(labels=labels, axis=axis, index=index,
+                                        columns=columns, level=level,
+                                        inplace=inplace, errors=errors)
+
+    @Substitution(**_shared_doc_kwargs)
+    @Appender(generic.NDFrame.fillna.__doc__)
+    def fillna(self, value=None, method=None, axis=None, inplace=False,
+               limit=None, downcast=None, **kwargs):
+        return super(Series, self).fillna(value=value, method=method,
+                                          axis=axis, inplace=inplace,
+                                          limit=limit, downcast=downcast,
+                                          **kwargs)
+
+    @Appender(generic._shared_docs['replace'] % _shared_doc_kwargs)
+    def replace(self, to_replace=None, value=None, inplace=False, limit=None,
+                regex=False, method='pad'):
+        return super(Series, self).replace(to_replace=to_replace, value=value,
+                                           inplace=inplace, limit=limit,
+                                           regex=regex, method=method)
+
+    @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
+    def shift(self, periods=1, freq=None, axis=0):
+        return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
+
+    def reindex_axis(self, labels, axis=0, **kwargs):
+        """Conform Series to new index with optional filling logic.
+
+        .. deprecated:: 0.21.0
+            Use ``Series.reindex`` instead.
+        """
+        # for compatibility with higher dims
+        if axis != 0:
+            raise ValueError("cannot reindex series on non-zero axis!")
+        msg = ("'.reindex_axis' is deprecated and will be removed in a future "
+               "version. Use '.reindex' instead.")
+        warnings.warn(msg, FutureWarning, stacklevel=2)
+
+        return self.reindex(index=labels, **kwargs)
+
+    def memory_usage(self, index=True, deep=False):
+        """
+        Return the memory usage of the Series.
+
+        The memory usage can optionally include the contribution of
+        the index and of elements of `object` dtype.
+
+        Parameters
+        ----------
+        index : bool, default True
+            Specifies whether to include the memory usage of the Series index.
+        deep : bool, default False
+            If True, introspect the data deeply by interrogating
+            `object` dtypes for system-level memory consumption, and include
+            it in the returned value.
+
+        Returns
+        -------
+        int
+            Bytes of memory consumed.
+
+        See Also
+        --------
+        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
+            array.
+        DataFrame.memory_usage : Bytes consumed by a DataFrame.
+
+        Examples
+        --------
+
+        >>> s = pd.Series(range(3))
+        >>> s.memory_usage()
+        104
+
+        Not including the index gives the size of the rest of the data, which
+        is necessarily smaller:
+
+        >>> s.memory_usage(index=False)
+        24
+
+        The memory footprint of `object` values is ignored by default:
+
+        >>> s = pd.Series(["a", "b"])
+        >>> s.values
+        array(['a', 'b'], dtype=object)
+        >>> s.memory_usage()
+        96
+        >>> s.memory_usage(deep=True)
+        212
+        """
+        v = super(Series, self).memory_usage(deep=deep)
+        if index:
+            v += self.index.memory_usage(deep=deep)
+        return v
+
+    @Appender(generic._shared_docs['_take'])
+    def _take(self, indices, axis=0, is_copy=False):
+
+        indices = _ensure_platform_int(indices)
+        new_index = self.index.take(indices)
+
+        if is_categorical_dtype(self):
+            # https://github.com/pandas-dev/pandas/issues/20664
+            # TODO: remove when the default Categorical.take behavior changes
+            indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
+            kwargs = {'allow_fill': False}
+        else:
+            kwargs = {}
+        new_values = self._values.take(indices, **kwargs)
+
+        result = (self._constructor(new_values, index=new_index,
+                                    fastpath=True).__finalize__(self))
+
+        # Maybe set copy if we didn't actually change the index.
+        if is_copy:
+            if not result._get_axis(axis).equals(self._get_axis(axis)):
+                result._set_is_copy(self)
+
+        return result
+
+    def isin(self, values):
+        """
+        Check whether `values` are contained in Series.
+
+        Return a boolean Series showing whether each element in the Series
+        matches an element in the passed sequence of `values` exactly.
+
+        Parameters
+        ----------
+        values : set or list-like
+            The sequence of values to test. Passing in a single string will
+            raise a ``TypeError``. Instead, turn a single string into a
+            list of one element.
+
+            .. versionadded:: 0.18.1
+
+              Support for values as a set.
+
+        Returns
+        -------
+        isin : Series (bool dtype)
+
+        Raises
+        ------
+        TypeError
+          * If `values` is a string
+
+        See Also
+        --------
+        pandas.DataFrame.isin : equivalent method on DataFrame
+
+        Examples
+        --------
+
+        >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
+        ...                'hippo'], name='animal')
+        >>> s.isin(['cow', 'lama'])
+        0     True
+        1     True
+        2     True
+        3    False
+        4     True
+        5    False
+        Name: animal, dtype: bool
+
+        Passing a single string as ``s.isin('lama')`` will raise an error. Use
+        a list of one element instead:
+
+        >>> s.isin(['lama'])
+        0     True
+        1    False
+        2     True
+        3    False
+        4     True
+        5    False
+        Name: animal, dtype: bool
+        """
+        result = algorithms.isin(self, values)
+        return self._constructor(result, index=self.index).__finalize__(self)
+
+    def between(self, left, right, inclusive=True):
+        """
+        Return boolean Series equivalent to left <= series <= right.
+
+        This function returns a boolean vector containing `True` wherever the
+        corresponding Series element is between the boundary values `left` and
+        `right`. NA values are treated as `False`.
+
+        Parameters
+        ----------
+        left : scalar
+            Left boundary.
+        right : scalar
+            Right boundary.
+        inclusive : bool, default True
+            Include boundaries.
+
+        Returns
+        -------
+        Series
+            Each element will be a boolean.
+
+        Notes
+        -----
+        This function is equivalent to ``(left <= ser) & (ser <= right)``
+
+        See Also
+        --------
+        pandas.Series.gt : Greater than of series and other
+        pandas.Series.lt : Less than of series and other
+
+        Examples
+        --------
+        >>> s = pd.Series([2, 0, 4, 8, np.nan])
+
+        Boundary values are included by default:
+
+        >>> s.between(1, 4)
+        0     True
+        1    False
+        2     True
+        3    False
+        4    False
+        dtype: bool
+
+        With `inclusive` set to ``False`` boundary values are excluded:
+
+        >>> s.between(1, 4, inclusive=False)
+        0     True
+        1    False
+        2    False
+        3    False
+        4    False
+        dtype: bool
+
+        `left` and `right` can be any scalar value:
+
+        >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
+        >>> s.between('Anna', 'Daniel')
+        0    False
+        1     True
+        2     True
+        3    False
+        dtype: bool
+        """
+        if inclusive:
+            lmask = self >= left
+            rmask = self <= right
+        else:
+            lmask = self > left
+            rmask = self < right
+
+        return lmask & rmask
+
+    @classmethod
+    def from_csv(cls, path, sep=',', parse_dates=True, header=None,
+                 index_col=0, encoding=None, infer_datetime_format=False):
+        """Read CSV file.
+
+        .. deprecated:: 0.21.0
+            Use :func:`pandas.read_csv` instead.
+
+        It is preferable to use the more powerful :func:`pandas.read_csv`
+        for most general purposes, but ``from_csv`` makes for an easy
+        roundtrip to and from a file (the exact counterpart of
+        ``to_csv``), especially with a time Series.
+
+        This method only differs from :func:`pandas.read_csv` in some defaults:
+
+        - `index_col` is ``0`` instead of ``None`` (take first column as index
+          by default)
+        - `header` is ``None`` instead of ``0`` (the first row is not used as
+          the column names)
+        - `parse_dates` is ``True`` instead of ``False`` (try parsing the index
+          as datetime by default)
+
+        With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used
+        to return a Series like ``from_csv``.
+
+        Parameters
+        ----------
+        path : string file path or file handle / StringIO
+        sep : string, default ','
+            Field delimiter
+        parse_dates : boolean, default True
+            Parse dates. Different default from read_table
+        header : int, default None
+            Row to use as header (skip prior rows)
+        index_col : int or sequence, default 0
+            Column to use for index. If a sequence is given, a MultiIndex
+            is used. Different default from read_table
+        encoding : string, optional
+            A string representing the encoding to use if the contents are
+            non-ASCII, for Python versions prior to 3.
+        infer_datetime_format : boolean, default False
+            If True and `parse_dates` is True for a column, try to infer the
+            datetime format based on the first datetime string. If the format
+            can be inferred, there often will be a large parsing speed-up.
+
+        See also
+        --------
+        pandas.read_csv
+
+        Returns
+        -------
+        y : Series
+        """
+
+        # We're calling `DataFrame.from_csv` in the implementation,
+        # which will propagate a warning regarding `from_csv` deprecation.
+        from pandas.core.frame import DataFrame
+        df = DataFrame.from_csv(path, header=header, index_col=index_col,
+                                sep=sep, parse_dates=parse_dates,
+                                encoding=encoding,
+                                infer_datetime_format=infer_datetime_format)
+        result = df.iloc[:, 0]
+        if header is None:
+            result.index.name = result.name = None
+
+        return result
+
+    def to_csv(self, path=None, index=True, sep=",", na_rep='',
+               float_format=None, header=False, index_label=None,
+               mode='w', encoding=None, compression=None, date_format=None,
+               decimal='.'):
+        """
+        Write Series to a comma-separated values (csv) file
+
+        Parameters
+        ----------
+        path : string or file handle, default None
+            File path or object, if None is provided the result is returned as
+            a string.
+        na_rep : string, default ''
+            Missing data representation
+        float_format : string, default None
+            Format string for floating point numbers
+        header : boolean, default False
+            Write out series name
+        index : boolean, default True
+            Write row names (index)
+        index_label : string or sequence, default None
+            Column label for index column(s) if desired. If None is given, and
+            `header` and `index` are True, then the index names are used. A
+            sequence should be given if the DataFrame uses MultiIndex.
+        mode : Python write mode, default 'w'
+        sep : character, default ","
+            Field delimiter for the output file.
+        encoding : string, optional
+            A string representing the encoding to use if the contents are
+            non-ASCII, for Python versions prior to 3.
+        compression : string, optional
+            A string representing the compression to use in the output file.
+            Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only
+            used when the first argument is a filename.
+        date_format : string, default None
+            Format string for datetime objects.
+        decimal : string, default '.'
+            Character recognized as decimal separator. E.g. use ',' for
+            European data.
+        """
+        from pandas.core.frame import DataFrame
+        df = DataFrame(self)
+        # result is only a string if no path provided, otherwise None
+        result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
+                           float_format=float_format, header=header,
+                           index_label=index_label, mode=mode,
+                           encoding=encoding, compression=compression,
+                           date_format=date_format, decimal=decimal)
+        if path is None:
+            return result
+
+    @Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs)
+    def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
+                 float_format=None, columns=None, header=True, index=True,
+                 index_label=None, startrow=0, startcol=0, engine=None,
+                 merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
+        df = self.to_frame()
+        df.to_excel(excel_writer=excel_writer, sheet_name=sheet_name,
+                    na_rep=na_rep, float_format=float_format, columns=columns,
+                    header=header, index=index, index_label=index_label,
+                    startrow=startrow, startcol=startcol, engine=engine,
+                    merge_cells=merge_cells, encoding=encoding,
+                    inf_rep=inf_rep, verbose=verbose)
+
+    @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
+    def isna(self):
+        return super(Series, self).isna()
+
+    @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
+    def isnull(self):
+        return super(Series, self).isnull()
+
+    @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
+    def notna(self):
+        return super(Series, self).notna()
+
+    @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
+    def notnull(self):
+        return super(Series, self).notnull()
+
+    def dropna(self, axis=0, inplace=False, **kwargs):
+        """
+        Return a new Series with missing values removed.
+
+        See the :ref:`User Guide <missing_data>` for more on which values are
+        considered missing, and how to work with missing data.
+
+        Parameters
+        ----------
+        axis : {0 or 'index'}, default 0
+            There is only one axis to drop values from.
+        inplace : bool, default False
+            If True, do operation inplace and return None.
+        **kwargs
+            Not in use.
+
+        Returns
+        -------
+        Series
+            Series with NA entries dropped from it.
+
+        See Also
+        --------
+        Series.isna: Indicate missing values.
+        Series.notna : Indicate existing (non-missing) values.
+        Series.fillna : Replace missing values.
+        DataFrame.dropna : Drop rows or columns which contain NA values.
+        Index.dropna : Drop missing indices.
+
+        Examples
+        --------
+        >>> ser = pd.Series([1., 2., np.nan])
+        >>> ser
+        0    1.0
+        1    2.0
+        2    NaN
+        dtype: float64
+
+        Drop NA values from a Series.
+
+        >>> ser.dropna()
+        0    1.0
+        1    2.0
+        dtype: float64
+
+        Keep the Series with valid entries in the same variable.
+
+        >>> ser.dropna(inplace=True)
+        >>> ser
+        0    1.0
+        1    2.0
+        dtype: float64
+
+        Empty strings are not considered NA values. ``None`` is considered an
+        NA value.
+
+        >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
+        >>> ser
+        0       NaN
+        1         2
+        2       NaT
+        3
+        4      None
+        5    I stay
+        dtype: object
+        >>> ser.dropna()
+        1         2
+        3
+        5    I stay
+        dtype: object
+        """
+        inplace = validate_bool_kwarg(inplace, 'inplace')
+        kwargs.pop('how', None)
+        if kwargs:
+            raise TypeError('dropna() got an unexpected keyword '
+                            'argument "{0}"'.format(list(kwargs.keys())[0]))
+
+        axis = self._get_axis_number(axis or 0)
+
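+        # dtypes that cannot hold NA (e.g. int64) have nothing to drop, so a
+        # plain copy is returned below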
+        if self._can_hold_na:
+            result = remove_na_arraylike(self)
+            if inplace:
+                self._update_inplace(result)
+            else:
+                return result
+        else:
+            if inplace:
+                # do nothing
+                pass
+            else:
+                return self.copy()
+
+    def valid(self, inplace=False, **kwargs):
+        """Return Series without null values.
+
+        .. deprecated:: 0.23.0
+            Use :meth:`Series.dropna` instead.
+        """
+        warnings.warn("Method .valid will be removed in a future version. "
+                      "Use .dropna instead.", FutureWarning, stacklevel=2)
+        return self.dropna(inplace=inplace, **kwargs)
+
+    # ----------------------------------------------------------------------
+    # Time series-oriented methods
+
+    def to_timestamp(self, freq=None, how='start', copy=True):
+        """
+        Cast to DatetimeIndex of timestamps, at *beginning* of period
+
+        Parameters
+        ----------
+        freq : string, default frequency of PeriodIndex
+            Desired frequency
+        how : {'s', 'e', 'start', 'end'}
+            Convention for converting period to timestamp; start of period
+            vs. end
+
+        Returns
+        -------
+        ts : Series with DatetimeIndex
+        """
+        new_values = self._values
+        if copy:
+            new_values = new_values.copy()
+
+        new_index = self.index.to_timestamp(freq=freq, how=how)
+        return self._constructor(new_values,
+                                 index=new_index).__finalize__(self)
+
+    def to_period(self, freq=None, copy=True):
+        """
+        Convert Series from DatetimeIndex to PeriodIndex with desired
+        frequency (inferred from index if not passed)
+
+        Parameters
+        ----------
+        freq : string, default None
+            Desired frequency; inferred from the index if not passed.
+
+        Returns
+        -------
+        ts : Series with PeriodIndex
+        """
+        new_values = self._values
+        if copy:
+            new_values = new_values.copy()
+
+        new_index = self.index.to_period(freq=freq)
+        return self._constructor(new_values,
+                                 index=new_index).__finalize__(self)
+
+    # ----------------------------------------------------------------------
+    # Accessor Methods
+    # ----------------------------------------------------------------------
+    str = CachedAccessor("str", StringMethods)
+    dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
+    cat = CachedAccessor("cat", CategoricalAccessor)
+    plot = CachedAccessor("plot", gfx.SeriesPlotMethods)
+
+    # ----------------------------------------------------------------------
+    # Add plotting methods to Series
+    hist = gfx.hist_series
+
+
+Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0},
+                   docs={'index': 'The index (axis labels) of the Series.'})
+Series._add_numeric_operations()
+Series._add_series_only_operations()
+Series._add_series_or_dataframe_operations()
+
+# Add arithmetic!
+ops.add_flex_arithmetic_methods(Series)
+ops.add_special_arithmetic_methods(Series)
+
+
+# -----------------------------------------------------------------------------
+# Supplementary functions
+
+
+def _sanitize_index(data, index, copy=False):
+    """ sanitize an index type to return an ndarray of the underlying, pass
+    thru a non-Index
+    """
+
+    if index is None:
+        return data
+
+    if len(data) != len(index):
+        raise ValueError('Length of values does not match length of index')
+
+    if isinstance(data, ABCIndexClass) and not copy:
+        pass
+    elif isinstance(data, (PeriodIndex, DatetimeIndex)):
+        data = data._values
+        if copy:
+            data = data.copy()
+
+    elif isinstance(data, np.ndarray):
+
+        # coerce datetimelike types
+        if data.dtype.kind in ['M', 'm']:
+            data = _sanitize_array(data, index, copy=copy)
+
+    return data
+
+
+def _sanitize_array(data, index, dtype=None, copy=False,
+                    raise_cast_failure=False):
+    """ sanitize input data to an ndarray, copy if specified, coerce to the
+    dtype if specified
+    """
+
+    if dtype is not None:
+        dtype = pandas_dtype(dtype)
+
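+    # masked arrays: upcast if needed so masked positions can hold the fill
+    # value (e.g. NaN), then fill them in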
+    if isinstance(data, ma.MaskedArray):
+        mask = ma.getmaskarray(data)
+        if mask.any():
+            data, fill_value = maybe_upcast(data, copy=True)
+            data[mask] = fill_value
+        else:
+            data = data.copy()
+
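+    # helper: attempt dtype/datetime coercion of `arr`, falling back to an
+    # object ndarray (or re-raising) when the cast fails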
+    def _try_cast(arr, take_fast_path):
+
+        # perf shortcut as this is the most common case
+        if take_fast_path:
+            if maybe_castable(arr) and not copy and dtype is None:
+                return arr
+
+        try:
+            subarr = maybe_cast_to_datetime(arr, dtype)
+            # Take care in creating object arrays (but iterators are not
+            # supported):
+            if is_object_dtype(dtype) and (is_list_like(subarr) and
+                                           not (is_iterator(subarr) or
+                                           isinstance(subarr, np.ndarray))):
+                subarr = construct_1d_object_array_from_listlike(subarr)
+            elif not is_extension_type(subarr):
+                subarr = construct_1d_ndarray_preserving_na(subarr, dtype,
+                                                            copy=copy)
+        except (ValueError, TypeError):
+            if is_categorical_dtype(dtype):
+                # We *do* allow casting to categorical, since we know
+                # that Categorical is the only array type for 'category'.
+                subarr = Categorical(arr, dtype.categories,
+                                     ordered=dtype.ordered)
+            elif is_extension_array_dtype(dtype):
+                # We don't allow casting to third party dtypes, since we don't
+                # know what array belongs to which type.
+                msg = ("Cannot cast data to extension dtype '{}'. "
+                       "Pass the extension array directly.".format(dtype))
+                raise ValueError(msg)
+
+            elif dtype is not None and raise_cast_failure:
+                raise
+            else:
+                subarr = np.array(arr, dtype=object, copy=copy)
+        return subarr
+
+    # GH #846
+    if isinstance(data, (np.ndarray, Index, Series)):
+
+        if dtype is not None:
+            subarr = np.array(data, copy=False)
+
+            # possibility of nan -> garbage
+            if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
+                if not isna(data).any():
+                    subarr = _try_cast(data, True)
+                elif copy:
+                    subarr = data.copy()
+            else:
+                subarr = _try_cast(data, True)
+        elif isinstance(data, Index):
+            # don't coerce Index types
+            # e.g. indexes can have different conversions (so don't fast path
+            # them)
+            # GH 6140
+            subarr = _sanitize_index(data, index, copy=copy)
+        else:
+
+            # we will try to copy by definition here
+            subarr = _try_cast(data, True)
+
+    elif isinstance(data, ExtensionArray):
+        subarr = data
+
+        if dtype is not None and not data.dtype.is_dtype(dtype):
+            msg = ("Cannot coerce extension array to dtype '{typ}'. "
+                   "Do the coercion before passing to the constructor "
+                   "instead.".format(typ=dtype))
+            raise ValueError(msg)
+
+        if copy:
+            subarr = data.copy()
+        return subarr
+
+    elif isinstance(data, (list, tuple)) and len(data) > 0:
+        if dtype is not None:
+            try:
+                subarr = _try_cast(data, False)
+            except Exception:
+                if raise_cast_failure:  # pragma: no cover
+                    raise
+                subarr = np.array(data, dtype=object, copy=copy)
+                subarr = lib.maybe_convert_objects(subarr)
+
+        else:
+            subarr = maybe_convert_platform(data)
+
+        subarr = maybe_cast_to_datetime(subarr, dtype)
+
+    elif isinstance(data, range):
+        # GH 16804
+        start, stop, step = get_range_parameters(data)
+        arr = np.arange(start, stop, step, dtype='int64')
+        subarr = _try_cast(arr, False)
+    else:
+        subarr = _try_cast(data, False)
+
+    # scalar like, GH
+    if getattr(subarr, 'ndim', 0) == 0:
+        if isinstance(data, list):  # pragma: no cover
+            subarr = np.array(data, dtype=object)
+        elif index is not None:
+            value = data
+
+            # figure out the dtype from the value (upcast if necessary)
+            if dtype is None:
+                dtype, value = infer_dtype_from_scalar(value)
+            else:
+                # need to possibly convert the value here
+                value = maybe_cast_to_datetime(value, dtype)
+
+            subarr = construct_1d_arraylike_from_scalar(
+                value, len(index), dtype)
+
+        else:
+            return subarr.item()
+
+    # the result that we want
+    elif subarr.ndim == 1:
+        if index is not None:
+
+            # a 1-element ndarray
+            if len(subarr) != len(index) and len(subarr) == 1:
+                subarr = construct_1d_arraylike_from_scalar(
+                    subarr[0], len(index), subarr.dtype)
+
+    elif subarr.ndim > 1:
+        if isinstance(data, np.ndarray):
+            raise Exception('Data must be 1-dimensional')
+        else:
+            subarr = com._asarray_tuplesafe(data, dtype=dtype)
+
+    # This is to prevent mixed-type Series from being cast entirely to
+    # NumPy string type, e.g. NaN --> '-1#IND'.
+    if issubclass(subarr.dtype.type, compat.string_types):
+        # GH 16605
+        # If not empty convert the data to dtype
+        # GH 19853: If data is a scalar, subarr has already the result
+        if not is_scalar(data):
+            if not np.all(isna(data)):
+                data = np.array(data, dtype=dtype, copy=False)
+            subarr = np.array(data, dtype=object, copy=copy)
+
+    return subarr
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/transform.html b/docs/_build/html/_modules/transform.html
new file mode 100644
index 0000000..116439e
--- /dev/null
+++ b/docs/_build/html/_modules/transform.html
@@ -0,0 +1,356 @@
+<!-- Sphinx-generated page "transform — CrossVA 1.0a documentation"; head and navigation markup omitted -->
Source code for transform

+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Defines the main CrossVA function, `transform`, which maps raw VA data into
+data for use with a VA algorithm in OpenVA.
+"""
+
+import pandas as pd
+from configuration import Configuration, CrossVA
+from utils import flexible_read
+
+
+
[docs]def transform(mapping, raw_data, verbose=2, preserve_na=True): + """transforms raw VA data (`raw_data`) into data suitable for use with a VA + algorithm, according to the specified transformations given in `mapping`. + + Args: + mapping (string, tuple or Pandas DataFrame): Should be either a tuple + in form (input, output), a path to csv containing a configuration + data file, or a Pandas DataFrame containing configuration data + raw_data (string or Pandas DataFrame): raw verbal autopsy data to + process + verbose (int): integer from 0 to 5, controlling how much status detail + is printed to console. Silent if 0. Defaults to 2. + preserve_na (bool): whether to preserve NAs in data, or to count them + as FALSE. Overridden with True for InsillicoVA, False for InterVA4 + when mapping is given as a tuple. + + Returns: + Pandas DataFrame: the raw data transformed according to specifications + given in mapping data. + + Examples: + You can specify the mapping as (input, output) and the path to csv: + + >>> transform(("2016WHOv151", "InterVA4"), "resources/sample_data/mock_data_1.csv").iloc[:5,:5] + AB_POSIT AB_SIZE ABD_MASS ABD_PAIN ABDOM + 0 False True False True False + 1 False False True False True + 2 False False True False False + 3 True False False False False + 4 False False False True False + + >>> transform(("2016WHOv151", "InsillicoVA"), "resources/sample_data/mock_data_1.csv").iloc[:5,:5] + AB_POSIT AB_SIZE ABD_MASS ABD_PAIN ABDOM + 0 False True False True False + 1 NaN NaN True NaN True + 2 NaN False True False NaN + 3 True False NaN False NaN + 4 NaN NaN NaN True False + + Or you can give the data and mapping as Pandas DataFrames: + + >>> my_special_data = pd.read_csv("resources/sample_data/mock_data_1.csv") + >>> my_special_mapping = pd.read_csv("resources/mapping_configuration_files/example_config_1.csv") + >>> transform(my_special_mapping, my_special_data).iloc[:5,:5] + AB_POSIT AB_SIZE AC_BRL AC_CONV AC_COUGH + 0 False True True False True + 1 NaN NaN True False False + 2 NaN False True True False + 3 True False True False True + 4 NaN NaN True True True + + However, the mapping-data relationship must be valid. + + >>> bad_mapping = pd.read_csv("resources/mapping_configuration_files/example_config_2.csv") + >>> transform(bad_mapping, my_special_data).iloc[:5,:5] + Traceback (most recent call last): + ValueError: Configuration from mapping file must be valid before + transform. + + """ + mapping_data = pd.DataFrame() + # TODO - this is clunky in current form, could stand to be more elegant + # read in mapping data + if isinstance(mapping, tuple): # if mapping is in (input, output) format + internal_path = "resources/mapping_configuration_files/" + if len(mapping) == 2: + supported_outputs = ["InterVA4", "InsillicoVA"] + supported_inputs = ["2016WHOv151"] + if mapping[0] in supported_inputs: + if mapping[1] in supported_outputs: + preserve_na = mapping[1] == "InsillicoVA" #overides given + mapping_data = pd.read_csv((f"{internal_path}" + f"{mapping[0]}_to_" + f"{mapping[1]}.csv")) + else: + raise ValueError(("Output not supported. Expected one of" + f" {supported_outputs}, but received " + f"'{mapping[1]}'")) + else: + raise ValueError(("Input not supported. 
Expected one of" + f" {supported_inputs}, but received " + f"'{mapping[0]}'")) + else: + raise ValueError(("If mapping is tuple, input should be of length " + "two in the form (input type, output type)")) + + else: + mapping_data = flexible_read(mapping) + + if mapping_data.empty: # this shouldn't happen; if it does, raise + raise ValueError(("No valid mapping data provided to transform. Should be" + " either a tuple in form (input, output), a path to csv" + " or a Pandas DataFrame.")) + + # init configuration obj from given mapping data + config = Configuration(mapping_data, verbose, process_strings=False) + + # if the configuration isn't valid, or if the data isn't valid for the + # config file, then raise error + if not config.validate(): + # Raise Error + raise ValueError(("Configuration from mapping file must be valid " + "before transform.")) + + # TODO adds args to init based on data type? + input_data = flexible_read(raw_data) + cross_va = CrossVA(input_data, config) + if not cross_va.validate(): + raise ValueError(("Cannot transform if provided raw data and " + "configuration file are not a valid match.")) + final_data = cross_va.process() + + if preserve_na: + return final_data + return final_data.fillna(False)
+# transform(("2016WHOv151", "InterVA4"), "resources/sample_data/mock_data_1.csv")
\ No newline at end of file
diff --git a/docs/_build/html/_modules/utils.html b/docs/_build/html/_modules/utils.html
new file mode 100644
index 0000000..8b0a48b
--- /dev/null
+++ b/docs/_build/html/_modules/utils.html
@@ -0,0 +1,364 @@
+<!-- Sphinx-generated page "utils — CrossVA 1.0a documentation"; head and navigation markup omitted -->
Source code for utils

+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Convenience functions for the CrossVA module, which help to provide a more
+user-friendly experience with inputs and error messages across different files.
+"""
+import re
+import pandas as pd
+
+
+
[docs]def report_list(alist, limit=10, paren=True): + """Converts alist into a user-friendly string for clearer error messages. + Each element is reported single quotes and seperated by commas, with the + last element preceded by " and ". + When limit is shorter than the list, cuts the list at the limit, + omits the 'and', and ends with 'etc.' to indicate incompleteness. + + Args: + alist (list): Description of parameter `alist`. + limit (int): The maximum number of items to report. If more than limit, + the list is reported without conjunction and ends with "etc." + Defaults to 10. + paren (boolean): Encloses string in parentheses if true. Defaults to + True. + + Returns: + str: human-friendly sentence describing the items in alist + + Raises: TODO + + Examples + >>> report_list(["A","B","C"]) + "('A', 'B', and 'C')" + + >>> report_list(["A","B","C"], limit=2) + "('A', 'B', etc.)" + + >>> report_list(["A","B","C"], limit=2, paren=False) + "'A', 'B', etc." + + >>> report_list([]) + '' + """ + if len(alist) == 0: + return "" + + if limit is not None: + if len(alist) <= limit: + limit = None + else: + alist = alist[:limit] + + str_list = "'" + "', '".join([str(a) for a in alist]) + "'" + if limit is None: + report = re.sub(r'(.*), ', r'\1, and ', + str_list) # use "and" if complete + else: + report = str_list + ", etc." # end with etc. if incomplete + + if paren: + return "("+report+")" + return report
+ + +
+def flexible_read(path_or_df):
+    """Takes either a path or a Pandas DataFrame; if given a path, reads it
+    in as a pandas dataframe. Convenience method to add input flexibility for
+    the main transform method.
+
+    Args:
+        path_or_df (string or Pandas DataFrame): Either a string representing
+            a path to the file containing the data, or a dataframe that has
+            already been read into Python.
+
+    Returns:
+        Pandas DataFrame: either the data at the given path as read by pandas,
+        or the DataFrame constructor used on the path_or_df argument
+
+    Raises: TODO
+
+    Examples:
+        Can return a dataframe from a string:
+        >>> flexible_read("resources/sample_data/mock_data_1.csv").head()
+           ID       Id10004  ...   isNeonatal1   isNeonatal2
+        0   0           wet  ...             1  Not Answered
+        1   1           wet  ...  Not Answered           NaN
+        2   2           dry  ...           NaN             1
+        3   3  Not Answered  ...  Not Answered  Not Answered
+        4   4           dry  ...             1           NaN
+        <BLANKLINE>
+        [5 rows x 252 columns]
+
+        Or apply the pandas dataframe constructor to the input:
+        >>> flexible_read(np.arange(9).reshape(3,3))
+           0  1  2
+        0  0  1  2
+        1  3  4  5
+        2  6  7  8
+    """
+    if isinstance(path_or_df, str):  # if mapping is path
+        ext = path_or_df.split(".")[-1]  # file extension
+        if ext in ["xlsm", "xlsx", "xls"]:
+            return_df = pd.read_excel(path_or_df)
+        else:
+            return_df = pd.read_csv(path_or_df)
+    else:
+        return_df = pd.DataFrame(path_or_df)
+    return return_df
+ + +
[docs]def english_relationship(rel): + """Returns abbreviated relationship as full english phrase. + + Args: + rel (str): a string with the relationship being translated, e.g., "gt" + + Returns: + str: a string with the relationship as a longer english phrase e.g., + "greater than". If relationship not defined in the dict english, + then this method returns rel without modification. + + Raises: TODO + + Examples + >>> english_relationship("gt") + 'is greater than' + + >>> english_relationship("unknown") + 'unknown' + + """ + english = {"gt": "greater than", "ge": "greater than or equal to", + "lt": "less than", "le": "less than or equal to", + "ne": "not equal to", "eq": "equal to", + "between": "between"} + if rel in english.keys(): + return "is " + english[rel] + return rel
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/validation.html b/docs/_build/html/_modules/validation.html
new file mode 100644
index 0000000..728664d
--- /dev/null
+++ b/docs/_build/html/_modules/validation.html
@@ -0,0 +1,984 @@
+<!-- Sphinx-generated page "validation — CrossVA 1.0a documentation"; head and navigation markup omitted -->
Source code for validation

+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Module containing the Validation class, and the VCheck class and its subclasses
+"""
+from abc import ABCMeta, abstractmethod
+from utils import report_list
+import pandas as pd
+import re
+
+
+
[docs]class VCheck(metaclass=ABCMeta): + """Abstract class fior a single validation check""" + + def __init__(self, message): + """Inits VCheck class + + Args: + message (str): the message associated with the validation check, + which should describe why the check has passed or failed. + + Examples: + >>> VCheck("Test Message") + Traceback (most recent call last): + TypeError: Can't instantiate abstract class VCheck with abstract + methods bullet, level, tier, title + """ + self.message = message + +
[docs] def expand(self): + """Expands VCheck information as a Pandas Series + + Args: + None + + Returns: + Pandas Series: representing VCheck attributes as a Pandas Series + + Examples: + >>> Err("Error Message").expand() + Tier Error + Bullet [!] + Level 1 + Title ERRORS + Message Error Message + dtype: object + """ + return pd.Series([self.tier(), self.bullet(), self.level(), + self.title(), self.message], + ["Tier", "Bullet", "Level", "Title", "Message"])
+ + @property + @abstractmethod + def tier(self): + """abstract property, must be overriden. + Should be str, representing name of VCheck tier""" + return + + @property + @abstractmethod + def bullet(self): + """abstract property, must be overriden. + Should be a str, representing a bullet point""" + return + + @property + @abstractmethod + def level(self): + """abstract property, must be overriden. + Should be int ,representing VCheck tier""" + return + + @property + @abstractmethod + def title(self): + """abstract property, must be overriden. + Should be str, representing title of VCheck type""" + return
+ + +
[docs]class Err(VCheck): + """VCheck subclass representing a serious problem in data validation + that prevents validation. + + Examples: + >>> Err("This is a data validation error").expand() + Tier Error + Bullet [!] + Level 1 + Title ERRORS + Message This is a data validation error + dtype: object + """ + +
[docs] def tier(self): + return "Error"
+ +
[docs] def bullet(self): + return "[!]"
+ +
[docs] def level(self): + return 1
+ +
[docs] def title(self): + return "ERRORS"
+ + +
[docs]class Warn(VCheck): + """VCheck subclass representing a problem in data validation that + can be fixed in place, but would otherwise prevent validation. + + Examples: + >>> Warn("This is a data validation warning").expand() + Tier Warning + Bullet [?] + Level 2 + Title WARNINGS + Message This is a data validation warning + dtype: object + """ + +
[docs] def tier(self): + return "Warning"
+ +
[docs] def bullet(self): + return "[?]"
+ +
[docs] def level(self): + return 2
+ +
[docs] def title(self): + return "WARNINGS"
+ + +
[docs]class Suggest(VCheck): + """VCheck subclass representing a minor problem with data that + does not prevent data validation. + + Examples: + >>> Suggest("This is a data validation suggestion").expand() + Tier Suggestion + Bullet [i] + Level 3 + Title SUGGESTIONS + Message This is a data validation suggestion + dtype: object + """ + +
[docs] def tier(self): + return "Suggestion"
+ +
[docs] def bullet(self): + return "[i]"
+ +
[docs] def level(self): + return 3
+ +
[docs] def title(self): + return "SUGGESTIONS"
+ + +
[docs]class Passing(VCheck): + """VCheck subclass representing a passed check in data validation, where + there is no problem. + + Examples: + >>> Passing("This is a passing data validation check").expand() + Tier Passing + Bullet [X] + Level 4 + Title CHECKS PASSED + Message This is a passing data validation check + dtype: object + """ + +
[docs] def tier(self): + return "Passing"
+ +
[docs] def bullet(self): + return "[X]"
+ +
[docs] def level(self): + return 4
+ +
[docs] def title(self): + return "CHECKS PASSED"
+ + +
[docs]class Validation(): + """Validation object represents an organized dataframe of validation checks + + Attributes: + vchecks (Pandas DataFrame): a dataframe containing the expanded form + of the VCheck instances that have been added. + """ + + def __init__(self): + """inits Validation class""" + self.vchecks = pd.DataFrame() + + def _add_condition(self, flagged_series, pass_check, fail_check): + """Internal method that adds a fail_check to the self.vchecks attribute + if any in flagged_series are True, else adds pass_check to the + self.vchecks attribute. Both fail_check and pass_check are appended to + self.vchecks through their .expand() method, which returns their + information as a Pandas Series. + + Args: + flagged_series (Pandas Series): a boolean Pandas Series, where True + represents a failed condition that has been flagged. + pass_check (VCheck): the VCheck to report if the check passes + fail_check (VCheck): the VCheck to report if the check fails + + Returns: + None + + Examples: + >>> v = Validation() + >>> v._add_condition(pd.Series([False, False, False]),\ + Passing("Passed test"), Err("Failed test")) + >>> v._add_condition(pd.Series([False, False, True]),\ + Passing("Passed test"), Err("Failed test")) + >>> v.vchecks + Bullet Level Message Tier Title + 0 [X] 4.0 Passed test Passing CHECKS PASSED + 1 [!] 1.0 Failed test Error ERRORS + """ + if flagged_series.sum() > 0: + self.vchecks = self.vchecks.append(fail_check.expand(), + ignore_index=True) + else: + self.vchecks = self.vchecks.append(pass_check.expand(), + ignore_index=True) + +
[docs] def must_contain(self, given, required, passing_msg=""): + """adds a validation check where `given` must contain every item in + `required` at least once to pass, and `fail_check` is `Err`, + (fails validation). + + Args: + given (Pandas Series): the items representing input given + required (Pandas Series): the items required to be in `given` + passing_msg (str): Message to return if all items in `expected` are + listed in `given`. Defaults to "". + + Returns: + None + + Examples: + >>> v = Validation() + >>> v.must_contain(pd.Series(["a","b","c"], name="example input"),\ + pd.Series(["a","b"],\ + name="example requirement(s)"),\ + "all included") + >>> v.must_contain(pd.Series(["a","b","c"], name="example input"),\ + pd.Series(["a","b","d"],\ + name="example requirement(s)")) + >>> v.report(verbose=4) + CHECKS PASSED + [X] all included + <BLANKLINE> + ERRORS + [!] 1 ('d') example requirement(s) were missing + from example input. These must be included. + """ + # Comparison is true (fails) when an item in required isn't in given + comparison = ~required.isin(given) + fail_msg = " ".join([str(comparison.sum()), + report_list(required[comparison]), + str(required.name), + "were missing from", str(given.name)+".", + "These must be included."]) + self._add_condition(comparison, Passing(passing_msg), + Err(fail_msg))
+ +
[docs] def no_extraneous(self, given, relevant, value_type): + """adds a validation check where all values in `given` should also be + in `relevant` to pass. `fail_check` is `Warn` + + Args: + given (Pandas Series): the items representing input given + relevant (Pandas Series): all items in `given` that will be used + value_type (str): string describing the kind of noun that is + listed in `given` + + Returns: + None + + Examples: + >>> v = Validation() + >>> v.no_extraneous(pd.Series(["a","b"], name="example input"),\ + pd.Series(["a","b","c"],\ + name="relevant value(s)"), "example") + >>> v.no_extraneous(pd.Series(["a","b","c"], name="example input"),\ + pd.Series(["a","d"],\ + name="relevant value(s)"), "example") + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] No extraneous example found in example input. + <BLANKLINE> + ERRORS + [!] 2 extraneous example(s) found in example input + ('b', and 'c') Extraneous example(s) will be ommitted. + """ + # comparison is true (fails) when an item in `given` isn't in + # `relevant` + comparison = ~given.isin(relevant) + fail_msg = " ".join([str(comparison.sum()), + "extraneous", value_type+"(s)", + "found in", str(given.name), + report_list(given[comparison], limit=5), + "Extraneous", value_type+"(s)", + "will be ommitted."]) + passing_msg = " ".join(["No extraneous", value_type, "found in", + str(given.name)+"."]) + self._add_condition(comparison, Passing(passing_msg), + Err(fail_msg))
+ +
[docs] def all_valid(self, given, valid, definition): + """adds a validation check where all values in `given` must be in `valid` + to pass. `fail_check` is `Err` (fails validation). + + Args: + given (Pandas Series): the items representing input given + valid (Pandas Series): list of all possible valid items accepted in + `given` + definition (str): string describing what makes an item in `given` + be in `valid` + + Returns: + None + + Examples: + >>> v = Validation() + >>> v.all_valid(pd.Series(["a","b"], name="example input"),\ + pd.Series(["a","b","c"],\ + name="valid value(s)"), "pre-defined") + >>> v.all_valid(pd.Series(["a","b","c"], name="example input"),\ + pd.Series(["a","d"],\ + name="valid value(s)"), "'a' or 'd'") + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] All values in example input are valid. + <BLANKLINE> + ERRORS + [!] 2 values in example input were invalid ('b', and 'c'). + These must be 'a' or 'd' to be valid. + """ + # comparison is true (fails) when an item in `given` isn't in `valid` + comparison = ~given.isin(valid) + passing_msg = " ".join(["All values in", str(given.name), + "are valid."]) + fail_msg = " ".join([str((comparison).sum()), "values in", + str(given.name), + "were invalid", report_list( + given[(comparison)]) + ".", + "These must be", definition, "to be valid."]) + self._add_condition(comparison, Passing(passing_msg), + Err(fail_msg))
+ +
[docs] def flag_rows(self, flag_where, flag_criteria, flag_action="", + flag_tier=Warn): + """Adds a validation check seeing if any values in flag_where are true, + where fail_check is of type flag_tier. Note that rows are reported + counting from 0. + + Args: + flag_where (Pandas Series): a boolean Pandas Series where True + represents a failed check. + flag_criteria (str): a noun clause describing the criteria for an + item to be flagged in `flag_where` + flag_action (str): string describing the action to be taken if + an item is flagged. Defaults to "". + flag_tier (VCheck): should be either Suggest, Warn, or Err, is + the seriousness of the failed check. + + Returns: + None + + Examples: + >>> v = Validation() + >>> v.flag_rows(pd.Series([False, False]),\ + flag_criteria="true values") + >>> v.flag_rows(pd.Series([False, True]),\ + flag_criteria="true values") + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] No true values detected. + <BLANKLINE> + WARNINGS + [?] 1 true values detected in row(s) #1. + """ + passing_msg = " ".join(["No", flag_criteria, "detected."]) + fail_msg = " ".join([str(flag_where.sum()), flag_criteria, + "detected in row(s)", report_row( + flag_where)+".", + flag_action]) + + self._add_condition(flag_where, Passing(passing_msg), + flag_tier(fail_msg))
+ + def _check_df(self, df, condition, flag_criteria, flag_action="", + flag_tier=Warn): + """Adds a validation check flagging the rows in every column of `df` + where applying the function condition changes the value of the element. + Passes flag_criteria, flag_action and flag_tier on to flag_rows(). + + Args: + df (Pandas DataFrame): a Pandas DataFrame where each column should + be checked + condition (function): a function that corrects possible errors in + each column, but does not change elements which are already + fine. + flag_criteria (str): a noun clause describing the criteria for an + item to be changed by `condition` + flag_action (str): string describing the action to be taken if + an item is flagged. Defaults to "". + flag_tier (VCheck): should be either Suggest, Warn, or Err, is + the seriousness of the failed check. + Returns: + Pandas DataFrame: the Pandas DataFrame where condition() has been + applied to every column. + + Examples: + >>> v = Validation() + >>> test_df = pd.DataFrame({"A":["a","B","c"], "B":["D","e","F"]}) + >>> v._check_df(test_df, str.lower, flag_criteria="lowercase char") + A B + 0 a d + 1 b e + 2 c f + + >>> v._check_df(test_df, str.upper, flag_criteria="uppercase char") + A B + 0 A D + 1 B E + 2 C F + + >>> v.report(verbose=4) + <BLANKLINE> + WARNINGS + [?] 1 lowercase char column A detected in row(s) #1. + [?] 2 lowercase char column B detected in row(s) #0, and #2. + [?] 2 uppercase char column A detected in row(s) #0, and #2. + [?] 1 uppercase char column B detected in row(s) #1. + + """ + df = df.copy() + for name, aseries in df.iteritems(): + applied = aseries.apply(condition) + self.flag_rows(applied.ne(aseries), + flag_criteria + " column " + str(name), + flag_action, + flag_tier) + df.loc[:, name] = applied + return df + +
[docs] def check_na(self, df): + """Adds a validation check flagging the rows in every column of `df` + that are `None` + + Args: + df (Pandas DataFrame): a Pandas DataFrame with columns that should + have no NA values + + Returns: + None + + Examples: + >>> v = Validation() + >>> test_df = pd.DataFrame({"A":["a","B","c"], "B":["D","e",None]}) + >>> v.check_na(test_df) + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] No NA's in column A detected. + <BLANKLINE> + ERRORS + [!] 1 NA's in column B detected in row(s) #2. + """ + self._check_df(df, + condition=lambda x: "" if x is None else x, + flag_criteria="NA's in", + flag_tier=Err + )
+ +
[docs] def fix_whitespace(self, df): + """Adds a validation check flagging the rows in every column of `df` + that contain whitespace + + Args: + df (Pandas DataFrame): a Pandas DataFrame with columns that should + have no whitespace + + Returns: + Pandas DataFrame: `df` where whitespace is replaced with an + underscore + + Examples: + >>> v = Validation() + >>> test_df = pd.DataFrame({"A":["a"," B ","Test Data"],\ + "B":["D"," e","F "]}) + >>> v.fix_whitespace(test_df) + A B + 0 a D + 1 B e + 2 Test_Data F + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] No whitespace in column B detected. + <BLANKLINE> + WARNINGS + [?] 1 leading/trailing spaces column A detected in row(s) + #1. Leading/trailing spaces will be removed. + [?] 2 leading/trailing spaces column B detected in row(s) + #1, and #2. Leading/trailing spaces will be removed. + [?] 1 whitespace in column A detected in row(s) #2. Whitespace + will be converted to '_' + """ + stripped_df = self._check_df(df.fillna("").astype(str), + str.strip, + flag_criteria="leading/trailing " + "spaces", + flag_action="Leading/trailing spaces " + "will be removed.") + # pass stripped_df to _check_df with a regular expression to replace + # remaining whitespace with underscores, except for the " to " + # construction + return self._check_df(stripped_df.fillna("").astype(str), + lambda x: re.sub(r"(?<!to)\s(?!to)", "_", x), + flag_criteria="whitespace in", + flag_action="Whitespace will be converted to '_'" + )
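The substitution above relies on negative look-arounds so that spaces on either side of the word "to" survive while any other internal whitespace becomes an underscore. A minimal standalone sketch of that rule (the sample string is invented, not from the package):

.. code-block:: python

    import re

    # Internal whitespace becomes "_", but spaces adjacent to the word "to"
    # are left alone by the (?<!to) / (?!to) look-arounds.
    pattern = r"(?<!to)\s(?!to)"
    print(re.sub(pattern, "_", "yes to both answers"))  # -> yes to both_answers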
+ +
[docs] def fix_alnum(self, df): + """Adds a validation check flagging the rows in every column of `df` + that contain non-alphanumeric characters. Regex removes all characters + that are not alpha-numeric, but leaves periods that are part of a + number. + + Args: + df (Pandas DataFrame): a Pandas DataFrame with columns that should + have only alphanumeric characters + + Returns: + Pandas DataFrame: `df` where non-alphanumeric characters are removed + + Examples: + >>> v = Validation() + >>> test_df = pd.DataFrame({"A":["a","3.0","c"],\ + "B":["??.test","test<>!",";test_data"]}) + >>> v.fix_alnum(test_df) + A B + 0 a test + 1 3.0 test + 2 c test_data + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] No non-alphanumeric value(s) in column A detected. + <BLANKLINE> + WARNINGS + [?] 3 non-alphanumeric value(s) in column B detected in row(s) + #0, #1, and #2. This text should be alphanumeric. Non-alphanumeric + characters will be removed. + + """ + return self._check_df(df, + lambda x: re.sub(r"[^a-zA-Z0-9_ \-\.]|\.(?!\d)", + r"", str(x)), + flag_criteria="non-alphanumeric value(s) in", + flag_action="This text should be alphanumeric. " + "Non-alphanumeric characters will be removed." + )
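For reference, a standalone sketch of the stripping rule used in ``fix_alnum`` above: characters other than letters, digits, ``_``, space, ``-`` and ``.`` are dropped, and a period only survives when a digit follows it (the sample strings mirror the doctest):

.. code-block:: python

    import re

    # First alternative strips anything outside the allowed character set;
    # second alternative drops periods that are not followed by a digit.
    pattern = r"[^a-zA-Z0-9_ \-\.]|\.(?!\d)"
    print(re.sub(pattern, "", "??.test"))  # -> test
    print(re.sub(pattern, "", "3.0"))      # -> 3.0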
+ +
[docs] def fix_lowcase(self, df): + """Adds a validation check flagging the rows in every column of `df` + that contain lowercase characters. + + Args: + df (Pandas DataFrame): a Pandas DataFrame with columns that should + have only uppercase characters + + Returns: + Pandas DataFrame: `df` where all characters are uppercase + + Examples: + >>> v = Validation() + >>> test_df = pd.DataFrame({"A":["a","B","c"], "B":["D","e","F"]}) + >>> v.fix_lowcase(test_df) + A B + 0 A D + 1 B E + 2 C F + >>> v.report(verbose=4) + <BLANKLINE> + WARNINGS + [?] 2 lower case value(s) in column A detected in row(s) #0, + and #2. Convention is to have this text be uppercase. Lower case text + will be made uppercase. + [?] 1 lower case value(s) in column B detected in row(s) #1. + Convention is to have this text be uppercase. Lower case text will be + made uppercase. + + """ + return self._check_df(df.astype(str), + lambda x: x.upper(), + flag_criteria="lower case value(s) in ", + flag_action="Convention is to have this text be " + "uppercase. Lower case text will be made " + "uppercase.")
+ +
[docs] def fix_upcase(self, df): + """Adds a validation check flagging the rows in every column of `df` + that contain uppercase characters + + Args: + df (Pandas DataFrame): a Pandas DataFrame with columns that should + have only lowercase characters + + Returns: + Pandas DataFrame: `df` where all characters are lowercase + + Examples: + >>> v = Validation() + >>> test_df = pd.DataFrame({"A":["a","B","c"], "B":["D","e","F"]}) + >>> v.fix_upcase(test_df) + A B + 0 a d + 1 b e + 2 c f + >>> v.report(verbose=4) + <BLANKLINE> + WARNINGS + [?] 1 upper case value(s) in column A detected in row(s) #1. + Convention is to have this text be lowercase. Upper case text will + be made lowercase. + [?] 2 upper case value(s) in column B detected in row(s) #0, + and #2. Convention is to have this text be lowercase. Upper case + text will be made lowercase. + """ + return self._check_df(df.astype(str), + lambda x: x.lower(), + flag_criteria="upper case value(s) in", + flag_action="Convention is to have this text be " + "lowercase. Upper case text will be made" + " lowercase.")
+ +
[docs] def is_valid(self): + """Checks to see if instance is valid. + + Args: + None + + Returns: + bool: True if is valid (has no errors in vchecks) and False if + instance has errors or where vchecks is empty. + + Examples: + >>> Validation().is_valid() + False + >>> v = Validation() + >>> v.must_contain(pd.Series(["A", "B"]), pd.Series(["B"])) + >>> v.is_valid() + True + >>> v.must_contain(pd.Series(["A", "B"]), pd.Series(["C"])) + >>> v.is_valid() + False + """ + if self.vchecks.empty: + return False + return (self.vchecks["Tier"] == "Error").sum() == 0
+ +
[docs] def report(self, verbose=2): + """Prints the checks in the vchecks attribute + + Args: + verbose (int): Parameter controlling how much to print by filtering + for the level in each vchecks row to be less than or equal to + verbose. Defaults to 2 (print only converted `Warn` and `Err` + checks) + + Returns: + None + + Examples: + >>> v = Validation() + >>> v._add_condition(pd.Series([False, False, False]),\ + Passing("Passed test"), Err("Failed test")) + >>> v._add_condition(pd.Series([False, False, False]),\ + Passing("Passed test 2"), Err("Failed test")) + >>> v._add_condition(pd.Series([False, False, True]),\ + Passing("Passed test"), Err("Error test")) + >>> v._add_condition(pd.Series([False, False, True]),\ + Passing("Passed test"), Warn("Warn test")) + >>> v._add_condition(pd.Series([False, False, True]),\ + Passing(""), Suggest("Suggest test")) + >>> v.report(verbose=1) + <BLANKLINE> + ERRORS + [!] Error test + >>> v.report(verbose=4) + <BLANKLINE> + CHECKS PASSED + [X] Passed test + [X] Passed test 2 + <BLANKLINE> + ERRORS + [!] Error test + <BLANKLINE> + SUGGESTIONS + [i] Suggest test + <BLANKLINE> + WARNINGS + [?] Warn test + """ + if self.vchecks.empty: + print("No validation checks made.") + return + within_verbose = self.vchecks[self.vchecks["Level"] <= verbose] + final_reports = within_verbose.groupby("Title") + for title in final_reports.groups: + print("\n", title) + for i, single_report in final_reports.get_group(title).iterrows(): + print(single_report["Bullet"], "\t", + single_report["Message"])
+ + +
[docs]def report_row(flag_where): + """A helper function to return an English explanation of what rows have been + flagged with a failed validation check. + + Args: + flag_where (Pandas Series): boolean Pandas Series representing failed + validation checks. + + Returns: + str: a string reporting the index of the flagged rows + + Examples: + >>> report_row(pd.Series([True, True, False, True, False])) + '#0, #1, and #3' + + """ + flagged = flag_where[flag_where.fillna(False)] + if flagged.index.is_numeric(): + unformatted_report = report_list(flagged.index.tolist(), paren=False) + return re.sub(r"'(\d+)'", r"#\1", unformatted_report) + return report_list(flagged.index.tolist(), paren=False)
+ + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) +
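To tie the pieces together, a rough end-to-end sketch of how the ``Validation`` class above might be driven (standalone illustration only; the ``validation`` import path is assumed from the Sphinx ``automodule`` entries further down, and the sample frame is invented):

.. code-block:: python

    import pandas as pd
    from validation import Validation  # assumed import path

    raw = pd.DataFrame({"A": ["a ", "B", None], "B": ["D", "e f", "G"]})

    v = Validation()
    v.check_na(raw)                  # missing values are an Err-tier failure
    cleaned = v.fix_whitespace(raw)  # strip, then replace inner whitespace with "_"
    cleaned = v.fix_upcase(cleaned)  # convention: lowercase text

    v.report(verbose=4)              # passed checks, suggestions, warnings, errors
    print(v.is_valid())              # False here, because a None was found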
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_sources/about.rst.txt b/docs/_build/html/_sources/about.rst.txt new file mode 100644 index 0000000..482827f --- /dev/null +++ b/docs/_build/html/_sources/about.rst.txt @@ -0,0 +1 @@ +.. include:: ../readme.rst diff --git a/docs/_build/html/_sources/config.rst.txt b/docs/_build/html/_sources/config.rst.txt new file mode 100644 index 0000000..edb8d70 --- /dev/null +++ b/docs/_build/html/_sources/config.rst.txt @@ -0,0 +1 @@ +.. include:: ../resources/mapping_configuration_files/readme.rst diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt new file mode 100644 index 0000000..1cc7d09 --- /dev/null +++ b/docs/_build/html/_sources/index.rst.txt @@ -0,0 +1,33 @@ + +CrossVA documentation +=================================== + +.. toctree:: + :maxdepth: 2 + :caption: Getting Started + + about + +.. toctree:: + :maxdepth: 2 + :caption: Custom Mappings + + Configuration Files + +.. toctree:: + :maxdepth: 2 + :caption: Under the hood + + transform function + source/configuration + source/validation + source/mappings + +.. include:: ../Readme.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/_build/html/_sources/source/configuration.rst.txt b/docs/_build/html/_sources/source/configuration.rst.txt new file mode 100644 index 0000000..f547b88 --- /dev/null +++ b/docs/_build/html/_sources/source/configuration.rst.txt @@ -0,0 +1,7 @@ +configuration module +==================== + +.. automodule:: configuration + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/source/mappings.rst.txt b/docs/_build/html/_sources/source/mappings.rst.txt new file mode 100644 index 0000000..1c22079 --- /dev/null +++ b/docs/_build/html/_sources/source/mappings.rst.txt @@ -0,0 +1,7 @@ +mappings module +=============== + +.. automodule:: mappings + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/source/modules.rst.txt b/docs/_build/html/_sources/source/modules.rst.txt new file mode 100644 index 0000000..1760eb1 --- /dev/null +++ b/docs/_build/html/_sources/source/modules.rst.txt @@ -0,0 +1,12 @@ +CrossVA +======= + +.. toctree:: + :maxdepth: 4 + + configuration + mappings + tests + transform + utils + validation diff --git a/docs/_build/html/_sources/source/tests.rst.txt b/docs/_build/html/_sources/source/tests.rst.txt new file mode 100644 index 0000000..03031c4 --- /dev/null +++ b/docs/_build/html/_sources/source/tests.rst.txt @@ -0,0 +1,7 @@ +tests module +============ + +.. automodule:: tests + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/source/transform.rst.txt b/docs/_build/html/_sources/source/transform.rst.txt new file mode 100644 index 0000000..8330c5e --- /dev/null +++ b/docs/_build/html/_sources/source/transform.rst.txt @@ -0,0 +1,7 @@ +transform module +================ + +.. automodule:: transform + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/source/utils.rst.txt b/docs/_build/html/_sources/source/utils.rst.txt new file mode 100644 index 0000000..87aedaf --- /dev/null +++ b/docs/_build/html/_sources/source/utils.rst.txt @@ -0,0 +1,7 @@ +utils module +============ + +.. 
automodule:: utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/source/validation.rst.txt b/docs/_build/html/_sources/source/validation.rst.txt new file mode 100644 index 0000000..a0c6476 --- /dev/null +++ b/docs/_build/html/_sources/source/validation.rst.txt @@ -0,0 +1,7 @@ +validation module +================= + +.. automodule:: validation + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_static/ajax-loader.gif b/docs/_build/html/_static/ajax-loader.gif new file mode 100644 index 0000000..61faf8c Binary files /dev/null and b/docs/_build/html/_static/ajax-loader.gif differ diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css new file mode 100644 index 0000000..6df76b0 --- /dev/null +++ b/docs/_build/html/_static/basic.css @@ -0,0 +1,639 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + width: 170px; +} + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable 
tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures 
--------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlighted { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} 
+ +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_build/html/_static/comment-bright.png b/docs/_build/html/_static/comment-bright.png new file mode 100644 index 0000000..15e27ed Binary files /dev/null and b/docs/_build/html/_static/comment-bright.png differ diff --git a/docs/_build/html/_static/comment-close.png b/docs/_build/html/_static/comment-close.png new file mode 100644 index 0000000..4d91bcf Binary files /dev/null and b/docs/_build/html/_static/comment-close.png differ diff --git a/docs/_build/html/_static/comment.png b/docs/_build/html/_static/comment.png new file mode 100644 index 0000000..dfbc0cb Binary files /dev/null and b/docs/_build/html/_static/comment.png differ diff --git a/docs/_build/html/_static/css/badge_only.css b/docs/_build/html/_static/css/badge_only.css new file mode 100644 index 0000000..323730a --- /dev/null +++ b/docs/_build/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions 
.rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} diff --git a/docs/_build/html/_static/css/theme.css b/docs/_build/html/_static/css/theme.css new file mode 100644 index 0000000..b19dbfe --- /dev/null +++ b/docs/_build/html/_static/css/theme.css @@ -0,0 +1,6 @@ +/* sphinx_rtd_theme version 0.4.2 | MIT license */ +/* Built 20181005 13:10 */ +*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier 
new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning 
.wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.7.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"),url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857em;text-align:center}.fa-ul{padding-left:0;margin-left:2.1428571429em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.1428571429em;width:2.1428571429em;top:.1428571429em;text-align:center}.fa-li.fa-lg{left:-1.8571428571em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.wy-menu-vertical li span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.rst-content .fa-pull-left.admonition-title,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content dl dt .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download 
span.fa-pull-left:first-child,.rst-content code.download span.fa-pull-left:first-child,.fa-pull-left.icon{margin-right:.3em}.fa.fa-pull-right,.wy-menu-vertical li span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.rst-content .fa-pull-right.admonition-title,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content dl dt .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.rst-content code.download span.fa-pull-right:first-child,.fa-pull-right.icon{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 
1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:
before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content .admonition-title:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.f
a-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.wy-dropdown .caret:before,.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{conte
nt:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:be
fore{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:b
efore{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-hotel:before,.fa-bed:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-yc:before,.fa-y-combinator:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}
.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-tv:before,.fa-television:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:""}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-signing:before,.fa-sign-language:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-vcard:before,.fa-address-card:before{content:""}.fa-vcard-o:before,.fa-address-card-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:befor
e{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li 
span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content table>caption .headerlink,.rst-content table>caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.btn .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content table>caption .headerlink,.rst-content table>caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 .headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content table>caption .headerlink,.rst-content table>caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav .rst-content code.download span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li 
span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.btn .rst-content tt.download 
span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content code.download span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content 
h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.admonition{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger .admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content .wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content .admonition-todo,.rst-content 
.wy-alert-warning.admonition{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title,.rst-content .wy-alert-warning.admonition .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.admonition{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note .admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content 
.tip,.rst-content .wy-alert-success.warning,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.admonition{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.admonition{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content 
.wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = 
false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f !important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned 
.wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 .3125em 0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group .wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group .wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group .wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group .wy-form-full{float:left;display:block;margin-right:2.3576515979%;width:100%;margin-right:0}.wy-control-group 
.wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.3576515979%;width:48.821174201%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.3576515979%;width:31.7615656014%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type="datetime-local"]{padding:.34375em .625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted 
\9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto #129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid #E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{position:absolute;content:"";display:block;left:0;top:0;width:36px;height:12px;border-radius:4px;background:#ccc;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27AE60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error 
input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:.3em;display:block}.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form 
.wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid #e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table 
th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul 
li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:before,.wy-breadcrumbs:after{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin-bottom:0;display:block;font-weight:bold;text-transform:uppercase;font-size:80%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a{color:#404040}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current 
li.toctree-l3>a{display:block;background:#c9c9c9;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980B9;text-align:center;padding:.809em;display:block;color:#fcfcfc;margin-bottom:.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top 
img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:gray}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:1em;background:none;border:none;color:gray}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{width:100%}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:before,.rst-breadcrumbs-buttons:after{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}@media screen and (max-width: 768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1100px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version 
.headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0px}.rst-content div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;display:block;overflow:auto}.rst-content pre.literal-block,.rst-content div[class^='highlight']{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px 
0}.rst-content pre.literal-block div[class^='highlight'],.rst-content div[class^='highlight'] div[class^='highlight']{padding:0px;border:none;margin:0}.rst-content div[class^='highlight'] td.code{width:100%}.rst-content .linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;display:block;overflow:auto}.rst-content div[class^='highlight'] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content pre.literal-block,.rst-content div[class^='highlight'] pre,.rst-content .linenodiv pre{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:12px;line-height:1.4}@media print{.rst-content .codeblock,.rst-content div[class^='highlight'],.rst-content div[class^='highlight'] pre{white-space:pre-wrap}}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint .last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last,.rst-content .admonition .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .section ol p:last-child,.rst-content .section ul p:last-child{margin-bottom:24px}.rst-content .line-block{margin-left:0px;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px 24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"";font-family:FontAwesome}.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content .toctree-wrapper p.caption:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content dl dt:hover 
.headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:baseline;position:relative;top:-0.4em;line-height:0;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:gray}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent !important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}.rst-content table.docutils td .last,.rst-content table.docutils td .last :last-child{margin-bottom:0}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content tt,.rst-content tt,.rst-content code{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content pre,.rst-content kbd,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:bold;margin-bottom:12px}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content 
dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content code.download{background:inherit;padding:inherit;font-weight:normal;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child,.rst-content code.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width: 480px){.rst-content .sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-regular.eot");src:url("../fonts/Lato/lato-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-regular.woff2") format("woff2"),url("../fonts/Lato/lato-regular.woff") format("woff"),url("../fonts/Lato/lato-regular.ttf") format("truetype");font-weight:400;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bold.eot");src:url("../fonts/Lato/lato-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bold.woff2") format("woff2"),url("../fonts/Lato/lato-bold.woff") format("woff"),url("../fonts/Lato/lato-bold.ttf") format("truetype");font-weight:700;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bolditalic.eot");src:url("../fonts/Lato/lato-bolditalic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bolditalic.woff2") format("woff2"),url("../fonts/Lato/lato-bolditalic.woff") format("woff"),url("../fonts/Lato/lato-bolditalic.ttf") 
format("truetype");font-weight:700;font-style:italic}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-italic.eot");src:url("../fonts/Lato/lato-italic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-italic.woff2") format("woff2"),url("../fonts/Lato/lato-italic.woff") format("woff"),url("../fonts/Lato/lato-italic.ttf") format("truetype");font-weight:400;font-style:italic}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:url("../fonts/RobotoSlab/roboto-slab.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.ttf") format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.ttf") format("truetype")} diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js new file mode 100644 index 0000000..5654977 --- /dev/null +++ b/docs/_build/html/_static/doctools.js @@ -0,0 +1,287 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s == 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node) { + if (node.nodeType == 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { + var span = document.createElement("span"); + span.className = className; + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this); + }); + } + } + return this.each(function() { + highlight(this); + }); +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated == 'undefined') + return string; + return (typeof translated == 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated == 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) == 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this == '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); \ No newline at end of file diff --git a/docs/_build/html/_static/down-pressed.png b/docs/_build/html/_static/down-pressed.png new file mode 100644 index 0000000..5756c8c Binary files /dev/null and b/docs/_build/html/_static/down-pressed.png differ diff --git a/docs/_build/html/_static/down.png b/docs/_build/html/_static/down.png new file mode 100644 index 0000000..1b3bdad Binary files /dev/null and b/docs/_build/html/_static/down.png differ diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/docs/_build/html/_static/file.png differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf b/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf new file mode 100644 index 0000000..809c1f5 Binary files /dev/null and b/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf b/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf new file mode 100644 index 0000000..fc981ce Binary files /dev/null and b/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf differ diff --git a/docs/_build/html/_static/fonts/Inconsolata.ttf 
b/docs/_build/html/_static/fonts/Inconsolata.ttf new file mode 100644 index 0000000..4b8a36d Binary files /dev/null and b/docs/_build/html/_static/fonts/Inconsolata.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato-Bold.ttf b/docs/_build/html/_static/fonts/Lato-Bold.ttf new file mode 100644 index 0000000..1d23c70 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato-Bold.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato-Regular.ttf b/docs/_build/html/_static/fonts/Lato-Regular.ttf new file mode 100644 index 0000000..0f3d0f8 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato-Regular.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.eot b/docs/_build/html/_static/fonts/Lato/lato-bold.eot new file mode 100644 index 0000000..3361183 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.ttf b/docs/_build/html/_static/fonts/Lato/lato-bold.ttf new file mode 100644 index 0000000..29f691d Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff b/docs/_build/html/_static/fonts/Lato/lato-bold.woff new file mode 100644 index 0000000..c6dff51 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 new file mode 100644 index 0000000..bb19504 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot new file mode 100644 index 0000000..3d41549 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf new file mode 100644 index 0000000..f402040 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff new file mode 100644 index 0000000..88ad05b Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 new file mode 100644 index 0000000..c4e3d80 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.eot b/docs/_build/html/_static/fonts/Lato/lato-italic.eot new file mode 100644 index 0000000..3f82642 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.ttf b/docs/_build/html/_static/fonts/Lato/lato-italic.ttf new file mode 100644 index 0000000..b4bfc9b Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff b/docs/_build/html/_static/fonts/Lato/lato-italic.woff new file mode 100644 index 0000000..76114bc Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 
b/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 new file mode 100644 index 0000000..3404f37 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.eot b/docs/_build/html/_static/fonts/Lato/lato-regular.eot new file mode 100644 index 0000000..11e3f2a Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.ttf b/docs/_build/html/_static/fonts/Lato/lato-regular.ttf new file mode 100644 index 0000000..74decd9 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff b/docs/_build/html/_static/fonts/Lato/lato-regular.woff new file mode 100644 index 0000000..ae1307f Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 b/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 new file mode 100644 index 0000000..3bf9843 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf new file mode 100644 index 0000000..df5d1df Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf new file mode 100644 index 0000000..eb52a79 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot new file mode 100644 index 0000000..79dc8ef Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf new file mode 100644 index 0000000..df5d1df Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff new file mode 100644 index 0000000..6cb6000 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 new file mode 100644 index 0000000..7059e23 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot new file mode 100644 index 0000000..2f7ca78 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf new file mode 100644 index 0000000..eb52a79 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf differ diff --git 
a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff new file mode 100644 index 0000000..f815f63 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 new file mode 100644 index 0000000..f2c76e5 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000..e9f60ca Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.eot differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000..855c845 --- /dev/null +++ b/docs/_build/html/_static/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.ttf differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.woff differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 differ diff --git a/docs/_build/html/_static/jquery-3.1.0.js 
b/docs/_build/html/_static/jquery-3.1.0.js new file mode 100644 index 0000000..f2fc274 --- /dev/null +++ b/docs/_build/html/_static/jquery-3.1.0.js @@ -0,0 +1,10074 @@ +/*eslint-disable no-unused-vars*/ +/*! + * jQuery JavaScript Library v3.1.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2016-07-07T21:44Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.1.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole 
matched element set as a clean array + get: function( num ) { + return num != null ? + + // Return just the one element from the set + ( num < 0 ? this[ num + this.length ] : this[ num ] ) : + + // Return all the elements in a clean array + slice.call( this ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = jQuery.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isArray: Array.isArray, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? 
+ "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.0 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-01-04 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + // Known :disabled false positives: + // IE: *[disabled]:not(button, input, select, textarea, optgroup, option, menuitem, fieldset) + // not IE: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Check form elements and option elements for explicit disabling + return "label" in elem && elem.disabled === disabled || + "form" in elem && elem.disabled === disabled || + + // Check non-disabled form elements for fieldset[disabled] ancestors + "form" in elem && elem.disabled === false && ( + // Support: IE6-11+ + // Ancestry is covered for us + elem.isDisabled === disabled || + + // Otherwise, assume any non-