first commit

pull/17/head
robertmartin8 2018-05-29 21:34:59 +08:00
commit ba08915524
26 changed files with 8725 additions and 0 deletions

1
MANIFEST.in 100755
View File

@ -0,0 +1 @@
include README.rst

37
README.rst 100755
View File

@ -0,0 +1,37 @@
##############
PyPortfolioOpt
##############
************
Introduction
************
PyPortfolioOpt is a simple library that contains widely used portfolio optimisation techniques, with
a number of novel/experimental features.
*********************
Currently Implemented
*********************
- Efficient frontier
*******
Testing
*******
Tests use a dataset of daily returns for 20 tickers. These tickers have been informally selected
to meet a number of criteria
- reasonably liquid
- different performances and volatilities
- different amounts of data to test robustness
****************
Design decisions
****************
- Should be easy to swap out components to test
- Some robustness to missing data

216
docs/Makefile 100755
View File

@ -0,0 +1,216 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  applehelp  to make an Apple Help Book"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  coverage   to run coverage check of the documentation (if enabled)"

.PHONY: clean
clean:
	rm -rf $(BUILDDIR)/*

.PHONY: html
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

.PHONY: dirhtml
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

.PHONY: singlehtml
singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

.PHONY: pickle
pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

.PHONY: json
json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

.PHONY: htmlhelp
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

.PHONY: qthelp
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/package_template.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/package_template.qhc"

.PHONY: applehelp
applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

.PHONY: devhelp
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/package_template"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/package_template"
	@echo "# devhelp"

.PHONY: epub
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

.PHONY: latex
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

.PHONY: latexpdf
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: latexpdfja
latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: text
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

.PHONY: man
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

.PHONY: texinfo
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

.PHONY: info
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

.PHONY: gettext
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

.PHONY: changes
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

.PHONY: linkcheck
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

.PHONY: doctest
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

.PHONY: coverage
coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

.PHONY: xml
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

.PHONY: pseudoxml
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

View File

@ -0,0 +1,6 @@
PyPortfolioOpt
====================
.. module:: PyPortfolioOpt
.. autofunction:: add

291
docs/conf.py 100755
View File

@ -0,0 +1,291 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyPortfolioOpt documentation build configuration file, created by
# sphinx-quickstart on Fri May 13 14:31:17 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "PyPortfolioOpt"
copyright = "2018, Robert Andrew Martin"
author = "Robert Andrew Martin"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
# (Renamed from the "package_template" placeholder left by the project template.)
htmlhelp_basename = "PyPortfolioOptdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "PyPortfolioOpt.tex",
        "PyPortfolioOpt Documentation",
        author,
        "manual",
    )
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "pyportfolioopt", "PyPortfolioOpt Documentation", [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "PyPortfolioOpt",
        "PyPortfolioOpt Documentation",
        author,
        "PyPortfolioOpt",
        "Portfolio optimisation in Python.",
        "Miscellaneous",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

14
docs/index.rst 100755
View File

@ -0,0 +1,14 @@
Package template |version|
==========================
This is sample documentation for the Python package template. To start using
Sphinx docs, create an empty ``docs`` folder and run::
sphinx-quickstart
Contents:
.. toctree::
:maxdepth: 2
arith

BIN
media/logo_v0.pages 100644

Binary file not shown.

View File

@ -0,0 +1 @@
{"last_check":"2018-05-27T02:16:45Z","pypi_version":"10.0.1"}

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,74 @@
import numpy as np
# TODO discrete portfolio allocation input and return types
def discrete_portfolio_allocation(
    weights, min_allocation, portfolio_size, latest_prices
):
    """
    Convert continuous portfolio weights into a discrete share allocation,
    using a greedy algorithm.

    :param weights: (ticker, weight) pairs, e.g. the output of an optimiser
    :type weights: list of tuples
    :param min_allocation: tickers whose weight is not above this are dropped
    :type min_allocation: float
    :param portfolio_size: total funds available to invest
    :type portfolio_size: int/float
    :param latest_prices: most recent price for each ticker
    :type latest_prices: dict-like, keyed by ticker
    :return: 1. a dict containing the ticker and the number of shares that should be purchased,
             2. leftover funds
    :rtype: (dict, float)
    """
    # Drop any companies with negligible weights
    nonzero_cont_allocation = [i for i in weights if i[1] > min_allocation]
    remaining_tickers = [i[0] for i in nonzero_cont_allocation]
    print(
        f"{len(weights) - len(nonzero_cont_allocation)} out of {len(weights)} tickers were removed"
    )
    print(f"Remaining tickers: {remaining_tickers}\n")
    # First pass: greedily buy in order of decreasing weight.
    nonzero_cont_allocation.sort(key=lambda x: x[1])
    nonzero_cont_allocation = nonzero_cont_allocation[::-1]
    available_funds = portfolio_size
    shares_purchased = []
    share_prices = []
    for pair in nonzero_cont_allocation:
        ticker, weight = pair
        share_price = latest_prices[ticker]
        n_shares = int(weight * portfolio_size / share_price)
        cost_basis = n_shares * share_price
        if cost_basis > available_funds:
            n_shares = int(available_funds // share_price)
            # BUGFIX: recompute the cost after capping n_shares; previously the
            # *uncapped* cost was subtracted, driving available_funds negative
            # and corrupting the reported leftover.
            cost_basis = n_shares * share_price
            if n_shares == 0:
                print("Insufficient funds")
        available_funds -= cost_basis
        shares_purchased.append(n_shares)
        share_prices.append(share_price)
    # Second pass: spend remaining funds on the ticker with the largest
    # deficit between its ideal and actual weight.
    # NOTE(review): if the first pass purchased nothing, actual_weights.sum()
    # is zero and the division below is undefined — TODO confirm callers
    # always have portfolio_size >= min(latest_prices).
    while available_funds > 0:
        actual_weights = np.array(share_prices) * np.array(shares_purchased)
        actual_weights /= actual_weights.sum()
        ideal_weights = np.array([i[1] for i in nonzero_cont_allocation])
        deficit = ideal_weights - actual_weights
        idx = np.argmax(deficit)
        ticker, weight = nonzero_cont_allocation[idx]
        share_price = latest_prices[ticker]
        counter = 0
        while share_price > available_funds:
            # Find the second highest deficit and carry on (bail out after 10
            # attempts or once only over-allocated tickers remain).
            deficit[idx] = 0
            idx = np.argmax(deficit)
            if deficit[idx] < 0 or counter == 10:
                break
            ticker, weight = nonzero_cont_allocation[idx]
            share_price = latest_prices[ticker]
            counter += 1
        if deficit[idx] < 0 or counter == 10:
            break
        shares_purchased[idx] += 1
        available_funds -= share_price
    print(f"Funds remaining: {available_funds:.2f}")
    # Return a dict view of the (ticker, n_shares) pairs plus the leftover cash.
    num_shares = list(zip([i[0] for i in nonzero_cont_allocation], shares_purchased))
    return dict(num_shares), available_funds

View File

@ -0,0 +1,173 @@
import numpy as np
import scipy.optimize as sco
from . import objective_functions
import warnings
# TODO investigate market neutral for efficient risk
class EfficientFrontier:
    """
    Mean-variance portfolio optimisation via scipy's SLSQP solver:
    max-Sharpe, min-volatility, and efficient risk/return target portfolios.
    """

    def __init__(self, expected_returns, cov_matrix, weight_bounds=(0, 1)):
        """
        :param expected_returns: expected returns for each asset
        :type expected_returns: pd.Series, list, np vector.
        :param cov_matrix: covariance of returns for each asset
        :type cov_matrix: pd.DataFrame or np.array
        :param weight_bounds: minimum and maximum weight of an asset, defaults to (0, 1)
        :type weight_bounds: tuple, optional
        """
        # Inputs
        self.expected_returns = expected_returns
        self.cov_matrix = cov_matrix
        self.n_assets = len(expected_returns)
        # NOTE(review): assumes expected_returns has an .index (i.e. a pd.Series),
        # despite the docstring also allowing list/np vector — TODO confirm.
        self.tickers = list(expected_returns.index)
        # Optimisation parameters: start from equal weights, fully invested.
        self.initial_guess = np.array([1 / self.n_assets] * self.n_assets)
        self.constraints = [{"type": "eq", "fun": lambda x: np.sum(x) - 1}]
        self.bounds = self._make_valid_bounds(weight_bounds)
        # Optional
        self.risk_free_rate = 0.02
        # Outputs
        self.weights = None

    def _make_valid_bounds(self, test_bounds):
        """
        Broadcast a single (lower, upper) pair to one pair per asset, rejecting
        lower bounds that make the sum-to-one constraint infeasible.
        """
        if test_bounds[0] is not None:
            # A lower bound above 1/n_assets cannot satisfy sum(weights) == 1.
            if test_bounds[0] * self.n_assets > 1:
                raise ValueError("Lower bound is too high")
        return (test_bounds,) * self.n_assets

    def max_sharpe(self, alpha=0, risk_free_rate=0.02):
        r"""
        The 'tangent' portfolio that maximises the Sharpe Ratio. The Sharpe ratio is defined as

        .. math::

            \frac{\mu - R_f}{\sigma}

        :param alpha: L2 regularisation coefficient, defaults to 0
        :type alpha: float, optional
        :param risk_free_rate: risk free rate of borrowing/lending, defaults to 0.02
        :type risk_free_rate: float, optional
        :return: portfolio weights
        :rtype: dictionary: keys are tickers (string), values are weights (float)
        """
        self.risk_free_rate = risk_free_rate
        args = (self.expected_returns, self.cov_matrix, alpha, risk_free_rate)
        constraints = self.constraints
        result = sco.minimize(
            objective_functions.negative_sharpe,
            x0=self.initial_guess,
            args=args,
            method="SLSQP",
            bounds=self.bounds,
            constraints=constraints,
        )
        self.weights = result["x"]
        return dict(zip(self.tickers, self.weights))

    def min_volatility(self, alpha=0):
        """
        The minimum-volatility portfolio.

        :param alpha: L2 regularisation coefficient, defaults to 0
        :type alpha: float, optional
        :return: portfolio weights
        :rtype: dictionary: keys are tickers (string), values are weights (float)
        """
        args = (self.cov_matrix, alpha)
        constraints = self.constraints
        result = sco.minimize(
            objective_functions.volatility,
            x0=self.initial_guess,
            args=args,
            method="SLSQP",
            bounds=self.bounds,
            constraints=constraints,
        )
        self.weights = result["x"]
        return dict(zip(self.tickers, self.weights))

    def efficient_risk(self, target_risk, alpha=0, risk_free_rate=0.02):
        """
        Calculates the Sharpe-maximising portfolio for a given target risk.

        :param target_risk: the maximum acceptable volatility
        :param alpha: L2 regularisation coefficient, defaults to 0
        :param risk_free_rate: risk free rate of borrowing/lending, defaults to 0.02
        :return: portfolio weights
        :rtype: dictionary: keys are tickers (string), values are weights (float)
        """
        self.n_assets = len(self.expected_returns)
        args = (self.expected_returns, self.cov_matrix, alpha, risk_free_rate)
        # Inequality constraint: realised volatility must not exceed the target.
        constraints = self.constraints + [
            {
                "type": "ineq",
                "fun": lambda w: target_risk
                - objective_functions.volatility(w, self.cov_matrix),
            }
        ]
        result = sco.minimize(
            objective_functions.negative_sharpe,
            x0=self.initial_guess,
            args=args,
            method="SLSQP",
            bounds=self.bounds,
            constraints=constraints,
        )
        self.weights = result["x"]
        return dict(zip(self.tickers, self.weights))

    def efficient_return(self, target_return, alpha=0, market_neutral=False):
        """
        Calculates the "Markowitz" portfolio, minimising risk for a target return.

        :param target_return: the desired portfolio return
        :param alpha: L2 regularisation coefficient, defaults to 0
        :param market_neutral: if True, weights sum to zero rather than one
            (requires shorting; long-only bounds are widened to (-1, 1))
        :return: portfolio weights
        :rtype: dictionary: keys are tickers (string), values are weights (float)
        """
        self.n_assets = len(self.expected_returns)
        args = (self.cov_matrix, alpha)
        target_constraint = {
            "type": "eq",
            "fun": lambda w: w.dot(self.expected_returns) - target_return,
        }
        if market_neutral:
            if self.bounds[0][0] is not None and self.bounds[0][0] >= 0:
                warnings.warn(
                    "Market neutrality requires shorting - bounds have been amended",
                    RuntimeWarning,
                )
                self.bounds = self._make_valid_bounds((-1, 1))
            # Weights sum to zero (dollar-neutral) instead of one.
            constraints = [
                {"type": "eq", "fun": lambda x: np.sum(x)},
                target_constraint,
            ]
        else:
            constraints = self.constraints + [target_constraint]
        result = sco.minimize(
            objective_functions.volatility,
            x0=self.initial_guess,
            args=args,
            method="SLSQP",
            bounds=self.bounds,
            constraints=constraints,
        )
        self.weights = result["x"]
        return dict(zip(self.tickers, self.weights))

    def portfolio_performance(self, verbose=False):
        """
        Expected return, volatility and Sharpe ratio of the optimised portfolio.

        :param verbose: if True, print the three statistics
        :type verbose: bool, optional
        :raises ValueError: if weights have not been computed yet
        :return: (expected return, volatility, Sharpe ratio)
        :rtype: tuple of floats
        """
        if self.weights is None:
            raise ValueError("Weights not calculated yet")
        sigma = objective_functions.volatility(self.weights, self.cov_matrix)
        mu = self.weights.dot(self.expected_returns)
        # BUGFIX: risk_free_rate was previously passed positionally into the
        # `alpha` (L2 regularisation) parameter of negative_sharpe, silently
        # inflating the reported Sharpe by an alpha penalty while using the
        # default risk-free rate. Pass it by keyword instead.
        sharpe = -objective_functions.negative_sharpe(
            self.weights,
            self.expected_returns,
            self.cov_matrix,
            risk_free_rate=self.risk_free_rate,
        )
        if verbose:
            print("Expected return:", mu)
            print("Volatility:", sigma)
            print("Sharpe:", sharpe)
        return mu, sigma, sharpe

View File

@ -0,0 +1,36 @@
"""
This module implements possible models for the expected return.
It is assumed that daily returns are provided, though in reality the below methods are agnostic
to the time period (just change the frequency parameter to annualise).
"""
def mean_historical_return(daily_returns, frequency=252):
    """
    Annualise the arithmetic mean of daily historical returns.

    :param daily_returns: Daily returns, each row is a date and each column is a ticker
    :type daily_returns: pd.DataFrame
    :param frequency: number of days (more generally, number of your desired time period)
                      in a trading year, defaults to 252 days.
    :type frequency: int, optional
    :return: annualised mean daily return
    :rtype: pd.Series
    """
    mean_daily = daily_returns.mean()
    return frequency * mean_daily
def ema_historical_return(daily_returns, frequency=252, span=500):
    """
    Annualised exponentially-weighted mean of daily historical returns,
    giving higher weight to more recent data.

    :param daily_returns: Daily returns, each row is a date and each column is a ticker
    :type daily_returns: pd.DataFrame
    :param frequency: number of days (more generally, number of your desired time period)
                      in a trading year, defaults to 252 days.
    :type frequency: int, optional
    :param span: the time period for the EMA, defaults to 500-day EMA.
    :type span: int, optional
    :return: annualised exponentially-weighted mean daily return
    :rtype: pd.Series
    """
    smoothed = daily_returns.ewm(span=span).mean()
    latest_ema = smoothed.iloc[-1]
    return frequency * latest_ema

View File

@ -0,0 +1,49 @@
"""
This model implements possible objective functions for efficient optimisation
:return: [description]
:rtype: [type]
"""
import numpy as np
def negative_mean_return(weights, expected_returns):
    """
    Negative mean return of a portfolio (negated so it can be minimised).

    :param weights: normalised weights
    :type weights: np.array
    :param expected_returns: mean returns of the assets
    :type expected_returns: pd.Series
    :return: negative mean return
    :rtype: float
    """
    portfolio_return = weights.dot(expected_returns)
    return -portfolio_return
def negative_sharpe(
    weights, expected_returns, cov_matrix, alpha=0, risk_free_rate=0.02
):
    """
    Negative Sharpe ratio of a portfolio, plus an optional L2 penalty on the
    weights (negated so it can be minimised).

    :param weights: normalised weights
    :param expected_returns: mean returns for a number of stocks
    :param cov_matrix: covariance of these stocks.
    :param alpha: L2 regularisation coefficient, defaults to 0
    :param risk_free_rate: risk free rate, defaults to 0.02
    :return: the negative (regularised) Sharpe ratio
    """
    portfolio_return = weights.dot(expected_returns)
    variance = np.dot(weights, np.dot(cov_matrix, weights.T))
    portfolio_volatility = np.sqrt(variance)
    l2_penalty = alpha * np.sum(weights ** 2)
    sharpe = (portfolio_return - risk_free_rate) / portfolio_volatility
    return l2_penalty - sharpe
def volatility(weights, cov_matrix, alpha=0):
    """
    Volatility (standard deviation) of a portfolio, plus an optional L2
    penalty on the weights.

    :param weights: normalised weights
    :param cov_matrix: covariance of these stocks.
    :param alpha: L2 regularisation coefficient, defaults to 0
    :return: portfolio volatility plus the L2 penalty
    """
    l2_penalty = alpha * np.sum(weights ** 2)
    variance = np.dot(weights.T, np.dot(cov_matrix, weights))
    return np.sqrt(variance) + l2_penalty

View File

@ -0,0 +1,19 @@
"""
This module implements possible models for risk of a portfolio
"""
import pandas as pd
import warnings
def sample_cov(daily_returns):
    """
    Calculates the sample covariance matrix of daily returns, then annualises
    by scaling with 252 trading days.

    :param daily_returns: Daily returns, each row is a date and each column is a ticker
    :type daily_returns: pd.DataFrame or array-like
    :returns: annualised sample covariance matrix of daily returns
    :rtype: pd.DataFrame
    """
    returns = daily_returns
    if not isinstance(returns, pd.DataFrame):
        warnings.warn("daily_returns is not a dataframe", RuntimeWarning)
        returns = pd.DataFrame(returns)
    return 252 * returns.cov()

2
pytest.ini 100644
View File

@ -0,0 +1,2 @@
[pytest]
addopts = -v --ignore=lib

3
pyvenv.cfg 100644
View File

@ -0,0 +1,3 @@
home = /Users/Robert/anaconda/bin
include-system-site-packages = false
version = 3.6.5

6
readthedocs.yml 100755
View File

@ -0,0 +1,6 @@
python:
version: 3
pip_install: true
# For more fields that can be specified here, see:
# http://docs.readthedocs.io/en/latest/yaml-config.html

92
scratch.py 100644
View File

@ -0,0 +1,92 @@
# Scratch/demo script: exercises the EfficientFrontier API against the test
# dataset, recording observed outputs in the string blocks below. Relies on
# project-local imports, so it only runs from the repository root.
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.risk_models import sample_cov
from pypfopt.expected_returns import mean_historical_return
from pypfopt.tests.utilities_for_tests import setup_efficient_frontier
import pandas as pd

# Build optimiser inputs from the daily-returns CSV fixture.
df = pd.read_csv("pypfopt/tests/stock_returns.csv", parse_dates=True, index_col="date")
e_ret = mean_historical_return(df)
cov = sample_cov(df)

# Max-Sharpe (tangency) portfolio with the default (0, 1) bounds.
ef = setup_efficient_frontier()
w = ef.max_sharpe()
ef.portfolio_performance(verbose=True)
"""
Volatility: 0.21671629525656422
Expected return: 0.33035542211545876
Sharpe: 1.4320816150351678
"""

# Max-Sharpe with a 15% cap on each position.
ef = EfficientFrontier(e_ret, cov, weight_bounds=(0, 0.15))
w = ef.max_sharpe()
ef.portfolio_performance(verbose=True)
"""
Volatility: 0.21671629525656422
Expected return: 0.33035542211545876
Sharpe: 1.4320816150351678
"""

# Minimum-volatility portfolio.
ef = setup_efficient_frontier()
w = ef.min_volatility()
ef.portfolio_performance(verbose=True)
"""
Expected return: 0.1793245141665063
Volatility: 0.15915107045094778
Sharpe: 0.9981835740658117
"""

# Sharpe-maximising portfolio subject to a 19% volatility ceiling.
ef = setup_efficient_frontier()
w = ef.efficient_risk(0.19)
ef.portfolio_performance(verbose=True)
"""
Expected return: 0.28577470210889416
Volatility: 0.1900001239293301
Sharpe: 1.3964928761303517
"""

# Markowitz portfolio: minimum volatility for a 25% target return.
ef = setup_efficient_frontier()
w = ef.efficient_return(0.25)
ef.portfolio_performance(verbose=True)
"""
Expected return: 0.2500000000006342
Volatility: 0.17388540121530308
Sharpe: 1.3205072040538786
"""

# Sweep the risk-free rate from 0% to 9%, collecting the resulting Sharpes.
ef = EfficientFrontier(e_ret, cov)
sharpes = []
for i in range(10):
    ef.max_sharpe(risk_free_rate=i / 100)
    sharpe = ef.portfolio_performance(verbose=True)[2]
    sharpes.append(sharpe)

# L2 regularisation (alpha=1) to spread the allocation across more tickers;
# count how many positions exceed 2%.
ef = setup_efficient_frontier()
w = ef.max_sharpe(alpha=1)
sum(ef.weights > 0.02)
ef.portfolio_performance(verbose=True)

ef = setup_efficient_frontier()
w = ef.min_volatility(alpha=1)
sum(ef.weights > 0.02)
ef.portfolio_performance(verbose=True)
"""
Expected return: 0.2211888419683154
Volatility: 0.18050174016287326
Sharpe: 1.1133499289183508
"""

# test shorts
e_ret[::2] *= -1
ef = EfficientFrontier(e_ret, cov, weight_bounds=(None, None))
ef.max_sharpe()

# market neutral
ef = setup_efficient_frontier()
ef.bounds = ((-1, 1),) * 20
# NOTE(review): max_sharpe() takes no market_neutral parameter (only
# efficient_return does) — this call would raise a TypeError; confirm intent.
ef.max_sharpe(market_neutral=True)

28
setup.py 100755
View File

@ -0,0 +1,28 @@
# Packaging script for the initial PyPortfolioOpt release.
from distutils.core import setup

# Use the README as the long description shown on PyPI.
with open("README.rst") as f:
    readme = f.read()

setup(
    name="PyPortfolioOpt",
    version="0.1",
    description="PyPortfolioOpt: Efficient Frontier, Black Litterman, Monte Carlo optimisation methods",
    long_description=readme,
    author="Robert Andrew Martin",
    # Fixed: the address previously contained spaces ("... @ gmail.com"),
    # which is not a valid email address for package metadata.
    author_email="martin.robertandrew@gmail.com",
    packages=["pypfopt", "pypfopt.tests"],
    classifiers=[
        "Development Status :: 1 - Planning",
        "Environment :: Console",
        "Intended Audience :: Financial and Insurance Industry",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Office/Business :: Financial",
        "Topic :: Office/Business :: Financial :: Investment",
        "Topic :: Scientific/Engineering :: Mathematics",
    ],
)

View File

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,417 @@
import pandas as pd
from pypfopt.efficient_frontier import EfficientFrontier
from tests.utilities_for_tests import get_data, setup_efficient_frontier
import pytest
import numpy as np
import warnings
def test_data_source():
    """The CSV fixture should load as a date-indexed DataFrame of 20 tickers."""
    prices = get_data()
    assert isinstance(prices, pd.DataFrame)
    n_rows, n_cols = prices.shape
    assert n_cols == 20
    assert n_rows == 7125
    assert prices.index.is_all_dates
def test_portfolio_performance():
    """portfolio_performance must raise before optimising and succeed after."""
    ef = setup_efficient_frontier()
    # No weights have been computed yet, so this should fail loudly.
    with pytest.raises(ValueError):
        ef.portfolio_performance()
    # After an optimisation, a truthy performance tuple is available.
    ef.max_sharpe()
    assert ef.portfolio_performance()
def test_max_sharpe_long_only():
    """Long-only max-Sharpe: well-formed weights dict plus pinned regression values."""
    ef = setup_efficient_frontier()
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    # Regression check against previously observed performance numbers.
    expected = (0.3303554237026972, 0.21671629636481254, 1.4288438866031374)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_short():
    """Allowing shorts should give valid weights and beat the long-only Sharpe."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    weights = ef.max_sharpe()
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.40723757138191374, 0.24823079451957306, 1.5524922427959371)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
    # Shorting enlarges the feasible set, so the Sharpe should improve.
    short_sharpe = ef.portfolio_performance()[2]
    long_only = setup_efficient_frontier()
    long_only.max_sharpe()
    assert short_sharpe > long_only.portfolio_performance()[2]
def test_max_sharpe_L2_reg():
    """L2-regularised max-Sharpe: valid weights dict with pinned performance."""
    ef = setup_efficient_frontier()
    weights = ef.max_sharpe(alpha=1)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.3062919882686126, 0.20291367026287507, 1.4087639167552641)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_max_sharpe_L2_reg_many_values():
    """Increasing the L2 penalty should progressively spread out the weights."""
    ef = setup_efficient_frontier()
    ef.max_sharpe()
    # Number of weights above 1% for the unregularised solution.
    previous_count = sum(ef.weights > 0.01)
    for alpha in np.arange(0.5, 5, 0.5):
        ef.max_sharpe(alpha=alpha)
        np.testing.assert_almost_equal(ef.weights.sum(), 1)
        count = sum(ef.weights > 0.01)
        # A stronger penalty must not shrink the set of non-negligible weights.
        assert count >= previous_count
        previous_count = count
def test_max_sharpe_L2_reg_limit_case():
    """With a huge L2 penalty, the optimum collapses to equal weighting."""
    ef = setup_efficient_frontier()
    ef.max_sharpe(alpha=1e10)
    uniform = np.full(ef.n_assets, 1 / ef.n_assets)
    np.testing.assert_array_almost_equal(ef.weights, uniform)
def test_max_sharpe_L2_reg_reduces_sharpe():
    """L2 regularisation trades Sharpe ratio for diversification."""
    unregularised = setup_efficient_frontier()
    unregularised.max_sharpe()
    baseline_sharpe = unregularised.portfolio_performance()[2]
    regularised = setup_efficient_frontier()
    regularised.max_sharpe(alpha=1)
    assert regularised.portfolio_performance()[2] < baseline_sharpe
def test_max_sharpe_L2_reg_with_shorts():
    """L2 regularisation should spread weights even when shorting is allowed."""
    unregularised = setup_efficient_frontier()
    unregularised.max_sharpe()
    baseline_count = sum(unregularised.weights > 0.01)
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    weights = ef.max_sharpe(alpha=1)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.3236047844566581, 0.20241509723550233, 1.4969817524033966)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
    # Regularisation should not reduce the number of weights above 1%.
    assert sum(ef.weights > 0.01) >= baseline_count
def test_max_sharpe_risk_free_rate():
    """A higher risk-free rate should lower the achievable Sharpe, and vice versa."""
    ef = setup_efficient_frontier()
    ef.max_sharpe()
    baseline_sharpe = ef.portfolio_performance()[2]
    ef.max_sharpe(risk_free_rate=0.10)
    assert ef.portfolio_performance()[2] <= baseline_sharpe
    ef.max_sharpe(risk_free_rate=0)
    assert ef.portfolio_performance()[2] >= baseline_sharpe
def test_min_volatility():
    """Long-only min-volatility: valid weights dict with pinned performance."""
    ef = setup_efficient_frontier()
    weights = ef.min_volatility()
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.1793245141665063, 0.15915107045094778, 0.9981835740658117)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_min_volatility_short():
    """Min-volatility with shorting: valid weights and lower vol than long-only."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    weights = ef.min_volatility()
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.17225673749865328, 0.15559209747801794, 0.9752992044136976)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
    # A larger feasible set must not increase the minimum volatility.
    vol_with_shorts = ef.portfolio_performance()[1]
    long_only = setup_efficient_frontier()
    long_only.min_volatility()
    assert vol_with_shorts < long_only.portfolio_performance()[1]
def test_min_volatility_L2_reg():
    """L2-regularised min-volatility: valid weights dict with pinned performance."""
    ef = setup_efficient_frontier()
    weights = ef.min_volatility(alpha=1)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.2211888419683154, 0.18050174016287326, 1.1133499289183508)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_min_volatility_L2_reg_many_values():
    """Raising the L2 penalty on min-volatility should spread out the weights."""
    ef = setup_efficient_frontier()
    ef.min_volatility()
    # Number of weights above 1% without regularisation.
    previous_count = sum(ef.weights > 0.01)
    for alpha in np.arange(0.5, 5, 0.5):
        ef.min_volatility(alpha=alpha)
        np.testing.assert_almost_equal(ef.weights.sum(), 1)
        count = sum(ef.weights > 0.01)
        # Stronger penalty must not shrink the set of non-negligible weights.
        assert count >= previous_count
        previous_count = count
def test_efficient_risk():
    """Long-only efficient_risk at a 19% volatility target matches pinned values."""
    ef = setup_efficient_frontier()
    weights = ef.efficient_risk(0.19)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(), (0.285775, 0.19, 1.396493), atol=1e-6
    )
def test_efficient_risk_short():
    """efficient_risk with shorting at a 19% target: valid weights, pinned
    performance, and a better Sharpe than the long-only portfolio at the
    same volatility target."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    w = ef.efficient_risk(0.19)
    assert isinstance(w, dict)
    assert list(w.keys()) == ef.tickers
    assert list(w.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.30468522897560224, 0.19, 1.4947624032507056),
        atol=1e-6,  # fixed: was atol=1e6, which made the comparison vacuous
    )
    sharpe = ef.portfolio_performance()[2]
    ef_long_only = setup_efficient_frontier()
    # Compare like-for-like: same efficient_risk target as above.
    # (Was efficient_return(0.25), which optimises a different objective.)
    ef_long_only.efficient_risk(0.19)
    long_only_sharpe = ef_long_only.portfolio_performance()[2]
    assert sharpe > long_only_sharpe
def test_efficient_risk_many_values():
    """Across a range of targets, achieved volatility should track the target."""
    ef = setup_efficient_frontier()
    for target in np.arange(0.16, 0.21, 0.01):
        ef.efficient_risk(target)
        np.testing.assert_almost_equal(ef.weights.sum(), 1)
        achieved_vol = ef.portfolio_performance()[1]
        assert abs(target - achieved_vol) < 0.05
def test_efficient_risk_L2_reg():
    """L2-regularised efficient_risk at 19% target matches pinned values."""
    ef = setup_efficient_frontier()
    weights = ef.efficient_risk(0.19, alpha=1)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.2843888327412046, 0.19, 1.3895318474675356),
        atol=1e-6,
    )
def test_efficient_risk_L2_reg_many_values():
    """Raising the L2 penalty on efficient_risk should spread out the weights."""
    ef = setup_efficient_frontier()
    ef.efficient_risk(0.19)
    # Number of weights above 1% without regularisation.
    previous_count = sum(ef.weights > 0.01)
    for alpha in np.arange(0.5, 5, 0.5):
        ef.efficient_risk(0.19, alpha=alpha)
        np.testing.assert_almost_equal(ef.weights.sum(), 1)
        count = sum(ef.weights > 0.01)
        # Stronger penalty must not shrink the set of non-negligible weights.
        assert count >= previous_count
        previous_count = count
def test_efficient_return():
    """Long-only efficient_return at a 25% return target matches pinned values."""
    ef = setup_efficient_frontier()
    weights = ef.efficient_return(0.25)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(), (0.25, 0.173885, 1.320507), atol=1e-6
    )
def test_efficient_return_many_values():
    """Across a range of targets, achieved return should track the target."""
    ef = setup_efficient_frontier()
    for target in np.arange(0.19, 0.30, 0.01):
        ef.efficient_return(target)
        np.testing.assert_almost_equal(ef.weights.sum(), 1)
        achieved_return = ef.portfolio_performance()[0]
        assert abs(target - achieved_return) < 0.05
def test_efficient_return_short():
    """efficient_return with shorting should beat long-only at the same target."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    weights = ef.efficient_return(0.25)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.25, 0.16826260520748268, 1.3641098601259731)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
    # Shorting enlarges the feasible set, so the Sharpe should improve.
    short_sharpe = ef.portfolio_performance()[2]
    long_only = setup_efficient_frontier()
    long_only.efficient_return(0.25)
    assert short_sharpe > long_only.portfolio_performance()[2]
def test_efficient_return_L2_reg():
    """L2-regularised efficient_return at 25% target matches pinned values."""
    ef = setup_efficient_frontier()
    weights = ef.efficient_return(0.25, alpha=1)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    expected = (0.25, 0.18813935436629708, 1.221273523695721)
    np.testing.assert_allclose(ef.portfolio_performance(), expected)
def test_efficient_return_L2_reg_many_values():
    """Raising the L2 penalty on efficient_return should spread out the weights."""
    ef = setup_efficient_frontier()
    ef.efficient_return(0.25)
    # Number of weights above 1% without regularisation.
    previous_count = sum(ef.weights > 0.01)
    for alpha in np.arange(0.5, 5, 0.5):
        ef.efficient_return(0.25, alpha=alpha)
        np.testing.assert_almost_equal(ef.weights.sum(), 1)
        count = sum(ef.weights > 0.01)
        # Stronger penalty must not shrink the set of non-negligible weights.
        assert count >= previous_count
        previous_count = count
def test_efficient_return_market_neutral():
    """Market-neutral efficient_return: weights sum to zero and stay in bounds."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
    )
    weights = ef.efficient_return(0.25, market_neutral=True)
    assert isinstance(weights, dict)
    assert list(weights.keys()) == ef.tickers
    assert list(weights.keys()) == list(ef.expected_returns.index)
    # Dollar-neutral: net exposure is zero, every weight within (-1, 1).
    np.testing.assert_almost_equal(ef.weights.sum(), 0)
    assert (ef.weights < 1).all() and (ef.weights > -1).all()
    np.testing.assert_almost_equal(
        ef.portfolio_performance(),
        (0.24999999999755498, 0.20567338787141307, 1.1087493060316183),
    )
    # The zero-net-exposure constraint should cost Sharpe versus long-only.
    neutral_sharpe = ef.portfolio_performance()[2]
    long_only = setup_efficient_frontier()
    long_only.efficient_return(0.25)
    assert long_only.portfolio_performance()[2] > neutral_sharpe
def test_efficient_return_market_neutral_warning():
    """Requesting market neutrality with long-only bounds should emit a
    RuntimeWarning saying the bounds were amended."""
    ef = setup_efficient_frontier()
    with warnings.catch_warnings(record=True) as w:
        # Ensure the warning is recorded even if it was already triggered
        # earlier in the test session — the default "once per location"
        # filter would otherwise suppress it and make len(w) == 1 flaky.
        warnings.simplefilter("always")
        ef.efficient_return(0.25, market_neutral=True)
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert (
            str(w[0].message)
            == "Market neutrality requires shorting - bounds have been amended"
        )
def test_custom_upper_bound():
    """A 10% per-asset cap must be respected by max_sharpe."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(0, 0.10)
    )
    ef.max_sharpe()
    ef.portfolio_performance()
    assert ef.weights.max() <= 0.1
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
def test_custom_lower_bound():
    """A 2% per-asset floor must be respected by max_sharpe."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(0.02, 1)
    )
    ef.max_sharpe()
    assert ef.weights.min() >= 0.02
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
def test_custom_bounds():
    """Both a 3% floor and a 13% cap must be respected simultaneously."""
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(0.03, 0.13)
    )
    ef.max_sharpe()
    assert ef.weights.min() >= 0.03
    assert ef.weights.max() <= 0.13
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
def test_custom_bounds_error():
    """Infeasible bounds must raise; sensible bounds must construct fine."""
    # A 0.06 floor is rejected — presumably because 20 assets x 0.06 > 1,
    # so the weights could never sum to one.
    with pytest.raises(ValueError):
        EfficientFrontier(
            *setup_efficient_frontier(data_only=True), weight_bounds=(0.06, 1)
        )
    assert EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(0, 1)
    )

View File

View File

@ -0,0 +1,64 @@
from pypfopt import objective_functions
import pandas as pd
import numpy as np
from tests.utilities_for_tests import get_data
from pypfopt.expected_returns import mean_historical_return
from pypfopt.risk_models import sample_cov
def test_negative_mean_return_dummy():
    """On toy data, negative_mean_return equals minus the weighted mean return."""
    weights = np.array([0.3, 0.1, 0.2, 0.25, 0.15])
    expected_rets = pd.Series([0.19, 0.08, 0.09, 0.23, 0.17])
    neg_mu = objective_functions.negative_mean_return(weights, expected_rets)
    assert isinstance(neg_mu, float)
    assert neg_mu < 0
    # Two equivalent formulations of minus the dot product.
    assert neg_mu == -weights.dot(expected_rets)
    assert neg_mu == -(weights * expected_rets).sum()
def test_negative_mean_return_real():
    """With equal weights on real data, the objective is minus the average return."""
    df = get_data()
    mu = mean_historical_return(df)
    n = len(mu)
    equal_weights = np.array([1 / n] * n)
    neg_mu = objective_functions.negative_mean_return(equal_weights, mu)
    assert isinstance(neg_mu, float)
    assert neg_mu < 0
    assert neg_mu == -equal_weights.dot(mu)
    assert neg_mu == -(equal_weights * mu).sum()
    # Equal weighting reduces the dot product to a simple average.
    np.testing.assert_almost_equal(-mu.sum() / n, neg_mu)
def test_negative_sharpe():
    """negative_sharpe should be consistent with its mean-return and vol pieces."""
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    n = len(mu)
    w = np.array([1 / n] * n)
    neg_sharpe = objective_functions.negative_sharpe(w, mu, S)
    assert isinstance(neg_sharpe, float)
    assert neg_sharpe < 0
    # Undo the Sharpe formula: sharpe * sigma - rf should recover -mu
    # (0.02 presumably being the default risk-free rate — confirm).
    sigma = np.sqrt(np.dot(w, np.dot(S, w.T)))
    neg_mu = objective_functions.negative_mean_return(w, mu)
    np.testing.assert_almost_equal(neg_sharpe * sigma - 0.02, neg_mu)
    # Risk-free rate increasing should lead to negative Sharpe increasing.
    assert neg_sharpe < objective_functions.negative_sharpe(
        w, mu, S, risk_free_rate=0.1
    )
def test_volatility_dummy():
    """Toy diagonal covariance: volatility should equal sqrt(w' S w) = sqrt(0.244)."""
    weights = np.array([0.4, 0.4, 0.2])
    diag_cov = np.diag([0.5, 0.8, 0.9])
    result = objective_functions.volatility(weights, diag_cov)
    np.testing.assert_almost_equal(result, 0.244 ** 0.5)
def test_volatility():
    """Equal-weight portfolio volatility on the real dataset is pinned."""
    df = get_data()
    S = sample_cov(df)
    n_assets = df.shape[1]
    equal_weights = np.array([1 / n_assets] * n_assets)
    result = objective_functions.volatility(equal_weights, S)
    np.testing.assert_almost_equal(result, 0.21209018103844543)

View File

@ -0,0 +1,52 @@
import pandas as pd
import numpy as np
from pypfopt import risk_models
from tests.utilities_for_tests import get_data
import warnings
def test_sample_cov_dummy():
    """De-annualising sample_cov by 252 should recover the plain sample covariance."""
    toy_data = pd.DataFrame(
        [
            [4.0, 2.0, 0.6],
            [4.2, 2.1, 0.59],
            [3.9, 2.0, 0.58],
            [4.3, 2.1, 0.62],
            [4.1, 2.2, 0.63],
        ]
    )
    expected = pd.DataFrame(
        [
            [0.02500, 0.00750, 0.00175],
            [0.00750, 0.00700, 0.00135],
            [0.00175, 0.00135, 0.00043],
        ]
    )
    # sample_cov presumably annualises by 252 trading days; divide it back out.
    raw_cov = risk_models.sample_cov(toy_data) / 252
    pd.testing.assert_frame_equal(raw_cov, expected)
def test_sample_cov_real():
    """Real-data covariance: square, aligned with the tickers, and NaN-free."""
    df = get_data()
    S = risk_models.sample_cov(df)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
def test_sample_cov_type_warning():
    """Passing a plain ndarray should warn but still yield the same covariance."""
    df = get_data()
    cov_from_df = risk_models.sample_cov(df)
    returns_as_array = np.array(df)
    with warnings.catch_warnings(record=True) as w:
        # Ensure the warning is captured even if it was already raised earlier
        # in the session — the default "once per location" filter would
        # otherwise suppress it and make len(w) == 1 flaky.
        warnings.simplefilter("always")
        cov_from_array = risk_models.sample_cov(returns_as_array)
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert str(w[0].message) == "daily_returns is not a dataframe"
    np.testing.assert_array_almost_equal(
        cov_from_df.values, cov_from_array.values, decimal=6
    )

View File

@ -0,0 +1,17 @@
import pandas as pd
from pypfopt import expected_returns
from pypfopt import risk_models
from pypfopt.efficient_frontier import EfficientFrontier
def get_data():
    """Load the daily stock-returns test fixture, indexed by parsed dates."""
    csv_path = "tests/stock_returns.csv"
    return pd.read_csv(csv_path, parse_dates=True, index_col="date")
def setup_efficient_frontier(data_only=False):
    """Build an EfficientFrontier from the test dataset.

    If ``data_only`` is True, return the (mean returns, covariance) pair
    instead, so callers can construct the frontier with custom bounds.
    """
    df = get_data()
    mu = expected_returns.mean_historical_return(df)
    S = risk_models.sample_cov(df)
    return (mu, S) if data_only else EfficientFrontier(mu, S)