Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
96 changes: 54 additions & 42 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -94,25 +94,31 @@ demonstrating how easy it is to find the long-only portfolio
that maximises the Sharpe ratio (a measure of risk-adjusted returns).

```python
import pandas as pd
from pypfopt import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
>>> import pandas as pd
>>> from pypfopt import EfficientFrontier
>>> from pypfopt import risk_models
>>> from pypfopt import expected_returns

# Read in price data
df = pd.read_csv("tests/resources/stock_prices.csv", parse_dates=True, index_col="date")
>>> df = pd.read_csv("tests/resources/stock_prices.csv", parse_dates=True, index_col="date")

# Calculate expected returns and sample covariance
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)
>>> mu = expected_returns.mean_historical_return(df)
>>> S = risk_models.sample_cov(df)

# Optimize for maximal Sharpe ratio
ef = EfficientFrontier(mu, S)
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
ef.save_weights_to_file("weights.csv") # saves to file
print(cleaned_weights)
ef.portfolio_performance(verbose=True)
>>> ef = EfficientFrontier(mu, S)
>>> raw_weights = ef.max_sharpe()
>>> cleaned_weights = ef.clean_weights()
>>> ef.save_weights_to_file("weights.csv") # saves to file
>>> cleaned_weights
OrderedDict({'GOOG': 0.0458, 'AAPL': 0.06743, 'FB': 0.2008, 'BABA': 0.08494, 'AMZN': 0.03525, 'GE': 0.0, 'AMD': 0.0, 'WMT': 0.0, 'BAC': 0.0, 'GM': 0.0, 'T': 0.0, 'UAA': 0.0, 'SHLD': 0.0, 'XOM': 0.0, 'RRC': 0.0, 'BBY': 0.01587, 'MA': 0.3287, 'PFE': 0.20394, 'JPM': 0.0, 'SBUX': 0.01726})
>>> ef.portfolio_performance(verbose=True)
Expected annual return: 29.9%
Annual volatility: 21.8%
Sharpe Ratio: 1.38
(0.29944709161230304, 0.21764331681393406, 1.375861643701672)

```

This outputs the following weights:
Expand Down Expand Up @@ -150,22 +156,17 @@ convert the above continuous weights to an actual allocation
that you could buy. Just enter the most recent prices, and the desired portfolio size ($10,000 in this example):

```python
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
>>> from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

>>> latest_prices = get_latest_prices(df)

latest_prices = get_latest_prices(df)
>>> da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=10000)
>>> allocation, leftover = da.greedy_portfolio()
>>> print("Discrete allocation:", allocation)
Discrete allocation: {'MA': 19, 'PFE': 57, 'FB': 12, 'BABA': 4, 'AAPL': 4, 'GOOG': 1, 'SBUX': 2, 'BBY': 2}
>>> print("Funds remaining: ${:.2f}".format(leftover))
Funds remaining: $17.46

da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=10000)
allocation, leftover = da.greedy_portfolio()
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))
```

```txt
12 out of 20 tickers were removed
Discrete allocation: {'GOOG': 1, 'AAPL': 4, 'FB': 12, 'BABA': 4, 'BBY': 2,
'MA': 20, 'PFE': 54, 'SBUX': 1}
Funds remaining: $11.89
```

_Disclaimer: nothing about this project constitutes investment advice,
Expand Down Expand Up @@ -255,20 +256,24 @@ The covariance matrix encodes not just the volatility of an asset, but also how
- Long/short: by default all of the mean-variance optimization methods in PyPortfolioOpt are long-only, but they can be initialised to allow for short positions by changing the weight bounds:

```python
ef = EfficientFrontier(mu, S, weight_bounds=(-1, 1))
>>> ef = EfficientFrontier(mu, S, weight_bounds=(-1, 1))

```

- Market neutrality: for the `efficient_risk` and `efficient_return` methods, PyPortfolioOpt provides an option to form a market-neutral portfolio (i.e. weights sum to zero). This is not possible for the max Sharpe portfolio and the min volatility portfolio because they are not invariant with respect to leverage. Market neutrality requires negative weights:

```python
ef = EfficientFrontier(mu, S, weight_bounds=(-1, 1))
ef.efficient_return(target_return=0.2, market_neutral=True)
>>> ef = EfficientFrontier(mu, S, weight_bounds=(-1, 1))
>>> ef.efficient_return(target_return=0.2, market_neutral=True)
OrderedDict({'GOOG': 0.0747287764570896, 'AAPL': 0.0532061998403115, 'FB': 0.0663647763595121, 'BABA': 0.0115771487708806, 'AMZN': 0.051794511454659, 'GE': -0.0594560621731438, 'AMD': -0.0678975317682523, 'WMT': -0.0817205719345985, 'BAC': -0.1413007724407138, 'GM': -0.1402101962690842, 'T': -0.13713261204016, 'UAA': 0.0002656163909862, 'SHLD': -0.0705951831340284, 'XOM': -0.0775452287164678, 'RRC': -0.0510171940919588, 'BBY': 0.0349455362769414, 'MA': 0.375760614087238, 'PFE': 0.1111984245745791, 'JPM': 0.0140774027288155, 'SBUX': 0.0329563456273947})

```

- Minimum/maximum position size: it may be the case that you want no security to form more than 10% of your portfolio. This is easy to encode:

```python
ef = EfficientFrontier(mu, S, weight_bounds=(0, 0.1))
>>> ef = EfficientFrontier(mu, S, weight_bounds=(0, 0.1))

```

One issue with mean-variance optimization is that it leads to many zero-weights. While these are
Expand All @@ -277,9 +282,12 @@ mean-variance portfolios to underperform out-of-sample. To that end, I have intr
objective function that can reduce the number of negligible weights for any of the objective functions. Essentially, it adds a penalty (parameterised by `gamma`) on small weights, with a term that looks just like L2 regularisation in machine learning. It may be necessary to try several `gamma` values to achieve the desired number of non-negligible weights. For the test portfolio of 20 securities, `gamma ~ 1` is sufficient

```python
ef = EfficientFrontier(mu, S)
ef.add_objective(objective_functions.L2_reg, gamma=1)
ef.max_sharpe()
>>> from pypfopt import objective_functions
>>> ef = EfficientFrontier(mu, S)
>>> ef.add_objective(objective_functions.L2_reg, gamma=1)
>>> ef.max_sharpe()
OrderedDict({'GOOG': 0.0819942016928946, 'AAPL': 0.0918509031802692, 'FB': 0.1073667333688086, 'BABA': 0.0680482478876387, 'AMZN': 0.1010796289877925, 'GE': 0.0309429468523964, 'AMD': 0.0, 'WMT': 0.0353042020828323, 'BAC': 0.0001739443220274, 'GM': 0.0, 'T': 0.0274224141523135, 'UAA': 0.0182927430888646, 'SHLD': 0.0, 'XOM': 0.0465545659178931, 'RRC': 0.0023903728743853, 'BBY': 0.0644567269626333, 'MA': 0.1426239959760212, 'PFE': 0.0840602539751452, 'JPM': 0.0279123528004041, 'SBUX': 0.0695257658776802})

```

### Black-Litterman allocation
Expand All @@ -291,13 +299,17 @@ the mean historical return. Check out the [docs](https://pyportfolioopt.readthed
on formatting inputs.

```python
S = risk_models.sample_cov(df)
viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
bl = BlackLittermanModel(S, pi="equal", absolute_views=viewdict, omega="default")
rets = bl.bl_returns()
>>> from pypfopt import risk_models, BlackLittermanModel

>>> S = risk_models.sample_cov(df)
>>> viewdict = {"AAPL": 0.20, "BBY": -0.30, "BAC": 0, "SBUX": -0.2, "T": 0.131321}
>>> bl = BlackLittermanModel(S, pi="equal", absolute_views=viewdict, omega="default")
>>> rets = bl.bl_returns()

>>> ef = EfficientFrontier(rets, S)
>>> ef.max_sharpe()
OrderedDict({'GOOG': 0.0, 'AAPL': 0.174876233679978, 'FB': 0.0503356854111169, 'BABA': 0.0950548676769248, 'AMZN': 0.0, 'GE': 0.0, 'AMD': 0.0, 'WMT': 0.0, 'BAC': 0.0, 'GM': 0.0, 'T': 0.5235307090794277, 'UAA': 0.0, 'SHLD': 0.0, 'XOM': 0.1298058417907498, 'RRC': 0.0, 'BBY': 0.0, 'MA': 0.0, 'PFE': 0.0263966623618028, 'JPM': 0.0, 'SBUX': 0.0})

ef = EfficientFrontier(rets, S)
ef.max_sharpe()
```

### Other optimizers
Expand Down Expand Up @@ -342,8 +354,7 @@ Tests are written in pytest (much more intuitive than `unittest` and the variant
PyPortfolioOpt provides a test dataset of daily returns for 20 tickers:

```python
['GOOG', 'AAPL', 'FB', 'BABA', 'AMZN', 'GE', 'AMD', 'WMT', 'BAC', 'GM',
'T', 'UAA', 'SHLD', 'XOM', 'RRC', 'BBY', 'MA', 'PFE', 'JPM', 'SBUX']
['GOOG', 'AAPL', 'FB', 'BABA', 'AMZN', 'GE', 'AMD', 'WMT', 'BAC', 'GM', 'T', 'UAA', 'SHLD', 'XOM', 'RRC', 'BBY', 'MA', 'PFE', 'JPM', 'SBUX']
```

These tickers have been informally selected to meet several criteria:
Expand Down Expand Up @@ -390,7 +401,7 @@ Contributions are _most welcome_. Have a look at the [Contribution Guide](https:
I'd like to thank all of the people who have contributed to PyPortfolioOpt since its release in 2018.
Special shout-outs to:

- Tuan Tran (who is now the primary maintainer!)
- Tuan Tran
- Philipp Schiele
- Carl Peasnell
- Felipe Schneider
Expand All @@ -400,3 +411,4 @@ Special shout-outs to:
- Thomas Schmelzer
- Rich Caputo
- Nicolas Knudde
- Franz Kiraly
8 changes: 4 additions & 4 deletions pypfopt/base_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,11 @@
"""

import collections
from collections.abc import Iterable
import copy
import json
import warnings
from collections.abc import Iterable
from typing import List
import warnings

import cvxpy as cp
import numpy as np
Expand Down Expand Up @@ -571,11 +571,11 @@ def portfolio_performance(
print("Expected annual return: {:.1f}%".format(100 * mu))
print("Annual volatility: {:.1f}%".format(100 * sigma))
print("Sharpe Ratio: {:.2f}".format(sharpe))
return mu, sigma, sharpe
return float(mu), float(sigma), float(sharpe)
else:
if verbose:
print("Annual volatility: {:.1f}%".format(100 * sigma))
return None, sigma, None
return None, float(sigma), None


def _get_all_args(expression: cp.Expression) -> List[cp.Expression]:
Expand Down
91 changes: 91 additions & 0 deletions tests/test_docs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
"""Tests that README.md Python code blocks execute without errors.

This test extracts all fenced code blocks labeled as Python from README.md and
executes them sequentially in a shared namespace. This ensures the examples in
our documentation stay correct as the code evolves.
"""

import doctest
from doctest import ELLIPSIS, IGNORE_EXCEPTION_DETAIL, NORMALIZE_WHITESPACE
import math
from pathlib import Path

import pytest


@pytest.fixture()
def readme_path() -> Path:
    """Provide the path to the project's README.md file.

    Walks upward from this test file's directory, checking each directory
    (including the filesystem root) for a README.md.

    Returns
    -------
    Path
        Path to the README.md file.

    Raises
    ------
    FileNotFoundError
        If README.md cannot be found in this directory or any ancestor.

    """
    current_dir = Path(__file__).resolve().parent
    while True:
        candidate = current_dir / "README.md"
        if candidate.is_file():
            return candidate
        if current_dir == current_dir.parent:
            # Reached the filesystem root without finding a README.md.
            # (The previous loop condition exited *before* checking the
            # root directory itself.)
            raise FileNotFoundError("README.md not found in any parent directory")
        current_dir = current_dir.parent


class FloatTolerantOutputChecker(doctest.OutputChecker):
    """OutputChecker that also accepts numerically-close floating point output.

    Falls back to standard doctest comparison first; if that fails, extracts
    every float-parseable token from the expected and actual output and
    compares them pairwise with a small tolerance. This keeps README
    doctests stable when solver output differs in the last few digits.
    """

    # Tolerances for the pairwise float comparison.
    _REL_TOL = 1e-3
    _ABS_TOL = 1e-5

    @staticmethod
    def _extract_floats(text):
        """Return all tokens of *text* that parse as floats, in order.

        Surrounding punctuation (brackets, quotes, colons) is stripped so
        values inside tuple/dict reprs such as ``(0.299, 0.217)`` are found.
        """
        values = []
        for token in text.replace(",", " ").split():
            try:
                values.append(float(token.strip("()[]{}'\":;")))
            except ValueError:
                continue
        return values

    def check_output(self, want, got, optionflags):
        # First try vanilla doctest comparison (honours optionflags).
        if super().check_output(want, got, optionflags):
            return True

        # Fall back to a float-tolerant comparison.
        want_floats = self._extract_floats(want)
        got_floats = self._extract_floats(got)

        # With no floats to compare there is nothing tolerant to do: the
        # exact comparison above already failed, so report a mismatch.
        # (Previously this path returned True for any non-numeric output,
        # because all() over an empty sequence is True.)
        if not want_floats or len(want_floats) != len(got_floats):
            return False

        return all(
            math.isclose(w, g, rel_tol=self._REL_TOL, abs_tol=self._ABS_TOL)
            for w, g in zip(want_floats, got_floats)
        )


def test_doc(readme_path):
    """Run every doctest embedded in README.md and assert none fail.

    Parameters
    ----------
    readme_path : Path
        Fixture providing the location of README.md.

    """
    parser = doctest.DocTestParser()
    runner = doctest.DocTestRunner(
        checker=FloatTolerantOutputChecker(),
        optionflags=ELLIPSIS | NORMALIZE_WHITESPACE | IGNORE_EXCEPTION_DETAIL,
    )

    # Read explicitly as UTF-8 so the test does not depend on the
    # platform's default locale encoding (e.g. cp1252 on Windows).
    doc = readme_path.read_text(encoding="utf-8")

    test = parser.get_doctest(doc, {}, readme_path.name, str(readme_path), 0)
    result = runner.run(test)

    assert result.failed == 0, f"{result.failed} README doctest(s) failed"
Loading