As Edward is deprecated and requires an older version of TensorFlow, one can create a dedicated virtual environment for the following example:
$ python3 --version
Python 3.6.8
$ python3 -m venv edward
$ source edward/bin/activate
(edward) $ pip3 install --upgrade pip setuptools wheel
(edward) $ cat edward.txt
tensorflow==1.7
edward~=1.3
scipy~=1.2
pandas~=0.24
matplotlib~=3.0
(edward) $ pip3 install -r edward.txt
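Before running anything, it can help to confirm that the pinned releases resolved as expected. A minimal sanity check (a hypothetical check_versions.py, not part of the original setup):

#!/usr/bin/env python3
# Hypothetical sanity check that the pinned packages import correctly
# inside the "edward" virtual environment.
import tensorflow as tf
import edward as ed

print(tf.__version__)  # expect 1.7.x
print(ed.__version__)  # expect 1.3.x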
I have a minimal working example of using Hamiltonian Monte Carlo with Edward, called edward_old.py:
#!/usr/bin/env python3
import numpy as np
import scipy.stats
import tensorflow as tf
import edward as ed
import pandas as pd
import matplotlib.pyplot as plt


def generate_samples(data, n_samples):
    # Pick initial point for MCMC chains based on the data
    low, med, high = np.percentile(data, (16, 50, 84))
    mu_init = np.float32(med)
    t_init = np.float32(np.log(0.5 * (high - low)))

    # Build a very simple model
    mu = ed.models.Uniform(-1.0, 1.0)
    t = ed.models.Uniform(*np.log((0.05, 1.0), dtype=np.float32))
    X = ed.models.Normal(
        loc=tf.fill(data.shape, mu), scale=tf.fill(data.shape, tf.exp(t))
    )

    # Empirical samples of a scalar
    q_mu = ed.models.Empirical(params=tf.Variable(tf.fill((n_samples,), mu_init)))
    q_t = ed.models.Empirical(params=tf.Variable(tf.fill((n_samples,), t_init)))

    # Run inference using HMC to generate samples.
    with tf.Session() as sess:
        inference = ed.HMC({mu: q_mu, t: q_t}, data={X: data})
        inference.run(step_size=0.01, n_steps=10)
        mu_samples, t_samples = sess.run([q_mu.params, q_t.params])

    return mu_samples, t_samples


def visualize(samples, mu_grid, sigma_grid):
    fig, ax = plt.subplots(1, 1, figsize=(6, 5))
    ax.scatter(samples['mu'], samples['sigma'], s=5, lw=0, c='black')
    ax.set_xlim(mu_grid[0], mu_grid[-1])
    ax.set_ylim(sigma_grid[0], sigma_grid[-1])
    ax.set_title('Edward')
    ax.set_xlabel(r'$\mu$')
    ax.set_ylabel(r'$\sigma$')
    plt.savefig('edward_old.pdf')


def main():
    np.random.seed(0)
    tf.set_random_seed(0)

    # Generate pseudodata from draws from a single normal distribution
    dist_mean = 0.0
    dist_std = 0.5
    n_events = 5000
    toy_data = scipy.stats.norm.rvs(dist_mean, dist_std, size=n_events)

    mu_samples, t_samples = generate_samples(toy_data, n_events)
    samples = pd.DataFrame({'mu': mu_samples, 'sigma': np.exp(t_samples)})

    n_grid = 50
    mu_grid = np.linspace(*np.percentile(mu_samples, (0.5, 99.5)), n_grid)
    sigma_grid = np.linspace(*np.exp(np.percentile(t_samples, (0.5, 99.5))), n_grid)
    visualize(samples, mu_grid, sigma_grid)


if __name__ == '__main__':
    main()
which produces the plot below when run with
(edward) $ python3 edward_old.py
However, when I try to replicate it using TensorFlow Probability and Edward2 with the following environment
$ python3 --version
Python 3.6.8
$ python3 -m venv tfp-edward2
$ source tfp-edward2/bin/activate
(tfp-edward2) $ pip3 install --upgrade pip setuptools wheel
(tfp-edward2) $ cat tfp-edward2.txt
tensorflow~=1.13
tensorflow-probability~=0.6
scipy~=1.2
pandas~=0.24
matplotlib~=3.0
(tfp-edward2) $ pip3 install -r tfp-edward2.txt
and the following changes to edward_old.py's generate_samples in a file called edward2.py
#!/usr/bin/env python3
import numpy as np
import scipy.stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
import pandas as pd
import matplotlib.pyplot as plt


def generate_samples(data, n_samples):
    # Pick initial point for MCMC chains based on the data
    low, med, high = np.percentile(data, (16, 50, 84))
    mu_init = np.float32(med)
    t_init = np.float32(np.log(0.5 * (high - low)))

    def model(data_shape):
        mu = ed.Uniform(
            low=tf.fill(data_shape, -1.0), high=tf.fill(data_shape, 1.0), name="mu"
        )
        t = ed.Uniform(
            low=tf.log(tf.fill(data_shape, 0.05)),
            high=tf.log(tf.fill(data_shape, 1.0)),
            name="t",
        )
        x = ed.Normal(loc=mu, scale=tf.exp(t), name="x")
        return x

    log_joint = ed.make_log_joint_fn(model)

    def target_log_prob_fn(mu, t):
        """Target log-probability as a function of states."""
        return log_joint(data.shape, mu=mu, t=t, x=data)

    step_size = tf.get_variable(
        name='step_size',
        initializer=0.01,
        use_resource=True,  # For TFE compatibility
        trainable=False,
    )
    num_burnin_steps = 1000

    hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        num_leapfrog_steps=5,
        step_size=step_size,
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
            num_adaptation_steps=int(num_burnin_steps * 0.8)
        ),
    )

    # How should these be done?
    q_mu = tf.random_normal(data.shape, mean=mu_init)
    q_t = tf.random_normal(data.shape, mean=t_init)

    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=n_samples,
        current_state=[q_mu, q_t],
        kernel=hmc_kernel,
        num_burnin_steps=num_burnin_steps,
    )

    # Initialize all constructed variables.
    init_op = tf.global_variables_initializer()

    # Run the inference using HMC to generate samples
    with tf.Session() as sess:
        init_op.run()
        states_, results_ = sess.run([states, kernel_results])

    mu_samples, t_samples = states_[0][0], states_[1][0]
    return mu_samples, t_samples
Running with
(tfp-edward2) $ python3 edward2.py
shows that there are some obvious problems. I don't think I am formulating the equivalent of ed.models.Empirical correctly, so thoughts on that, or on anything else I am doing wrong, would be great.
I have already tried to follow the "Upgrading from Edward to Edward2" examples, but I haven't understood them well enough to transfer from the deep_exponential_family model used there to this example.
The problem I created for myself was completely messing up the shapes of my distributions. What I failed to grasp at first was that the current_state of my tfp.mcmc.sample_chain should have been scalars (shape == ()) representing the initial positions of the chains. Once I realized this, it became clear that those positions, q_mu and q_t, had quite the wrong shape: they should be scalars derived from the data, here taken as the mean of samples drawn around the data-based estimates

q_mu = tf.reduce_mean(tf.random_normal((1000,), mean=mu_init))
q_t = tf.reduce_mean(tf.random_normal((1000,), mean=t_init))

As these values are scalars, I had been getting the shapes in my model wrong as well. I had been creating samples of my random variables with the same shape as my data, mistakenly thinking that this just moved the shaping from x to mu and t. Of course mu and t are meant to be scalar random variables drawn from their respective Uniform distributions, which then parameterize x's Normal distribution, from which data.shape samples are drawn:
def model(data_shape):
    mu = ed.Uniform(low=-1.0, high=1.0, name="mu")
    t = ed.Uniform(low=tf.log(0.05), high=tf.log(1.0), name="t")
    x = ed.Normal(
        loc=tf.fill(data_shape, mu), scale=tf.fill(data_shape, tf.exp(t)), name="x"
    )
    return x
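To see why the scalar formulation is the right one, here is a quick standalone shape check (a sketch, not part of the original answer; the data here is just a stand-in):

#!/usr/bin/env python3
# Hypothetical shape check: with scalar mu and t, the joint log-probability
# over the whole dataset is itself a scalar, which is what each chain of
# tfp.mcmc.HamiltonianMonteCarlo expects from target_log_prob_fn.
import numpy as np
import tensorflow as tf
from tensorflow_probability import edward2 as ed


def model(data_shape):
    mu = ed.Uniform(low=-1.0, high=1.0, name="mu")
    t = ed.Uniform(low=tf.log(0.05), high=tf.log(1.0), name="t")
    x = ed.Normal(
        loc=tf.fill(data_shape, mu), scale=tf.fill(data_shape, tf.exp(t)), name="x"
    )
    return x


log_joint = ed.make_log_joint_fn(model)
data = np.random.normal(0.0, 0.5, size=5000).astype(np.float32)
log_prob = log_joint(data.shape, mu=np.float32(0.0), t=np.float32(np.log(0.5)), x=data)

with tf.Session() as sess:
    print(log_prob.shape)   # expect (): a scalar target log-probability
    print(sess.run(log_prob))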
Once the model is fixed, the only thing left is to access the states properly:

with tf.Session() as sess:
    init_op.run()
    states_, results_ = sess.run([states, kernel_results])

mu_samples, t_samples = states_[0], states_[1]
and that produces the image below when run with
(tfp-edward2) $ python3 edward2.py
which is a good match for the original produced with Edward.
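Beyond eyeballing the plot, the chain itself can be sanity-checked. As a sketch (assuming the kernel result structure of tensorflow-probability ~0.6, whose Metropolis-Hastings results expose an is_accepted field), the fraction of accepted proposals can be printed inside the tf.Session() block of edward2.py:

# Inside the tf.Session() block, after
# states_, results_ = sess.run([states, kernel_results]):
acceptance_rate = results_.is_accepted.mean()  # boolean array over kept samples
print('HMC acceptance rate: {:.3f}'.format(acceptance_rate))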
The fully corrected script is below:
#!/usr/bin/env python3
import numpy as np
import scipy.stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
import pandas as pd
import matplotlib.pyplot as plt


def generate_samples(data, n_samples):
    # Pick initial point for MCMC chains based on the data
    low, med, high = np.percentile(data, (16, 50, 84))
    mu_init = np.float32(med)
    t_init = np.float32(np.log(0.5 * (high - low)))

    def model(data_shape):
        mu = ed.Uniform(low=-1.0, high=1.0, name="mu")
        t = ed.Uniform(low=tf.log(0.05), high=tf.log(1.0), name="t")
        x = ed.Normal(
            loc=tf.fill(data_shape, mu), scale=tf.fill(data_shape, tf.exp(t)), name="x"
        )
        return x

    log_joint = ed.make_log_joint_fn(model)

    def target_log_prob_fn(mu, t):
        """Target log-probability as a function of states."""
        return log_joint(data.shape, mu=mu, t=t, x=data)

    step_size = tf.get_variable(
        name='step_size',
        initializer=0.01,
        use_resource=True,  # For TFE compatibility
        trainable=False,
    )
    num_burnin_steps = 1000

    hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        num_leapfrog_steps=5,
        step_size=step_size,
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
            num_adaptation_steps=int(num_burnin_steps * 0.8)
        ),
    )

    # Initial states of chains
    q_mu = tf.reduce_mean(tf.random_normal((1000,), mean=mu_init))
    q_t = tf.reduce_mean(tf.random_normal((1000,), mean=t_init))

    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=n_samples,
        current_state=[q_mu, q_t],
        kernel=hmc_kernel,
        num_burnin_steps=num_burnin_steps,
    )

    # Initialize all constructed variables.
    init_op = tf.global_variables_initializer()

    # Run the inference using HMC to generate samples
    with tf.Session() as sess:
        init_op.run()
        states_, results_ = sess.run([states, kernel_results])

    mu_samples, t_samples = states_[0], states_[1]
    return mu_samples, t_samples


def visualize(samples, mu_grid, sigma_grid):
    fig, ax = plt.subplots(1, 1, figsize=(6, 5))
    ax.scatter(samples['mu'], samples['sigma'], s=5, lw=0, c='black')
    ax.set_xlim(mu_grid[0], mu_grid[-1])
    ax.set_ylim(sigma_grid[0], sigma_grid[-1])
    ax.set_title('tfp and Edward2')
    ax.set_xlabel(r'$\mu$')
    ax.set_ylabel(r'$\sigma$')
    plt.savefig('tfp-edward2.pdf')
    plt.savefig('tfp-edward2.png')


def main():
    np.random.seed(0)
    tf.set_random_seed(0)

    # Generate pseudodata from draws from a single normal distribution
    dist_mean = 0.0
    dist_std = 0.5
    n_events = 5000
    toy_data = scipy.stats.norm.rvs(dist_mean, dist_std, size=n_events)

    mu_samples, t_samples = generate_samples(toy_data, n_events)
    samples = pd.DataFrame({'mu': mu_samples, 'sigma': np.exp(t_samples)})

    n_grid = 50
    mu_grid = np.linspace(*np.percentile(mu_samples, (0.5, 99.5)), n_grid)
    sigma_grid = np.linspace(*np.exp(np.percentile(t_samples, (0.5, 99.5))), n_grid)
    visualize(samples, mu_grid, sigma_grid)


if __name__ == '__main__':
    main()