Here we provide some code for machine learning that may be useful for later research. The code depends on the autograd package (https://github.com/HIPS/autograd).
Logistic regression
import autograd.numpy as np   # Thinly-wrapped version of Numpy
from autograd import grad
import matplotlib.pyplot as plt

def sigmoid(x):
    return 0.5*(np.tanh(x/2.0) + 1)

def logistic_predictions(weights, inputs):
    return sigmoid(np.dot(inputs, weights))

def training_loss(weights):
    # Negative log-likelihood of the labels under the Bernoulli model
    preds = logistic_predictions(weights, inputs)
    log_label_probabilities = np.log(preds)*targets + np.log(1 - preds)*(1 - targets)
    return -np.sum(log_label_probabilities)

inputs = np.array([[0.52, 1.12, 0.77],
                   [0.88, -1.08, 0.15],
                   [0.52, 0.06, -1.30],
                   [0.74, -2.49, 1.39]])
targets = np.array([True, True, False, False])

training_gradient_fun = grad(training_loss)

weights = np.array([0.0, 0.0, 0.0])
print("Initial loss: ", training_loss(weights))
errors = []
for i in range(1000):
    weights -= training_gradient_fun(weights)*0.01
    # For this simple logistic regression program, we can also compute the
    # gradient by hand:
    # weights -= 0.01*np.dot(logistic_predictions(weights, inputs) - targets, inputs)
    errors.append(training_loss(weights))
print("Trained loss: ", training_loss(weights))
plt.plot(errors)
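As a quick check on the gradient that autograd computes here (and on the commented-out manual update in the loop), write p = sigmoid(X w) for the predictions, y for the targets, and X for the input matrix. The training loss defined above and its gradient are

\[
L(w) = -\sum_i \big[ y_i \log p_i + (1-y_i)\log(1-p_i) \big],
\qquad
\nabla_w L(w) = X^\top (p - y),
\]

which is exactly the expression np.dot(logistic_predictions(weights, inputs) - targets, inputs) used in the hand-coded update.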
Black box variational inference
The approximate probability measure is chosen to be a diagonal Gaussian, and the true probability measure is a Gaussian mixture with two components.
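The code maximizes the evidence lower bound (ELBO) by stochastic gradient ascent; the following summary is only a reading aid for variational_objective below. For a diagonal Gaussian q(z; \mu, \sigma) in D dimensions and an unnormalized log-density \log \tilde p(z), the objective is

\[
\mathcal{L}(\mu,\sigma) = \mathbb{E}_{q}\!\big[\log \tilde p(z)\big] + H[q],
\qquad
H[q] = \tfrac{D}{2}\big(1+\log 2\pi\big) + \sum_{d=1}^{D}\log\sigma_d ,
\]

where the expectation is estimated by Monte Carlo over reparameterized samples z = \mu + \sigma \odot \epsilon with \epsilon \sim \mathcal{N}(0, I), so that the gradient with respect to (\mu, \log\sigma) can be taken through the samples.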
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm
from autograd import grad
from autograd.misc.optimizers import adam

def black_box_variational_inference(logprob, D, num_samples):
    def unpack_params(params):
        # Variational approximate pdf is a diagonal Gaussian
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        # Entropy of a D-dimensional diagonal Gaussian
        return 0.5*D*(1.0 + np.log(2*np.pi)) + np.sum(log_std)

    rs = npr.RandomState(0)
    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        # Reparameterization trick: samples = mean + std * standard normal noise
        samples = rs.randn(num_samples, D)*np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples))
        return -lower_bound

    gradient = grad(variational_objective)
    return variational_objective, gradient, unpack_params

if __name__ == "__main__":
    # Specify an inference problem by its unnormalized log-density
    D = 2
    def log_density(x):
        mu1 = np.array([-0.7, -0.7])
        mu2 = np.array([0.7, 0.7])
        sig1 = np.array([[1.0, 0.0], [0.0, 0.1]])
        sig2 = np.array([[0.1, 0.0], [0.0, 1.0]])
        mix_density = np.log(mvn.pdf(x, mu1, sig1) + mvn.pdf(x, mu2, sig2))
        return mix_density

    # Build variational objective
    objective, gradient, unpack_params = \
        black_box_variational_inference(log_density, D, num_samples=200)

    # Set up plotting code
    def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-2, 2], numticks=101):
        x = np.linspace(*xlimits, num=numticks)
        y = np.linspace(*ylimits, num=numticks)
        X, Y = np.meshgrid(x, y)
        zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
        Z = zs.reshape(X.shape)
        ax.contour(X, Y, Z)
        ax.set_yticks([])
        ax.set_xticks([])

    # Set up figure
    fig = plt.figure(figsize=(8, 8), facecolor="white")
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)

    def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))
        plt.cla()
        target_distribution = lambda x: np.exp(log_density(x))
        plot_isocontours(ax, target_distribution)
        mean, log_std = unpack_params(params)
        variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2*log_std)))
        plot_isocontours(ax, variational_contour)
        plt.draw()
        plt.pause(1.0/10.0)

    print("Optimizing variational parameters...")
    init_mean = -1*np.ones(D)
    init_log_std = -5*np.ones(D)
    init_var_params = np.concatenate([init_mean, init_log_std])
    variational_params = adam(gradient, init_var_params, step_size=0.1,
                              num_iters=200, callback=callback)
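A minimal usage sketch (assuming the script above has run to completion and using only names defined there): the optimized parameter vector returned by adam can be unpacked to inspect the fitted diagonal Gaussian.

# Recover the fitted mean and standard deviation of the variational Gaussian.
fitted_mean, fitted_log_std = unpack_params(variational_params)
print("Fitted mean:", fitted_mean)
print("Fitted std: ", np.exp(fitted_log_std))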