6. Density Estimation¶
Assignment¶
- Fit a probability distribution to your data
I ran Neil's sample code, changing the sample data to the Wine dataset (my dataset), and asked ChatGPT about the parts of the code that needed changing.
I also learned about k-means from a YouTube video (https://www.youtube.com/watch?v=b1JOPONwYmw) and tried changing several variables to fit my own dataset.
Clustering¶
Clustering is the task of grouping data based on the similarity between data points.
k-means¶
k-means is a technique that divides the data into k clusters, maximizing the similarity of the data points within each cluster; equivalently, it minimizes each point's distance to its cluster mean.
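As a minimal sketch of the idea (toy values, not the Wine dataset), one k-means iteration alternates an assignment step and an update step:

```python
import numpy as np

# toy points and two initial means (illustrative values only)
pts = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [4.9, 5.0]])
mu = np.array([[0.0, 0.0], [5.0, 5.0]])

# assignment step: label each point with its nearest mean
labels = np.argmin(np.linalg.norm(pts[:, None, :] - mu[None, :, :], axis=2), axis=1)

# update step: move each mean to the centroid of its assigned points
mu = np.array([pts[labels == k].mean(axis=0) for k in range(len(mu))])
print(labels, mu)
```

Repeating these two steps until the means stop moving gives the clustering used below.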
Voronoi tessellation¶
The assignment step of k-means partitions the plane into Voronoi cells around the cluster means, so plotting the Voronoi tessellation of the means shows the cluster boundaries.
In [7]:
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi,voronoi_plot_2d
import numpy as np
import time
import pandas as pd
#
# data from Wine.dataset
#
df = pd.read_csv('./datasets/Wine_dataset.csv')
x = df['Flavanoids'].to_numpy()
y = df['Color intensity'].to_numpy()
# standardize the features (strongly recommended when the scales differ)
x = (x - np.mean(x)) / np.std(x)
y = (y - np.mean(y)) / np.std(y)
#
# k-means parameters
#
#npts = 1000
nsteps = 1000
momentum = 0.
#xs = [0,5,10]
#ys = [0,10,5]
#np.random.seed(10)
def kmeans(x,y,momentum,nclusters):
    # (momentum is kept from the sample code's signature but is not used here)
    #
    # choose starting points
    #
    indices = np.random.uniform(low=0,high=len(x),size=nclusters).astype(int)
    mux = x[indices]
    muy = y[indices]
    #
    # do k-means iteration
    #
    for i in range(nsteps):
        #
        # find closest points
        #
        X = np.outer(x,np.ones(len(mux)))
        Y = np.outer(y,np.ones(len(muy)))
        Mux = np.outer(np.ones(len(x)),mux)
        Muy = np.outer(np.ones(len(x)),muy)
        distances = np.sqrt((X-Mux)**2+(Y-Muy)**2)
        mins = np.argmin(distances,axis=1)
        #
        # update means
        #
        for j in range(len(mux)):
            index = np.where(mins == j)
            if len(index[0]) > 0:  # skip empty clusters to avoid dividing by zero
                mux[j] = np.sum(x[index])/len(index[0])
                muy[j] = np.sum(y[index])/len(index[0])
    #
    # find total distance of the points to their cluster means
    #
    distances = 0
    for j in range(len(mux)):
        index = np.where(mins == j)
        distances += np.sum(np.sqrt((x[index]-mux[j])**2+(y[index]-muy[j])**2))
    return mux,muy,distances
def plot_kmeans(x,y,mux,muy):
    xmin = np.min(x)
    xmax = np.max(x)
    ymin = np.min(y)
    ymax = np.max(y)
    fig,ax = plt.subplots()
    plt.plot(x,y,'.')
    plt.plot(mux,muy,'r.',markersize=20)
    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)
    plt.title(f"{len(mux)} clusters")
    plt.show()
def plot_Voronoi(x,y,mux,muy):
    xmin = np.min(x)
    xmax = np.max(x)
    ymin = np.min(y)
    ymax = np.max(y)
    fig,ax = plt.subplots()
    plt.plot(x,y,'.')
    vor = Voronoi(np.stack((mux,muy),axis=1))
    voronoi_plot_2d(vor,ax=ax,show_points=True,show_vertices=False,point_size=20)
    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)
    plt.title(f"{len(mux)} clusters")
    plt.show()
distances = np.zeros(5)
mux,muy,distances[0] = kmeans(x,y,momentum,1)
plot_kmeans(x,y,mux,muy)
mux,muy,distances[1] = kmeans(x,y,momentum,2)
plot_kmeans(x,y,mux,muy)
mux,muy,distances[2] = kmeans(x,y,momentum,3)
plot_Voronoi(x,y,mux,muy)
mux,muy,distances[3] = kmeans(x,y,momentum,4)
plot_Voronoi(x,y,mux,muy)
mux,muy,distances[4] = kmeans(x,y,momentum,5)
plot_Voronoi(x,y,mux,muy)
fig,ax = plt.subplots()
plt.plot(np.arange(1,6),distances,'o')
plt.xlabel('number of clusters')
plt.ylabel('total distances to clusters')
ax.xaxis.get_major_locator().set_params(integer=True)
plt.show()
The total distance of the points to their cluster means always decreases as the number of clusters grows; the "elbow" where the curve stops dropping quickly suggests a reasonable number of clusters.
Following Tsuchiya-san's advice, I also made the plot with sklearn.
In [3]:
from sklearn.cluster import KMeans
x = df.loc[:,['Flavanoids','Color intensity']].to_numpy()
kmeans_model = KMeans(n_clusters = 3).fit(x)
kmean_labels = kmeans_model.labels_
x = df['Flavanoids']
y = df['Color intensity']
In [5]:
plt.figure(figsize=(8,6))
plt.scatter(x,y,c=kmean_labels,cmap="viridis",s=20)
plt.show()
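The numpy version above standardized the two features, but this sklearn fit did not. A minimal variant that standardizes first (assuming the same `df`; `StandardScaler` is just one way to do it):

```python
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

X = df.loc[:,['Flavanoids','Color intensity']].to_numpy()
X_scaled = StandardScaler().fit_transform(X)  # zero mean, unit variance per feature
labels = KMeans(n_clusters=3, n_init=10).fit_predict(X_scaled)

plt.scatter(X_scaled[:,0], X_scaled[:,1], c=labels, cmap="viridis", s=20)
plt.xlabel('Flavanoids (standardized)')
plt.ylabel('Color intensity (standardized)')
plt.show()
```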
Gaussian mixture models¶
A Gaussian mixture model describes the data as a weighted sum of Gaussians. The expectation-maximization (E-M) iteration alternates between computing each point's cluster-membership probabilities from the current model (E step) and updating the means, variances, and weights from those probabilities (M step).
In [3]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#
# data from Wine dataset
#
df = pd.read_csv('./datasets/Wine_dataset.csv')
x = df['Flavanoids'].to_numpy()
y = df['Color intensity'].to_numpy()
#
# Gaussian mixture model parameters
#
npts = len(x)     # number of data points (must be set after the data is loaded)
nclusters = 3
nsteps = 25
nplot = 100
#xs = [0,5,10]    # sample-data cluster centers, unused with the Wine dataset
#ys = [0,10,5]
np.random.seed(0)
#
# choose starting points and initialize
#
indices = np.random.uniform(low=0,high=len(x),size=nclusters).astype(int)
mux = x[indices]
muy = y[indices]
varx = (np.max(x)-np.min(x))**2    # start with large variances spanning the data range
vary = (np.max(y)-np.min(y))**2
pc = np.ones(nclusters)/nclusters  # equal initial cluster weights
#
# plot before iteration
#
fig,ax = plt.subplots()
plt.plot(x,y,'.')
plt.errorbar(mux,muy,xerr=np.sqrt(varx),yerr=np.sqrt(vary),fmt='r.',markersize=20)
plt.autoscale()
plt.title('before iteration')
plt.show()
#
# do E-M iterations
#
for i in range(nsteps):
    #
    # construct matrices
    #
    xm = np.outer(x,np.ones(nclusters))
    ym = np.outer(y,np.ones(nclusters))
    muxm = np.outer(np.ones(npts),mux)
    muym = np.outer(np.ones(npts),muy)
    varxm = np.outer(np.ones(npts),varx)
    varym = np.outer(np.ones(npts),vary)
    pcm = np.outer(np.ones(npts),pc)
    #
    # E step: use the model to update the probabilities
    # pvgc = p(value|cluster), pvc = p(value,cluster), pcgv = p(cluster|value)
    #
    pvgc = (1/np.sqrt(2*np.pi*varxm))*\
        np.exp(-(xm-muxm)**2/(2*varxm))*\
        (1/np.sqrt(2*np.pi*varym))*\
        np.exp(-(ym-muym)**2/(2*varym))
    pvc = pvgc*pcm
    pcgv = pvc/np.outer(np.sum(pvc,1),np.ones(nclusters))
    #
    # M step: use the probabilities to update the model
    #
    pc = np.sum(pcgv,0)/npts
    mux = np.sum(xm*pcgv,0)/(npts*pc)
    muy = np.sum(ym*pcgv,0)/(npts*pc)
    varx = 0.1+np.sum((xm-muxm)**2*pcgv,0)/(npts*pc)  # the 0.1 floor keeps variances from collapsing
    vary = 0.1+np.sum((ym-muym)**2*pcgv,0)/(npts*pc)
#
# plot after iteration
#
fig,ax = plt.subplots()
plt.plot(x,y,'.')
plt.errorbar(mux,muy,xerr=np.sqrt(varx),yerr=np.sqrt(vary),fmt='r.',markersize=20)
plt.autoscale()
plt.title('after iteration')
plt.show()
#
# plot distribution
#
xplot = np.linspace(np.min(x),np.max(x),nplot)
yplot = np.linspace(np.min(y),np.max(y),nplot)
(X,Y) = np.meshgrid(xplot,yplot)
p = np.zeros((nplot,nplot))
for c in range(nclusters):
    p += np.exp(-(X-mux[c])**2/(2*varx[c]))/np.sqrt(2*np.pi*varx[c])\
        *np.exp(-(Y-muy[c])**2/(2*vary[c]))/np.sqrt(2*np.pi*vary[c])\
        *pc[c]
fig, ax = plt.subplots(subplot_kw={"projection":"3d"})
ax.plot_surface(X,Y,p)
plt.title('probability distribution')
plt.show()
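As a cross-check of my own (not part of Neil's sample code), sklearn's `GaussianMixture` can fit the same two features; `covariance_type='diag'` matches the axis-aligned Gaussians used above:

```python
from sklearn.mixture import GaussianMixture

X = df.loc[:,['Flavanoids','Color intensity']].to_numpy()
gmm = GaussianMixture(n_components=3, covariance_type='diag', random_state=0).fit(X)
print(gmm.weights_)      # cluster weights, analogous to pc
print(gmm.means_)        # cluster means, analogous to (mux, muy)
print(gmm.covariances_)  # per-axis variances, analogous to (varx, vary)
```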