From ce418b7856bbea53fff1ab59020c884ee824815f Mon Sep 17 00:00:00 2001
From: ligerfotis
Date: Fri, 26 May 2023 13:15:50 +0200
Subject: [PATCH] upload solution for assignment 5

---
 assignment 5/iml_assignmnet5_solved.ipynb | 682 ++++++++++++++++++++++
 1 file changed, 682 insertions(+)
 create mode 100644 assignment 5/iml_assignmnet5_solved.ipynb

diff --git a/assignment 5/iml_assignmnet5_solved.ipynb b/assignment 5/iml_assignmnet5_solved.ipynb
new file mode 100644
index 0000000..c4ae8f8
--- /dev/null
+++ b/assignment 5/iml_assignmnet5_solved.ipynb
@@ -0,0 +1,682 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Solution for Assignment 5 of the course \"Introduction to Machine Learning\" at the University of Leoben.\n",
+ "##### Author: Fotios Lygerakis\n",
+ "##### Semester: SS 2022/2023"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Perceptron Algorithm for Classification of Iris Dataset"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "(150, 4)\n",
+ "(150,)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# load the iris dataset\n",
+ "from sklearn.datasets import load_iris\n",
+ "from sklearn.metrics import accuracy_score\n",
+ "import numpy as np\n",
+ "\n",
+ "iris = load_iris()\n",
+ "X = iris.data\n",
+ "y = iris.target\n",
+ "print(X.shape)\n",
+ "print(y.shape)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Preprocess the data"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "outputs": [],
+ "source": [
+ "# Preprocess the data\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "# split the data into train and test sets\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Define the perceptron algorithm"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "outputs": [],
+ "source": [
+ "# Define the perceptron algorithm\n",
+ "class MultiClassPerceptron:\n",
+ "    def __init__(self, input_dim, output_dim, lr=0.01, epochs=1000):\n",
+ "        self.W = np.random.randn(input_dim, output_dim)\n",
+ "        self.b = np.zeros((1, output_dim))\n",
+ "        self.lr = lr\n",
+ "        self.epochs = epochs\n",
+ "\n",
+ "    def forward(self, X):\n",
+ "        # linear class scores\n",
+ "        self.z = np.dot(X, self.W) + self.b\n",
+ "        # softmax probabilities\n",
+ "        self.y_hat = np.exp(self.z) / np.sum(np.exp(self.z), axis=1, keepdims=True)\n",
+ "\n",
+ "    def backward(self, X, y):\n",
+ "        m = X.shape[0] # number of samples\n",
+ "        # Calculate the gradient w.r.t. the scores (copy so y_hat is not mutated in place)\n",
+ "        grad_z = self.y_hat.copy() # shape (m, C)\n",
+ "        # Subtract 1 at the true class of each sample (softmax cross-entropy gradient)\n",
+ "        grad_z[range(m), y] -= 1 # shape (m, C)\n",
+ "        # Calculate the gradients with respect to the parameters\n",
+ "        grad_W = np.dot(X.T, grad_z) # shape (n, C)\n",
+ "        # Sum the gradients over the samples for the bias\n",
+ "        grad_b = np.sum(grad_z, axis=0, keepdims=True) # shape (1, C)\n",
+ "        # Update the parameters\n",
+ "        self.W -= self.lr * grad_W # shape (n, C)\n",
+ "        self.b -= self.lr * grad_b # shape (1, C)\n",
+ "\n",
+ "    def fit(self, X, y):\n",
+ "        for epoch in range(self.epochs):\n",
+ "            self.forward(X)\n",
+ "            self.backward(X, y)\n",
+ "\n",
+ "    def predict(self, X):\n",
+ "        self.forward(X)\n",
+ "        return np.argmax(self.y_hat, axis=1)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
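+ {
+ "cell_type": "markdown",
+ "source": [
+ "Note (editorial addition, not part of the graded solution): the softmax in `forward` exponentiates raw scores, which can overflow for large inputs. A minimal sketch of a numerically stable variant, which shifts each row by its maximum before exponentiating (softmax is invariant to such shifts):"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Editorial sketch: numerically stable softmax (assumption: it behaves like the\n",
+ "# softmax used in MultiClassPerceptron.forward, just safe for large scores).\n",
+ "def stable_softmax(z):\n",
+ "    z_shifted = z - np.max(z, axis=1, keepdims=True) # row-wise max becomes 0\n",
+ "    exp_z = np.exp(z_shifted)\n",
+ "    return exp_z / np.sum(exp_z, axis=1, keepdims=True)\n",
+ "\n",
+ "# quick check against the unshifted version on well-scaled inputs\n",
+ "z_demo = np.random.randn(5, 3)\n",
+ "assert np.allclose(stable_softmax(z_demo), np.exp(z_demo) / np.sum(np.exp(z_demo), axis=1, keepdims=True))"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },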
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Train the model"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "outputs": [],
+ "source": [
+ "# Train the model\n",
+ "p = MultiClassPerceptron(input_dim=X_train.shape[1], output_dim=3, lr=0.01, epochs=1000)\n",
+ "p.fit(X_train, y_train)\n",
+ "predictions_train = p.predict(X_train)\n",
+ "predictions = p.predict(X_test)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Evaluate the model"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Perceptron classification train accuracy 0.975\n",
+ "Perceptron classification accuracy 1.0\n"
+ ]
+ }
+ ],
+ "source": [
+ "# evaluate train and test accuracy\n",
+ "print(\"Perceptron classification train accuracy\", accuracy_score(y_train, predictions_train))\n",
+ "print(\"Perceptron classification accuracy\", accuracy_score(y_test, predictions))"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Non-linear feature transformation on the Concrete Compressive Strength dataset"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "outputs": [],
+ "source": [
+ "def polynomial_features(X, degree):\n",
+ "    \"\"\"\n",
+ "    Creates a new feature matrix consisting of a bias column and each feature raised to every power from 1 up to the specified degree.\n",
+ "    Unlike sklearn's PolynomialFeatures, no interaction (cross) terms are generated.\n",
+ "    For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, a^2, b, b^2].\n",
+ "    Parameters\n",
+ "    ----------\n",
+ "    X : array-like, shape (n_samples, n_features)\n",
+ "        The input samples.\n",
+ "    degree : int\n",
+ "        The degree of the polynomial features.\n",
+ "    Returns\n",
+ "    -------\n",
+ "    X_new : array-like, shape (n_samples, 1 + n_features * degree)\n",
+ "        The per-feature polynomial features up to degree `degree`.\n",
+ "    \"\"\"\n",
+ "    n_samples, n_features = np.shape(X)\n",
+ "    new_features = np.ones(shape=(n_samples, 1))\n",
+ "\n",
+ "    for i in range(n_features):\n",
+ "        for j in range(1, degree+1):\n",
+ "            # create a new column for each feature, with values raised to the power of j\n",
+ "            new_col = np.power(X[:, i], j) # shape (n_samples,)\n",
+ "            # reshape the new column to a 2-D array\n",
+ "            new_col = new_col.reshape(n_samples, 1) # shape (n_samples, 1)\n",
+ "            # append the new column; the final shape is (n_samples, 1 + n_features*degree)\n",
+ "            new_features = np.hstack((new_features, new_col))\n",
+ "\n",
+ "    return new_features"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
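+ {
+ "cell_type": "markdown",
+ "source": [
+ "Quick sanity check (editorial addition): the custom transform omits interaction terms, so its column count differs from sklearn's `PolynomialFeatures` by exactly the number of cross terms."
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Editorial sketch: compare column counts on a tiny random matrix.\n",
+ "from sklearn.preprocessing import PolynomialFeatures\n",
+ "\n",
+ "X_demo = np.random.randn(4, 2)\n",
+ "custom = polynomial_features(X_demo, degree=2) # columns: [1, a, a^2, b, b^2]\n",
+ "sk = PolynomialFeatures(degree=2).fit_transform(X_demo) # columns: [1, a, b, a^2, ab, b^2]\n",
+ "print(custom.shape, sk.shape) # (4, 5) (4, 6) -- sklearn also generates the ab cross term"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },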
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "outputs": [],
+ "source": [
+ "# Non-linear feature transformation\n",
+ "import pandas as pd\n",
+ "from sklearn.preprocessing import PolynomialFeatures\n",
+ "from sklearn.linear_model import LinearRegression\n",
+ "from sklearn.metrics import mean_squared_error, r2_score\n",
+ "\n",
+ "# load the concrete compressive strength dataset\n",
+ "df = pd.read_excel('Concrete_Data.xls')\n",
+ "\n",
+ "# split the data into train and test sets\n",
+ "X = df.drop(['Concrete compressive strength(MPa, megapascals) '], axis=1)\n",
+ "y = df['Concrete compressive strength(MPa, megapascals) ']\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
+ "\n",
+ "# transform the features into second degree polynomial features (sklearn version, kept for reference)\n",
+ "poly = PolynomialFeatures(degree=2)\n",
+ "X_train_poly = poly.fit_transform(X_train)\n",
+ "X_test_poly = poly.transform(X_test)\n",
+ "\n",
+ "# custom version used by the regression below\n",
+ "X_train_poly_custom = polynomial_features(X_train.values, degree=2)\n",
+ "X_test_poly_custom = polynomial_features(X_test.values, degree=2)\n"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Train the linear regression model"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Mean squared error (train poly custom): 64.55\n",
+ "Mean squared error (test poly custom): 58.28\n",
+ "Mean squared error (train): 110.66\n",
+ "Mean squared error (test): 95.98\n",
+ "R^2 (train poly custom): 0.77\n",
+ "R^2 (test poly custom): 0.77\n",
+ "R^2 (train): 0.61\n",
+ "R^2 (test): 0.63\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Train the model\n",
+ "lr_poly_custom = LinearRegression()\n",
+ "lr = LinearRegression()\n",
+ "# fit the model\n",
+ "lr_poly_custom.fit(X_train_poly_custom, y_train)\n",
+ "lr.fit(X_train, y_train)\n",
+ "# predict values from the polynomial transformed features\n",
+ "predictions_poly_custom_train = lr_poly_custom.predict(X_train_poly_custom)\n",
+ "predictions_poly_custom = lr_poly_custom.predict(X_test_poly_custom)\n",
+ "# predict values from the original features\n",
+ "predictions_train = lr.predict(X_train)\n",
+ "predictions = lr.predict(X_test)\n",
+ "\n",
+ "# mean squared error\n",
+ "print(\"Mean squared error (train poly custom): {:.2f}\".format(mean_squared_error(y_train, predictions_poly_custom_train)))\n",
+ "print(\"Mean squared error (test poly custom): {:.2f}\".format(mean_squared_error(y_test, predictions_poly_custom)))\n",
+ "print(\"Mean squared error (train): {:.2f}\".format(mean_squared_error(y_train, predictions_train)))\n",
+ "print(\"Mean squared error (test): {:.2f}\".format(mean_squared_error(y_test, predictions)))\n",
+ "\n",
+ "# coefficient of determination (R^2)\n",
+ "print(\"R^2 (train poly custom): {:.2f}\".format(r2_score(y_train, predictions_poly_custom_train)))\n",
+ "print(\"R^2 (test poly custom): {:.2f}\".format(r2_score(y_test, predictions_poly_custom)))\n",
+ "print(\"R^2 (train): {:.2f}\".format(r2_score(y_train, predictions_train)))\n",
+ "print(\"R^2 (test): {:.2f}\".format(r2_score(y_test, predictions)))\n",
+ "\n"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "RBFs on the California Housing Prices dataset"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "outputs": [],
+ "source": [
+ "def rbf_kernel(X, centers, gamma):\n",
+ "    # Squared Euclidean distances between each sample and each center, via broadcasting:\n",
+ "    # - X[:, np.newaxis] - centers has shape (n_samples, n_centers, n_features)\n",
+ "    # - squaring and summing over the feature axis yields shape (n_samples, n_centers)\n",
+ "    sq_dists = ((X[:, np.newaxis] - centers)**2).sum(axis=2) # shape (n_samples, n_centers)\n",
+ "    # Gaussian RBF: exp(-gamma * ||x - c||^2)\n",
+ "    rbf_vals = np.exp(-gamma * sq_dists) # shape (n_samples, n_centers)\n",
+ "    return rbf_vals"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
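+ {
+ "cell_type": "markdown",
+ "source": [
+ "As a quick check (editorial addition), the hand-rolled kernel can be compared against `sklearn.metrics.pairwise.rbf_kernel`, which computes the same `exp(-gamma * ||x - c||^2)` features:"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Editorial sanity check: our rbf_kernel should match sklearn's implementation\n",
+ "# (imported under an alias to avoid shadowing the function defined above).\n",
+ "from sklearn.metrics.pairwise import rbf_kernel as sk_rbf_kernel\n",
+ "\n",
+ "X_demo = np.random.randn(6, 3)\n",
+ "C_demo = np.random.randn(4, 3)\n",
+ "assert np.allclose(rbf_kernel(X_demo, C_demo, gamma=0.5), sk_rbf_kernel(X_demo, C_demo, gamma=0.5))"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },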
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Linear regression on original data:\n",
+ "MSE: 0.5558915986952443\n",
+ "R^2: 0.5757877060324508\n",
+ "\n",
+ "Linear regression on RBF-transformed data:\n",
+ "MSE: 0.37106446913117447\n",
+ "R^2: 0.7168330839511696\n"
+ ]
+ }
+ ],
+ "source": [
+ "from sklearn.datasets import fetch_california_housing\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.linear_model import LinearRegression\n",
+ "from sklearn.metrics import mean_squared_error, r2_score\n",
+ "\n",
+ "# Load the California Housing Prices dataset\n",
+ "data = fetch_california_housing()\n",
+ "X = data['data']\n",
+ "y = data['target']\n",
+ "\n",
+ "# Split the data into training and testing sets\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
+ "\n",
+ "# Standardize the features\n",
+ "scaler = StandardScaler()\n",
+ "X_train_std = scaler.fit_transform(X_train)\n",
+ "X_test_std = scaler.transform(X_test)\n",
+ "\n",
+ "# Choose the number of centroids and the RBF kernel width\n",
+ "num_centroids = 100\n",
+ "gamma = 0.1\n",
+ "\n",
+ "# Randomly select the centroids from the training set\n",
+ "np.random.seed(42)\n",
+ "idx = np.random.choice(X_train_std.shape[0], num_centroids, replace=False)\n",
+ "centroids = X_train_std[idx] # (100, 8)\n",
+ "\n",
+ "# Compute the RBF features for the training and testing sets\n",
+ "rbf_train = rbf_kernel(X_train_std, centroids, gamma) # (16512, 100)\n",
+ "rbf_test = rbf_kernel(X_test_std, centroids, gamma) # (4128, 100)\n",
+ "\n",
+ "# Fit a linear regression model on the original and RBF-transformed data\n",
+ "linreg_orig = LinearRegression().fit(X_train_std, y_train)\n",
+ "linreg_rbf = LinearRegression().fit(rbf_train, y_train)\n",
+ "\n",
+ "# Evaluate the models on the testing set\n",
+ "y_pred_orig = linreg_orig.predict(X_test_std)\n",
+ "mse_orig = mean_squared_error(y_test, y_pred_orig)\n",
+ "r2_orig = r2_score(y_test, y_pred_orig)\n",
+ "\n",
+ "y_pred_rbf = linreg_rbf.predict(rbf_test)\n",
+ "mse_rbf = mean_squared_error(y_test, y_pred_rbf)\n",
+ "r2_rbf = r2_score(y_test, y_pred_rbf)\n",
+ "\n",
+ "# Print the results\n",
+ "print(\"Linear regression on original data:\")\n",
+ "print(\"MSE:\", mse_orig)\n",
+ "print(\"R^2:\", r2_orig)\n",
+ "\n",
+ "print(\"\\nLinear regression on RBF-transformed data:\")\n",
+ "print(\"MSE:\", mse_rbf)\n",
+ "print(\"R^2:\", r2_rbf)\n"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
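+ {
+ "cell_type": "markdown",
+ "source": [
+ "`gamma` and `num_centroids` were fixed by hand above. A small hold-out search could tune them; the sketch below (editorial addition, candidate grid is illustrative) tunes `gamma` on a validation split:"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Editorial sketch: evaluate a few gamma values on a validation split carved\n",
+ "# out of the training set; the grid below is illustrative, not tuned.\n",
+ "X_tr, X_val, y_tr, y_val = train_test_split(X_train_std, y_train, test_size=0.2, random_state=0)\n",
+ "for g in [0.01, 0.05, 0.1, 0.5, 1.0]:\n",
+ "    model = LinearRegression().fit(rbf_kernel(X_tr, centroids, g), y_tr)\n",
+ "    val_mse = mean_squared_error(y_val, model.predict(rbf_kernel(X_val, centroids, g)))\n",
+ "    print('gamma =', g, 'validation MSE =', val_mse)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },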
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **(Bonus)** Multilayer Perceptron Algorithm for Regression of Concrete Compressive Strength Dataset"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Download the Concrete Compressive Strength Dataset from the UCI Machine Learning Repository."
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "(1030, 9)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Download the Concrete Compressive Strength Dataset from the UCI Machine Learning Repository.\n",
+ "import pandas as pd\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "import numpy as np\n",
+ "\n",
+ "df = pd.read_excel('Concrete_Data.xls')\n",
+ "print(df.shape)\n",
+ "# df.head()"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Preprocess the data"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "outputs": [],
+ "source": [
+ "# Preprocess the data\n",
+ "X = df.iloc[:, :-1].values\n",
+ "y = df.iloc[:, -1].values.reshape(-1, 1)\n",
+ "\n",
+ "# Standardize the features\n",
+ "X_norm = StandardScaler().fit_transform(X)\n",
+ "\n",
+ "# Split the data into training and testing sets\n",
+ "X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.2, random_state=42)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Define the multilayer perceptron algorithm"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "outputs": [],
+ "source": [
+ "# A multilayer perceptron with one hidden layer for regression problems\n",
+ "class MLP:\n",
+ "    def __init__(self, input_dim, hidden_dim, output_dim, lr=0.01, epochs=1000):\n",
+ "        self.W1 = np.random.randn(input_dim, hidden_dim)\n",
+ "        self.b1 = np.zeros((1, hidden_dim))\n",
+ "        self.W2 = np.random.randn(hidden_dim, output_dim)\n",
+ "        self.b2 = np.zeros((1, output_dim))\n",
+ "        self.lr = lr\n",
+ "        self.epochs = epochs\n",
+ "\n",
+ "    def forward(self, X):\n",
+ "        # forward propagation through our network\n",
+ "        self.z1 = np.dot(X, self.W1) + self.b1\n",
+ "        # hidden layer activation function\n",
+ "        self.a1 = np.tanh(self.z1)\n",
+ "        # output layer\n",
+ "        self.z2 = np.dot(self.a1, self.W2) + self.b2\n",
+ "        # identity output activation (regression)\n",
+ "        self.y_hat = self.z2\n",
+ "\n",
+ "    def backward(self, X, y):\n",
+ "        # number of samples\n",
+ "        m = X.shape[0]\n",
+ "        # track the MSE loss for monitoring\n",
+ "        self.loss = np.mean((self.y_hat - y) ** 2) # scalar\n",
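+ "        # Backprop note (editorial addition): for L = mean((y_hat - y)^2) with an\n",
+ "        # identity output, dL/dz2 is proportional to (y_hat - y); the factor 2 is\n",
+ "        # absorbed into the learning rate and the 1/m is applied in the update step.\n",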
+ "        # output layer gradient\n",
+ "        delta2 = (self.y_hat - y) # shape (n_samples, output_dim)\n",
+ "        # gradient of the output-layer weights\n",
+ "        dW2 = np.dot(self.a1.T, delta2) # shape (hidden_dim, output_dim)\n",
+ "        # gradient of the output-layer bias\n",
+ "        db2 = np.sum(delta2, axis=0, keepdims=True) # shape (1, output_dim)\n",
+ "        # hidden layer gradient, backpropagated through tanh: tanh'(z1) = 1 - a1^2\n",
+ "        delta1 = np.dot(delta2, self.W2.T) * (1 - np.power(self.a1, 2)) # shape (n_samples, hidden_dim)\n",
+ "        # gradient of the hidden-layer weights\n",
+ "        dW1 = np.dot(X.T, delta1) # shape (input_dim, hidden_dim)\n",
+ "        # gradient of the hidden-layer bias\n",
+ "        db1 = np.sum(delta1, axis=0, keepdims=True) # shape (1, hidden_dim)\n",
+ "        # update parameters\n",
+ "        self.W2 -= self.lr * dW2 / m\n",
+ "        self.b2 -= self.lr * db2 / m\n",
+ "        self.W1 -= self.lr * dW1 / m\n",
+ "        self.b1 -= self.lr * db1 / m\n",
+ "\n",
+ "    def fit(self, X, y):\n",
+ "        for epoch in range(self.epochs):\n",
+ "            self.forward(X)\n",
+ "            self.backward(X, y)\n",
+ "\n",
+ "    def predict(self, X):\n",
+ "        self.forward(X)\n",
+ "        return self.y_hat\n"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Train the model"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "outputs": [],
+ "source": [
+ "# Create an instance of the MLP class\n",
+ "mlp = MLP(input_dim=X_train.shape[1], hidden_dim=10, output_dim=1, lr=0.01, epochs=1000)\n",
+ "# Train the model\n",
+ "mlp.fit(X_train, y_train)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Evaluate the model"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Mean Squared Error: 36.8911071801165\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Evaluate the model\n",
+ "from sklearn.metrics import mean_squared_error\n",
+ "\n",
+ "y_pred = mlp.predict(X_test)\n",
+ "print(\"Mean Squared Error:\", mean_squared_error(y_test, y_pred))"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Compare the results with the linear regression model"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Mean Squared Error: 95.97548435337708\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Compare the results with the linear regression model\n",
+ "from sklearn.linear_model import LinearRegression\n",
+ "\n",
+ "lr = LinearRegression()\n",
+ "lr.fit(X_train, y_train)\n",
+ "y_pred = lr.predict(X_test)\n",
+ "print(\"Mean Squared Error:\", mean_squared_error(y_test, y_pred))"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 2
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython2",
+ "version": "2.7.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}