From 9b81d7e23917b261d0f45da5484566d8d240d144 Mon Sep 17 00:00:00 2001
From: ligerfotis
Date: Thu, 11 May 2023 16:38:03 +0200
Subject: [PATCH] post assignment 5

---
 assignment 5/README.md                      |  60 +++
 assignment 5/iml_assignmnet5_unsolved.ipynb | 547 ++++++++++++++++++++
 2 files changed, 607 insertions(+)
 create mode 100644 assignment 5/README.md
 create mode 100644 assignment 5/iml_assignmnet5_unsolved.ipynb

diff --git a/assignment 5/README.md b/assignment 5/README.md
new file mode 100644
index 0000000..ae839c0
--- /dev/null
+++ b/assignment 5/README.md
@@ -0,0 +1,60 @@
+# Description of Assignment 5
+### Author: [Fotios Lygerakis](https://github.com/ligerfotis)
+
+### Implement the Perceptron algorithm for classification of the Iris dataset.
+* Use load_iris() from sklearn.datasets to load the Iris dataset.
+  * The Iris dataset is a multiclass classification dataset with 3 classes.
+  * The Iris dataset has 4 features and 150 samples.
+  * The Iris dataset is a balanced dataset with 50 samples per class.
+* Split the dataset into train and test sets.
+* Scale the features.
+* Implement the Perceptron algorithm for classification.
+  * Use the unit step function as the activation function.
+* Evaluate the model on the test set.
+  * Use accuracy_score() from sklearn.metrics to calculate the accuracy of the model.
+* Do some hyperparameter tuning to improve the accuracy of the model.
+  * Try different values of the learning rate and the number of iterations.
+  * Print the accuracy of the model for different values of the learning rate and the number of iterations.
+### Implement non-linear feature transformation for regression of the Concrete Compressive Strength dataset.
+* Use read_excel() from pandas to load the dataset.
+* The Concrete Compressive Strength dataset
+  * is a regression dataset.
+  * has 8 features and 1030 samples.
+  * has 1 target variable.
+* Split the dataset into train and test sets.
+* Scale the features.
+* **Polynomial feature transformation**
+  * Implement the polynomial_features() function to transform the features.
+  * Use LinearRegression() from sklearn.linear_model to train a linear regression model.
+  * Evaluate the model on the test set.
+    * Use mean_squared_error() from sklearn.metrics to calculate the mean squared error of the model.
+    * Use r2_score() from sklearn.metrics to calculate the R2 score of the model.
+  * Train a linear regression model on the polynomial features, varying the degree of the polynomial from 1 to 4.
+  * Evaluate the models trained on the polynomial features on the test set and compare the mean squared error of the models.
+  * Discuss the results in the report.
+* **Radial Basis Function (RBF) feature transformation**
+  * Implement the rbf_features() function to transform the features.
+  * Use LinearRegression() from sklearn.linear_model to train a linear regression model.
+  * Evaluate the model on the test set.
+    * Use mean_squared_error() from sklearn.metrics to calculate the mean squared error of the model.
+    * Use r2_score() from sklearn.metrics to calculate the R2 score of the model.
+  * Train a linear regression model on the RBF features, varying the gamma parameter from 0.1 to 10.
+  * Evaluate the models trained on the RBF features on the test set and compare the mean squared error of the models.
+  * Discuss the results in the report.
+### **(Bonus)** Implement the Multilayer Perceptron algorithm (2 layers) for regression of the Concrete Compressive Strength dataset.
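+
+A minimal NumPy sketch of such a two-layer MLP regressor (tanh hidden layer, identity output, squared-error loss, plain full-batch gradient descent) is given here as a starting point. The class name `TwoLayerMLP`, the random weight initialization, and the default hyperparameters are illustrative assumptions, not a prescribed interface.
+
+```python
+import numpy as np
+
+
+class TwoLayerMLP:
+    """Two-layer MLP for regression: tanh hidden layer, identity output, MSE loss."""
+
+    def __init__(self, input_dim, hidden_dim, output_dim=1, lr=0.01, epochs=1000):
+        rng = np.random.default_rng(0)
+        # small random weights, zero biases
+        self.W1 = rng.normal(0.0, 0.1, size=(input_dim, hidden_dim))
+        self.b1 = np.zeros(hidden_dim)
+        self.W2 = rng.normal(0.0, 0.1, size=(hidden_dim, output_dim))
+        self.b2 = np.zeros(output_dim)
+        self.lr = lr
+        self.epochs = epochs
+
+    def _forward(self, X):
+        # forward propagation: tanh hidden layer, identity (linear) output layer
+        A1 = np.tanh(X @ self.W1 + self.b1)
+        return A1, A1 @ self.W2 + self.b2
+
+    def fit(self, X, y):
+        y = np.asarray(y, dtype=float).reshape(-1, 1)  # assumes a single target variable
+        n = X.shape[0]
+        for _ in range(self.epochs):
+            A1, y_hat = self._forward(X)
+            # backward propagation of the mean-squared-error loss
+            d_out = 2.0 * (y_hat - y) / n                      # dL/d(output)
+            dW2 = A1.T @ d_out
+            db2 = d_out.sum(axis=0)
+            d_hidden = (d_out @ self.W2.T) * (1.0 - A1 ** 2)   # tanh'(z) = 1 - tanh(z)^2
+            dW1 = X.T @ d_hidden
+            db1 = d_hidden.sum(axis=0)
+            # gradient descent update
+            self.W1 -= self.lr * dW1
+            self.b1 -= self.lr * db1
+            self.W2 -= self.lr * dW2
+            self.b2 -= self.lr * db2
+        return self
+
+    def predict(self, X):
+        return self._forward(X)[1].ravel()
+```
+
+The detailed requirements for this bonus task are: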
+* Use train_test_split() from sklearn.model_selection to split the dataset into train and test sets. +* Use StandardScaler() from sklearn.preprocessing to scale the features. +* Implement the Multilayer Perceptron algorithm for regression. + * The Multilayer Perceptron algorithm is a neural network with 2 layers. + * Implement the forward propagation algorithm. + * Implement the backward propagation algorithm. + * Use the tanh function as the activation function for the hidden layer. + * Use the identity function as the activation function for the output layer. +* Evaluate the model on the test set. +* Do some hyperparameter tuning to improve the accuracy of the model. + * Try different values of the learning rate and the number of epochs. + * Plot the mean squared error of the model for different values of the learning rate and the number of epochs. +* Compare the performance of the Multilayer Perceptron algorithm with the Linear Regression algorithm. + * Use LinearRegression() from sklearn.linear_model to train a linear regression model. + * Use mean_squared_error() from sklearn.metrics to calculate the mean squared error of the linear regression model. +* Compare the mean squared error of the linear regression model with the mean squared error of the multilayer perceptron model. diff --git a/assignment 5/iml_assignmnet5_unsolved.ipynb b/assignment 5/iml_assignmnet5_unsolved.ipynb new file mode 100644 index 0000000..bd06111 --- /dev/null +++ b/assignment 5/iml_assignmnet5_unsolved.ipynb @@ -0,0 +1,547 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Perceptron Algorithm for Classification of Iris Dataset" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 39, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(150, 4)\n", + "(150,)\n" + ] + } + ], + "source": [ + "# load the iris dataset\n", + "from sklearn.datasets import load_iris\n", + "from sklearn.metrics import accuracy_score\n", + "import numpy as np\n", + "\n", + "iris = load_iris()\n", + "X = iris.data\n", + "y = iris.target" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Preprocess the data" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 40, + "outputs": [], + "source": [ + "# Preprocess the data\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "# ToDo: split the data into train and test sets\n", + "X_train, X_test, y_train, y_test = None, None, None, None" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Define the perceptron algorithm" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 41, + "outputs": [], + "source": [ + "# Define the perceptron algorithm\n", + "class Perceptron:\n", + " def __init__(self, learning_rate=0.01, n_iters=1000):\n", + " pass\n", + "\n", + " # define the fit function to train the model\n", + "\n", + " # define the predict function to predict labels\n", + "\n", + " def _unit_step_func(self, x):\n", + " pass" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Train the model" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 42, + "outputs": [], + "source": [ + "# Train the model\n", + "p = Perceptron(learning_rate=0.01, n_iters=1000)\n", + "p.fit(X_train, y_train)\n", + 
"predictions_train = p.predict(X_train)\n", + "predictions = p.predict(X_test)" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Evaluate the model" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 44, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Perceptron classification train accuracy 0.3416666666666667\n", + "Perceptron classification accuracy 0.3\n" + ] + } + ], + "source": [ + "# evaluate train accuracy\n", + "print(\"Perceptron classification train accuracy\", accuracy_score(y_train, predictions_train))\n", + "print(\"Perceptron classification accuracy\", accuracy_score(y_test, predictions))" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Non-linear feature transformation on the concrete compressive strength dataset" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 45, + "outputs": [], + "source": [ + "from itertools import combinations_with_replacement\n", + "\n", + "# ToDO: implement the polynomial_features() function\n", + "def polynomial_features(X, degree):\n", + " pass" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 46, + "outputs": [], + "source": [ + "# Non-linear feature transformation\n", + "import pandas as pd\n", + "from sklearn.linear_model import LinearRegression\n", + "from sklearn.metrics import mean_squared_error, r2_score\n", + "\n", + "# load the concrete compressive strength dataset\n", + "df = pd.read_excel('Concrete_Data.xls')\n", + "\n", + "# ToDo: split the data into train and test sets\n", + "X_train, X_test, y_train, y_test = None, None, None, None\n", + "\n", + "# transform the features into second degree polynomial features\n", + "X_train_poly_custom = polynomial_features(X_train.values, degree=2)\n", + "X_test_poly_custom = polynomial_features(X_test.values, degree=2)\n" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Train the linear regression model" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 47, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean squared error (train poly custom): 64.55\n", + "Mean squared error (test poly custom): 58.28\n", + "Mean squared error (train): 110.66\n", + "Mean squared error (test): 95.98\n", + "R^2 (train poly custom): 0.77\n", + "R^2 (test poly custom): 0.77\n", + "R^2 (train): 0.61\n", + "R^2 (test): 0.63\n" + ] + } + ], + "source": [ + "# Train the model\n", + "lr_poly_custom = LinearRegression()\n", + "lr = LinearRegression()\n", + "# fit the model\n", + "lr_poly_custom.fit(X_train_poly_custom, y_train)\n", + "lr.fit(X_train, y_train)\n", + "# predict values from the polynomial transformed features\n", + "predictions_poly_custom_train = lr_poly_custom.predict(X_train_poly_custom)\n", + "predictions_poly_custom = lr_poly_custom.predict(X_test_poly_custom)\n", + "# predict values from the original features\n", + "predictions_train = lr.predict(X_train)\n", + "predictions = lr.predict(X_test)\n", + "\n", + "# mean squared error\n", + "print(\"Mean squared error (train poly custom): {:.2f}\".format(mean_squared_error(y_train, predictions_poly_custom_train)))\n", + "print(\"Mean squared error (test poly custom): {:.2f}\".format(mean_squared_error(y_test, predictions_poly_custom)))\n", + "print(\"Mean 
squared error (train): {:.2f}\".format(mean_squared_error(y_train, predictions_train)))\n", + "print(\"Mean squared error (test): {:.2f}\".format(mean_squared_error(y_test, predictions)))\n", + "\n", + "# coefficient of determination (R^2)\n", + "print(\"R^2 (train poly custom): {:.2f}\".format(r2_score(y_train, predictions_poly_custom_train)))\n", + "print(\"R^2 (test poly custom): {:.2f}\".format(r2_score(y_test, predictions_poly_custom)))\n", + "print(\"R^2 (train): {:.2f}\".format(r2_score(y_train, predictions_train)))\n", + "print(\"R^2 (test): {:.2f}\".format(r2_score(y_test, predictions)))\n", + "\n" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "RBFs on the California Housing Prices dataset" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 48, + "outputs": [], + "source": [ + "# ToDO: implement the rbf_kernel() function\n", + "def rbf_kernel(X, centers, gamma):\n", + " pass" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 49, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Linear regression on original data:\n", + "MSE: 0.5558915986952443\n", + "R^2: 0.5757877060324508\n", + "\n", + "Linear regression on RBF-transformed data:\n", + "MSE: 0.37106446913117447\n", + "R^2: 0.7168330839511696\n" + ] + } + ], + "source": [ + "from sklearn.datasets import fetch_california_housing\n", + "from sklearn.preprocessing import StandardScaler\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.linear_model import LinearRegression\n", + "from sklearn.metrics import mean_squared_error, r2_score\n", + "\n", + "# Load the California Housing Prices dataset\n", + "data = fetch_california_housing()\n", + "X = data['data']\n", + "y = data['target']\n", + "\n", + "# ToDo: split the data into training and testing sets\n", + "\n", + "# ToDo: standardize the data\n", + "X_train_std = None\n", + "X_test_std = None\n", + "\n", + "# Choose the number of centroids and the RBF kernel width\n", + "num_centroids = 100\n", + "gamma = 0.1\n", + "\n", + "# Randomly select the centroids from the training set\n", + "np.random.seed(42)\n", + "idx = np.random.choice(X_train_std.shape[0], num_centroids, replace=False)\n", + "centroids = X_train_std[idx]\n", + "\n", + "# Compute the RBF features for the training and testing sets\n", + "rbf_train = rbf_kernel(X_train_std, centroids, gamma)\n", + "rbf_test = rbf_kernel(X_test_std, centroids, gamma)\n", + "\n", + "# Fit a linear regression model on the original and RBF-transformed data\n", + "linreg_orig = LinearRegression().fit(X_train_std, y_train)\n", + "linreg_rbf = LinearRegression().fit(rbf_train, y_train)\n", + "\n", + "# Evaluate the models on the testing set\n", + "y_pred_orig = linreg_orig.predict(X_test_std)\n", + "mse_orig = mean_squared_error(y_test, y_pred_orig)\n", + "r2_orig = r2_score(y_test, y_pred_orig)\n", + "\n", + "y_pred_rbf = linreg_rbf.predict(rbf_test)\n", + "mse_rbf = mean_squared_error(y_test, y_pred_rbf)\n", + "r2_rbf = r2_score(y_test, y_pred_rbf)\n", + "\n", + "# Print the results\n", + "print(\"Linear regression on original data:\")\n", + "print(\"MSE:\", mse_orig)\n", + "print(\"R^2:\", r2_orig)\n", + "\n", + "print(\"\\nLinear regression on RBF-transformed data:\")\n", + "print(\"MSE:\", mse_rbf)\n", + "print(\"R^2:\", r2_rbf)\n" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "# 
**(Bonus)** Multilayer Perceptron Algorithm for Regression of Concrete Compressive Strength Dataset" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Download the Concrete Compressive Strength Dataset from the UCI Machine Learning Repository." + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 50, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(1030, 9)\n" + ] + } + ], + "source": [ + "# Download the Concrete Compressive Strength Dataset from the UCI Machine Learning Repository.\n", + "import pandas as pd\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.preprocessing import StandardScaler\n", + "from sklearn.metrics import mean_squared_error\n", + "\n", + "import numpy as np\n", + "\n", + "# ToDo: load the dataset" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Preprocess the data" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 51, + "outputs": [], + "source": [ + "# Preprocess the data\n", + "\n", + "# ToDo: normalize the features\n", + "\n", + "# ToDo: split the data into training and testing sets" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Define the multilayer perceptron algorithm" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 52, + "outputs": [], + "source": [ + "# ToDo: Implement the functions in the MLP class\n", + "class MLP:\n", + " def __init__(self, input_dim, hidden_dim, output_dim, lr=0.01, epochs=1000):\n", + " pass\n" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Train the model" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 53, + "outputs": [], + "source": [ + "# Create an instance of the MLP class\n", + "mlp = MLP(input_dim=X_train.shape[1], hidden_dim=10, output_dim=1, lr=0.01, epochs=1000)\n", + "# Train the model\n", + "mlp.fit(X_train, y_train)" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Evaluate the model" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 54, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean Squared Error: 36.8911071801165\n" + ] + } + ], + "source": [ + "# Evaluate the model\n", + "y_pred = mlp.predict(X_test)\n", + "print(\"Mean Squared Error:\", mean_squared_error(y_test, y_pred))" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Compare the results with the linear regression model" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 19, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean Squared Error: 95.97548435337708\n" + ] + } + ], + "source": [ + "# ToDo: fit a linear regression model on the training data" + ], + "metadata": { + "collapsed": false + } + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + 
"version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +}