diff --git a/LinearRegression.r b/LinearRegression.r
index 2d3c8f1..b43bf88 100644
--- a/LinearRegression.r
+++ b/LinearRegression.r
@@ -233,7 +233,7 @@ for(i in 1:10)
 
   trSize = 10 * i;
   trainingData <- head(dataS , trSize)
-  trmodel <- lm(y ~ x1 + x2 +x3 +x4 +x5 +x6 +x7 , data =dataS , x=T , y=T)
+  trmodel <- lm(y ~ x1 + x2 +x3 +x4 +x5 +x6 +x7 , data =trainingData , x=T , y=T)
   predictedY <- predict ( trmodel , trainingData)
   predictedY
 
@@ -287,4 +287,4 @@ for(i in 1: 10)
 }
 
 plot(MSE)
-lines(x = MSE , y= NULL , type ="l" , col="blue")
\ No newline at end of file
+lines(x = MSE , y= NULL , type ="l" , col="blue")
diff --git a/Neural_Network.ipynb b/Neural_Network.ipynb
index 7449d0a..ffe3081 100644
--- a/Neural_Network.ipynb
+++ b/Neural_Network.ipynb
@@ -4,15 +4,7 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "(3, 4)\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "#Source code for Artificial Neural Network using python\n",
     "import numpy as np\n",
@@ -27,7 +19,7 @@
     "\n",
     "#Derivative of Sigmoid Function\n",
     "def derivatives_sigmoid(x):\n",
-    " return x * (1 - x)\n",
+    " return sigmoid(x) * (1 - sigmoid(x))\n",
     "\n",
     "#Variable initialization\n",
     "epoch=5000 #Setting training iterations\n",
@@ -45,12 +37,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 2,
+   "metadata": {},
    "outputs": [],
    "source": [
+    "\n",
     "for i in range(epoch):\n",
     "#Forward Propogation\n",
     " hidden_layer_input1=np.dot(X,wh)\n",
@@ -58,66 +49,40 @@
     " hiddenlayer_activations = sigmoid(hidden_layer_input)\n",
     " output_layer_input1=np.dot(hiddenlayer_activations,wout)\n",
     " output_layer_input= output_layer_input1+ bout\n",
-    " output = sigmoid(output_layer_input)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    " output = sigmoid(output_layer_input)\n",
+    " \n",
     "#Backpropagation\n",
-    "E = y-output\n",
-    "slope_output_layer = derivatives_sigmoid(output)\n",
-    "slope_hidden_layer = derivatives_sigmoid(hiddenlayer_activations)\n",
-    "d_output = E * slope_output_layer\n",
-    "Error_at_hidden_layer = d_output.dot(wout.T)\n",
-    "d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer\n",
-    "wout += hiddenlayer_activations.T.dot(d_output) *lr\n",
-    "bout += np.sum(d_output, axis=0,keepdims=True) *lr\n",
-    "wh += X.T.dot(d_hiddenlayer) *lr\n",
-    "bh += np.sum(d_hiddenlayer, axis=0,keepdims=True) *lr"
+    " E = y-output\n",
+    " slope_output_layer = derivatives_sigmoid(output)\n",
+    " slope_hidden_layer = derivatives_sigmoid(hiddenlayer_activations)\n",
+    " d_output = E * slope_output_layer\n",
+    " Error_at_hidden_layer = d_output.dot(wout.T)\n",
+    " d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer\n",
+    " wout += hiddenlayer_activations.T.dot(d_output) *lr\n",
+    " bout += np.sum(d_output, axis=0,keepdims=True) *lr\n",
+    " wh += X.T.dot(d_hiddenlayer) *lr\n",
+    " bh += np.sum(d_hiddenlayer, axis=0,keepdims=True) *lr"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[[0.82938945]\n",
-      " [0.84310929]\n",
-      " [0.83800391]]\n"
+      "[[0.9980349 ]\n",
+      " [0.99553424]\n",
+      " [0.00723341]]\n"
     ]
    }
   ],
    "source": [
+    "\n",
     "print (output)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -136,7 +101,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.3"
+   "version": "3.7.0"
   }
 },
 "nbformat": 4,
diff --git a/README.md b/README.md
index ee23961..7e1a522 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,5 @@
 # MLA
-Machine Learning and Algorithms Codes for BE IT.
-
-
-Codes are contributed by Varad and Stony.
+Machine Learning and Applications Codes SPPU BE IT 2015 Course
 
 `my_data.txt` is the dataset for Linear Regression.
 