Skip to content

Commit

Permalink
final commit
Browse files Browse the repository at this point in the history
  • Loading branch information
camilaborinsky committed May 10, 2022
2 parents ac157f9 + 99a9593 commit 27af452
Show file tree
Hide file tree
Showing 12 changed files with 229 additions and 41 deletions.
Empty file added TP3/ex_1/__init__.py
Empty file.
26 changes: 17 additions & 9 deletions TP3/ex_1/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
from perceptrons.simple_linear_perceptron import SimpleLinearPerceptron
from perceptrons.non_linear_perceptron import NonLinearPerceptron

def main():
""" def main():
training_set = [[-1, 1], [1, -1], [-1, -1], [1, 1]]
expected_output = [[-1], [-1], [-1], [1]]
expected_output = [[1], [1], [-1], [-1]]
p = SimpleStepPerceptron(expected_output, training_set, 0.1)
#p = SimpleLinearPerceptron(expected_output, training_set, 0.1)
#p = NonLinearPerceptron(expected_output, training_set, 0.1)
Expand All @@ -19,9 +19,20 @@ def main():
with open("resources/perceptron_data.txt", "w") as f:
f.write("")
f.close()
w_min, i = p.learn(20, lambda i, error, weights: open("resources/perceptron_data.txt", "a").write("{}\t{}\t{}\n".format(i, error, weights)))
plot_decision_boundary(training_set, w_min, expected_output)
w_min, error, i = p.learn(20, lambda i, error, weights: open("resources/perceptron_data.txt", "a").write("{}\t{}\t{}\n".format(i, error, weights)))
plot_decision_boundary(training_set, w_min, expected_output) """

def main():
    """Run the exercise-1 perceptron experiment driven by a JSON config.

    Reads training data and hyper-parameters from ``config.json``, trains a
    SimpleStepPerceptron, logs each learning step (iteration, error, weights,
    tab-separated) to ``perceptron_data.txt``, and plots the resulting
    decision boundary.
    """
    # Paths are relative to the TP3 project root (run as `python -m ex_1.main`).
    config_file_path = "ex_1/resources/config.json"
    results_file_path = "ex_1/resources/perceptron_data.txt"
    # NOTE(review): assigned but never used in this function — presumably
    # consumed by main_iteration_vs_learning_rate; confirm before removing.
    learning_rates_results = "ex_1/resources/learning_rates.txt"
    # Truncate the per-step log so each run starts from an empty file.
    open(results_file_path, "w").close()

    # NOTE(review): `main.parse_config` / `main.plot_decision_boundary` —
    # inside this function the bare name `main` would resolve to the function
    # object itself unless a module named `main` is imported at file level
    # (imports are not visible in this view). Verify these attribute lookups
    # actually resolve; they look like they should be plain calls to
    # module-level helpers.
    training_set, expected_output, learn_rate, epoch_limit, execution_count, random_weights = main.parse_config(config_file_path)
    p = SimpleStepPerceptron(expected_output, training_set, learn_rate)
    # Iteration budget is epochs * examples; the lambda appends one
    # "iteration<TAB>error<TAB>weights" line per learning step.
    w_min, error_min, i = p.learn(epoch_limit*len(training_set), lambda i, error, weights: open(results_file_path, "a").write("{}\t{}\t{}\n".format(i, error, weights)), random_weights=random_weights)
    main.plot_decision_boundary(training_set, w_min, expected_output)



def main_iteration_vs_learning_rate():
Expand All @@ -36,15 +47,11 @@ def main_iteration_vs_learning_rate():
print("W min"+ str(w_min) + " iteration: "+ str(i))

iterations.append(i)
#TODO: a chequear esto, no estoy super segura que lo de desviacion estandar sea asi y con el avg...
error = np.std(iterations)
#where iterations is centered
avg_iterations = np.mean(iterations)
open("resources/learning_rates.txt", "a").write("{},{},{}\n".format(learn_rate, avg_iterations, error))

#plot_decision_boundary(training_set, w_min, expected_output)



def plot_decision_boundary(X, w, ex):

Expand Down Expand Up @@ -119,4 +126,5 @@ def iteration_vs_error(file_path):
i, e, w = line.strip().split("\t")
iterations.append(int(i))
errors.append(float(e))
return iterations, errors
return iterations, errors

7 changes: 7 additions & 0 deletions TP3/ex_1/resources/perceptron_data.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
0 4.0 [ 0.9 0.9 -0.9]
1 2.0 [ 2.7 -0.9 0.9]
2 2.0 [ 2.7 -0.9 0.9]
3 2.0 [0.9 0.9 2.7]
4 2.0 [0.9 0.9 2.7]
5 2.0 [0.9 0.9 2.7]
6 0.0 [2.7 2.7 0.9]
2 changes: 1 addition & 1 deletion TP3/ex_2/main_2_valid.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,4 @@ def main():
linear_perceptron = SimpleLinearPerceptron(expected, training, learn_rate=0.8)
linear_perceptron.learn(iteration_limit=500, callback=(lambda i, error, weights : write_error_vs_iteration("resources/ex_2/testing", error[0], i)))
error_vs_iteration("resources/ex_2/testing")
main()

2 changes: 1 addition & 1 deletion TP3/ex_3/resources/config.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
"activation_function": "sigmoid",
"path_to_data": "ex_2/resources/training",
"epoch_limit": 100,
"execution_count": 20,
"execution_count": 1,
"subproblem": 3,
"training_set": [
[1, 1],
Expand Down
10 changes: 0 additions & 10 deletions TP3/ex_3/resources/training/training_with_noise.txt

This file was deleted.

4 changes: 2 additions & 2 deletions TP3/final_1.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 31,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
Expand Down Expand Up @@ -1306,7 +1306,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
"version": "3.9.7"
},
"orig_nbformat": 4
},
Expand Down
12 changes: 7 additions & 5 deletions TP3/graphing.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#function that reads i, error and w[] from resources/perceptron_data.txt and animates the different weights
def w_evolution():
training_set = [[-1, 1], [1, -1], [-1, -1], [1, 1]]
expected_output = [-1, -1, -1, 1]
expected_output = [1, 1, -1, -1]
with open("resources/perceptron_data.txt", "r") as f:
lines = f.readlines()
i = []
Expand All @@ -15,10 +15,12 @@ def w_evolution():
split = line.split("\t")
i.append(int(split[0]))
error.append(float(split[1]))
split[2] = split[2].replace("\n", "").replace("[", "").replace("]", "")
split[2] = split[2].replace("\n", "").replace("[", "").replace("]", "").replace(" ", " ")
print("split[2] = " + split[2])
aux = []
for item in split[2].split(" "):
print("line = " + str(line))
for item in split[2].replace(" ", ",").replace(" ",",").split(","):
print("item = " + str(item))
aux.append(float(item))
w.append(aux)
f.close()
Expand Down Expand Up @@ -89,5 +91,5 @@ def error_vs_iteration(file_path, exp):
plt.show()


#w_evolution()
# iterations_vs_learning_rate()
w_evolution()
#iterations_vs_learning_rate()
31 changes: 31 additions & 0 deletions TP3/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# TP3: grupo 3
Integrantes:
* Ana Cruz
* Agustin Spitzner
* Camila Borinsky

## Estructura del proyecto
El proyecto se divide en cuatro partes principales. Por cada ejercicio del enunciado a resolver tenemos una carpeta que incluye las resoluciones particulares de cada ejercicio y los subíndices correspondientes. Por otro lado, tenemos las implementaciones de las distintas clases de perceptrones, todo dentro de la carpeta "perceptrons". También tenemos la carpeta utils, que contiene únicamente al archivo file_utils, que tiene funciones de parseo. Por último, en el root tenemos múltiples archivos de Jupyter Notebook, que sirven para hacer demostraciones de las resoluciones de los distintos ejercicios y sus resultados.

## Notebooks
El proyecto contiene tres tipos de notebooks: final, results y setup. Los cuadernos de setup son bastante simples y los creamos con el objetivo de borrar el contenido de distintos archivos de texto que usamos para ir guardando información de varias corridas. Queremos poder hacer este reset cuando queremos abrir un espacio de corridas nuevo. Los cuadernos de nombre final_x son los responsables de correr los fragmentos de código necesarios para ejecutar las corridas del ejercicio x. Para estos hay varios casos que requieren de varias corridas de un mismo algoritmo para luego calcular promedios. Estas corridas se guardan en archivos de texto dentro de la carpeta del ejercicio correspondiente. Por último, los cuadernos de results son para visualizar distintos gráficos relevantes de cada ejercicio.

## Ejercicio 1
Para correr el ejercicio 1, ya sea para probar el XOR o el AND, hay que correr desde el root del proyecto (léase, estar parado en TP3) lo siguiente:
```
python -m ex_1.main
```
Para elegir qué conjunto de entrenamiento usar (si el AND o el XOR), podemos entrar a la carpeta ex_1/resources y en el archivo config.json cambiar los parámetros necesarios.

## Ejercicio 2
Correr el ejercicio 2 es bastante análogo al ejercicio 1: simplemente hay que poblar el config.json que está dentro de la carpeta ex_2/resources y luego correr:
```
python -m ex_2.main
```
En cuanto al archivo de configuración, tenemos una variable booleana que indica si se quiere usar el método de momentum o no, un entero llamado cross_validation que sería el k de validación cruzada. Es decir, la cantidad de subconjuntos que se van a armar a partir del conjunto de entrenamiento.


## Ejercicio 3
Para el ejercicio 3, la manera ideal de conseguir los resultados es mediante el cuaderno "results_3". Previamente se debe modificar el config.json dentro de la carpeta de ex_3 para indicar si el subejercicio a correr es el 2 o el 3, así como cualquier otro parámetro.


11 changes: 0 additions & 11 deletions TP3/resources/perceptron_data.txt
Original file line number Diff line number Diff line change
@@ -1,11 +0,0 @@
0 4.0 [-0.1 0.1 0.1]
1 2.0 [ 0.1 0.3 -0.1]
2 2.0 [ 0.1 0.3 -0.1]
3 2.0 [ 0.1 0.3 -0.1]
4 2.0 [ 0.1 0.3 -0.1]
5 2.0 [ 0.1 0.3 -0.1]
6 2.0 [ 0.1 0.3 -0.1]
7 2.0 [ 0.1 0.3 -0.1]
8 2.0 [ 0.1 0.3 -0.1]
9 2.0 [ 0.3 0.1 0.1]
10 0.0 [ 0.1 0.3 0.3]
100 changes: 99 additions & 1 deletion TP3/results_1.ipynb

Large diffs are not rendered by default.

65 changes: 64 additions & 1 deletion TP3/results_3.ipynb

Large diffs are not rendered by default.

0 comments on commit 27af452

Please sign in to comment.