JOLIMAITRE Matthieu 2024-05-09 05:11:09 +02:00
parent 1395fdd566
commit ca18a5ada7
8 changed files with 379 additions and 0 deletions

ia/tp5/.gitignore vendored Normal file

@@ -0,0 +1 @@
/venv

ia/tp5/questions.md Normal file

@@ -0,0 +1,88 @@
# TP5 AI Questions
> Matthieu Jolimaitre
## Part 1
### 1. According to the fitted model, what is the expected temperature if the power is 120W?
```
f(120) = 64.61248085407703
```
which means that the expected temperature for a power of 120W is 64.61°.
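For reference, `f` is the affine model fitted by least squares in `ex1_regression1.py`:
```
f(x) = w_0 + w_1 x
```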
### 2. What is the RMSE and what is its unit?
```
RMSE = 5.662048552655999
```
The RMSE is a deviation between measured and predicted temperatures, so its unit is degrees.
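As computed in `ex1_regression1.py`, with `f(x_i)` the model prediction:
```
\mathrm{RMSE} = \sqrt{\frac{1}{N} \sum_{i=1}^{N} \left( y_i - f(x_i) \right)^2}
```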
### 3. Which temperature value has the largest approximation error?
```
Maximal error = 14.337368012120734
for (x, y) = (153.89061334817407, 91.90827168300679)
```
The temperature value with the largest approximation error is 91.91°, reached for a
power of 153.89W.
### 4. Is the model better or worse than on the previous dataset? Why is this model suited, or not suited, to these data?
```
f(120) = 44.42433903389104
RMSE = 13.428963137234206
Maximal error = 47.681726938444285
for (x, y) = (179.85011085382266, 113.05473219321803)
```
The model is worse than on the previous dataset: the RMSE is higher. The model is
not suited to these data because it does not follow their trend, which looks
parabolic or exponential rather than linear.
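As an illustration only (not part of the submitted scripts), the same normal-equation solve could fit a quadratic model by adding an `x**2` column to the data matrix; `fit_quadratic` is a hypothetical helper, and `x`, `y` are assumed to be the arrays from `ex1_regression2.py`:
```
import numpy as np

def fit_quadratic(x, y):
    """Least-squares fit of y ~ w0 + w1*x + w2*x**2 via the normal equations."""
    X = np.ones((len(x), 3))
    X[:, 1] = x
    X[:, 2] = x ** 2  # extra quadratic feature compared to the linear model
    return np.linalg.inv(X.T @ X) @ X.T @ y

# usage with the arrays from ex1_regression2.py:
# w = fit_quadratic(x, y)
# y_pred = w[0] + w[1] * x + w[2] * x ** 2
```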
## Part 2
### 5. In how many epochs does the perceptron converge?
```
M=1, score=0.966
M=2, score=0.974
M=3, score=1.0
```
The perceptron converges in 3 epochs (the score reaches 1.0 at M=3).
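The perceptron script itself is one of the diffs suppressed at the end of this commit; as a reminder, here is a minimal sketch of the classic perceptron update rule (names, shapes and the {-1, +1} label convention are assumptions, not the actual TP5 code):
```
import numpy as np

def perceptron_epoch(X, y, w, rho):
    """One epoch: nudge the hyperplane toward every misclassified sample."""
    for xi, yi in zip(X, y):
        if yi * (w @ xi) <= 0:  # sample on the wrong side of the boundary
            w = w + rho * yi * xi
    return w
```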
### 6. In how many epochs does the perceptron converge?
```
...
M=1, score=0.908
M=2, score=0.922
M=3, score=0.924
M=4, score=0.916
M=5, score=0.912
M=6, score=0.93
...
```
The perceptron seems to converge in about 5 epochs, after which the score regresses and fluctuates instead of improving.
### 7. What are these metrics on this new dataset with M = 25 epochs and a learning rate ρ = 0.01? What is the loss defined by Equation 3 that the Perceptron minimizes?
```
precision = 0.9766666666666667
recall = 0.8987730061349694
F1 = 0.4680511182108626
```
```
loss = 40
```
The perceptron could not converge any further and reached a loss of 40, which
means that 40 individuals were misclassified.
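Using the standard definitions (an assumption here, since Equation 3 and the metric code are not reproduced in this diff):
```
\mathrm{precision} = \frac{TP}{TP + FP}, \qquad
\mathrm{recall} = \frac{TP}{TP + FN}, \qquad
F_1 = \frac{2 \cdot \mathrm{precision} \cdot \mathrm{recall}}{\mathrm{precision} + \mathrm{recall}}
```
With the precision and recall reported above, the standard F1 would be about 0.94; the reported 0.468 is exactly half of that, which suggests a dropped factor of 2 in the computation.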

ia/tp5/requirements.txt Normal file

@@ -0,0 +1,2 @@
numpy
matplotlib

ia/tp5/setup.sh Executable file

@@ -0,0 +1,7 @@
#!/bin/sh
cd "$(dirname "$(realpath "$0")")"
python3 -m venv venv
. venv/bin/activate # "source" is a bashism; POSIX sh uses "."
pip install -r requirements.txt

ia/tp5/src/ex1_regression1.py Executable file

@@ -0,0 +1,56 @@
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
x = np.array([107.01819675010165,142.97461762841453,128.3592388893137,178.48859894769942,105.07512391405761,133.9662346986653,106.88633746415002,165.9304901016704,175.27615886513382,99.8473974473511,152.9242549507464,63.265825397243646,77.1539782923285,170.32756297804593,59.2346875657253,61.32680896120029,52.62839166724234,158.24057992123193,151.16037762348054,163.1015792720865,177.22038449025933,153.89061334817407,109.99231709288114,151.46879291723923,65.37567536296132,133.1897327725781,68.63592736317604,172.8069592164459,70.97023732504951,103.90605219876807,84.3922295736015,150.65037962644817,109.29954318815132,134.9040823105018,52.44267405672617,130.29261461986403,129.57244395391479,130.2014195937184,172.68725020690113,138.63663888345286,96.73602707459219,106.81415399391439,140.69205547054443,57.82931131180508,136.6796730079368,137.18292305036073,77.34973293959932,66.76041869513094,91.00568562014391,97.28240022254093,])
y = np.array([58.604012802664876,61.568938988109096,63.9339339649278,82.30116210624541,60.16795400802,64.93076361223594,58.199364484080114,78.94874665221919,96.59629220870175,55.08553593594234,80.59085994712159,42.765533811305836,51.2101273466327,89.89235901983224,40.98731129137799,41.45808065513656,38.63780451473805,73.0967722436298,71.29285705472836,79.97576728127771,92.61811453529036,91.90827168300679,61.526181495163584,63.194648094263655,43.7942773055833,64.14802974784072,44.56028490073529,89.47188535978337,44.40026349403853,59.98046502154318,48.28512591278614,84.23051603949706,62.02917662565915,62.1335255880633,38.81183960757261,62.94408352854921,79.54097547379997,77.90871790408256,71.16968980369258,74.30802693800463,55.39297558330232,62.1570061057422,75.8196079986226,41.200254600443635,77.03603317805609,67.24949240498717,50.174757752678055,43.80774399266216,56.51718964090277,56.610351455109445,])
N = len(x)
#
# step 01
#
# - build data matrix X
# - compute optimal w
#
X = np.ones((N, 2))
X[:, 1] = x
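# closed-form least-squares solution via the normal equations: w = (X^T X)^-1 X^T y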
w = np.linalg.inv(X.T @ X) @ X.T @ y
#
# step 02
#
# - plot the model as the line y = w0 + w1 * x
#
plt.figure()
plt.scatter(x, y)
x_min = np.min(x)
x_max = np.max(x)
x_plot = np.linspace(x_min, x_max, 100)
y_plot = w[0] + w[1] * x_plot
plt.plot(x_plot, y_plot, color='orange')
#
# step 03
#
# - predict new value f(x)
# - compute RMSE
# - compute maximal error
#
prediction = w[0] + w[1] * 120
print(f'f(120) = {prediction}')
y_pred = X @ w
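# RMSE: square root of the mean squared residual over the N training points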
rmse = np.sqrt(np.sum((y - y_pred) ** 2) / N)
print(f'RMSE = {rmse}')
idx = np.argmax(np.abs(y - y_pred))
max_error = np.abs(y[idx] - y_pred[idx])
print(f'Maximal error = {max_error}')
print(f'for (x, y) = ({x[idx]}, {y[idx]})')
plt.show()

ia/tp5/src/ex1_regression2.py Executable file

@@ -0,0 +1,41 @@
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
x = np.array([140.02910671010955,144.28305637655325,115.17216965047129,174.2908725140191,133.71872589985287,105.10115631256336,128.83111783663017,52.49511578021336,89.2047261676914,135.82255987404903,87.71008893735774,130.3420057698494,105.7399311229496,67.61162834891853,88.776702374284,124.09543839116444,126.81345896226252,124.66228235044524,134.91610658142736,134.77342510021955,106.08439660641662,166.5510574606382,97.78304310622656,106.66244028453148,165.95003615203737,154.80521857599115,141.5055158602476,63.02949535059915,169.53273978680755,142.85136894138446,179.85011085382266,69.42827960553919,162.85638745786787,71.12408150792874,130.02274335689975,66.0965977704274,160.24106981189047,154.9514646342514,123.98309601989713,102.93382863937995,58.991709409167946,140.66574050879328,108.96054874814895,143.86722793114524,162.6297023707218,176.81779565037516,161.25443451103945,51.522830944050256,96.7971483821873,144.89877311512754,])
y = np.array([42.81605146528971,36.28330545155218,31.68572329595792,76.93511586659231,39.67091689151953,36.75433594698061,43.668607504626735,35.36147584081617,37.15391917695597,54.600834054029775,39.53711133759362,40.70788477452493,31.238140682578088,32.803846883928486,33.26497225482958,32.23737408256554,26.644077012420333,35.07966603555466,27.18589689633954,34.824784598255725,30.995750167590458,87.49142265447308,35.718581012669205,32.94444212403231,89.28741953891095,38.377360047677826,47.934251797831806,33.86693859748784,72.15159937429533,52.39883472306155,113.05473219321803,32.358600372044044,71.84653139192467,36.770050235085684,34.448886069864834,35.7827916825874,59.57702285934876,61.263901673061625,35.90307044444734,36.06547033199114,34.36729122240489,48.69663447942126,35.42784626944333,34.556197150504495,51.345881947334895,82.19073242621832,50.649674506375675,34.96619043312894,38.77250612852849,48.018188311243144,])
N = len(x)
#
# step 04
#
# - repeat the fit, plot, prediction, RMSE and maximal error on the second dataset
#
X = np.ones((N, 2))
X[:, 1] = x
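# same normal-equation least-squares solve as in ex1_regression1.py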
w = np.linalg.inv(X.T @ X) @ X.T @ y
plt.figure()
plt.scatter(x, y)
x_min = np.min(x)
x_max = np.max(x)
x_plot = np.linspace(x_min, x_max, 100)
y_plot = w[0] + w[1] * x_plot
plt.plot(x_plot, y_plot, color='orange')
prediction = w[0] + w[1] * 120
print(f'f(120) = {prediction}')
y_pred = X @ w
rmse = np.sqrt(np.sum((y - y_pred) ** 2) / N)
print(f'RMSE = {rmse}')
idx = np.argmax(np.abs(y - y_pred))
max_error = np.abs(y[idx] - y_pred[idx])
print(f'Maximal error = {max_error}')
print(f'for (x, y) = ({x[idx]}, {y[idx]})')
plt.show()

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long