Jovian
⭐️
Sign In
In [19]:
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
In [20]:
iris = datasets.load_iris()
print('Loaded Iris Dataset')
Loaded Iris Dataset
In [21]:
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.5)
print('Dataset Split')
print('Train: ', x_train.shape, y_train.shape)
print('Test: ', x_test.shape, y_test.shape)
Dataset Split Train: (75, 4) (75,) Test: (75, 4) (75,)
In [22]:
for i in range(len(iris.target_names)):
    print('Label ', i, ' - ', str(iris.target_names[i]))
Label 0 - setosa Label 1 - versicolor Label 2 - virginica
In [23]:
classifier = KNeighborsClassifier(n_neighbors=2)
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)

for r in range(len(x_test)):
    print('Sample: ', str(x_test[r]), '  Actual: ', str(y_test[r]), '  Prediction: ', str(y_pred[r]))
print('Classifier Accuracy: ', classifier.score(x_test, y_test))
Sample: [6. 2.9 4.5 1.5] Actual: 1 Prediction: 1 Sample: [6.5 3. 5.2 2. ] Actual: 2 Prediction: 2 Sample: [6.5 2.8 4.6 1.5] Actual: 1 Prediction: 1 Sample: [5.8 2.7 5.1 1.9] Actual: 2 Prediction: 2 Sample: [6.6 3. 4.4 1.4] Actual: 1 Prediction: 1 Sample: [5.5 3.5 1.3 0.2] Actual: 0 Prediction: 0 Sample: [6.1 2.8 4. 1.3] Actual: 1 Prediction: 1 Sample: [5. 3. 1.6 0.2] Actual: 0 Prediction: 0 Sample: [5.4 3. 4.5 1.5] Actual: 1 Prediction: 1 Sample: [5. 3.4 1.5 0.2] Actual: 0 Prediction: 0 Sample: [6.5 3. 5.5 1.8] Actual: 2 Prediction: 2 Sample: [5.3 3.7 1.5 0.2] Actual: 0 Prediction: 0 Sample: [5. 3.2 1.2 0.2] Actual: 0 Prediction: 0 Sample: [6.8 2.8 4.8 1.4] Actual: 1 Prediction: 1 Sample: [5.9 3. 4.2 1.5] Actual: 1 Prediction: 1 Sample: [7.4 2.8 6.1 1.9] Actual: 2 Prediction: 2 Sample: [7.2 3. 5.8 1.6] Actual: 2 Prediction: 2 Sample: [6.6 2.9 4.6 1.3] Actual: 1 Prediction: 1 Sample: [5. 3.6 1.4 0.2] Actual: 0 Prediction: 0 Sample: [6.4 2.7 5.3 1.9] Actual: 2 Prediction: 2 Sample: [5.5 2.4 3.8 1.1] Actual: 1 Prediction: 1 Sample: [6.2 2.9 4.3 1.3] Actual: 1 Prediction: 1 Sample: [7.7 2.8 6.7 2. ] Actual: 2 Prediction: 2 Sample: [4.8 3.1 1.6 0.2] Actual: 0 Prediction: 0 Sample: [6. 3. 4.8 1.8] Actual: 2 Prediction: 1 Sample: [5.1 3.5 1.4 0.3] Actual: 0 Prediction: 0 Sample: [6.7 3.1 4.4 1.4] Actual: 1 Prediction: 1 Sample: [5.5 2.6 4.4 1.2] Actual: 1 Prediction: 1 Sample: [4.8 3. 1.4 0.1] Actual: 0 Prediction: 0 Sample: [6.4 3.2 5.3 2.3] Actual: 2 Prediction: 2 Sample: [6.3 3.3 6. 2.5] Actual: 2 Prediction: 2 Sample: [7.7 2.6 6.9 2.3] Actual: 2 Prediction: 2 Sample: [5.5 4.2 1.4 0.2] Actual: 0 Prediction: 0 Sample: [4.6 3.4 1.4 0.3] Actual: 0 Prediction: 0 Sample: [6.8 3. 5.5 2.1] Actual: 2 Prediction: 2 Sample: [5.8 2.7 3.9 1.2] Actual: 1 Prediction: 1 Sample: [7.2 3.2 6. 1.8] Actual: 2 Prediction: 2 Sample: [6. 2.2 5. 1.5] Actual: 2 Prediction: 1 Sample: [5.7 2.6 3.5 1. ] Actual: 1 Prediction: 1 Sample: [5.6 3. 4.5 1.5] Actual: 1 Prediction: 1 Sample: [6.2 3.4 5.4 2.3] Actual: 2 Prediction: 2 Sample: [6.5 3.2 5.1 2. ] Actual: 2 Prediction: 1 Sample: [4.6 3.2 1.4 0.2] Actual: 0 Prediction: 0 Sample: [4.9 3.6 1.4 0.1] Actual: 0 Prediction: 0 Sample: [5.8 4. 1.2 0.2] Actual: 0 Prediction: 0 Sample: [4.6 3.1 1.5 0.2] Actual: 0 Prediction: 0 Sample: [5.2 2.7 3.9 1.4] Actual: 1 Prediction: 1 Sample: [4.7 3.2 1.6 0.2] Actual: 0 Prediction: 0 Sample: [4.9 3. 1.4 0.2] Actual: 0 Prediction: 0 Sample: [4.9 2.4 3.3 1. ] Actual: 1 Prediction: 1 Sample: [5.8 2.8 5.1 2.4] Actual: 2 Prediction: 2 Sample: [5.6 2.7 4.2 1.3] Actual: 1 Prediction: 1 Sample: [5.1 3.3 1.7 0.5] Actual: 0 Prediction: 0 Sample: [6.3 2.8 5.1 1.5] Actual: 2 Prediction: 1 Sample: [6.1 2.9 4.7 1.4] Actual: 1 Prediction: 1 Sample: [5.7 2.8 4.5 1.3] Actual: 1 Prediction: 1 Sample: [5.1 3.8 1.5 0.3] Actual: 0 Prediction: 0 Sample: [7. 3.2 4.7 1.4] Actual: 1 Prediction: 1 Sample: [5.8 2.6 4. 1.2] Actual: 1 Prediction: 1 Sample: [5.1 3.7 1.5 0.4] Actual: 0 Prediction: 0 Sample: [4.6 3.6 1. 0.2] Actual: 0 Prediction: 0 Sample: [5.8 2.7 4.1 1. ] Actual: 1 Prediction: 1 Sample: [6.8 3.2 5.9 2.3] Actual: 2 Prediction: 2 Sample: [6. 2.2 4. 1. ] Actual: 1 Prediction: 1 Sample: [5.5 2.5 4. 1.3] Actual: 1 Prediction: 1 Sample: [5.2 4.1 1.5 0.1] Actual: 0 Prediction: 0 Sample: [5.7 3.8 1.7 0.3] Actual: 0 Prediction: 0 Sample: [4.8 3.4 1.6 0.2] Actual: 0 Prediction: 0 Sample: [6.9 3.2 5.7 2.3] Actual: 2 Prediction: 2 Sample: [5.2 3.4 1.4 0.2] Actual: 0 Prediction: 0 Sample: [6.3 2.7 4.9 1.8] Actual: 2 Prediction: 2 Sample: [5.4 3.4 1.7 0.2] Actual: 0 Prediction: 0 Sample: [4.4 2.9 1.4 0.2] Actual: 0 Prediction: 0 Sample: [7.9 3.8 6.4 2. ] Actual: 2 Prediction: 2 Sample: [6.9 3.1 4.9 1.5] Actual: 1 Prediction: 1 Classifier Accuracy: 0.9466666666666667
In [24]:
from sklearn.metrics import classification_report, confusion_matrix
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred))
print()
print('Accuracy Metrics')
print(classification_report(y_test, y_pred))
Confusion Matrix [[27 0 0] [ 0 26 0] [ 0 4 18]] Accuracy Metrics precision recall f1-score support 0 1.00 1.00 1.00 27 1 0.87 1.00 0.93 26 2 1.00 0.82 0.90 22 accuracy 0.95 75 macro avg 0.96 0.94 0.94 75 weighted avg 0.95 0.95 0.95 75
In [ ]: