File size: 470 Bytes
6377159
 
 
d23d936
6377159
 
 
 
 
 
 
ffc3a3a
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
from sklearn.datasets import load_iris
from sklearn.preprocessing import OneHotEncoder, StandardScaler

def iris():
    """
    Load the iris dataset and return it as a tuple ``(x, y)`` of
    numpy arrays, where:

    - ``x`` is the feature matrix standardized to zero mean and unit
      variance per column (via ``StandardScaler``), and
    - ``y`` is the one-hot-encoded target matrix (via ``OneHotEncoder``),
      densified with ``.toarray()``.

    NOTE: no train/test split is performed here (the previous docstring
    claimed one); callers must split the returned arrays themselves.
    """
    # Renamed from `iris` to avoid shadowing this function's own name.
    dataset = load_iris()
    # Standardize features: zero mean, unit variance per column.
    x = StandardScaler().fit_transform(dataset.data)
    # One-hot encode the integer class labels; reshape(-1, 1) turns the
    # 1-D label vector into the 2-D column the encoder requires.
    y = OneHotEncoder().fit_transform(dataset.target.reshape(-1, 1)).toarray()
    return x, y