Example: Deep Learning with Python (German)





In this video, I use deep learning to recognize handwritten digits in images.
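
The video itself is not reproduced here, but the task it describes is the classic MNIST digit-classification problem. The following is a minimal sketch of how such a model is commonly built in Keras; the exact architecture, layer sizes, and hyperparameters used in the video may differ.

    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense

    # Load the MNIST handwritten-digit data set (28x28 grayscale images, labels 0-9)
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    # Scale pixel values to [0, 1] and flatten each image into a 784-dimensional vector
    X_train = X_train.reshape(-1, 784).astype("float32") / 255.0
    X_test = X_test.reshape(-1, 784).astype("float32") / 255.0

    # A small fully connected network; the layer sizes here are illustrative
    model = Sequential()
    model.add(Dense(units = 128, activation = "relu", input_dim = 784))
    model.add(Dense(units = 10, activation = "softmax"))
    model.compile(optimizer = "adam", loss = "sparse_categorical_crossentropy", metrics = ["accuracy"])

    model.fit(X_train, y_train, batch_size = 32, epochs = 5)
    print(model.evaluate(X_test, y_test))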




Comment List

  • Johannes Wenisch
    December 17, 2020

    Thank you very much. I have watched many videos about deep learning, and yours made many of the terms and workflows clear to me. Thanks a lot!

  • Johannes Wenisch
    December 17, 2020

    It works, but anyone can get 99 % on such a simple task. I get 85 %, but with 11 variables, of which the first three are irrelevant and are not taken into account. Can you improve on that?
    You can build the data set yourself with:
    id
    city
    country
    bankScore
    gender
    age
    Tenure
    Balance
    NumOfProducts
    how many credit cards
    active or not
    how much money he has
    And: will this customer leave us, or not?

    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd

    # Import the data set
    dataset = pd.read_csv('Churn_Modelling.csv')

    X = dataset.iloc[:, 3:13].values  # features: CreditScore through EstimatedSalary
    y = dataset.iloc[:, 13].values    # target: Exited (did the customer leave?)

    # Encode categorical data
    from sklearn.preprocessing import LabelEncoder
    labelencoder_X_1 = LabelEncoder()
    X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])  # Geography column
    labelencoder_X_2 = LabelEncoder()
    X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])  # Gender column

    # In newer scikit-learn versions, OneHotEncoder's categorical_features argument is DEPRECATED
    #onehotencoder = OneHotEncoder(categorical_features=[1])
    #X = onehotencoder.fit_transform(X).toarray()

    from sklearn.preprocessing import OneHotEncoder
    from sklearn.compose import ColumnTransformer

    transformer = ColumnTransformer(
        transformers=[
            ("Churn_Modelling",                 # a name for the transformation
             OneHotEncoder(categories='auto'),  # the transformer class to apply
             [1])                               # the columns to transform
        ],
        remainder='passthrough'
    )

    X = transformer.fit_transform(X)
    X = X[:, 1:]  # drop the first dummy column to avoid the dummy variable trap

    # Split the data set into a training set and a test set
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

    # Feature scaling
    from sklearn.preprocessing import StandardScaler
    sc_X = StandardScaler()
    X_train = sc_X.fit_transform(X_train)
    X_test = sc_X.transform(X_test)

    # Part 2 – Build the ANN

    # Import Keras and additional libraries
    import keras
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import Dropout

    # Initialize the ANN
    classifier = Sequential()

    # Add the input layer and the first hidden layer
    classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu", input_dim = 11))
    classifier.add(Dropout(rate = 0.1))  # Keras 2 API: rate=, not p=

    # Add the second hidden layer
    classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu"))
    classifier.add(Dropout(rate = 0.1))

    # Add the output layer
    classifier.add(Dense(units = 1, kernel_initializer = "uniform", activation = "sigmoid"))

    # Compile the ANN
    classifier.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])

    # Fit the ANN to the training set
    classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)

    # Part 3 – Evaluate the model and make final predictions

    # Predict the results on the test set
    y_pred = classifier.predict(X_test)
    y_pred = (y_pred>0.5)

    # Build a confusion matrix
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix(y_test, y_pred)
    print((cm[0][0]+cm[1][1])/cm.sum())  # accuracy = (TN + TP) / total

    ## Part 4 – Evaluate, improve, and tune the ANN

    ### Evaluate the ANN
    from keras.wrappers.scikit_learn import KerasClassifier
    from sklearn.model_selection import cross_val_score
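    # Note: keras.wrappers.scikit_learn has been removed from recent Keras/TensorFlow
    # releases. On current installations, an equivalent wrapper is provided by the
    # separate scikeras package (pip install scikeras), e.g.:
    #   from scikeras.wrappers import KerasClassifier
    #   classifier = KerasClassifier(model = build_classifier, batch_size = 10, epochs = 100)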

    def build_classifier():
        classifier = Sequential()
        classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu", input_dim = 11))
        classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu"))
        classifier.add(Dense(units = 1, kernel_initializer = "uniform", activation = "sigmoid"))
        classifier.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])
        return classifier

    classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)  # Keras 2 uses epochs=, not nb_epoch=
    accuracies = cross_val_score(estimator=classifier, X = X_train, y = y_train, cv = 10, n_jobs=-1, verbose = 1)

    mean = accuracies.mean()
    std_dev = accuracies.std()  # standard deviation across the 10 folds (std(), not the variance)

    ### Improve the ANN
    #### Dropout regularization to reduce overfitting
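    # (The corresponding code is already in Part 2 above: Dropout(rate = 0.1) after
    # each hidden layer randomly disables 10 % of that layer's units during each
    # training update, which reduces overfitting.)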

    ### Tune the ANN
    from sklearn.model_selection import GridSearchCV  # formerly in sklearn.grid_search

    def build_classifier(optimizer):
        classifier = Sequential()
        classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu", input_dim = 11))
        classifier.add(Dense(units = 6, kernel_initializer = "uniform", activation = "relu"))
        classifier.add(Dense(units = 1, kernel_initializer = "uniform", activation = "sigmoid"))
        classifier.compile(optimizer = optimizer, loss = "binary_crossentropy", metrics = ["accuracy"])
        return classifier

    classifier = KerasClassifier(build_fn = build_classifier)

    parameters = {
        'batch_size' : [25, 32],
        'epochs' : [100, 500],  # Keras 2 uses epochs, not nb_epoch
        'optimizer' : ['adam', 'rmsprop']
    }

    grid_search = GridSearchCV(estimator = classifier,
                               param_grid = parameters,
                               scoring = 'accuracy',
                               cv = 10)
    grid_search = grid_search.fit(X_train, y_train)
    best_parameters = grid_search.best_params_
    best_accuracy = grid_search.best_score_

    # Predict a single new customer; use the refitted best model from the grid search
    # (the bare KerasClassifier above was never fitted, so classifier.predict would fail)
    new_prediction = grid_search.predict(sc_X.transform(np.array([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])))
    new_prediction = (new_prediction > 0.5)
    print(new_prediction)

  • Johannes Wenisch
    December 17, 2020

    What are those 32 filters? How does one know that this is the number to use here?
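
    For context: the question presumably refers to a convolutional layer such as Conv2D(32, ...) in the video. The 32 is the number of filters (feature maps) the layer learns; it is a hyperparameter chosen by convention and experiment (powers of two such as 16, 32, or 64 are common starting points), not something derived from the data. A minimal sketch, with an architecture that is illustrative rather than taken from the video:

    from keras.models import Sequential
    from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

    model = Sequential()
    # 32 filters of size 3x3: each filter learns to detect one local pattern,
    # producing 32 feature maps from a 28x28 grayscale input image
    model.add(Conv2D(32, (3, 3), activation = "relu", input_shape = (28, 28, 1)))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Flatten())
    model.add(Dense(units = 10, activation = "softmax"))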

  • Johannes Wenisch
    December 17, 2020

    Awesome (y)
