This post walks through a Keras example; all of the code below can be run end to end and is shared here as a reference.

Overview

https://blog.csdn.net/liuxiao214/article/details/79027109

When there is time, a summary of Keras's features and how to use them will be added here; for now this section is just a placeholder, and the original write-up at the link above can be used as a reference.

Keras offers two different ways of building models:


Sequential models: used for simple models; you just keep adding layers to an existing model, one after the other.
Functional API: Keras's functional API is very powerful; it lets you build more complex models such as multi-output models, directed acyclic graphs, and so on (a short sketch of the same network in this style follows the Sequential definition below).
 

Below is the model definition. If you want to add a layer, you simply call add: you first create the Sequential model (a stack of layers), and then add the network layers into it one by one.

def define_model():

    model = Sequential()

    # setup first conv layer
    model.add(Conv2D(32, (3, 3), activation="relu",
                     input_shape=(120, 120, 3), padding='same'))  # [10, 120, 120, 32]

    # setup first maxpooling layer
    model.add(MaxPooling2D(pool_size=(2, 2)))  # [10, 60, 60, 32]

    # setup second conv layer
    model.add(Conv2D(8, kernel_size=(3, 3), activation="relu",
                     padding='same'))  # [10, 60, 60, 8]

    # setup second maxpooling layer
    model.add(MaxPooling2D(pool_size=(3, 3)))  # [10, 20, 20, 8]

    # add flatten layer, 3200 = 20 * 20 * 8
    model.add(Flatten())  # [10, 3200]

    # add first full connection layer
    model.add(Dense(512, activation='sigmoid'))  # [10, 512]

    # add dropout layer
    model.add(Dropout(0.5))

    # add second full connection layer
    model.add(Dense(4, activation='softmax'))  # [10, 4]

    return model
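
For comparison, here is a minimal sketch of the same small CNN written with the Functional API mentioned above (the function name define_model_functional is only illustrative, not from the original post). This is the style you would extend for multi-input, multi-output, or branching models:

from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout

def define_model_functional():
    # same layers as define_model(), but each layer is called on the output tensor of the previous one
    inputs = Input(shape=(120, 120, 3))
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D(pool_size=(3, 3))(x)
    x = Flatten()(x)
    x = Dense(512, activation='sigmoid')(x)
    x = Dropout(0.5)(x)
    outputs = Dense(4, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)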
 

#Below is the code for training the model. The data-loading part depends on the format of your own data and has to be adapted, so if you need it, go to the link at the top of this post.
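
As a rough illustration only (not from the original post), a loader for real images might look like the sketch below. It assumes the images are stored in one sub-folder per class, uses Pillow for reading and resizing, and the function name load_image_folder is made up:

import os
import numpy as np
from PIL import Image
from keras.utils import np_utils

def load_image_folder(datadir, img_size=(120, 120), num_classes=4):
    # assumption: datadir contains one sub-folder per class, e.g. datadir/0, datadir/1, ...
    X, Y = [], []
    for label, classname in enumerate(sorted(os.listdir(datadir))):
        classdir = os.path.join(datadir, classname)
        for fname in sorted(os.listdir(classdir)):
            img = Image.open(os.path.join(classdir, fname)).convert('RGB').resize(img_size)
            X.append(np.asarray(img, dtype='float32'))
            Y.append(label)
    X = np.stack(X)                              # shape (N, 120, 120, 3), as expected by define_model()
    Y = np_utils.to_categorical(Y, num_classes)  # one-hot labels, shape (N, 4)
    return X, Y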

#Training the model involves the following steps:

    1 Build the network model defined above

    2 Define the optimizer. How is it used? As in PyTorch this is just an import: from keras.optimizers import SGD

    3 Compile the model, i.e. attach the loss and the optimizer to it so that the model can be trained afterwards

    4 If you want your own loss function you have to define it yourself; the common ones are already included in Keras, e.g. from keras.losses import categorical_crossentropy (a custom-loss sketch follows this list)

    5 model.summary()

    6 model.fit()

    7 model.evaluate()
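
For step 4, the sketch below shows what a hand-written loss could look like; the name my_categorical_crossentropy is only illustrative, and it simply mirrors the built-in categorical cross-entropy using the Keras backend:

from keras import backend as K

def my_categorical_crossentropy(y_true, y_pred):
    # clip predictions so that log(0) cannot occur
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    # per-sample categorical cross-entropy, same convention as the built-in loss
    return -K.sum(y_true * K.log(y_pred), axis=-1)

# the function object is then passed to compile() exactly like the built-in loss:
# model.compile(loss=my_categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])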

 

 

def train_model(resultpath):
    model = define_model()

    # to use SGD instead of Adam, define sgd here and pass optimizer=sgd to compile() below
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0, nesterov=True)

    # select the loss and the optimizer
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(), metrics=['accuracy'])
    model.summary()

    # draw the model structure
    plot_model(model, show_shapes=True,
               to_file=os.path.join(resultpath, 'model.png'))

    # load data
    X, Y = load_data(resultpath)

    # split train and test data
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.2, random_state=2)

    # input data to model and train
    history = model.fit(X_train, Y_train, batch_size=2, epochs=10,
                        validation_data=(X_test, Y_test), verbose=1, shuffle=True)

    # evaluate the model
    loss, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test loss:', loss)
    print('Test accuracy:', acc)

 

The complete code, including saving and loading the model, is as follows:

from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
from keras.optimizers import SGD
from keras.models import model_from_json
from keras.models import load_model
from keras.utils import np_utils
import numpy as np
import os
from sklearn.model_selection import train_test_split

def load_data(resultpath):
    datapath = os.path.join(resultpath, "data10_4.npz")
    if os.path.exists(datapath):
        data = np.load(datapath)
        X, Y = data["X"], data["Y"]
    else:
        X = np.array(np.arange(432000)).reshape(10, 120, 120, 3)
        Y = [0, 0, 1, 1, 2, 2, 3, 3, 2, 0]
        X = X.astype('float32')
        Y = np_utils.to_categorical(Y, 4)
        np.savez(datapath, X=X, Y=Y)
        print('Saved dataset to data10_4.npz.')
    print('X_shape:{}\nY_shape:{}'.format(X.shape, Y.shape))
    return X, Y

def define_model():
    model = Sequential()

    # setup first conv layer
    model.add(Conv2D(32, (3, 3), activation="relu",
                     input_shape=(120, 120, 3), padding='same'))  # [10, 120, 120, 32]

    # setup first maxpooling layer
    model.add(MaxPooling2D(pool_size=(2, 2)))  # [10, 60, 60, 32]

    # setup second conv layer
    model.add(Conv2D(8, kernel_size=(3, 3), activation="relu",
                     padding='same'))  # [10, 60, 60, 8]

    # setup second maxpooling layer
    model.add(MaxPooling2D(pool_size=(3, 3)))  # [10, 20, 20, 8]

    # add flatten layer, 3200 = 20 * 20 * 8
    model.add(Flatten())  # [10, 3200]

    # add first full connection layer
    model.add(Dense(512, activation='sigmoid'))  # [10, 512]

    # add dropout layer
    model.add(Dropout(0.5))

    # add second full connection layer
    model.add(Dense(4, activation='softmax'))  # [10, 4]

    return model

def train_model(resultpath):
    model = define_model()

    # to use SGD instead of Adam, define sgd here and pass optimizer=sgd to compile() below
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0, nesterov=True)

    # select the loss and the optimizer
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(), metrics=['accuracy'])
    model.summary()

    # draw the model structure
    plot_model(model, show_shapes=True,
               to_file=os.path.join(resultpath, 'model.png'))

    # load data
    X, Y = load_data(resultpath)

    # split train and test data
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.2, random_state=2)

    # input data to model and train
    history = model.fit(X_train, Y_train, batch_size=2, epochs=10,
                        validation_data=(X_test, Y_test), verbose=1, shuffle=True)

    # evaluate the model
    loss, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test loss:', loss)
    print('Test accuracy:', acc)

    return model

def my_save_model(resultpath):

    model = train_model(resultpath)

    # the first way: save the whole model to a single HDF5 file
    model.save(os.path.join(resultpath, 'my_model.h5'))

    # the second way: save the network structure and the weights separately
    model_json = model.to_json()
    open(os.path.join(resultpath, 'my_model_structure.json'), 'w').write(model_json)
    model.save_weights(os.path.join(resultpath, 'my_model_weights.hd5'))

def my_load_model(resultpath):

    # test data
    X = np.array(np.arange(86400)).reshape(2, 120, 120, 3)
    Y = [0, 1]
    X = X.astype('float32')
    Y = np_utils.to_categorical(Y, 4)

    # the first way: load the whole model from the HDF5 file
    model2 = load_model(os.path.join(resultpath, 'my_model.h5'))
    model2.compile(loss=categorical_crossentropy,
                   optimizer=Adam(), metrics=['accuracy'])

    test_loss, test_acc = model2.evaluate(X, Y, verbose=0)
    print('Test loss:', test_loss)
    print('Test accuracy:', test_acc)

    y = model2.predict_classes(X)
    print("predicct is: ", y)

    # the second way: load the network structure and the weights separately
    model = model_from_json(open(os.path.join(resultpath, 'my_model_structure.json')).read())
    model.load_weights(os.path.join(resultpath, 'my_model_weights.hd5'))
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(), metrics=['accuracy'])

    test_loss, test_acc = model.evaluate(X, Y, verbose=0)
    print('Test loss:', test_loss)
    print('Test accuracy:', test_acc)

    y = model.predict_classes(X)
    print("predicct is: ", y)

def main():
    resultpath = "result"
    # note: run my_save_model(resultpath) first so that the saved model files exist in resultpath
    #train_model(resultpath)
    #my_save_model(resultpath)
    my_load_model(resultpath)


if __name__ == "__main__":
    main()
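
One caveat if this script is run on newer versions of TensorFlow/Keras: Sequential.predict_classes has been removed there, so the two predict_classes calls above would fail. A drop-in replacement (my adaptation, not part of the original post) is to take the argmax of the ordinary predict() output, reusing model2, X and np from the script above:

# instead of: y = model2.predict_classes(X)
probs = model2.predict(X)       # per-class probabilities, shape (N, 4)
y = np.argmax(probs, axis=-1)   # index of the most likely class for each sample
print("prediction is: ", y)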

 

 

 

 
