How do I test real-time prediction with a neural network?

Problem description

Since I'm new here, I'm asking a fairly general question. I built an MLP neural network model using deep learning. I'm using a standard dataset that I downloaded here: enter link description here. From a statistical point of view, my accuracy and F1 score show amazing output. Now I need to test the program with real-time data. I would be glad to hear your suggestions on how to perform real-time prediction with a neural network.

import pandas as pd
from pandas import DataFrame
import numpy as np

from matplotlib import pyplot as plt
from sklearn.model_selection import GridSearchCV,train_test_split
from sklearn.metrics import confusion_matrix,accuracy_score,roc_curve,auc
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier

########################################################################################
db = pd.read_csv(r"C:\Users\cert 3\Desktop\Vasou\proposal\code\StackOverFlow\UDP-Flood-CSV.csv")


X = db.iloc[:, 0:4]
y = db.iloc[:, 4]
m, n =  X.shape
MG = X
X = preprocessing.scale(X)

encoder = LabelEncoder()
encoder.fit(y)
encoded_y = encoder.transform(y)
y = to_categorical(encoded_y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
########################################################################################
mlp = MLPClassifier()
parameter_space = {'hidden_layer_sizes': [(8,12,4), (5,5,2), (4,4,4)],
                   'activation': ['tanh', 'relu'],
                   'solver': ['sgd', 'adam'],
                   'alpha': [0.001,0.01, 0.05, 0.1],
                   'learning_rate': ['constant','adaptive'],
                   'max_iter':[20,50,100]
                   }
clf = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=3,return_train_score=True)
clf.fit(X_train, y_train)
print('Best parameters found:\n', clf.best_params_, clf.best_score_)
#######################################################################################
cvr = clf.cv_results_
df = DataFrame(cvr)
scores = df['mean_test_score']
h = df['param_hidden_layer_sizes']
alpha = df['param_alpha']
optim = df['param_solver']
l_rate = df['param_learning_rate']
activ = df['param_activation']
itr = df['param_max_iter']
dh = DataFrame({'Scores': scores, 'Iteration': itr, 'Hidden_Layers': h, 'alpha': alpha,
                'Solver': optim, 'Learning_Rate': l_rate, 'Activation': activ})

########################################################################################
model = Sequential()
model.add(Dense(8, input_dim=n, kernel_initializer='uniform', activation='tanh'))
model.add(Dense(12, activation='tanh'))
model.add(Dense(4, activation='tanh'))
model.add(Dense(2, activation='softmax'))  # softmax output pairs with categorical_crossentropy
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

########################################################################################
hist = model.fit(X_train, y_train, batch_size = 10, epochs = 100, validation_split=0.5)
scoress = model.evaluate(X, y, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scoress[1]*100))
print(hist.history) 
# save model and architecture to single file
model.save("model.h5")
model.save_weights("model_weight.h5")
print("saved model to disk")
# Plot training & validation accuracy values
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Training vs Test accuracy , DA')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training acc', 'Validation acc'], loc='best')
#plt.show()
#plt.figure()
plt.savefig('Accuracy.png', dpi=300, bbox_inches='tight')
plt.close()


# Plot training & validation loss values
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Training vs Test Loss , DA')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training loss', 'Validation loss'], loc='best')
#plt.show()
#plt.figure()
plt.savefig('Loss.png', dpi=300, bbox_inches='tight')
plt.close()

##########################################################################################
y_score = model.predict(X_test)
org = np.zeros((y_test.shape[0]))
prd = np.zeros((y_score.shape[0]))
def decode(datum):
    return np.argmax(datum)
for i in range(y_score.shape[0]):
    prd[i] = decode(y_score[i])
for j in range(y_test.shape[0]):
    org[j] = decode(y_test[j])

cm = confusion_matrix(org, prd)
print("Confusion matrix of MLP: ", "\n", cm)

f = open("output.txt", "a")
print('Accuracy Score : ' + str(accuracy_score(org,prd)), file=f)
f.close()

##########################################################################################
def generate_results(y_test, y_score):
    fpr, tpr, _ = roc_curve(y_test, y_score)
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic curve')
    #plt.show()
    plt.savefig('False and True comparison.png', dpi=300, bbox_inches='tight')
    print('AUC: %f' % roc_auc)

print('Generating results')
generate_results(y_test[:, 0], y_score[:, 0])

This is my Python code.

python neural-network real-time prediction
1 Answer

If you want to use this code on demand, you can feed it through the shell's < input-redirection operator, e.g.: python script.py < your_streamer

Finally, you have to choose a special character for the end of each packet so you can tell when the whole packet has been captured.

In Python, input() is a good choice, using \n as the delimiter.

script.py

while True:
    try:
        X = np.array(input().split(','), dtype=float)   # one comma-separated sample per line
    except EOFError:
        break                                           # stream ended
    y = model.predict(X.reshape(1, -1))                 # predict on a batch of one sample
    print(X, y)
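
Note that script.py assumes model is already loaded and that incoming samples are on the same scale as the training data; the question's code scales the features with preprocessing.scale and saves the network to model.h5. A minimal sketch of how the top of script.py could reload both, assuming a scaler was fitted on the unscaled training features and saved as scaler.pkl at training time (the scaler file and that extra training step are assumptions, not part of the original code):

import joblib
import numpy as np
from keras.models import load_model

# model.h5 comes from model.save(...) in the question's code.
# scaler.pkl is assumed to be a StandardScaler fitted on the training
# features and saved with joblib.dump(scaler, "scaler.pkl") at training time,
# so live samples can be transformed with the same statistics.
model = load_model("model.h5")
scaler = joblib.load("scaler.pkl")

sample = np.array([0.218, 0.7451, 0.7451, 0.574]).reshape(1, -1)
probs = model.predict(scaler.transform(sample))
print(probs.argmax(axis=1))   # index of the predicted class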

Using file.txt as the streamer:

0.218,0.7451,0.7451,0.574
0.215,0.8854,0.7451,0.745
0.275,0.5744,0.7451,0.574
0.751,0.5744,0.2150,0.885
...
...
...

$ python script.py < file.txt
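
If the data arrives over a network connection rather than a file or pipe, the same idea applies: read bytes into a buffer and split on the end-of-packet character mentioned above. A minimal sketch, assuming newline-terminated, comma-separated records arriving on TCP port 9999 (host, port, and framing are assumptions, not from the original answer):

import socket
import numpy as np
from keras.models import load_model

model = load_model("model.h5")                # trained model from the question's code
HOST, PORT = "0.0.0.0", 9999                  # assumed address of the streamer

with socket.create_server((HOST, PORT)) as srv:
    conn, _ = srv.accept()
    buffer = b""
    while True:
        chunk = conn.recv(4096)
        if not chunk:
            break                             # sender closed the connection
        buffer += chunk
        # '\n' plays the role of the end-of-packet character
        while b"\n" in buffer:
            line, buffer = buffer.split(b"\n", 1)
            X = np.array(line.decode().split(','), dtype=float).reshape(1, -1)
            # apply the same scaling transform here as in the sketch above, if used
            print(model.predict(X))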
