1.自信息:
2.信息熵
3.P对Q的KL散度(相对熵)
证明KL散度大于等于0
4.交叉熵
可看出交叉熵==信息熵+相对熵
数据集地址:水果数据集_机器学习水果识别,水果分类数据集-机器学习其他资源-CSDN下载
一,类别型特征和有序性特征 ,转变成onehot
def one_hot():
    """Demonstrate encoding of a categorical and an ordinal feature.

    Column 0 (gender) is label-encoded and then one-hot encoded; column 1
    (level: low/middle/high) is label-encoded only, since it is ordinal.
    Both encoders are fit on the training data and merely re-applied to
    the test data, so train and test share one encoding.

    Returns:
        tuple: (X_train_enc, X_test_enc) encoded feature matrices
        (the original printed them but returned nothing).
    """
    # Local import: the file-level `import numpy as np` appears *after*
    # this function in the source, so make the dependency explicit here.
    import numpy as np
    from sklearn.preprocessing import LabelEncoder, OneHotEncoder

    # Toy data: each row is [gender, level].
    X_train = np.array([['male', 'low'],
                        ['female', 'low'],
                        ['female', 'middle'],
                        ['male', 'low'],
                        ['female', 'high'],
                        ['male', 'low'],
                        ['female', 'low'],
                        ['female', 'high'],
                        ['male', 'low'],
                        ['male', 'high']])
    X_test = np.array([['male', 'low'],
                       ['male', 'low'],
                       ['female', 'middle'],
                       ['female', 'low'],
                       ['female', 'high']])

    label_enc1 = LabelEncoder()    # digit-encode male/female
    one_hot_enc = OneHotEncoder()  # turn those digits into one-hot vectors
    label_enc2 = LabelEncoder()    # digit-encode low/middle/high

    # --- fit + transform on the training set ---
    tr_feat1_tmp = label_enc1.fit_transform(X_train[:, 0]).reshape(-1, 1)  # column vector
    tr_feat1 = one_hot_enc.fit_transform(tr_feat1_tmp)
    # np.asarray: keep a plain ndarray rather than the np.matrix that
    # `todense()` returns, so the later hstack yields an ndarray too.
    tr_feat1 = np.asarray(tr_feat1.todense())
    print('=====male female one hot====')
    print(tr_feat1)
    tr_feat2 = label_enc2.fit_transform(X_train[:, 1]).reshape(-1, 1)
    print('===low middle high class====')
    print(tr_feat2)
    X_train_enc = np.hstack((tr_feat1, tr_feat2))
    print('=====train encode====')
    print(X_train_enc)

    # --- transform only on the test set (no re-fitting) ---
    te_feat1_tmp = label_enc1.transform(X_test[:, 0]).reshape(-1, 1)
    te_feat1 = np.asarray(one_hot_enc.transform(te_feat1_tmp).todense())
    te_feat2 = label_enc2.transform(X_test[:, 1]).reshape(-1, 1)
    X_test_enc = np.hstack((te_feat1, te_feat2))
    print('====test encode====')
    print(X_test_enc)
    return X_train_enc, X_test_enc
二,特征归一化
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
def load_data(path='fruit_data_with_colors.txt'):
    """Load the fruit dataset and split it into train/test sets.

    Args:
        path: tab-separated data file; the default keeps the original
            hard-coded file name, so existing zero-argument calls work.

    Returns:
        X_train, X_test, y_train, y_test
    """
    fruits_df = pd.read_table(path)
    print('样本个数:', len(fruits_df))
    # Mapping fruit_label -> fruit_name; computed in the original but
    # never used here — kept for parity / future reporting.
    fruit_name_dict = dict(zip(fruits_df['fruit_label'], fruits_df['fruit_name']))
    # Features and target.
    X = fruits_df[['mass', 'width', 'height', 'color_score']]
    y = fruits_df['fruit_label']
    # Fixed random_state keeps the 3:1 split reproducible.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/4, random_state=0)
    print('数据集样本数:{},训练集样本数:{},测试集样本数:{}'.format(len(X), len(X_train), len(X_test)))
    return X_train, X_test, y_train, y_test
#特征归一化
def minmax_scaler(X_train, X_test):
    """Scale features to [0, 1] with a MinMaxScaler fit on the train set.

    Args:
        X_train: training features (DataFrame — `.iloc` is used below).
        X_test: test features.

    Returns:
        (X_train_scaled, X_test_scaled): scaled ndarrays.
    """
    scaler = MinMaxScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    # The per-feature min/max come from the training data only; the test
    # set is just transformed, avoiding information leakage.
    X_test_scaled = scaler.transform(X_test)
    # Report each feature's range before/after scaling (generalized from
    # the original hard-coded `range(4)` to however many columns exist).
    for i in range(X_train.shape[1]):
        print('归一化前,训练数据第{}维特征最大值:{:.3f},最小值:{:.3f}'.format(i + 1,
                                                                              X_train.iloc[:, i].max(),
                                                                              X_train.iloc[:, i].min()))
        print('归一化后,训练数据第{}维特征最大值:{:.3f},最小值:{:.3f}'.format(i + 1,
                                                                              X_train_scaled[:, i].max(),
                                                                              X_train_scaled[:, i].min()))
    return X_train_scaled, X_test_scaled
def show_3D(X_train, X_train_scaled, y_train=None):
    """Scatter-plot width/height/color_score in 3D, before and after scaling.

    Args:
        X_train: original training features (DataFrame).
        X_train_scaled: min-max scaled features (ndarray; columns 1-3 are
            width, height, color_score, matching minmax_scaler's output).
        y_train: labels used to colour the points. The original read a
            module-global ``y_train`` that is never defined at file level
            (NameError); it is now an explicit argument.

    Raises:
        ValueError: if y_train is not supplied.
    """
    if y_train is None:
        raise ValueError('y_train must be provided to colour the points')
    label_color_dict = {1: 'red', 2: 'green', 3: 'blue', 4: 'yellow'}
    colors = list(map(lambda label: label_color_dict[label], y_train))
    print(colors)
    print(len(colors))
    # Raw (unscaled) features.
    fig = plt.figure()
    # NOTE(review): aspect='equal' on 3D axes raises in recent matplotlib
    # versions — confirm the pinned matplotlib supports it.
    ax1 = fig.add_subplot(111, projection='3d', aspect='equal')
    ax1.scatter(X_train['width'], X_train['height'], X_train['color_score'], c=colors, marker='o', s=100)
    ax1.set_xlabel('width')
    ax1.set_ylabel('height')
    ax1.set_zlabel('color_score')
    plt.show()
    # Same points after min-max scaling.
    fig = plt.figure()
    ax2 = fig.add_subplot(111, projection='3d', aspect='equal')
    ax2.scatter(X_train_scaled[:, 1], X_train_scaled[:, 2], X_train_scaled[:, 3], c=colors, marker='o', s=100)
    ax2.set_xlabel('width')
    ax2.set_ylabel('height')
    ax2.set_zlabel('color_score')
    plt.show()
三,交叉验证
#交叉验证
def cross_val(X_train_scaled, y_train, X_test_scaled=None, y_test=None):
    """Pick K for KNN by 3-fold cross-validation, refit, optionally score.

    Args:
        X_train_scaled: scaled training features.
        y_train: training labels.
        X_test_scaled, y_test: optional test split. The original read
            these from undefined module globals (NameError); they are now
            explicit optional arguments, and the test score is printed
            only when both are supplied.

    Returns:
        The KNN model refit on the full training set with the best K
        (the original returned nothing).
    """
    k_range = [2, 4, 5, 10]
    cv_scores = []
    for k in k_range:
        knn = KNeighborsClassifier(n_neighbors=k)
        scores = cross_val_score(knn, X_train_scaled, y_train, cv=3)
        cv_score = np.mean(scores)
        print('k={},验证集上的准确率={:.3f}'.format(k, cv_score))
        cv_scores.append(cv_score)
    print('np.argmax(cv_scores)=', np.argmax(cv_scores))
    # Best K = the one with the highest mean CV accuracy.
    best_k = k_range[np.argmax(cv_scores)]
    best_knn = KNeighborsClassifier(n_neighbors=best_k)
    best_knn.fit(X_train_scaled, y_train)
    if X_test_scaled is not None and y_test is not None:
        print('测试集准确率:', best_knn.score(X_test_scaled, y_test))
    return best_knn
四,调用validation_curve 查看超参数对训练集和验证集的影响
# 调用validation_curve 查看超参数对训练集和验证集的影响
def show_effect(X_train_scaled, y_train):
from sklearn.model_selection import validation_curve
from sklearn.svm import SVC
c_range = [1e-3, 1e-2, 0.1, 1, 10, 100, 1000, 10000]
train_scores, test_scores = validation_curve(SVC(kernel='linear'), X_train_scaled, y_train,
param_name='C', param_range=c_range,
cv=5, scoring='accuracy')
print(train_scores)
print(train_scores.shape)
train_scores_mean = np.mean(train_scores, axis=1)
# print(train_scores_mean)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
#
plt.figure(figsize=(10, 8))
plt.title('Validation Curve with SVM')
plt.xlabel('C')
plt.ylabel('Score')
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(c_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(c_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(c_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(c_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
# 从上图可知对SVM,C=100为最优参数
svm_model = SVC(kernel='linear', C=1000)
svm_model.fit(X_train_scaled, y_train)
print(svm_model.score(X_test_scaled, y_test))
可看出,刚开始方差较大,然后模型趋于稳定,最后过拟合,C=100为最优参数。
五,grid_search,模型存储与加载
def grid_search(X_train_scaled, y_train, X_test_scaled=None, y_test=None):
    """Grid-search a decision tree's depth/leaf-size, return the best model.

    Args:
        X_train_scaled: scaled training features.
        y_train: training labels.
        X_test_scaled, y_test: optional test split. The original read
            these from undefined module globals (NameError); now explicit
            optional arguments, scored only when both are supplied.

    Returns:
        The best estimator refit by GridSearchCV on the full training set.
    """
    from sklearn.model_selection import GridSearchCV
    from sklearn.tree import DecisionTreeClassifier
    parameters = {'max_depth': [3, 5, 7, 9], 'min_samples_leaf': [1, 2, 3, 4]}
    # 3-fold CV over the 4x4 parameter grid, scored by accuracy.
    clf = GridSearchCV(DecisionTreeClassifier(), parameters, cv=3, scoring='accuracy')
    print(clf)
    clf.fit(X_train_scaled, y_train)
    print('最优参数:', clf.best_params_)
    print('验证集最高得分:', clf.best_score_)
    # GridSearchCV refits the best parameter combination on all of X_train.
    best_model = clf.best_estimator_
    if X_test_scaled is not None and y_test is not None:
        print('测试集上准确率:', best_model.score(X_test_scaled, y_test))
    return best_model
def model_save(best_model, model_path='./trained_model1.pkl'):
    """Persist a trained model to disk with pickle.

    Args:
        best_model: any picklable object (e.g. a fitted estimator).
        model_path: destination file; the default keeps the original
            hard-coded path, matching load_model's default.
    """
    import pickle
    # `with` guarantees the file handle is closed even if dump raises.
    with open(model_path, 'wb') as f:
        pickle.dump(best_model, f)
def load_model(X_test_scaled, y_test, model_path='./trained_model1.pkl'):
    """Load a pickled model and sanity-check it on the first test sample.

    Args:
        X_test_scaled: scaled test features (indexable as [0, :]).
        y_test: test labels (pandas Series — `.values` is used below).
        model_path: pickle file to load; default matches model_save.

    Returns:
        The loaded model (the original returned nothing).
    """
    import pickle
    # NOTE(review): pickle.load executes arbitrary code from the file —
    # only load files this program wrote itself, never untrusted input.
    with open(model_path, 'rb') as f:
        model = pickle.load(f)
    # Predict on the first test sample and compare with the ground truth.
    print('预测值为', model.predict([X_test_scaled[0, :]]))
    print('真实值为', y_test.values[0])
    return model
1,应用场景举例:气象监测,测量风速: 2,建模: 设激光波束与扫描圆锥中轴夹角为,激光波束聚焦点距离为R(即单波束测程);四个波束聚焦点以90度为间隔分布在扫描圆周上,且上方两波束聚焦点连线水平,下方两波束聚焦点也水平;上方或下方两波束所在平面与扫描圆锥中轴的夹角为a,上方或下方两波束之间夹角为b,扫描圆锥底圆半径为r;扫描圆锥底圆上垂直的两个半径构成直角三角形,其长边上的高为h。如图 90...
最早我是想通过dispatchAction方法去改变选中的省份,但是没有起作用,如果你知道这个方法怎么实现,麻烦你可以告诉我。我实现的方法是另外一种。dispatchAction({ type: 'geoSelect', // 可选,系列 index,可以是一个数组指定多个系列 seriesIndex?: num...
队名火箭少男100组长博客林燊大哥作业博客Alpha 冲鸭鸭鸭鸭!成员冲刺阶段情况林燊(组长)过去两天完成了哪些任务协调各成员之间的工作协助前后端接口的开发测试项目运行的服务器环境训练CTPN模型展示GitHub当日代码/文档签入记录(组内共享)接下来的计划协助算法迁移学习的强分类器以及弱分类器实现扩充数据集并且训练模型优化各个模型结构...
1:Q_INIT_RESOURCE(spreadsheet) //将spreadsheet.qrc这资源文件转换为相应代码,参与应用程序的编译, spreadsheet.qrc包括了图片文件、库文件等,参考http://blog.csdn.net/calm_agan/article/details/8046422。2:QLineEdit //QLineEdit是widget的一个行
通过一段时间测试,发现需要细化一下。一,osgearth源码好像调试的人不多,但是还要进行下去,不能半途而废。这个每天1blog即可。但是要慢慢细调。把重点放在抄源码上,以前搞shader时,因为谁也没吃透cesium源码,所以,shader也加不上去,尴尬了。在...
本篇文章主要是由于计划使用django写一个计划任务出来,可以定时的轮换值班人员名称或者定时执行脚本等功能,百度无数坑之后,终于可以凑合把这套东西部署上。本人英文不好,英文好或者希望深入学习或使用的人,建议去参考官方文档,而且本篇的记录不一定正确,仅仅实现crontab 的功能而已。希望深入学习的人可以参考http://docs.jinkan.org/docs/celery/。首先简单介绍...
回调函数是什么在学习之前还真不知道js回调函数怎么使用及作用了,下面本文章把我在学习回调函数例子给各位同学介绍一下吧,有需了解的同学不防进入参考。回调函数原理:我现在出发,到了通知你”这是一个异步的流程,“我出发”这个过程中(函数执行),“你”可以去做任何事,“到了”(函数执行完毕)“通知你”(回调)进行之后的流程例子1.基本方法function doSomething(callback) {//...
本文介绍EXIF中都保存了哪些数据,这些数据的内容和格式,最后介绍一下EXIF中的缩略图信息。EXIF JPEG文件将EXIF信息存储在JFIF的APP1标记中。EXIF主要保存拍摄照片时的相机参数、图像缩略图、GPS信息等。
点击蓝色“有关SQL”关注我哟加个“星标”,天天与10000人一起快乐成长图 | L近些年,很多数据库公司上岸,MongoDB市值 216亿美金,Cloudera 市值 45亿美金。而其...
Author:DriverMonkeyMail:[email protected]:13410905075QQ:196568501
PS部分快捷键:1.魔棒的作用:比较快捷的抠图工具,对于一些分界线比较明显的图像,通过魔棒工具可以很快速的将图像抠出,魔棒的作用是可以知道你点击的那个地方的颜色,并自动获取附近区域相同的颜色,使它们处于选择状态;2.橡皮擦的作用:像皮擦工具就是把你不要的那块给擦除,你自己可以试下,用它擦图的时候,擦过的地区会是透明的,如果下面还有一层图的话,会透出下层图!!;3....
本题要求编写程序,计算2个正整数的和、差、积、商并输出。题目保证输入和输出全部在整型范围内。输入格式:输入在一行中给出2个正整数A和B。输出格式:在4行中按照格式“A 运算符 B = 结果”顺序输出和、差、积、商。输入样例:3 2输出样例:3 + 2 = 53 - 2 = 13 * 2 = 63 / 2 = 1代码:# include <stdio.h># include <stdlib.h>int main(){ int A,B;