modity #168

Closed · wants to merge 4 commits
2 changes: 2 additions & 0 deletions README.md
@@ -1,6 +1,8 @@
# Neural Network Agent Implementation




Implements perception, planning, and control for vehicles and pedestrians based on Carla.

## Environment Setup
3 changes: 0 additions & 3 deletions src/chap01_warmup/numpy_tutorial.py
@@ -1,10 +1,7 @@
#!/usr/bin/env python3
# coding: utf-8

# NumPy exercises

# NumPy array operations

# 1. Import the NumPy library
import numpy as np

64 changes: 22 additions & 42 deletions src/chap02_linear_regression/exercise-linear_regression.py
@@ -6,8 +6,6 @@
# Complete the blanks in numbered order: parameter optimization and the implementation of the different basis functions

# In[1]:


import numpy as np
import matplotlib.pyplot as plt

@@ -28,29 +26,25 @@ def load_data(filename):
# The x values of the training set lie in the range 0 to 25

# In[6]:


def identity_basis(x):  # identity basis function
    ret = np.expand_dims(x, axis=1)
    return ret

def multinomial_basis(x, feature_num=10):  # polynomial basis function
    x = np.expand_dims(x, axis=1)  # shape (N, 1)
    #==========
    #todo '''implement the polynomial basis function'''
    # broadcasting (N, 1) against (feature_num,) yields powers x^0 ... x^(feature_num-1)
    ret = np.power(x, np.arange(feature_num))
    #==========
    return ret

def gaussian_basis(x, feature_num=10):  # Gaussian basis function
    #==========
    #todo '''implement the Gaussian basis function'''
    centers = np.linspace(0, 25, feature_num)  # centers spread evenly over 0-25, the data range
    width = centers[1] - centers[0]            # width of each Gaussian
    ret = np.exp(-0.5 * np.power((x[:, np.newaxis] - centers) / width, 2))
    #==========
    return ret
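
# Quick shape check (an illustrative sketch, not part of this diff): each basis
# function maps inputs of shape (N,) to a feature block of shape (N, feature_num).
x_demo = np.linspace(0, 25, 5)          # 5 sample points in the data range
print(multinomial_basis(x_demo).shape)  # -> (5, 10)
print(gaussian_basis(x_demo).shape)     # -> (5, 10)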


@@ -65,14 +59,12 @@ def gaussian_basis(x, feature_num=10):
# Compute an optimized w; optimize w using both least squares and gradient descent

# In[7]:


def main(x_train, y_train):
    """
    Train the model and return the mapping from x to y.
    """
    basis_func = gaussian_basis  # a different basis function can be selected here
    phi0 = np.expand_dims(np.ones_like(x_train), axis=1)
    phi1 = basis_func(x_train)
    phi = np.concatenate([phi0, phi1], axis=1)
@@ -81,19 +73,24 @@ def main(x_train, y_train):
    #==========
    #todo '''compute an optimized w; optimize w with both least squares and gradient descent'''
    # NOTE: 'method' is assumed to be set in the collapsed code above,
    # e.g. method = 'least_squares' or method = 'gradient_descent'
    if method == 'least_squares':
        # least squares: solve for w via the pseudo-inverse computed by np.linalg.pinv(phi)
        w = np.dot(np.linalg.pinv(phi), y_train)
    elif method == 'gradient_descent':
        # gradient descent
        learning_rate = 0.01
        epochs = 1000
        w = np.zeros(phi.shape[1])
        for epoch in range(epochs):
            y_pred = np.dot(phi, w)
            error = y_pred - y_train
            gradient = np.dot(phi.T, error) / len(y_train)
            w -= learning_rate * gradient
            # print the loss every 100 iterations
            if epoch % 100 == 0:
                loss = np.mean(error**2)
                print(f'Epoch {epoch}, Loss: {loss:.4f}')
    #==========

    def f(x):
@@ -110,8 +107,6 @@ def f(x):
# > No code needs to be filled in here, but reading through it is recommended

# In[ ]:


def evaluate(ys, ys_pred):
    """Evaluate the model."""
    # root-mean-square deviation between predictions and ground truth
    std = np.sqrt(np.mean(np.abs(ys - ys_pred) ** 2))
@@ -128,7 +123,7 @@ def evaluate(ys, ys_pred):
print(x_test.shape)

# Train the model with linear regression; returns a function f() such that y = f(x)
f = main(x_train, y_train)

y_train_pred = f(x_train)
std = evaluate(y_train, y_train_pred)
Expand All @@ -148,18 +143,3 @@ def evaluate(ys, ys_pred):
plt.title('gaussian_basis')
plt.legend()
plt.show()
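
# Sanity check of the closed-form fit (an illustrative sketch, not part of this
# diff): for phi of shape (N, D), w = np.linalg.pinv(phi) @ y minimizes
# ||phi @ w - y||^2.
phi_demo = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # bias column + x
y_demo = np.array([1.0, 3.0, 5.0])                         # generated by y = 1 + 2x
print(np.dot(np.linalg.pinv(phi_demo), y_demo))            # -> approximately [1. 2.]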


46 changes: 11 additions & 35 deletions src/chap02_linear_regression/linear_regression-tf2.0.py
@@ -4,8 +4,6 @@
# ## Design the basis functions and read the data

# In[20]:


import numpy as np
import matplotlib.pyplot as plt

@@ -37,6 +35,7 @@ def load_data(filename, basis_func=gaussian_basis):
    with open(filename, 'r') as f:
        for line in f:
            # Change 1: wrap map() in list() -- map returns a lazy, single-use
            # iterator in Python 3, so convert it before storing
            xys.append(list(map(float, line.strip().split())))
        xs, ys = zip(*xys)
        xs, ys = np.asarray(xs), np.asarray(ys)
@@ -50,8 +49,6 @@ def load_data(filename, basis_func=gaussian_basis):
# ## Define the model

# In[21]:


import tensorflow as tf
from tensorflow.keras import optimizers, layers, Model

@@ -60,9 +57,9 @@ def __init__(self, ndim):
        super(linearModel, self).__init__()
        self.w = tf.Variable(
            shape=[ndim, 1],
            initial_value=tf.random.uniform(  # Change 2: more standard initialization
                [ndim, 1], minval=-0.1, maxval=0.1, dtype=tf.float32),
            trainable=True)

    @tf.function
    def call(self, x):
        y = tf.squeeze(tf.matmul(x, self.w), axis=1)
@@ -77,16 +74,15 @@ def call(self, x):
# ## Training and evaluation

# In[26]:


optimizer = optimizers.Adam(0.1)

@tf.function
def train_one_step(model, xs, ys):
    with tf.GradientTape() as tape:
        y_preds = model(xs)
        loss = tf.reduce_mean(tf.sqrt(1e-12 + (ys - y_preds)**2))
    # Change 3: use trainable_variables; the gradients are taken with respect to
    # the same list so that zip() pairs each gradient with its variable
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
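
# Minimal pattern check (an illustrative sketch, not part of this diff):
# gradients and variables must come from the same list so that zip() pairs
# each gradient with its matching variable.
v_demo = tf.Variable(2.0)
opt_demo = optimizers.Adam(0.1)
with tf.GradientTape() as tape_demo:
    loss_demo = v_demo * v_demo
grads_demo = tape_demo.gradient(loss_demo, [v_demo])
opt_demo.apply_gradients(zip(grads_demo, [v_demo]))
print(v_demo.numpy())  # roughly 1.9 after one Adam step with learning rate 0.1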

@tf.function
@@ -101,47 +97,27 @@ def evaluate(ys, ys_pred):


# In[27]:


for i in range(1000):
    loss = train_one_step(model, xs, ys)
    if i % 100 == 0:  # Change 4: report the loss every 100 steps
        print(f'Step {i}, loss: {loss:.4f}')


y_preds = predict(model, xs)
std = evaluate(ys, y_preds)
print('Training set: standard deviation between predictions and ground truth: {:.1f}'.format(std))

(xs_test, ys_test), (o_x_test, o_y_test) = load_data('test.txt')

y_test_preds = predict(model, xs_test)
std = evaluate(ys_test, y_test_preds)
print('Test set: standard deviation between predictions and ground truth: {:.1f}'.format(std))  # Change 5: corrected the output label

plt.figure(figsize=(8, 5))  # Change 6: adjust the figure size
plt.plot(o_x, o_y, 'ro', markersize=3)
plt.plot(o_x_test, y_test_preds, 'k')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Linear Regression')
plt.legend(['train', 'pred'])  # only two artists are plotted above
plt.grid(True)  # Change 8: add grid lines
plt.show()
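
# Why Change 1's list() conversion matters (an illustrative sketch, not part of
# this diff): a map object is a single-use iterator and is empty after one pass.
m_demo = map(float, '1 2 3'.split())
print(list(m_demo))  # [1.0, 2.0, 3.0]
print(list(m_demo))  # [] -- the iterator is already consumed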

